From 73287b2b576fa6a1ff1b61467286cbd5458881d2 Mon Sep 17 00:00:00 2001 From: antirez Date: Tue, 18 May 2010 00:36:48 +0200 Subject: [PATCH] buliding of release.h moved into an external script. Avoided recompialtion of redis.c if git sha1 is the same as the previous one --- Changelog | 83 +++++++++ Makefile | 10 +- doc/AppendOnlyFileHowto.html | 42 ----- doc/AuthCommand.html | 39 ---- doc/Benchmarks.html | 129 ------------- doc/BgrewriteaofCommand.html | 41 ----- doc/BgsaveCommand.html | 39 ---- doc/CommandReference.html | 47 ----- doc/Comparisons.html | 42 ----- doc/Configuration.html | 38 ---- doc/ConnectionHandlingSidebar.html | 36 ---- doc/ControlCommandsSidebar.html | 36 ---- doc/Credits.html | 38 ---- doc/DbsizeCommand.html | 38 ---- doc/DelCommand.html | 42 ----- doc/DesignPatterns.html | 37 ---- doc/ExistsCommand.html | 42 ----- doc/ExpireCommand.html | 86 --------- doc/FAQ.html | 70 -------- doc/Features.html | 38 ---- doc/FlushallCommand.html | 39 ---- doc/FlushdbCommand.html | 39 ---- doc/FromSqlToDataStructures.html | 37 ---- doc/GenericCommandsSidebar.html | 36 ---- doc/GetCommand.html | 39 ---- doc/GetsetCommand.html | 38 ---- doc/IncrCommand.html | 43 ----- doc/InfoCommand.html | 48 ----- doc/IntroductionToRedisDataTypes.html | 153 ---------------- doc/KeysCommand.html | 42 ----- doc/LastsaveCommand.html | 39 ---- doc/LindexCommand.html | 41 ----- doc/ListCommandsSidebar.html | 36 ---- doc/Lists.html | 42 ----- doc/LlenCommand.html | 41 ----- doc/LpopCommand.html | 41 ----- doc/LrangeCommand.html | 47 ----- doc/LremCommand.html | 41 ----- doc/LsetCommand.html | 38 ---- doc/LtrimCommand.html | 47 ----- doc/MgetCommand.html | 52 ------ doc/MonitorCommand.html | 63 ------- doc/MoveCommand.html | 42 ----- doc/MsetCommand.html | 44 ----- doc/ObjectHashMappers.html | 39 ---- doc/Pipelining.html | 36 ---- doc/ProgrammingExamples.html | 38 ---- doc/ProtocolSpecification.html | 142 --------------- doc/QuickStart.html | 68 ------- doc/QuitCommand.html | 38 ---- doc/README.html | 88 --------- doc/RandomkeyCommand.html | 39 ---- doc/Redis0100ChangeLog.html | 67 ------- doc/Redis0900ChangeLog.html | 56 ------ doc/Redis_1_2_0_Changelog.html | 40 ----- doc/RenameCommand.html | 39 ---- doc/RenamenxCommand.html | 42 ----- doc/ReplicationHowto.html | 43 ----- doc/ReplyTypes.html | 42 ----- doc/RoadMap.html | 38 ---- doc/RpoplpushCommand.html | 44 ----- doc/RpushCommand.html | 40 ----- doc/SaddCommand.html | 41 ----- doc/SaveCommand.html | 39 ---- doc/ScardCommand.html | 41 ----- doc/SdiffCommand.html | 45 ----- doc/SdiffstoreCommand.html | 38 ---- doc/SelectCommand.html | 39 ---- doc/SetCommand.html | 39 ---- doc/SetCommandsSidebar.html | 36 ---- doc/SetnxCommand.html | 51 ------ doc/Sets.html | 36 ---- doc/ShutdownCommand.html | 39 ---- doc/SideBar.html | 36 ---- doc/SinterCommand.html | 40 ----- doc/SinterstoreCommand.html | 39 ---- doc/SismemberCommand.html | 42 ----- doc/SlaveofCommand.html | 41 ----- doc/SmembersCommand.html | 38 ---- doc/SmoveCommand.html | 44 ----- doc/SortCommand.html | 75 -------- doc/SortedSetCommandsSidebar.html | 36 ---- doc/SortedSets.html | 36 ---- doc/Speed.html | 38 ---- doc/SponsorshipHistory.html | 38 ---- doc/SpopCommand.html | 40 ----- doc/SrandmemberCommand.html | 40 ----- doc/SremCommand.html | 42 ----- doc/StringCommandsSidebar.html | 36 ---- doc/Strings.html | 37 ---- doc/SunionCommand.html | 40 ----- doc/SunionstoreCommand.html | 38 ---- doc/SupportedLanguages.html | 59 ------ doc/SupportedPlatforms.html | 37 ---- doc/TemplateCommand.html | 38 ---- 
doc/TtlCommand.html | 38 ---- doc/TwitterAlikeExample.html | 250 -------------------------- doc/TypeCommand.html | 46 ----- doc/UnstableSource.html | 39 ---- doc/ZaddCommand.html | 43 ----- doc/ZcardCommand.html | 41 ----- doc/ZincrbyCommand.html | 42 ----- doc/ZrangeCommand.html | 42 ----- doc/ZrangebyscoreCommand.html | 73 -------- doc/ZremCommand.html | 42 ----- doc/ZremrangebyscoreCommand.html | 39 ---- doc/ZscoreCommand.html | 41 ----- doc/index.html | 41 ----- mkreleasehdr.sh | 9 + 109 files changed, 94 insertions(+), 5086 deletions(-) delete mode 100644 doc/AppendOnlyFileHowto.html delete mode 100644 doc/AuthCommand.html delete mode 100644 doc/Benchmarks.html delete mode 100644 doc/BgrewriteaofCommand.html delete mode 100644 doc/BgsaveCommand.html delete mode 100644 doc/CommandReference.html delete mode 100644 doc/Comparisons.html delete mode 100644 doc/Configuration.html delete mode 100644 doc/ConnectionHandlingSidebar.html delete mode 100644 doc/ControlCommandsSidebar.html delete mode 100644 doc/Credits.html delete mode 100644 doc/DbsizeCommand.html delete mode 100644 doc/DelCommand.html delete mode 100644 doc/DesignPatterns.html delete mode 100644 doc/ExistsCommand.html delete mode 100644 doc/ExpireCommand.html delete mode 100644 doc/FAQ.html delete mode 100644 doc/Features.html delete mode 100644 doc/FlushallCommand.html delete mode 100644 doc/FlushdbCommand.html delete mode 100644 doc/FromSqlToDataStructures.html delete mode 100644 doc/GenericCommandsSidebar.html delete mode 100644 doc/GetCommand.html delete mode 100644 doc/GetsetCommand.html delete mode 100644 doc/IncrCommand.html delete mode 100644 doc/InfoCommand.html delete mode 100644 doc/IntroductionToRedisDataTypes.html delete mode 100644 doc/KeysCommand.html delete mode 100644 doc/LastsaveCommand.html delete mode 100644 doc/LindexCommand.html delete mode 100644 doc/ListCommandsSidebar.html delete mode 100644 doc/Lists.html delete mode 100644 doc/LlenCommand.html delete mode 100644 doc/LpopCommand.html delete mode 100644 doc/LrangeCommand.html delete mode 100644 doc/LremCommand.html delete mode 100644 doc/LsetCommand.html delete mode 100644 doc/LtrimCommand.html delete mode 100644 doc/MgetCommand.html delete mode 100644 doc/MonitorCommand.html delete mode 100644 doc/MoveCommand.html delete mode 100644 doc/MsetCommand.html delete mode 100644 doc/ObjectHashMappers.html delete mode 100644 doc/Pipelining.html delete mode 100644 doc/ProgrammingExamples.html delete mode 100644 doc/ProtocolSpecification.html delete mode 100644 doc/QuickStart.html delete mode 100644 doc/QuitCommand.html delete mode 100644 doc/README.html delete mode 100644 doc/RandomkeyCommand.html delete mode 100644 doc/Redis0100ChangeLog.html delete mode 100644 doc/Redis0900ChangeLog.html delete mode 100644 doc/Redis_1_2_0_Changelog.html delete mode 100644 doc/RenameCommand.html delete mode 100644 doc/RenamenxCommand.html delete mode 100644 doc/ReplicationHowto.html delete mode 100644 doc/ReplyTypes.html delete mode 100644 doc/RoadMap.html delete mode 100644 doc/RpoplpushCommand.html delete mode 100644 doc/RpushCommand.html delete mode 100644 doc/SaddCommand.html delete mode 100644 doc/SaveCommand.html delete mode 100644 doc/ScardCommand.html delete mode 100644 doc/SdiffCommand.html delete mode 100644 doc/SdiffstoreCommand.html delete mode 100644 doc/SelectCommand.html delete mode 100644 doc/SetCommand.html delete mode 100644 doc/SetCommandsSidebar.html delete mode 100644 doc/SetnxCommand.html delete mode 100644 doc/Sets.html delete mode 100644 
doc/ShutdownCommand.html delete mode 100644 doc/SideBar.html delete mode 100644 doc/SinterCommand.html delete mode 100644 doc/SinterstoreCommand.html delete mode 100644 doc/SismemberCommand.html delete mode 100644 doc/SlaveofCommand.html delete mode 100644 doc/SmembersCommand.html delete mode 100644 doc/SmoveCommand.html delete mode 100644 doc/SortCommand.html delete mode 100644 doc/SortedSetCommandsSidebar.html delete mode 100644 doc/SortedSets.html delete mode 100644 doc/Speed.html delete mode 100644 doc/SponsorshipHistory.html delete mode 100644 doc/SpopCommand.html delete mode 100644 doc/SrandmemberCommand.html delete mode 100644 doc/SremCommand.html delete mode 100644 doc/StringCommandsSidebar.html delete mode 100644 doc/Strings.html delete mode 100644 doc/SunionCommand.html delete mode 100644 doc/SunionstoreCommand.html delete mode 100644 doc/SupportedLanguages.html delete mode 100644 doc/SupportedPlatforms.html delete mode 100644 doc/TemplateCommand.html delete mode 100644 doc/TtlCommand.html delete mode 100644 doc/TwitterAlikeExample.html delete mode 100644 doc/TypeCommand.html delete mode 100644 doc/UnstableSource.html delete mode 100644 doc/ZaddCommand.html delete mode 100644 doc/ZcardCommand.html delete mode 100644 doc/ZincrbyCommand.html delete mode 100644 doc/ZrangeCommand.html delete mode 100644 doc/ZrangebyscoreCommand.html delete mode 100644 doc/ZremCommand.html delete mode 100644 doc/ZremrangebyscoreCommand.html delete mode 100644 doc/ZscoreCommand.html delete mode 100644 doc/index.html create mode 100755 mkreleasehdr.sh diff --git a/Changelog b/Changelog index a657927a..0ddf1c70 100644 --- a/Changelog +++ b/Changelog @@ -1,3 +1,86 @@ +2010-05-16 Redis version is now 1.3.12 (antirez) +2010-05-16 redis version is now 1.3.11 (antirez) +2010-05-16 random refactoring and speedups (antirez) +2010-05-16 faster INCR with very little efforts... 
(antirez) +2010-05-15 Merge branch 'redis-cli-fix' of http://github.com/tizoc/redis (antirez) +2010-05-15 added pid info to the check memory leaks test, so that those tests don't appear to be duplicated (antirez) +2010-05-15 Merge branch 'integration' of git://github.com/pietern/redis (antirez) +2010-05-14 more endianess detection fix for SHA1 (antirez) +2010-05-14 fixed a warning seen with some GCC version under Linux (antirez) +2010-05-14 initial rough integration test for replication (Pieter Noordhuis) +2010-05-14 store entire server object on the stack instead of just the client (Pieter Noordhuis) +2010-05-14 proc to retrieve values from INFO properties (Pieter Noordhuis) +2010-05-14 one more fix for endianess detection (antirez) +2010-05-14 Fixed sha1.c compilation on Linux, due to endianess detection lameness (antirez) +2010-05-14 ZUNION,ZINTER -> ZUNIONSTORE,ZINTERSTORE (antirez) +2010-05-14 minor fixes to the new test suite, html doc updated (antirez) +2010-05-14 wait for redis-server to be settled and ready for connections (Pieter Noordhuis) +2010-05-14 fix cleaning up tmp folder (Pieter Noordhuis) +2010-05-14 update makefile to use the new test suite (Pieter Noordhuis) +2010-05-14 check for memory leaks before killing a server (Pieter Noordhuis) +2010-05-14 extract code to kill a server to a separate proc (Pieter Noordhuis) +2010-05-14 start servers on different ports to prevent conflicts (Pieter Noordhuis) +2010-05-14 use DEBUG DIGEST in new test suite (Pieter Noordhuis) +2010-05-14 split test suite into multiple files; runs redis-server in isolation (Pieter Noordhuis) +2010-05-14 use DEBUG DIGEST in the test instead of a function that was doing a similar work, but in a much slower and buggy way (antirez) +2010-05-14 Don't rely on cliReadReply being able to return on shutdown (Bruno Deferrari) +2010-05-14 If command is a shutdown, ignore errors on reply (Bruno Deferrari) +2010-05-14 DEBUG DIGEST implemented, in order to improve the ability to test persistence and replication consistency (antirez) +2010-05-13 makefile deps updated (antirez) +2010-05-13 conflicts resolved (antirez) +2010-05-13 feed SETEX as SET and EXPIREAT to AOF (Pieter Noordhuis) +2010-05-13 very strong speedup in saving time performance when there are many integers in the dataset. Instead of decoding the object before to pass them to the rdbSaveObject layer we check asap if the object is integer encoded and can be written on disk as an integer. (antirez) +2010-05-13 include limits.h otherwise no double precison macros (antirez) +2010-05-13 explicitly checks with ifdefs if our floating point and long long assumptions are verified (antirez) +2010-05-13 Yet another version of the double saving code, with comments explaining what's happening there (antirez) +2010-05-12 added overflow check in the double -> long long conversion trick to avoid integer overflows. I think this was not needed in practical terms, but it is safer (antirez) +2010-05-12 use withscores when performing the dataset digest (antirez) +2010-05-12 If a float can be casted to a long long without rounding loss, we can use the integer conversion function to write the score on disk. 
This is a seriuous speedup (antirez) +2010-05-12 fixed compilation warnings in the AOF sanity check tool (antirez) +2010-05-12 Merge branch 'vm-speedup' (antirez) +2010-05-11 fix to return error when calling INCR on a non-string type (Pieter Noordhuis) +2010-05-11 load objects encoded from disk directly without useless conversion (antirez) +2010-05-11 fixed a problem leading to crashes, as keys can't be currently specially encoded, so we can't encode integers at object loading time... For now this can be fixed passing a few flags, or later can be fixed allowing encoded keys as well (antirez) +2010-05-11 long long to string conversion speedup applied in other places as well. Still the code has bugs, fixing right now... (antirez) +2010-05-11 hand written code to turn a long long into a string -> very big speed win (antirez) +2010-05-11 added specialized function to compare string objects for perfect match that is optimized for this task (antirez) +2010-05-11 better use of encoding inforamtion in dictEncObjKeyCompare (antirez) +2010-05-10 CONFIG now can change appendfsync policy at run time (antirez) +2010-05-10 CONFIG command now supports hot modification of RDB saving parameters. (antirez) +2010-05-10 while loading the rdb file don't add the key to the dictionary at all if it's already expired, instead of removing it just after the insertion. (antirez) +2010-05-10 Merge branch 'check-aof' of git://github.com/pietern/redis (antirez) +2010-05-08 minor changes to improve code readability (antirez) +2010-05-08 swap objects out directly while loading an RDB file if we detect we can't stay in the vm max memory limits anyway (antirez) +2010-05-07 change command names no longer used to zunion/zinter (Pieter Noordhuis) +2010-05-07 DEBUG POPULATE command for fast creation of test databases (antirez) +2010-05-07 update TODO (Pieter Noordhuis) +2010-05-07 swap arguments in blockClientOnSwappedKeys to be consistent (Pieter Noordhuis) +2010-05-07 added function that preloads all keys needed to execute a MULTI/EXEC block (Pieter Noordhuis) +2010-05-07 add sanity check to zunionInterBlockClientOnSwappedKeys, as the number of keys used is provided as argument to the function (Pieter Noordhuis) +2010-05-07 make prototype of custom function to preload keys from the vm match the prototype of waitForMultipleSwappedKeys (Pieter Noordhuis) +2010-05-07 extract preloading of multiple keys according to the command prototype to a separate function (Pieter Noordhuis) +2010-05-07 make append only filename configurable (Pieter Noordhuis) +2010-05-07 don't load value from VM for EXISTS (Pieter Noordhuis) +2010-05-07 swap file name pid expansion removed. Not suited for mission critical software... (antirez) +2010-05-07 Swap file is now locked (antirez) +2010-05-06 Merge branch 'master' into aof-speedup (antirez) +2010-05-06 log error and quit when the AOF contains an unfinished MULTI (antirez) +2010-05-06 log error and quit when the AOF contains an unfinished MULTI (Pieter Noordhuis) +2010-05-06 Merge branch 'master' into check-aof (Pieter Noordhuis) +2010-05-06 hincrby should report an error when called against a hash key that doesn't contain an integer (Pieter Noordhuis) +2010-05-06 AOF writes are now accumulated into a buffer and flushed into disk just before re-entering the event loop. A lot less writes but still this guarantees that AOF is written before the client gets a positive reply about a write operation, as no reply is trasnmitted before re-entering into the event loop. 
(antirez) +2010-05-06 clarified a few messages in redis.conf (antirez) +2010-05-05 ask for confirmation before AOF is truncated (Pieter Noordhuis) +2010-05-05 str can be free'd outside readString (Pieter Noordhuis) +2010-05-05 moved argument parsing around (Pieter Noordhuis) +2010-05-05 ignore redis-check-aof binary (Pieter Noordhuis) +2010-05-05 allow AOF to be fixed by truncating to the portion of the file that is valid (Pieter Noordhuis) +2010-05-05 tool to check if AOF is valid (Pieter Noordhuis) +2010-05-02 included fmacros.h in linenose.c to avoid compilation warnings on Linux (antirez) +2010-05-02 compilation fix for mac os x (antirez) +2010-05-02 Merge branch 'master' of git@github.com:antirez/redis (antirez) +2010-05-02 On Linux now fdatasync() is used insetad of fsync() in order to flush the AOF file kernel buffers (antirez) +2010-04-30 More tests for APPEND and tests for SUBSTR (antirez) 2010-04-30 linenoise.c updated, now redis-cli can be used in a pipe (antirez) 2010-04-29 redis-cli minor fix (less segfault is better) (antirez) 2010-04-29 New MONITOR output format with timestamp, every command in a single line, string representations (antirez) diff --git a/Makefile b/Makefile index 635cb189..949f0b0c 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ # Copyright (C) 2009 Salvatore Sanfilippo # This file is released under the BSD license, see the COPYING file +release_hdr := $(shell sh -c './mkreleasehdr.sh') uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') OPTIMIZATION?=-O2 ifeq ($(uname_S),SunOS) @@ -14,8 +15,6 @@ endif CCOPT= $(CFLAGS) $(CCLINK) $(ARCH) $(PROF) DEBUG?= -g -rdynamic -ggdb -GIT_SHA1:=$(shell sh -c '(git show-ref --head --hash=8 2> /dev/null || echo 00000000) | head -n1') -GIT_DIRTY:=$(shell sh -c 'git status -s 2> /dev/null | wc -l') OBJ = adlist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o BENCHOBJ = ae.o anet.o redis-benchmark.o sds.o adlist.o zmalloc.o CLIOBJ = anet.o sds.o adlist.o redis-cli.o zmalloc.o linenoise.o @@ -56,7 +55,7 @@ sds.o: sds.c sds.h zmalloc.h zipmap.o: zipmap.c zmalloc.h zmalloc.o: zmalloc.c config.h -redis-server: releaseheader $(OBJ) +redis-server: $(OBJ) $(CC) -o $(PRGNAME) $(CCOPT) $(DEBUG) $(OBJ) @echo "" @echo "Hint: To run the test-redis.tcl script is a good idea." @@ -76,11 +75,6 @@ redis-check-dump: $(CHECKDUMPOBJ) redis-check-aof: $(CHECKAOFOBJ) $(CC) -o $(CHECKAOFPRGNAME) $(CCOPT) $(DEBUG) $(CHECKAOFOBJ) -releaseheader: - @echo "#define REDIS_GIT_SHA1 \"$(GIT_SHA1)\"" > release.h - @echo "#define REDIS_GIT_DIRTY $(GIT_DIRTY)" >> release.h - @touch redis.c # force recompile of redis.c - .c.o: $(CC) -c $(CFLAGS) $(DEBUG) $(COMPILE_TIME) $< diff --git a/doc/AppendOnlyFileHowto.html b/doc/AppendOnlyFileHowto.html deleted file mode 100644 index 1a0e468a..00000000 --- a/doc/AppendOnlyFileHowto.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
- - -

AppendOnlyFileHowto

- -
- -
- -
- #sidebar RedisGuides -

Append Only File HOWTO

General Information

The append only file is an alternative durability option for Redis. What does this mean? Let's start with some facts:

  • By default Redis saves snapshots of the dataset on disk, in a binary file called dump.rdb (by default, at least). For instance you can configure Redis to save the dataset every 60 seconds if there are at least 100 changes in the dataset, or every 1000 seconds if there is at least a single change. This is known as "snapshotting".
  • Snapshotting is not very durable. If the computer running Redis stops, your power line fails, or you run killall -9 redis-server by mistake, the latest data written to Redis will be lost. For some applications this is not a big deal; for others it is not acceptable, and Redis was not an option for those applications.
-What is the solution? Use the append only file as an alternative to snapshotting. How does it work?

  • It is a 1.1-only feature.
  • You have to turn it on by editing the configuration file: just make sure you have "appendonly yes" somewhere (see the sketch after this list).
  • Append only files work this way: every time Redis receives a command that changes the dataset (for instance a SET or LPUSH command) it appends this command to the append only file. When you restart Redis it will first replay the append only file to rebuild the state.
-
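A minimal redis.conf sketch for turning the feature on (the directive is the one quoted above; the file name is simply whatever configuration file you start the server with):

appendonly yes

Then start the server against that configuration file as usual, for instance with ./redis-server redis.conf.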

Log rewriting

As you can guess, the append log file gets bigger and bigger every time there is a new operation changing the dataset. Even if you always set the same key "mykey" to the values "1", "2", "3", ... up to 10000000000, in the end you'll have just a single key in the dataset, just a few bytes! But how big will the append log file be? Very, very big.

So Redis supports an interesting feature: it is able to rebuild the append log file, in the background, without stopping the processing of client commands. The key is the BGREWRITEAOF command, which uses the dataset in memory to rewrite the shortest sequence of commands able to rebuild the exact dataset that is currently in memory.

So from time to time, when the log gets too big, try this command. It's safe: if it fails you will not lose your old log (but you may want to make a backup copy, given that 1.1 is currently still in beta!).
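For example, from redis-cli (a sketch; the exact status message can differ between versions):

% ./redis-cli bgrewriteaof
Background append only file rewriting started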

Wait... but how does this work?

Basically it uses the same fork() copy-on-write trick that snapshotting already uses. This is how the algorithm works:

  • Redis forks, so now we have a child and a parent.
  • The child starts writing the new append log file in a temporary file.
  • The parent accumulates all the new changes in an in-memory buffer (but at the same time it writes the new changes in the old append only file, so if the rewriting fails, we are safe).
  • When the child has finished rewriting the file, the parent gets a signal and appends the in-memory buffer to the end of the file generated by the child.
  • Profit! Now Redis atomically renames the new file into place of the old one, and starts appending new data to the new file.
-

How durable is the append only file?

Check redis.conf: you can configure how often Redis will fsync() data to disk. There are three options (sketched in redis.conf form after the warning below):

  • Fsync() every time a new command is appended to the append log file. Very very slow, very safe.
  • Fsync() one time every second. Fast enough, and you can lose 1 second of data if there is a disaster.
  • Never fsync(), just put your data in the hands of the Operating System. The fastest and least safe method.
-Warning: by default Redis will fsync() after every command! This is because the Redis authors want to ship a default configuration that is the safest pick. But the best compromise for most datasets is to fsync() one time every second. - -
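In redis.conf terms the three options above map to the appendfsync directive, roughly as follows (only one of the three lines should be left uncommented):

appendfsync always
# appendfsync everysec
# appendfsync no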
- -
-
- - - diff --git a/doc/AuthCommand.html b/doc/AuthCommand.html deleted file mode 100644 index ff734a60..00000000 --- a/doc/AuthCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -AuthCommand: Contents
  AUTH _password_
    Return value -
- -

AuthCommand

- -
- -
- -
- #sidebar ConnectionHandlingSidebar

AUTH _password_

Request for authentication in a password protected Redis server. A Redis server can be instructed to require a password before allowing clients to issue commands. This is done using the requirepass directive in the Redis configuration file.
-
If the password given by the client is correct, the server replies with an OK status code reply and starts accepting commands from the client. Otherwise an error is returned and the client needs to try a new password. Note that because of the high performance nature of Redis it is possible to try a lot of passwords in parallel in a very short time, so make sure to generate a strong and very long password so that this attack is infeasible.
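A sketch of the full flow (the password is just a placeholder, and the exact error message may vary between versions):

# in redis.conf
requirepass a-very-long-and-hard-to-guess-password

redis> GET foo
(error) ERR operation not permitted
redis> AUTH a-very-long-and-hard-to-guess-password
OK
redis> GET foo
(nil)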
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/Benchmarks.html b/doc/Benchmarks.html deleted file mode 100644 index aadc3d93..00000000 --- a/doc/Benchmarks.html +++ /dev/null @@ -1,129 +0,0 @@ - - - - - - - -
- - - -
-
- -Benchmarks: Contents
  How Fast is Redis?
  Latency percentiles -
- -

Benchmarks

- -
- -
- -
-

How Fast is Redis?

Redis includes the redis-benchmark utility that simulates SETs/GETs done by N clients at the same time sending M total queries (it is similar to Apache's ab utility). Below you'll find the full output of a benchmark executed against a Linux box.

  • The test was done with 50 simultaneous clients performing 100000 requests.
  • The value SET and GET is a 256 byte string.
  • The Linux box is running Linux 2.6 on a Xeon X3320 at 2.5GHz.
  • Tests were executed using the loopback interface (127.0.0.1).
-Results: about 110000 SETs per second, about 81000 GETs per second.

Latency percentiles

-./redis-benchmark -n 100000
-
-====== SET ======
-  100007 requests completed in 0.88 seconds
-  50 parallel clients
-  3 bytes payload
-  keep alive: 1
-
-58.50% <= 0 milliseconds
-99.17% <= 1 milliseconds
-99.58% <= 2 milliseconds
-99.85% <= 3 milliseconds
-99.90% <= 6 milliseconds
-100.00% <= 9 milliseconds
-114293.71 requests per second
-
-====== GET ======
-  100000 requests completed in 1.23 seconds
-  50 parallel clients
-  3 bytes payload
-  keep alive: 1
-
-43.12% <= 0 milliseconds
-96.82% <= 1 milliseconds
-98.62% <= 2 milliseconds
-100.00% <= 3 milliseconds
-81234.77 requests per second
-
-====== INCR ======
-  100018 requests completed in 1.46 seconds
-  50 parallel clients
-  3 bytes payload
-  keep alive: 1
-
-32.32% <= 0 milliseconds
-96.67% <= 1 milliseconds
-99.14% <= 2 milliseconds
-99.83% <= 3 milliseconds
-99.88% <= 4 milliseconds
-99.89% <= 5 milliseconds
-99.96% <= 9 milliseconds
-100.00% <= 18 milliseconds
-68458.59 requests per second
-
-====== LPUSH ======
-  100004 requests completed in 1.14 seconds
-  50 parallel clients
-  3 bytes payload
-  keep alive: 1
-
-62.27% <= 0 milliseconds
-99.74% <= 1 milliseconds
-99.85% <= 2 milliseconds
-99.86% <= 3 milliseconds
-99.89% <= 5 milliseconds
-99.93% <= 7 milliseconds
-99.96% <= 9 milliseconds
-100.00% <= 22 milliseconds
-100.00% <= 208 milliseconds
-88109.25 requests per second
-
-====== LPOP ======
-  100001 requests completed in 1.39 seconds
-  50 parallel clients
-  3 bytes payload
-  keep alive: 1
-
-54.83% <= 0 milliseconds
-97.34% <= 1 milliseconds
-99.95% <= 2 milliseconds
-99.96% <= 3 milliseconds
-99.96% <= 4 milliseconds
-100.00% <= 9 milliseconds
-100.00% <= 208 milliseconds
-71994.96 requests per second
-
Notes: changing the payload from 256 to 1024 or 4096 bytes does not change the numbers significantly (but reply packets are glued together up to 1024 bytes, so GETs may be slower with big payloads). The same holds for the number of clients: from 50 to 256 clients I got the same numbers. With only 10 clients it starts to get a bit slower.

You can expect different results from different boxes. For example, a low profile box like an Intel Core Duo T5500 clocked at 1.66GHz running Linux 2.6 will output the following: -
- ./redis-benchmark -q -n 100000
-SET: 53684.38 requests per second
-GET: 45497.73 requests per second
-INCR: 39370.47 requests per second
-LPUSH: 34803.41 requests per second
-LPOP: 37367.20 requests per second
-
Another one using a 64 bit box, a Xeon L5420 clocked at 2.5GHz:

- ./redis-benchmark -q -n 100000
-PING: 111731.84 requests per second
-SET: 108114.59 requests per second
-GET: 98717.67 requests per second
-INCR: 95241.91 requests per second
-LPUSH: 104712.05 requests per second
-LPOP: 93722.59 requests per second
-
-
- -
-
- - - diff --git a/doc/BgrewriteaofCommand.html b/doc/BgrewriteaofCommand.html deleted file mode 100644 index 61e51c30..00000000 --- a/doc/BgrewriteaofCommand.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - -
- - - -
-
- -BgrewriteaofCommand: Contents
      BGREWRITEAOF
    Return value -
- -

BgrewriteaofCommand

- -
- -
- -
- #sidebar ControlCommandsSidebar

BGREWRITEAOF

-
For detailed information about the Redis Append Only File check the Append Only File Howto.
-
BGREWRITEAOF rewrites the Append Only File in the background when it gets too big. The Redis Append Only File is a journal, so every operation modifying the dataset is logged in the Append Only File (and replayed at startup). This means that the Append Only File always grows. In order to rebuild its content, BGREWRITEAOF creates a new version of the append only file starting directly from the dataset in memory, so as to guarantee the generation of the minimal number of commands needed to rebuild the database.
-
The Append Only File Howto contains further details.
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/BgsaveCommand.html b/doc/BgsaveCommand.html deleted file mode 100644 index 33468ae5..00000000 --- a/doc/BgsaveCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -BgsaveCommand: Contents
  BGSAVE
    Return value -
- -

BgsaveCommand

- -
- -
- -
- #sidebar ControlCommandsSidebar

BGSAVE

-
Save the DB in the background. The OK code is immediately returned. Redis forks: the parent continues to serve the clients while the child saves the DB on disk, then exits. A client may check if the operation succeeded using the LASTSAVE command.
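A sketch of how a client might use it (the timestamps are placeholders; LASTSAVE returns the Unix time of the last successful save):

redis> BGSAVE
Background saving started
redis> LASTSAVE
(integer) 1274054400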
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/CommandReference.html b/doc/CommandReference.html deleted file mode 100644 index 6541e3a4..00000000 --- a/doc/CommandReference.html +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - -
- - - -
- - -

CommandReference

- -
- -
- -
- = Redis Command Reference =

Every command name links to a specific wiki page describing the behavior of the command. A short redis-cli session sketching a few of the generic commands can be found right after the list of commands operating on all kinds of values.

Connection handling

  • QUIT close the connection
  • AUTH simple password authentication if enabled
-

Commands operating on all kinds of values

  • EXISTS key test if a key exists
  • DEL key delete a key
  • TYPE key return the type of the value stored at key
  • KEYS pattern return all the keys matching a given pattern
  • RANDOMKEY return a random key from the key space
  • RENAME oldname newname rename the old key in the new one, destroying the newname key if it already exists
  • RENAMENX oldname newname rename the old key in the new one, if the newname key does not already exist
  • DBSIZE return the number of keys in the current db
  • EXPIRE set a time to live in seconds on a key
  • TTL get the time to live in seconds of a key
  • SELECT index Select the DB having the specified index
  • MOVE key dbindex Move the key from the currently selected DB to the DB having as index dbindex
  • FLUSHDB Remove all the keys of the currently selected DB
  • FLUSHALL Remove all the keys from all the databases
-
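A short redis-cli session sketching a few of the generic commands above (key names are just examples, and the DBSIZE value assumes an otherwise empty database):

redis> SET foo bar
OK
redis> EXISTS foo
(integer) 1
redis> TYPE foo
string
redis> RENAME foo baz
OK
redis> DEL baz
(integer) 1
redis> DBSIZE
(integer) 0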

Commands operating on string values

  • SET key value set a key to a string value
  • GET key return the string value of the key
  • GETSET key value set a key to a string returning the old value of the key
  • MGET key1 key2 ... keyN multi-get, return the strings values of the keys
  • SETNX key value set a key to a string value if the key does not exist
  • SETEX key time value Set+Expire combo command
  • MSET key1 value1 key2 value2 ... keyN valueN set multiple keys to multiple values in a single atomic operation
  • MSETNX key1 value1 key2 value2 ... keyN valueN set multiple keys to multiple values in a single atomic operation if none of the keys already exist
  • INCR key increment the integer value of key
  • INCRBY key integer increment the integer value of key by integer
  • DECR key decrement the integer value of key
  • DECRBY key integer decrement the integer value of key by integer
  • APPEND key value append the specified string to the string stored at key
  • SUBSTR key start end return a substring out of a larger string
-

Commands operating on lists

  • RPUSH key value Append an element to the tail of the List value at key
  • LPUSH key value Append an element to the head of the List value at key
  • LLEN key Return the length of the List value at key
  • LRANGE key start end Return a range of elements from the List at key
  • LTRIM key start end Trim the list at key to the specified range of elements
  • LINDEX key index Return the element at index position from the List at key
  • LSET key index value Set a new value as the element at index position of the List at key
  • LREM key count value Remove the first-N, last-N, or all the elements matching value from the List at key
  • LPOP key Return and remove (atomically) the first element of the List at key
  • RPOP key Return and remove (atomically) the last element of the List at key
  • BLPOP key1 key2 ... keyN timeout Blocking LPOP
  • BRPOP key1 key2 ... keyN timeout Blocking RPOP
  • RPOPLPUSH srckey dstkey Return and remove (atomically) the last element of the source List stored at _srckey_ and push the same element to the destination List stored at _dstkey_
-

Commands operating on sets

  • SADD key member Add the specified member to the Set value at key
  • SREM key member Remove the specified member from the Set value at key
  • SPOP key Remove and return (pop) a random element from the Set value at key
  • SMOVE srckey dstkey member Move the specified member from one Set to another atomically
  • SCARD key Return the number of elements (the cardinality) of the Set at key
  • SISMEMBER key member Test if the specified value is a member of the Set at key
  • SINTER key1 key2 ... keyN Return the intersection between the Sets stored at key1, key2, ..., keyN
  • SINTERSTORE dstkey key1 key2 ... keyN Compute the intersection between the Sets stored at key1, key2, ..., keyN, and store the resulting Set at dstkey
  • SUNION key1 key2 ... keyN Return the union between the Sets stored at key1, key2, ..., keyN
  • SUNIONSTORE dstkey key1 key2 ... keyN Compute the union between the Sets stored at key1, key2, ..., keyN, and store the resulting Set at dstkey
  • SDIFF key1 key2 ... keyN Return the difference between the Set stored at key1 and all the Sets key2, ..., keyN
  • SDIFFSTORE dstkey key1 key2 ... keyN Compute the difference between the Set key1 and all the Sets key2, ..., keyN, and store the resulting Set at dstkey
  • SMEMBERS key Return all the members of the Set value at key
  • SRANDMEMBER key Return a random member of the Set value at key
-

Commands operating on sorted sets (zsets, Redis version > 1.1)

  • ZADD key score member Add the specified member to the Sorted Set value at key or update the score if it already exists
  • ZREM key member Remove the specified member from the Sorted Set value at key
  • ZINCRBY key increment member If the member already exists increment its score by _increment_, otherwise add the member setting _increment_ as score
  • ZRANK key member Return the rank (or index) or _member_ in the sorted set at _key_, with scores being ordered from low to high
  • ZREVRANK key member Return the rank (or index) or _member_ in the sorted set at _key_, with scores being ordered from high to low
  • ZRANGE key start end Return a range of elements from the sorted set at key
  • ZREVRANGE key start end Return a range of elements from the sorted set at key, exactly like ZRANGE, but the sorted set is traversed in reverse order, from the greatest to the smallest score
  • ZRANGEBYSCORE key min max Return all the elements with score >= min and score <= max (a range query) from the sorted set
  • ZCARD key Return the cardinality (number of elements) of the sorted set at key
  • ZSCORE key element Return the score associated with the specified element of the sorted set at key
  • ZREMRANGEBYRANK key min max Remove all the elements with rank >= min and rank <= max from the sorted set
  • ZREMRANGEBYSCORE key min max Remove all the elements with score >= min and score <= max from the sorted set
  • ZUNIONSTORE / ZINTERSTORE dstkey N key1 ... keyN WEIGHTS w1 ... wN AGGREGATE SUM|MIN|MAX Perform a union or intersection over a number of sorted sets with optional weight and aggregate
-

Commands operating on hashes

  • HSET key field value Set the hash field to the specified value. Creates the hash if needed.
  • HGET key field Retrieve the value of the specified hash field.
  • HMSET key field1 value1 ... fieldN valueN Set the hash fields to their respective values.
  • HINCRBY key field integer Increment the integer value of the hash at _key_ on _field_ with _integer_.
  • HEXISTS key field Test for existence of a specified field in a hash
  • HDEL key field Remove the specified field from a hash
  • HLEN key Return the number of items in a hash.
  • HKEYS key Return all the fields in a hash.
  • HVALS key Return all the values in a hash.
  • HGETALL key Return all the fields and associated values in a hash.
-

Sorting

  • SORT key BY pattern LIMIT start end GET pattern ASC|DESC ALPHA Sort a Set or a List accordingly to the specified parameters
-

Transactions

-

Publish/Subscribe

-

Persistence control commands

  • SAVE Synchronously save the DB on disk
  • BGSAVE Asynchronously save the DB on disk
  • LASTSAVE Return the UNIX time stamp of the last successfully saving of the dataset on disk
  • SHUTDOWN Synchronously save the DB on disk, then shutdown the server
  • BGREWRITEAOF Rewrite the append only file in background when it gets too big
-

Remote server control commands

  • INFO Provide information and statistics about the server
  • MONITOR Dump all the received requests in real time
  • SLAVEOF Change the replication settings
-
- -
-
- - - diff --git a/doc/Comparisons.html b/doc/Comparisons.html deleted file mode 100644 index 14c253a4..00000000 --- a/doc/Comparisons.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
-
- -Comparisons: Contents
    Memcached
    Tokyo Cabinet / Tokyo Tyrant -
- -

Comparisons

- -
- -
- -
- If you are asking yourself how Redis differs from other key-value stores, here you will find it compared to some of the most popular contenders (all great software) in this category.

Memcached

  • Memcached is not persistent, it just holds everything in memory without saving since its main goal is to be used as a cache, while Redis is persistent.
-
  • Like memcached, Redis uses a key-value model, but while keys can only be strings, values in Redis can be Lists, Sets or Sorted Sets, and complex operations (intersections, get/set of the n-th element of a list, pop/push of elements) can be performed on them.
-

Tokyo Cabinet / Tokyo Tyrant

Redis and Tokyo Cabinet can be used for the same applications, but actually they are very different beasts. If you read Twitter messages of people involved in scalable things both products are reported to work well, but surely there are times where one or the other can be the best choice.

  • Tokyo Cabinet writes synchronously on disk, Redis takes the whole dataset on memory and writes on disk asynchronously. Tokyo Cabinet is safer and probably a better idea if your dataset is going to be bigger than RAM, but Redis is faster (note that Redis supports master-slave replication that is trivial to setup, so you are safe anyway if you want a setup where data can't be lost even after a disaster).
-
  • Redis supports higher level operations and data structures. Tokyo Cabinet supports a kind of database that is able to organize data into rows with named fields (in a way very similar to Berkeley DB) but can't do things like server side List and Set operations Redis is able to do: pushing or popping from Lists in an atomic way, in O(1) time complexity, server side Set intersections, Sorting of schema free data in complex ways (By the way TC supports sorting in the table-based database format). Redis on the other hand does not support the abstraction of tables with fields, the idea is that you can build this stuff in software easily if you really need a table-alike approach.
-
  • Tokyo Cabinet does not implement a networking layer. You have to use a networking layer called Tokyo Tyrant that interfaces to Tokyo Cabinet so you can talk to Tokyo Cabinet in a client-server fashion. In Redis the networking support is built-in inside the server, and is basically the only interface between the external world and the dataset.
-
  • Redis is reported to be much faster, especially if you plan to access Tokyo Cabinet via Tokyo Tyrant. Here I can only say that with Redis you can expect 100,000 operations/seconds with a normal Linux box and 50 concurrent clients. You should test Redis, Tokyo, and the other alternatives with your specific work load to get a feeling about performances for your application.
-
  • Redis is not an on-disk DB engine like Tokyo: the latter can be used as a fast DB engine in your C project without the networking overhead just linking to the library. Still in many scalable applications you need multiple servers talking with multiple clients, so the client-server model is almost always needed, this is why in Redis this is built-in.
-
- -
-
- - - diff --git a/doc/Configuration.html b/doc/Configuration.html deleted file mode 100644 index 91d15fca..00000000 --- a/doc/Configuration.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -Configuration: Contents -
- -

Configuration

- -
- -
- -
- The redis.conf file included in the source code distribution is a starting point: you should be able to adapt it to your needs without trouble by reading the comments inside the file.

In order to start Redis using a configuration file just pass the file name as the sole argument when starting the server:

-$ ./redis-server redis.conf
-
-
- -
-
- - - diff --git a/doc/ConnectionHandlingSidebar.html b/doc/ConnectionHandlingSidebar.html deleted file mode 100644 index 09392041..00000000 --- a/doc/ConnectionHandlingSidebar.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - -
-
- -ConnectionHandlingSidebar: Contents -
- -

ConnectionHandlingSidebar

- -
- -
- -
- == Connection handling ==

-
- -
-
- - - diff --git a/doc/ControlCommandsSidebar.html b/doc/ControlCommandsSidebar.html deleted file mode 100644 index ba7d706d..00000000 --- a/doc/ControlCommandsSidebar.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - -
-
- -ControlCommandsSidebar: Contents -
- -

ControlCommandsSidebar

- -
- -
- -
- == Control Commands ==

-
- -
-
- - - diff --git a/doc/Credits.html b/doc/Credits.html deleted file mode 100644 index fefc4440..00000000 --- a/doc/Credits.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -Credits: Contents
  Credits -
- -

Credits

- -
- -
- -
-

Credits

  • The Redis server was designed and written by Salvatore Sanfilippo (aka antirez)
  • Ezra Zygmuntowicz (aka ezmobius) - Ruby client lib initial version and hacking
  • Ludovico Magnocavallo (aka ludo) - Python client lib
  • Valentino Volonghi of Adroll - Erlang client lib
  • brettbender - found and fixed a bug in sds.c that caused the server to crash, at least on 64 bit systems, and to be buggy in general, since we reused the same va_list with vsprintf without calling va_start and va_end every time.
  • Dobrica Pavlinusic - Perl client lib
  • Brian Hammond - AUTH command implementation, C++ client lib
  • Daniele Alessandri - Lua client lib
  • Corey Stup - C99 cleanups
  • Taylor Weibley - Ruby client improvements
  • Bob Potter - Rearrange redisObject struct to reduce memory usage in 64bit environments
  • Luca Guidi and Brian McKinney - Ruby client improvements
  • Aman Gupta - SDIFF / SDIFFSTORE, other Set operations improvements, ability to disable clients timeout.
  • Diego Rosario Brogna - Code and ideas about dumping backtrace on sigsegv and similar error conditions.
-p.s. keeping this file in sync is hard in these early days, sorry. Please drop me an email if I forgot to add your name here! -
- -
-
- - - diff --git a/doc/DbsizeCommand.html b/doc/DbsizeCommand.html deleted file mode 100644 index d9b5b032..00000000 --- a/doc/DbsizeCommand.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -DbsizeCommand: Contents
  DBSIZE
    Return value -
- -

DbsizeCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

DBSIZE

Return the number of keys in the currently selected database.
-

Return value

Integer reply - -
- -
-
- - - diff --git a/doc/DelCommand.html b/doc/DelCommand.html deleted file mode 100644 index 8d063ce7..00000000 --- a/doc/DelCommand.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
-
- -DelCommand: Contents
  DEL _key1_ _key2_ ... _keyN_
    Return value -
- -

DelCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

DEL _key1_ _key2_ ... _keyN_

-Time complexity: O(1)
Remove the specified keys. If a given key does not exist, no operation is performed for this key. The command returns the number of keys removed.
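A small sketch of the behavior described above (key names are just examples):

redis> MSET key1 one key2 two
OK
redis> DEL key1 key2 nosuchkey
(integer) 2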
-

Return value

Integer reply, specifically:

-an integer greater than 0 if one or more keys were removed
-0 if none of the specified keys existed
-
- -
- -
-
- - - diff --git a/doc/DesignPatterns.html b/doc/DesignPatterns.html deleted file mode 100644 index 411739f1..00000000 --- a/doc/DesignPatterns.html +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - -
- - - -
-
- -DesignPatterns: Contents -
- -

DesignPatterns

- -
- -
- -
- Use random keys instead of incremental keys in order to avoid a single key that gets incremented by many servers: such a key can't be distributed among servers. -
- -
-
- - - diff --git a/doc/ExistsCommand.html b/doc/ExistsCommand.html deleted file mode 100644 index e8279163..00000000 --- a/doc/ExistsCommand.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
-
- -ExistsCommand: Contents
  EXISTS _key_
    Return value -
- -

ExistsCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

EXISTS _key_

-Time complexity: O(1)
Test if the specified key exists. The command returns "1" if the key exists, otherwise "0" is returned. Note that even keys set with an empty string as value will return "1".
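A small sketch, including the empty string case mentioned above:

redis> SET mykey ""
OK
redis> EXISTS mykey
(integer) 1
redis> EXISTS missingkey
(integer) 0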
-

Return value

Integer reply, specifically:

-1 if the key exists.
-0 if the key does not exist.
-
- -
- -
-
- - - diff --git a/doc/ExpireCommand.html b/doc/ExpireCommand.html deleted file mode 100644 index a3dbbe5b..00000000 --- a/doc/ExpireCommand.html +++ /dev/null @@ -1,86 +0,0 @@ - - - - - - - -
- - - -
- - -

ExpireCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

EXPIRE _key_ _seconds_

-

EXPIREAT _key_ _unixtime_ (Redis > 1.1)

-Time complexity: O(1)
Set a timeout on the specified key. After the timeout the key will be automatically deleted by the server. A key with an associated timeout is said to be volatile in Redis terminology.
-
Volatile keys are stored on disk like the other keys, and the timeout is persistent too, like all the other aspects of the dataset. Saving a dataset containing expires and stopping the server does not stop the flow of time, as Redis stores on disk the Unix time at which the key will no longer be available, not the remaining seconds.
-
EXPIREAT works exactly like EXPIRE, but instead of taking as its second argument the number of seconds representing the Time To Live of the key (that is, a relative way of specifying the TTL), it takes an absolute one in the form of a UNIX timestamp (number of seconds elapsed since January 1, 1970).
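A sketch of both forms (the absolute timestamp is just an example value; TTL reports the remaining seconds):

redis> SET mykey hello
OK
redis> EXPIRE mykey 60
(integer) 1
redis> TTL mykey
(integer) 60
redis> SET otherkey world
OK
redis> EXPIREAT otherkey 1293840000
(integer) 1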
-
EXPIREAT was introduced in order to implement the [Persistence append only saving mode], so that EXPIRE commands are automatically translated into EXPIREAT commands for the append only file. Of course EXPIREAT can also be used by programmers that need a way to simply specify that a given key should expire at a given time in the future.
-

How the expire is removed from a key

When the key is set to a new value using the SET command, the INCR command, or any other command that modifies the value stored at the key, the timeout is removed from the key and the key becomes non volatile.
-

Restrictions with write operations against volatile keys

Write operations like LPUSH, LSET and every other command that has the effect of modifying the value stored at a volatile key have a special semantic: basically a volatile key is destroyed when it is the target of a write operation. See for example the following usage pattern:
-
-% ./redis-cli lpush mylist foobar
-OK
-% ./redis-cli lpush mylist hello
-OK
-% ./redis-cli expire mylist 10000
-1
-% ./redis-cli lpush mylist newelement
-OK
-% ./redis-cli lrange mylist 0 -1
-1. newelement
-
What happened here is that the LPUSH against the key with a timeout set deleted the key before performing the operation. So there is a simple rule: write operations against volatile keys will destroy the key before performing the operation. Why does Redis use this behavior? In order to retain an important property: a server that receives a given sequence of commands will end up with the same dataset in memory. Without the delete-on-write semantic, the state of the server would also depend on the timing of the commands. This is not a desirable property in a distributed database that supports replication.
-

Setting the timeout again on already volatile keys

Trying to call EXPIRE against a key that already has an associated timeout will not change the timeout of the key, but will just return 0. If instead the key does not have an associated timeout, the timeout will be set and EXPIRE will return 1.
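A sketch of this behavior (values are examples; the second EXPIRE returns 0 and the original timeout keeps running):

redis> SET mykey hello
OK
redis> EXPIRE mykey 100
(integer) 1
redis> EXPIRE mykey 200
(integer) 0
redis> TTL mykey
(integer) 97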
-

Enhanced Lazy Expiration algorithm

Redis does not constantly monitor keys that are going to be expired. Keys are expired simply when some client tries to access a key and the key is found to be timed out.
-
Of course this is not enough, as there are expired keys that will never be accessed again. These keys should be expired anyway, so once every second Redis tests a few keys at random among the keys with an expire set. All the keys that are already expired are deleted from the keyspace.
-

Version 1.0

Each time, a fixed number of keys was tested (100 by default). So if you had clients setting keys with a very short expire faster than 100 per second, memory continued to grow. When you stopped inserting new keys, memory started to be freed, 100 keys every second in the best conditions. Under a peak, Redis continued to use more and more RAM even though most keys were expired in each sweep.
-

Version 1.1

Each time Redis:
-
  1. Tests 100 random keys among the keys with an expire set.
  2. Deletes all the keys found to be expired.
  3. If more than 25 keys were expired, it starts again from step 1.
-
This is a trivial probabilistic algorithm: basically the assumption is that our sample is representative of the whole key space, and we continue to expire until the percentage of keys that are likely to be expired is under 25%.
-
This means that at any given moment the maximum number of already expired keys still using memory is at most equal to the maximum number of keys set per second divided by 4.
-

Return value

Integer reply, specifically:

-1: the timeout was set.
-0: the timeout was not set since the key already has an associated timeout, or the key does not exist.
-

FAQ: Can you explain better why Redis deletes keys with an EXPIRE on write operations?

-Ok let's start with the problem: -
-redis> set a 100
-OK
-redis> expire a 360
-(integer) 1
-redis> incr a
-(integer) 1
-
-I set a key to the value of 100, then set an expire of 360 seconds, and then incremented the key (before the 360 seconds timeout expired, of course). The obvious result would be 101; instead the key is set to the value of 1. Why? -There is a very important reason involving the Append Only File and replication. Let's rework our example a bit, adding the notion of time to the mix: -
-SET a 100
-EXPIRE a 5
-... wait 10 seconds ...
-INCR a
-
-Imagine a Redis version that does not implement the "Delete keys with an expire set on write operation" semantic. -Running the above example with the 10 seconds pause will lead to 'a' being set to the value of 1, as it no longer exists when INCR is called 10 seconds later.

Instead if we drop the 10 seconds pause, the result is that 'a' is set to 101.

And in practice, timing changes! For instance the client may wait 10 seconds before the INCR, but the sequence written in the Append Only File (and later replayed back as fast as possible when Redis is restarted) will not have the pause. Even if we add a timestamp to the AOF, when the time difference is smaller than our timer resolution we have a race condition.

The same happens with master-slave replication. Again, consider the example above: the client will use the same sequence of commands without the 10 seconds pause, but the replication link will slow down for a few seconds due to a network problem. Result? The master will contain 'a' set to 101, the slave 'a' set to 1.

The only way to avoid this, while at the same time having reliable, non time dependent timeouts on keys, is to destroy volatile keys when a write operation is attempted against them.

After all, Redis is one of the rare fully persistent databases that will give you EXPIRE. This comes at a cost :) -
- -
-
- - - diff --git a/doc/FAQ.html b/doc/FAQ.html deleted file mode 100644 index 7c012b2c..00000000 --- a/doc/FAQ.html +++ /dev/null @@ -1,70 +0,0 @@ - - - - - - - -
- - - -
-
- -FAQ: Contents
  Isn't this key-value thing just hype?
  Can I backup a Redis DB while the server is working?
  What's the Redis memory footprint?
  I like Redis high level operations and features, but I don't like that it takes everything in memory and I can't have a dataset larger than memory. Plans to change this?
  Why does Redis take the whole dataset in RAM?
  If my dataset is too big for RAM and I don't want to use consistent hashing or other ways to distribute the dataset across different nodes, what can I do to use Redis anyway?
  Do you plan to implement Virtual Memory in Redis? Why don't just let the Operating System handle it for you?
  Is there something I can do to lower the Redis memory usage?
  I have an empty Redis server but INFO and logs are reporting megabytes of memory in use!
  What happens if Redis runs out of memory?
  Does Redis use more memory running in 64 bit boxes? Can I use 32 bit Redis in 64 bit systems?
  How much time does it take to load a big database at server startup?
  Background saving is failing with a fork() error under Linux even if I've a lot of free RAM!
  Are Redis on disk snapshots atomic?
  Redis is single threaded, how can I exploit multiple CPU / cores?
  I'm using some form of key hashing for partitioning, but what about SORT BY?
  What is the maximum number of keys a single Redis instance can hold? and what the max number of elements in a List, Set, Ordered Set?
  What Redis means actually?
  Why did you start the Redis project? -
- -

FAQ

- -
- -
- -
- = Why do I need Redis if there is already memcachedb, Tokyo Cabinet, ...? =

Memcachedb is basically memcached made persistent. Redis is a different evolution path in the key-value DB world: the idea is that the main advantages of key-value DBs are retained even without so severe a loss of comfort compared to plain key-value DBs. So Redis offers more features:

  • Keys can store different data types, not just strings, notably Lists and Sets. For example, if you want to use Redis as a log storage system for different computers, every computer can just RPUSH data to its computer_ID key. Don't want to save more than 1000 log lines per computer? Just issue a LTRIM computer_ID 0 999 command to trim the list after every push (see the sketch after this list).
-
  • Another example is about Sets. Imagine building a social news site like Reddit. Every time a user upvotes a given news item, you can just add the id of the user that did the upvoting to the news_ID_upmods key, which holds a value of type SET. Sets can also be used to index things: every key can be a tag holding a SET with the IDs of all the objects associated with this tag. Using Redis set intersection you obtain the list of IDs having all these tags at the same time.
-
  • We wrote a simple Twitter clone using just Redis as the database. Download the source code from the download section and imagine writing it with a plain key-value DB without support for lists and sets... it's much harder.
-
  • Multiple DBs. Using the SELECT command the client can select different datasets. This is useful because Redis provides a MOVE atomic primitive that moves a key from one DB to another; if the target DB already contains such a key it returns an error. This basically gives you a way to perform locking in distributed processing.
-
  • So what is Redis really about? The user interface with the programmer. Redis aims to export to the programmer the right tools to model a wide range of problems. Sets, Lists with O(1) push operations, lrange and ltrim, server-side fast intersection between sets: these are primitives that allow you to model complex problems with a key-value database.
-
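A sketch of the capped log pattern from the first point above (the key name is just an example; replies are omitted since their format varies between versions):

RPUSH logs:webserver-42 "GET /index.html 200"
LTRIM logs:webserver-42 0 999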

Isn't this key-value thing just hype?

I imagine key-value DBs, in the short term future, to be used like you use memory in a program, with lists, hashes, and so on. With Redis it's like this, but this special kind of memory containing your data structures is shared, atomic, persistent.

When we write code it is obvious, when we keep data in memory, to use the most sensible data structure for the job, right? Incredibly, when data is put inside a relational DB this is no longer true, and we create an absurd data model even if our need is to put data in and get this data back in the same order we put it in (an ORDER BY is required even when the data should already be sorted. Strange, don't you think?).

Key-value DBs bring this back home, letting us create sensible data models and use the right data structure for the problem we are trying to solve.

Can I backup a Redis DB while the server is working?

Yes you can. When Redis saves the DB it actually creates a temp file, then rename(2)s that temp file to the destination file name. So even while the server is working it is safe to copy the database file with the plain cp unix command. Note that you can use master-slave replication in order to have redundancy of data, but if all you need is backups, cp or scp will do the job pretty well.

What's the Redis memory footprint?

Worst case scenario: 1 million keys, with the keys being the natural numbers from 0 to 999999 and the string "Hello World" as value, use 100MB on my Intel MacBook (32 bit). Note that the same data stored linearly in a unique string takes something like 16MB; this is expected because with small keys and values there is a lot of overhead. Memcached will perform similarly.

With large keys/values the ratio is much better of course.

64 bit systems will use much more memory than 32 bit systems to store the same keys, especially if the keys and values are small. This is because pointers take 8 bytes on 64 bit systems. But of course the advantage is that you can have a lot of memory on 64 bit systems, so to run large Redis servers a 64 bit system is more or less required.

I like Redis high level operations and features, but I don't like that it takes everything in memory and I can't have a dataset larger than memory. Plans to change this?

Short answer: if you are using a Redis client that supports consistent hashing you can distribute the dataset across different nodes. For instance the Ruby client supports this feature. There are plans to develop redis-cluster, which is basically a dummy Redis server used only to distribute the requests among N different nodes using consistent hashing.

Why does Redis take the whole dataset in RAM?

Redis takes the whole dataset in memory and writes asynchronously to disk in order to be very fast. You get the best of both worlds: hyper-speed and persistence of data, but the price to pay is exactly this: the dataset must fit in your computer's RAM.

If the data is larger than memory, and this data is stored on disk, the bottleneck of disk I/O speed will start to ruin the performance. Maybe not in benchmarks, but once you have real load from multiple clients with distributed key accesses the data must come from disk, and the disk is damn slow. What's more, Redis supports higher level data structures than plain values; implementing these on disk would be even slower.

Redis will always continue to hold the whole dataset in memory because these days scalability requires using RAM as the storage medium, and RAM is getting cheaper and cheaper. Today it is common for an entry level server to have 16 GB of RAM! And in the 64-bit era there are, in theory, no longer limits to the amount of RAM you can have.

Amazon EC2 now provides instances with 32 or 64 GB of RAM.

If my dataset is too big for RAM and I don't want to use consistent hashing or other ways to distribute the dataset across different nodes, what can I do to use Redis anyway?

You may try to load a dataset larger than your memory in Redis and see what happens. Basically, if you are using a modern operating system and you have a lot of data in the DB that is rarely accessed, the OS's virtual memory implementation will try to swap rarely used pages of memory to disk, and only recall these pages when they are needed. If you have many large values that are rarely used this will work. If your DB is big because you have tons of little values accessed at random without a specific pattern this will not work (at a low level a page is usually 4096 bytes, and different keys/values can be stored in a single page; the OS can't swap this page out to disk if even a few keys in it are used frequently).

Another possible solution is to use both MySQL and Redis at the same time: basically keep the state in Redis, along with all the things that get accessed very frequently (user auth tokens, Redis Lists with chronologically ordered IDs of the last N comments, N posts, and so on), and use MySQL as a simple storage engine for the larger data: just create a table with an auto-incrementing ID as primary key and a large BLOB field as data field, and access MySQL data only by primary key (the ID). The application will run the high traffic queries against Redis, and when it has to fetch the big data it will ask MySQL for the specific resource IDs.
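A minimal sketch of this hybrid pattern, assuming the redis-py client; fetch_post_body_from_mysql() below is a hypothetical helper standing in for whatever MySQL access layer you already use (it is not part of Redis), and the key name posts:latest.ids is just an example:

import redis

r = redis.Redis()

def fetch_post_body_from_mysql(post_id):
    # Hypothetical helper: e.g. SELECT body FROM posts WHERE id = %s via your MySQL client.
    raise NotImplementedError

def latest_posts(count=10):
    # Hot data in Redis: post IDs kept in chronological order (LPUSHed elsewhere).
    ids = r.lrange('posts:latest.ids', 0, count - 1)
    # Cold, bulky data in MySQL, fetched by primary key only.
    return [(int(post_id), fetch_post_body_from_mysql(int(post_id))) for post_id in ids]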

Update: it could be interesting to test how Redis performs with datasets larger than memory if the OS swap partition is on one of these very fast Intel SSD disks.

Do you plan to implement Virtual Memory in Redis? Why not just let the Operating System handle it for you?

Yes, in order to support datasets bigger than RAM there is a plan to implement transparent Virtual Memory in Redis, that is, the ability to transfer large values associated with rarely used keys to disk, and reload them transparently into memory when these values are requested in some way.

So you may ask why not let the operating system VM do the work for us. There are two main reasons. First, in Redis even a large value stored at a given key, for instance a 1 million element list, is not allocated in a contiguous piece of memory. It's actually very fragmented, since Redis uses quite aggressive object sharing and reuses allocated Redis Object structures.

So you can imagine the memory layout as composed of 4096 byte pages that actually contain different parts of different large values. Not only that, but many values that are large enough for us to swap out to disk, like a 1024 byte value, are just one quarter the size of a memory page, and likely the same page also holds other values that are not rarely used. So such a value will never be swapped out by the operating system. This is the first reason for implementing application-level virtual memory in Redis.

There is another reason, as important as the first. A complex object in memory like a list or a set is something like 10 times bigger than the same object serialized on disk. You probably already noticed how Redis snapshots on disk are much smaller compared to the memory usage of Redis for the same objects. This happens because data in memory is full of pointers, reference counters and other metadata. Add to this malloc fragmentation and the need to return word-aligned chunks of memory and you have a clear picture of what happens. Letting the OS do the swapping would therefore mean 10 times the I/O between memory and disk than otherwise needed.

Is there something I can do to lower the Redis memory usage?

Yes, try compiling it with a 32 bit target if you are using a 64 bit box.

If you are using Redis >= 1.3, try using the Hash data type, it can save a lot of memory (see the sketch after these tips).

If you are using hashes or any other type with values bigger than 128 bytes, try also this to lower the RSS usage (Resident Set Size): export MMAP_THRESHOLD=4096
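To illustrate the Hash tip above, here is a minimal sketch assuming the redis-py client and Redis >= 1.3: instead of one top-level key per field (user:1000:name, user:1000:password, ...), the fields of an object are packed into a single hash, which Redis stores much more compactly. The key and field names are just examples.

import redis

r = redis.Redis()

# One top-level key per field: every key pays the per-key overhead.
r.set('user:1000:name', 'alice')
r.set('user:1000:password', 's3cret')

# The same data packed into a single hash: much less overhead per field.
r.hset('user:1000', 'name', 'alice')
r.hset('user:1000', 'password', 's3cret')

print(r.hgetall('user:1000'))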

I have an empty Redis server but INFO and logs are reporting megabytes of memory in use!

This may happen and it's perfectly ok. Redis objects are small C structures allocated and freed a lot of times. This costs a lot of CPU, so instead of being freed, released objects are put into a free list and reused when needed. This memory is taken exactly by these free objects, ready to be reused.

What happens if Redis runs out of memory?

With modern operating systems malloc() returning NULL is not common; usually the server will start swapping and Redis performance will be disastrous, so you'll know it's time to use more Redis servers or get more RAM.

The INFO command (a work in progress these days) will report the amount of memory Redis is using, so you can write scripts that monitor your Redis servers checking for critical conditions.

You can also use the "maxmemory" option in the config file to put a limit on the memory Redis can use. If this limit is reached Redis will start to reply with an error to write commands (but will continue to accept read-only commands).

Does Redis use more memory running on 64 bit boxes? Can I use 32 bit Redis on 64 bit systems?

Redis uses a lot more memory when compiled for a 64 bit target, especially if the dataset is composed of many small keys and values. Such a database will, for instance, consume 50 MB of RAM when compiled for the 32 bit target, and 80 MB for the 64 bit target! That's a big difference.

You can run 32 bit Redis binaries on a 64 bit Linux or Mac OS X system without problems. For OS X just use make 32bit. For Linux instead, make sure you have libc6-dev-i386 installed, then use make 32bit if you are using the latest Git version. For Redis <= 1.2.2 you instead have to edit the Makefile and replace "-arch i386" with "-m32".

If your application is already able to perform application-level sharding, it is very advisable to run N instances of 32 bit Redis against a big 64 bit box (with more than 4GB of RAM) instead of a single 64 bit instance, as this is much more memory efficient.

How much time does it take to load a big database at server startup?

Just an example on normal hardware: it takes about 45 seconds to restore a 2 GB database on a fairly standard system, no RAID. This can give you some feeling for the order of magnitude of the time needed to load data when you restart the server.

Background saving is failing with a fork() error under Linux even if I have a lot of free RAM!

Short answer: echo 1 > /proc/sys/vm/overcommit_memory :)

And now the long one:

The Redis background saving schema relies on the copy-on-write semantics of fork in modern operating systems: Redis forks, creating a child process that is an exact copy of the parent. The child process dumps the DB to disk and finally exits. In theory the child should use as much memory as the parent, being a copy, but actually, thanks to the copy-on-write semantics implemented by most modern operating systems, the parent and child processes will share the common memory pages. A page will be duplicated only when it changes in the child or in the parent. Since in theory all the pages may change while the child process is saving, Linux can't tell in advance how much memory the child will take, so if the overcommit_memory setting is set to zero the fork will fail unless there is as much free RAM as required to really duplicate all the parent's memory pages, with the result that if you have a Redis dataset of 3 GB and just 2 GB of free memory it will fail.

Setting overcommit_memory to 1 tells Linux to relax and perform the fork in a more optimistic allocation fashion, and this is indeed what you want for Redis.

Are Redis on-disk snapshots atomic?

Yes, the Redis background saving process is always fork(2)ed when the server is outside of the execution of a command, so every command reported to be atomic in RAM is also atomic from the point of view of the disk snapshot.

Redis is single threaded, how can I exploit multiple CPUs / cores?

Simply start multiple instances of Redis on different ports on the same box and treat them as different servers! Given that Redis is a distributed database anyway, in order to scale you need to think in terms of multiple computational units. At some point a single box may not be enough anyway.

In general key-value databases are very scalable because of the property that different keys can stay on different servers independently.

In Redis there are client libraries such as Redis-rb (the Ruby client) that are able to handle multiple servers automatically using consistent hashing. We are going to implement consistent hashing in all the other major client libraries. If you use a different language you can implement it yourself, or otherwise just hash the key before SETting / GETting it from a given server. For example imagine having N Redis servers, server-0, server-1, ..., server-(N-1). You want to store the key "foo": what's the right server to put "foo" on, in order to distribute keys evenly among the different servers? Just compute crc = CRC32("foo"), then servernum = crc % N (the remainder of the division by N). This will give a number between 0 and N-1 for every key. Connect to that server and store the key. Do the same for GETs.
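A minimal sketch of this modulo-based partitioning in Python; the server list and key are just examples, and a real client library would also keep one connection per node:

import zlib

servers = ['server-0:6379', 'server-1:6379', 'server-2:6379']  # example node list

def node_for_key(key, nodes=servers):
    crc = zlib.crc32(key.encode('utf-8')) & 0xffffffff  # unsigned CRC32 of the key
    return nodes[crc % len(nodes)]                      # the remainder selects the node

print(node_for_key('foo'))  # always maps "foo" to the same node for a fixed node list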

This is a basic way of performing key partitioning; consistent hashing is much better, and this is why after Redis 1.0 is released we'll try to implement it in every widely used client library, starting from Python and PHP (Ruby already implements this support).

I'm using some form of key hashing for partitioning, but what about SORT BY?

With SORT BY you need all the weight keys to be in the same Redis instance as the list/set you are trying to sort. In order to make this possible we developed a concept called key tags. A key tag is a special pattern inside a key that, if present, is the only part of the key hashed in order to select the server for this key. For example in order to hash the key "foo" I simply perform the CRC32 checksum of the whole string, but if this key has a pattern in the form of the characters {...} I only hash that substring. So for example for the key "foo{bared}" the key hashing code will simply perform the CRC32 of "bared". This way, using key tags you can ensure that related keys will be stored on the same Redis instance just by using the same key tag for all of them. Redis-rb already implements key tags.
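A sketch of key-tag aware hashing, building on the function above; the {...} convention is the one described here, while the helper names are just examples:

import re
import zlib

def hash_source(key):
    # If the key contains a {...} key tag, only the tag is hashed.
    m = re.search(r'\{(.+?)\}', key)
    return m.group(1) if m else key

def slot_for_key(key, n_nodes):
    crc = zlib.crc32(hash_source(key).encode('utf-8')) & 0xffffffff
    return crc % n_nodes

# "foo{bared}" and "bar{bared}" land on the same node, so SORT ... BY works across them.
print(slot_for_key('foo{bared}', 3) == slot_for_key('bar{bared}', 3))  # True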

What is the maximum number of keys a single Redis instance can hold? And what is the max number of elements in a List, Set, or Ordered Set?

In theory Redis can handle up to 2^32 keys, and was tested in practice to handle at least 150 million keys per instance. We are working in order to experiment with larger values.

Every list, set, and ordered set can hold 2^32 elements.

Actually Redis internals are ready to allow up to 2^64 elements, but the current disk dump format doesn't support this, and there is plenty of time to fix this issue in the future, as currently even with 128 GB of RAM it's impossible to reach 2^32 elements.

What does Redis actually mean?

Redis means two things:
  • it's a joke on the word Redistribute (instead of using just a Relational DB, redistribute your workload among Redis servers)
  • it means REmote DIctionary Server

Why did you start the Redis project?

In order to scale LLOOGG. But after I got the basic server working I liked the idea of sharing the work with other guys, and Redis was turned into an open source project.

diff --git a/doc/Features.html b/doc/Features.html deleted mode 100644 index 66d387e1..00000000 --- a/doc/Features.html +++ /dev/null @@ -1,38 +0,0 @@

Features


Features (DRAFT)

Checking Redis for the first time? Here you will find the most important features, and pointers to a lot more information.

Speed

Redis is written in ANSI C, and loads the whole dataset in memory, so it is wicked fast! Up to 110,000 SETs/second and 81,000 GETs/second can be achieved on an entry level Linux box. Read more about Redis Speed.

Also Redis supports Pipelining of commands and getting and setting multiple values in a single command to speed up communication with the client libraries.

Persistence

While all the data lives in memory, changes are asynchronously saved on disk using flexible policies based on elapsed time and/or number of updates since last save.

If you can't afford losing some data, starting on version 1.1 (currently in beta but you can download it from the Git repository) Redis supports an append-only file persistence mode. Check more on Persistence, or read the AppendOnlyFileHowto for more information.

Support for Data Structures

Values in Redis can be Strings as in a conventional key-value store, but also Lists, Sets, and SortedSets (to be supported in version 1.1). These data types allow pushing/popping elements, adding/removing them, performing server side union, intersection and difference between sets, and so forth, depending on the type. Redis supports different kinds of sorting abilities for Sets and Lists.

You can think of Redis as a Data Structures Server that allows you to model non trivial problems. Read Data Types to learn more about the way Redis handles Strings, and the Commands supported by Lists, Sets and SortedSets.

Atomic Operations

Redis operations working on the different Data Types are atomic, so setting or incrementing a key, adding and removing elements from a set, or incrementing a counter will all be accomplished safely.

Variety of Supported Languages

Ruby, Python, Twisted Python, PHP, Erlang, Tcl, Perl, Lua, Java, Scala, Clojure, choose your poison. Check the list of Supported Languages for all the details.

If your favorite language is not supported yet, you can write your own client library, as the Protocol is pretty simple.

Master/Slave Replication

Redis supports a very simple and fast Master/Slave replication. It is so simple it takes only one line in the configuration file to set it up, and 21 seconds for a Slave to complete the initial sync of a 10 MM key set on an Amazon EC2 instance.

Read more about Master/Slave Replication.

Sharding

Distributing the dataset across multiple Redis instances is easy in Redis, as in any other key-value store. This basically depends on the language client libraries being able to do so.

Read more about Sharding if you want to know more about distributing data and workload in Redis.

Hot Backups

TODO

Simple to Install, Setup and Manage

Installing Redis requires little more than downloading it, uncompressing it and running make. Management is near zero, so you can start using Redis in a matter of minutes.

Go on and read about Redis installation, its Setup and Management.

Portable

Redis is written in ANSI C and works on most POSIX systems like Linux, BSD, Mac OS X, Solaris, and so on. Redis is reported to compile and work under WIN32 if compiled with Cygwin, but there is no official support for Windows currently.

Liberal Licensing

Redis is free software released under the very liberal BSD license.

What's next?

Want to get started with Redis? Try the Quick Start and you will be up and running in just a matter of minutes.

Check the Code Samples and find out how you can use Redis with your favorite programming language.

Compare Redis with other key-value stores, like Tokyo Cabinet or Memcached.

diff --git a/doc/FlushallCommand.html b/doc/FlushallCommand.html deleted mode 100644 index 5ce99421..00000000 --- a/doc/FlushallCommand.html +++ /dev/null @@ -1,39 +0,0 @@

FlushallCommand


FLUSHALL

Delete all the keys of all the existing databases, not just the currently selected one. This command never fails.

Return value

Status code reply

diff --git a/doc/FlushdbCommand.html b/doc/FlushdbCommand.html deleted mode 100644 index 53217350..00000000 --- a/doc/FlushdbCommand.html +++ /dev/null @@ -1,39 +0,0 @@

FlushdbCommand


FLUSHDB

Delete all the keys of the currently selected DB. This command never fails.

Return value

Status code reply

diff --git a/doc/FromSqlToDataStructures.html b/doc/FromSqlToDataStructures.html deleted mode 100644 index 4a837dfd..00000000 --- a/doc/FromSqlToDataStructures.html +++ /dev/null @@ -1,37 +0,0 @@

FromSqlToDataStructures


Introduction (IDEA MORE THAN A DRAFT)

Coming from SQL land? Who doesn't? Redis is simple, even primitive, compared to the world you are used to, the world of Relational Database Management Systems (RDBMS) and Structured Query Language (SQL); here you will find insights to build bridges between both worlds to model real life problems.

Data Structures

When I was young, happy and single ;) I studied Data Structures at university; actually I learnt Data Structures and Algorithms before learning anything about Databases, and particularly RDBMS and SQL. This is natural, because you need to know about Data Structures and Algorithms to understand a Database.

Redis can be seen as a Data Structures Server: a very simple interface to an extremely fast and efficient in-memory data store.

Dude where is my SELECT statement?

LISTs

In SQL there is no such thing as a "natural" order: a SELECT statement without an ORDER BY clause will return data in an undefined order. In Redis, LISTs address the problem of natural ordering, ...

SETs

So you have a bunch of unordered data,

SORT to the rescue

But sometimes we need to actually sort a LIST in an order different from its natural one, or take a SET and have it ordered; that is where the fast SORT command comes in handy...

SORT BY

Just SORTing keys would be kind of boring, sometimes useless, right? Well, you can SORT...

HASHEs

Umm, sorry, you will have to wait for an upcoming version of Redis to have Hashes, but here are idioms you should use to manage Dictionary-like data...

diff --git a/doc/GenericCommandsSidebar.html b/doc/GenericCommandsSidebar.html deleted mode 100644 index d2dd6aa7..00000000 --- a/doc/GenericCommandsSidebar.html +++ /dev/null @@ -1,36 +0,0 @@

GenericCommandsSidebar

diff --git a/doc/GetCommand.html b/doc/GetCommand.html deleted mode 100644 index 69083552..00000000 --- a/doc/GetCommand.html +++ /dev/null @@ -1,39 +0,0 @@

GetCommand


GET _key_

Time complexity: O(1)
Get the value of the specified key. If the key does not exist the special value 'nil' is returned. If the value stored at key is not a string an error is returned, because GET can only handle string values.

Return value

Bulk reply

diff --git a/doc/GetsetCommand.html b/doc/GetsetCommand.html deleted mode 100644 index 1726bccc..00000000 --- a/doc/GetsetCommand.html +++ /dev/null @@ -1,38 +0,0 @@

GetsetCommand


GETSET _key_ _value_

Time complexity: O(1)
GETSET is an atomic set-this-value-and-return-the-old-value command. Set key to the string value and return the old value stored at key. The string can't be longer than 1073741824 bytes (1 GB).

Return value

Bulk reply

Design patterns

GETSET can be used together with INCR for counting with an atomic reset when a given condition arises. For example a process may call INCR against the key mycounter every time some event occurs, but from time to time we need to get the value of the counter and reset it to zero atomically, using GETSET mycounter 0.
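A minimal sketch of this pattern, assuming the redis-py client; the key name mycounter is taken from the example above:

import redis

r = redis.Redis()

def record_event():
    # Called every time the event occurs.
    r.incr('mycounter')

def harvest_counter():
    # Atomically read the current count and reset it to zero:
    # no increment happening in between can be lost.
    old = r.getset('mycounter', 0)
    return int(old) if old is not None else 0

record_event()
record_event()
print(harvest_counter())  # 2
print(harvest_counter())  # 0
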
diff --git a/doc/IncrCommand.html b/doc/IncrCommand.html deleted mode 100644 index 5479e5f9..00000000 --- a/doc/IncrCommand.html +++ /dev/null @@ -1,43 +0,0 @@

IncrCommand


INCR _key_

INCRBY _key_ _integer_

DECR _key_

DECRBY _key_ _integer_

Time complexity: O(1)
Increment or decrement the number stored at key by one. If the key does not exist or contains a value of the wrong type, set the key to the value of "0" before performing the increment or decrement operation.

INCRBY and DECRBY work just like INCR and DECR, but instead of incrementing/decrementing by 1, they increment/decrement by the given integer.

INCR commands are limited to 64 bit signed integers.
Note: this is actually a string operation, that is, in Redis there are no "integer" types. The string stored at the key is simply parsed as a base 10 64 bit signed integer, incremented, and then converted back to a string.
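For illustration, a minimal sketch using the redis-py client (the key name is just an example); note that the reply is the new value after the operation:

import redis

r = redis.Redis()

r.set('pageviews', 10)
print(r.incr('pageviews'))      # 11
print(r.incr('pageviews', 5))   # 16, i.e. INCRBY pageviews 5
print(r.decr('pageviews'))      # 15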

Return value

Integer reply, these commands will reply with the new value of key after the increment or decrement.

diff --git a/doc/InfoCommand.html b/doc/InfoCommand.html deleted mode 100644 index dd689acd..00000000 --- a/doc/InfoCommand.html +++ /dev/null @@ -1,48 +0,0 @@

InfoCommand


INFO

The INFO command returns different information and statistics about the server in a format that's simple to parse by computers and easy to read by humans.

Return value

Bulk reply, specifically in the following format:

-redis_version:0.07
-connected_clients:1
-connected_slaves:0
-used_memory:3187
-changes_since_last_save:0
-last_save_time:1237655729
-total_connections_received:1
-total_commands_processed:1
-uptime_in_seconds:25
-uptime_in_days:0
-
All the fields are in the form field:value
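For example, with the redis-py client (which parses this reply into a dict for you) a monitoring script might check memory usage like this; the 100 MB threshold is just an example:

import redis

r = redis.Redis()

info = r.info()                      # INFO reply parsed into a dict of field -> value
used = int(info['used_memory'])      # bytes allocated by Redis
print('used_memory: %d bytes' % used)
if used > 100 * 1024 * 1024:         # example threshold: 100 MB
    print('WARNING: Redis is using more than 100 MB of memory')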

Notes

  • used_memory is returned in bytes, and is the total number of bytes allocated by the program using malloc.
  • uptime_in_days is redundant since the uptime in seconds already contains the full uptime information; this field is mainly present for humans.
  • changes_since_last_save does not refer to the number of key changes, but to the number of operations that produced some kind of change in the dataset.

diff --git a/doc/IntroductionToRedisDataTypes.html b/doc/IntroductionToRedisDataTypes.html deleted mode 100644 index 26b2ba19..00000000 --- a/doc/IntroductionToRedisDataTypes.html +++ /dev/null @@ -1,153 +0,0 @@

IntroductionToRedisDataTypes


A fifteen minute introduction to Redis data types

As you probably already know, Redis is not a plain key-value store; it is actually a data structures server, supporting different kinds of values. That is, you are not limited to plain strings as values of keys. All the following data types are supported as values:

  • Binary-safe strings.
  • Lists of binary-safe strings.
  • Sets of binary-safe strings, that is, collections of unique unsorted elements. You can think of this as a Ruby hash where all the keys are set to the 'true' value.
  • Sorted sets, similar to Sets but where every element is associated with a floating point number score. The elements are taken sorted by score. You can think of this as a Ruby hash where the key is the element and the value is the score, but where elements are always taken in order without requiring a sorting operation.
It's not always trivial to grasp how these data types work and what to use in order to solve a given problem from the Redis command reference, so this document is a crash course on Redis data types and their most common usage patterns.

For all the examples we'll use the redis-cli utility, a simple but handy command line utility to issue commands against the Redis server.

Redis keys

Before starting to talk about the different kinds of values supported by Redis, it is better to say that keys are not binary safe strings in Redis, but just strings not containing a space or a newline character. For instance "foo" or "123456789" or "foo_bar" are valid keys, while "hello world" or "hello\n" are not.

Actually there is nothing inside the Redis internals preventing the use of binary keys, it's just a matter of protocol, and actually the new protocol introduced with Redis 1.2 (1.2 betas are 1.1.x) in order to implement commands like MSET is totally binary safe. Still, for now consider this a hard limit, as the database is only tested with "normal" keys.

A few other rules about keys:

  • Too long keys are not a good idea, for instance a key of 1024 bytes is not a good idea not only memory-wise, but also because the lookup of the key in the dataset may require several costly key-comparisons.
  • Too short keys are often not a good idea. There is no point in writing "u:1000:pwd" as a key if you can instead write "user:1000:password"; the latter is more readable and the added space is very little compared to the space used by the key object itself.
  • Try to stick with a schema. For instance "object-type:id:field" can be a nice idea, like in "user:1000:password". I like to use dots for multi-words fields, like in "comment:1234:reply.to".

The string type

This is the simplest Redis type. If you use only this type, Redis will be something like a memcached server with persistence.

Let's play a bit with the string type:

-$ ./redis-cli set mykey "my binary safe value"
-OK
-$ ./redis-cli get mykey
-my binary safe value
-
As you can see, using the SET command and the GET command it is trivial to set string values and have these strings returned back.

Values can be strings (including binary data) of every kind, for instance you can store a jpeg image inside a key. A value can't be bigger than 1 Gigabyte.

Even if strings are the basic values of Redis, there are interesting operations you can perform against them. For instance one is atomic increment:

-$ ./redis-cli set counter 100
-OK
-$ ./redis-cli incr counter
-(integer) 101
-$ ./redis-cli incr counter
-(integer) 102
-$ ./redis-cli incrby counter 10
-(integer) 112
-
The INCR command parses the string value as an integer, increments it by one, and finally sets the obtained value as the new string value. There are other similar commands like INCRBY, DECR and DECRBY. Actually internally it's always the same command, acting in a slightly different way.

What does it mean that INCR is atomic? It means that even multiple clients issuing INCR against the same key will never incur a race condition. For instance it can never happen that client 1 reads "10" and client 2 reads "10" at the same time, both increment to 11, and set the new value to 11. The final value will always be 12, as the read-increment-set operation is performed while no other client is executing a command at the same time.

Another interesting operation on strings is the GETSET command, which does just what its name suggests: it sets a key to a new value and returns the old value as the result. Why is this useful? Example: you have a system that increments a Redis key using the INCR command every time your web site receives a new visit. You want to collect this information once every hour, without losing a single visit. You can GETSET the key, assigning it the new value of "0" and reading the old value back.

The List type

To explain the List data type it's better to start with a little theory, as the term List is often used in an improper way by information technology folks. For instance "Python Lists" are not what the name may suggest (Linked Lists); they are actually Arrays (the same data type is called Array in Ruby, actually).

From a very general point of view a List is just a sequence of ordered elements: 10,20,1,2,3 is a list. But the properties of a list implemented using an Array are very different from the properties of a list implemented using a Linked List.

Redis lists are implemented via Linked Lists: this means that even if you have millions of elements inside a list, the operation of adding a new element at the head or at the tail of the list is performed in constant time. Adding a new element with the LPUSH command to the head of a ten element list is the same speed as adding an element to the head of a 10 million element list.

What's the downside? That accessing an element by index is very fast in lists implemented with an Array and not so fast in lists implemented by linked lists.

Redis Lists are implemented with linked lists because for a database system it is crucial to be able to add elements to a very long list in a very fast way. Another strong advantage is, as you'll see in a moment, that Redis Lists can be capped to a constant length in constant time.

First steps with Redis lists

The LPUSH command adds a new element to a list, on the left (at the head), while the RPUSH command adds a new element to a list, on the right (at the tail). Finally the LRANGE command extracts ranges of elements from lists:

-$ ./redis-cli rpush messages "Hello how are you?"
-OK
-$ ./redis-cli rpush messages "Fine thanks. I'm having fun with Redis"
-OK
-$ ./redis-cli rpush messages "I should look into this NOSQL thing ASAP"
-OK
-$ ./redis-cli lrange messages 0 2
-1. Hello how are you?
-2. Fine thanks. I'm having fun with Redis
-3. I should look into this NOSQL thing ASAP
-
Note that LRANGE takes two indexes, the first and the last element of the range to return. Both indexes can be negative, telling Redis to start counting from the end: so -1 is the last element, -2 is the penultimate element of the list, and so forth.

As you can guess from the example above, lists can be used, for instance, in order to implement a chat system. Another use is as queues in order to route messages between different processes. But the key point is that you can use Redis lists every time you need to access data in the same order it was added. This will not require any SQL ORDER BY operation, will be very fast, and will scale to millions of elements even with a toy Linux box.

For instance in ranking systems like the social news reddit.com you can add every new submitted link into a List, and with LRANGE it's possible to paginate results in a trivial way.

In a blog engine implementation you can have a list for every post, where to push blog comments, and so forth.

Pushing IDs instead of the actual data in Redis lists

In the above example we pushed our "objects" (simply messages in the example) directly inside the Redis list, but this is often not the way to go, as objects can be referenced multiple times: in a list to preserve their chronological order, in a Set to remember they are about a specific category, in another list but only if the object matches some kind of requisite, and so forth.

Let's return back to the reddit.com example. A more credible pattern for adding submitted links (news) to the list is the following:

-$ ./redis-cli incr next.news.id
-(integer) 1
-$ ./redis-cli set news:1:title "Redis is simple"
-OK
-$ ./redis-cli set news:1:url "http://code.google.com/p/redis"
-OK
-$ ./redis-cli lpush submitted.news 1
-OK
-
We obtained a unique incremental ID for our news object just by incrementing a key, then used this ID to create the object, setting a key for every field of the object. Finally the ID of the new object was pushed onto the submitted.news list.

This is just the start. Check the Command Reference and read about all the other list related commands. You can remove elements, rotate lists, get and set elements by index, and of course retrieve the length of the list with LLEN.

Redis Sets

Redis Sets are unordered collections of binary-safe strings. The SADD command adds a new element to a set. It's also possible to do a number of other operations against sets, like testing if a given element already exists, or performing the intersection, union or difference between multiple sets, and so forth. An example is worth 1000 words:

-$ ./redis-cli sadd myset 1
-(integer) 1
-$ ./redis-cli sadd myset 2
-(integer) 1
-$ ./redis-cli sadd myset 3
-(integer) 1
-$ ./redis-cli smembers myset
-1. 3
-2. 1
-3. 2
-
I added three elements to my set and told Redis to return back all the elements. As you can see they are not sorted.

Now let's check if a given element exists:

-$ ./redis-cli sismember myset 3
-(integer) 1
-$ ./redis-cli sismember myset 30
-(integer) 0
-
"3" is a member of the set, while "30" is not. Sets are very good in order to express relations between objects. For instance we can easily Redis Sets in order to implement tags.

A simple way to model this is to have, for every object you want to tag, a Set with all the IDs of the tags associated with the object, and for every tag that exists, a Set of all the objects tagged with this tag.

For instance if our news ID 1000 is tagged with tags 1, 2, 5 and 77, we can populate the following Sets:

-$ ./redis-cli sadd news:1000:tags 1
-(integer) 1
-$ ./redis-cli sadd news:1000:tags 2
-(integer) 1
-$ ./redis-cli sadd news:1000:tags 5
-(integer) 1
-$ ./redis-cli sadd news:1000:tags 77
-(integer) 1
-$ ./redis-cli sadd tag:1:objects 1000
-(integer) 1
-$ ./redis-cli sadd tag:2:objects 1000
-(integer) 1
-$ ./redis-cli sadd tag:5:objects 1000
-(integer) 1
-$ ./redis-cli sadd tag:77:objects 1000
-(integer) 1
-
To get all the tags for a given object is trivial:

-$ ./redis-cli smembers news:1000:tags
-1. 5
-2. 1
-3. 77
-4. 2

But there are other non trivial operations that are still easy to implement using the right Redis commands. For instance we may want the list of all the objects having tags 1, 2, 10, and 27 at the same time. We can do this using the SINTER command, which performs the intersection between different sets. So in order to reach our goal we can just use:

-$ ./redis-cli sinter tag:1:objects tag:2:objects tag:10:objects tag:27:objects
-... no result in our dataset composed of just one object ;) ...
-
Look at the Command Reference to discover other Set related commands, there are a bunch of interesting ones. Also make sure to check the SORT command, as both Redis Sets and Lists are sortable.

A digression. How to get unique identifiers for strings

In our tags example we showed tag IDs without mentioning how these IDs can be obtained. Basically for every tag added to the system you need a unique identifier. You also want to be sure that there are no race conditions if multiple clients are trying to add the same tag at the same time. Also, if a tag already exists, you want its ID returned, otherwise a new unique ID should be created and associated with the tag.

Redis 1.4 will add the Hash type. With it, it will be trivial to associate strings with unique IDs, but how can we do this today with the current commands exported by Redis, in a reliable way?

Our first attempt (which is broken) can be the following. Let's suppose we want to get a unique ID for the tag "redis":

  • In order to make this algorithm binary safe (they are just tags but think of utf8, spaces and so forth) we start by computing the SHA1 sum of the tag. SHA1(redis) = b840fc02d524045429941cc15f59e41cb7be6c52.
  • Let's check if this tag is already associated with a unique ID with the command GET tag:b840fc02d524045429941cc15f59e41cb7be6c52:id.
  • If the above GET returns an ID, return it to the user. We already have the unique ID.
  • Otherwise... create a new unique ID with INCR next.tag.id (assume it returned 123456).
  • Finally associate this new ID with our tag with SET tag:b840fc02d524045429941cc15f59e41cb7be6c52:id 123456 and return the new ID to the caller.
Nice. Or better... broken! What happens if two clients perform these commands at the same time, trying to get the unique ID for the tag "redis"? If the timing is right they'll both get nil from the GET operation, both increment the next.tag.id key, and set the key twice. One of the two clients will return the wrong ID to the caller. Fortunately fixing the algorithm is not hard, and this is the sane version (a Python sketch follows the list):

  • In order to make this algorithm binary safe (they are just tags but think of utf8, spaces and so forth) we start by computing the SHA1 sum of the tag. SHA1(redis) = b840fc02d524045429941cc15f59e41cb7be6c52.
  • Let's check if this tag is already associated with a unique ID with the command GET tag:b840fc02d524045429941cc15f59e41cb7be6c52:id.
  • If the above GET returns an ID, return it to the user. We already have the unique ID.
  • Otherwise... create a new unique ID with INCR next.tag.id (assume it returned 123456).
  • Finally associate this new ID with our tag with SETNX tag:b840fc02d524045429941cc15f59e41cb7be6c52:id 123456. By using SETNX, if a different client was faster than this one the key will not be set. Moreover, SETNX returns 1 if the key was set, 0 otherwise. So... let's add a final step to our computation.
  • If SETNX returned 1 (we set the key) return 123456 to the caller, it's our tag ID; otherwise perform GET tag:b840fc02d524045429941cc15f59e41cb7be6c52:id and return the value to the caller.
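A minimal sketch of this sane version in Python, assuming the redis-py client; key names follow the scheme used above:

import hashlib
import redis

r = redis.Redis()

def tag_id(tag):
    # Hash the tag so the scheme stays binary safe (utf8, spaces, ...).
    digest = hashlib.sha1(tag.encode('utf-8')).hexdigest()
    key = 'tag:%s:id' % digest
    existing = r.get(key)
    if existing is not None:
        return int(existing)          # the tag already has an ID
    new_id = r.incr('next.tag.id')    # candidate ID
    if r.setnx(key, new_id):          # only one client wins the race
        return new_id
    return int(r.get(key))            # we lost the race: return the winner's ID

print(tag_id('redis'))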

Sorted sets

Sets are a very handy data type, but... they are a bit too unsorted to fit well for a number of problems ;) This is why Redis 1.2 introduced Sorted Sets. They are very similar to Sets, collections of binary-safe strings, but this time with an associated score, and an operation similar to the List LRANGE operation to return items in order, but working against Sorted Sets: the ZRANGE command.

Basically Sorted Sets are in some way the Redis equivalent of Indexes in the SQL world. For instance in our reddit.com example above there was no mention of how to generate the actual home page with news ranked by user votes and time. We'll see how sorted sets can fix this problem, but it's better to start with something simpler, illustrating the basic workings of this advanced data type. Let's add a few selected hackers with their year of birth as "score".

-$ ./redis-cli zadd hackers 1940 "Alan Kay"
-(integer) 1
-$ ./redis-cli zadd hackers 1953 "Richard Stallman"
-(integer) 1
-$ ./redis-cli zadd hackers 1965 "Yukihiro Matsumoto"
-(integer) 1
-$ ./redis-cli zadd hackers 1916 "Claude Shannon"
-(integer) 1
-$ ./redis-cli zadd hackers 1969 "Linus Torvalds"
-(integer) 1
-$ ./redis-cli zadd hackers 1912 "Alan Turing"
-(integer) 1
-
For sorted sets it's a joke to return these hackers sorted by their birth year, because actually they are already sorted. Sorted sets are implemented via a dual-ported data structure containing both a skip list and a hash table, so every time we add an element Redis performs an O(log(N)) operation. That's good, but when we ask for sorted elements Redis does not have to do any work at all, it's already all sorted:

-$ ./redis-cli zrange hackers 0 -1
-1. Alan Turing
-2. Claude Shannon
-3. Alan Kay
-4. Richard Stallman
-5. Yukihiro Matsumoto
-6. Linus Torvalds
-
Didn't know that Linus was younger than Yukihiro btw ;)

Anyway, I want to order these elements the other way around, using ZREVRANGE instead of ZRANGE this time:

-$ ./redis-cli zrevrange hackers 0 -1
-1. Linus Torvalds
-2. Yukihiro Matsumoto
-3. Richard Stallman
-4. Alan Kay
-5. Claude Shannon
-6. Alan Turing
-
A very important note: ZSets just have a "default" ordering, but you are still free to call the SORT command against sorted sets to get a different ordering (though this time the server will waste CPU). An alternative for having multiple orderings is to add every element to multiple sorted sets at the same time.

Operating on ranges

Sorted sets are more powerful than this. They can operate on ranges. For instance let's try to get all the individuals born up to 1950. We use the ZRANGEBYSCORE command to do it:

-$ ./redis-cli zrangebyscore hackers -inf 1950
-1. Alan Turing
-2. Claude Shannon
-3. Alan Kay
-
We asked Redis to return all the elements with a score between negative infinity and 1950 (both extremes are included).

It's also possible to remove ranges of elements. For instance let's remove all the hackers born between 1940 and 1960 from the sorted set:

-$ ./redis-cli zremrangebyscore hackers 1940 1960
-(integer) 2
-
ZREMRANGEBYSCORE is not the best command name, but it can be very useful, and returns the number of removed elements.

Back to the reddit example

For the last time, back to the Reddit example. Now we have a decent plan to populate a sorted set in order to generate the home page. A sorted set can contain all the news items that are not older than a few days (we remove old entries from time to time using ZREMRANGEBYSCORE). A background job gets all the elements from this sorted set, gets the user votes and the time of each news item, and computes the score to populate the reddit.home.page sorted set with the news IDs and associated scores. To show the home page we just have to perform a blazingly fast call to ZRANGE.

From time to time we'll remove too-old news from the reddit.home.page sorted set as well, in order for our system to always work against a limited set of news.

Updating the scores of a sorted set

Just a final note before finishing this tutorial: sorted set scores can be updated at any time. Just calling ZADD again against an element already included in the sorted set will update its score (and position) in O(log(N)), so sorted sets are suitable even when there are tons of updates.

This tutorial is in no way complete; this is just the basics to get started with Redis. Read the Command Reference to discover a lot more.

Thanks for reading. Salvatore.

diff --git a/doc/KeysCommand.html b/doc/KeysCommand.html deleted mode 100644 index f1a6e070..00000000 --- a/doc/KeysCommand.html +++ /dev/null @@ -1,42 +0,0 @@

KeysCommand


KEYS _pattern_

Time complexity: O(n) (with n being the number of keys in the DB, and assuming keys and pattern of limited length)
Returns all the keys matching the glob-style pattern as space separated strings. For example if you have in the database the keys "foo" and "foobar" the command "KEYS foo*" will return "foo foobar".

Note that while the time complexity for this operation is O(n), the constant times are pretty low. For example Redis running on an entry level laptop can scan a 1 million key database in 40 milliseconds. Still, it's better to consider this one of the *slow commands* that may ruin the DB performance if not used with care.

In other words this command is intended only for debugging and *special* operations like creating a script to change the DB schema. Don't use it in your normal code. Use Redis Sets in order to group together a subset of objects.

Glob style pattern examples:
* h?llo will match hello, hallo, hhllo
* h*llo will match hllo and heeeello
* h[ae]llo will match hello and hallo, but not hillo
Use \ to escape special chars if you want to match them verbatim.

Return value

Bulk reply, specifically a string in the form of a space separated list of keys. Note that most client libraries will return an Array of keys and not a single string with space separated keys (that is, the split by " " is usually performed in the client library).

diff --git a/doc/LastsaveCommand.html b/doc/LastsaveCommand.html deleted mode 100644 index 67b1f66f..00000000 --- a/doc/LastsaveCommand.html +++ /dev/null @@ -1,39 +0,0 @@

LastsaveCommand


LASTSAVE

Return the UNIX TIME of the last DB save executed with success. A client may check if a BGSAVE command succeeded by reading the LASTSAVE value, then issuing a BGSAVE command and checking at regular intervals, every N seconds, whether LASTSAVE changed.
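A sketch of that polling pattern with the redis-py client, which exposes LASTSAVE and BGSAVE directly; the 1 second interval is just an example:

import time
import redis

r = redis.Redis()

before = r.lastsave()      # time of the last successful save
r.bgsave()                 # ask the server to save in the background
while r.lastsave() == before:
    time.sleep(1)          # poll until LASTSAVE changes
print('background save completed at', r.lastsave())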

Return value

Integer reply, specifically an UNIX time stamp.

diff --git a/doc/LindexCommand.html b/doc/LindexCommand.html deleted mode 100644 index 4af80530..00000000 --- a/doc/LindexCommand.html +++ /dev/null @@ -1,41 +0,0 @@

LindexCommand


LINDEX _key_ _index_

Time complexity: O(n) (with n being the length of the list)
Return the specified element of the list stored at the specified key. 0 is the first element, 1 the second and so on. Negative indexes are supported, for example -1 is the last element, -2 the penultimate and so on.

If the value stored at key is not of list type an error is returned. If the index is out of range an empty string is returned.

Note that even if the average time complexity is O(n), asking for the first or the last element of the list is O(1).

Return value

Bulk reply, specifically the requested element.

diff --git a/doc/ListCommandsSidebar.html b/doc/ListCommandsSidebar.html deleted mode 100644 index ae827923..00000000 --- a/doc/ListCommandsSidebar.html +++ /dev/null @@ -1,36 +0,0 @@

ListCommandsSidebar

diff --git a/doc/Lists.html b/doc/Lists.html deleted mode 100644 index 5f71937f..00000000 --- a/doc/Lists.html +++ /dev/null @@ -1,42 +0,0 @@

Lists


Redis List Type

Redis Lists are lists of Redis Strings, sorted by insertion order. It's possible to add elements to a Redis List pushing new elements on the head (on the left) or on the tail (on the right) of the list.

The LPUSH command inserts a new element at the head, while RPUSH inserts a new element at the tail. A new list is created when one of these operations is performed against an empty key.

For instance, if we perform the following operations:
-LPUSH mylist a   # now the list is "a"
-LPUSH mylist b   # now the list is "b","a"
-RPUSH mylist c   # now the list is "b","a","c" (RPUSH was used this time)
-
The resulting list stored at mylist will contain the elements "b","a","c".

The max length of a list is 2^32-1 elements (4294967295, more than 4 billion elements per list).

Implementation details

Redis Lists are implemented as doubly linked lists. A few commands benefit from the fact that the lists are doubly linked in order to reach the needed element starting from the nearest extreme (head or tail). LRANGE and LINDEX are examples of such commands.

The use of linked lists also guarantees that regardless of the length of the list pushing and popping are O(1) operations.

Redis Lists cache length information so LLEN is O(1) as well.

diff --git a/doc/LlenCommand.html b/doc/LlenCommand.html deleted mode 100644 index 07072a42..00000000 --- a/doc/LlenCommand.html +++ /dev/null @@ -1,41 +0,0 @@

LlenCommand


LLEN _key_

Time complexity: O(1)
Return the length of the list stored at the specified key. If the key does not exist zero is returned (the same behaviour as for empty lists). If the value stored at key is not a list an error is returned.

Return value

Integer reply, specifically:

The length of the list.

diff --git a/doc/LpopCommand.html b/doc/LpopCommand.html deleted mode 100644 index 58dffb93..00000000 --- a/doc/LpopCommand.html +++ /dev/null @@ -1,41 +0,0 @@

LpopCommand


LPOP _key_


RPOP _key_

Time complexity: O(1)
Atomically return and remove the first (LPOP) or last (RPOP) element of the list. For example if the list contains the elements "a","b","c", LPOP will return "a" and the list will become "b","c".

If the key does not exist or the list is already empty the special value 'nil' is returned.

Return value

Bulk reply

diff --git a/doc/LrangeCommand.html b/doc/LrangeCommand.html deleted mode 100644 index 28dbf890..00000000 --- a/doc/LrangeCommand.html +++ /dev/null @@ -1,47 +0,0 @@

LrangeCommand


LRANGE _key_ _start_ _end_

Time complexity: O(start+n) (with n being the length of the range and start being the start offset)
Return the specified elements of the list stored at the specified key. Start and end are zero-based indexes. 0 is the first element of the list (the list head), 1 the next element and so on.

For example LRANGE foobar 0 2 will return the first three elements of the list.

start and end can also be negative numbers indicating offsets from the end of the list. For example -1 is the last element of the list, -2 the penultimate element and so on.

Consistency with range functions in various programming languages

Note that if you have a list of numbers from 0 to 100, LRANGE 0 10 will return 11 elements, that is, the rightmost item is included. This may or may not be consistent with the behavior of range-related functions in your programming language of choice (think of Ruby's Range.new, Array#slice or Python's range() function).

LRANGE behavior is consistent with that of Tcl.

Out-of-range indexes

Indexes out of range will not produce an error: if start is beyond the end of the list, or start > end, an empty list is returned. If end is beyond the end of the list Redis will treat it just like the last element of the list.

Return value

Multi bulk reply, specifically a list of elements in the specified range.

diff --git a/doc/LremCommand.html b/doc/LremCommand.html deleted mode 100644 index 142160f0..00000000 --- a/doc/LremCommand.html +++ /dev/null @@ -1,41 +0,0 @@

LremCommand


LREM _key_ _count_ _value_

Time complexity: O(N) (with N being the length of the list)
Remove the first count occurrences of the value element from the list. If count is zero all the elements are removed. If count is negative, elements are removed from tail to head instead of from head to tail, which is the normal behaviour. So for example LREM with count -2 and _hello_ as the value to remove against the list (a,b,c,hello,x,hello,hello) will leave the list (a,b,c,hello,x). The number of removed elements is returned as an integer, see below for more information about the returned value. Note that non existing keys are considered like empty lists by LREM, so LREM against non existing keys will always return 0.

Return value

Integer Reply, specifically:

The number of removed elements if the operation succeeded

diff --git a/doc/LsetCommand.html b/doc/LsetCommand.html deleted mode 100644 index 5b833650..00000000 --- a/doc/LsetCommand.html +++ /dev/null @@ -1,38 +0,0 @@

LsetCommand


LSET _key_ _index_ _value_

Time complexity: O(N) (with N being the length of the list)
Set the list element at index (see LINDEX for information about the _index_ argument) to the new value. Out of range indexes will generate an error. Note that setting the first or last element of the list is O(1).
Similarly to other list commands accepting indexes, the index can be negative to access elements starting from the end of the list. So -1 is the last element, -2 is the penultimate, and so forth.

Return value

Status code reply

diff --git a/doc/LtrimCommand.html b/doc/LtrimCommand.html deleted mode 100644 index 08d3231a..00000000 --- a/doc/LtrimCommand.html +++ /dev/null @@ -1,47 +0,0 @@

LtrimCommand


LTRIM _key_ _start_ _end_

Time complexity: O(n) (with n being len of list - len of range)
Trim an existing list so that it will contain only the specified range of elements. Start and end are zero-based indexes. 0 is the first element of the list (the list head), 1 the next element and so on.

For example LTRIM foobar 0 2 will modify the list stored at the foobar key so that only the first three elements of the list will remain.

_start_ and _end_ can also be negative numbers indicating offsets from the end of the list. For example -1 is the last element of the list, -2 the penultimate element and so on.

Indexes out of range will not produce an error: if start is beyond the end of the list, or start > end, an empty list is left as the value. If end is beyond the end of the list Redis will treat it just like the last element of the list.

Hint: the obvious use of LTRIM is together with LPUSH/RPUSH. For example:
-        LPUSH mylist <someelement>
-        LTRIM mylist 0 99
-
The above two commands will push elements into the list taking care that the list will not grow without limits. This is very useful when using Redis to store logs, for example. It is important to note that when used in this way LTRIM is an O(1) operation, because in the average case just one element is removed from the tail of the list.

Return value

Status code reply

diff --git a/doc/MgetCommand.html b/doc/MgetCommand.html deleted mode 100644 index 0c716907..00000000 --- a/doc/MgetCommand.html +++ /dev/null @@ -1,52 +0,0 @@

MgetCommand

- -
- -
- -
- #sidebar StringCommandsSidebar

MGET _key1_ _key2_ ... _keyN_

-Time complexity: O(1) for every key
Get the values of all the specified keys. If one or more keys don't exist or are not of type String, a 'nil' value is returned instead of the value of the specified key, but the operation never fails.
-

Return value

Multi bulk reply

Example

-$ ./redis-cli set foo 1000
-+OK
-$ ./redis-cli set bar 2000
-+OK
-$ ./redis-cli mget foo bar
-1. 1000
-2. 2000
-$ ./redis-cli mget foo bar nokey
-1. 1000
-2. 2000
-3. (nil)
-$ 
-
- -
- -
-
- - - diff --git a/doc/MonitorCommand.html b/doc/MonitorCommand.html deleted file mode 100644 index 1abe72b6..00000000 --- a/doc/MonitorCommand.html +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - -
- - - -
-
- -MonitorCommand: Contents
  MONITOR
    Return value -
- -

MonitorCommand

- -
- -
- -
- #sidebar ControlCommandsSidebar

MONITOR

MONITOR is a debugging command that outputs the whole sequence of commands received by the Redis server. It is very handy in order to understand what is happening in the database. This command can be used directly via telnet.
-
-% telnet 127.0.0.1 6379
-Trying 127.0.0.1...
-Connected to segnalo-local.com.
-Escape character is '^]'.
-MONITOR
-+OK
-monitor
-keys *
-dbsize
-set x 6
-foobar
-get x
-del x
-get x
-set key_x 5
-hello
-set key_y 5
-hello
-set key_z 5
-hello
-set foo_a 5
-hello
-
The ability to see all the requests processed by the server is useful in order to spot bugs in the application both when using Redis as a database and as a distributed caching system.
-
In order to end a monitoring session just issue a QUIT command by hand.
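For reference, a rough Python equivalent of the telnet session above, talking to the server over a raw TCP socket (host and port are the defaults used throughout this documentation):

import socket

s = socket.create_connection(("127.0.0.1", 6379))
s.sendall(b"MONITOR\r\n")
try:
    # Dump every command the server receives until the script is interrupted.
    buf = b""
    while True:
        buf += s.recv(4096)
        while b"\r\n" in buf:
            line, buf = buf.split(b"\r\n", 1)
            print(line.decode())
finally:
    s.sendall(b"QUIT\r\n")
    s.close()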
-

Return value

Non standard return value, just dumps the received commands in an infinite flow. - -
- -
-
- - - diff --git a/doc/MoveCommand.html b/doc/MoveCommand.html deleted file mode 100644 index 3bab67d0..00000000 --- a/doc/MoveCommand.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
-
- -MoveCommand: Contents
  MOVE _key_ _dbindex_
    Return value -
- -

MoveCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

MOVE _key_ _dbindex_

-
Move the specified key from the currently selected DB to the specified destination DB. Note that this command returns 1 only if the key was successfully moved, and 0 if the target key was already there or if the source key was not found at all, so it is possible to use MOVE as a locking primitive.
-

Return value

Integer reply, specifically:

-1 if the key was moved
-0 if the key was not moved because already present on the target DB or was not found in the current DB.
-
- -
- -
-
- - - diff --git a/doc/MsetCommand.html b/doc/MsetCommand.html deleted file mode 100644 index 2b6bff0f..00000000 --- a/doc/MsetCommand.html +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - -
- - - -
- - -

MsetCommand

- -
- -
- -
- #sidebar StringCommandsSidebar

MSET _key1_ _value1_ _key2_ _value2_ ... _keyN_ _valueN_ (Redis >= 1.1)

MSETNX _key1_ _value1_ _key2_ _value2_ ... _keyN_ _valueN_ (Redis >= 1.1)

-Time complexity: O(1) to set every key
Set the respective keys to the respective values. MSET will replace old values with new values, while MSETNX will not perform any operation at all even if just a single key already exists.
-
Because of this semantic MSETNX can be used in order to set different keys representing different fields of a unique logic object in a way that ensures that either all the fields or none at all are set.
-
Both MSET and MSETNX are atomic operations. This means that for instance if the keys A and B are modified, another client talking to Redis can either see the changes to both A and B at once, or no modification at all.
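A hedged sketch of the "all fields or none" pattern in Python, assuming the third-party redis-py client; the user:1:* key names are purely illustrative:

import redis

r = redis.Redis()

fields = {
    "user:1:name": "John",
    "user:1:surname": "Smith",
    "user:1:age": "40",
}

# MSETNX sets every key only if none of them already exists: 1 on success, 0 otherwise.
if r.msetnx(fields):
    print("all fields created atomically")
else:
    print("object already exists, nothing was written")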
-

MSET Return value

Status code reply Basically +OK as MSET can't fail

MSETNX Return value

Integer reply, specifically:

-1 if all the keys were set
-0 if no key was set (at least one key already existed)
-
-
- -
-
- - - diff --git a/doc/ObjectHashMappers.html b/doc/ObjectHashMappers.html deleted file mode 100644 index e66a7e42..00000000 --- a/doc/ObjectHashMappers.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -ObjectHashMappers: Contents
  Object Hash Mappers
    Ruby
      Ohm
      dm-redis-adapter
      redis-models -
- -

ObjectHashMappers

- -
- -
- -
- -

Object Hash Mappers

Looking for a higher level of abstraction for your Objects, their Properties and Relationships?

There is no need to stick to the client libraries exposing the raw features of Redis; here you will find a list of Object Hash Mappers, working in the same fashion an ORM does.

Ruby

Ohm

-

dm-redis-adapter

-

redis-models

  • Minimal model support for Redis. Directly maps Ruby properties to model_name:id:field_name keys in redis. Scalar, List and Set properties are supported. Values can be marshaled to/from Integer, Float, DateTime, JSON.
  • Repository: http://github.com/voloko/redis-model
-
- -
-
- - - diff --git a/doc/Pipelining.html b/doc/Pipelining.html deleted file mode 100644 index c3f3b4b8..00000000 --- a/doc/Pipelining.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - -
-
- -Pipelining: Contents
  Pipelining (DRAFT) -
- -

Pipelining

- -
- -
- -
-

Pipelining (DRAFT)

A client library can use the same connection in order to issue multiple commands. But Redis supports pipelining, so multiple commands can be sent to the server with a single write operation by the client, without need to read the server reply in order to issue the next command. All the replies can be read at the end.

Usually the Redis server and client will have a very fast link, so supporting this feature in a client implementation is not very important; still, if an application needs to issue a very large number of commands in a short time, using pipelining can be much faster.

Please read the ProtocolSpecification if you want to learn more about the way Redis clients and the server communicate.

Pipelining is one of the Speed Features of Redis; you can also check the support for sending and receiving multiple values in a single command. -
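As a rough illustration, the following Python snippet pipelines three inline PING commands over a bare TCP socket, following the protocol described in the ProtocolSpecification; no client library is assumed:

import socket

s = socket.create_connection(("localhost", 6379))
s.sendall(b"PING\r\nPING\r\nPING\r\n")   # single write, three commands
replies = b""
while replies.count(b"\r\n") < 3:        # wait for the three +PONG replies
    replies += s.recv(4096)
print(replies.split(b"\r\n")[:3])        # [b'+PONG', b'+PONG', b'+PONG']
s.close()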
- -
-
- - - diff --git a/doc/ProgrammingExamples.html b/doc/ProgrammingExamples.html deleted file mode 100644 index 9dc04b8e..00000000 --- a/doc/ProgrammingExamples.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -ProgrammingExamples: Contents
  Programming Examples (DRAFT)
    TODO
    Java
      Twayis
    PHP
      Retwis
    Ruby
      twatcher-lite
      Resque
      Retwis-rb
      scanty-redis
      Note Taking -
- -

ProgrammingExamples

- -
- -
- -
- -

Programming Examples (DRAFT)

TODO

-Nothing speaks better than code examples, here you are:

Java

Twayis



A Java clone of Retwis showcase integration between the Play! framework and Redis Google Code Project Page

PHP

Retwis

A PHP Twitter clone, the original example of Redis capabilities. With a live demo, and an article explaining its design. You can find the code in the Downloads tab.

Ruby

twatcher-lite

A simplified version of the application running http://twatcher.com/ from Mirko Froehlich (@digitalhobbit) with a full blog post explaining its development at Building a Twitter Filter With Sinatra, Redis, and TweetStream

Resque

The "simple" Redis-based queue behind Github background jobs, that replaced SQS, Starling, ActiveMessaging, BackgroundJob, DelayedJob, and Beanstalkd. Developed by Chris Wanstrath (@defunkt) the code is at http://github.com/defunkt/resque, be sure to read the introduction

Retwis-rb

A port of Retwis to Ruby and Sinatra written by Daniel Lucraft (@DanLucraft) Full source code is available at http://github.com/danlucraft/retwis-rb

scanty-redis

Scanty is minimal blogging software developed by Adam Wiggins (@hirodusk) It is not a blogging engine, but it’s small and easy to modify, so it could be the starting point for your blog. This fork is modified to use Redis, a full featured key-value database, instead of SQL.

Note Taking

A very simple note taking example of Ruby and Redis application using Sinatra. Developed by Pieter Noordhuis @pnoordhuis, you can check the code at http://gist.github.com/86714 -
- -
-
- - - diff --git a/doc/ProtocolSpecification.html b/doc/ProtocolSpecification.html deleted file mode 100644 index 686c574e..00000000 --- a/doc/ProtocolSpecification.html +++ /dev/null @@ -1,142 +0,0 @@ - - - - - - - -
- - - -
- - -

ProtocolSpecification

- -
- -
- -
- = Protocol Specification =

The Redis protocol is a compromise between being easy to parse by a computer and being easy to parse by a human. Before reading this section you are strongly encouraged to read the "REDIS TUTORIAL" section of this README in order to get a first feeling for the protocol by playing with it via TELNET.

Networking layer

A client connects to a Redis server creating a TCP connection to the port 6379. -Every redis command or data transmitted by the client and the server is -terminated by "\r\n" (CRLF).

Simple INLINE commands

The simplest commands are the inline commands. This is an example of a -server/client chat (the server chat starts with S:, the client chat with C:)

-C: PING
-S: +PONG
-
An inline command is a CRLF-terminated string sent to the server. The server can reply to commands in different ways:
  • With an error message (the first byte of the reply will be "-")
  • With a single line reply (the first byte of the reply will be "+")
  • With bulk data (the first byte of the reply will be "$")
  • With multi-bulk data, a list of values (the first byte of the reply will be "*")
  • With an integer number (the first byte of the reply will be ":")
-The following is another example of an INLINE command returning an integer:

-C: EXISTS somekey
-S: :0
-
Since 'somekey' does not exist the server returned ':0'.

Note that the EXISTS command takes one argument. Arguments are separated -simply by spaces.

Bulk commands

A bulk command is exactly like an inline command, but the last argument of the command must be a stream of bytes in order to send data to the server. The "SET" command is a bulk command, see the following example:

-C: SET mykey 6
-C: foobar
-S: +OK
-
The last argument of the command is '6'. This specifies the number of DATA bytes that will follow (note that even these bytes are terminated by two additional bytes of CRLF).

All the bulk commands are in this exact form: instead of the last argument -the number of bytes that will follow is specified, followed by the bytes, -and CRLF. In order to be more clear for the programmer this is the string -sent by the client in the above sample:

"SET mykey 6\r\nfoobar\r\n"
-

Bulk replies

The server may reply to an inline or bulk command with a bulk reply. See -the following example:

-C: GET mykey
-S: $6
-S: foobar
-
A bulk reply is very similar to the last argument of a bulk command. The -server sends as the first line a "$" byte followed by the number of bytes -of the actual reply followed by CRLF, then the bytes are sent followed by -additional two bytes for the final CRLF. The exact sequence sent by the -server is:

"$6\r\nfoobar\r\n"
-If the requested value does not exist the bulk reply will use the special -value -1 as data length, example:

-C: GET nonexistingkey
-S: $-1
-
The client library API should not return an empty string, but a nil object, when the requested object does not exist. -For example a Ruby library should return 'nil' while a C library should return -NULL, and so forth.
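A small Python sketch of reading a bulk reply over a raw socket, following the rules above (a "$" header carrying the payload length, with -1 meaning a missing key):

import socket

def read_line(sock):
    line = b""
    while not line.endswith(b"\r\n"):
        line += sock.recv(1)
    return line[:-2]

s = socket.create_connection(("localhost", 6379))
s.sendall(b"GET mykey\r\n")
header = read_line(s)              # e.g. b"$6" or b"$-1"
length = int(header[1:])
if length == -1:
    value = None                   # missing key: return nil, not an empty string
else:
    payload = b""
    while len(payload) < length + 2:   # value plus the trailing CRLF
        payload += s.recv(length + 2 - len(payload))
    value = payload[:-2]
print(value)
s.close()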

Multi-Bulk replies

Commands similar to LRANGE need to return multiple values (every element of the list is a value, and LRANGE needs to return more than a single element). This is accomplished using multiple bulk writes, prefixed by an initial line indicating how many bulk writes will follow. The first byte of a multi bulk reply is always *. Example:

-C: LRANGE mylist 0 3
-S: *4
-S: $3
-S: foo
-S: $3
-S: bar
-S: $5
-S: Hello
-S: $5
-S: World
-
The first line the server sent is "*4\r\n" in order to specify that four bulk writes will follow. Then every bulk write is transmitted.

If the specified key does not exist instead of the number of elements in the -list, the special value -1 is sent as count. Example:

-C: LRANGE nokey 0 1
-S: *-1
-
A client library API SHOULD return a nil object and not an empty list when this happens. This makes it possible to distinguish between empty lists and non existing ones.

Nil elements in Multi-Bulk replies

Single elements of a multi bulk reply may have -1 length, in order to signal that these elements are missing and not empty strings. This can happen with the SORT command when used with the GET pattern option when the specified key is missing. Example of a multi bulk reply containing a missing element:

-S: *3
-S: $3
-S: foo
-S: $-1
-S: $3
-S: bar
-
The second element is nil. The client library should return something like this:

-["foo",nil,"bar"]
-

Single line reply

As already seen a single line reply is in the form of a single line string -starting with "+" terminated by "\r\n". For example:

-+OK
-
The client library should return everything after the "+", that is, the string "OK" in the example.

The following commands reply with a status code reply: -PING, SET, SELECT, SAVE, BGSAVE, SHUTDOWN, RENAME, LPUSH, RPUSH, LSET, LTRIM

Integer reply

This type of reply is just a CRLF terminated string representing an integer, prefixed by a ":" byte. For example ":0\r\n", or ":1000\r\n" are integer replies.

With commands like INCR or LASTSAVE using the integer reply to actually return a value there is no special meaning for the returned integer. It is just an incremental number for INCR, a UNIX time for LASTSAVE and so on.

Some commands like EXISTS will return 1 for true and 0 for false.

Other commands like SADD, SREM and SETNX will return 1 if the operation was actually done, 0 otherwise.

The following commands will reply with an integer reply: SETNX, DEL, EXISTS, INCR, INCRBY, DECR, DECRBY, DBSIZE, LASTSAVE, RENAMENX, MOVE, LLEN, SADD, SREM, SISMEMBER, SCARD

Multi bulk commands

As you can see with the protocol described so far there is no way to send multiple binary-safe arguments to a command. With bulk commands the last argument is binary safe, but there are commands where multiple binary-safe arguments are needed, like the MSET command that is able to SET multiple keys in a single operation.

In order to address this problem Redis 1.1 introduced a new way of sending commands to a Redis server, which uses exactly the same protocol as the multi bulk replies. For instance the following is a SET command using the normal bulk protocol:

-SET mykey 7
-myvalue
-
While the following uses the multi bulk command protocol:

-*3
-$3
-SET
-$5
-mykey
-$7
-myvalue
-
Commands sent in this format are longer, so currently they are used only in order to transmit commands containing multiple binary-safe arguments, but actually this protocol can be used to send every kind of command, without having to know if it's an inline, bulk or multi-bulk command.

It is possible that in the future Redis will support only this format.

A good client library may implement unknown commands using this -command format in order to support new commands out of the box without -modifications.
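For illustration, a tiny Python helper that frames an arbitrary command in the multi bulk format described above (a sketch, not any particular client's API):

def multi_bulk(*args):
    # *<argc>, then each argument as $<len> followed by the raw bytes, all CRLF terminated.
    out = b"*%d\r\n" % len(args)
    for arg in args:
        if isinstance(arg, str):
            arg = arg.encode()
        out += b"$%d\r\n%s\r\n" % (len(arg), arg)
    return out

# Frames each argument with its byte length, so every argument is binary safe.
print(multi_bulk("SET", "mykey", "myvalue"))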

Multiple commands and pipelining

A client can use the same connection in order to issue multiple commands. Pipelining is supported, so multiple commands can be sent with a single write operation by the client; there is no need to read the server reply in order to issue the next command. All the replies can be read at the end.

Usually the Redis server and client will have a very fast link, so supporting this feature in a client implementation is not very important; still, if an application needs to issue a very large number of commands in a short time, using pipelining can be much faster. -
-
- -
-
- - - diff --git a/doc/QuickStart.html b/doc/QuickStart.html deleted file mode 100644 index c233a1b5..00000000 --- a/doc/QuickStart.html +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - -
- - - -
-
- -QuickStart: Contents
  Quick Start
    Obtain the latest version
    Compile
    Run the server
    Play with the built in client
    Further reading -
- -

QuickStart

- -
- -
- -
- #sidebar RedisGuides -

Quick Start

This quickstart is a five minutes howto on how to get started with Redis. For more information on Redis check Redis Documentation Index.

Obtain the latest version

The latest stable source distribution of Redis can be obtained at this location as a tarball.

-$ wget http://redis.googlecode.com/files/redis-1.02.tar.gz
-
The unstable source code, with more features but not ready for production, can be downloaded using git:

-$ git clone git://github.com/antirez/redis.git
-

Compile

Redis can be compiled on most POSIX systems. To compile Redis just untar the tar.gz, enter the directory and type 'make'.

-$ tar xvzf redis-1.02.tar.gz
-$ cd redis-1.02
-$ make
-
In order to test if the Redis server is working well on your computer, make sure to run make test and check that all the tests pass.

Run the server

Redis can run just fine without a configuration file (when executed without a config file a standard configuration is used). To run Redis just type the following command:

-$ ./redis-server
-
With the default configuration Redis will log to the standard output so you can check what happens. Later, you can change the default settings.

Play with the built in client

Redis ships with a command line client, called redis-cli, that is automatically compiled when you run make. For instance to set a key and read back the value use the following:

-$ ./redis-cli set mykey somevalue
-OK
-$ ./redis-cli get mykey
-somevalue
-
What about adding elements to a list:

-$ ./redis-cli lpush mylist firstvalue
-OK
-$ ./redis-cli lpush mylist secondvalue
-OK
-$ ./redis-cli lpush mylist thirdvalue
-OK
-$ ./redis-cli lrange mylist 0 -1
-1. thirdvalue
-2. secondvalue
-3. firstvalue
-$ ./redis-cli rpop mylist
-firstvalue
-$ ./redis-cli lrange mylist 0 -1
-1. thirdvalue
-2. secondvalue
-

Further reading

-
- -
-
- - - diff --git a/doc/QuitCommand.html b/doc/QuitCommand.html deleted file mode 100644 index 95eb25b6..00000000 --- a/doc/QuitCommand.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -QuitCommand: Contents
  Quit
    Return value -
- -

QuitCommand

- -
- -
- -
- #sidebar ConnectionHandlingSidebar

Quit

Ask the server to silently close the connection.
-

Return value

None. The connection is closed as soon as the QUIT command is received. - -
- -
-
- - - diff --git a/doc/README.html b/doc/README.html deleted file mode 100644 index f70fe83f..00000000 --- a/doc/README.html +++ /dev/null @@ -1,88 +0,0 @@ - - - - - - - -
- - - -
- - -

README

- -
- -
- -
- = Introduction =

Redis is a database. To be specific, Redis is a database implementing a dictionary, where every key is associated with a value. For example I can set the key "surname_1992" to the string "Smith". -What makes Redis different from many other key-value stores, is that every single value has a type. The following types are supported:

-The type of a value determines what operations (called commands) are available for the value itself. -For example you can append elements to a list stored at the key "mylist" using the LPUSH or RPUSH command in O(1). Later you'll be able to get a range of elements with LRANGE or trim the list with LTRIM. Sets are very flexible too, it is possible to add and remove elements from Sets (unsorted collections of strings), and then ask for server-side intersection, union, difference of Sets. Each command is performed through server-side atomic operations. -Please refer to the Command Reference to see the full list of operations associated to these data types.

In other words, you can look at Redis as a data structures server. A Redis user is virtually provided with an interface to Abstract Data Types, saving her from the responsibility to implement concrete data structures and algorithms. Indeed both algorithms and data structures in Redis are properly chosen in order to obtain the best performance.

All data in memory, but saved on disk

Redis loads and maintains the whole dataset in memory, but the dataset is persistent, since at the same time it is saved on disk, so that when the server is restarted data can be loaded back into memory.

There are two kinds of persistence supported: the first one is called snapshotting. In this mode Redis, from time to time, writes a dump on disk asynchronously. The dataset is loaded from the dump every time the server is (re)started.

Redis can be configured to save the dataset when a certain number of changes is reached and after a given number of seconds elapses. For example, you can configure Redis to save after 1000 changes and at most 60 seconds since the last save. You can specify any combination for these numbers.

Because data is written asynchronously, when a system crash occurs, the last few queries can get lost (that is acceptable in many applications but not in all). In order to make this a non issue Redis supports another, safer persistence mode, called Append Only File, where every command received altering the dataset (so not a read-only command, but a write command) is written on an append only file ASAP. These commands are replayed when the server is restarted in order to rebuild the dataset in memory.

Redis Append Only File supports a very handy feature: the server is able to safely rebuild the append only file in background in a non-blocking fashion when it gets too long. You can find more details in the Append Only File HOWTO.

Master-Slave replication made trivial

Whatever persistence mode you use, Redis supports master-slave replication if you want to stay really safe or if you need to scale to huge amounts of reads.

Redis Replication is trivial to set up. So trivial that all you need to do in order to configure a Redis server to be a slave of another one, with automatic resynchronization if the link goes down and so forth, is the following config line: slaveof 192.168.1.100 6379. We provide a Replication Howto if you want to know more about this feature.

It's persistent but supports expires

Redis can be used as a memcached on steroids because it is as fast as memcached but with a number of additional features. Like memcached, Redis also supports setting timeouts on keys so that a key will be automatically removed when a given amount of time passes.

Beyond key-value databases

All these features allow you to use Redis as the sole DB for your scalable application without the need of any relational database. We wrote a simple Twitter clone in PHP + Redis to show a real world example, the link points to an article explaining the design and internals in very simple words.

Multiple databases support

Redis supports multiple databases with commands to atomically move keys from one database to the other. By default DB 0 is selected for every new connection, but using the SELECT command it is possible to select a different database. The MOVE operation can move an item from one DB to another atomically. This can be used as a base for locking free algorithms together with the 'RANDOMKEY' commands.

Know more about Redis!

To really get a feeling about what Redis is and how it works please try reading A fifteen minutes introduction to Redis data types.

To know a bit more about how Redis works internally continue reading.

Redis Tutorial

(note, you can skip this section if you are only interested in "formal" doc.)

Later in this document you can find detailed information about Redis commands, -the protocol specification, and so on. This kind of documentation is useful -but... if you are new to Redis it is also BORING! The Redis protocol is designed -so that is both pretty efficient to be parsed by computers, but simple enough -to be used by humans just poking around with the 'telnet' command, so this -section will show to the reader how to play a bit with Redis to get an initial -feeling about it, and how it works.

To start just compile redis with 'make' and start it with './redis-server'. -The server will start and log stuff on the standard output, if you want -it to log more edit redis.conf, set the loglevel to debug, and restart it.

You can specify a configuration file as unique parameter:

./redis-server /etc/redis.conf
-This is NOT required. The server will start even without a configuration file -using a default built-in configuration.

Now let's try to set a key to a given value:

-$ telnet localhost 6379
-Trying 127.0.0.1...
-Connected to localhost.
-Escape character is '^]'.
-SET foo 3  
-bar
-+OK
-
The first line we sent to the server is "set foo 3". This means "set the key -foo with the following three bytes I'll send you". The following line is -the "bar" string, that is, the three bytes. So the effect is to set the -key "foo" to the value "bar". Very simple!

(note that you can send commands in lowercase and it will work anyway, -commands are not case sensitive)

Note that after the first and the second line we sent to the server there -is a newline at the end. The server expects commands terminated by "\r\n" -and sequence of bytes terminated by "\r\n". This is a minimal overhead from -the point of view of both the server and client but allows us to play with -Redis with the telnet command easily.

The last line of the chat between server and client is "+OK". This means -our key was added without problems. Actually SET can never fail but -the "+OK" sent lets us know that the server received everything and -the command was actually executed.

Let's try to get the key content now:

-GET foo
-$3
-bar
-
Ok that's very similar to 'set', just the other way around. We sent "get foo", the server replied with a first line that is just the $ character followed by the number of bytes the value stored at key contained, followed by the actual bytes. Again "\r\n" are appended both to the bytes count and the actual data. In Redis slang this is called a bulk reply.

What about requesting a non existing key?

-GET blabla
-$-1
-
When the key does not exist, instead of the length just the "$-1" string is sent. Since a -1 length of a bulk reply has no meaning it is used in order to specify a 'nil' value and distinguish it from a zero length value. Another way to check if a given key exists or not is indeed the EXISTS command:

-EXISTS nokey
-:0
-EXISTS foo
-:1
-
As you can see the server replied ':0' the first time since 'nokey' does not -exist, and ':1' for 'foo', a key that actually exists. Replies starting with the colon character are integer reply.

Ok... now you know the basics, read the REDIS COMMAND REFERENCE section to -learn all the commands supported by Redis and the PROTOCOL SPECIFICATION -section for more details about the protocol used if you plan to implement one -for a language missing a decent client implementation.

License

Redis is released under the BSD license. See the COPYING file for more information.

Credits

Redis is written and maintained by Salvatore Sanfilippo, Aka 'antirez'. -
- -
-
- - - diff --git a/doc/RandomkeyCommand.html b/doc/RandomkeyCommand.html deleted file mode 100644 index ac8e8cd4..00000000 --- a/doc/RandomkeyCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -RandomkeyCommand: Contents
  RANDOMKEY
    Return value -
- -

RandomkeyCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

RANDOMKEY

-Time complexity: O(1)
Return a randomly selected key from the currently selected DB.
-

Return value

Single line reply, specifically the randomly selected key, or an empty string if the database is empty. - -
- -
-
- - - diff --git a/doc/Redis0100ChangeLog.html b/doc/Redis0100ChangeLog.html deleted file mode 100644 index 26b32ac2..00000000 --- a/doc/Redis0100ChangeLog.html +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - -
- - - -
-
- -Redis0100ChangeLog: Contents
  Redis 0.100 Changelog -
- -

Redis0100ChangeLog

- -
- -
- -
-

Redis 0.100 Changelog

-
-- SUNION, SDIFF, SUNIONSTORE, SDIFFSTORE commands implemented. (Aman Gupta, antirez)
-- Non blocking replication. Now while N slaves are synchronizing, the master will continue to reply to client queries. (antirez)
-- PHP client ported to PHP5 (antirez)
-- FLUSHALL/FLUSHDB no longer sync on disk. Just increment the dirty counter by the number of elements removed, that will probably trigger a background saving operation (antirez)
-- INCRBY/DECRBY now support 64bit increments, with tests (antirez)
-- New fields in INFO command, bgsave_in_progress and replication related (antirez)
-- Ability to specify a different file name for the DB (... can't remember ...)
-- GETSET command, atomic GET + SET (antirez)
-- SMOVE command implemented, atomic move-element across sets operation (antirez)
-- Ability to work with huge data sets, tested up to 350 million keys (antirez)
-- Warns if /proc/sys/vm/overcommit_memory is set to 0 on Linux. Also make sure to don't resize the hash tables while the child process is saving in order to avoid copy-on-write of memory pages (antirez)
-- Infinite number of arguments for MGET and all the other commands (antirez)
-- CPP client (Brian Hammond)
-- DEL is now a vararg, IMPORTANT: memory leak fixed in loading DB code (antirez)
-- Benchmark utility now supports random keys (antirez)
-- Timestamp in log lines (antirez)
-- Fix SINTER/UNIONSTORE to allow for &=/|= style operations (i.e. SINTERSTORE set1 set1 set2) (Aman Gupta)
-- Partial qsort implemented in SORT command, only when both BY and LIMIT is used (antirez)
-- Allow timeout=0 config to disable client timeouts (Aman Gupta)
-- Alternative (faster/simpler) ruby client API compatible with Redis-rb (antirez)
-- S*STORE now return the cardinality of the resulting set (antirez)
-- TTL command implemented (antirez)
-- Critical bug about glueoutputbuffers=yes fixed. Under load and with pipelining and clients disconnecting on the middle of the chat with the server, Redis could block. (antirez)
-- Different replication fixes (antirez)
-- SLAVEOF command implemented for remote replication management (antirez)
-- Issue with redis-client used in scripts solved, now to check if the latest argument must come from standard input we do not check that stdin is or not a tty but the command arity (antirez)
-- Warns if using the default config (antirez)
-- maxclients implemented, see redis.conf for details (antirez)
-- max bytes of a received command enlarged from 1k to 32k (antirez)
-
-
- -
-
- - - diff --git a/doc/Redis0900ChangeLog.html b/doc/Redis0900ChangeLog.html deleted file mode 100644 index e2e360fb..00000000 --- a/doc/Redis0900ChangeLog.html +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - - -
- - - -
-
- -Redis0900ChangeLog: Contents
  CHANGELOG for Redis 0.900 -
- -

Redis0900ChangeLog

- -
- -
- -
-

CHANGELOG for Redis 0.900

-2009-06-16 client libraries updated (antirez)
-2009-06-16 Better handling of background saving process killed or crashed (antirez)
-2009-06-14 number of keys info in INFO command (Diego Rosario Brogna)
-2009-06-14 SPOP documented (antirez)
-2009-06-14 Clojure library (Ragnar Dahlén)
-2009-06-10 It is now possible to specify - as config file name to read it from stdin (antirez)
-2009-06-10 max bytes in an inline command raised to 1024*1024 bytes, in order to allow for very large MGETs and still protect from client crashes (antirez)
-2009-06-08 SPOP implemented. Hash table resizing for Sets and Expires too. Changed the resize policy to play better with RANDOMKEY and SPOP. (antirez)
-2009-06-07 some minor changes to the backtrace code (antirez)
-2009-06-07 enable backtrace capabilities only for Linux and MacOSX (antirez)
-2009-06-07 Dump a backtrace on sigsegv/sigbus, original coded (Diego Rosario Brogna)
-2009-06-05 Avoid a busy loop while sending very large replies against very fast links, this allows to be more responsive with other clients even under a KEY * against the loopback interface (antirez)
-2009-06-05 Kill the background saving process before performing SHUTDOWN to avoid races (antirez)
-2009-06-05 LREM now returns :0 for non existing keys (antirez)
-2009-06-05 added config.h for #ifdef business isolation, added fstat64 for Mac OS X (antirez)
-2009-06-04 macosx specific zmalloc.c, uses malloc_size function in order to avoid to waste memory and time to put an additional header (antirez)
-2009-06-04 DEBUG OBJECT implemented (antirez)
-2009-06-03 shareobjectspoolsize implemented in reds.conf, in order to control the pool size when object sharing is on (antirez)
-2009-05-27 maxmemory implemented (antirez)
-
-
- -
-
- - - diff --git a/doc/Redis_1_2_0_Changelog.html b/doc/Redis_1_2_0_Changelog.html deleted file mode 100644 index 256bee02..00000000 --- a/doc/Redis_1_2_0_Changelog.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - -
- - - -
- - -

Redis_1_2_0_Changelog

- -
- -
- -
-

What's new in Redis 1.2

New persistence mode: Append Only File

The Append Only File is an alternative way to save your data in Redis that is fully durable! Unlike the snapshotting (default) persistence mode, where the database is saved asynchronously from time to time, the Append Only File saves every change ASAP in a text-only file that works like a journal. Redis will play back this file again at startup reloading the whole dataset back in memory. Redis Append Only File supports background Log compaction. For more info read the Append Only File HOWTO.

New data type: sorted sets

Sorted sets are collections of elements (like Sets) with an associated score (in the form of a double precision floating point number). Elements in a sorted set are taken in order, so for instance to take the greatest element is an O(1) operation. Insertion and deletion is O(log(N)). Sorted sets are implemented using a dual ported data structure consisting of an hash table and a skip list. For more information please read the Introduction To Redis Data Types.

Specialized integer objects encoding

Redis 1.2 will use less memory than Redis 1.0 for values in Strings, Lists or Sets elements that happen to be representable as 32 or 64 bit signed integers (it depends on your arch bits for the long C type). This is totally transparent from the point of view of the user, but will save a lot of memory (30% less in datasets where there are many integers).

MSET and MSETNX

That is, setting multiple keys in one command, atomically. For more information see the MSET command wiki page.

Better Performances

  • 100x times faster SAVE and BGSAVE! There was a problem in the LZF lib configuration that is now resolved. The effect is this impressive speedup. Also the saving child will no longer use 100% of CPU.
  • Glue output buffer and writev(). Many commands producing large outputs, like LRANGE, will now be even 10 times faster, thanks to the new output buffer gluing algorithm and the (optional) use of writev(2) syscall.
  • Support for epoll and kqueue / kevent. 10,000 clients scalability.
  • Much better EXPIRE support, now it's possible to work with very large sets of keys expiring in a very short time without incurring memory problems (the new algorithm expires keys in an adaptive way, so it will get more aggressive if there are a lot of expiring keys)
-

Solaris Support

Redis will now compile and work on Solaris without problems. Warning: the Solaris user base is very little, so Redis running on Solaris may not be as tested and stable as it is on Linux and Mac OS X.

Support for the new generation protocol

  • Redis is now able to accept commands in a new fully binary safe way: with the new protocol keys are binary safe, not only values, and there is no distinction between bulk commands and inline commands. This new protocol is currently used only for MSET and MSETNX but at some point it will hopefully replace the old one. See the Multi Bulk Commands section in the Redis Protocol Specification for more information.
-

A few new commands about already supported data types

  • SRANDMEMBER
  • The SortCommand now supports the STORE and GET # forms; the first can be used to save sorted lists, sets or sorted sets into keys for caching. Check the manual page for more information about the GET # form.
  • The new RPOPLPUSH command can do many interesting magics, and a few of this are documented in the wiki page of the command.
-

Bug fixing

Of course, many bugs are now fixed, and I bet, a few others introduced: this is how software works after all, so make sure to report issues in the Redis mailing list or in the Google Code issues tracker.

Enjoy! -antirez

CHANGELOG for Redis 1.1.90

  • 2009-09-10 in-memory specialized object encoding. (antirez)
  • 2009-09-17 maxmemory fixed in 64 systems for values > 4GB. (antirez)
  • 2009-10-07 multi-bulk protocol implemented. (antriez)
  • 2009-10-16 MSET and MSETNX commands implemented (antirez)
  • 2009-10-21 SRANDMEMBER added (antirez)
  • 2009-10-23 Fixed compilation in mac os x snow leopard when compiling a 32 bit binary. (antirez)
  • 2009-10-23 New data type: Sorted sets and Z-commands (antirez)
  • 2009-10-26 Solaris fixed (Alan Harder)
  • 2009-10-29 Fixed a number of open issues (antirez)
  • 2009-10-30 New persistence mode: append only file (antirez)
  • 2009-11-01 SORT STORE option (antirez)
  • 2009-11-03 redis-cli now accepts a -r (repeat) switch. (antirez)
  • 2009-11-04 masterauth option merged (Anthony Lauzon)
  • 2009-11-04 redis-test is now a better Redis citizen, testing everything against DB 9 and 10 and only if this DBs are empty. (antirez)
  • 2009-11-10 Implemented a much better lazy expiring algorithm for EXPIRE (antirez)
  • 2009-11-11 RPUSHLPOP (antirez from an idea of @ezmobius)
  • 2009-11-12 Merge git://github.com/ianxm/redis (Can't remmber what this implements, sorry)
  • 2009-11-17 multi-bulk reply support for redis-bench, LRANGE speed tests (antirez)
  • 2009-11-17 support for writev implemented. (Stefano Barbato)
  • 2009-11-19 debug mode (-D) in redis-bench (antirez)
  • 2009-11-21 SORT GET # implemented (antirez)
  • 2009-11-23 ae.c made modular, with support for epoll. (antirez)
  • 2009-11-26 background append log rebuilding (antirez)
  • 2009-11-28 Added support for kqueue. (Harish Mallipeddi)
  • 2009-11-29 SORT support for sorted sets (antirez, thanks to @tobi for the idea)
-
- -
-
- - - diff --git a/doc/RenameCommand.html b/doc/RenameCommand.html deleted file mode 100644 index e5b681cb..00000000 --- a/doc/RenameCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -RenameCommand: Contents
  RENAME _oldkey_ _newkey_
    Return value -
- -

RenameCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

RENAME _oldkey_ _newkey_

-Time complexity: O(1)
Atomically renames the key oldkey to newkey. If the source and destination name are the same an error is returned. If newkey already exists it is overwritten.
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/RenamenxCommand.html b/doc/RenamenxCommand.html deleted file mode 100644 index e41d3dae..00000000 --- a/doc/RenamenxCommand.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
-
- -RenamenxCommand: Contents
  RENAMENX _oldkey_ _newkey_
    Return value -
- -

RenamenxCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

RENAMENX _oldkey_ _newkey_

-Time complexity: O(1)
Rename oldkey into newkey but fails if the destination key newkey already exists.
-

Return value

Integer reply, specifically:

-1 if the key was renamed
-0 if the target key already exist
-
- -
- -
-
- - - diff --git a/doc/ReplicationHowto.html b/doc/ReplicationHowto.html deleted file mode 100644 index d0a6c6ca..00000000 --- a/doc/ReplicationHowto.html +++ /dev/null @@ -1,43 +0,0 @@ - - - - - - - -
- - - -
-
- -ReplicationHowto: Contents
  Redis Replication Howto
    General Information
    How Redis replication works
    Configuration -
- -

ReplicationHowto

- -
- -
- -
- #sidebar RedisGuides -

Redis Replication Howto

General Information

Redis replication is a very simple to use and configure master-slave replication that allows slave Redis servers to be exact copies of master servers. The following are some very important facts about Redis replication:

  • A master can have multiple slaves.
  • Slaves are able to accept connections from other slaves, so instead of connecting a number of slaves to the same master it is also possible to connect some of the slaves to other slaves in a graph-alike structure.
  • Redis replication is non-blocking on the master side, this means that the master will continue to serve queries while one or more slaves are performing the first synchronization. Instead replication is blocking on the slave side: while the slave is performing the first synchronization it can't reply to queries.
  • Replications can be used both for scalability, in order to have multiple slaves for read-only queries (for example heavy SORT operations can be launched against slaves), or simply for data redundancy.
  • It is possible to use replication to avoid the saving process on the master side: just configure your master redis.conf in order to avoid saving at all (just comment all the "save" directives), then connect a slave configured to save from time to time.
-

How Redis replication works

In order to start the replication, or after the connection closes in order to resynchronize with the master, the slave connects to the master and issues the SYNC command.

The master starts a background saving, and at the same time starts to collect all the new commands received that had the effect of modifying the dataset. When the background saving is completed the master transfers the database file to the slave, which saves it on disk and then loads it into memory. At this point the master starts to send all the accumulated commands, and all the new commands received from clients that had the effect of a dataset modification, to the slave, as a stream of commands, in the same format as the Redis protocol itself.

You can try it yourself via telnet. Connect to the Redis port while the server is doing some work and issue the SYNC command. You'll see a bulk transfer and then every command received by the master will be re-issued in the telnet session.

Slaves are able to automatically reconnect when the master <-> slave link goes down for some reason. If the master receives multiple concurrent slave synchronization requests it performs a single background saving in order to serve them all.

Configuration

To configure replication is trivial: just add the following line to the slave configuration file: -
-slaveof 192.168.1.1 6379
-
-Of course you need to replace 192.168.1.1 6379 with your master ip address (or hostname) and port. - -
- -
-
- - - diff --git a/doc/ReplyTypes.html b/doc/ReplyTypes.html deleted file mode 100644 index 9f60aecf..00000000 --- a/doc/ReplyTypes.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
- - -

ReplyTypes

- -
- -
- -
-

Redis Reply Types

Redis commands can reply to the client with several different kinds of replies; you can find the protocol level specification of these replies in the Redis Protocol Specification. This page is instead a higher level description of the reply types from the point of view of the final user.

Status code reply

-Status code replies are single line strings having the + character as first byte. The string to return to the client is simply everything that follows the first + character. For example the PING command returns +PONG, that is the string "PONG".

Error reply

-This is like a status code reply but the first character is - instead of +. The client library should raise an error for error replies and stop the execution of the program if the exception is not trapped, showing the error message (everything following the first - character). An example of error is "-Error no such key" or "-foobar". Note that error replies will not collide with negative integer replies since integer replies are prefixed with the : character.

Integer reply

-At protocol level integer replies are single line replies in the form of a decimal signed number prefixed by a : character. For example :10 is an integer reply. Redis commands returning true or false will use an integer reply with 0 or 1 as values where 0 is false and 1 is true.

Integer replies are usually passed by client libraries as integer values.

Bulk reply

-A bulk reply is a binary-safe reply that is used to return a binary safe single string value (string is not limited to alphanumerical strings, it may contain binary data of any kind). Client libraries will usually return a string as return value of Redis commands returning bulk replies. There is a special bulk reply that signals that the element does not exist. When this happens the client library should return 'nil', 'false', or some other special element that can be distinguished from an empty string.

Multi bulk reply

-While a bulk reply returns a single string value, multi bulk replies are used to return multiple values: lists, sets, and so on. Elements of a multi bulk reply can be missing. Client libraries should return 'nil' or 'false' in order to make these elements distinguishable from empty strings. Client libraries should return multi bulk replies that are about ordered elements like list ranges as lists, and replies about sets as hashes or Sets if the implementation language has a Set type. - -
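A trivial Python sketch of how a client might dispatch on the first byte of a reply, per the protocol level specification referenced above:

REPLY_KINDS = {
    b"+": "status code reply",
    b"-": "error reply",
    b":": "integer reply",
    b"$": "bulk reply",
    b"*": "multi bulk reply",
}

def reply_kind(first_byte):
    # A real client would then parse the rest of the line according to the kind.
    return REPLY_KINDS.get(first_byte, "unknown")

print(reply_kind(b"+"))   # status code reply
print(reply_kind(b"$"))   # bulk reply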
- -
-
- - - diff --git a/doc/RoadMap.html b/doc/RoadMap.html deleted file mode 100644 index 59f35711..00000000 --- a/doc/RoadMap.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -RoadMap: Contents
  Road Map (ROUGH DRAFT)
    Features added in past versions
    1.1 / 1.2
    0.x / 1.0 -
- -

RoadMap

- -
- -
- -
- -

Road Map (ROUGH DRAFT)

The up to date, raw Road Map for Redis is part of the source code, you can find it here: http://github.com/antirez/redis/raw/master/TODO

Features added in past versions

1.1 / 1.2

-

0.x / 1.0

-
- -
-
- - - diff --git a/doc/RpoplpushCommand.html b/doc/RpoplpushCommand.html deleted file mode 100644 index dbd82029..00000000 --- a/doc/RpoplpushCommand.html +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - -
- - - -
- - -

RpoplpushCommand

- -
- -
- -
- #sidebar ListCommandsSidebar

RPOPLPUSH _srckey_ _dstkey_ (Redis >= 1.1)

-Time complexity: O(1)
Atomically return and remove the last (tail) element of the srckey list, and push the element as the first (head) element of the dstkey list. For example if the source list contains the elements "a","b","c" and the destination list contains the elements "foo","bar", after an RPOPLPUSH command the content of the two lists will be "a","b" and "c","foo","bar".
-
If the key does not exist or the list is already empty the special value 'nil' is returned. If the srckey and dstkey are the same, the operation is equivalent to removing the last element from the list and pushing it as the first element of the list, so it's a "list rotation" command.
-

Programming patterns: safe queues

Redis lists are often used as queues in order to exchange messages between different programs. A program can add a message performing an LPUSH operation against a Redis list (we call this program a Producer), while another program (that we call Consumer) can process the messages performing an RPOP command in order to start reading the messages from the oldest.
-
Unfortunately if a Consumer crashes just after an RPOP operation the message gets lost. RPOPLPUSH solves this problem since the returned message is added to another "backup" list. The Consumer can later remove the message from the backup list using the LREM command when the message was correctly processed.
-
Another process, called Helper, can monitor the "backup" list to check for timed out entries to repush against the main queue.
-
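A hedged Python sketch of this safe-queue pattern, assuming the third-party redis-py client; the "queue" and "backup" key names are illustrative, and LREM is issued with the key/count/value argument order described in the LREM documentation:

import time
import redis

r = redis.Redis()

def consume(process):
    while True:
        # Atomically move the oldest message to the backup list and return it.
        msg = r.rpoplpush("queue", "backup")
        if msg is None:
            time.sleep(1)          # queue empty, poll again later
            continue
        process(msg)
        # Only once processing succeeded, drop the message from the backup list.
        r.execute_command("LREM", "backup", 1, msg)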

Programming patterns: server-side O(N) list traversal

Using RPOPLPUSH with the same source and destination key a process can visit all the elements of an N-elements List in O(N) without transferring the full list from the server to the client in a single LRANGE operation. Note that a process can traverse the list even while other processes are actively RPUSHing against the list, and still no element will be skipped.
-

Return value

Bulk reply - -
- -
-
- - - diff --git a/doc/RpushCommand.html b/doc/RpushCommand.html deleted file mode 100644 index 18376bc1..00000000 --- a/doc/RpushCommand.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - -
- - - -
-
- -RpushCommand: Contents
      RPUSH _key_ _string_
      LPUSH _key_ _string_
    Return value -
- -

RpushCommand

- -
- -
- -
- #sidebar ListCommandsSidebar

RPUSH _key_ _string_

-

LPUSH _key_ _string_

-Time complexity: O(1)
Add the string value to the tail (RPUSH) or head (LPUSH) of the list stored at key. If the key does not exist an empty list is created just before the append operation. If the key exists but is not a List an error is returned.
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/SaddCommand.html b/doc/SaddCommand.html deleted file mode 100644 index 57c2dfad..00000000 --- a/doc/SaddCommand.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - -
- - - -
-
- -SaddCommand: Contents
  SADD _key_ _member_
    Return value -
- -

SaddCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SADD _key_ _member_

-Time complexity O(1)
Add the specified member to the set value stored at key. If member is already a member of the set no operation is performed. If key does not exist a new set with the specified member as sole member is created. If the key exists but does not hold a set value an error is returned.
-

Return value

Integer reply, specifically:

-1 if the new element was added
-0 if the element was already a member of the set
-
-
- -
-
- - - diff --git a/doc/SaveCommand.html b/doc/SaveCommand.html deleted file mode 100644 index 5b703059..00000000 --- a/doc/SaveCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -SaveCommand: Contents
      SAVE
    Return value -
- -

SaveCommand

- -
- -
- -
- #sidebar ControlCommandsSidebar

SAVE

-
Save the whole dataset on disk (this means that all the databases are saved, as well as keys with an EXPIRE set; the expire is preserved). The server hangs while the saving is not completed, and no connection is served in the meanwhile. An OK code is returned when the DB was fully stored on disk.
-
The background variant of this command is BGSAVE that is able to perform the saving in the background while the server continues serving other clients.
-

Return value

Status code reply -
- -
-
- - - diff --git a/doc/ScardCommand.html b/doc/ScardCommand.html deleted file mode 100644 index da7b3cbd..00000000 --- a/doc/ScardCommand.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - -
- - - -
-
- -ScardCommand: Contents
  SCARD _key_
    Return value -
- -

ScardCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SCARD _key_

-Time complexity O(1)
Return the set cardinality (number of elements). If the key does not exist 0 is returned, like for empty sets.
-

Return value

Integer reply, specifically:

-the cardinality (number of elements) of the set as an integer.
-
- -
- -
-
- - - diff --git a/doc/SdiffCommand.html b/doc/SdiffCommand.html deleted file mode 100644 index 7aef28b9..00000000 --- a/doc/SdiffCommand.html +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - -
- - - -
-
- -SdiffCommand: Contents
  SDIFF _key1_ _key2_ ... _keyN_
    Return value -
- -

SdiffCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SDIFF _key1_ _key2_ ... _keyN_

-Time complexity O(N) with N being the total number of elements of all the sets
Return the members of a set resulting from the difference between the first set provided and all the successive sets. Example:
-
-key1 = x,a,b,c
-key2 = c
-key3 = a,d
-SDIFF key1,key2,key3 => x,b
-
Non existing keys are considered like empty sets.
-

Return value

Multi bulk reply, specifically the list of elements of the resulting set. - -
- -
-
- - - diff --git a/doc/SdiffstoreCommand.html b/doc/SdiffstoreCommand.html deleted file mode 100644 index 91962acb..00000000 --- a/doc/SdiffstoreCommand.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -SdiffstoreCommand: Contents
  SDIFFSTORE _dstkey_ _key1_ _key2_ ... _keyN_
    Return value -
- -

SdiffstoreCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SDIFFSTORE _dstkey_ _key1_ _key2_ ... _keyN_

-Time complexity O(N) where N is the total number of elements in all the provided sets
This command works exactly like SDIFF but instead of being returned the resulting set is stored in dstkey.
-

Return value

Status code reply -
- -
-
- - - diff --git a/doc/SelectCommand.html b/doc/SelectCommand.html deleted file mode 100644 index e63aa7e4..00000000 --- a/doc/SelectCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -SelectCommand: Contents
  SELECT _index_
    Return value -
- -

SelectCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

SELECT _index_

-
Select the DB having the specified zero-based numeric index. By default every new client connection automatically selects DB 0.
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/SetCommand.html b/doc/SetCommand.html deleted file mode 100644 index 9557be6f..00000000 --- a/doc/SetCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -SetCommand: Contents
  SET _key_ _value_
    Return value -
- -

SetCommand

- -
- -
- -
- #sidebar StringCommandsSidebar

SET _key_ _value_

-Time complexity: O(1)
Set the string value as value of the key. The string can't be longer than 1073741824 bytes (1 GB).
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/SetCommandsSidebar.html b/doc/SetCommandsSidebar.html deleted file mode 100644 index fc612b6c..00000000 --- a/doc/SetCommandsSidebar.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - -
-
- -SetCommandsSidebar: Contents -
- -

SetCommandsSidebar

- -
- -
- - - -
-
- - - diff --git a/doc/SetnxCommand.html b/doc/SetnxCommand.html deleted file mode 100644 index 8c47e72d..00000000 --- a/doc/SetnxCommand.html +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - -
- - - -
- - -

SetnxCommand

- -
- -
- -
- #sidebar StringCommandsSidebar

SETNX _key_ _value_

-Time complexity: O(1)
SETNX works exactly like SET with the only difference that if the key already exists no operation is performed. SETNX actually means "SET if Not eXists".
-

Return value

Integer reply, specifically:

-1 if the key was set
-0 if the key was not set
-

Design pattern: Implementing locking with SETNX

SETNX can also be seen as a locking primitive. For instance to acquire the lock of the key foo, the client could try the following:
-
-SETNX lock.foo <current UNIX time + lock timeout + 1>
-
If SETNX returns 1 the client acquired the lock, setting the lock.foo key to the UNIX time at which the lock should no longer be considered valid. The client will later use DEL lock.foo in order to release the lock.
-
If SETNX returns 0 the key is already locked by some other client. We can either return to the caller if it's a non blocking lock, or enter a loop retrying to hold the lock until we succeed or some kind of timeout expires.
-

Handling deadlocks

In the above locking algorithm there is a problem: what happens if a client fails, crashes, or is otherwise not able to release the lock? It's possible to detect this condition because the lock key contains a UNIX timestamp. If such a timestamp is <= the current Unix time the lock is no longer valid.
-
When this happens we can't just call DEL against the key to remove the lock and then try to issue a SETNX, as there is a race condition here, when multiple clients detected an expired lock and are trying to release it.
-
  • C1 and C2 read lock.foo to check the timestamp, because SETNX returned 0 to both C1 and C2, as the lock is still held by C3 that crashed after holding the lock.
  • C1 sends DEL lock.foo
  • C1 sends SETNX => success!
  • C2 sends DEL lock.foo
  • C2 sends SETNX => success!
  • ERROR: both C1 and C2 acquired the lock because of the race condition.
-
Fortunately it's possible to avoid this issue using the following algorithm. Let's see how C4, our sane client, uses the good algorithm:
-
  • C4 sends SETNX lock.foo in order to acquire the lock
  • The crashed C3 client still holds it, so Redis will reply with 0 to C4.
  • C4 GET lock.foo to check if the lock expired. If not it will sleep one second (for instance) and retry from the start.
  • If instead the lock is expired because the UNIX time at lock.foo is older than the current UNIX time, C4 tries to perform GETSET lock.foo <current unix timestamp + lock timeout + 1>
  • Thanks to the GETSET command semantic C4 can check if the old value stored at key is still an expired timestamp. If so we acquired the lock!
  • Otherwise if another client, for instance C5, was faster than C4 and acquired the lock with the GETSET operation, C4's GETSET operation will return a non expired timestamp. C4 will simply restart from the first step. Note that even if C4 set the key a few seconds in the future this is not a problem.
-IMPORTANT NOTE: In order to make this locking algorithm more robust, a client holding a lock should always check that the timeout didn't expire before unlocking the key with DEL, because client failures can be complex: not just crashing, but also blocking for a long time against some operation and issuing DEL much later (when the lock is already held by some other client). -
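To make the algorithm more concrete, here is a hypothetical sequence of commands as seen by C4 (the key names and timestamps are invented for the sake of the example, assuming the current UNIX time is about 1276000000 and a 10 seconds lock timeout):

SETNX lock.foo 1276000010 => 0 (the lock is already taken)
GET lock.foo => 1275999990 (an expired timestamp: the old holder crashed)
GETSET lock.foo 1276000010 => 1275999990 (still the expired value: C4 got the lock)
... do the work ...
GET lock.foo => 1276000010 (still our own timestamp, the lock did not expire meanwhile)
DEL lock.foo

If the GETSET had returned a non expired timestamp written by another client, C4 would simply restart from the first step without deleting the key.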
- -
-
- - - diff --git a/doc/Sets.html b/doc/Sets.html deleted file mode 100644 index 19fe7f80..00000000 --- a/doc/Sets.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - -
-
- -Sets: Contents
  Redis Set Type
  Implementation details -
- -

Sets

- -
- -
- -
- #sidebar SetCommandsSidebar

Redis Set Type

Redis Sets are unordered collections of Redis Strings. It's possible to add, remove, and test for existence of members in O(1).

Redis Sets have the desirable property of not allowing repeated members. Adding the same element multiple times will result in a set having a single copy of this element. Practically speaking this means that adding a member does not require a "check if exists then add" operation.

Commands operating on sets try to make good use of the return value in order to signal the application about the previous existence of members. For instance the SADD command will return 1 if the element added was not already a member of the set, otherwise it will return 0.
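For example, assuming the key myset does not exist yet, a session could look like this:

SADD myset foo => 1
SADD myset foo => 0 (foo was already a member)
SADD myset bar => 1
SCARD myset => 2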

The max number of members in a set is 2^32-1 (4294967295, more than 4 billion members per set).

Redis Sets support a wide range of operations, like union, intersection, difference. Intersection is optimized in order to perform the smallest number of lookups. For instance if you try to intersect a 10000 members set with a 2 members set Redis will iterate the 2 members set testing for members existence in the other set, performing 2 lookups instead of 10000.

Implementation details

Redis Sets are implemented using hash tables, so adding, removing and testing for members is O(1) on average. The hash table will automatically resize when elements are added to or removed from a Set.

The hash table resizing is a blocking operation performed synchronously, so when working with huge sets (consisting of many millions of elements) care should be taken when mass-inserting a very big amount of elements in a Set while other clients are querying Redis at high speed.

It is possible that in the near future Redis will switch to skip lists (already used in sorted sets) in order to avoid such a problem. -
- -
-
- - - diff --git a/doc/ShutdownCommand.html b/doc/ShutdownCommand.html deleted file mode 100644 index 78de2876..00000000 --- a/doc/ShutdownCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -ShutdownCommand: Contents
  SHUTDOWN
    Return value -
- -

ShutdownCommand

- -
- -
- -
- #sidebar ControlCommandsSidebar

SHUTDOWN

-
Stop all the clients, save the DB, then quit the server. This command makes sure that the DB is switched off without the loss of any data. This is not guaranteed if the client simply uses "SAVE" and then "QUIT" because other clients may alter the DB data between the two commands.
-

Return value

Status code reply on error. On success nothing is returned since the server quits and the connection is closed. - -
- -
-
- - - diff --git a/doc/SideBar.html b/doc/SideBar.html deleted file mode 100644 index f69a322f..00000000 --- a/doc/SideBar.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - - -
- - - diff --git a/doc/SinterCommand.html b/doc/SinterCommand.html deleted file mode 100644 index 528945c5..00000000 --- a/doc/SinterCommand.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - -
- - - -
-
- -SinterCommand: Contents
  SINTER _key1_ _key2_ ... _keyN_
    Return value -
- -

SinterCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SINTER _key1_ _key2_ ... _keyN_

-Time complexity O(NM) worst case where N is the cardinality of the smallest set and M the number of sets
Return the members of a set resulting from the intersection of all the sets held at the specified keys. Like in LRANGE the result is sent to the client as a multi-bulk reply (see the protocol specification for more information). If just a single key is specified, then this command produces the same result as SMEMBERS. Actually SMEMBERS is just syntax sugar for SINTER with a single key.
-
Non existing keys are considered like empty sets, so if one of the keys is missing an empty set is returned (since the intersection with an empty set always is an empty set).
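A small example session (the keys and members are arbitrary, and the order of the returned members is not defined since Sets are unordered):

SADD s1 a
SADD s1 b
SADD s1 c
SADD s2 b
SADD s2 c
SADD s2 d
SINTER s1 s2 => b,c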
-

Return value

Multi bulk reply, specifically the list of common elements. - -
- -
-
- - - diff --git a/doc/SinterstoreCommand.html b/doc/SinterstoreCommand.html deleted file mode 100644 index 61cdbfa3..00000000 --- a/doc/SinterstoreCommand.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -SinterstoreCommand: Contents
  SINTERSTORE _dstkey_ _key1_ _key2_ ... _keyN_
    Return value -
- -

SinterstoreCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SINTERSTORE _dstkey_ _key1_ _key2_ ... _keyN_

-Time complexity O(NM) worst case where N is the cardinality of the smallest set and M the number of sets
This command works exactly like SINTER but instead of being returned the resulting set is stored at dstkey.
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/SismemberCommand.html b/doc/SismemberCommand.html deleted file mode 100644 index 58768ef7..00000000 --- a/doc/SismemberCommand.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
-
- -SismemberCommand: Contents
  SISMEMBER _key_ _member_
    Return value -
- -

SismemberCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SISMEMBER _key_ _member_

-Time complexity O(1)
Return 1 if member is a member of the set stored at key, otherwise 0 is returned.
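For example:

SADD myset foo
SISMEMBER myset foo => 1
SISMEMBER myset bar => 0
SISMEMBER nosuchkey foo => 0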
-

Return value

Integer reply, specifically:

-1 if the element is a member of the set
-0 if the element is not a member of the set OR if the key does not exist
-
- -
- -
-
- - - diff --git a/doc/SlaveofCommand.html b/doc/SlaveofCommand.html deleted file mode 100644 index 8937306c..00000000 --- a/doc/SlaveofCommand.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - -
- - - -
-
- -SlaveofCommand: Contents
  SLAVEOF _host_ _port_
  SLAVEOF no one
    Return value -
- -

SlaveofCommand

- -
- -
- -
- #sidebar ControlCommandsSidebar

SLAVEOF _host_ _port_

-

SLAVEOF no one

The SLAVEOF command can change the replication settings of a slave on the fly. If a Redis server is already acting as a slave, the command SLAVEOF NO ONE will turn off the replication, turning the Redis server into a MASTER. In the proper form SLAVEOF hostname port will make the server a slave of the specific server listening at the specified hostname and port.
-
If a server is already a slave of some master, SLAVEOF hostname port will stop the replication against the old server and start the synchronization against the new one, discarding the old dataset.
-
The form SLAVEOF no one will stop replication, turning the server into a MASTER, but will not discard the already replicated dataset. So if the old master stops working it is possible to turn the slave into a master and point the application at the new master in read/write mode. Later, when the other Redis server is fixed, it can be configured to work as a slave.
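For example (the hostname and port below are just placeholders):

SLAVEOF 192.168.1.100 6379 (start replicating from the master at 192.168.1.100:6379)
SLAVEOF no one (stop replication, the server is a master again)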
-

Return value

Status code reply - -
- -
-
- - - diff --git a/doc/SmembersCommand.html b/doc/SmembersCommand.html deleted file mode 100644 index a3ef124e..00000000 --- a/doc/SmembersCommand.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -SmembersCommand: Contents
  SMEMBERS _key_
    Return value -
- -

SmembersCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SMEMBERS _key_

-Time complexity O(N)
Return all the members (elements) of the set value stored at key. This is just syntax glue for SINTER.
-

Return value

Multi bulk reply -
- -
-
- - - diff --git a/doc/SmoveCommand.html b/doc/SmoveCommand.html deleted file mode 100644 index 1bdb001f..00000000 --- a/doc/SmoveCommand.html +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - -
- - - -
-
- -SmoveCommand: Contents
  SMOVE _srckey_ _dstkey_ _member_
    Return value -
- -

SmoveCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SMOVE _srckey_ _dstkey_ _member_

-Time complexity O(1)
Move the specified member from the set at srckey to the set at dstkey. This operation is atomic: at any given moment the element will appear to be in the source or destination set for accessing clients.
-
If the source set does not exist or does not contain the specified element no operation is performed and zero is returned, otherwise the element is removed from the source set and added to the destination set. On success one is returned, even if the element was already present in the destination set.
-
An error is raised if the source or destination keys contain a non-Set value.
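A short example session (remember that Sets are unordered, so the SMEMBERS output order is not defined):

SADD src a
SADD src b
SADD dst c
SMOVE src dst a => 1
SMEMBERS src => b
SMEMBERS dst => c,a
SMOVE src dst nosuchmember => 0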
-

Return value

Integer reply, specifically:

-1 if the element was moved
-0 if the element was not found on the first set and no operation was performed
-
- -
- -
-
- - - diff --git a/doc/SortCommand.html b/doc/SortCommand.html deleted file mode 100644 index 90c8ac7f..00000000 --- a/doc/SortCommand.html +++ /dev/null @@ -1,75 +0,0 @@ - - - - - - - -
- - - -
- - -

SortCommand

- -
- -
- -
- = SORT key [BY pattern] [LIMIT start count] [GET pattern] [ASC|DESC] [ALPHA] [STORE dstkey] = -
Sort the elements contained in the List, Set, or Sorted Set value at key. By default sorting is numeric, with elements being compared as double precision floating point numbers. This is the simplest form of SORT:
-
-SORT mylist
-
Assuming mylist contains a list of numbers, the return value will be the list of numbers ordered from the smallest to the biggest number. In order to get the sorting in reverse order use DESC:
-
-SORT mylist DESC
-
The ASC option is also supported but it's the default so you don't really need it. If you want to sort lexicographically use ALPHA. Note that Redis is utf-8 aware, assuming you set the right value for the LC_COLLATE environment variable.
-
Sort is able to limit the number of returned elements using the LIMIT option:
-
-SORT mylist LIMIT 0 10
-
In the above example SORT will return only 10 elements, starting from the first one (start is zero-based). Almost all the sort options can be mixed together. For example the command:
-
-SORT mylist LIMIT 0 10 ALPHA DESC
-
Will sort mylist lexicographically, in descending order, returning only the first 10 elements.
-
Sometimes you want to sort elements using external keys as weights to compare, instead of comparing the actual List, Set or Sorted Set elements. For example the list mylist may contain the elements 1, 2, 3, 4, that are just unique IDs of objects stored at object_1, object_2, object_3 and object_4, while the keys weight_1, weight_2, weight_3 and weight_4 can contain weights we want to use to sort our list of object identifiers. We can use the following command:
-

Sorting by external keys

-SORT mylist BY weight_*
-
the BY option takes a pattern (weight_* in our example) that is used in order to generate the key names of the weights used for sorting. Weight key names are obtained substituting the first occurrence of * with the actual value of the elements in the list (1,2,3,4 in our example).
-
Our previous example will return just the sorted IDs. Often it is needed to get the actual objects sorted (object_1, ..., object_4 in the example). We can do it with the following command:
-

Not Sorting at all

-SORT mylist BY nosort
-
also the BY option can take a "nosort" specifier. This is useful if you want to retrieve external keys (using GET, read below) but you don't want the sorting overhead.
-

Retrieving external keys

-SORT mylist BY weight_* GET object_*
-
Note that GET can be used multiple times in order to get more keys for every element of the original List, Set or Sorted Set being sorted.
-
Since Redis >= 1.1 it's possible to also GET the list elements themselves using the special # pattern:
-
-SORT mylist BY weight_* GET object_* GET #
-

Storing the result of a SORT operation

By default SORT returns the sorted elements as its return value. Using the STORE option, instead of returning the elements SORT will store them as a Redis List in the specified key. An example:
-
-SORT mylist BY weight_* STORE resultkey
-
An interesting pattern using SORT ... STORE consists in associating an EXPIRE timeout to the resulting key, so that in applications where the result of a sort operation can be cached for some time other clients will use the cached list instead of calling SORT for every request. When the key times out an updated version of the cache can be created using SORT ... STORE again.
-
Note that when implementing this pattern it is important to avoid multiple clients trying to rebuild the cache at the same time, so some form of locking should be implemented (for instance using SETNX).
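A possible sketch of the caching pattern, assuming a 60 seconds cache lifetime is acceptable for the application (the key names are arbitrary):

SORT mylist BY weight_* STORE cache.mylist
EXPIRE cache.mylist 60

Other clients can then read the cached result with LRANGE cache.mylist 0 -1, and rebuild it with SORT ... STORE only when the key no longer exists.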
-

SORT and Hashes: BY and GET by hash field

-
It's possible to use BY and GET options against Hash fields using the following syntax:
-SORT mylist BY weight_*->fieldname
-SORT mylist GET object_*->fieldname
-
-
The two-character string -> is used in order to signal the name of the Hash field. The key is substituted as documented above with sort BY and GET against normal keys, and the Hash stored at the resulting key is accessed in order to retrieve the specified field.

Return value

Multi bulk reply, specifically a list of sorted elements. -
- -
-
- - - diff --git a/doc/SortedSetCommandsSidebar.html b/doc/SortedSetCommandsSidebar.html deleted file mode 100644 index 2534beb2..00000000 --- a/doc/SortedSetCommandsSidebar.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - -
-
- -SortedSetCommandsSidebar: Contents -
- -

SortedSetCommandsSidebar

- -
- -
- - - -
-
- - - diff --git a/doc/SortedSets.html b/doc/SortedSets.html deleted file mode 100644 index a9d5f8b3..00000000 --- a/doc/SortedSets.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - -
-
- -SortedSets: Contents
  Redis Sorted Set Type
  Implementation details -
- -

SortedSets

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

Redis Sorted Set Type

Redis Sorted Sets are, similarly to Sets, collections of Redis Strings. The difference is that every member of a Sorted Set has an associated score that is used in order to keep the members in order.

The ZADD command is used to add a new member to a Sorted Set, specifying the score of the element. Calling ZADD against a member already present in the sorted set but using a different score will update the score for the element, moving it to the right position in order to preserve ordering.

It's possible to get ranges of elements from Sorted Sets in a very similar way to what happens with Lists and the LRANGE command using the Sorted Sets ZRANGE command.

It's also possible to get or remove ranges of elements by score using the ZRANGEBYSCORE and ZREMRANGEBYSCORE commands.

The max number of members in a sorted set is 2^32-1 (4294967295, more than 4 billion members per sorted set).

Note that while Sorted Sets are already ordered, it is still possible to use the SORT command against sorted sets to get the elements in a different order.

Implementation details

Redis Sorted Sets are implemented using a dual-ported data structure containing a skip list and a hash table. When an element is added, a map between the element and the score is added to the hash table (so that given the element we get the score in O(1)), and a map between the score and the element is added to the skip list so that elements are taken in order.

Redis uses a special skip list implementation that is doubly linked so that it's possible to traverse the sorted set from tail to head if needed (Check the ZREVRANGE command).

When ZADD is used in order to update the score of an element, Redis retrieves the score of the element using the hash table, so that it's fast to access the element inside the skip list (that's indexed by score) in order to update its position.

As it happens for Sets, the hash table resizing is a blocking operation performed synchronously, so when working with huge sorted sets (consisting of many millions of elements) care should be taken when mass-inserting a very big amount of elements while other clients are querying Redis at high speed.

It is possible that in the near future Redis will switch to skip lists even for the element => score map, so every Sorted Set will have two skip lists, one indexed by element and one indexed by score. -
- -
-
- - - diff --git a/doc/Speed.html b/doc/Speed.html deleted file mode 100644 index 47e77761..00000000 --- a/doc/Speed.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -Speed: Contents
  Speed (ROUGH DRAFT)
    TODO -
- -

Speed

- -
- -
- -
- -

Speed (ROUGH DRAFT)

TODO

  • Written in ANSI C
  • Pipelining
  • MultiBulkCommands
  • epoll >= 1.1
  • Benchmarks
-Redis takes the whole dataset in memory and writes asynchronously to disk in order to be very fast: you have the best of both worlds, hyper-speed and persistence for your data.

Establishing a new connection to a Redis Server is simple and fast: nothing more than a TCP three way handshake. There is no authentication or other handshake involved (Google Group: Can we use connection pool in Redis?). You can read more about the way Redis clients communicate with servers in the Protocol Specification.

On most commodity hardware it takes about 45 seconds to restore a 2 GB database, without fancy RAID. This can give you some kind of feeling about the order of magnitude of the time needed to load data when you restart the server, so restarting a server is fast too.

Also Replication is fast: benchmarks will give you the same order of magnitude a restart does (Google Group: Replication speed benchmark) -
- -
-
- - - diff --git a/doc/SponsorshipHistory.html b/doc/SponsorshipHistory.html deleted file mode 100644 index 107f9c8a..00000000 --- a/doc/SponsorshipHistory.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -SponsorshipHistory: Contents
  Redis Sponsorship History -
- -

SponsorshipHistory

- -
- -
- -
-

Redis Sponsorship History

Important notice: since 15 March 2010 I joined VMware, which is sponsoring all my work on Redis. Thank you to all the companies and people donating in the past. No further donations are accepted.

This is a list of companies that sponsored Redis development, with details about the sponsored features. Thanks for helping the project!



  • 15 January 2010, provided Virtual Machines for Redis testing in a virtualized environment.


  • 14 January 2010, provided Virtual Machines for Redis testing in a virtualized environment.


  • 18 Dec 2009, part of Virtual Memory.


  • 15 Dec 2009, part of Redis Cluster.


  • 13 Dec 2009, for blocking POP (BLPOP) and part of the Virtual Memory implementation.
-Also thanks to the following people or organizations that donated to the Project: -
- -
-
- - - diff --git a/doc/SpopCommand.html b/doc/SpopCommand.html deleted file mode 100644 index 0a943c65..00000000 --- a/doc/SpopCommand.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - -
- - - -
-
- -SpopCommand: Contents
  SPOP _key_
    Return value -
- -

SpopCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SPOP _key_

-Time complexity O(1)
Remove a random element from a Set, returning it as return value. If the Set is empty or the key does not exist, a nil object is returned.
-
The SRANDMEMBER command does similar work but the returned element is not removed from the Set.
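For example (the popped member is chosen at random, so the output below is just one possible outcome):

SADD myset a
SADD myset b
SADD myset c
SPOP myset => b
SCARD myset => 2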
-

Return value

Bulk reply - -
- -
-
- - - diff --git a/doc/SrandmemberCommand.html b/doc/SrandmemberCommand.html deleted file mode 100644 index 43a22387..00000000 --- a/doc/SrandmemberCommand.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - -
- - - -
-
- -SrandmemberCommand: Contents
  SRANDMEMBER _key_
    Return value -
- -

SrandmemberCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SRANDMEMBER _key_

-Time complexity O(1)
Return a random element from a Set, without removing the element. If the Set is empty or the key does not exist, a nil object is returned.
-
The SPOP command does similar work but the returned element is popped (removed) from the Set.
-

Return value

Bulk reply - -
- -
-
- - - diff --git a/doc/SremCommand.html b/doc/SremCommand.html deleted file mode 100644 index e5aab23e..00000000 --- a/doc/SremCommand.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
-
- -SremCommand: Contents
  SREM _key_ _member_
    Return value -
- -

SremCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SREM _key_ _member_

-Time complexity O(1)
Remove the specified member from the set value stored at key. If _member_ was not a member of the set no operation is performed. If key does not hold a set value an error is returned.
-

Return value

Integer reply, specifically:

-1 if the new element was removed
-0 if the new element was not a member of the set
-
- -
- -
-
- - - diff --git a/doc/StringCommandsSidebar.html b/doc/StringCommandsSidebar.html deleted file mode 100644 index 369b0cfb..00000000 --- a/doc/StringCommandsSidebar.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - -
- - - -
-
- -StringCommandsSidebar: Contents -
- -

StringCommandsSidebar

- -
- -
- -
- == String Commands ==

-
- -
-
- - - diff --git a/doc/Strings.html b/doc/Strings.html deleted file mode 100644 index d05bf928..00000000 --- a/doc/Strings.html +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - -
- - - -
-
- -Strings: Contents
  Redis String Type
  Implementation details -
- -

Strings

- -
- -
- -
- #sidebar StringCommandsSidebar

Redis String Type

Strings are the most basic Redis kind of values. Redis Strings are binary safe, this means a Redis string can contain any kind of data, for instance a JPEG image or a serialized Ruby object, and so forth.

A String value can be at max 1 Gigabyte in length.

Strings are treated as integer values by the INCR family of commands; in this respect the value of an integer is limited to a signed 64 bit value.
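For example:

SET counter 100
INCR counter => 101
INCRBY counter 10 => 111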

Note that the single elements contained in Redis Lists, Sets and Sorted Sets, are Redis Strings.

Implementation details

Strings are implemented using a dynamic strings library called sds.c (simple dynamic strings). This library caches the current length of the string, so obtaining the length of a Redis string is an O(1) operation (but currently there is no such STRLEN command; it will likely be added later).

Redis strings are encapsulated into Redis Objects. Redis Objects use a reference counting memory management system, so a single Redis String can be shared in different places of the dataset. This means that if you happen to use the same strings many times (especially if you have object sharing turned on in the configuration file) Redis will try to reuse the same string object instead of allocating a new one every time.

Starting from version 1.1 Redis is also able to encode in a special way strings that are actually just numbers. Instead of saving the string as an array of characters Redis will save the integer value in order to use less memory. With many datasets this can reduce the memory usage by about 30% compared to Redis 1.0. - -
- -
-
- - - diff --git a/doc/SunionCommand.html b/doc/SunionCommand.html deleted file mode 100644 index 7f7ec5b3..00000000 --- a/doc/SunionCommand.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - -
- - - -
-
- -SunionCommand: Contents
  SUNION _key1_ _key2_ ... _keyN_
    Return value -
- -

SunionCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SUNION _key1_ _key2_ ... _keyN_

-Time complexity O(N) where N is the total number of elements in all the provided sets
Return the members of a set resulting from the union of all the sets held at the specified keys. Like in LRANGE the result is sent to the client as a multi-bulk reply (see the protocol specification for more information). If just a single key is specified, then this command produces the same result as SMEMBERS.
-
Non existing keys are considered like empty sets.
-

Return value

Multi bulk reply, specifically the list of members of the resulting union. - -
- -
-
- - - diff --git a/doc/SunionstoreCommand.html b/doc/SunionstoreCommand.html deleted file mode 100644 index e4f627e2..00000000 --- a/doc/SunionstoreCommand.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -SunionstoreCommand: Contents
  SUNIONSTORE _dstkey_ _key1_ _key2_ ... _keyN_
    Return value -
- -

SunionstoreCommand

- -
- -
- -
- #sidebar SetCommandsSidebar

SUNIONSTORE _dstkey_ _key1_ _key2_ ... _keyN_

-Time complexity O(N) where N is the total number of elements in all the provided sets
This command works exactly like SUNION but instead of being returned the resulting set is stored at dstkey. Any existing value at dstkey will be overwritten.
-

Return value

Status code reply -
- -
-
- - - diff --git a/doc/SupportedLanguages.html b/doc/SupportedLanguages.html deleted file mode 100644 index 3b8156a2..00000000 --- a/doc/SupportedLanguages.html +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - -
- - - -
-
- -SupportedLanguages: Contents
  Supported Languages (DRAFT)
    TODO
    Features Support Matrix
      Version 1.1
      Version 1.0
    Client Libraries Reference
      as3 (ActionScript 3)
      redis-clojure (Clojure)
      CL-Redis (Common Lisp)
      erldis (Erlang)
      Go-Redis (Go)
      haskell-redis (Haskell)
      Java
      redis-lua (Lua)
      Perl
      PHP
      Python
      txredis
      redis-rb (Ruby)
      scala-redis (Scala)
      Tcl -
- -

SupportedLanguages

- -
- -
- -
-

Supported Languages (DRAFT)

Wondering if you can use Redis from your favorite language? Well here is the definitive guide to the available client libraries.

These libraries are intended to expose Redis commands, but you also have the option to use some higher level libraries that provide Object Hash Mappings, pretty much the same idea implemented by a classic ORM.

TODO

-

Features Support Matrix



The following matrix should give you a quick overview of the state of the different client libraries existing for each supported language.

The core command set is the one of Version 1.0, while Sharding and Pipelining are convenient client side features not tied to any Redis server version.

Version 1.1

Compatible client libraries are expected to implement the command sets specified in Version 1.0 plus:

  • String: MSET, MSETNX.
  • List: RPOPLPUSH.
  • Sorted Set (ZSET): ZADD, ZREM, ZRANGE, ZREVRANGE, ZRANGEBYSCORE, ZCARD, ZSCORE.
-

Version 1.0



Compatible client libraries are expected to implement the following command sets:

  • String: GET, SET, SETNX, DEL, EXISTS, INCR, DECR, MGET, INCRBY, DECRBY, GETSET, TYPE.
  • List: RPUSH, LPUSH, RPOP, LPOP, LLEN, LINDEX, LSET, LRANGE, LTRIM, LREM.
  • Set: SADD, SREM, SMOVE, SISMEMBER, SCARD, SPOP, SINTER, SINTERSTORE, SUNION, SUNIONSTORE, SDIFF, SDIFFSTORE, SMEMBERS.
  • Keyspace: KEYS, RANDOMKEY, RENAME, RENAMENX, DBSIZE, EXPIRE, TTL.
  • Databases: SELECT, MOVE, FLUSHDB, FLUSHALL.
  • Sort: SORT
  • Connection: AUTH, QUIT?. ???
  • Persistence: SAVE, BGSAVE, LASTSAVE, SHUTDOWN?. ???
  • Server: INFO, MONITOR? SLAVEOF? ???
Language Name Sharding Pipelining 1.1 1.0
ActionScript 3 as3redis No Yes Yes Yes
Clojure redis-clojure No No Partial Yes
Common Lisp CL-Redis No No No Yes
Erlang erldis No Looks like No Looks like
Go Go-Redis No Yes Yes Yes
Haskell haskell-redis No No No Yes
Java JDBC-Redis No No No Yes
Java JRedis No Yes Yes Yes
LUA redis-lua No No Yes Yes
Perl Redis Client No No No Yes
Perl AnyEvent::Redis No No No Yes
PHP Redis PHP Bindings No No No Yes
PHP phpredis (C) No No No Yes
PHP Predis Yes Yes Yes Yes
PHP Redisent Yes No No Yes
Python Python Client No No No Yes
Python py-redis No No Partial Yes
Python txredis No No No Yes
Ruby redis-rb Yes Yes Yes Yes
Scala scala-redis Yes No No Yes
TCL TCL No No Yes Yes
-

Client Libraries Reference

as3 (ActionScript 3)

-

redis-clojure (Clojure)

-

CL-Redis (Common Lisp)

-

erldis (Erlang)

-

Go-Redis (Go)

-

haskell-redis (Haskell)

-

Java

JDBC-Redis

  • JDBC-Redis is a Java driver using the JDBC interface for the Redis database. This project doesn't aim for a complete implementation of the JDBC specification, since Redis isn't a relational database, but should provide a familiar interface for Java developers to interact with Redis.
  • Repository: http://code.google.com/p/jdbc-redis/
-

JRedis

-

redis-lua (Lua)

-

Perl

Perl Client

-

AnyEvent::Redis

-

PHP

Redis PHP Bindings

-

phpredis

-

Predis

  • A flexible and feature-complete PHP client library for the Redis key-value database. Predis is currently a work-in-progress and it targets PHP >= 5.3, though it is highly due to be backported to PHP >= 5.2.6 as soon as the public API and the internal design on the main branch will be considered stable enough.
  • Author: Daniele Alessandri, @jol1hahn
  • Repository: http://github.com/nrk/predis/
-

Redisent

-

Python

Python Client

-

py-redis

-

txredis

-

redis-rb (Ruby)

-

scala-redis (Scala)

-

Tcl

-
- -
-
- - - diff --git a/doc/SupportedPlatforms.html b/doc/SupportedPlatforms.html deleted file mode 100644 index 3e1ac96c..00000000 --- a/doc/SupportedPlatforms.html +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - -
- - - -
-
- -SupportedPlatforms: Contents
  Supported Platforms -
- -

SupportedPlatforms

- -
- -
- -
-

Supported Platforms

Redis can be compiled in most POSIX systems, but the development targets mainly:

  • Linux
  • Mac OS X
  • FreeBSD
  • OpenBSD
  • Solaris (starting with Version 1.1)
-Windows (using CygWin) is not a supported platform. -
- -
-
- - - diff --git a/doc/TemplateCommand.html b/doc/TemplateCommand.html deleted file mode 100644 index 274c4966..00000000 --- a/doc/TemplateCommand.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -TemplateCommand: Contents
    Return value
    See also -
- -

TemplateCommand

- -
- -
- - - -
-
- - - diff --git a/doc/TtlCommand.html b/doc/TtlCommand.html deleted file mode 100644 index 53db4ebf..00000000 --- a/doc/TtlCommand.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - -
- - - -
-
- -TtlCommand: Contents
  TTL _key_
    Return value -
- -

TtlCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

TTL _key_

The TTL command returns the remaining time to live in seconds of a key that has an EXPIRE set. This introspection capability allows a Redis client to check how many seconds a given key will continue to be part of the dataset. If the key does not exist or does not have an associated expire, -1 is returned.
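For example (assuming the commands are issued within the same second):

SET mykey somevalue
EXPIRE mykey 100
TTL mykey => 100
TTL nosuchkey => -1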
-

Return value

Integer reply - -
- -
-
- - - diff --git a/doc/TwitterAlikeExample.html b/doc/TwitterAlikeExample.html deleted file mode 100644 index 0c75cc93..00000000 --- a/doc/TwitterAlikeExample.html +++ /dev/null @@ -1,250 +0,0 @@ - - - - - - - -
- - - -
- - -

TwitterAlikeExample

- -
- -
- -
-

A case study: Design and implementation of a simple Twitter clone using only the Redis key-value store as database and PHP

In this article I'll explain the design and the implementation of a simple clone of Twitter written using PHP and Redis as the only database. The programming community tends to look at key-value stores as special databases that can't be used as a drop-in replacement for a relational database for the development of web applications. This article will try to prove the contrary.

Our Twitter clone, called Retwis, is structurally simple, has very good performance, and can be distributed among N web servers and M Redis servers with very little effort. You can find the source code here.

We use PHP for the example since it can be read by everybody. The same (or... much better) results can be obtained using Ruby, Python, Erlang, and so on.

News! Retwis-rb is a port of Retwis to Ruby and Sinatra written by Daniel Lucraft! With full source code included of course, the git repository is linked at the end of the Retwis-RB page. The rest of this article targets PHP, but Ruby programmers can also check the other source code: it is conceptually very similar.

Key-value stores basics

-The essence of a key-value store is the ability to store some data, called a value, inside a key. This data can later be retrieved only if we know the exact key used to store it. There is no way to search for something by value. So for example I can use the command SET to store the value bar at key foo:

-SET foo bar
-
Redis will store our data permanently, so we can later ask for "What is the value stored at key foo?" and Redis will reply with bar:

-GET foo => bar
-
Other common operations provided by key-value stores are DEL used to delete a given key, and the associated value, SET-if-not-exists (called SETNX on Redis) that sets a key only if it does not already exist, and INCR that is able to atomically increment a number stored at a given key:

-SET foo 10
-INCR foo => 11
-INCR foo => 12
-INCR foo => 13
-

Atomic operations

-So far it should be pretty simple, but there is something special about INCR. Think about it: why provide such an operation if we can do it ourselves with a bit of code? After all it is as simple as:

-x = GET foo
-x = x + 1
-SET foo x
-
The problem is that doing the increment this way will work as long as there is only one client working with the value x at a time. See what happens if two computers are accessing this data at the same time:

-x = GET foo (yields 10)
-y = GET foo (yields 10)
-x = x + 1 (x is now 11)
-y = y + 1 (y is now 11)
-SET foo x (foo is now 11)
-SET foo y (foo is now 11)
-
Something is wrong with that! We incremented the value two times, but instead of going from 10 to 12 our key holds 11. This is because the INCR operation done with GET / increment / SET is not an atomic operation. Instead the INCR provided by Redis, Memcached, ..., are atomic implementations: the server will take care to protect the get-increment-set for all the time needed to complete, in order to prevent simultaneous accesses.

What makes Redis different from other key-value stores is that it provides more operations similar to INCR that can be used together to model complex problems. This is why you can use Redis to write whole web applications without using an SQL database and without going mad. -

Beyond key-value stores

-In this section we will see what Redis features we need to build our Twitter clone. The first thing to know is that Redis values can be more than strings. Redis supports Lists and Sets as values, and there are atomic operations to operate against these more advanced values, so we are safe even with multiple accesses against the same key. Let's start with Lists:

-LPUSH mylist a (now mylist holds one element list 'a')
-LPUSH mylist b (now mylist holds 'b,a')
-LPUSH mylist c (now mylist holds 'c,b,a')
-
LPUSH means Left Push, that is, add an element to the left (or to the head) of the list stored at mylist. If the key mylist does not exist it is automatically created by Redis as an empty list before the PUSH operation. As you can imagine, there is also the RPUSH operation that adds the element on the right of the list (on the tail).

This is very useful for our Twitter clone. Updates of users can be stored into a list stored at username:updates for instance. There are operations to get data or information from Lists of course. For instance LRANGE returns a range of the list, or the whole list.

-LRANGE mylist 0 1 => c,b
-
LRANGE uses zero-based indexes, that is, the first element is 0, the second 1, and so on. The command arguments are LRANGE key first-index last-index. The last index argument can be negative, with a special meaning: -1 is the last element of the list, -2 the penultimate, and so on. So in order to get the whole list we can use:

-LRANGE mylist 0 -1 => c,b,a
-
Other important operations are LLEN that returns the length of the list, and LTRIM that is like LRANGE but instead of returning the specified range trims the list, so it is like "get range from mylist, set this range as the new value" but atomic. We will use only these List operations, but make sure to check the Redis documentation to discover all the List operations supported by Redis. -

The set data type

-There is more than Lists: Redis also supports Sets, which are unsorted collections of elements. It is possible to add, remove, and test for existence of members, and perform intersections between different Sets. Of course it is possible to ask for the list or the number of elements of a Set. Some examples will make it clearer. Keep in mind that SADD is the add-to-set operation, SREM is the remove-from-set operation, SISMEMBER is the test-if-member operation, and SINTER is the perform-intersection operation. Other operations are SCARD that is used to get the cardinality (the number of elements) of a Set, and SMEMBERS that will return all the members of a Set.

-SADD myset a
-SADD myset b
-SADD myset foo
-SADD myset bar
-SCARD myset => 4
-SMEMBERS myset => bar,a,foo,b
-
Note that SMEMBERS does not return the elements in the same order we added them, since Sets are unsorted collections of elements. When you want to store the order it is better to use Lists instead. Some more operations against Sets:

-SADD mynewset b
-SADD mynewset foo
-SADD mynewset hello
-SINTER myset mynewset => foo,b
-
SINTER can return the intersection between Sets but it is not limited to two sets: you may ask for the intersection of 4, 5, or 10000 Sets. Finally let's check how SISMEMBER works:

-SISMEMBER myset foo => 1
-SISMEMBER myset notamember => 0
-
Ok I think we are ready to start coding! -

Prerequisites

-If you didn't download it already please grab the source code of Retwis. It's a simple tar.gz file with a few .php files inside. The implementation is very simple. You will find the PHP client library inside (redis.php) that is used to talk to the Redis server from PHP. This library was written by Ludovico Magnocavallo and you are free to reuse it in your own projects, but for an updated version of the library please download the Redis distribution.

Another thing you probably want is a working Redis server. Just get the source, compile with make, and run with ./redis-server and you are done. No configuration is required at all in order to play with it or to run Retwis in your computer. -

Data layout

-Working with a relational database this is the stage where the database layout should be produced in the form of tables, indexes, and so on. We don't have tables, so what should be designed? We need to identify what keys are needed to represent our objects and what kind of values these keys need to hold.

Let's start with Users. We need to represent these users of course, with the username, userid, password, followers and following users, and so on. The first question is, what should identify a user inside our system? The username can be a good idea since it is unique, but it is also too big, and we want to stay low on memory. So, as if our DB were a relational one, we can associate a unique ID with every user. Every other reference to this user will be done by id. That's very simple to do, because we have our atomic INCR operation! When we create a new user we can do something like this, assuming the user is called "antirez":

-INCR global:nextUserId => 1000
-SET uid:1000:username antirez
-SET uid:1000:password p1pp0
-
We use the global:nextUserId key in order to always get a unique ID for every new user. Then we use this unique ID to populate all the other keys holding our user data. This is a Design Pattern with key-value stores! Keep it in mind. -Besides the fields already defined, we need some more stuff in order to fully define a User. For example sometimes it can be useful to be able to get the user ID from the username, so we set this key too:

-SET username:antirez:uid 1000
-
This may appear strange at first, but remember that we are only able to access data by key! It's not possible to tell Redis to return the key that holds a specific value. This is also our strength: this new paradigm is forcing us to organize the data so that everything is accessible by primary key, speaking in relational DB language. -

Following, followers and updates

-There is another central need in our system. Every user has followers and users they follow. We have a perfect data structure for this job! That is... Sets. So let's add these two new fields to our schema:

-uid:1000:followers => Set of uids of all the followers users
-uid:1000:following => Set of uids of all the following users
-
Another important thing we need is a place where we can add the updates to display in the user home page. We'll need to access this data in chronological order later, from the most recent update to the older ones, so the perfect kind of Value for this job is a List. Basically every new update will be LPUSHed onto the user updates key, and thanks to LRANGE we can implement pagination and so on. Note that we use the words updates and posts interchangeably, since updates are actually "little posts" in some way.

-uid:1000:posts => a List of post ids, every new post is LPUSHed here.
-
-

Authentication

-Ok, we have more or less everything about the user, except authentication. We'll handle authentication in a simple but robust way: we don't want to use PHP sessions or other things like this; our system must be ready to be distributed among different servers, so we'll keep the whole state in our Redis database. All we need is a random string to set as the cookie of an authenticated user, and a key that will tell us the user ID of the client holding such a random string. We need two keys in order to make this work in a robust way:

-SET uid:1000:auth fea5e81ac8ca77622bed1c2132a021f9
-SET auth:fea5e81ac8ca77622bed1c2132a021f9 1000
-
In order to authenticate a user we'll do this simple work (login.php): -
  • Get the username and password via the login form
  • Check if the username:<username>:uid key actually exists
  • If it exists we have the user id, (i.e. 1000)
  • Check if uid:1000:password matches, if not, error message
  • Ok authenticated! Set "fea5e81ac8ca77622bed1c2132a021f9" (the value of uid:1000:auth) as "auth" cookie
-This is the actual code:

-include("retwis.php");
-
-# Form sanity checks
-if (!gt("username") || !gt("password"))
-    goback("You need to enter both username and password to login.");
-
-# The form is ok, check if the username is available
-$username = gt("username");
-$password = gt("password");
-$r = redisLink();
-$userid = $r->get("username:$username:id");
-if (!$userid)
-    goback("Wrong username or password");
-$realpassword = $r->get("uid:$userid:password");
-if ($realpassword != $password)
-    goback("Wrong useranme or password");
-
-# Username / password OK, set the cookie and redirect to index.php
-$authsecret = $r->get("uid:$userid:auth");
-setcookie("auth",$authsecret,time()+3600*24*365);
-header("Location: index.php");
-
This happens every time a user logs in, but we also need a function isLoggedIn in order to check if a given user is already authenticated or not. These are the logical steps performed by the isLoggedIn function: -
  • Get the "auth" cookie from the user. If there is no cookie, the user is not logged in, of course. Let's call the value of this cookie <authcookie>
  • Check if auth:<authcookie> exists, and what the value (the user id) is (1000 in the example).
  • In order to be sure check that uid:1000:auth matches.
  • Ok the user is authenticated, and we loaded a bit of information in the $User global variable.
-The code is simpler than the description, possibly:

-function isLoggedIn() {
-    global $User, $_COOKIE;
-
-    if (isset($User)) return true;
-
-    if (isset($_COOKIE['auth'])) {
-        $r = redisLink();
-        $authcookie = $_COOKIE['auth'];
-        if ($userid = $r->get("auth:$authcookie")) {
-            if ($r->get("uid:$userid:auth") != $authcookie) return false;
-            loadUserInfo($userid);
-            return true;
-        }
-    }
-    return false;
-}
-
-function loadUserInfo($userid) {
-    global $User;
-
-    $r = redisLink();
-    $User['id'] = $userid;
-    $User['username'] = $r->get("uid:$userid:username");
-    return true;
-}
-
loadUserInfo as a separate function is overkill for our application, but it's a good template for a complex application. The only thing missing from the authentication is the logout. What do we do on logout? That's simple: we'll just change the random string in uid:1000:auth, remove the old auth:<oldauthstring> and add a new auth:<newauthstring>.

Important: the logout procedure explains why we don't just authenticate the user after the lookup of auth:<randomstring>, but double check it against uid:1000:auth. The true authentication string is the latter; the auth:<randomstring> is just an authentication key that may even be volatile, or if there are bugs in the program or a script gets interrupted we may even end up with multiple auth:<something> keys pointing to the same user id. The logout code is the following (logout.php):

-include("retwis.php");
-
-if (!isLoggedIn()) {
-    header("Location: index.php");
-    exit;
-}
-
-$r = redisLink();
-$newauthsecret = getrand();
-$userid = $User['id'];
-$oldauthsecret = $r->get("uid:$userid:auth");
-
-$r->set("uid:$userid:auth",$newauthsecret);
-$r->set("auth:$newauthsecret",$userid);
-$r->delete("auth:$oldauthsecret");
-
-header("Location: index.php");
-
That is just what we described and should be simple to understand. -

Updates

-Updates, also known as posts, are even simpler. In order to create a new post on the database we do something like this:

-INCR global:nextPostId => 10343
-SET post:10343 "$owner_id|$time|I'm having fun with Retwis"
-
As you can see, the user id and time of the post are stored directly inside the string; we don't need to look up by time or user id in the example application, so it is better to compact everything inside the post string.

After we create a post we obtain the post id. We need to LPUSH this post id onto the list of every user that's following the author of the post, and of course onto the author's own list of posts. This is the file update.php that shows how this is performed:

-include("retwis.php");
-
-if (!isLoggedIn() || !gt("status")) {
-    header("Location:index.php");
-    exit;
-}
-
-$r = redisLink();
-$postid = $r->incr("global:nextPostId");
-$status = str_replace("\n"," ",gt("status"));
-$post = $User['id']."|".time()."|".$status;
-$r->set("post:$postid",$post);
-$followers = $r->smembers("uid:".$User['id'].":followers");
-if ($followers === false) $followers = Array();
-$followers[] = $User['id']; /* Add the post to our own posts too */
-
-foreach($followers as $fid) {
-    $r->push("uid:$fid:posts",$postid,false);
-}
-# Push the post on the timeline, and trim the timeline to the
-# newest 1000 elements.
-$r->push("global:timeline",$postid,false);
-$r->ltrim("global:timeline",0,1000);
-
-header("Location: index.php");
-
The core of the function is the foreach. Using SMEMBERS we get all the followers of the current user, then the loop will LPUSH the post onto the uid:<userid>:posts list of every follower.

Note that we also maintain a timeline with all the posts. In order to do so all that is needed is to LPUSH the post onto global:timeline. Let's face it: do you start thinking it was a bit strange to have to sort things added in chronological order using ORDER BY with SQL? I think so indeed. -

Paginating updates

-Now it should be pretty clear how we can use LRANGE in order to get ranges of posts, and render these posts on the screen. The code is simple:

-function showPost($id) {
-    $r = redisLink();
-    $postdata = $r->get("post:$id");
-    if (!$postdata) return false;
-
-    $aux = explode("|",$postdata);
-    $id = $aux[0];
-    $time = $aux[1];
-    $username = $r->get("uid:$id:username");
-    $post = join(array_splice($aux,2,count($aux)-2),"|");
-    $elapsed = strElapsed($time);
-    $userlink = "<a class=\"username\" href=\"profile.php?u=".urlencode($username)."\">".utf8entities($username)."</a>";
-
-    echo('<div class="post">'.$userlink.' '.utf8entities($post)."<br>");
-    echo('<i>posted '.$elapsed.' ago via web</i></div>');
-    return true;
-}
-
-function showUserPosts($userid,$start,$count) {
-    $r = redisLink();
-    $key = ($userid == -1) ? "global:timeline" : "uid:$userid:posts";
-    $posts = $r->lrange($key,$start,$start+$count);
-    $c = 0;
-    foreach($posts as $p) {
-        if (showPost($p)) $c++;
-        if ($c == $count) break;
-    }
-    return count($posts) == $count+1;
-}
-
showPost will simply convert and print a Post in HTML while showUserPosts gets a range of posts and passes them to showPost.

Following users

If user id 1000 (antirez) wants to follow user id 1001 (pippo), we can do this with just two SADD:

-SADD uid:1000:following 1001
-SADD uid:1001:followers 1000
-
Note the same pattern again and again. In theory with a relational database the list of following and followers is a single table with fields like following_id and follower_id. With queries you can extract the followers or following of every user. With a key-value DB that's a bit different, as we need to set both the "1000 is following 1001" and "1001 is followed by 1000" relations. This is the price to pay, but on the other side accessing the data is simpler and ultra-fast. And having these things as separate sets allows us to do interesting stuff: for example using SINTER we can have the intersection of the 'following' sets of two different users, so we may add a feature to our Twitter clone so that it is able to tell you at warp speed, when you visit somebody else's profile, "you and foobar have 34 followers in common" and things like that.

You can find the code that sets or removes a following/follower relation at follow.php. It is trivial as you can see. -

Making it horizontally scalable

-Gentle reader, if you reached this point you are already a hero, thank you. Before talking about scaling horizontally it is worth checking the performance on a single server. Retwis is amazingly fast, without any kind of cache. On a very slow and loaded server, apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey asses slow! Go figure with more recent hardware.

So, first of all, probably you will not need more than one server for a lot of applications, even when you have a lot of users. But let's assume we are Twitter and need to handle a huge amount of traffic. What to do? -

Hashing the key

-The first thing to do is to hash the key and issue the request to different servers based on the key hash. There are a lot of well known algorithms to do so; for example check the Redis Ruby client library that implements consistent hashing. But the general idea is that you can turn your key into a number, and then take the remainder of the division of this number by the number of servers you have:

-server_id = crc32(key) % number_of_servers
-
This has a lot of problems, since if you add one server you need to move too many keys and so on, but this is the general idea even if you use a better hashing scheme like consistent hashing.

Ok, are key accesses distributed among the key space? Well, all the user data will be partitioned among different servers. There are no inter-key operations used (like SINTER; otherwise you need to make sure that the things you want to intersect end up on the same server. This is why Redis, unlike memcached, does not force a specific hashing scheme: it's application specific). Btw there are keys that are accessed more frequently.

Special keys

For example every time we post a new message, we need to increment the global:nextPostId key. How to fix this problem? A single server will get a lot of increments. The simplest way to handle this is to have a dedicated server just for increments. This is probably overkill btw unless you have really a lot of traffic. There is another trick. The ID does not really need to be an incremental number, it just needs to be unique. So you can get a random string long enough to be unlikely (almost impossible, if it's md5-size) to collide, and you are done. We successfully eliminated our main problem to make it really horizontally scalable!

There is another one: global:timeline. There is no fix for this; if you need to take something in order you can split it among different servers and then merge when you need to get the data back, or take it ordered and use a single key. Again, if you really have so many posts per second, you can use a single server just for this. Remember that with commodity hardware Redis is able to handle 100000 writes per second, and that's enough even for Twitter, I guess.

Please feel free to use the comments below for questions and feedback. -
- -
-
- - - diff --git a/doc/TypeCommand.html b/doc/TypeCommand.html deleted file mode 100644 index 45900422..00000000 --- a/doc/TypeCommand.html +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - - -
- - - -
-
- -TypeCommand: Contents
  TYPE _key_
    Return value
    See also -
- -

TypeCommand

- -
- -
- -
- #sidebar GenericCommandsSidebar

TYPE _key_

-Time complexity: O(1)
Return the type of the value stored at key in the form of a string. The type can be one of "none", "string", "list", "set". "none" is returned if the key does not exist.
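For example:

SET mystring foobar
TYPE mystring => string
LPUSH mylist hello
TYPE mylist => list
TYPE nosuchkey => none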
-

Return value

Status code reply, specifically:

-"none" if the key does not exist
-"string" if the key contains a String value
-"list" if the key contains a List value
-"set" if the key contains a Set value
-"zset" if the key contains a Sorted Set value
-"hash" if the key contains a Hash value
-

See also

- -
- -
-
- - - diff --git a/doc/UnstableSource.html b/doc/UnstableSource.html deleted file mode 100644 index 90afdfa3..00000000 --- a/doc/UnstableSource.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - -
- - - -
-
- -UnstableSource: Contents
  Get the latest Redis source code
    Unstable code
    Stable code -
- -

UnstableSource

- -
- -
- -
-

Get the latest Redis source code

Unstable code

-The development version of Redis is hosted here at Github; have fun cloning the source code with Git. If you are not familiar with Git just use the download button to get a tarball.

Stable code

-Warning: the development source code is only intended for people that want to develop Redis or absolutely need the latest features still not available on the stable releases. You may have a better experience with the latest stable tarball. - -
- -
-
- - - diff --git a/doc/ZaddCommand.html b/doc/ZaddCommand.html deleted file mode 100644 index fa4c8950..00000000 --- a/doc/ZaddCommand.html +++ /dev/null @@ -1,43 +0,0 @@ - - - - - - - -
- - - -
-
- -ZaddCommand: Contents
  ZADD _key_ _score_ _member_ (Redis >
    Return value -
- -

ZaddCommand

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

ZADD _key_ _score_ _member_ (Redis >= 1.1)

-Time complexity O(log(N)) with N being the number of elements in the sorted set
Add the specified member having the specified score to the sorted set stored at key. If member is already a member of the sorted set the score is updated, and the element reinserted in the right position to ensure sorting. If key does not exist a new sorted set with the specified _member_ as sole member is created. If the key exists but does not hold a sorted set value an error is returned.
-
The score value can be the string representation of a double precision floating point number.
-
For an introduction to sorted sets check the Introduction to Redis data types page.
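For example (the key and members are arbitrary):

ZADD myzset 1 a => 1
ZADD myzset 2 b => 1
ZADD myzset 5 a => 0 (a was already a member, its score is now 5)
ZRANGE myzset 0 -1 => b,a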
-

Return value

Integer reply, specifically:

-1 if the new element was added
-0 if the element was already a member of the sorted set and the score was updated
-
-
- -
-
- - - diff --git a/doc/ZcardCommand.html b/doc/ZcardCommand.html deleted file mode 100644 index d9a85ef0..00000000 --- a/doc/ZcardCommand.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - -
- - - -
-
- -ZcardCommand: Contents
  ZCARD _key_ (Redis >
    Return value -
- -

ZcardCommand

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

ZCARD _key_ (Redis >= 1.1)

-Time complexity O(1)
Return the sorted set cardinality (number of elements). If the key does not exist 0 is returned, like for empty sorted sets.
-

Return value

Integer reply, specifically:

-the cardinality (number of elements) of the set as an integer.
-
- -
- -
-
- - - diff --git a/doc/ZincrbyCommand.html b/doc/ZincrbyCommand.html deleted file mode 100644 index 7e6a8458..00000000 --- a/doc/ZincrbyCommand.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - -
- - - -
-
- -ZincrbyCommand: Contents
  ZINCRBY _key_ _increment_ _member_ (Redis >
    Return value -
- -

ZincrbyCommand

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

ZINCRBY _key_ _increment_ _member_ (Redis >= 1.1)

-Time complexity O(log(N)) with N being the number of elements in the sorted set
If member already exists in the sorted set the increment is added to its score and the position of the element in the sorted set is updated accordingly. If member does not already exist in the sorted set it is added with _increment_ as score (that is, as if the previous score was virtually zero). If key does not exist a new sorted set with the specified _member_ as sole member is created. If the key exists but does not hold a sorted set value an error is returned.
-
The score value can be the string representation of a double precision floating point number. It's possible to provide a negative value to perform a decrement.
-
For an introduction to sorted sets check the Introduction to Redis data types page.
-

Return value

Bulk reply, specifically:

-The score of the member after the increment is performed.
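A short illustrative session (not part of the original page; myzset is a hypothetical key, and the client prints the returned score as a string):

redis> zadd myzset 1 foo
(integer) 1
redis> zincrby myzset 2 foo
"3"
redis> zincrby myzset 5 bar
"5"

Note that bar did not exist before the second ZINCRBY, so it is added with score 5.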
-
-
- -
-
- - -
diff --git a/doc/ZrangeCommand.html b/doc/ZrangeCommand.html
deleted file mode 100644
index e453d36c..00000000
--- a/doc/ZrangeCommand.html
+++ /dev/null
@@ -1,42 +0,0 @@
- - - - - - - -
- - - -
- - -

ZrangeCommand

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

ZRANGE _key_ _start_ _end_ `[`WITHSCORES`]` (Redis >= 1.1)

ZREVRANGE _key_ _start_ _end_ `[`WITHSCORES`]` (Redis >= 1.1)

Time complexity: O(log(N))+O(M) (with N being the number of elements in the sorted set and M the number of elements requested)
Return the specified elements of the sorted set at the specified key. The elements are considered sorted from the lowest to the highest score when using ZRANGE, and in the reverse order when using ZREVRANGE. Start and end are zero-based indexes. 0 is the first element of the sorted set (the one with the lowest score when using ZRANGE), 1 the next element by score, and so on.
-
_start_ and _end_ can also be negative numbers indicating offsets from the end of the sorted set. For example, -1 is the last element of the sorted set, -2 the penultimate element, and so on.
-
Indexes out of range will not produce an error: if start is over the end of the sorted set, or start > end, an empty list is returned. If end is over the end of the sorted set, Redis will treat it just like the last element of the sorted set.
-
It's possible to pass the WITHSCORES option to the command in order to return not only the values but also the scores of the elements. Redis will return the data as a single list composed of value1,score1,value2,score2,...,valueN,scoreN, but client libraries are free to return a more appropriate data type (what we think is that the best return type for this command is an Array of two-element Arrays / Tuples, in order to preserve sorting).
-

Return value

Multi bulk reply, specifically a list of elements in the specified range. -
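An illustrative session (not part of the original page; the key and member names are made up, and output formatting may vary slightly by client version):

redis> zadd myzset 1 foo
(integer) 1
redis> zadd myzset 2 bar
(integer) 1
redis> zadd myzset 3 biz
(integer) 1
redis> zrange myzset 0 -1
1. "foo"
2. "bar"
3. "biz"
redis> zrevrange myzset 0 1
1. "biz"
2. "bar"
redis> zrange myzset 0 -1 withscores
1. "foo"
2. "1"
3. "bar"
4. "2"
5. "biz"
6. "3"

With WITHSCORES the client shows the flat value,score,value,score,... list described above.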
- -
-
- - -
diff --git a/doc/ZrangebyscoreCommand.html b/doc/ZrangebyscoreCommand.html
deleted file mode 100644
index 583e9303..00000000
--- a/doc/ZrangebyscoreCommand.html
+++ /dev/null
@@ -1,73 +0,0 @@
- - - - - - - -
- - - -
- - -

ZrangebyscoreCommand

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

ZRANGEBYSCORE _key_ _min_ _max_ `[`LIMIT _offset_ _count_`]` (Redis >= 1.1)

ZRANGEBYSCORE _key_ _min_ _max_ `[`LIMIT _offset_ _count_`]` `[`WITHSCORES`]` (Redis >= 1.3.4)

Time complexity: O(log(N))+O(M) with N being the number of elements in the sorted set and M the number of elements returned by the command, so if M is constant (for instance you always ask for the first ten elements with LIMIT) you can consider it O(log(N))
Return all the elements in the sorted set at key with a score between _min_ and _max_ (including elements with score equal to min or max).
-
The elements having the same score are returned sorted lexicographically as ASCII strings (this follows from a property of Redis sorted sets and does not involve further computation).
-
Using the optional LIMIT it's possible to get only a range of the matching elements, in an SQL-like way. Note that if offset is large, the command needs to traverse the list for offset elements and this adds up to the O(M) figure.

Exclusive intervals and infinity

-min and max can be -inf and +inf, so that you are not required to know what's the greatest or smallest element in order to take, for instance, elements "up to a given value".

Also, while the interval is by default closed (inclusive), it's possible to specify open intervals by prefixing the score with a "(" character, so for instance: -
-ZRANGEBYSCORE zset (1.3 5
-
-Will return all the values with score > 1.3 and <= 5, while for instance: -
-ZRANGEBYSCORE zset (5 (10
-
-Will return all the values with score > 5 and < 10 (5 and 10 excluded). -

Return value

Multi bulk reply, specifically a list of elements in the specified score range. -

Examples

-
-redis> zadd zset 1 foo
-(integer) 1
-redis> zadd zset 2 bar
-(integer) 1
-redis> zadd zset 3 biz
-(integer) 1
-redis> zadd zset 4 foz
-(integer) 1
-redis> zrangebyscore zset -inf +inf
-1. "foo"
-2. "bar"
-3. "biz"
-4. "foz"
-redis> zrangebyscore zset 1 2
-1. "foo"
-2. "bar"
-redis> zrangebyscore zset (1 2
-1. "bar"
-redis> zrangebyscore zset (1 (2
-(empty list or set)
-
-
- -
-
- - -
diff --git a/doc/ZremCommand.html b/doc/ZremCommand.html
deleted file mode 100644
index ecc4b982..00000000
--- a/doc/ZremCommand.html
+++ /dev/null
@@ -1,42 +0,0 @@
- - - - - - - -
- - - -
-
- -ZremCommand: Contents
  ZREM _key_ _member_ (Redis >= 1.1)
    Return value -
- -

ZremCommand

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

ZREM _key_ _member_ (Redis >= 1.1)

Time complexity O(log(N)) with N being the number of elements in the sorted set
Remove the specified member from the sorted set value stored at key. If _member_ was not a member of the sorted set, no operation is performed. If key does not hold a sorted set value, an error is returned.
-

Return value

Integer reply, specifically:

-1 if the element was removed
-0 if the element was not a member of the sorted set
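A short illustrative session (not part of the original page; myzset is a hypothetical key):

redis> zadd myzset 1 foo
(integer) 1
redis> zrem myzset foo
(integer) 1
redis> zrem myzset foo
(integer) 0

The second ZREM returns 0 because foo had already been removed.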
-
- -
- -
-
- - -
diff --git a/doc/ZremrangebyscoreCommand.html b/doc/ZremrangebyscoreCommand.html
deleted file mode 100644
index 304650d1..00000000
--- a/doc/ZremrangebyscoreCommand.html
+++ /dev/null
@@ -1,39 +0,0 @@
- - - - - - - -
- - - -
-
- -ZremrangebyscoreCommand: Contents
  ZREMRANGEBYSCORE _key_ _min_ _max_ (Redis >= 1.1)
    Return value -
- -

ZremrangebyscoreCommand

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

ZREMRANGEBYSCORE _key_ _min_ _max_ (Redis >= 1.1)

Time complexity: O(log(N))+O(M) with N being the number of elements in the sorted set and M the number of elements removed by the operation
Remove all the elements in the sorted set at key with a score between _min_ and _max_ (including elements with score equal to min or max).
-

Return value

Integer reply, specifically the number of elements removed. - -
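A short illustrative session (not part of the original page; key and member names are made up):

redis> zadd myzset 1 foo
(integer) 1
redis> zadd myzset 2 bar
(integer) 1
redis> zadd myzset 3 biz
(integer) 1
redis> zremrangebyscore myzset 2 3
(integer) 2
redis> zrange myzset 0 -1
1. "foo"

Two elements (bar and biz) fall in the closed interval [2,3], so 2 is returned.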
- -
-
- - -
diff --git a/doc/ZscoreCommand.html b/doc/ZscoreCommand.html
deleted file mode 100644
index 9815f026..00000000
--- a/doc/ZscoreCommand.html
+++ /dev/null
@@ -1,41 +0,0 @@
- - - - - - - -
- - - -
-
- -ZscoreCommand: Contents
  ZSCORE _key_ _element_ (Redis >= 1.1)
    Return value -
- -

ZscoreCommand

- -
- -
- -
- #sidebar SortedSetCommandsSidebar

ZSCORE _key_ _element_ (Redis >= 1.1)

Time complexity O(1)
Return the score of the specified element of the sorted set at key. If the specified element does not exist in the sorted set, or the key does not exist at all, a special 'nil' value is returned.
-

Return value

Bulk reply
-the score (a double precision floating point number) represented as a string.
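A short illustrative session (not part of the original page; the key and member names are hypothetical, and output formatting may vary by client version):

redis> zadd myzset 1.5 foo
(integer) 1
redis> zscore myzset foo
"1.5"
redis> zscore myzset bar
(nil)

bar is not a member of the sorted set, so a nil reply is returned.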
-
- -
- -
-
- - -
diff --git a/doc/index.html b/doc/index.html
deleted file mode 100644
index 2cf5d9a8..00000000
--- a/doc/index.html
+++ /dev/null
@@ -1,41 +0,0 @@
- - - - - - - -
- - - -
-
- -index: Contents
  HOWTOs about selected features
  Hacking
  Videos -
- -

index

- -
- -
- -
- = Redis Documentation =

Russian Translation. Hello! The following are pointers to the different parts of the Redis Documentation.

-

HOWTOs about selected features

  • The Redis Replication HOWTO is what you need to read in order to understand how Redis master <-> slave replication works.
  • The Append Only File HOWTO explains how the alternative Redis durability mode works. AOF is an alternative to snapshotting on disk from time to time (the default).
  • Virtual Memory User Guide. A simple-to-understand guide about using and configuring the Redis Virtual Memory.
-

Hacking

-
  • The Protocol Specification is all you need in order to implement a Redis client library for a language that is still missing one. PHP, Python, Ruby, and Erlang are already supported.
-
  • Look at Redis Internals if you are interested in the implementation details of the Redis server.
-

Videos

-
- -
-
- - -
diff --git a/mkreleasehdr.sh b/mkreleasehdr.sh
new file mode 100755
index 00000000..97290437
--- /dev/null
+++ b/mkreleasehdr.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+GIT_SHA1=$((git show-ref --head --hash=8 2> /dev/null || echo 00000000) | head -n1)
+GIT_DIRTY=$(git status -s 2> /dev/null | wc -l)
+test -f release.h || touch release.h
+(cat release.h | grep SHA1 | grep $GIT_SHA1) && \
+(cat release.h | grep DIRTY | grep $GIT_DIRTY) && exit 0 # Already uptodate
+echo "#define REDIS_GIT_SHA1 \"$GIT_SHA1\"" > release.h
+echo "#define REDIS_GIT_DIRTY $GIT_DIRTY" >> release.h
+touch redis.c # force recompile of redis.c
-- 
2.45.2
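For context, this is roughly what running the script by hand from a clean Redis source checkout looks like; the transcript is illustrative only (it is not part of the patch) and the SHA1 value shown is a made-up placeholder:

$ sh mkreleasehdr.sh
$ cat release.h
#define REDIS_GIT_SHA1 "a1b2c3d4"
#define REDIS_GIT_DIRTY 0

If the script is run again while the SHA1 and dirty count are unchanged, it exits before rewriting release.h and never reaches the final touch, so it only forces a recompilation of redis.c when the git SHA1 or dirty status has actually changed.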