# Redis configuration file example
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
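+#
+# For example, the memory limit directive described later in this file
+# could be written with either unit syntax (hypothetical value):
+#
+# maxmemory 100mb
+# maxmemory 104857600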
+
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no
-# When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
-# You can specify a custom pid file location here.
+# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
+# default. You can specify a custom pid file location here.
pidfile /var/run/redis.pid
-# Accept connections on the specified port, default is 6379
+# Accept connections on the specified port, default is 6379.
port 6379
# If you want, you can bind a single interface; if the bind option is not
-# specified all the interfaces will listen for connections.
+# specified all the interfaces will listen for incoming connections.
#
# bind 127.0.0.1
+# Specify the path for the unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /tmp/redis.sock
+
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 300
# Set server verbosity to 'verbose'
# it can be one of:
# debug (a lot of information, useful for development/testing)
+# verbose (a lot of rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
-loglevel debug
+loglevel verbose
# Specify the log file name. Also 'stdout' can be used to force
-# the demon to log on the standard output. Note that if you use standard
+# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile stdout
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
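databases 16

################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed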
#
# Note: you can disable saving completely by commenting out all the "save" lines.
-#save 900 1
-#save 300 10
-#save 60 10000
+save 900 1
+save 300 10
+save 60 10000
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
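# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
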
# The filename where to dump the DB
dbfilename dump.rdb
-# For default save/load DB in/from the working directory
-# Note that you must specify a directory not a file name.
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# Also the Append Only File will be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
dir ./
################################# REPLICATION #################################
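#
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave,
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>

# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.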
#
# masterauth <master-password>
+# When a slave loses the connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+# an error "SYNC with master in progress" to all kinds of commands
+# except INFO and SLAVEOF.
+#
+slave-serve-stale-data yes
+
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
+#
+# Warning: since Redis is pretty fast an outside user can try up to
+# 150k passwords per second against a good box. This means that you should
+# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
+# Command renaming.
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use
+# tools but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default there
# is no limit, and it's up to the number of file descriptors the Redis process
-# is able to open. The special value '0' means no limts.
+# is able to open. The special value '0' means no limits.
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
#
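# maxclients 128

# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys with the
# eviction policy selected (see the maxmemory-policy directive below).
#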
# maxmemory <bytes>
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among the following behaviors:
+#
+# volatile-lru -> remove the key with an expire set using an LRU algorithm
+# allkeys-lru -> remove any key according to the LRU algorithm
+# volatile-random -> remove a random key with an expire set
+# allkeys-random -> remove a random key, any key
+# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+# noeviction -> don't evict at all, just return an error on write operations
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations when there are no suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy volatile-lru
+
+# LRU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune the sample size
+# to check. For instance by default Redis will check three keys and
+# pick the one that was used least recently; you can change the sample size
+# using the following configuration directive.
+#
+# maxmemory-samples 3
+
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. If you can live
# with the idea that the latest records will be lost if something like a crash
# happens this is the preferred way to run Redis. If instead you care a lot
# about your data and don't want that a single record can get lost you should
# enable the append only mode: when this mode is enabled Redis will append
-# every write operation received in the file appendonly.log. This file will
+# every write operation received in the file appendonly.aof. This file will
# be read on startup in order to rebuild the full dataset in memory.
#
# Note that you can have both the async dumps and the append only file if you
# like (you have to comment the "save" statements above to disable the dumps).
# Still if append only mode is enabled Redis will load the data from the
# log file at startup ignoring the dump.rdb file.
#
-# The name of the append only file is "appendonly.log"
-#
# IMPORTANT: Check the BGREWRITEAOF command to learn how to rewrite the
# append log file in the background when it gets too big.
appendonly no
+# The name of the append only file (default: "appendonly.aof")
+# appendfilename appendonly.aof
+
# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only if one second passed since the last fsync. Compromise.
#
-# The default is "always" that's the safer of the options. It's up to you to
-# understand if you can relax this to "everysec" that will fsync every second
-# or to "no" that will let the operating system flush the output buffer when
-# it want, for better performances (but if you can live with the idea of
-# some data loss consider the default persistence mode that's snapshotting).
+# The default is "everysec" that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performance (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# If unsure, use "everysec".
-appendfsync always
-# appendfsync everysec
+# appendfsync always
+appendfsync everysec
# appendfsync no
-################################ VIRTUAL MEMORY ###############################
-
-# Virtual Memory allows Redis to work with datasets bigger than the actual
-# amount of RAM needed to hold the whole dataset in memory.
-# In order to do so very used keys are taken in memory while the other keys
-# are swapped into a swap file, similarly to what operating systems do
-# with memory pages.
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
#
-# To enable VM just set 'vm-enabled' to yes, and set the following three
-# VM parameters accordingly to your needs.
-
-vm-enabled yes
-# vm-enabled no
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving the durability of Redis is
+# the same as "appendfsync no", which in practical terms means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+no-appendfsync-on-rewrite no
+
+#################################### DISK STORE ###############################
+
+# When disk store is active Redis works as an on-disk database, where memory
+# is only used as an object cache.
+#
+# This mode is good for datasets that are bigger than memory, and in general
+# when you want to trade speed for:
+#
+# - less memory used
+# - immediate server restart
+# - per key durability, without need for background saving
+#
+# On the other hand, with disk store enabled MULTI/EXEC are no longer
+# transactional from the point of view of the persistence on disk, that is,
+# Redis transactions will still guarantee that commands are either processed
+# all or nothing, but there is no guarantee that all the keys are flushed
+# on disk in an atomic way.
+#
+# Of course with disk store enabled Redis is not as fast as it is when
+# working with just the memory back end.
-# vm-max-memory configures the VM to use at max the specified amount of
-# RAM. Everything that deos not fit will be swapped on disk *if* possible, that
-# is, if there is still enough contiguous space in the swap file.
-vm-max-memory 10000000
+diskstore-enabled no
+diskstore-path redis.ds
+cache-max-memory 0
+cache-flush-delay 0
-# Redis swap files is split into pages. An object can be saved using multiple
-# contiguous pages, but pages can't be shared between different objects.
-# So if your page is too big, small objects swapped out on disk will waste
-# a lot of space. If you page is too small, there is less space in the swap
-# file (assuming you configured the same number of total swap file pages).
-#
-# If you use a lot of small objects, use a page size of 64 or 32 bytes.
-# If you use a lot of big objects, use a bigger page size.
-# If unsure, use the defualt :)
-vm-page-size 256
+############################### ADVANCED CONFIG ###############################
-# Number of total memory pages in the swap file.
-# Given that the page table (a bitmap of free/used pages) is taken in memory,
-# every 8 pages on disk will consume 1 byte of RAM.
+# Hashes are encoded in a special way (much more memory efficient) when they
+# have at max a given number of elements, and the biggest element does not
+# exceed a given threshold. You can configure these limits with the following
+# configuration directives.
+hash-max-zipmap-entries 64
+hash-max-zipmap-value 512
+
+# Similarly to hashes, small lists are also encoded in a special way in order
+# to save a lot of space. The special representation is only used when
+# you are under the following limits:
+list-max-ziplist-entries 512
+list-max-ziplist-value 64
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
#
-# The total swap size is vm-page-size * vm-pages
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with a 2 millisecond delay.
#
-# With the default of 256-bytes memory pages and 104857600 pages Redis will
-# use a 25 GB swap file, that will use rougly 13 MB of RAM for the page table.
-vm-pages 104857600
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
-############################### ADVANCED CONFIG ###############################
+################################## INCLUDES ###################################
-# Glue small output buffers together in order to send small replies in a
-# single TCP packet. Uses a bit more CPU but most of the times it is a win
-# in terms of number of queries per second. Use 'yes' if unsure.
-glueoutputbuf yes
-
-# Use object sharing. Can save a lot of memory if you have many common
-# string in your dataset, but performs lookups against the shared objects
-# pool so it uses more CPU and can be a bit slower. Usually it's a good
-# idea.
-#
-# When object sharing is enabled (shareobjects yes) you can use
-# shareobjectspoolsize to control the size of the pool used in order to try
-# object sharing. A bigger pool size will lead to better sharing capabilities.
-# In general you want this value to be at least the double of the number of
-# very common strings you have in your dataset.
-#
-# WARNING: object sharing is experimental, don't enable this feature
-# in production before of Redis 1.0-stable. Still please try this feature in
-# your development environment so that we can test it better.
-shareobjects no
-shareobjectspoolsize 1024
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf