X-Git-Url: https://git.saurik.com/redis.git/blobdiff_plain/f9ef912c661a8d0a0690e20dc46f29484e0e4a5e..e04be06e89ac7158f0a5a79668ee734bf879c9e4:/redis.conf
diff --git a/redis.conf b/redis.conf
index 4760f291..97aea334 100644
--- a/redis.conf
+++ b/redis.conf
@@ -93,12 +93,36 @@ save 900 1
 save 300 10
 save 60 10000
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process starts working again Redis will
+# automatically allow writes again.
+#
+# However if you have set up proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
 # Compress string objects using LZF when dump .rdb databases?
 # For default that's set to 'yes' as it's almost always a win.
 # If you want to save some CPU in the saving child set it to 'no' but
 # the dataset will likely be bigger if you have compressible values or keys.
 rdbcompression yes
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performance.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
 # The filename where to dump the DB
 dbfilename dump.rdb
@@ -141,6 +165,22 @@
 #
 slave-serve-stale-data yes
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
 # Slaves send PINGs to server in a predefined interval. It's possible to change
 # this interval with the repl_ping_slave_period option. The default value is 10
 # seconds.
@@ -156,6 +196,21 @@ slave-serve-stale-data yes
 #
 # repl-timeout 60
+# The slave priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a slave to promote into a
+# master if the master is no longer working correctly.
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority 100
+
 ################################## SECURITY ###################################
 # Require clients to issue AUTH before processing any other
@@ -231,7 +286,7 @@ slave-serve-stale-data yes
 # volatile-lru -> remove the key with an expire set using an LRU algorithm
 # allkeys-lru -> remove any key accordingly to the LRU algorithm
 # volatile-random -> remove a random key with an expire set
-# allkeys->random -> remove a random key, any key
+# allkeys-random -> remove a random key, any key
 # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
 # noeviction -> don't expire at all, just return an error on write operations
 #
@@ -258,21 +313,23 @@ slave-serve-stale-data yes
 ############################## APPEND ONLY MODE ###############################
-# By default Redis asynchronously dumps the dataset on disk. If you can live
-# with the idea that the latest records will be lost if something like a crash
-# happens this is the preferred way to run Redis. If instead you care a lot
-# about your data and don't want to that a single record can get lost you should
-# enable the append only mode: when this mode is enabled Redis will append
-# every write operation received in the file appendonly.aof. This file will
-# be read on startup in order to rebuild the full dataset in memory.
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of writes being lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
 #
-# Note that you can have both the async dumps and the append only file if you
-# like (you have to comment the "save" statements above to disable the dumps).
-# Still if append only mode is enabled Redis will load the data from the
-# log file at startup ignoring the dump.rdb file.
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled, at startup Redis will load the AOF, as that is the file
+# with the better durability guarantees.
 #
-# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
-# log file in background when it gets too big.
+# Please check http://redis.io/topics/persistence for more information.
 appendonly no
@@ -287,7 +344,7 @@ appendonly no
 #
 # no: don't fsync, just let the OS flush the data when it wants. Faster.
 # always: fsync after every write to the append only log . Slow, Safest.
-# everysec: fsync only if one second passed since the last fsync. Compromise.
+# everysec: fsync only one time every second. Compromise.
 #
 # The default is "everysec" that's usually the right compromise between
 # speed and data safety. It's up to you to understand if you can relax this to
@@ -297,6 +354,9 @@ appendonly no
 # or on the contrary, use "always" that's very slow but a bit safer than
 # everysec.
 #
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
 # If unsure, use "everysec".
# appendfsync always @@ -361,25 +421,6 @@ auto-aof-rewrite-min-size 64mb # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 -################################ REDIS CLUSTER ############################### -# -# Normal Redis instances can't be part of a Redis Cluster, only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system does not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. - ################################## SLOW LOG ################################### # The Redis Slow Log is a system to log queries that exceeded a specified @@ -402,16 +443,15 @@ slowlog-log-slower-than 10000 # There is no limit to this length. Just be aware that it will consume memory. # You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 1024 +slowlog-max-len 128 ############################### ADVANCED CONFIG ############################### -# Hashes are encoded in a special way (much more memory efficient) when they -# have at max a given number of elements, and the biggest element does not -# exceed a given threshold. You can configure this limits with the following -# configuration directives. -hash-max-zipmap-entries 512 -hash-max-zipmap-value 64 +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 # Similarly to hashes, small lists are also encoded in a special way in order # to save a lot of space. The special representation is only used when
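The hunks above add the 2.6-era directives stop-writes-on-bgsave-error, rdbchecksum, slave-read-only and slave-priority, and retune slowlog-max-len and the hash-max-ziplist-* thresholds. As a quick way to see which values a running server actually uses, here is a minimal sketch (not taken from this diff) using the redis-py client. It assumes a plain local Redis 2.6 instance on the default port; the host/port, the expected_defaults table and the user:1000 key are illustrative, and the client calls shown (config_get, hset, object) are standard redis-py methods, so adapt it if your client differs.

import redis

# Assumes a local, standalone Redis instance on the default port.
# decode_responses makes redis-py return plain strings instead of bytes.
r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)

# Directives touched by this diff, with the defaults the new redis.conf ships.
expected_defaults = {
    'stop-writes-on-bgsave-error': 'yes',
    'rdbchecksum': 'yes',
    'slave-read-only': 'yes',
    'slave-priority': '100',
    'slowlog-max-len': '128',
    'hash-max-ziplist-entries': '512',
    'hash-max-ziplist-value': '64',
}

for name, default in expected_defaults.items():
    # CONFIG GET returns a mapping of parameter name -> current value.
    current = r.config_get(name).get(name)
    print('%-28s current=%-6s shipped-default=%s' % (name, current, default))

# A small hash should use the compact ziplist encoding described under
# ADVANCED CONFIG, since it stays below both hash-max-ziplist-* thresholds.
r.delete('user:1000')
r.hset('user:1000', 'name', 'alice')
r.hset('user:1000', 'age', '30')
print('user:1000 encoding: %s' % r.object('encoding', 'user:1000'))  # expect 'ziplist'

On a stock build each reported value should match the shipped default unless redis.conf or a CONFIG SET call has overridden it.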