# Redis configuration file example

# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
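#
# For example (illustrative values only), the following two settings would be
# equivalent, since 1mb means 1024*1024 bytes:
#
# maxmemory 100mb
# maxmemory 104857600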

# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no

# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis.pid

# Accept connections on the specified port, default is 6379.
port 6379

# If you want, you can bind a single interface; if the bind option is not
# specified, all the interfaces will listen for incoming connections.
#
# bind 127.0.0.1

# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
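#
# For example (illustrative, assuming the path above and a redis-cli build
# with unix socket support), clients could then connect with:
#
#   redis-cli -s /tmp/redis.sock ping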

# Close the connection after a client is idle for N seconds (0 to disable)
timeout 300

# Set the server verbosity level.
# It can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful messages, but not a mess like the debug level)
# notice (moderately verbose, probably what you want in production)
# warning (only very important / critical messages are logged)
loglevel verbose

# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile stdout

# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no

# Specify the syslog identity.
# syslog-ident redis

# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0

# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
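
# For example (hypothetical key and session), a client can work against DB 3
# either by sending "SELECT 3" on an established connection, or via:
#
#   redis-cli -n 3 set mykey somevalue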

################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving entirely by commenting out all the "save" lines.

save 900 1
save 300 10
save 60 10000

# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes

# The filename where to dump the DB
dbfilename dump.rdb

# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# Also the Append Only File will be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir ./

################################# REPLICATION #################################

# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave,
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen on another port, and so on.
#
# slaveof <masterip> <masterport>

# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>

# When a slave loses the connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all kinds of commands
# except INFO and SLAVEOF.
#
slave-serve-stale-data yes

################################## SECURITY ###################################

# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
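#
# For example (hypothetical password, shown only to illustrate the flow),
# with "requirepass mysecretpassword" set, a client must authenticate before
# issuing other commands, e.g.:
#
#   redis-cli -a mysecretpassword ping
#
# or by sending "AUTH mysecretpassword" on an already open connection.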

# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use
# tools but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""

################################### LIMITS ####################################

# Set the max number of connected clients at the same time. By default there
# is no limit, and it's up to the number of file descriptors the Redis process
# is able to open. The special value '0' means no limit.
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 128

# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys with an
# EXPIRE set. It will try to start freeing keys that are going to expire
# in little time and preserve keys with a longer time to live.
# Redis will also try to remove objects from free lists if possible.
#
# If all this fails, Redis will start to reply with errors to commands
# that will use more memory, like SET, LPUSH, and so on, and will continue
# to reply to most read-only commands like GET.
#
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
# 'state' server or cache, not as a real DB. When Redis is used as a real
# database the memory usage will grow over the weeks; it will be obvious if
# it is going to use too much memory in the long run, and you'll have the time
# to upgrade. With maxmemory, after the limit is reached you'll start to get
# errors for write operations, and this may even lead to DB inconsistency.
#
# maxmemory <bytes>

# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among the following behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with all of these policies, Redis will return an error on write
# operations when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru

# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also select the sample
# size to check. For instance, by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
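#
# For example (illustrative values only), a pure LRU cache setup could
# combine the directives described above like this:
#
# maxmemory 100mb
# maxmemory-policy allkeys-lru
# maxmemory-samples 5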

############################## APPEND ONLY MODE ###############################

# By default Redis asynchronously dumps the dataset on disk. If you can live
# with the idea that the latest records will be lost if something like a crash
# happens this is the preferred way to run Redis. If instead you care a lot
# about your data and can't afford that even a single record gets lost, you
# should enable the append only mode: when this mode is enabled Redis will
# append every write operation received to the file appendonly.aof. This file
# will be read on startup in order to rebuild the full dataset in memory.
#
# Note that you can have both the async dumps and the append only file if you
# like (you have to comment out the "save" statements above to disable the dumps).
# Still if append only mode is enabled Redis will load the data from the
# log file at startup, ignoring the dump.rdb file.
#
# IMPORTANT: Check the BGREWRITEAOF command to see how to rewrite the append
# log file in the background when it gets too big.

appendonly no

# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof

# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data on disk, other OSes will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only if one second passed since the last fsync. Compromise.
#
# The default is "everysec", which is usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no", which will let the operating system flush the output buffer when
# it wants, for better performance (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# If unsure, use "everysec".

# appendfsync always
appendfsync everysec
# appendfsync no

# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none", which in practical terms means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no", which is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no

#################################### DISK STORE ###############################

# When disk store is active Redis works as an on-disk database, where memory
# is only used as an object cache.
#
# This mode is good for datasets that are bigger than memory, and in general
# when you want to trade speed for:
#
# - less memory used
# - immediate server restart
# - per key durability, without need for background saving
#
# On the other hand, with disk store enabled MULTI/EXEC are no longer
# transactional from the point of view of the persistence on disk, that is,
# Redis transactions will still guarantee that commands are either processed
# all or nothing, but there is no guarantee that all the keys are flushed
# on disk in an atomic way.
#
# Of course with disk store enabled Redis is not as fast as it is when
# working with just the memory back end.

diskstore-enabled no
diskstore-path redis.ds
cache-max-memory 0
cache-flush-delay 0

############################### ADVANCED CONFIG ###############################

# Hashes are encoded in a special way (much more memory efficient) when they
# have at max a given number of elements, and the biggest element does not
# exceed a given threshold. You can configure these limits with the following
# configuration directives.
hash-max-zipmap-entries 64
hash-max-zipmap-value 512

# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
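
# For example (hypothetical key), you can check which encoding a set is
# currently using with the OBJECT ENCODING command:
#
#   redis-cli sadd mynumbers 1 2 3
#   redis-cli object encoding mynumbers     -> "intset"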

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes

################################## INCLUDES ###################################

# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf