start_server {tags {"other"}} {
    if {$::force_failure} {
        # This is used just for test suite development purposes.
        test {Failing test} {
            format err
        } {ok}
    }

    test {SAVE - make sure there are all the types as values} {
        # Wait for any background save in progress to terminate
        waitForBgsave r
        r lpush mysavelist hello
        r lpush mysavelist world
        r set myemptykey {}
        r set mynormalkey {blablablba}
        r zadd mytestzset 10 a
        r zadd mytestzset 20 b
        r zadd mytestzset 30 c
        r save
    } {OK}

    tags {slow} {
        if {$::accurate} {set iterations 10000} else {set iterations 1000}
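        # Write random strings of each data model (binary, alpha, compr) and
        # verify that GET returns exactly what was SET.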
        foreach fuzztype {binary alpha compr} {
            test "FUZZ stresser with data model $fuzztype" {
                set err 0
                for {set i 0} {$i < $iterations} {incr i} {
                    set fuzz [randstring 0 512 $fuzztype]
                    r set foo $fuzz
                    set got [r get foo]
                    if {$got ne $fuzz} {
                        set err [list $fuzz $got]
                        break
                    }
                }
                set _ $err
            } {0}
        }
    }

    test {BGSAVE} {
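        # A value written after SAVE must survive a BGSAVE followed by a
        # reload of the dataset from disk.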
        waitForBgsave r
        r flushdb
        r save
        r set x 10
        r bgsave
        waitForBgsave r
        r debug reload
        r get x
    } {10}

    test {SELECT an out of range DB} {
        catch {r select 1000000} err
        set _ $err
    } {*invalid*}

    tags {consistency} {
        if {![catch {package require sha1}]} {
            if {$::accurate} {set numops 10000} else {set numops 1000}
            test {Check consistency of different data types after a reload} {
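                # DEBUG DIGEST returns a digest of the whole dataset: it must
                # be identical before and after a DEBUG RELOAD.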
                r flushdb
                createComplexDataset r $numops
                set dump [csvdump r]
                set sha1 [r debug digest]
                r debug reload
                set sha1_after [r debug digest]
                if {$sha1 eq $sha1_after} {
                    set _ 1
                } else {
                    set newdump [csvdump r]
                    puts "Consistency test failed!"
                    puts "You can inspect the two dumps in /tmp/repldump*.txt"

                    set fd [open /tmp/repldump1.txt w]
                    puts $fd $dump
                    close $fd
                    set fd [open /tmp/repldump2.txt w]
                    puts $fd $newdump
                    close $fd

                    set _ 0
                }
            } {1}

            test {Same dataset digest if saving/reloading as AOF?} {
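                # Rewrite the same dataset as an AOF file, load it back and
                # check that the digest did not change.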
                r bgrewriteaof
                waitForBgrewriteaof r
                r debug loadaof
                set sha1_after [r debug digest]
                if {$sha1 eq $sha1_after} {
                    set _ 1
                } else {
                    set newdump [csvdump r]
                    puts "Consistency test failed!"
                    puts "You can inspect the two dumps in /tmp/aofdump*.txt"

                    set fd [open /tmp/aofdump1.txt w]
                    puts $fd $dump
                    close $fd
                    set fd [open /tmp/aofdump2.txt w]
                    puts $fd $newdump
                    close $fd

                    set _ 0
                }
            } {1}
        }
    }

    test {EXPIRES after a reload (snapshot + append only file rewrite)} {
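        # TTLs must be preserved both across an RDB reload (SAVE + DEBUG
        # RELOAD) and across an AOF rewrite followed by DEBUG LOADAOF.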
        r flushdb
        r set x 10
        r expire x 1000
        r save
        r debug reload
        set ttl [r ttl x]
        set e1 [expr {$ttl > 900 && $ttl <= 1000}]
        r bgrewriteaof
        waitForBgrewriteaof r
        r debug loadaof
        set ttl [r ttl x]
        set e2 [expr {$ttl > 900 && $ttl <= 1000}]
        list $e1 $e2
    } {1 1}

    test {EXPIRES after AOF reload (without rewrite)} {
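        # Set a mix of relative (EXPIRE, SETEX) and absolute (EXPIREAT)
        # expires, with seconds resolution first.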
        r flushdb
        r config set appendonly yes
        r set x somevalue
        r expire x 1000
        r setex y 2000 somevalue
        r set z somevalue
        r expireat z [expr {[clock seconds]+3000}]

        # Milliseconds variants
        r set px somevalue
        r pexpire px 1000000
        r psetex py 2000000 somevalue
        r set pz somevalue
        r pexpireat pz [expr {([clock seconds]+3000)*1000}]

        # Reload and check
        waitForBgrewriteaof r
        # We need to wait two seconds to avoid false positives here, otherwise
        # the DEBUG LOADAOF command may read a partial file.
        # Another solution would be to set the fsync policy to no, since this
        # prevents write() from being delayed by the completion of fsync().
        after 2000
        r debug loadaof
        set ttl [r ttl x]
        assert {$ttl > 900 && $ttl <= 1000}
        set ttl [r ttl y]
        assert {$ttl > 1900 && $ttl <= 2000}
        set ttl [r ttl z]
        assert {$ttl > 2900 && $ttl <= 3000}
        set ttl [r ttl px]
        assert {$ttl > 900 && $ttl <= 1000}
        set ttl [r ttl py]
        assert {$ttl > 1900 && $ttl <= 2000}
        set ttl [r ttl pz]
        assert {$ttl > 2900 && $ttl <= 3000}
        r config set appendonly no
    }

    tags {protocol} {
        test {PIPELINING stresser (also a regression for the old epoll bug)} {
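            # Talk to the server over a raw socket, pipelining 100000 SET+GET
            # pairs before reading back any reply.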
            set fd2 [socket $::host $::port]
            fconfigure $fd2 -encoding binary -translation binary
            puts -nonewline $fd2 "SELECT 9\r\n"
            flush $fd2
            gets $fd2

            for {set i 0} {$i < 100000} {incr i} {
                set q {}
                set val "0000${i}0000"
                append q "SET key:$i $val\r\n"
                puts -nonewline $fd2 $q
                set q {}
                append q "GET key:$i\r\n"
                puts -nonewline $fd2 $q
            }
            flush $fd2
            for {set i 0} {$i < 100000} {incr i} {
                gets $fd2 line
                gets $fd2 count
                set count [string range $count 1 end]
                set val [read $fd2 $count]
                read $fd2 2
            }
            close $fd2
            set _ 1
        } {1}
    }

    test {MULTI / EXEC basics} {
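        # Commands issued inside MULTI reply with QUEUED; EXEC then returns
        # the list of replies of all the queued commands.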
        r del mylist
        r rpush mylist a
        r rpush mylist b
        r rpush mylist c
        r multi
        set v1 [r lrange mylist 0 -1]
        set v2 [r ping]
        set v3 [r exec]
        list $v1 $v2 $v3
    } {QUEUED QUEUED {{a b c} PONG}}

    test {DISCARD} {
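        # DISCARD aborts the transaction, so the queued DEL is never executed
        # and mylist keeps its elements.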
        r del mylist
        r rpush mylist a
        r rpush mylist b
        r rpush mylist c
        r multi
        set v1 [r del mylist]
        set v2 [r discard]
        set v3 [r lrange mylist 0 -1]
        list $v1 $v2 $v3
    } {QUEUED OK {a b c}}

    test {Nested MULTI are not allowed} {
        set err {}
        r multi
        catch {r multi} err
        r exec
        set _ $err
    } {*ERR MULTI*}

    test {MULTI where commands alter argc/argv} {
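        # SPOP alters its own argc/argv when executed: check that it still
        # works when queued inside MULTI and that the set is left empty.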
        r sadd myset a
        r multi
        r spop myset
        list [r exec] [r exists myset]
    } {a 0}

    test {WATCH inside MULTI is not allowed} {
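        # WATCH is only useful before MULTI: calling it inside an open
        # transaction must return an error.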
        set err {}
        r multi
        catch {r watch x} err
        r exec
        set _ $err
    } {*ERR WATCH*}

    test {APPEND basics} {
        list [r append foo bar] [r get foo] \
                [r append foo 100] [r get foo]
    } {3 bar 6 bar100}

    test {APPEND basics, integer encoded values} {
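        # Appending to a key created via APPEND and to a key created via SET
        # with an integer-encoded value must produce the same final string.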
        set res {}
        r del foo
        r append foo 1
        r append foo 2
        lappend res [r get foo]
        r set foo 1
        r append foo 2
        lappend res [r get foo]
    } {12 12}

    test {APPEND fuzzing} {
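        # Build the same string both client side (in buf) and server side
        # (via APPEND) and check that the two copies match for every data
        # model.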
        set err {}
        foreach type {binary alpha compr} {
            set buf {}
            r del x
            for {set i 0} {$i < 1000} {incr i} {
                set bin [randstring 0 10 $type]
                append buf $bin
                r append x $bin
            }
            if {$buf ne [r get x]} {
                set err "Expected '$buf' found '[r get x]'"
                break
            }
        }
        set _ $err
    } {}

    # Leave the user with a clean DB before exiting
    test {FLUSHDB} {
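        # Flush DB 9 and DB 10 and check that both report a DBSIZE of 0.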
        set aux {}
        r select 9
        r flushdb
        lappend aux [r dbsize]
        r select 10
        r flushdb
        lappend aux [r dbsize]
    } {0 0}

    test {Perform a final SAVE to leave a clean DB on disk} {
        waitForBgsave r
        r save
    } {OK}
}