start_server {tags {"other"}} {
    if {$::force_failure} {
        # This is used just for test suite development purposes.
        test {Failing test} {
            format err
        } {ok}
    }

    test {SAVE - make sure there are all the types as values} {
        # Wait for a background saving in progress to terminate
        waitForBgsave r
        r lpush mysavelist hello
        r lpush mysavelist world
        r set myemptykey {}
        r set mynormalkey {blablablba}
        r zadd mytestzset 10 a
        r zadd mytestzset 20 b
        r zadd mytestzset 30 c
        r save
    } {OK}

    tags {slow} {
        if {$::accurate} {set iterations 10000} else {set iterations 1000}
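        # Stress SET/GET round trips with three data models: raw binary
        # bytes, alphanumeric text, and strings built from a tiny alphabet
        # so that they are highly compressible.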
        foreach fuzztype {binary alpha compr} {
            test "FUZZ stresser with data model $fuzztype" {
                set err 0
                for {set i 0} {$i < $iterations} {incr i} {
                    set fuzz [randstring 0 512 $fuzztype]
                    r set foo $fuzz
                    set got [r get foo]
                    if {$got ne $fuzz} {
                        set err [list $fuzz $got]
                        break
                    }
                }
                set _ $err
            } {0}
        }
    }

    test {BGSAVE} {
        waitForBgsave r
        r flushdb
        r save
        r set x 10
        r bgsave
        waitForBgsave r
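        # DEBUG RELOAD saves the dataset and loads it back from the RDB
        # file, so the GET below also verifies the BGSAVE output.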
        r debug reload
        r get x
    } {10}

    test {SELECT an out of range DB} {
        catch {r select 1000000} err
        set _ $err
    } {*invalid*}

    tags {consistency} {
        if {![catch {package require sha1}]} {
            if {$::accurate} {set numops 10000} else {set numops 1000}
            test {Check consistency of different data types after a reload} {
                r flushdb
                createComplexDataset r $numops
                set dump [csvdump r]
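                # DEBUG DIGEST returns a digest of the whole dataset:
                # identical datasets must produce identical digests.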
                set sha1 [r debug digest]
                r debug reload
                set sha1_after [r debug digest]
                if {$sha1 eq $sha1_after} {
                    set _ 1
                } else {
                    set newdump [csvdump r]
                    puts "Consistency test failed!"
                    puts "You can inspect the two dumps in /tmp/repldump*.txt"

                    set fd [open /tmp/repldump1.txt w]
                    puts $fd $dump
                    close $fd
                    set fd [open /tmp/repldump2.txt w]
                    puts $fd $newdump
                    close $fd

                    set _ 0
                }
            } {1}

            test {Same dataset digest if saving/reloading as AOF?} {
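                # Rewrite the AOF from the current dataset, load it back
                # with DEBUG LOADAOF, and compare the digests again.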
                r bgrewriteaof
                waitForBgrewriteaof r
                r debug loadaof
                set sha1_after [r debug digest]
                if {$sha1 eq $sha1_after} {
                    set _ 1
                } else {
                    set newdump [csvdump r]
                    puts "Consistency test failed!"
                    puts "You can inspect the two dumps in /tmp/aofdump*.txt"

                    set fd [open /tmp/aofdump1.txt w]
                    puts $fd $dump
                    close $fd
                    set fd [open /tmp/aofdump2.txt w]
                    puts $fd $newdump
                    close $fd

                    set _ 0
                }
            } {1}
        }
    }

    test {EXPIRES after a reload (snapshot + append only file rewrite)} {
        r flushdb
        r set x 10
        r expire x 1000
        r save
        r debug reload
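        # The TTL must survive the reload roughly intact; allow up to 100
        # seconds of slack for slow test runs.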
        set ttl [r ttl x]
        set e1 [expr {$ttl > 900 && $ttl <= 1000}]
        r bgrewriteaof
        waitForBgrewriteaof r
        r debug loadaof
        set ttl [r ttl x]
        set e2 [expr {$ttl > 900 && $ttl <= 1000}]
        list $e1 $e2
    } {1 1}

    test {EXPIRES after AOF reload (without rewrite)} {
        r flushdb
        r config set appendonly yes
        r set x somevalue
        r expire x 1000
        r setex y 2000 somevalue
        r set z somevalue
        r expireat z [expr {[clock seconds]+3000}]

        # Milliseconds variants
        r set px somevalue
        r pexpire px 1000000
        r psetex py 2000000 somevalue
        r set pz somevalue
        r pexpireat pz [expr {([clock seconds]+3000)*1000}]

        # Reload and check
        waitForBgrewriteaof r
        # We need to wait two seconds here to avoid false positives:
        # otherwise the DEBUG LOADAOF command may read a partial file.
        # Another solution would be to set the fsync policy to no, since
        # that prevents write() from being delayed by the completion of
        # fsync().
        after 2000
        r debug loadaof
        set ttl [r ttl x]
        assert {$ttl > 900 && $ttl <= 1000}
        set ttl [r ttl y]
        assert {$ttl > 1900 && $ttl <= 2000}
        set ttl [r ttl z]
        assert {$ttl > 2900 && $ttl <= 3000}
        set ttl [r ttl px]
        assert {$ttl > 900 && $ttl <= 1000}
        set ttl [r ttl py]
        assert {$ttl > 1900 && $ttl <= 2000}
        set ttl [r ttl pz]
        assert {$ttl > 2900 && $ttl <= 3000}
        r config set appendonly no
    }

    tags {protocol} {
        test {PIPELINING stresser (also a regression for the old epoll bug)} {
            set fd2 [socket $::host $::port]
            fconfigure $fd2 -encoding binary -translation binary
            puts -nonewline $fd2 "SELECT 9\r\n"
            flush $fd2
            gets $fd2

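            # Queue 100000 SET/GET pairs using the inline command syntax
            # without reading any reply, stressing the server's input and
            # output buffers.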
            for {set i 0} {$i < 100000} {incr i} {
                set q {}
                set val "0000${i}0000"
                append q "SET key:$i $val\r\n"
                puts -nonewline $fd2 $q
                set q {}
                append q "GET key:$i\r\n"
                puts -nonewline $fd2 $q
            }
            flush $fd2

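            # Drain the replies: a +OK status line for every SET, then for
            # every GET a $<len> bulk header, <len> payload bytes and the
            # trailing CRLF.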
            for {set i 0} {$i < 100000} {incr i} {
                gets $fd2 line
                gets $fd2 count
                set count [string range $count 1 end]
                set val [read $fd2 $count]
                read $fd2 2
            }
            close $fd2
            set _ 1
        } {1}
    }

    test {APPEND basics} {
        list [r append foo bar] [r get foo] \
             [r append foo 100] [r get foo]
    } {3 bar 6 bar100}

    test {APPEND basics, integer encoded values} {
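        # Appending to an integer-encoded value must force a conversion to
        # a plain string encoding.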
        set res {}
        r del foo
        r append foo 1
        r append foo 2
        lappend res [r get foo]
        r set foo 1
        r append foo 2
        lappend res [r get foo]
    } {12 12}

    test {APPEND fuzzing} {
        set err {}
        foreach type {binary alpha compr} {
            set buf {}
            r del x
            for {set i 0} {$i < 1000} {incr i} {
                set bin [randstring 0 10 $type]
                append buf $bin
                r append x $bin
            }
            # Use "ne" rather than "!=" so digit-only strings are compared
            # as strings and not coerced to numbers.
            if {$buf ne [r get x]} {
                set err "Expected '$buf' found '[r get x]'"
                break
            }
        }
        set _ $err
    } {}

    # Leave the user with a clean DB before exiting
    test {FLUSHDB} {
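        # The test suite uses DBs 9 and 10: flush both so the user is left
        # with empty databases.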
        set aux {}
        r select 9
        r flushdb
        lappend aux [r dbsize]
        r select 10
        r flushdb
        lappend aux [r dbsize]
    } {0 0}

    test {Perform a final SAVE to leave a clean DB on disk} {
        waitForBgsave r
        r save
    } {OK}
}