s -1 role
} {slave}
- test {MASTER and SLAVE dataset should be identical after complex ops} {
- createComplexDataset r 10000
- after 500
- if {[r debug digest] ne [r -1 debug digest]} {
- set csv1 [csvdump r]
- set csv2 [csvdump {r -1}]
- set fd [open /tmp/repldump1.txt w]
- puts -nonewline $fd $csv1
- close $fd
- set fd [open /tmp/repldump2.txt w]
- puts -nonewline $fd $csv2
- close $fd
- puts "Master - Slave inconsistency"
- puts "Run diff -u against /tmp/repldump*.txt for more info"
- }
+ test {BRPOPLPUSH replication, when blocking against empty list} {
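+ # A client blocks on the empty list 'a'; the LPUSH below unblocks it, and the
+ # resulting write must reach the slave so that both digests match.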
+ set rd [redis_deferring_client]
+ $rd brpoplpush a b 5
+ r lpush a foo
+ after 1000
assert_equal [r debug digest] [r -1 debug digest]
}
- test {MASTER and SLAVE consistency with expire} {
- createComplexDataset r 50000 useexpire
- after 4000 ;# Make sure everything expired before taking the digest
- r keys * ;# Force DEL syntesizing to slave
- after 1000 ;# Wait another second. Now everything should be file.
- if {[r debug digest] ne [r -1 debug digest]} {
- set csv1 [csvdump r]
- set csv2 [csvdump {r -1}]
- set fd [open /tmp/repldump1.txt w]
- puts -nonewline $fd $csv1
- close $fd
- set fd [open /tmp/repldump2.txt w]
- puts -nonewline $fd $csv2
- close $fd
- puts "Master - Slave inconsistency"
- puts "Run diff -u against /tmp/repldump*.txt for more info"
- }
+ test {BRPOPLPUSH replication, list exists} {
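+ # Same consistency check, but the source list already has elements, so
+ # BRPOPLPUSH is served immediately without blocking.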
+ set rd [redis_deferring_client]
+ r lpush c 1
+ r lpush c 2
+ r lpush c 3
+ $rd brpoplpush c d 5
+ after 1000
assert_equal [r debug digest] [r -1 debug digest]
}
}
test {SET on the master should immediately propagate} {
r -1 set mykey bar
+ if {$::valgrind} {after 2000}
r 0 get mykey
} {bar}
+
+ test {FLUSHALL should replicate} {
+ r -1 flushall
+ if {$::valgrind} {after 2000}
+ list [r -1 dbsize] [r 0 dbsize]
+ } {0 0}
+ }
+}
+
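+# Spawn a background tclsh process that keeps writing to the Redis instance at
+# host:port for the given number of seconds. The pid of the background process
+# is returned and used as the handle.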
+proc start_write_load {host port seconds} {
+ exec tclsh8.5 tests/helpers/gen_write_load.tcl $host $port $seconds &
+}
+
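+# Stop a background write load process given the pid returned by
+# start_write_load.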
+proc stop_write_load {handle} {
+ catch {exec /bin/kill -9 $handle}
+}
+
+start_server {tags {"repl"}} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ set slaves {}
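+ # Keep the master under constant write traffic from five concurrent
+ # background load generators while the slaves attach and sync.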
+ set load_handle0 [start_write_load $master_host $master_port 20]
+ set load_handle1 [start_write_load $master_host $master_port 20]
+ set load_handle2 [start_write_load $master_host $master_port 20]
+ set load_handle3 [start_write_load $master_host $master_port 20]
+ set load_handle4 [start_write_load $master_host $master_port 20]
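+ # Let the write load populate the master for a couple of seconds before the
+ # slaves are started.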
+ after 2000
+ start_server {} {
+ lappend slaves [srv 0 client]
+ start_server {} {
+ lappend slaves [srv 0 client]
+ start_server {} {
+ lappend slaves [srv 0 client]
+ test "Connect multiple slaves at the same time (issue #141)" {
+ [lindex $slaves 0] slaveof $master_host $master_port
+ [lindex $slaves 1] slaveof $master_host $master_port
+ [lindex $slaves 2] slaveof $master_host $master_port
+
+ # Wait for all three slaves to reach the "online" state
+ set retry 100
+ while {$retry} {
+ set info [r -3 info]
+ if {[string match {*slave0:*,online*slave1:*,online*slave2:*,online*} $info]} {
+ break
+ } else {
+ incr retry -1
+ after 100
+ }
+ }
+ if {$retry == 0} {
+ error "assertion:Slaves not correctly synchronized"
+ }
+ stop_write_load $load_handle0
+ stop_write_load $load_handle1
+ stop_write_load $load_handle2
+ stop_write_load $load_handle3
+ stop_write_load $load_handle4
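+ # The write load is stopped: give the slaves up to 10 seconds to consume the
+ # remaining replication stream before comparing dataset digests.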
+ set retry 10
+ while {$retry && ([$master debug digest] ne [[lindex $slaves 0] debug digest])} {
+ after 1000
+ incr retry -1
+ }
+ set digest [$master debug digest]
+ set digest0 [[lindex $slaves 0] debug digest]
+ set digest1 [[lindex $slaves 1] debug digest]
+ set digest2 [[lindex $slaves 2] debug digest]
+ assert {$digest ne "0000000000000000000000000000000000000000"}
+ assert {$digest eq $digest0}
+ assert {$digest eq $digest1}
+ assert {$digest eq $digest2}
+ }
+ }
+ }
}
}