#!/usr/bin/env ruby
+# TODO (temporary here, we'll move this into the GitHub issues once the
+# initial redis-trib implementation is completed).
+#
+# - Make sure that if the rehashing fails in the middle, redis-trib will
+#   try to recover.
+# - When redis-trib performs a cluster check, if it detects a slot move in
+#   progress it should prompt the user to continue the move from where it
+#   stopped.
+# - Gracefully handle Ctrl+C in move_slot: ask the user whether they really
+#   want to stop while rehashing, and perform the best cleanup possible if
+#   the user forces the quit (see the sketch below).
+# - When doing "fix", set a global Fix flag to true, and every time there
+#   is something to fix prompt the user to fix the problem if it is
+#   automatically fixable. For instance:
+#   1) If there is a node that claims to be receiving a slot, or migrating
+#      a slot, but has no entries in that slot, fix it.
+#   2) If there is a node having keys in slots that are not owned by it,
+#      fix this condition by moving the entries to the right node.
+#   3) Perform more possibly slow tests about the state of the cluster.
+#   4) When an aborted slot migration is detected, fix it.
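+#
+# A possible shape for the Ctrl+C handling above (hypothetical sketch;
+# @moving_slot is an assumed flag that move_slot would set while a slot
+# migration is in progress):
+#
+#   Signal.trap("INT") {
+#     if @moving_slot
+#       print "Really stop while rehashing (yes/no)? "
+#       exit 1 if STDIN.gets.chop == "yes"
+#     else
+#       exit 1
+#     end
+#   }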
+
require 'rubygems'
require 'redis'
exit 1
end
@r = nil
- @host = s[0]
- @port = s[1]
- @slots = {}
- @dirty = false
+ @info = {}
+ @info[:host] = s[0]
+ @info[:port] = s[1]
+ @info[:slots] = {}
+ @dirty = false # True if we need to flush slots info into node.
+ @friends = []
+ end
+
+ def friends
+ @friends
+ end
+
+ def slots
+ @info[:slots]
end
def to_s
- "#{@host}:#{@port}"
+ "#{@info[:host]}:#{@info[:port]}"
end
def connect(o={})
+ return if @r
xputs "Connecting to node #{self}: "
begin
- @r = Redis.new(:host => @host, :port => @port)
+ @r = Redis.new(:host => @info[:host], :port => @info[:port])
@r.ping
rescue
puts "ERROR"
end
end
+ def load_info(o={})
+ self.connect
+ nodes = @r.cluster("nodes").split("\n")
+ nodes.each{|n|
+ # name addr flags role ping_sent ping_recv link_status slots
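+      # Example line (hypothetical values, node name truncated):
+      #   3fc8... 127.0.0.1:6380 master - 0 1318428930 connected 0-2047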
+ split = n.split
+ name,addr,flags,role,ping_sent,ping_recv,link_status = split[0..6]
+ slots = split[7..-1]
+ info = {
+ :name => name,
+ :addr => addr,
+ :flags => flags.split(","),
+ :role => role,
+ :ping_sent => ping_sent.to_i,
+ :ping_recv => ping_recv.to_i,
+ :link_status => link_status
+ }
+ if info[:flags].index("myself")
+ @info = @info.merge(info)
+ @info[:slots] = {}
+ slots.each{|s|
+ if s[0..0] == '['
+ # Fixme: for now skipping migration entries
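+          # (these entries look like [slot->-nodeid] for a slot being
+          # migrated out, and [slot-<-nodeid] for a slot being imported)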
+ elsif s.index("-")
+ start,stop = s.split("-")
+ self.add_slots((start.to_i)..(stop.to_i))
+ else
+ self.add_slots((s.to_i)..(s.to_i))
+ end
+ } if slots
+ @dirty = false
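+      # CLUSTER INFO returns "field:value" lines terminated by \r\n, hence
+      # the chop! below to strip the trailing \r from each value.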
+ @r.cluster("info").split("\n").each{|e|
+ k,v=e.split(":")
+ k = k.to_sym
+ v.chop!
+ if k != :cluster_state
+ @info[k] = v.to_i
+ else
+ @info[k] = v
+ end
+ }
+ elsif o[:getfriends]
+ @friends << info
+ end
+ }
+ end
+
def add_slots(slots)
slots.each{|s|
- @slots[s] = :new
+ @info[:slots][s] = :new
}
@dirty = true
end
def flush_node_config
return if !@dirty
new = []
- @slots.each{|s,val|
+ @info[:slots].each{|s,val|
if val == :new
new << s
- @slots[s] = true
+ @info[:slots][s] = true
end
}
@r.cluster("addslots",*new)
# First step: we want an increasing array of integers
# for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30]
- slots = @slots.keys.sort
+ slots = @info[:slots].keys.sort
+    # As we want to aggregate adjacent slots we convert all the
+    # slot integers into ranges (with just one element)
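+    # For example [1..5, 8..9, 20..25, 30..30] is rendered as the string
+    # "1-5,8-9,20-25,30".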
x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}"
}.join(",")
- "#{self.to_s.ljust(25)} slots:#{slots}"
+ "[#{@info[:cluster_state].upcase}] #{self.info[:name]} #{self.to_s} slots:#{slots} (#{self.slots.length} slots)"
end
def info
- {
- :host => @host,
- :port => @port,
- :slots => @slots,
- :dirty => @dirty
- }
+ @info
end
def is_dirty?
@nodes << node
end
- def create_cluster
- puts "Creating cluster"
- ARGV[1..-1].each{|n|
- node = ClusterNode.new(n)
- node.connect(:abort => true)
- node.assert_cluster
- node.assert_empty
- add_node(node)
+ def get_node_by_name(name)
+ @nodes.each{|n|
+ return n if n.info[:name] == name.downcase
}
- puts "Performing hash slots allocation on #{@nodes.length} nodes..."
- alloc_slots
- show_nodes
- yes_or_die "Can I set the above configuration?"
- flush_nodes_config
- puts "** Nodes configuration updated"
- puts "** Sending CLUSTER MEET messages to join the cluster"
- join_cluster
- check_cluster
+ return nil
end
def check_cluster
- puts "Performing Cluster Check (node #{ARGV[1]})"
- node = ClusterNode.new(ARGV[1])
- node.connect(:abort => true)
- node.assert_cluster
- node.add_slots(10..15)
- node.add_slots(30..30)
- node.add_slots(5..5)
- add_node(node)
+ puts "Performing Cluster Check (using node #{@nodes[0]})"
+ errors = []
show_nodes
+ # Check if all the slots are covered
+ slots = {}
+ @nodes.each{|n|
+ slots = slots.merge(n.slots)
+ }
+ if slots.length == 4096
+ puts "[OK] All 4096 slots covered."
+ else
+ errors << "[ERR] Not all 4096 slots are covered by nodes."
+ puts errors[-1]
+ end
+ return errors
end
def alloc_slots
exit 1
end
end
+
+ def load_cluster_info_from_node(nodeaddr)
+    node = ClusterNode.new(nodeaddr)
+ node.connect(:abort => true)
+ node.assert_cluster
+ node.load_info(:getfriends => true)
+ add_node(node)
+ node.friends.each{|f|
+ fnode = ClusterNode.new(f[:addr])
+ fnode.connect()
+ fnode.load_info()
+ add_node(fnode)
+ }
+ end
+
+ # Given a list of source nodes return a "resharding plan"
+ # with what slots to move in order to move "numslots" slots to another
+ # instance.
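+  # The plan is returned as an array of entries in the form
+  # {:source => <ClusterNode>, :slot => <slot number>}.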
+ def compute_reshard_table(sources,numslots)
+ moved = []
+    # Sort from bigger to smaller instance, for two reasons:
+    # 1) If we take fewer slots than there are instances, it is better to
+    #    start getting slots from the biggest instances.
+    # 2) We take one slot more from the first instance in the case of
+    #    imperfect divisibility. For instance, if we have 3 nodes and need
+    #    to move 10 slots, we take 4 from the first and 3 from each of the
+    #    others. So the biggest instance is always the first.
+ sources = sources.sort{|a,b| b.slots.length <=> a.slots.length}
+ source_tot_slots = sources.inject(0) {|sum,source| sum+source.slots.length}
+ sources.each_with_index{|s,i|
+ # Every node will provide a number of slots proportional to the
+ # slots it has assigned.
+ n = (numslots.to_f/source_tot_slots*s.slots.length)
+ if i == 0
+ n = n.ceil
+ else
+ n = n.floor
+ end
+ s.slots.keys.sort[(0...n)].each{|slot|
+ if moved.length < numslots
+ moved << {:source => s, :slot => slot}
+ end
+ }
+ }
+ return moved
+ end
+
+ def show_reshard_table(table)
+ table.each{|e|
+ puts " Moving slot #{e[:slot]} from #{e[:source].info[:name]}"
+ }
+ end
+
+ def move_slot(source,target,slot,o={})
+    # We start marking the slot as importing in the destination node,
+    # and the slot as migrating in the source node. Note that the order of
+ # the operations is important, as otherwise a client may be redirected to
+ # the target node that does not yet know it is importing this slot.
+ print "Moving slot #{slot} from #{source.info_string}: "; STDOUT.flush
+ target.r.cluster("setslot",slot,"importing",source.info[:name])
+ source.r.cluster("setslot",slot,"migrating",source.info[:name])
+ # Migrate all the keys from source to target using the MIGRATE command
+ while true
+ keys = source.r.cluster("getkeysinslot",slot,10)
+ break if keys.length == 0
+ keys.each{|key|
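+        # MIGRATE host port key destination-db timeout: atomically move the
+        # key to the target instance (destination db 0, 1000 ms timeout).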
+ source.r.migrate(target.info[:host],target.info[:port],key,0,1000)
+ print "." if o[:verbose]
+ STDOUT.flush
+ }
+ end
+ puts
+ # Set the new node as the owner of the slot in all the known nodes.
+ @nodes.each{|n|
+ n.r.cluster("setslot",slot,"node",target.info[:name])
+ }
+ end
+
+ # redis-trib subcommands implementations
+
+ def check_cluster_cmd
+ load_cluster_info_from_node(ARGV[1])
+ check_cluster
+ end
+
+ def reshard_cluster_cmd
+ load_cluster_info_from_node(ARGV[1])
+ errors = check_cluster
+ if errors.length != 0
+ puts "Please fix your cluster problems before resharding."
+ exit 1
+ end
+ numslots = 0
+ while numslots <= 0 or numslots > 4096
+ print "How many slots do you want to move (from 1 to 4096)? "
+ numslots = STDIN.gets.to_i
+ end
+ target = nil
+ while not target
+ print "What is the receiving node ID? "
+ target = get_node_by_name(STDIN.gets.chop)
+ if not target
+ puts "The specified node is not known, please retry."
+ end
+ end
+ sources = []
+ puts "Please enter all the source node IDs."
+ puts " Type 'all' to use all the nodes as source nodes for the hash slots."
+ puts " Type 'done' once you entered all the source nodes IDs."
+ while true
+ print "Source node ##{sources.length+1}:"
+ line = STDIN.gets.chop
+ src = get_node_by_name(line)
+ if line == "done"
+ if sources.length == 0
+ puts "No source nodes given, operation aborted"
+ exit 1
+ else
+ break
+ end
+ elsif line == "all"
+ @nodes.each{|n|
+ next if n.info[:name] == target.info[:name]
+ sources << n
+ }
+ break
+ elsif not src
+ puts "The specified node is not known, please retry."
+ elsif src.info[:name] == target.info[:name]
+ puts "It is not possible to use the target node as source node."
+ else
+ sources << src
+ end
+ end
+ puts "\nReady to move #{numslots} slots."
+ puts " Source nodes:"
+ sources.each{|s| puts " "+s.info_string}
+ puts " Destination node:"
+ puts " #{target.info_string}"
+ reshard_table = compute_reshard_table(sources,numslots)
+ puts " Resharding plan:"
+ show_reshard_table(reshard_table)
+ print "Do you want to proceed with the proposed reshard plan (yes/no)? "
+ yesno = STDIN.gets.chop
+ exit(1) if (yesno != "yes")
+ reshard_table.each{|e|
+ move_slot(e[:source],target,e[:slot],:verbose=>true)
+ }
+ end
+
+ def create_cluster_cmd
+ puts "Creating cluster"
+ ARGV[1..-1].each{|n|
+ node = ClusterNode.new(n)
+ node.connect(:abort => true)
+ node.assert_cluster
+ node.load_info
+ node.assert_empty
+ add_node(node)
+ }
+ puts "Performing hash slots allocation on #{@nodes.length} nodes..."
+ alloc_slots
+ show_nodes
+ yes_or_die "Can I set the above configuration?"
+ flush_nodes_config
+ puts "** Nodes configuration updated"
+ puts "** Sending CLUSTER MEET messages to join the cluster"
+ join_cluster
+ check_cluster
+ end
end
COMMANDS={
- "create" => ["create_cluster", -2, "host1:port host2:port ... hostN:port"],
- "check" => ["check_cluster", 2, "host:port"]
+ "create" => ["create_cluster_cmd", -2, "host1:port host2:port ... hostN:port"],
+ "check" => ["check_cluster_cmd", 2, "host:port"],
+ "reshard" => ["reshard_cluster_cmd", 2, "host:port"]
}
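+# Example invocations (assuming the script is saved as redis-trib.rb):
+#
+#   ./redis-trib.rb create 127.0.0.1:6379 127.0.0.1:6380 127.0.0.1:6381
+#   ./redis-trib.rb check 127.0.0.1:6379
+#   ./redis-trib.rb reshard 127.0.0.1:6379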
# Sanity check