# redis-trib.rb — Redis Cluster creation and check utility.
#!/usr/bin/env ruby

require 'rubygems'
require 'redis'

# Total number of hash slots a Redis Cluster distributes among its nodes
# (see RedisTrib#alloc_slots and RedisTrib#check_cluster).
ClusterHashSlots = 4096
7 | ||
# Print +s+ without a trailing newline and flush immediately, so that
# progress messages ("Connecting to node ...: ") appear as they happen.
def xputs(s)
  # Fix: use `print` instead of `printf`. printf treats the message as a
  # format string, so any '%' sequence in it (e.g. in a node name) would
  # be interpreted as a directive and raise ArgumentError.
  print s
  STDOUT.flush
end
12 | ||
# Represents a single Redis Cluster node and wraps the client connection
# used to inspect and configure it.
class ClusterNode
  # addr must be a "host:port" string; any other shape aborts the program.
  def initialize(addr)
    s = addr.split(":")
    if s.length != 2
      puts "Invalid node name #{addr}"
      exit 1
    end
    @r = nil                 # Redis connection; nil until #connect succeeds
    @info = {}
    @info[:host] = s[0]
    @info[:port] = s[1]
    @info[:slots] = {}
    @dirty = false           # true if we need to flush slots info into node
    @friends = []
  end

  # @friends: node-info hashes collected by #load_info for the other
  # cluster members. @info: this node's own metadata. @r: raw connection.
  attr_reader :friends, :info, :r

  # Hash of slot number -> state (:new until flushed, then true).
  def slots
    @info[:slots]
  end

  def to_s
    "#{@info[:host]}:#{@info[:port]}"
  end

  # Open the connection and PING to verify it. With :abort => true a
  # failure terminates the program; otherwise @r is left nil.
  def connect(o = {})
    return if @r
    xputs "Connecting to node #{self}: "
    begin
      @r = Redis.new(:host => @info[:host], :port => @info[:port])
      @r.ping
    rescue
      puts "ERROR"
      puts "Sorry, can't connect to node #{self}"
      exit 1 if o[:abort]
      @r = nil
      # Fix: bail out here — the original fell through and printed "OK"
      # even though the connection failed.
      return
    end
    puts "OK"
  end

  # Abort unless the node was started with cluster support enabled.
  def assert_cluster
    info = @r.info
    if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0
      puts "Error: Node #{self} is not configured as a cluster node."
      exit 1
    end
  end

  # Abort unless the node knows no other nodes and holds no keys in db 0.
  def assert_empty
    if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) ||
       (@r.info['db0'])
      puts "Error: Node #{self} is not empty. Either the node already knows other nodes (check with nodes-info) or contains some key in database 0."
      exit 1
    end
  end

  # Parse CLUSTER NODES output. The line flagged "myself" populates @info
  # (including the served slot ranges and CLUSTER INFO fields); with
  # :getfriends => true every other line is accumulated into @friends.
  def load_info(o = {})
    self.connect
    nodes = @r.cluster("nodes").split("\n")
    nodes.each { |n|
      # Line format: name addr flags role ping_sent ping_recv link_status slots
      name, addr, flags, role, ping_sent, ping_recv, link_status, slots = n.split(" ")
      info = {
        :name => name,
        :addr => addr,
        :flags => flags.split(","),
        :role => role,
        :ping_sent => ping_sent.to_i,
        :ping_recv => ping_recv.to_i,
        :link_status => link_status
      }
      if info[:flags].index("myself")
        @info = @info.merge(info)
        @info[:slots] = {}
        # Fix: a node serving no slots has no slots field at all, which
        # left `slots` nil and crashed the original on nil.split.
        (slots || "").split(",").each { |s|
          if s.index("-")
            start, stop = s.split("-")
            self.add_slots((start.to_i)..(stop.to_i))
          else
            self.add_slots((s.to_i)..(s.to_i))
          end
        }
        @dirty = false
        @r.cluster("info").split("\n").each { |e|
          k, v = e.split(":")
          k = k.to_sym
          # cluster_state stays a string; every other field is numeric.
          if k != :cluster_state
            @info[k] = v.to_i
          else
            @info[k] = v
          end
        }
      elsif o[:getfriends]
        @friends << info
      end
    }
  end

  # Mark every slot in the given range as :new (to be flushed later).
  def add_slots(slots)
    slots.each { |s|
      @info[:slots][s] = :new
    }
    @dirty = true
  end

  # Send CLUSTER ADDSLOTS for every slot still in the :new state.
  def flush_node_config
    return if !@dirty
    new_slots = []
    @info[:slots].each { |s, val|
      if val == :new
        new_slots << s
        @info[:slots][s] = true
      end
    }
    @r.cluster("addslots", *new_slots)
    @dirty = false
  end

  def info_string
    # We want to display the hash slots assigned to this node
    # as ranges, like in: "1-5,8-9,20-25,30"
    #
    # Note: this could be easily written without side effects,
    # we use 'slots' just to split the computation into steps.

    # First step: an increasing array of integers,
    # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30]
    slots = @info[:slots].keys.sort

    # To aggregate adjacent slots, convert each integer into a
    # one-element range: [1..1, 2..2, ...]
    slots.map! { |x| x..x }

    # Merge ranges whose endpoints are adjacent.
    slots = slots.reduce([]) { |a, b|
      if !a.empty? && b.first == (a[-1].last) + 1
        a[0..-2] + [(a[-1].first)..(b.last)]
      else
        a + [b]
      end
    }

    # Render single-element ranges as a number, real ranges as
    # "start-end", joined with commas.
    slots = slots.map { |x|
      x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}"
    }.join(",")

    "#{self.to_s.ljust(25)} slots:#{slots}"
  end

  # True when slot assignments have changed but not yet been flushed.
  def is_dirty?
    @dirty
  end
end
181 | ||
# Orchestrates the redis-trib subcommands over a set of ClusterNode objects.
class RedisTrib
  def initialize
    @nodes = []
  end

  # Validate the CLI argument count: a positive req_args demands exactly
  # that many arguments, a negative one demands at least |req_args|.
  # Aborts the program on mismatch.
  def check_arity(req_args, num_args)
    # Fix: use && instead of `and` (low-precedence `and` is a known
    # precedence trap in boolean expressions).
    if (req_args > 0 && num_args != req_args) ||
       (req_args < 0 && num_args < req_args.abs)
      puts "Wrong number of arguments for specified sub command"
      exit 1
    end
  end

  def add_node(node)
    @nodes << node
  end

  # Display the nodes and verify that, together, they cover every slot.
  def check_cluster
    puts "Performing Cluster Check (using node #{@nodes[0]})"
    show_nodes
    # Check if all the slots are covered
    slots = {}
    @nodes.each { |n|
      slots = slots.merge(n.slots)
    }
    # Fix: compare against ClusterHashSlots instead of a hardcoded 4096,
    # so changing the constant keeps this check (and its messages) correct.
    if slots.length == ClusterHashSlots
      puts "[OK] All #{ClusterHashSlots} slots covered."
    else
      puts "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes."
    end
  end

  # Evenly partition the slot space across @nodes; the last node absorbs
  # the remainder so every slot is assigned.
  def alloc_slots
    slots_per_node = ClusterHashSlots / @nodes.length
    i = 0
    @nodes.each { |n|
      first = i * slots_per_node
      last = first + slots_per_node - 1
      last = ClusterHashSlots - 1 if i == @nodes.length - 1
      n.add_slots first..last
      i += 1
    }
  end

  def flush_nodes_config
    @nodes.each { |n|
      n.flush_node_config
    }
  end

  def show_nodes
    @nodes.each { |n|
      puts n.info_string
    }
  end

  def join_cluster
    # We use a brute force approach to make sure the node will meet
    # each other, that is, sending CLUSTER MEET messages to all the nodes
    # about the very same node.
    # Thanks to gossip this information should propagate across all the
    # cluster in a matter of seconds.
    first = false
    @nodes.each { |n|
      if !first then first = n.info; next; end # Skip the first node
      n.r.cluster("meet", first[:host], first[:port])
    }
  end

  # Prompt the operator; anything but a literal "yes" aborts the program.
  def yes_or_die(msg)
    print "#{msg} (type 'yes' to accept): "
    STDOUT.flush
    if !(STDIN.gets.chomp.downcase == "yes")
      puts "Aborting..."
      exit 1
    end
  end

  # redis-trib subcommands implementations

  # `check host:port`: connect to one node, discover its friends via
  # gossip info, and verify full slot coverage.
  def check_cluster_cmd
    node = ClusterNode.new(ARGV[1])
    node.connect(:abort => true)
    node.assert_cluster
    node.load_info(:getfriends => true)
    add_node(node)
    node.friends.each { |f|
      fnode = ClusterNode.new(f[:addr])
      fnode.connect()
      fnode.load_info()
      add_node(fnode)
    }
    check_cluster
  end

  # `create host1:port ... hostN:port`: verify every node is an empty
  # cluster node, allocate slots, confirm with the operator, flush the
  # configuration and join the nodes via CLUSTER MEET.
  def create_cluster_cmd
    puts "Creating cluster"
    ARGV[1..-1].each { |n|
      node = ClusterNode.new(n)
      node.connect(:abort => true)
      node.assert_cluster
      node.assert_empty
      add_node(node)
    }
    puts "Performing hash slots allocation on #{@nodes.length} nodes..."
    alloc_slots
    show_nodes
    yes_or_die "Can I set the above configuration?"
    flush_nodes_config
    puts "** Nodes configuration updated"
    puts "** Sending CLUSTER MEET messages to join the cluster"
    join_cluster
    check_cluster
  end
end
297 | ||
# Subcommand table: name => [handler method, arity spec, usage text].
# A negative arity means "at least that many arguments".
COMMANDS = {
  "create" => ["create_cluster_cmd", -2, "host1:port host2:port ... hostN:port"],
  "check" => ["check_cluster_cmd", 2, "host:port"]
}

# Sanity check: with no arguments, print usage for every subcommand and quit.
if ARGV.empty?
  puts "Usage: redis-trib <command> <arguments ...>"
  puts
  COMMANDS.each do |name, spec|
    puts " #{name.ljust(20)} #{spec[2]}"
  end
  puts
  exit 1
end

rt = RedisTrib.new
handler = COMMANDS[ARGV[0].downcase]
unless handler
  puts "Unknown redis-trib subcommand '#{ARGV[0]}'"
  exit 1
end
rt.check_arity(handler[1], ARGV.length)

# Dispatch
rt.send(handler[0])