/*
 * Copyright (c) 1993-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Timer interrupt callout module.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/processor.h>
#include <kern/etimer.h>
#include <kern/timer_call.h>
#include <kern/timer_queue.h>
#include <kern/call_entry.h>

#include <sys/kdebug.h>

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
#include <mach/sdt.h>
#endif


#if DEBUG
#define TIMER_ASSERT	1
#endif

//#define TIMER_ASSERT	1
//#define TIMER_DBG	1

#if TIMER_DBG
#define DBG(x...)	kprintf("DBG: " x);
#else
#define DBG(x...)
#endif

lck_grp_t		timer_call_lck_grp;
lck_attr_t		timer_call_lck_attr;
lck_grp_attr_t		timer_call_lck_grp_attr;


#define timer_call_lock_spin(queue)	\
	lck_mtx_lock_spin_always(&queue->lock_data)

#define timer_call_unlock(queue)	\
	lck_mtx_unlock_always(&queue->lock_data)


#define QUEUE(x)	((queue_t)(x))
#define MPQUEUE(x)	((mpqueue_head_t *)(x))
#define TIMER_CALL(x)	((timer_call_t)(x))

static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint32_t flags);
boolean_t	mach_timer_coalescing_enabled = TRUE;

mpqueue_head_t	*timer_call_enqueue_deadline_unlocked(
			timer_call_t		call,
			mpqueue_head_t		*queue,
			uint64_t		deadline);

mpqueue_head_t	*timer_call_dequeue_unlocked(
			timer_call_t		call);


void
timer_call_initialize(void)
{
	lck_attr_setdefault(&timer_call_lck_attr);
	lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
	lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);
}


void
timer_call_initialize_queue(mpqueue_head_t *queue)
{
	DBG("timer_call_initialize_queue(%p)\n", queue);
	mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr);
}


void
timer_call_setup(
	timer_call_t		call,
	timer_call_func_t	func,
	timer_call_param_t	param0)
{
	DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);
	call_entry_setup(CE(call), func, param0);
	simple_lock_init(&(call)->lock, 0);
	call->async_dequeue = FALSE;
}

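/*
 * Illustrative client usage (a hypothetical sketch, not code from this
 * file): a callout is initialized once with timer_call_setup() and then
 * armed, re-armed or cancelled as needed.  timer_call_data_t comes from
 * kern/timer_call.h and clock_interval_to_deadline() from kern/clock.h;
 * my_func and my_param* are placeholders.
 *
 *	static timer_call_data_t	my_callout;
 *	uint64_t			deadline;
 *
 *	timer_call_setup(&my_callout, my_func, my_param0);
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	timer_call_enter1(&my_callout, my_param1, deadline, 0);
 *	...
 *	if (timer_call_cancel(&my_callout))
 *		... the callout was dequeued before it fired ...
 */
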
/*
 * Timer call entry locking model
 * ==============================
 *
 * Timer call entries are linked on per-cpu timer queues which are protected
 * by the queue lock and the call entry lock. The locking protocol is:
 *
 *  0) The canonical locking order is timer call entry followed by queue.
 *
 *  1) With only the entry lock held, entry.queue is valid:
 *    1a) NULL: the entry is not queued, or
 *    1b) non-NULL: this queue must be locked before the entry is modified.
 *        After locking the queue, the call.async_dequeue flag must be checked:
 *    1c) TRUE: the entry was removed from the queue by another thread
 *	  and we must NULL the entry.queue and reset this flag, or
 *    1d) FALSE: (i.e. queued), the entry can be manipulated.
 *
 *  2) If a queue lock is obtained first, the queue is stable:
 *    2a) If a try-lock of a queued entry succeeds, the call can be operated on
 *	  and dequeued.
 *    2b) If a try-lock fails, it indicates that another thread is attempting
 *	  to change the entry and move it to a different position in this queue
 *	  or to a different queue. The entry can be dequeued but it should not
 *	  be operated upon since it is being changed. Furthermore, we don't
 *	  NULL the entry.queue pointer (protected by the entry lock we don't
 *	  own). Instead, we set the async_dequeue flag -- see (1c).
 */
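
/*
 * For concreteness, a minimal sketch of the entry-lock-first path (case 1
 * of the model above).  This helper is hypothetical and compiled out; it
 * exists only to illustrate the protocol in terms of this file's own
 * primitives.
 */
#if 0	/* illustration only */
static mpqueue_head_t *
timer_call_lock_queue_sketch(timer_call_t call)
{
	mpqueue_head_t	*queue;

	simple_lock(&call->lock);		/* (0) entry lock first */
	queue = MPQUEUE(CE(call)->queue);
	if (queue == NULL)			/* (1a) not queued */
		return (NULL);			/* caller drops entry lock */
	timer_call_lock_spin(queue);		/* (1b) then the queue lock */
	if (call->async_dequeue) {		/* (1c) raced with a dequeue */
		call->async_dequeue = FALSE;
		CE(call)->queue = NULL;
		timer_call_unlock(queue);
		return (NULL);			/* entry is no longer queued */
	}
	return (queue);				/* (1d) entry may be manipulated */
}
#endif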

/*
 * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline()
 * cast between pointer types (mpqueue_head_t *) and (queue_t) so that
 * we can use the call_entry_dequeue() and call_entry_enqueue_deadline()
 * methods to operate on timer_call structs as if they are call_entry structs.
 * These structures are identical except for their queue head pointer fields.
 *
 * In the debug case, we assert that the timer call locking protocol
 * is being obeyed.
 */
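/*
 * (Illustrative note: throughout this file CE() views a timer_call_t as a
 * call_entry_t to reach the fields the two layouts share -- e.g.
 * CE(call)->deadline -- while MPQUEUE() recovers the typed per-cpu queue
 * head from the generic queue pointer stored in the entry, as in
 * MPQUEUE(CE(call)->queue).)
 */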
#if TIMER_ASSERT
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_dequeue() "
			"entry %p is not locked\n", entry);
	/*
	 * XXX The queue lock is actually a mutex in spin mode
	 *     but there's no way to test for it being held
	 *     so we pretend it's a spinlock!
	 */
	if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
		panic("_call_entry_dequeue() "
			"queue %p is not locked\n", old_queue);

	call_entry_dequeue(CE(entry));

	return (old_queue);
}

static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	/* XXX More lock pretense: */
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	if (old_queue != NULL && old_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", old_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	return (old_queue);
}

#else

static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	return MPQUEUE(call_entry_dequeue(CE(entry)));
}

static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	return MPQUEUE(call_entry_enqueue_deadline(CE(entry),
						   QUEUE(queue), deadline));
}

#endif

#if TIMER_ASSERT
unsigned timer_call_enqueue_deadline_unlocked_async1;
unsigned timer_call_enqueue_deadline_unlocked_async2;
#endif
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t		call,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): null queue pointer and reset flag */
			call->async_dequeue = FALSE;
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
		} else if (old_queue != queue) {
			(void)remque(qe(entry));
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue != queue) {
			timer_call_unlock(old_queue);
			timer_call_lock_spin(queue);
		}
	} else {
		timer_call_lock_spin(queue);
	}

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_call_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}

#if TIMER_ASSERT
unsigned timer_call_dequeue_unlocked_async1;
unsigned timer_call_dequeue_unlocked_async2;
#endif
mpqueue_head_t *
timer_call_dequeue_unlocked(
	timer_call_t		call)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_dequeue_unlocked(%p)\n", call);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): null queue pointer and reset flag */
			call->async_dequeue = FALSE;
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async1++;
#endif
		} else {
			(void)remque(qe(entry));
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async2++;
#endif
		}
		entry->queue = NULL;
		timer_call_unlock(old_queue);
	}
	simple_unlock(&call->lock);
	return (old_queue);
}

static boolean_t
timer_call_enter_internal(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint32_t		flags)
{
	mpqueue_head_t		*queue;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t		slop = 0;

	s = splclock();

	call->soft_deadline = deadline;
	call->flags = flags;

	if ((flags & TIMER_CALL_CRITICAL) == 0 &&
	    mach_timer_coalescing_enabled) {
		slop = timer_call_slop(deadline);
		deadline += slop;
	}

	queue = timer_queue_assign(deadline);

	old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);

	CE(call)->param1 = param1;

	splx(s);

	return (old_queue != NULL);
}
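
/*
 * Unless TIMER_CALL_CRITICAL is set (or coalescing is globally disabled
 * via mach_timer_coalescing_enabled), the queued deadline is padded by
 * timer_call_slop() so that nearby timers can be batched; the caller's
 * requested time is preserved in soft_deadline, which is what
 * timer_queue_expire() compares against.
 */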

boolean_t
timer_call_enter(
	timer_call_t		call,
	uint64_t		deadline,
	uint32_t		flags)
{
	return timer_call_enter_internal(call, NULL, deadline, flags);
}

boolean_t
timer_call_enter1(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint32_t		flags)
{
	return timer_call_enter_internal(call, param1, deadline, flags);
}

boolean_t
timer_call_cancel(
	timer_call_t		call)
{
	mpqueue_head_t		*old_queue;
	spl_t			s;

	s = splclock();

	old_queue = timer_call_dequeue_unlocked(call);

	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (!queue_empty(&old_queue->head))
			timer_queue_cancel(old_queue, CE(call)->deadline,
			    CE(queue_first(&old_queue->head))->deadline);
		else
			timer_queue_cancel(old_queue, CE(call)->deadline, UINT64_MAX);
		timer_call_unlock(old_queue);
	}
	splx(s);

	return (old_queue != NULL);
}

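/*
 * Note that timer_call_enter(), timer_call_enter1() and timer_call_cancel()
 * all report whether the callout was pending when the operation began:
 * TRUE means an armed timer was rescheduled or removed, FALSE that it was
 * unqueued (never armed, already expired, or already cancelled).
 */
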
uint32_t	timer_queue_shutdown_lock_skips;
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_call_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			(void) remque(qe(call));
			call->async_dequeue = TRUE;
			timer_call_unlock(queue);
			continue;
		}

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_call_unlock(queue);

		/* and queue it on new */
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_call_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_call_unlock(new_queue);

		simple_unlock(&call->lock);
	}

	timer_call_unlock(queue);
	splx(s);
}

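/*
 * In the lock-skip path above, the entry is unlinked but its queue
 * back-pointer is deliberately left intact; the thread holding the entry
 * lock will observe async_dequeue and complete the cleanup, per case (1c)
 * of the locking model.
 */
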
uint32_t	timer_queue_expire_lock_skips;
uint64_t
timer_queue_expire(
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	timer_call_t	call;

	DBG("timer_queue_expire(%p,)\n", queue);

	timer_call_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= deadline) {
			timer_call_func_t	func;
			timer_call_param_t	param0, param1;

			if (!simple_lock_try(&call->lock)) {
				/* case (2b) lock inversion, dequeue and skip */
				timer_queue_expire_lock_skips++;
				(void) remque(qe(call));
				call->async_dequeue = TRUE;
				continue;
			}

			timer_call_entry_dequeue(call);

			func = CE(call)->func;
			param0 = CE(call)->param0;
			param1 = CE(call)->param1;

			simple_unlock(&call->lock);
			timer_call_unlock(queue);

			KERNEL_DEBUG_CONSTANT(DECR_TIMER_CALLOUT | DBG_FUNC_START,
					      func,
					      param0,
					      param1, 0, 0);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
			DTRACE_TMR3(callout__start, timer_call_func_t, func,
				    timer_call_param_t, param0,
				    timer_call_param_t, param1);
#endif

			(*func)(param0, param1);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
			DTRACE_TMR3(callout__end, timer_call_func_t, func,
				    timer_call_param_t, param0,
				    timer_call_param_t, param1);
#endif

			KERNEL_DEBUG_CONSTANT(DECR_TIMER_CALLOUT | DBG_FUNC_END,
					      func,
					      param0,
					      param1, 0, 0);

			timer_call_lock_spin(queue);
		}
		else
			break;
	}

	if (!queue_empty(&queue->head))
		deadline = CE(call)->deadline;
	else
		deadline = UINT64_MAX;

	timer_call_unlock(queue);

	return (deadline);
}
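
/*
 * The deadline returned above is that of the earliest remaining entry in
 * the queue (UINT64_MAX if it drained); the caller -- in practice the
 * per-cpu etimer layer -- is expected to re-arm the hardware timer with it.
 */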


extern int serverperfmode;
uint32_t	timer_queue_migrate_lock_skips;
/*
 * timer_queue_migrate() is called by etimer_queue_migrate()
 * to move timer requests from the local processor (queue_from)
 * to a target processor's (queue_to).
 */
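/*
 * Return values (see the code below): a positive count of timers migrated,
 * or zero/negative when nothing is moved:
 *	 0	the local queue's first deadline precedes the target's
 *	-1	the target queue is empty
 *	-2	the local queue is empty
 *	-3	the local queue holds a non-migrateable TIMER_CALL_LOCAL entry
 *	-4	migration suppressed in serverperfmode
 */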
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
	timer_call_t	call;
	timer_call_t	head_to;
	int		timers_migrated = 0;

	DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

	assert(!ml_get_interrupts_enabled());
	assert(queue_from != queue_to);

	if (serverperfmode) {
		/*
		 * if we're running a high end server
		 * avoid migrations... they add latency
		 * and don't save us power under typical
		 * server workloads
		 */
		return -4;
	}

	/*
	 * Take both local (from) and target (to) timer queue locks while
	 * moving the timers from the local queue to the target processor.
	 * We assume that the target is always the boot processor.
	 * But only move if all of the following are true:
	 *  - the target queue is non-empty
	 *  - the local queue is non-empty
	 *  - the local queue's first deadline is later than the target's
	 *  - the local queue contains no non-migrateable "local" call
	 *    so that we need not have the target resync.
	 */

	timer_call_lock_spin(queue_to);

	head_to = TIMER_CALL(queue_first(&queue_to->head));
	if (queue_empty(&queue_to->head)) {
		timers_migrated = -1;
		goto abort1;
	}

	timer_call_lock_spin(queue_from);

	if (queue_empty(&queue_from->head)) {
		timers_migrated = -2;
		goto abort2;
	}

	call = TIMER_CALL(queue_first(&queue_from->head));
	if (CE(call)->deadline < CE(head_to)->deadline) {
		timers_migrated = 0;
		goto abort2;
	}

	/* perform scan for non-migratable timers */
	do {
		if (call->flags & TIMER_CALL_LOCAL) {
			timers_migrated = -3;
			goto abort2;
		}
		call = TIMER_CALL(queue_next(qe(call)));
	} while (!queue_end(&queue_from->head, qe(call)));

	/* migration loop itself -- both queues are locked */
	while (!queue_empty(&queue_from->head)) {
		call = TIMER_CALL(queue_first(&queue_from->head));
		if (!simple_lock_try(&call->lock)) {
			/* case (2b) lock order inversion, dequeue only */
			timer_queue_migrate_lock_skips++;
			(void) remque(qe(call));
			call->async_dequeue = TRUE;
			continue;
		}
		timer_call_entry_dequeue(call);
		timer_call_entry_enqueue_deadline(
			call, queue_to, CE(call)->deadline);
		timers_migrated++;
		simple_unlock(&call->lock);
	}

abort2:
	timer_call_unlock(queue_from);
abort1:
	timer_call_unlock(queue_to);

	return timers_migrated;
}