apple/xnu.git: osfmk/kern/timer_call.c (blob 5a17e057c3648e1fc028441bd52d4d889631e90b)
1 /*
2 * Copyright (c) 1993-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Timer interrupt callout module.
30 */
31
32 #include <mach/mach_types.h>
33
34 #include <kern/clock.h>
35 #include <kern/processor.h>
36 #include <kern/etimer.h>
37 #include <kern/timer_call.h>
38 #include <kern/timer_queue.h>
39 #include <kern/call_entry.h>
40
41 #include <sys/kdebug.h>
42
43 #if CONFIG_DTRACE
44 #include <mach/sdt.h>
45 #endif
46
47
48 #if DEBUG
49 #define TIMER_ASSERT 1
50 #endif
51
52 //#define TIMER_ASSERT 1
53 //#define TIMER_DBG 1
54
55 #if TIMER_DBG
56 #define DBG(x...) kprintf("DBG: " x);
57 #else
58 #define DBG(x...)
59 #endif
60
61 lck_grp_t timer_call_lck_grp;
62 lck_attr_t timer_call_lck_attr;
63 lck_grp_attr_t timer_call_lck_grp_attr;
64
65
66 #define timer_call_lock_spin(queue) \
67 lck_mtx_lock_spin_always(&queue->lock_data)
68
69 #define timer_call_unlock(queue) \
70 lck_mtx_unlock_always(&queue->lock_data)
71
72
73 #define QUEUE(x) ((queue_t)(x))
74 #define MPQUEUE(x) ((mpqueue_head_t *)(x))
75 #define TIMER_CALL(x) ((timer_call_t)(x))
76
77
78 uint64_t past_deadline_timers;
79 uint64_t past_deadline_deltas;
80 uint64_t past_deadline_longest;
81 uint64_t past_deadline_shortest = ~0ULL;
82 enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000};
83
84 uint64_t past_deadline_timer_adjustment;
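/*
 * (Editorial note: past_deadline_timer_adjustment is set in
 * timer_call_initialize() to the absolute-time equivalent of
 * PAST_DEADLINE_TIMER_ADJUSTMENT_NS, i.e. 10 microseconds; a timer entered
 * with a deadline already in the past is pushed out by this amount.)
 */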
85
86 static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint32_t flags);
87 boolean_t mach_timer_coalescing_enabled = TRUE;
88
89 mpqueue_head_t *timer_call_enqueue_deadline_unlocked(
90 timer_call_t call,
91 mpqueue_head_t *queue,
92 uint64_t deadline);
93
94 mpqueue_head_t *timer_call_dequeue_unlocked(
95 timer_call_t call);
96
97
98 void
99 timer_call_initialize(void)
100 {
101 lck_attr_setdefault(&timer_call_lck_attr);
102 lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
103 lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);
104 nanotime_to_absolutetime(0, PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
105 }
106
107
108 void
109 timer_call_initialize_queue(mpqueue_head_t *queue)
110 {
111 DBG("timer_call_initialize_queue(%p)\n", queue);
112 mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr);
113 }
114
115
116 void
117 timer_call_setup(
118 timer_call_t call,
119 timer_call_func_t func,
120 timer_call_param_t param0)
121 {
122 DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);
123 call_entry_setup(CE(call), func, param0);
124 simple_lock_init(&(call)->lock, 0);
125 call->async_dequeue = FALSE;
126 }
127
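/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a client typically embeds a timer_call_data_t (the storage type
 * assumed to be declared in kern/timer_call.h), initializes it once with
 * timer_call_setup(), then arms and cancels it as needed.  The names
 * example_timer, example_expire and delay_abs below are hypothetical.
 *
 *	static timer_call_data_t	example_timer;
 *
 *	static void
 *	example_expire(timer_call_param_t p0, timer_call_param_t p1)
 *	{
 *		// runs from timer_queue_expire() once the deadline passes
 *	}
 *
 *	timer_call_setup(&example_timer, example_expire, NULL);
 *	(void) timer_call_enter(&example_timer,
 *			mach_absolute_time() + delay_abs, 0);
 *	...
 *	(void) timer_call_cancel(&example_timer);
 *
 * timer_call_enter(), timer_call_enter1() and timer_call_cancel() return
 * TRUE when the call was already pending on a queue (i.e. a re-arm or a
 * successful cancellation, respectively).
 */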
128 /*
129 * Timer call entry locking model
130 * ==============================
131 *
132 * Timer call entries are linked on per-cpu timer queues which are protected
133 * by the queue lock and the call entry lock. The locking protocol is:
134 *
135 * 0) The canonical locking order is timer call entry followed by queue.
136 *
137 * 1) With only the entry lock held, entry.queue is valid:
138 * 1a) NULL: the entry is not queued, or
139 * 1b) non-NULL: this queue must be locked before the entry is modified.
140 * After locking the queue, the call.async_dequeue flag must be checked:
141 * 1c) TRUE: the entry was removed from the queue by another thread
142 * and we must NULL the entry.queue and reset this flag, or
143 * 1d) FALSE: (i.e. queued), the entry can be manipulated.
144 *
145 * 2) If a queue lock is obtained first, the queue is stable:
146 * 2a) If a try-lock of a queued entry succeeds, the call can be operated on
147 * and dequeued.
148 * 2b) If a try-lock fails, it indicates that another thread is attempting
149 * to change the entry and move it to a different position in this queue
150 * or to a different queue. The entry can be dequeued but it should not be
151 * operated upon since it is being changed. Furthermore, we don't null
152 * the entry.queue pointer (protected by the entry lock we don't own).
153 * Instead, we set the async_dequeue flag -- see (1c).
154 */
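/*
 * Editorial sketch (not part of the original file) of the entry-side
 * pattern described above, as followed by the *_unlocked operations below;
 * the identifiers are those used in this file:
 *
 *	simple_lock(&call->lock);			// (0) entry lock first
 *	old_queue = MPQUEUE(CE(call)->queue);
 *	if (old_queue != NULL) {			// (1b) apparently queued
 *		timer_call_lock_spin(old_queue);
 *		if (call->async_dequeue) {		// (1c) raced: already removed
 *			call->async_dequeue = FALSE;
 *			CE(call)->queue = NULL;
 *		} else {
 *			// (1d) still queued: safe to remque()/re-enqueue
 *		}
 *		timer_call_unlock(old_queue);
 *	}
 *	simple_unlock(&call->lock);
 *
 * Queue-side code (shutdown, expire, migrate) instead holds the queue lock,
 * uses simple_lock_try(&call->lock), and on failure takes the (2b) path:
 * remque() the entry and set async_dequeue without touching CE(call)->queue.
 */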
155
156 /*
157 * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline()
158 * cast between pointer types (mpqueue_head_t *) and (queue_t) so that
159 * we can use the call_entry_dequeue() and call_entry_enqueue_deadline()
160 * methods to operate on timer_call structs as if they are call_entry structs.
161 * These structures are identical except for their queue head pointer fields.
162 *
163 * In the debug case, we assert that the timer call locking protocol
164 * is being obeyed.
165 */
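/*
 * (Editorial note: CE() is assumed to be the companion cast macro, defined
 * outside this file -- presumably in kern/timer_call.h -- that views a
 * timer_call_t as a call_entry_t; together with QUEUE() and MPQUEUE() above
 * it is what makes the aliasing described here work.)
 */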
166 #if TIMER_ASSERT
167 static __inline__ mpqueue_head_t *
168 timer_call_entry_dequeue(
169 timer_call_t entry)
170 {
171 mpqueue_head_t *old_queue = MPQUEUE(CE(entry)->queue);
172
173 if (!hw_lock_held((hw_lock_t)&entry->lock))
174 panic("_call_entry_dequeue() "
175 "entry %p is not locked\n", entry);
176 /*
177 * XXX The queue lock is actually a mutex in spin mode
178 * but there's no way to test for it being held
179 * so we pretend it's a spinlock!
180 */
181 if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
182 panic("_call_entry_dequeue() "
183 "queue %p is not locked\n", old_queue);
184
185 call_entry_dequeue(CE(entry));
186
187 return (old_queue);
188 }
189
190 static __inline__ mpqueue_head_t *
191 timer_call_entry_enqueue_deadline(
192 timer_call_t entry,
193 mpqueue_head_t *queue,
194 uint64_t deadline)
195 {
196 mpqueue_head_t *old_queue = MPQUEUE(CE(entry)->queue);
197
198 if (!hw_lock_held((hw_lock_t)&entry->lock))
199 panic("_call_entry_enqueue_deadline() "
200 "entry %p is not locked\n", entry);
201 /* XXX More lock pretense: */
202 if (!hw_lock_held((hw_lock_t)&queue->lock_data))
203 panic("_call_entry_enqueue_deadline() "
204 "queue %p is not locked\n", queue);
205 if (old_queue != NULL && old_queue != queue)
206 panic("_call_entry_enqueue_deadline() "
207 "old_queue %p != queue", old_queue);
208
209 call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);
210
211 return (old_queue);
212 }
213
214 #else
215
216 static __inline__ mpqueue_head_t *
217 timer_call_entry_dequeue(
218 timer_call_t entry)
219 {
220 return MPQUEUE(call_entry_dequeue(CE(entry)));
221 }
222
223 static __inline__ mpqueue_head_t *
224 timer_call_entry_enqueue_deadline(
225 timer_call_t entry,
226 mpqueue_head_t *queue,
227 uint64_t deadline)
228 {
229 return MPQUEUE(call_entry_enqueue_deadline(CE(entry),
230 QUEUE(queue), deadline));
231 }
232
233 #endif
234
235 #if TIMER_ASSERT
236 unsigned timer_call_enqueue_deadline_unlocked_async1;
237 unsigned timer_call_enqueue_deadline_unlocked_async2;
238 #endif
239 /*
240 * Assumes call_entry and queues unlocked, interrupts disabled.
241 */
242 __inline__ mpqueue_head_t *
243 timer_call_enqueue_deadline_unlocked(
244 timer_call_t call,
245 mpqueue_head_t *queue,
246 uint64_t deadline)
247 {
248 call_entry_t entry = CE(call);
249 mpqueue_head_t *old_queue;
250
251 DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);
252
253 simple_lock(&call->lock);
254 old_queue = MPQUEUE(entry->queue);
255 if (old_queue != NULL) {
256 timer_call_lock_spin(old_queue);
257 if (call->async_dequeue) {
258 /* collision (1c): null queue pointer and reset flag */
259 call->async_dequeue = FALSE;
260 entry->queue = NULL;
261 #if TIMER_ASSERT
262 timer_call_enqueue_deadline_unlocked_async1++;
263 #endif
264 } else if (old_queue != queue) {
265 (void)remque(qe(entry));
266 entry->queue = NULL;
267 #if TIMER_ASSERT
268 timer_call_enqueue_deadline_unlocked_async2++;
269 #endif
270 }
271 if (old_queue != queue) {
272 timer_call_unlock(old_queue);
273 timer_call_lock_spin(queue);
274 }
275 } else {
276 timer_call_lock_spin(queue);
277 }
278
279 timer_call_entry_enqueue_deadline(call, queue, deadline);
280 timer_call_unlock(queue);
281 simple_unlock(&call->lock);
282
283 return (old_queue);
284 }
285
286 #if TIMER_ASSERT
287 unsigned timer_call_dequeue_unlocked_async1;
288 unsigned timer_call_dequeue_unlocked_async2;
289 #endif
290 mpqueue_head_t *
291 timer_call_dequeue_unlocked(
292 timer_call_t call)
293 {
294 call_entry_t entry = CE(call);
295 mpqueue_head_t *old_queue;
296
297 DBG("timer_call_dequeue_unlocked(%p)\n", call);
298
299 simple_lock(&call->lock);
300 old_queue = MPQUEUE(entry->queue);
301 if (old_queue != NULL) {
302 timer_call_lock_spin(old_queue);
303 if (call->async_dequeue) {
304 /* collision (1c): null queue pointer and reset flag */
305 call->async_dequeue = FALSE;
306 #if TIMER_ASSERT
307 timer_call_dequeue_unlocked_async1++;
308 #endif
309 } else {
310 (void)remque(qe(entry));
311 #if TIMER_ASSERT
312 timer_call_dequeue_unlocked_async2++;
313 #endif
314 }
315 entry->queue = NULL;
316 timer_call_unlock(old_queue);
317 }
318 simple_unlock(&call->lock);
319 return (old_queue);
320 }
321
322 static boolean_t
323 timer_call_enter_internal(
324 timer_call_t call,
325 timer_call_param_t param1,
326 uint64_t deadline,
327 uint32_t flags)
328 {
329 mpqueue_head_t *queue;
330 mpqueue_head_t *old_queue;
331 spl_t s;
332 uint64_t slop = 0;
333
334 s = splclock();
335
336 call->soft_deadline = deadline;
337 call->flags = flags;
338
339 if ((flags & TIMER_CALL_CRITICAL) == 0 &&
340 mach_timer_coalescing_enabled) {
341 slop = timer_call_slop(deadline);
342 deadline += slop;
343 }
344
345 uint64_t ctime = mach_absolute_time();
346 #if defined(__i386__) || defined(__x86_64__)
347 if (__improbable(deadline < ctime)) {
348 uint64_t delta = (ctime - deadline);
349
350 past_deadline_timers++;
351 past_deadline_deltas += delta;
352 if (delta > past_deadline_longest)
353 past_deadline_longest = delta;
354 if (delta < past_deadline_shortest)
355 past_deadline_shortest = delta;
356
357 deadline = ctime + past_deadline_timer_adjustment;
358 call->soft_deadline = deadline;
359 }
360 #endif
361 call->ttd = call->soft_deadline - ctime;
362
363 #if CONFIG_DTRACE
364 DTRACE_TMR6(callout__create, timer_call_func_t, CE(call)->func,
365 timer_call_param_t, CE(call)->param0, uint32_t, call->flags,
366 (deadline - call->soft_deadline),
367 (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
368 #endif
369
370 queue = timer_queue_assign(deadline);
371
372 old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);
373
374 CE(call)->param1 = param1;
375
376 splx(s);
377
378 return (old_queue != NULL);
379 }
380
381 boolean_t
382 timer_call_enter(
383 timer_call_t call,
384 uint64_t deadline,
385 uint32_t flags)
386 {
387 return timer_call_enter_internal(call, NULL, deadline, flags);
388 }
389
390 boolean_t
391 timer_call_enter1(
392 timer_call_t call,
393 timer_call_param_t param1,
394 uint64_t deadline,
395 uint32_t flags)
396 {
397 return timer_call_enter_internal(call, param1, deadline, flags);
398 }
399
400 boolean_t
401 timer_call_cancel(
402 timer_call_t call)
403 {
404 mpqueue_head_t *old_queue;
405 spl_t s;
406
407 s = splclock();
408
409 old_queue = timer_call_dequeue_unlocked(call);
410
411 if (old_queue != NULL) {
412 timer_call_lock_spin(old_queue);
413 if (!queue_empty(&old_queue->head))
414 timer_queue_cancel(old_queue, CE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
415 else
416 timer_queue_cancel(old_queue, CE(call)->deadline, UINT64_MAX);
417 timer_call_unlock(old_queue);
418 }
419 splx(s);
420
421 #if CONFIG_DTRACE
422 DTRACE_TMR6(callout__cancel, timer_call_func_t, CE(call)->func,
423 timer_call_param_t, CE(call)->param0, uint32_t, call->flags, 0,
424 (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
425 #endif
426
427 return (old_queue != NULL);
428 }
429
430 uint32_t timer_queue_shutdown_lock_skips;
431 void
432 timer_queue_shutdown(
433 mpqueue_head_t *queue)
434 {
435 timer_call_t call;
436 mpqueue_head_t *new_queue;
437 spl_t s;
438
439 DBG("timer_queue_shutdown(%p)\n", queue);
440
441 s = splclock();
442
443 /* Note: the comma operator in the while expression re-locks the queue each iteration */
444 while (timer_call_lock_spin(queue), !queue_empty(&queue->head)) {
445 call = TIMER_CALL(queue_first(&queue->head));
446 if (!simple_lock_try(&call->lock)) {
447 /*
448 * case (2b) lock order inversion, dequeue and skip
449 * Don't change the call_entry queue back-pointer
450 * but set the async_dequeue field.
451 */
452 timer_queue_shutdown_lock_skips++;
453 (void) remque(qe(call));
454 call->async_dequeue = TRUE;
455 timer_call_unlock(queue);
456 continue;
457 }
458
459 /* remove entry from old queue */
460 timer_call_entry_dequeue(call);
461 timer_call_unlock(queue);
462
463 /* and queue it on new */
464 new_queue = timer_queue_assign(CE(call)->deadline);
465 timer_call_lock_spin(new_queue);
466 timer_call_entry_enqueue_deadline(
467 call, new_queue, CE(call)->deadline);
468 timer_call_unlock(new_queue);
469
470 simple_unlock(&call->lock);
471 }
472
473 timer_call_unlock(queue);
474 splx(s);
475 }
476
477 uint32_t timer_queue_expire_lock_skips;
478 uint64_t
479 timer_queue_expire(
480 mpqueue_head_t *queue,
481 uint64_t deadline)
482 {
483 timer_call_t call;
484
485 DBG("timer_queue_expire(%p,)\n", queue);
486
487 timer_call_lock_spin(queue);
488
489 while (!queue_empty(&queue->head)) {
490 call = TIMER_CALL(queue_first(&queue->head));
491
492 if (call->soft_deadline <= deadline) {
493 timer_call_func_t func;
494 timer_call_param_t param0, param1;
495
496 if (!simple_lock_try(&call->lock)) {
497 /* case (2b) lock inversion, dequeue and skip */
498 timer_queue_expire_lock_skips++;
499 (void) remque(qe(call));
500 call->async_dequeue = TRUE;
501 continue;
502 }
503
504 timer_call_entry_dequeue(call);
505
506 func = CE(call)->func;
507 param0 = CE(call)->param0;
508 param1 = CE(call)->param1;
509
510 simple_unlock(&call->lock);
511 timer_call_unlock(queue);
512
513 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
514 DECR_TIMER_CALLOUT | DBG_FUNC_START,
515 VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);
516
517 #if CONFIG_DTRACE
518 DTRACE_TMR6(callout__start, timer_call_func_t, func,
519 timer_call_param_t, param0, unsigned, call->flags,
520 0, (call->ttd >> 32),
521 (unsigned) (call->ttd & 0xFFFFFFFF));
522 #endif
523
524 /* Maintain time-to-deadline in per-processor data
525 * structure for thread wakeup deadline statistics.
526 */
527 uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
528 *ttdp = call->ttd;
529 (*func)(param0, param1);
530 *ttdp = 0;
531
532 #if CONFIG_DTRACE
533 DTRACE_TMR3(callout__end, timer_call_func_t, func,
534 timer_call_param_t, param0, timer_call_param_t,
535 param1);
536 #endif
537
538 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
539 DECR_TIMER_CALLOUT | DBG_FUNC_END,
540 VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);
541
542 timer_call_lock_spin(queue);
543 }
544 else
545 break;
546 }
547
548 if (!queue_empty(&queue->head))
549 deadline = CE(call)->deadline;
550 else
551 deadline = UINT64_MAX;
552
553 timer_call_unlock(queue);
554
555 return (deadline);
556 }
557
558
559 extern int serverperfmode;
560 uint32_t timer_queue_migrate_lock_skips;
561 /*
562 * timer_queue_migrate() is called by etimer_queue_migrate()
563 * to move timer requests from the local processor (queue_from)
564 * to a target processor's queue (queue_to).
565 */
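/*
 * (Editorial summary, derived from the code below: the return value is the
 * number of timers migrated when non-negative; negative values are abort
 * reasons -- -1 target queue empty, -2 local queue empty, -3 a
 * non-migratable TIMER_CALL_LOCAL timer present, -4 serverperfmode set --
 * and 0 is also returned when the local queue's first deadline precedes
 * the target's.)
 */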
566 int
567 timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
568 {
569 timer_call_t call;
570 timer_call_t head_to;
571 int timers_migrated = 0;
572
573 DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);
574
575 assert(!ml_get_interrupts_enabled());
576 assert(queue_from != queue_to);
577
578 if (serverperfmode) {
579 /*
580 * if we're running a high-end server,
581 * avoid migrations... they add latency
582 * and don't save us power under typical
583 * server workloads
584 */
585 return -4;
586 }
587
588 /*
589 * Take both local (from) and target (to) timer queue locks while
590 * moving the timers from the local queue to the target processor.
591 * We assume that the target is always the boot processor.
592 * But only move if all of the following is true:
593 * - the target queue is non-empty
594 * - the local queue is non-empty
595 * - the local queue's first deadline is later than the target's
596 * - the local queue contains no non-migratable "local" call
597 * so that we need not have the target resync.
598 */
599
600 timer_call_lock_spin(queue_to);
601
602 head_to = TIMER_CALL(queue_first(&queue_to->head));
603 if (queue_empty(&queue_to->head)) {
604 timers_migrated = -1;
605 goto abort1;
606 }
607
608 timer_call_lock_spin(queue_from);
609
610 if (queue_empty(&queue_from->head)) {
611 timers_migrated = -2;
612 goto abort2;
613 }
614
615 call = TIMER_CALL(queue_first(&queue_from->head));
616 if (CE(call)->deadline < CE(head_to)->deadline) {
617 timers_migrated = 0;
618 goto abort2;
619 }
620
621 /* perform scan for non-migratable timers */
622 do {
623 if (call->flags & TIMER_CALL_LOCAL) {
624 timers_migrated = -3;
625 goto abort2;
626 }
627 call = TIMER_CALL(queue_next(qe(call)));
628 } while (!queue_end(&queue_from->head, qe(call)));
629
630 /* migration loop itself -- both queues are locked */
631 while (!queue_empty(&queue_from->head)) {
632 call = TIMER_CALL(queue_first(&queue_from->head));
633 if (!simple_lock_try(&call->lock)) {
634 /* case (2b) lock order inversion, dequeue only */
635 timer_queue_migrate_lock_skips++;
636 (void) remque(qe(call));
637 call->async_dequeue = TRUE;
638 continue;
639 }
640 timer_call_entry_dequeue(call);
641 timer_call_entry_enqueue_deadline(
642 call, queue_to, CE(call)->deadline);
643 timers_migrated++;
644 simple_unlock(&call->lock);
645 }
646
647 abort2:
648 timer_call_unlock(queue_from);
649 abort1:
650 timer_call_unlock(queue_to);
651
652 return timers_migrated;
653 }