/*
 * Copyright (c) 1993-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Timer interrupt callout module.
 */
#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/timer_queue.h>
#include <kern/call_entry.h>
#include <kern/thread.h>

#include <sys/kdebug.h>
#if DEBUG
#define TIMER_ASSERT    1
#endif
//#define TIMER_ASSERT  1

#define DBG(x...) kprintf("DBG: " x);

#if TIMER_TRACE
#define TIMER_KDEBUG_TRACE      KERNEL_DEBUG_CONSTANT_IST
#else
#define TIMER_KDEBUG_TRACE(x...)
#endif
lck_grp_t               timer_call_lck_grp;
lck_attr_t              timer_call_lck_attr;
lck_grp_attr_t          timer_call_lck_grp_attr;

lck_grp_t               timer_longterm_lck_grp;
lck_attr_t              timer_longterm_lck_attr;
lck_grp_attr_t          timer_longterm_lck_grp_attr;
#define timer_queue_lock_spin(queue)    \
    lck_mtx_lock_spin_always(&queue->lock_data)

#define timer_queue_unlock(queue)       \
    lck_mtx_unlock_always(&queue->lock_data)

#define QUEUE(x)        ((queue_t)(x))
#define MPQUEUE(x)      ((mpqueue_head_t *)(x))
#define TIMER_CALL(x)   ((timer_call_t)(x))
/*
 * The longterm timer object is a global structure holding all timers
 * beyond the short-term, local timer queue threshold. The boot processor
 * is responsible for moving each timer to its local timer queue
 * if and when that timer becomes due within the threshold.
 */
#define TIMER_LONGTERM_NONE             EndOfAllTime
#if defined(__x86_64__)
#define TIMER_LONGTERM_THRESHOLD        (1ULL * NSEC_PER_SEC)
#else
#define TIMER_LONGTERM_THRESHOLD        TIMER_LONGTERM_NONE
#endif
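
/*
 * Illustrative sketch (not part of the original module): given the
 * definitions above, a deadline is a candidate for the longterm queue only
 * when it lies more than TIMER_LONGTERM_THRESHOLD beyond "now". The helper
 * below is hypothetical and exists only to show the test; the real decision
 * is made inline in timer_longterm_enqueue_unlocked().
 */
#if 0   /* illustrative only */
static inline boolean_t
timer_deadline_is_longterm(uint64_t now, uint64_t deadline, uint64_t threshold_interval)
{
    /* A disabled threshold (TIMER_LONGTERM_NONE) never classifies as longterm */
    if (threshold_interval == TIMER_LONGTERM_NONE)
        return FALSE;
    /* Longterm only if the deadline is beyond now + threshold */
    return (deadline > now + threshold_interval) ? TRUE : FALSE;
}
#endif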
typedef struct {
    uint64_t            interval;       /* longterm timer interval */
    uint64_t            margin;         /* fudge factor (10% of interval) */
    uint64_t            deadline;       /* first/soonest longterm deadline */
    uint64_t            preempted;      /* sooner timer has pre-empted */
    timer_call_t        call;           /* first/soonest longterm timer call */
    uint64_t            deadline_set;   /* next timer set */
    timer_call_data_t   timer;          /* timer used by threshold management */

                                        /* Stats: */
    uint64_t            scans;          /* num threshold timer scans */
    uint64_t            preempts;       /* num threshold reductions */
    uint64_t            latency;        /* average threshold latency */
    uint64_t            latency_min;    /* minimum threshold latency */
    uint64_t            latency_max;    /* maximum threshold latency */
} threshold_t;

typedef struct {
    mpqueue_head_t      queue;          /* longterm timer list */
    uint64_t            enqueues;       /* num timers queued */
    uint64_t            dequeues;       /* num timers dequeued */
    uint64_t            escalates;      /* num timers becoming shortterm */
    uint64_t            scan_time;      /* last time the list was scanned */
    threshold_t         threshold;      /* longterm timer threshold */
} timer_longterm_t;

timer_longterm_t        timer_longterm;
static mpqueue_head_t   *timer_longterm_queue = NULL;
static void             timer_longterm_init(void);
static void             timer_longterm_callout(
                            timer_call_param_t  p0,
                            timer_call_param_t  p1);
extern void             timer_longterm_scan(
                            timer_longterm_t    *tlp,
                            uint64_t            now);
static void             timer_longterm_update(
                            timer_longterm_t    *tlp);
static void             timer_longterm_update_locked(
                            timer_longterm_t    *tlp);
static mpqueue_head_t * timer_longterm_enqueue_unlocked(
                            timer_call_t        call,
                            uint64_t            now,
                            uint64_t            deadline,
                            mpqueue_head_t      **old_queue);
static void             timer_longterm_dequeued_locked(
                            timer_call_t        call);
uint64_t past_deadline_timers;
uint64_t past_deadline_deltas;
uint64_t past_deadline_longest;
uint64_t past_deadline_shortest = ~0ULL;
enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000};

uint64_t past_deadline_timer_adjustment;
static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint64_t leeway, uint32_t flags, boolean_t ratelimited);
boolean_t       mach_timer_coalescing_enabled = TRUE;
mpqueue_head_t  *timer_call_enqueue_deadline_unlocked(
                    timer_call_t        call,
                    mpqueue_head_t      *queue,
                    uint64_t            deadline);

mpqueue_head_t  *timer_call_dequeue_unlocked(
                    timer_call_t        call);
void
timer_call_init(void)
{
    lck_attr_setdefault(&timer_call_lck_attr);
    lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
    lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);

    nanotime_to_absolutetime(0, PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);

    timer_longterm_init();
}
void
timer_call_queue_init(mpqueue_head_t *queue)
{
    DBG("timer_call_queue_init(%p)\n", queue);
    mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr);
}
void
timer_call_setup(
    timer_call_t            call,
    timer_call_func_t       func,
    timer_call_param_t      param0)
{
    DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);
    call_entry_setup(CE(call), func, param0);
    simple_lock_init(&(call)->lock, 0);
    call->async_dequeue = FALSE;
}
/*
 * Timer call entry locking model
 * ==============================
 *
 * Timer call entries are linked on per-cpu timer queues which are protected
 * by the queue lock and the call entry lock. The locking protocol is:
 *
 *  0) The canonical locking order is timer call entry followed by queue.
 *
 *  1) With only the entry lock held, entry.queue is valid:
 *    1a) NULL: the entry is not queued, or
 *    1b) non-NULL: this queue must be locked before the entry is modified.
 *        After locking the queue, the call.async_dequeue flag must be checked:
 *    1c) TRUE: the entry was removed from the queue by another thread
 *        and we must NULL the entry.queue and reset this flag, or
 *    1d) FALSE: (i.e. queued), the entry can be manipulated.
 *
 *  2) If a queue lock is obtained first, the queue is stable:
 *    2a) If a try-lock of a queued entry succeeds, the call can be operated on
 *        and dequeued.
 *    2b) If a try-lock fails, it indicates that another thread is attempting
 *        to change the entry and move it to a different position in this queue
 *        or to a different queue. The entry can be dequeued but it should not be
 *        operated upon since it is being changed. Furthermore, we don't null
 *        the entry.queue pointer (protected by the entry lock we don't own).
 *        Instead, we set the async_dequeue flag -- see (1c).
 *    2c) Same as 2b but occurring when a longterm timer is matured.
 */

/*
 * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline()
 * cast between pointer types (mpqueue_head_t *) and (queue_t) so that
 * we can use the call_entry_dequeue() and call_entry_enqueue_deadline()
 * methods to operate on timer_call structs as if they are call_entry structs.
 * These structures are identical except for their queue head pointer fields.
 *
 * In the debug case, we assert that the timer call locking protocol
 * is being obeyed.
 */
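
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the shape of locking case 1 above -- take the entry lock, then the queue
 * lock, then re-check async_dequeue before touching the entry. Compare with
 * timer_call_dequeue_unlocked() below, which implements the real version.
 */
#if 0   /* illustrative only */
static void
timer_call_example_case1(timer_call_t call)
{
    mpqueue_head_t  *queue;

    simple_lock(&call->lock);               /* entry lock first (rule 0) */
    queue = MPQUEUE(CE(call)->queue);
    if (queue != NULL) {                    /* case 1b: queued */
        timer_queue_lock_spin(queue);
        if (call->async_dequeue) {          /* case 1c: raced with an async dequeue */
            call->async_dequeue = FALSE;
            CE(call)->queue = NULL;
        } else {                            /* case 1d: safe to manipulate */
            timer_call_entry_dequeue(call);
        }
        timer_queue_unlock(queue);
    }
    simple_unlock(&call->lock);
}
#endif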
#if TIMER_ASSERT
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
    timer_call_t        entry)
{
    mpqueue_head_t      *old_queue = MPQUEUE(CE(entry)->queue);

    if (!hw_lock_held((hw_lock_t)&entry->lock))
        panic("_call_entry_dequeue() "
            "entry %p is not locked\n", entry);
    /*
     * XXX The queue lock is actually a mutex in spin mode
     *     but there's no way to test for it being held
     *     so we pretend it's a spinlock!
     */
    if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
        panic("_call_entry_dequeue() "
            "queue %p is not locked\n", old_queue);

    call_entry_dequeue(CE(entry));

    return (old_queue);
}
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
    timer_call_t        entry,
    mpqueue_head_t      *queue,
    uint64_t            deadline)
{
    mpqueue_head_t      *old_queue = MPQUEUE(CE(entry)->queue);

    if (!hw_lock_held((hw_lock_t)&entry->lock))
        panic("_call_entry_enqueue_deadline() "
            "entry %p is not locked\n", entry);
    /* XXX More lock pretense: */
    if (!hw_lock_held((hw_lock_t)&queue->lock_data))
        panic("_call_entry_enqueue_deadline() "
            "queue %p is not locked\n", queue);
    if (old_queue != NULL && old_queue != queue)
        panic("_call_entry_enqueue_deadline() "
            "old_queue %p != queue", old_queue);

    call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

    /* For efficiency, track the earliest soft deadline on the queue, so that
     * fuzzy decisions can be made without lock acquisitions.
     */
    queue->earliest_soft_deadline = ((timer_call_t)queue_first(&queue->head))->soft_deadline;

    return (old_queue);
}
#else /* TIMER_ASSERT */

static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
    timer_call_t        entry)
{
    mpqueue_head_t      *old_queue = MPQUEUE(CE(entry)->queue);

    call_entry_dequeue(CE(entry));

    return (old_queue);
}

static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
    timer_call_t        entry,
    mpqueue_head_t      *queue,
    uint64_t            deadline)
{
    mpqueue_head_t      *old_queue = MPQUEUE(CE(entry)->queue);

    call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

    /* For efficiency, track the earliest soft deadline on the queue,
     * so that fuzzy decisions can be made without lock acquisitions.
     */
    queue->earliest_soft_deadline = ((timer_call_t)queue_first(&queue->head))->soft_deadline;

    return (old_queue);
}

#endif /* TIMER_ASSERT */
static __inline__ void
timer_call_entry_enqueue_tail(
    timer_call_t        entry,
    mpqueue_head_t      *queue)
{
    call_entry_enqueue_tail(CE(entry), QUEUE(queue));
}
/*
 * Remove a timer entry from its queue but don't change the queue pointer;
 * instead, set the async_dequeue flag. This is locking case 2b.
 */
static __inline__ void
timer_call_entry_dequeue_async(
    timer_call_t        entry)
{
    mpqueue_head_t      *old_queue = MPQUEUE(CE(entry)->queue);

    if (old_queue) {
        (void) remque(qe(entry));
        entry->async_dequeue = TRUE;
    }
}
unsigned timer_call_enqueue_deadline_unlocked_async1;
unsigned timer_call_enqueue_deadline_unlocked_async2;
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
    timer_call_t        call,
    mpqueue_head_t      *queue,
    uint64_t            deadline)
{
    call_entry_t        entry = CE(call);
    mpqueue_head_t      *old_queue;

    DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

    simple_lock(&call->lock);
    old_queue = MPQUEUE(entry->queue);
    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): timer already dequeued, clear flag */
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                call, call->async_dequeue, CE(call)->queue, 0x1c, 0);
            timer_call_enqueue_deadline_unlocked_async1++;
            call->async_dequeue = FALSE;
            entry->queue = NULL;
        } else if (old_queue != queue) {
            timer_call_entry_dequeue(call);
            timer_call_enqueue_deadline_unlocked_async2++;
        }
        if (old_queue == timer_longterm_queue)
            timer_longterm_dequeued_locked(call);
        if (old_queue != queue) {
            timer_queue_unlock(old_queue);
            timer_queue_lock_spin(queue);
        }
    } else {
        timer_queue_lock_spin(queue);
    }

    timer_call_entry_enqueue_deadline(call, queue, deadline);
    timer_queue_unlock(queue);
    simple_unlock(&call->lock);

    return (old_queue);
}
unsigned timer_call_dequeue_unlocked_async1;
unsigned timer_call_dequeue_unlocked_async2;
mpqueue_head_t *
timer_call_dequeue_unlocked(
    timer_call_t        call)
{
    call_entry_t        entry = CE(call);
    mpqueue_head_t      *old_queue;

    DBG("timer_call_dequeue_unlocked(%p)\n", call);

    simple_lock(&call->lock);
    old_queue = MPQUEUE(entry->queue);
    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
        call, call->async_dequeue, CE(call)->queue, 0, 0);
    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): timer already dequeued, clear flag */
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                call, call->async_dequeue, CE(call)->queue, 0x1c, 0);
            timer_call_dequeue_unlocked_async1++;
            call->async_dequeue = FALSE;
            entry->queue = NULL;
        } else {
            timer_call_entry_dequeue(call);
        }
        if (old_queue == timer_longterm_queue)
            timer_longterm_dequeued_locked(call);
        timer_queue_unlock(old_queue);
    }
    simple_unlock(&call->lock);

    return (old_queue);
}
static boolean_t
timer_call_enter_internal(
    timer_call_t        call,
    timer_call_param_t  param1,
    uint64_t            deadline,
    uint64_t            leeway,
    uint32_t            flags,
    boolean_t           ratelimited)
{
    mpqueue_head_t      *queue = NULL;
    mpqueue_head_t      *old_queue;
    spl_t               s;
    uint64_t            slop;
    uint32_t            urgency;

    s = splclock();

    call->soft_deadline = deadline;
    call->flags = flags;

    uint64_t ctime = mach_absolute_time();

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_ENTER | DBG_FUNC_START,
        call,
        param1, deadline, flags, 0);

    urgency = (flags & TIMER_CALL_URGENCY_MASK);

    boolean_t slop_ratelimited = FALSE;
    slop = timer_call_slop(deadline, ctime, urgency, current_thread(), &slop_ratelimited);

    if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop)
        slop = leeway;

    if (UINT64_MAX - deadline <= slop) {
        deadline = UINT64_MAX;
    } else {
        deadline += slop;
    }

    if (__improbable(deadline < ctime)) {
        uint64_t delta = (ctime - deadline);

        past_deadline_timers++;
        past_deadline_deltas += delta;
        if (delta > past_deadline_longest)
            past_deadline_longest = delta;
        if (delta < past_deadline_shortest)
            past_deadline_shortest = delta;

        deadline = ctime + past_deadline_timer_adjustment;
        call->soft_deadline = deadline;
    }

    /* Bit 0 of the "soft" deadline indicates that
     * this particular timer call requires rate-limiting
     * behaviour. Maintain the invariant deadline >= soft_deadline by
     * setting bit 0 of "deadline".
     */
    deadline |= 1;
    if (ratelimited || slop_ratelimited) {
        call->soft_deadline |= 1ULL;
    } else {
        call->soft_deadline &= ~0x1ULL;
    }

    call->ttd = call->soft_deadline - ctime;

    DTRACE_TMR7(callout__create, timer_call_func_t, CE(call)->func,
        timer_call_param_t, CE(call)->param0, uint32_t, call->flags,
        (deadline - call->soft_deadline),
        (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), call);

    if (!ratelimited && !slop_ratelimited) {
        queue = timer_longterm_enqueue_unlocked(call, ctime, deadline, &old_queue);
    }

    if (queue == NULL) {
        queue = timer_queue_assign(deadline);
        old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);
    }

    CE(call)->param1 = param1;
    CE(call)->entry_time = ctime;

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_ENTER | DBG_FUNC_END,
        call,
        (old_queue != NULL), call->soft_deadline, queue->count, 0);

    splx(s);

    return (old_queue != NULL);
}
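
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * how bit 0 of the deadlines is used above. The low bit of soft_deadline
 * marks a rate-limited call, while bit 0 of the hard deadline is set
 * unconditionally so that deadline >= soft_deadline always holds.
 */
#if 0   /* illustrative only */
static inline boolean_t
timer_call_is_ratelimited(timer_call_t call)
{
    /* Bit 0 of soft_deadline marks a rate-limited timer call */
    return (call->soft_deadline & 0x1ULL) ? TRUE : FALSE;
}
#endif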
/*
 * timer_call_*()
 *	return boolean indicating whether the call was previously queued.
 */
boolean_t
timer_call_enter(
    timer_call_t        call,
    uint64_t            deadline,
    uint32_t            flags)
{
    return timer_call_enter_internal(call, NULL, deadline, 0, flags, FALSE);
}

boolean_t
timer_call_enter1(
    timer_call_t        call,
    timer_call_param_t  param1,
    uint64_t            deadline,
    uint32_t            flags)
{
    return timer_call_enter_internal(call, param1, deadline, 0, flags, FALSE);
}

boolean_t
timer_call_enter_with_leeway(
    timer_call_t        call,
    timer_call_param_t  param1,
    uint64_t            deadline,
    uint64_t            leeway,
    uint32_t            flags,
    boolean_t           ratelimited)
{
    return timer_call_enter_internal(call, param1, deadline, leeway, flags, ratelimited);
}
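
/*
 * Illustrative usage sketch (not part of the original file): arming a timer
 * call with an explicit leeway via the interfaces above. The callback, the
 * static timer object, and the arming routine are hypothetical; the
 * clock_interval_to_deadline() and nanoseconds_to_absolutetime() conversions
 * are the usual kernel helpers.
 */
#if 0   /* illustrative only */
static void
example_timeout_func(timer_call_param_t p0, timer_call_param_t p1)
{
    /* Invoked from the expiry path once the deadline (plus any leeway) passes */
    (void) p0;
    (void) p1;
}

static timer_call_data_t example_timer;

static void
example_arm_timer(void)
{
    uint64_t deadline, leeway;

    timer_call_setup(&example_timer, example_timeout_func, NULL);

    /* Fire about 100 ms from now, tolerating up to 10 ms of coalescing */
    clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
    nanoseconds_to_absolutetime(10 * NSEC_PER_MSEC, &leeway);

    (void) timer_call_enter_with_leeway(&example_timer, NULL,
        deadline, leeway, TIMER_CALL_LEEWAY, FALSE);
}
#endif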
/*
 * Cancel a timer call. Returns TRUE if it was on a queue.
 */
boolean_t
timer_call_cancel(
    timer_call_t        call)
{
    mpqueue_head_t      *old_queue;
    spl_t               s;

    s = splclock();

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_CANCEL | DBG_FUNC_START,
        call,
        CE(call)->deadline, call->soft_deadline, call->flags, 0);

    old_queue = timer_call_dequeue_unlocked(call);

    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (!queue_empty(&old_queue->head)) {
            timer_queue_cancel(old_queue, CE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
            old_queue->earliest_soft_deadline = ((timer_call_t)queue_first(&old_queue->head))->soft_deadline;
        } else {
            timer_queue_cancel(old_queue, CE(call)->deadline, UINT64_MAX);
            old_queue->earliest_soft_deadline = UINT64_MAX;
        }
        timer_queue_unlock(old_queue);
    }

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_CANCEL | DBG_FUNC_END,
        call,
        old_queue,
        CE(call)->deadline - mach_absolute_time(),
        CE(call)->deadline - CE(call)->entry_time, 0);

    splx(s);

    DTRACE_TMR6(callout__cancel, timer_call_func_t, CE(call)->func,
        timer_call_param_t, CE(call)->param0, uint32_t, call->flags, 0,
        (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));

    return (old_queue != NULL);
}
uint32_t        timer_queue_shutdown_lock_skips;

void
timer_queue_shutdown(
    mpqueue_head_t      *queue)
{
    timer_call_t        call;
    mpqueue_head_t      *new_queue;
    spl_t               s;

    DBG("timer_queue_shutdown(%p)\n", queue);

    s = splclock();

    /* Note comma operator in while expression re-locking each iteration */
    while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
        call = TIMER_CALL(queue_first(&queue->head));
        if (!simple_lock_try(&call->lock)) {
            /*
             * case (2b) lock order inversion, dequeue and skip
             * Don't change the call_entry queue back-pointer
             * but set the async_dequeue field.
             */
            timer_queue_shutdown_lock_skips++;
            timer_call_entry_dequeue_async(call);
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                call, call->async_dequeue, CE(call)->queue, 0x2b, 0);
            timer_queue_unlock(queue);
            continue;
        }

        /* remove entry from old queue */
        timer_call_entry_dequeue(call);
        timer_queue_unlock(queue);

        /* and queue it on new */
        new_queue = timer_queue_assign(CE(call)->deadline);
        timer_queue_lock_spin(new_queue);
        timer_call_entry_enqueue_deadline(
            call, new_queue, CE(call)->deadline);
        timer_queue_unlock(new_queue);

        simple_unlock(&call->lock);
    }

    timer_queue_unlock(queue);
    splx(s);
}
uint32_t        timer_queue_expire_lock_skips;

uint64_t
timer_queue_expire_with_options(
    mpqueue_head_t      *queue,
    uint64_t            deadline,
    boolean_t           rescan)
{
    timer_call_t        call = NULL;
    uint32_t            tc_iterations = 0;
    DBG("timer_queue_expire(%p,)\n", queue);

    uint64_t cur_deadline = deadline;
    timer_queue_lock_spin(queue);

    while (!queue_empty(&queue->head)) {
        /* Upon processing one or more timer calls, refresh the
         * deadline to account for time elapsed in the callout
         */
        if (++tc_iterations > 1)
            cur_deadline = mach_absolute_time();

        if (call == NULL)
            call = TIMER_CALL(queue_first(&queue->head));

        if (call->soft_deadline <= cur_deadline) {
            timer_call_func_t       func;
            timer_call_param_t      param0, param1;

            TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0);
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
                call,
                call->soft_deadline,
                CE(call)->deadline,
                CE(call)->entry_time, 0);

            /* Bit 0 of the "soft" deadline indicates that
             * this particular timer call is rate-limited
             * and hence shouldn't be processed before its
             * hard deadline.
             */
            if ((call->soft_deadline & 0x1) &&
                (CE(call)->deadline > cur_deadline)) {
                if (rescan == FALSE)
                    break;
            }

            if (!simple_lock_try(&call->lock)) {
                /* case (2b) lock inversion, dequeue and skip */
                timer_queue_expire_lock_skips++;
                timer_call_entry_dequeue_async(call);
                call = NULL;
                continue;
            }

            timer_call_entry_dequeue(call);

            func = CE(call)->func;
            param0 = CE(call)->param0;
            param1 = CE(call)->param1;

            simple_unlock(&call->lock);
            timer_queue_unlock(queue);

            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_CALLOUT | DBG_FUNC_START,
                call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);

            DTRACE_TMR7(callout__start, timer_call_func_t, func,
                timer_call_param_t, param0, unsigned, call->flags,
                0, (call->ttd >> 32),
                (unsigned) (call->ttd & 0xFFFFFFFF), call);

            /* Maintain time-to-deadline in per-processor data
             * structure for thread wakeup deadline statistics.
             */
            uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
            *ttdp = call->ttd;
            (*func)(param0, param1);
            *ttdp = 0;

            DTRACE_TMR4(callout__end, timer_call_func_t, func,
                param0, param1, call);

            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_CALLOUT | DBG_FUNC_END,
                call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);

            call = NULL;
            timer_queue_lock_spin(queue);
        } else {
            if (__probable(rescan == FALSE)) {
                break;
            } else {
                int64_t skew = CE(call)->deadline - call->soft_deadline;
                assert(CE(call)->deadline >= call->soft_deadline);

                /* DRK: On a latency quality-of-service level change,
                 * re-sort potentially rate-limited timers. The platform
                 * layer determines which timers require
                 * this. In the absence of the per-callout
                 * synchronization requirement, a global resort could
                 * be more efficient. The re-sort effectively
                 * annuls all timer adjustments, i.e. the "soft
                 * deadline" is the sort key.
                 */
                if (timer_resort_threshold(skew)) {
                    if (__probable(simple_lock_try(&call->lock))) {
                        timer_call_entry_dequeue(call);
                        timer_call_entry_enqueue_deadline(call, queue, call->soft_deadline);
                        simple_unlock(&call->lock);
                        call = NULL;
                    }
                }
                if (call) {
                    call = TIMER_CALL(queue_next(qe(call)));
                    if (queue_end(&queue->head, qe(call)))
                        break;
                }
            }
        }
    }

    if (!queue_empty(&queue->head)) {
        call = TIMER_CALL(queue_first(&queue->head));
        cur_deadline = CE(call)->deadline;
        queue->earliest_soft_deadline = call->soft_deadline;
    } else {
        queue->earliest_soft_deadline = cur_deadline = UINT64_MAX;
    }

    timer_queue_unlock(queue);

    return (cur_deadline);
}
uint64_t
timer_queue_expire(
    mpqueue_head_t      *queue,
    uint64_t            deadline)
{
    return timer_queue_expire_with_options(queue, deadline, FALSE);
}
extern int serverperfmode;
uint32_t        timer_queue_migrate_lock_skips;
/*
 * timer_queue_migrate() is called by timer_queue_migrate_cpu()
 * to move timer requests from the local processor (queue_from)
 * to a target processor's (queue_to).
 */
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
    timer_call_t        call;
    timer_call_t        head_to;
    int                 timers_migrated = 0;

    DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

    assert(!ml_get_interrupts_enabled());
    assert(queue_from != queue_to);

    if (serverperfmode) {
        /*
         * if we're running a high end server
         * avoid migrations... they add latency
         * and don't save us power under typical
         * server workloads
         */
        return -4;
    }

    /*
     * Take both local (from) and target (to) timer queue locks while
     * moving the timers from the local queue to the target processor.
     * We assume that the target is always the boot processor.
     * But only move if all of the following is true:
     *  - the target queue is non-empty
     *  - the local queue is non-empty
     *  - the local queue's first deadline is later than the target's
     *  - the local queue contains no non-migrateable "local" call
     * so that we need not have the target resync.
     */

    timer_queue_lock_spin(queue_to);

    head_to = TIMER_CALL(queue_first(&queue_to->head));
    if (queue_empty(&queue_to->head)) {
        timers_migrated = -1;
        goto abort1;
    }

    timer_queue_lock_spin(queue_from);

    if (queue_empty(&queue_from->head)) {
        timers_migrated = -2;
        goto abort2;
    }

    call = TIMER_CALL(queue_first(&queue_from->head));
    if (CE(call)->deadline < CE(head_to)->deadline) {
        timers_migrated = 0;
        goto abort2;
    }

    /* perform scan for non-migratable timers */
    do {
        if (call->flags & TIMER_CALL_LOCAL) {
            timers_migrated = -3;
            goto abort2;
        }
        call = TIMER_CALL(queue_next(qe(call)));
    } while (!queue_end(&queue_from->head, qe(call)));

    /* migration loop itself -- both queues are locked */
    while (!queue_empty(&queue_from->head)) {
        call = TIMER_CALL(queue_first(&queue_from->head));
        if (!simple_lock_try(&call->lock)) {
            /* case (2b) lock order inversion, dequeue only */
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                call,
                CE(call)->queue,
                call->lock.interlock.lock_data,
                0x2b, 0);
            timer_queue_migrate_lock_skips++;
            timer_call_entry_dequeue_async(call);
            continue;
        }
        timer_call_entry_dequeue(call);
        timer_call_entry_enqueue_deadline(
            call, queue_to, CE(call)->deadline);
        timers_migrated++;
        simple_unlock(&call->lock);
    }
    queue_from->earliest_soft_deadline = UINT64_MAX;
abort2:
    timer_queue_unlock(queue_from);
abort1:
    timer_queue_unlock(queue_to);

    return timers_migrated;
}
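
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the conditions timer_queue_migrate() checks before moving anything, stated
 * as a single predicate. Both queues are assumed to be locked by the caller
 * of this hypothetical check.
 */
#if 0   /* illustrative only */
static boolean_t
timer_queue_can_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
    timer_call_t        call;

    if (queue_empty(&queue_to->head) || queue_empty(&queue_from->head))
        return FALSE;

    /* the local queue's first deadline must not be sooner than the target's */
    if (CE(TIMER_CALL(queue_first(&queue_from->head)))->deadline <
        CE(TIMER_CALL(queue_first(&queue_to->head)))->deadline)
        return FALSE;

    /* no non-migrateable "local" calls may be present */
    for (call = TIMER_CALL(queue_first(&queue_from->head));
         !queue_end(&queue_from->head, qe(call));
         call = TIMER_CALL(queue_next(qe(call)))) {
        if (call->flags & TIMER_CALL_LOCAL)
            return FALSE;
    }
    return TRUE;
}
#endif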
void
timer_queue_trace_cpu(int ncpu)
{
    timer_call_nosync_cpu(
        ncpu,
        (void(*)())timer_queue_trace,
        (void*) timer_queue_cpu(ncpu));
}
void
timer_queue_trace(
    mpqueue_head_t      *queue)
{
    timer_call_t        call;
    spl_t               s;

    s = splclock();
    timer_queue_lock_spin(queue);

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_QUEUE | DBG_FUNC_START,
        queue->count, mach_absolute_time(), 0, 0, 0);

    if (!queue_empty(&queue->head)) {
        call = TIMER_CALL(queue_first(&queue->head));
        do {
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_QUEUE | DBG_FUNC_NONE,
                call->soft_deadline,
                CE(call)->deadline,
                CE(call)->entry_time,
                CE(call)->func,
                0);
            call = TIMER_CALL(queue_next(qe(call)));
        } while (!queue_end(&queue->head, qe(call)));
    }

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_QUEUE | DBG_FUNC_END,
        queue->count, mach_absolute_time(), 0, 0, 0);

    timer_queue_unlock(queue);
    splx(s);
}
void
timer_longterm_dequeued_locked(timer_call_t call)
{
    timer_longterm_t    *tlp = &timer_longterm;

    tlp->dequeues++;
    if (call == tlp->threshold.call)
        tlp->threshold.call = NULL;
}
/*
 * Place a timer call in the longterm list
 * and adjust the next timer callout deadline if the new timer is first.
 */
mpqueue_head_t *
timer_longterm_enqueue_unlocked(timer_call_t    call,
                                uint64_t        now,
                                uint64_t        deadline,
                                mpqueue_head_t  **old_queue)
{
    timer_longterm_t    *tlp = &timer_longterm;
    boolean_t           update_required = FALSE;
    uint64_t            longterm_threshold;

    longterm_threshold = now + tlp->threshold.interval;

    /*
     * Return NULL without doing anything if:
     *  - this timer is local, or
     *  - the longterm mechanism is disabled, or
     *  - this deadline is too short.
     */
    if (__probable((call->flags & TIMER_CALL_LOCAL) != 0 ||
        (tlp->threshold.interval == TIMER_LONGTERM_NONE) ||
        (deadline <= longterm_threshold)))
        return NULL;

    /*
     * Remove timer from its current queue, if any.
     */
    *old_queue = timer_call_dequeue_unlocked(call);

    /*
     * Lock the longterm queue, queue timer and determine
     * whether an update is necessary.
     */
    assert(!ml_get_interrupts_enabled());
    simple_lock(&call->lock);
    timer_queue_lock_spin(timer_longterm_queue);
    timer_call_entry_enqueue_tail(call, timer_longterm_queue);
    CE(call)->deadline = deadline;

    tlp->enqueues++;

    /*
     * We'll need to update the currently set threshold timer
     * if the new deadline is sooner and no sooner update is in flight.
     */
    if (deadline < tlp->threshold.deadline &&
        deadline < tlp->threshold.preempted) {
        tlp->threshold.preempted = deadline;
        tlp->threshold.call = call;
        update_required = TRUE;
    }
    timer_queue_unlock(timer_longterm_queue);
    simple_unlock(&call->lock);

    if (update_required) {
        timer_call_nosync_cpu(
            master_cpu,
            (void (*)(void *)) timer_longterm_update,
            (void *) tlp);
    }

    return timer_longterm_queue;
}
/*
 * Scan for timers below the longterm threshold.
 * Move these to the local timer queue (of the boot processor on which the
 * calling thread is running).
 * Both the local (boot) queue and the longterm queue are locked.
 * The scan is similar to the timer migrate sequence but is performed by
 * successively examining each timer on the longterm queue:
 *  - if within the short-term threshold,
 *    enter it on the local queue (unless it is being deleted),
 *  - otherwise, if sooner, its deadline becomes the next threshold deadline.
 */
void
timer_longterm_scan(timer_longterm_t    *tlp,
                    uint64_t            now)
{
    queue_entry_t       qe;
    timer_call_t        call;
    uint64_t            threshold;
    uint64_t            deadline;
    mpqueue_head_t      *timer_master_queue;

    assert(!ml_get_interrupts_enabled());
    assert(cpu_number() == master_cpu);

    if (tlp->threshold.interval != TIMER_LONGTERM_NONE)
        threshold = now + tlp->threshold.interval;
    else
        threshold = TIMER_LONGTERM_NONE;

    tlp->threshold.deadline = TIMER_LONGTERM_NONE;
    tlp->threshold.call = NULL;

    if (queue_empty(&timer_longterm_queue->head))
        return;

    timer_master_queue = timer_queue_cpu(master_cpu);
    timer_queue_lock_spin(timer_master_queue);

    qe = queue_first(&timer_longterm_queue->head);
    while (!queue_end(&timer_longterm_queue->head, qe)) {
        call = TIMER_CALL(qe);
        deadline = call->soft_deadline;
        qe = queue_next(qe);
        if (!simple_lock_try(&call->lock)) {
            /* case (2c) lock order inversion, dequeue only */
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                call,
                CE(call)->queue,
                call->lock.interlock.lock_data,
                0x2c, 0);
            timer_call_entry_dequeue_async(call);
            continue;
        }
        if (deadline < threshold) {
            /*
             * This timer needs moving (escalating)
             * to the local (boot) processor's queue.
             */
            if (deadline < now)
                TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                    DECR_TIMER_OVERDUE | DBG_FUNC_NONE,
                    call, deadline, now, threshold, 0);

            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ESCALATE | DBG_FUNC_NONE,
                call,
                CE(call)->deadline,
                CE(call)->entry_time,
                CE(call)->func,
                0);
            tlp->escalates++;
            timer_call_entry_dequeue(call);
            timer_call_entry_enqueue_deadline(
                call, timer_master_queue, CE(call)->deadline);
            /*
             * A side-effect of the following call is to update
             * the actual hardware deadline if required.
             */
            (void) timer_queue_assign(deadline);
        } else {
            if (deadline < tlp->threshold.deadline) {
                tlp->threshold.deadline = deadline;
                tlp->threshold.call = call;
            }
        }
        simple_unlock(&call->lock);
    }

    timer_queue_unlock(timer_master_queue);
}
void
timer_longterm_callout(timer_call_param_t p0, __unused timer_call_param_t p1)
{
    timer_longterm_t    *tlp = (timer_longterm_t *) p0;

    timer_longterm_update(tlp);
}
void
timer_longterm_update_locked(timer_longterm_t *tlp)
{
    uint64_t    latency;

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_UPDATE | DBG_FUNC_START,
        &tlp->queue,
        tlp->threshold.deadline,
        tlp->threshold.preempted,
        tlp->queue.count, 0);

    tlp->scan_time = mach_absolute_time();
    if (tlp->threshold.preempted != TIMER_LONGTERM_NONE) {
        tlp->threshold.preempts++;
        tlp->threshold.deadline = tlp->threshold.preempted;
        tlp->threshold.preempted = TIMER_LONGTERM_NONE;
        /*
         * Note: in the unlikely event that a pre-empted timer has
         * itself been cancelled, we'll simply re-scan later at the
         * time of the preempted/cancelled timer.
         */
    } else {
        tlp->threshold.scans++;

        /*
         * Maintain a moving average of our wakeup latency.
         * Clamp latency to 0 and ignore above threshold interval.
         */
        if (tlp->scan_time > tlp->threshold.deadline_set)
            latency = tlp->scan_time - tlp->threshold.deadline_set;
        else
            latency = 0;
        if (latency < tlp->threshold.interval) {
            tlp->threshold.latency_min =
                MIN(tlp->threshold.latency_min, latency);
            tlp->threshold.latency_max =
                MAX(tlp->threshold.latency_max, latency);
            tlp->threshold.latency =
                (tlp->threshold.latency*99 + latency) / 100;
        }

        timer_longterm_scan(tlp, tlp->scan_time);
    }

    tlp->threshold.deadline_set = tlp->threshold.deadline;
    /* The next deadline timer to be set is adjusted */
    if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
        tlp->threshold.deadline_set -= tlp->threshold.margin;
        tlp->threshold.deadline_set -= tlp->threshold.latency;
    }

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_UPDATE | DBG_FUNC_END,
        &tlp->queue,
        tlp->threshold.deadline,
        tlp->threshold.scans,
        tlp->queue.count, 0);
}
void
timer_longterm_update(timer_longterm_t *tlp)
{
    spl_t s = splclock();

    timer_queue_lock_spin(timer_longterm_queue);

    if (cpu_number() != master_cpu)
        panic("timer_longterm_update_master() on non-boot cpu");

    timer_longterm_update_locked(tlp);

    if (tlp->threshold.deadline != TIMER_LONGTERM_NONE)
        timer_call_enter(
            &tlp->threshold.timer,
            tlp->threshold.deadline_set,
            TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);

    timer_queue_unlock(timer_longterm_queue);
    splx(s);
}
void
timer_longterm_init(void)
{
    uint32_t            longterm;
    timer_longterm_t    *tlp = &timer_longterm;

    DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue);

    /*
     * Set the longterm timer threshold. Defaults to TIMER_LONGTERM_THRESHOLD,
     * or to TIMER_LONGTERM_NONE (disabled) for server;
     * overridden by the "longterm" boot-arg.
     */
    tlp->threshold.interval = serverperfmode ? TIMER_LONGTERM_NONE
                                             : TIMER_LONGTERM_THRESHOLD;
    if (PE_parse_boot_argn("longterm", &longterm, sizeof (longterm))) {
        tlp->threshold.interval = (longterm == 0) ?
                                        TIMER_LONGTERM_NONE :
                                        longterm * NSEC_PER_MSEC;
    }
    if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
        printf("Longterm timer threshold: %llu ms\n",
            tlp->threshold.interval / NSEC_PER_MSEC);
        kprintf("Longterm timer threshold: %llu ms\n",
            tlp->threshold.interval / NSEC_PER_MSEC);
        nanoseconds_to_absolutetime(tlp->threshold.interval,
                                    &tlp->threshold.interval);
        tlp->threshold.margin = tlp->threshold.interval / 10;
        tlp->threshold.latency_min = EndOfAllTime;
        tlp->threshold.latency_max = 0;
    }

    tlp->threshold.preempted = TIMER_LONGTERM_NONE;
    tlp->threshold.deadline = TIMER_LONGTERM_NONE;

    lck_attr_setdefault(&timer_longterm_lck_attr);
    lck_grp_attr_setdefault(&timer_longterm_lck_grp_attr);
    lck_grp_init(&timer_longterm_lck_grp,
                 "timer_longterm", &timer_longterm_lck_grp_attr);
    mpqueue_init(&tlp->queue,
                 &timer_longterm_lck_grp, &timer_longterm_lck_attr);

    timer_call_setup(&tlp->threshold.timer,
                     timer_longterm_callout, (timer_call_param_t) tlp);

    timer_longterm_queue = &tlp->queue;
}
enum {
    THRESHOLD, QCOUNT,
    ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
    LATENCY, LATENCY_MIN, LATENCY_MAX
};

uint64_t
timer_sysctl_get(int oid)
{
    timer_longterm_t    *tlp = &timer_longterm;

    switch (oid) {
    case THRESHOLD:
        return (tlp->threshold.interval == TIMER_LONGTERM_NONE) ?
            0 : tlp->threshold.interval / NSEC_PER_MSEC;
    case QCOUNT:
        return tlp->queue.count;
    case ENQUEUES:
        return tlp->enqueues;
    case DEQUEUES:
        return tlp->dequeues;
    case ESCALATES:
        return tlp->escalates;
    case SCANS:
        return tlp->threshold.scans;
    case PREEMPTS:
        return tlp->threshold.preempts;
    case LATENCY:
        return tlp->threshold.latency;
    case LATENCY_MIN:
        return tlp->threshold.latency_min;
    case LATENCY_MAX:
        return tlp->threshold.latency_max;
    default:
        return 0;
    }
}
/*
 * timer_master_scan() is the inverse of timer_longterm_scan()
 * since it un-escalates timers to the longterm queue.
 */
static void
timer_master_scan(timer_longterm_t      *tlp,
                  uint64_t              now)
{
    queue_entry_t       qe;
    timer_call_t        call;
    uint64_t            threshold;
    uint64_t            deadline;
    mpqueue_head_t      *timer_master_queue;

    if (tlp->threshold.interval != TIMER_LONGTERM_NONE)
        threshold = now + tlp->threshold.interval;
    else
        threshold = TIMER_LONGTERM_NONE;

    timer_master_queue = timer_queue_cpu(master_cpu);
    timer_queue_lock_spin(timer_master_queue);

    qe = queue_first(&timer_master_queue->head);
    while (!queue_end(&timer_master_queue->head, qe)) {
        call = TIMER_CALL(qe);
        deadline = CE(call)->deadline;
        qe = queue_next(qe);
        if ((call->flags & TIMER_CALL_LOCAL) != 0)
            continue;
        if (!simple_lock_try(&call->lock)) {
            /* case (2c) lock order inversion, dequeue only */
            timer_call_entry_dequeue_async(call);
            continue;
        }
        if (deadline > threshold) {
            /* move from master to longterm */
            timer_call_entry_dequeue(call);
            timer_call_entry_enqueue_tail(call, timer_longterm_queue);
            if (deadline < tlp->threshold.deadline) {
                tlp->threshold.deadline = deadline;
                tlp->threshold.call = call;
            }
        }
        simple_unlock(&call->lock);
    }

    timer_queue_unlock(timer_master_queue);
}
static void
timer_sysctl_set_threshold(uint64_t value)
{
    timer_longterm_t    *tlp = &timer_longterm;
    spl_t               s = splclock();
    boolean_t           threshold_increase;

    timer_queue_lock_spin(timer_longterm_queue);

    timer_call_cancel(&tlp->threshold.timer);

    /*
     * Set the new threshold and note whether it's increasing.
     */
    if (value == 0) {
        tlp->threshold.interval = TIMER_LONGTERM_NONE;
        threshold_increase = TRUE;
        timer_call_cancel(&tlp->threshold.timer);
    } else {
        uint64_t old_interval = tlp->threshold.interval;
        tlp->threshold.interval = value * NSEC_PER_MSEC;
        nanoseconds_to_absolutetime(tlp->threshold.interval,
                                    &tlp->threshold.interval);
        tlp->threshold.margin = tlp->threshold.interval / 10;
        if (old_interval == TIMER_LONGTERM_NONE)
            threshold_increase = FALSE;
        else
            threshold_increase = (tlp->threshold.interval > old_interval);
    }

    if (threshold_increase /* or removal */) {
        /* Escalate timers from the longterm queue */
        timer_longterm_scan(tlp, mach_absolute_time());
    } else /* decrease or addition */ {
        /*
         * We scan the local/master queue for timers now longterm.
         * To be strictly correct, we should scan all processor queues
         * but timer migration results in most timers gravitating to the
         * master processor in any case.
         */
        timer_master_scan(tlp, mach_absolute_time());
    }

    /* Set new timer accordingly */
    tlp->threshold.deadline_set = tlp->threshold.deadline;
    if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
        tlp->threshold.deadline_set -= tlp->threshold.margin;
        tlp->threshold.deadline_set -= tlp->threshold.latency;
        timer_call_enter(
            &tlp->threshold.timer,
            tlp->threshold.deadline_set,
            TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
    }

    /* Reset stats */
    tlp->threshold.scans = 0;
    tlp->threshold.preempts = 0;
    tlp->threshold.latency = 0;
    tlp->threshold.latency_min = EndOfAllTime;
    tlp->threshold.latency_max = 0;

    timer_queue_unlock(timer_longterm_queue);
    splx(s);
}
int
timer_sysctl_set(int oid, uint64_t value)
{
    switch (oid) {
    case THRESHOLD:
        timer_call_cpu(
            master_cpu,
            (void (*)(void *)) timer_sysctl_set_threshold,
            (void *) value);
        return KERN_SUCCESS;
    default:
        return KERN_INVALID_ARGUMENT;
    }
}