/*
 * Copyright (c) 1993-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Timer interrupt callout module.
 */
#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/timer_queue.h>
#include <kern/call_entry.h>
#include <kern/thread.h>
#include <kern/policy_internal.h>

#include <sys/kdebug.h>
#if DEBUG
#define TIMER_ASSERT    1
#endif

//#define TIMER_ASSERT  1

#if TIMER_DBG
#define DBG(x...) kprintf("DBG: " x);
#else
#define DBG(x...)
#endif

#if TIMER_TRACE
#define TIMER_KDEBUG_TRACE      KERNEL_DEBUG_CONSTANT_IST
#else
#define TIMER_KDEBUG_TRACE(x...)
#endif
lck_grp_t               timer_call_lck_grp;
lck_attr_t              timer_call_lck_attr;
lck_grp_attr_t          timer_call_lck_grp_attr;

lck_grp_t               timer_longterm_lck_grp;
lck_attr_t              timer_longterm_lck_attr;
lck_grp_attr_t          timer_longterm_lck_grp_attr;
/* Timer queue lock must be acquired with interrupts disabled (under splclock()) */
#if __SMP__
#define timer_queue_lock_spin(queue)    \
    lck_mtx_lock_spin_always(&queue->lock_data)

#define timer_queue_unlock(queue)       \
    lck_mtx_unlock_always(&queue->lock_data)
#else
#define timer_queue_lock_spin(queue)    (void)1
#define timer_queue_unlock(queue)       (void)1
#endif
#define QUEUE(x)        ((queue_t)(x))
#define MPQUEUE(x)      ((mpqueue_head_t *)(x))
#define TIMER_CALL(x)   ((timer_call_t)(x))
#define TCE(x)          (&(x->call_entry))
/*
 * The longterm timer object is a global structure holding all timers
 * beyond the short-term, local timer queue threshold. The boot processor
 * is responsible for moving each timer to its local timer queue
 * if and when that timer becomes due within the threshold.
 */
/* Sentinel for "no time set": */
#define TIMER_LONGTERM_NONE             EndOfAllTime

/* The default threshold is the delta above which a timer is "long-term" */
#if defined(__x86_64__)
#define TIMER_LONGTERM_THRESHOLD        (1ULL * NSEC_PER_SEC)   /* 1 sec */
#else
#define TIMER_LONGTERM_THRESHOLD        TIMER_LONGTERM_NONE     /* disabled */
#endif

/*
 * The scan_limit throttles processing of the longterm queue.
 * If the scan time exceeds this limit, we terminate, unlock
 * and defer for scan_interval. This prevents unbounded holding of
 * timer queue locks with interrupts masked.
 */
#define TIMER_LONGTERM_SCAN_LIMIT       (100ULL * NSEC_PER_USEC)        /* 100 us */
#define TIMER_LONGTERM_SCAN_INTERVAL    (100ULL * NSEC_PER_USEC)        /* 100 us */

/* Sentinel for "scan limit exceeded": */
#define TIMER_LONGTERM_SCAN_AGAIN       0
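/*
 * Illustrative note (derived from the scan/update logic below): when a
 * longterm scan overruns its time_limit, timer_longterm_scan() records
 * TIMER_LONGTERM_SCAN_AGAIN as the threshold deadline and stops early;
 * timer_longterm_update_locked() then clamps the next threshold timer to
 * no sooner than mach_absolute_time() + scan_interval, so a paused scan
 * is retried roughly one scan_interval (100us by default) later.
 */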
typedef struct {
    uint64_t            interval;       /* longterm timer interval */
    uint64_t            margin;         /* fudge factor (10% of interval) */
    uint64_t            deadline;       /* first/soonest longterm deadline */
    uint64_t            preempted;      /* sooner timer has pre-empted */
    timer_call_t        call;           /* first/soonest longterm timer call */
    uint64_t            deadline_set;   /* next timer set */
    timer_call_data_t   timer;          /* timer used by threshold management */

                                        /* Stats: */
    uint64_t            scans;          /*   num threshold timer scans */
    uint64_t            preempts;       /*   num threshold reductions */
    uint64_t            latency;        /*   average threshold latency */
    uint64_t            latency_min;    /*   minimum threshold latency */
    uint64_t            latency_max;    /*   maximum threshold latency */
} threshold_t;

typedef struct {
    mpqueue_head_t      queue;          /* longterm timer list */
    uint64_t            enqueues;       /* num timers queued */
    uint64_t            dequeues;       /* num timers dequeued */
    uint64_t            escalates;      /* num timers becoming shortterm */
    uint64_t            scan_time;      /* last time the list was scanned */
    threshold_t         threshold;      /* longterm timer threshold */
    uint64_t            scan_limit;     /* maximum scan time */
    uint64_t            scan_interval;  /* interval between LT "escalation" scans */
    uint64_t            scan_pauses;    /* num scans exceeding time limit */
} timer_longterm_t;
timer_longterm_t                timer_longterm = {
    .scan_limit = TIMER_LONGTERM_SCAN_LIMIT,
    .scan_interval = TIMER_LONGTERM_SCAN_INTERVAL,
};

static mpqueue_head_t           *timer_longterm_queue = NULL;
static void                     timer_longterm_init(void);
static void                     timer_longterm_callout(
                                    timer_call_param_t      p0,
                                    timer_call_param_t      p1);
extern void                     timer_longterm_scan(
                                    timer_longterm_t        *tlp,
                                    uint64_t                now);
static void                     timer_longterm_update(
                                    timer_longterm_t        *tlp);
static void                     timer_longterm_update_locked(
                                    timer_longterm_t        *tlp);
static mpqueue_head_t *         timer_longterm_enqueue_unlocked(
                                    timer_call_t            call,
                                    uint64_t                now,
                                    uint64_t                deadline,
                                    mpqueue_head_t          **old_queue,
                                    uint64_t                soft_deadline,
                                    uint64_t                ttd,
                                    timer_call_param_t      param1,
                                    uint32_t                callout_flags);
static void                     timer_longterm_dequeued_locked(
                                    timer_call_t            call);

uint64_t past_deadline_timers;
uint64_t past_deadline_deltas;
uint64_t past_deadline_longest;
uint64_t past_deadline_shortest = ~0ULL;

enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000};
uint64_t past_deadline_timer_adjustment;
static boolean_t timer_call_enter_internal(timer_call_t call,
                                           timer_call_param_t param1,
                                           uint64_t deadline,
                                           uint64_t leeway,
                                           uint32_t flags,
                                           boolean_t ratelimited);
boolean_t       mach_timer_coalescing_enabled = TRUE;

mpqueue_head_t  *timer_call_enqueue_deadline_unlocked(
                    timer_call_t            call,
                    mpqueue_head_t          *queue,
                    uint64_t                deadline,
                    uint64_t                soft_deadline,
                    uint64_t                ttd,
                    timer_call_param_t      param1,
                    uint32_t                flags);

mpqueue_head_t  *timer_call_dequeue_unlocked(
                    timer_call_t            call);

timer_coalescing_priority_params_t tcoal_prio_params;
#if TCOAL_PRIO_STATS
int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl;
#define TCOAL_PRIO_STAT(x) (x++)
#else
#define TCOAL_PRIO_STAT(x)
#endif
void
timer_call_init_abstime(void)
{
    int i;
    uint64_t result;
    timer_coalescing_priority_params_ns_t * tcoal_prio_params_init =
        timer_call_get_priority_params();
    nanoseconds_to_absolutetime(PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
    nanoseconds_to_absolutetime(tcoal_prio_params_init->idle_entry_timer_processing_hdeadline_threshold_ns,
        &result);
    tcoal_prio_params.idle_entry_timer_processing_hdeadline_threshold_abstime = (uint32_t)result;
    nanoseconds_to_absolutetime(tcoal_prio_params_init->interrupt_timer_coalescing_ilat_threshold_ns,
        &result);
    tcoal_prio_params.interrupt_timer_coalescing_ilat_threshold_abstime = (uint32_t)result;
    nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_resort_threshold_ns,
        &result);
    tcoal_prio_params.timer_resort_threshold_abstime = (uint32_t)result;
    tcoal_prio_params.timer_coalesce_rt_shift = tcoal_prio_params_init->timer_coalesce_rt_shift;
    tcoal_prio_params.timer_coalesce_bg_shift = tcoal_prio_params_init->timer_coalesce_bg_shift;
    tcoal_prio_params.timer_coalesce_kt_shift = tcoal_prio_params_init->timer_coalesce_kt_shift;
    tcoal_prio_params.timer_coalesce_fp_shift = tcoal_prio_params_init->timer_coalesce_fp_shift;
    tcoal_prio_params.timer_coalesce_ts_shift = tcoal_prio_params_init->timer_coalesce_ts_shift;

    nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_rt_ns_max,
        &tcoal_prio_params.timer_coalesce_rt_abstime_max);
    nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_bg_ns_max,
        &tcoal_prio_params.timer_coalesce_bg_abstime_max);
    nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_kt_ns_max,
        &tcoal_prio_params.timer_coalesce_kt_abstime_max);
    nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_fp_ns_max,
        &tcoal_prio_params.timer_coalesce_fp_abstime_max);
    nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_ts_ns_max,
        &tcoal_prio_params.timer_coalesce_ts_abstime_max);

    for (i = 0; i < NUM_LATENCY_QOS_TIERS; i++) {
        tcoal_prio_params.latency_qos_scale[i] =
            tcoal_prio_params_init->latency_qos_scale[i];
        nanoseconds_to_absolutetime(tcoal_prio_params_init->latency_qos_ns_max[i],
            &tcoal_prio_params.latency_qos_abstime_max[i]);
        tcoal_prio_params.latency_tier_rate_limited[i] =
            tcoal_prio_params_init->latency_tier_rate_limited[i];
    }
}
void
timer_call_init(void)
{
    lck_attr_setdefault(&timer_call_lck_attr);
    lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
    lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);

    timer_longterm_init();
    timer_call_init_abstime();
}
void
timer_call_queue_init(mpqueue_head_t *queue)
{
    DBG("timer_call_queue_init(%p)\n", queue);
    mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr);
}
void
timer_call_setup(
    timer_call_t            call,
    timer_call_func_t       func,
    timer_call_param_t      param0)
{
    DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);
    call_entry_setup(TCE(call), func, param0);
    simple_lock_init(&(call)->lock, 0);
    call->async_dequeue = FALSE;
}
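/*
 * Example usage (illustrative sketch, not from this file): a client embeds a
 * timer_call_data_t, initializes it once with timer_call_setup(), and later
 * arms it with one of the timer_call_enter*() entry points below.
 * my_timer_expire and my_ctx are hypothetical client symbols.
 *
 *      static timer_call_data_t my_timer;
 *      ...
 *      timer_call_setup(&my_timer, my_timer_expire, my_ctx);   // one-time init
 *      uint64_t deadline;
 *      clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);
 *      timer_call_enter(&my_timer, deadline, TIMER_CALL_SYS_NORMAL);
 */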
#if TIMER_ASSERT
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
    timer_call_t        entry)
{
    mpqueue_head_t      *old_queue = MPQUEUE(TCE(entry)->queue);

    if (!hw_lock_held((hw_lock_t)&entry->lock))
        panic("_call_entry_dequeue() "
            "entry %p is not locked\n", entry);
    /*
     * XXX The queue lock is actually a mutex in spin mode
     *     but there's no way to test for it being held
     *     so we pretend it's a spinlock!
     */
    if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
        panic("_call_entry_dequeue() "
            "queue %p is not locked\n", old_queue);

    call_entry_dequeue(TCE(entry));

    return (old_queue);
}

static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
    timer_call_t        entry,
    mpqueue_head_t      *queue,
    uint64_t            deadline)
{
    mpqueue_head_t      *old_queue = MPQUEUE(TCE(entry)->queue);

    if (!hw_lock_held((hw_lock_t)&entry->lock))
        panic("_call_entry_enqueue_deadline() "
            "entry %p is not locked\n", entry);
    /* XXX More lock pretense: */
    if (!hw_lock_held((hw_lock_t)&queue->lock_data))
        panic("_call_entry_enqueue_deadline() "
            "queue %p is not locked\n", queue);
    if (old_queue != NULL && old_queue != queue)
        panic("_call_entry_enqueue_deadline() "
            "old_queue %p != queue", old_queue);

    call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline);

    /* For efficiency, track the earliest soft deadline on the queue, so that
     * fuzzy decisions can be made without lock acquisitions.
     */
    timer_call_t thead = (timer_call_t)queue_first(&queue->head);

    queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline;

    return (old_queue);
}
#else /* TIMER_ASSERT */

static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
    timer_call_t        entry)
{
    mpqueue_head_t      *old_queue = MPQUEUE(TCE(entry)->queue);

    call_entry_dequeue(TCE(entry));

    return (old_queue);
}

static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
    timer_call_t        entry,
    mpqueue_head_t      *queue,
    uint64_t            deadline)
{
    mpqueue_head_t      *old_queue = MPQUEUE(TCE(entry)->queue);

    call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline);

    /* For efficiency, track the earliest soft deadline on the queue,
     * so that fuzzy decisions can be made without lock acquisitions.
     */
    timer_call_t thead = (timer_call_t)queue_first(&queue->head);
    queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline;

    return (old_queue);
}

#endif /* TIMER_ASSERT */
static __inline__ void
timer_call_entry_enqueue_tail(
    timer_call_t        entry,
    mpqueue_head_t      *queue)
{
    call_entry_enqueue_tail(TCE(entry), QUEUE(queue));
}
/*
 * Remove timer entry from its queue but don't change the queue pointer
 * and set the async_dequeue flag. This is locking case 2b.
 */
static __inline__ void
timer_call_entry_dequeue_async(
    timer_call_t        entry)
{
    mpqueue_head_t      *old_queue = MPQUEUE(TCE(entry)->queue);

    if (old_queue) {
        (void) remque(qe(entry));
        entry->async_dequeue = TRUE;
    }
}
unsigned timer_call_enqueue_deadline_unlocked_async1;
unsigned timer_call_enqueue_deadline_unlocked_async2;
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
    timer_call_t            call,
    mpqueue_head_t          *queue,
    uint64_t                deadline,
    uint64_t                soft_deadline,
    uint64_t                ttd,
    timer_call_param_t      param1,
    uint32_t                callout_flags)
{
    call_entry_t    entry = TCE(call);
    mpqueue_head_t  *old_queue;

    DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

    simple_lock(&call->lock);

    old_queue = MPQUEUE(entry->queue);

    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): timer already dequeued, clear flag */
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                call->async_dequeue,
                VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
                0x1c, 0);
            timer_call_enqueue_deadline_unlocked_async1++;
            call->async_dequeue = FALSE;
            entry->queue = NULL;
        } else if (old_queue != queue) {
            timer_call_entry_dequeue(call);
            timer_call_enqueue_deadline_unlocked_async2++;
        }
        if (old_queue == timer_longterm_queue)
            timer_longterm_dequeued_locked(call);
        if (old_queue != queue) {
            timer_queue_unlock(old_queue);
            timer_queue_lock_spin(queue);
        }
    } else {
        timer_queue_lock_spin(queue);
    }

    call->soft_deadline = soft_deadline;
    call->flags = callout_flags;
    TCE(call)->param1 = param1;
    call->ttd = ttd;

    timer_call_entry_enqueue_deadline(call, queue, deadline);
    timer_queue_unlock(queue);
    simple_unlock(&call->lock);

    return (old_queue);
}
unsigned timer_call_dequeue_unlocked_async1;
unsigned timer_call_dequeue_unlocked_async2;
mpqueue_head_t *
timer_call_dequeue_unlocked(
    timer_call_t        call)
{
    call_entry_t    entry = TCE(call);
    mpqueue_head_t  *old_queue;

    DBG("timer_call_dequeue_unlocked(%p)\n", call);

    simple_lock(&call->lock);
    old_queue = MPQUEUE(entry->queue);
    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        call->async_dequeue,
        VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
        0, 0);
    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): timer already dequeued, clear flag */
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                call->async_dequeue,
                VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
                0x1c, 0);
            timer_call_dequeue_unlocked_async1++;
            call->async_dequeue = FALSE;
            entry->queue = NULL;
        } else {
            timer_call_entry_dequeue(call);
        }
        if (old_queue == timer_longterm_queue)
            timer_longterm_dequeued_locked(call);
        timer_queue_unlock(old_queue);
    }
    simple_unlock(&call->lock);
    return (old_queue);
}
uint64_t
past_deadline_timer_handle(uint64_t deadline, uint64_t ctime)
{
    uint64_t delta = (ctime - deadline);

    past_deadline_timers++;
    past_deadline_deltas += delta;
    if (delta > past_deadline_longest)
        past_deadline_longest = delta;
    if (delta < past_deadline_shortest)
        past_deadline_shortest = delta;

    return (ctime + past_deadline_timer_adjustment);
}
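/*
 * Worked example (illustrative): with PAST_DEADLINE_TIMER_ADJUSTMENT_NS of
 * 10us, a timer found 3us past due (delta = 3us) bumps the stats above and
 * is re-armed at ctime + ~10us instead of being programmed with a deadline
 * that is already in the past.
 */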
/*
 * Timer call entry locking model
 * ==============================
 *
 * Timer call entries are linked on per-cpu timer queues which are protected
 * by the queue lock and the call entry lock. The locking protocol is:
 *
 *  0) The canonical locking order is timer call entry followed by queue.
 *
 *  1) With only the entry lock held, entry.queue is valid:
 *    1a) NULL: the entry is not queued, or
 *    1b) non-NULL: this queue must be locked before the entry is modified.
 *        After locking the queue, the call.async_dequeue flag must be checked:
 *    1c) TRUE: the entry was removed from the queue by another thread
 *        and we must NULL the entry.queue and reset this flag, or
 *    1d) FALSE: (ie. queued), the entry can be manipulated.
 *
 *  2) If a queue lock is obtained first, the queue is stable:
 *    2a) If a try-lock of a queued entry succeeds, the call can be operated on
 *        and dequeued.
 *    2b) If a try-lock fails, it indicates that another thread is attempting
 *        to change the entry and move it to a different position in this queue
 *        or to different queue. The entry can be dequeued but it should not be
 *        operated upon since it is being changed. Furthermore, we don't null
 *        the entry.queue pointer (protected by the entry lock we don't own).
 *        Instead, we set the async_dequeue flag -- see (1c).
 *    2c) Same as 2b but occurring when a longterm timer is matured.
 *  3) A callout's parameters (deadline, flags, parameters, soft deadline &c.)
 *     should be manipulated with the appropriate timer queue lock held,
 *     to prevent queue traversal observations from observing inconsistent
 *     updates to an in-flight callout.
 */
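/*
 * Illustrative sketch of cases (1b)/(1c) above, mirroring the pattern used by
 * timer_call_enqueue_deadline_unlocked() below (not a separate code path):
 *
 *      simple_lock(&call->lock);                  // entry lock first (0)
 *      old_queue = MPQUEUE(TCE(call)->queue);
 *      if (old_queue != NULL) {                   // (1b) lock that queue next
 *              timer_queue_lock_spin(old_queue);
 *              if (call->async_dequeue) {         // (1c) raced with 2b/2c
 *                      call->async_dequeue = FALSE;
 *                      TCE(call)->queue = NULL;
 *              }
 *              ...
 *      }
 */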
/*
 * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline()
 * cast between pointer types (mpqueue_head_t *) and (queue_t) so that
 * we can use the call_entry_dequeue() and call_entry_enqueue_deadline()
 * methods to operate on timer_call structs as if they are call_entry structs.
 * These structures are identical except for their queue head pointer fields.
 *
 * In the debug case, we assert that the timer call locking protocol
 * is being maintained.
 */
static boolean_t
timer_call_enter_internal(
    timer_call_t            call,
    timer_call_param_t      param1,
    uint64_t                deadline,
    uint64_t                leeway,
    uint32_t                flags,
    boolean_t               ratelimited)
{
    mpqueue_head_t          *queue = NULL;
    mpqueue_head_t          *old_queue;
    spl_t                   s;
    uint64_t                slop;
    uint32_t                urgency;
    uint64_t                sdeadline, ttd;

    assert(call->call_entry.func != NULL);
    s = splclock();

    sdeadline = deadline;
    uint64_t ctime = mach_absolute_time();

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_ENTER | DBG_FUNC_START,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        VM_KERNEL_ADDRHIDE(param1), deadline, flags, 0);

    urgency = (flags & TIMER_CALL_URGENCY_MASK);

    boolean_t slop_ratelimited = FALSE;
    slop = timer_call_slop(deadline, ctime, urgency, current_thread(), &slop_ratelimited);

    if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop)
        slop = leeway;

    if (UINT64_MAX - deadline <= slop) {
        deadline = UINT64_MAX;
    } else {
        deadline += slop;
    }

    if (__improbable(deadline < ctime)) {
        deadline = past_deadline_timer_handle(deadline, ctime);
        sdeadline = deadline;
    }

    if (ratelimited || slop_ratelimited) {
        flags |= TIMER_CALL_RATELIMITED;
    } else {
        flags &= ~TIMER_CALL_RATELIMITED;
    }

    ttd = sdeadline - ctime;
    DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func,
        timer_call_param_t, TCE(call)->param0, uint32_t, flags,
        (deadline - sdeadline),
        (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call);

    /* Program timer callout parameters under the appropriate per-CPU or
     * longterm queue lock. The callout may have been previously enqueued
     * and in-flight on this or another timer queue.
     */
    if (!ratelimited && !slop_ratelimited) {
        queue = timer_longterm_enqueue_unlocked(call, ctime, deadline, &old_queue, sdeadline, ttd, param1, flags);
    }

    if (queue == NULL) {
        queue = timer_queue_assign(deadline);
        old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline, sdeadline, ttd, param1, flags);
    }

    TCE(call)->entry_time = ctime;

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_ENTER | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        (old_queue != NULL), deadline, queue->count, 0);

    splx(s);

    return (old_queue != NULL);
}
/*
 * timer_call_*()
 *      return boolean indicating whether the call was previously queued.
 */
boolean_t
timer_call_enter(
    timer_call_t        call,
    uint64_t            deadline,
    uint32_t            flags)
{
    return timer_call_enter_internal(call, NULL, deadline, 0, flags, FALSE);
}
boolean_t
timer_call_enter1(
    timer_call_t        call,
    timer_call_param_t  param1,
    uint64_t            deadline,
    uint32_t            flags)
{
    return timer_call_enter_internal(call, param1, deadline, 0, flags, FALSE);
}
boolean_t
timer_call_enter_with_leeway(
    timer_call_t        call,
    timer_call_param_t  param1,
    uint64_t            deadline,
    uint64_t            leeway,
    uint32_t            flags,
    boolean_t           ratelimited)
{
    return timer_call_enter_internal(call, param1, deadline, leeway, flags, ratelimited);
}
boolean_t
timer_call_quantum_timer_enter(
    timer_call_t        call,
    timer_call_param_t  param1,
    uint64_t            deadline,
    uint64_t            ctime)
{
    assert(call->call_entry.func != NULL);
    assert(ml_get_interrupts_enabled() == FALSE);

    uint32_t flags = TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL;

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_START,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        VM_KERNEL_ADDRHIDE(param1), deadline,
        flags, 0);

    if (__improbable(deadline < ctime)) {
        deadline = past_deadline_timer_handle(deadline, ctime);
    }

    uint64_t ttd = deadline - ctime;
    DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func,
        timer_call_param_t, TCE(call)->param0, uint32_t, flags, 0,
        (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call);

    quantum_timer_set_deadline(deadline);
    TCE(call)->deadline = deadline;
    TCE(call)->param1 = param1;
    call->ttd = ttd;
    call->flags = flags;

    TCE(call)->entry_time = ctime;

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        1, deadline, 0, 0);

    return true;
}
boolean_t
timer_call_quantum_timer_cancel(
    timer_call_t        call)
{
    assert(ml_get_interrupts_enabled() == FALSE);

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_CANCEL | DBG_FUNC_START,
        VM_KERNEL_UNSLIDE_OR_PERM(call), TCE(call)->deadline,
        0, call->flags, 0);

    TCE(call)->deadline = 0;
    quantum_timer_set_deadline(0);

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_CANCEL | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE_OR_PERM(call), 0,
        TCE(call)->deadline - mach_absolute_time(),
        TCE(call)->deadline - TCE(call)->entry_time, 0);

    DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func,
        timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0,
        (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));

    return true;
}
boolean_t
timer_call_cancel(
    timer_call_t        call)
{
    mpqueue_head_t      *old_queue;
    spl_t               s;

    s = splclock();

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_CANCEL | DBG_FUNC_START,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        TCE(call)->deadline, call->soft_deadline, call->flags, 0);

    old_queue = timer_call_dequeue_unlocked(call);

    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (!queue_empty(&old_queue->head)) {
            timer_queue_cancel(old_queue, TCE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
            timer_call_t thead = (timer_call_t)queue_first(&old_queue->head);
            old_queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline;
        } else {
            timer_queue_cancel(old_queue, TCE(call)->deadline, UINT64_MAX);
            old_queue->earliest_soft_deadline = UINT64_MAX;
        }
        timer_queue_unlock(old_queue);
    }
    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_CANCEL | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        VM_KERNEL_UNSLIDE_OR_PERM(old_queue),
        TCE(call)->deadline - mach_absolute_time(),
        TCE(call)->deadline - TCE(call)->entry_time, 0);
    splx(s);

    DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func,
        timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0,
        (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));

    return (old_queue != NULL);
}
static uint32_t timer_queue_shutdown_lock_skips;
static uint32_t timer_queue_shutdown_discarded;
void
timer_queue_shutdown(
    mpqueue_head_t      *queue)
{
    timer_call_t        call;
    mpqueue_head_t      *new_queue;
    spl_t               s;

    DBG("timer_queue_shutdown(%p)\n", queue);

    s = splclock();

    /* Note comma operator in while expression re-locking each iteration */
    while ((void)timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
        call = TIMER_CALL(queue_first(&queue->head));

        if (!simple_lock_try(&call->lock)) {
            /*
             * case (2b) lock order inversion, dequeue and skip
             * Don't change the call_entry queue back-pointer
             * but set the async_dequeue field.
             */
            timer_queue_shutdown_lock_skips++;
            timer_call_entry_dequeue_async(call);
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                call->async_dequeue,
                VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
                0x2b, 0);
            timer_queue_unlock(queue);
            continue;
        }

        boolean_t call_local = ((call->flags & TIMER_CALL_LOCAL) != 0);

        /* remove entry from old queue */
        timer_call_entry_dequeue(call);
        timer_queue_unlock(queue);

        if (call_local == FALSE) {
            /* and queue it on new, discarding LOCAL timers */
            new_queue = timer_queue_assign(TCE(call)->deadline);
            timer_queue_lock_spin(new_queue);
            timer_call_entry_enqueue_deadline(
                call, new_queue, TCE(call)->deadline);
            timer_queue_unlock(new_queue);
        } else {
            timer_queue_shutdown_discarded++;
        }

        assert(call_local == FALSE);
        simple_unlock(&call->lock);
    }

    timer_queue_unlock(queue);
    splx(s);
}
void
quantum_timer_expire(
    uint64_t            deadline)
{
    processor_t processor = current_processor();
    timer_call_t call = TIMER_CALL(&(processor->quantum_timer));

    if (__improbable(TCE(call)->deadline > deadline))
        panic("CPU quantum timer deadline out of sync with timer call deadline");

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        call->soft_deadline,
        TCE(call)->deadline,
        TCE(call)->entry_time, 0);

    timer_call_func_t func = TCE(call)->func;
    timer_call_param_t param0 = TCE(call)->param0;
    timer_call_param_t param1 = TCE(call)->param1;

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_CALLOUT | DBG_FUNC_START,
        VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func),
        VM_KERNEL_ADDRHIDE(param0),
        VM_KERNEL_ADDRHIDE(param1),
        0);

    DTRACE_TMR7(callout__start, timer_call_func_t, func,
        timer_call_param_t, param0, unsigned, call->flags,
        0, (call->ttd >> 32),
        (unsigned) (call->ttd & 0xFFFFFFFF), call);

    (*func)(param0, param1);

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_CALLOUT | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func),
        VM_KERNEL_ADDRHIDE(param0),
        VM_KERNEL_ADDRHIDE(param1),
        0);
}
;
940 timer_queue_expire_with_options(
941 mpqueue_head_t
*queue
,
945 timer_call_t call
= NULL
;
946 uint32_t tc_iterations
= 0;
947 DBG("timer_queue_expire(%p,)\n", queue
);
949 uint64_t cur_deadline
= deadline
;
950 timer_queue_lock_spin(queue
);
952 while (!queue_empty(&queue
->head
)) {
953 /* Upon processing one or more timer calls, refresh the
954 * deadline to account for time elapsed in the callout
956 if (++tc_iterations
> 1)
957 cur_deadline
= mach_absolute_time();
960 call
= TIMER_CALL(queue_first(&queue
->head
));
962 if (call
->soft_deadline
<= cur_deadline
) {
963 timer_call_func_t func
;
964 timer_call_param_t param0
, param1
;
966 TCOAL_DEBUG(0xDDDD0000, queue
->earliest_soft_deadline
, call
->soft_deadline
, 0, 0, 0);
967 TIMER_KDEBUG_TRACE(KDEBUG_TRACE
,
968 DECR_TIMER_EXPIRE
| DBG_FUNC_NONE
,
969 VM_KERNEL_UNSLIDE_OR_PERM(call
),
972 TCE(call
)->entry_time
, 0);
974 if ((call
->flags
& TIMER_CALL_RATELIMITED
) &&
975 (TCE(call
)->deadline
> cur_deadline
)) {
980 if (!simple_lock_try(&call
->lock
)) {
981 /* case (2b) lock inversion, dequeue and skip */
982 timer_queue_expire_lock_skips
++;
983 timer_call_entry_dequeue_async(call
);
988 timer_call_entry_dequeue(call
);
990 func
= TCE(call
)->func
;
991 param0
= TCE(call
)->param0
;
992 param1
= TCE(call
)->param1
;
994 simple_unlock(&call
->lock
);
995 timer_queue_unlock(queue
);
997 TIMER_KDEBUG_TRACE(KDEBUG_TRACE
,
998 DECR_TIMER_CALLOUT
| DBG_FUNC_START
,
999 VM_KERNEL_UNSLIDE_OR_PERM(call
), VM_KERNEL_UNSLIDE(func
),
1000 VM_KERNEL_ADDRHIDE(param0
),
1001 VM_KERNEL_ADDRHIDE(param1
),
1005 DTRACE_TMR7(callout__start
, timer_call_func_t
, func
,
1006 timer_call_param_t
, param0
, unsigned, call
->flags
,
1007 0, (call
->ttd
>> 32),
1008 (unsigned) (call
->ttd
& 0xFFFFFFFF), call
);
1010 /* Maintain time-to-deadline in per-processor data
1011 * structure for thread wakeup deadline statistics.
1013 uint64_t *ttdp
= &(PROCESSOR_DATA(current_processor(), timer_call_ttd
));
1015 (*func
)(param0
, param1
);
1018 DTRACE_TMR4(callout__end
, timer_call_func_t
, func
,
1019 param0
, param1
, call
);
1022 TIMER_KDEBUG_TRACE(KDEBUG_TRACE
,
1023 DECR_TIMER_CALLOUT
| DBG_FUNC_END
,
1024 VM_KERNEL_UNSLIDE_OR_PERM(call
), VM_KERNEL_UNSLIDE(func
),
1025 VM_KERNEL_ADDRHIDE(param0
),
1026 VM_KERNEL_ADDRHIDE(param1
),
1029 timer_queue_lock_spin(queue
);
1031 if (__probable(rescan
== FALSE
)) {
1034 int64_t skew
= TCE(call
)->deadline
- call
->soft_deadline
;
1035 assert(TCE(call
)->deadline
>= call
->soft_deadline
);
1037 /* DRK: On a latency quality-of-service level change,
1038 * re-sort potentially rate-limited timers. The platform
1039 * layer determines which timers require
1040 * this. In the absence of the per-callout
1041 * synchronization requirement, a global resort could
1042 * be more efficient. The re-sort effectively
1043 * annuls all timer adjustments, i.e. the "soft
1044 * deadline" is the sort key.
1047 if (timer_resort_threshold(skew
)) {
1048 if (__probable(simple_lock_try(&call
->lock
))) {
1049 timer_call_entry_dequeue(call
);
1050 timer_call_entry_enqueue_deadline(call
, queue
, call
->soft_deadline
);
1051 simple_unlock(&call
->lock
);
1056 call
= TIMER_CALL(queue_next(qe(call
)));
1057 if (queue_end(&queue
->head
, qe(call
)))
1064 if (!queue_empty(&queue
->head
)) {
1065 call
= TIMER_CALL(queue_first(&queue
->head
));
1066 cur_deadline
= TCE(call
)->deadline
;
1067 queue
->earliest_soft_deadline
= (call
->flags
& TIMER_CALL_RATELIMITED
) ? TCE(call
)->deadline
: call
->soft_deadline
;
1069 queue
->earliest_soft_deadline
= cur_deadline
= UINT64_MAX
;
1072 timer_queue_unlock(queue
);
1074 return (cur_deadline
);
uint64_t
timer_queue_expire(
    mpqueue_head_t      *queue,
    uint64_t            deadline)
{
    return timer_queue_expire_with_options(queue, deadline, FALSE);
}
extern int serverperfmode;
static uint32_t timer_queue_migrate_lock_skips;
/*
 * timer_queue_migrate() is called by timer_queue_migrate_cpu()
 * to move timer requests from the local processor (queue_from)
 * to a target processor's (queue_to).
 */
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
    timer_call_t    call;
    timer_call_t    head_to;
    int             timers_migrated = 0;

    DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

    assert(!ml_get_interrupts_enabled());
    assert(queue_from != queue_to);

    if (serverperfmode) {
        /*
         * if we're running a high end server
         * avoid migrations... they add latency
         * and don't save us power under typical
         * server workloads
         */
        return -4;
    }

    /*
     * Take both local (from) and target (to) timer queue locks while
     * moving the timers from the local queue to the target processor.
     * We assume that the target is always the boot processor.
     * But only move if all of the following is true:
     *  - the target queue is non-empty
     *  - the local queue is non-empty
     *  - the local queue's first deadline is later than the target's
     *  - the local queue contains no non-migrateable "local" call
     * so that we need not have the target resync.
     */

    timer_queue_lock_spin(queue_to);

    head_to = TIMER_CALL(queue_first(&queue_to->head));
    if (queue_empty(&queue_to->head)) {
        timers_migrated = -1;
        goto abort1;
    }

    timer_queue_lock_spin(queue_from);

    if (queue_empty(&queue_from->head)) {
        timers_migrated = -2;
        goto abort2;
    }

    call = TIMER_CALL(queue_first(&queue_from->head));
    if (TCE(call)->deadline < TCE(head_to)->deadline) {
        timers_migrated = 0;
        goto abort2;
    }

    /* perform scan for non-migratable timers */
    do {
        if (call->flags & TIMER_CALL_LOCAL) {
            timers_migrated = -3;
            goto abort2;
        }
        call = TIMER_CALL(queue_next(qe(call)));
    } while (!queue_end(&queue_from->head, qe(call)));

    /* migration loop itself -- both queues are locked */
    while (!queue_empty(&queue_from->head)) {
        call = TIMER_CALL(queue_first(&queue_from->head));
        if (!simple_lock_try(&call->lock)) {
            /* case (2b) lock order inversion, dequeue only */
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
                VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data),
                0x2b, 0);
            timer_queue_migrate_lock_skips++;
            timer_call_entry_dequeue_async(call);
            continue;
        }
        timer_call_entry_dequeue(call);
        timer_call_entry_enqueue_deadline(
            call, queue_to, TCE(call)->deadline);
        timers_migrated++;
        simple_unlock(&call->lock);
    }
    queue_from->earliest_soft_deadline = UINT64_MAX;
abort2:
    timer_queue_unlock(queue_from);
abort1:
    timer_queue_unlock(queue_to);

    return timers_migrated;
}
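/*
 * Return value summary (per the assignments above): a negative value means no
 * migration was attempted (-1: target queue empty, -2: local queue empty,
 * -3: a TIMER_CALL_LOCAL timer present, -4: serverperfmode); 0 means the local
 * queue's first deadline was already earlier than the target's; a positive
 * value is the number of timers moved.
 */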
void
timer_queue_trace_cpu(int ncpu)
{
    timer_call_nosync_cpu(
        ncpu,
        (void(*)(void *))timer_queue_trace,
        (void*) timer_queue_cpu(ncpu));
}
void
timer_queue_trace(
    mpqueue_head_t      *queue)
{
    timer_call_t        call;
    spl_t               s;

    s = splclock();
    timer_queue_lock_spin(queue);

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_QUEUE | DBG_FUNC_START,
        queue->count, mach_absolute_time(), 0, 0, 0);

    if (!queue_empty(&queue->head)) {
        call = TIMER_CALL(queue_first(&queue->head));
        do {
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_QUEUE | DBG_FUNC_NONE,
                call->soft_deadline,
                TCE(call)->deadline,
                TCE(call)->entry_time,
                VM_KERNEL_UNSLIDE(TCE(call)->func),
                0);
            call = TIMER_CALL(queue_next(qe(call)));
        } while (!queue_end(&queue->head, qe(call)));
    }

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_QUEUE | DBG_FUNC_END,
        queue->count, mach_absolute_time(), 0, 0, 0);

    timer_queue_unlock(queue);
    splx(s);
}
void
timer_longterm_dequeued_locked(timer_call_t call)
{
    timer_longterm_t    *tlp = &timer_longterm;

    tlp->dequeues++;
    if (call == tlp->threshold.call)
        tlp->threshold.call = NULL;
}
/*
 * Place a timer call in the longterm list
 * and adjust the next timer callout deadline if the new timer is first.
 */
mpqueue_head_t *
timer_longterm_enqueue_unlocked(timer_call_t    call,
                                uint64_t        now,
                                uint64_t        deadline,
                                mpqueue_head_t  **old_queue,
                                uint64_t        soft_deadline,
                                uint64_t        ttd,
                                timer_call_param_t      param1,
                                uint32_t        callout_flags)
{
    timer_longterm_t    *tlp = &timer_longterm;
    boolean_t           update_required = FALSE;
    uint64_t            longterm_threshold;

    longterm_threshold = now + tlp->threshold.interval;

    /*
     * Return NULL without doing anything if:
     *  - this timer is local, or
     *  - the longterm mechanism is disabled, or
     *  - this deadline is too short.
     */
    if ((callout_flags & TIMER_CALL_LOCAL) != 0 ||
        (tlp->threshold.interval == TIMER_LONGTERM_NONE) ||
        (deadline <= longterm_threshold))
        return NULL;

    /*
     * Remove timer from its current queue, if any.
     */
    *old_queue = timer_call_dequeue_unlocked(call);

    /*
     * Lock the longterm queue, queue timer and determine
     * whether an update is necessary.
     */
    assert(!ml_get_interrupts_enabled());
    simple_lock(&call->lock);
    timer_queue_lock_spin(timer_longterm_queue);
    TCE(call)->deadline = deadline;
    TCE(call)->param1 = param1;
    call->ttd = ttd;
    call->soft_deadline = soft_deadline;
    call->flags = callout_flags;
    timer_call_entry_enqueue_tail(call, timer_longterm_queue);

    tlp->enqueues++;

    /*
     * We'll need to update the currently set threshold timer
     * if the new deadline is sooner and no sooner update is in flight.
     */
    if (deadline < tlp->threshold.deadline &&
        deadline < tlp->threshold.preempted) {
        tlp->threshold.preempted = deadline;
        tlp->threshold.call = call;
        update_required = TRUE;
    }
    timer_queue_unlock(timer_longterm_queue);
    simple_unlock(&call->lock);

    if (update_required) {
        /*
         * Note: this call expects that calling the master cpu
         * alone does not involve locking the topo lock.
         */
        timer_call_nosync_cpu(
            master_cpu,
            (void (*)(void *)) timer_longterm_update,
            (void *)tlp);
    }

    return timer_longterm_queue;
}
/*
 * Scan for timers below the longterm threshold.
 * Move these to the local timer queue (of the boot processor on which the
 * calling thread is running).
 * Both the local (boot) queue and the longterm queue are locked.
 * The scan is similar to the timer migrate sequence but is performed by
 * successively examining each timer on the longterm queue:
 *  - if within the short-term threshold
 *    - enter on the local queue (unless being deleted),
 *  - otherwise:
 *    - if sooner, deadline becomes the next threshold deadline.
 * The total scan time is limited to TIMER_LONGTERM_SCAN_LIMIT. Should this be
 * exceeded, we abort and reschedule again so that we don't shut others from
 * the timer queues. Longterm timers firing late is not critical.
 */
void
timer_longterm_scan(timer_longterm_t    *tlp,
                    uint64_t            time_start)
{
    queue_entry_t       qe;
    timer_call_t        call;
    uint64_t            threshold = TIMER_LONGTERM_NONE;
    uint64_t            deadline;
    uint64_t            time_limit = time_start + tlp->scan_limit;
    mpqueue_head_t      *timer_master_queue;

    assert(!ml_get_interrupts_enabled());
    assert(cpu_number() == master_cpu);

    if (tlp->threshold.interval != TIMER_LONGTERM_NONE)
        threshold = time_start + tlp->threshold.interval;

    tlp->threshold.deadline = TIMER_LONGTERM_NONE;
    tlp->threshold.call = NULL;

    if (queue_empty(&timer_longterm_queue->head))
        return;

    timer_master_queue = timer_queue_cpu(master_cpu);
    timer_queue_lock_spin(timer_master_queue);

    qe = queue_first(&timer_longterm_queue->head);
    while (!queue_end(&timer_longterm_queue->head, qe)) {
        call = TIMER_CALL(qe);
        deadline = call->soft_deadline;
        qe = queue_next(qe);
        if (!simple_lock_try(&call->lock)) {
            /* case (2c) lock order inversion, dequeue only */
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
                VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data),
                0x2c, 0);
            timer_call_entry_dequeue_async(call);
            continue;
        }
        if (deadline < threshold) {
            /*
             * This timer needs moving (escalating)
             * to the local (boot) processor's queue.
             */
            if (deadline < time_start)
                TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                    DECR_TIMER_OVERDUE | DBG_FUNC_NONE,
                    VM_KERNEL_UNSLIDE_OR_PERM(call),
                    deadline,
                    time_start,
                    threshold,
                    0);

            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ESCALATE | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                TCE(call)->deadline,
                TCE(call)->entry_time,
                VM_KERNEL_UNSLIDE(TCE(call)->func),
                0);
            tlp->escalates++;
            timer_call_entry_dequeue(call);
            timer_call_entry_enqueue_deadline(
                call, timer_master_queue, TCE(call)->deadline);
            /*
             * A side-effect of the following call is to update
             * the actual hardware deadline if required.
             */
            (void) timer_queue_assign(deadline);
        } else {
            if (deadline < tlp->threshold.deadline) {
                tlp->threshold.deadline = deadline;
                tlp->threshold.call = call;
            }
        }
        simple_unlock(&call->lock);

        /* Abort scan if we're taking too long. */
        if (mach_absolute_time() > time_limit) {
            tlp->threshold.deadline = TIMER_LONGTERM_SCAN_AGAIN;
            tlp->scan_pauses++;
            DBG("timer_longterm_scan() paused %llu, qlen: %llu\n",
                time_limit, tlp->queue.count);
            break;
        }
    }

    timer_queue_unlock(timer_master_queue);
}
void
timer_longterm_callout(timer_call_param_t p0, __unused timer_call_param_t p1)
{
    timer_longterm_t    *tlp = (timer_longterm_t *) p0;

    timer_longterm_update(tlp);
}
void
timer_longterm_update_locked(timer_longterm_t *tlp)
{
    uint64_t    latency;

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_UPDATE | DBG_FUNC_START,
        VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue),
        tlp->threshold.deadline,
        tlp->threshold.preempted,
        tlp->queue.count, 0);

    tlp->scan_time = mach_absolute_time();
    if (tlp->threshold.preempted != TIMER_LONGTERM_NONE) {
        tlp->threshold.preempts++;
        tlp->threshold.deadline = tlp->threshold.preempted;
        tlp->threshold.preempted = TIMER_LONGTERM_NONE;
        /*
         * Note: in the unlikely event that a pre-empted timer has
         * itself been cancelled, we'll simply re-scan later at the
         * time of the preempted/cancelled timer.
         */
    } else {
        tlp->threshold.scans++;

        /*
         * Maintain a moving average of our wakeup latency.
         * Clamp latency to 0 and ignore above threshold interval.
         */
        if (tlp->scan_time > tlp->threshold.deadline_set)
            latency = tlp->scan_time - tlp->threshold.deadline_set;
        else
            latency = 0;
        if (latency < tlp->threshold.interval) {
            tlp->threshold.latency_min =
                MIN(tlp->threshold.latency_min, latency);
            tlp->threshold.latency_max =
                MAX(tlp->threshold.latency_max, latency);
            tlp->threshold.latency =
                (tlp->threshold.latency*99 + latency) / 100;
        }

        timer_longterm_scan(tlp, tlp->scan_time);
    }

    tlp->threshold.deadline_set = tlp->threshold.deadline;
    /* The next deadline timer to be set is adjusted */
    if (tlp->threshold.deadline != TIMER_LONGTERM_NONE &&
        tlp->threshold.deadline != TIMER_LONGTERM_SCAN_AGAIN) {
        tlp->threshold.deadline_set -= tlp->threshold.margin;
        tlp->threshold.deadline_set -= tlp->threshold.latency;
    }

    /* Throttle next scan time */
    uint64_t scan_clamp = mach_absolute_time() + tlp->scan_interval;
    if (tlp->threshold.deadline_set < scan_clamp)
        tlp->threshold.deadline_set = scan_clamp;

    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_UPDATE | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue),
        tlp->threshold.deadline,
        tlp->threshold.scans,
        tlp->queue.count, 0);
}
void
timer_longterm_update(timer_longterm_t *tlp)
{
    spl_t   s = splclock();

    timer_queue_lock_spin(timer_longterm_queue);

    if (cpu_number() != master_cpu)
        panic("timer_longterm_update_master() on non-boot cpu");

    timer_longterm_update_locked(tlp);

    if (tlp->threshold.deadline != TIMER_LONGTERM_NONE)
        timer_call_enter(
            &tlp->threshold.timer,
            tlp->threshold.deadline_set,
            TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);

    timer_queue_unlock(timer_longterm_queue);
    splx(s);
}
void
timer_longterm_init(void)
{
    uint32_t            longterm;
    timer_longterm_t    *tlp = &timer_longterm;

    DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue);

    /*
     * Set the longterm timer threshold. Defaults to TIMER_LONGTERM_THRESHOLD
     * or TIMER_LONGTERM_NONE (disabled) for server;
     * overridden by the "longterm" boot-arg.
     */
    tlp->threshold.interval = serverperfmode ? TIMER_LONGTERM_NONE
                                             : TIMER_LONGTERM_THRESHOLD;
    if (PE_parse_boot_argn("longterm", &longterm, sizeof (longterm))) {
        tlp->threshold.interval = (longterm == 0) ?
                                        TIMER_LONGTERM_NONE :
                                        longterm * NSEC_PER_MSEC;
    }
    if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
        printf("Longterm timer threshold: %llu ms\n",
            tlp->threshold.interval / NSEC_PER_MSEC);
        kprintf("Longterm timer threshold: %llu ms\n",
            tlp->threshold.interval / NSEC_PER_MSEC);
        nanoseconds_to_absolutetime(tlp->threshold.interval,
            &tlp->threshold.interval);
        tlp->threshold.margin = tlp->threshold.interval / 10;
        tlp->threshold.latency_min = EndOfAllTime;
        tlp->threshold.latency_max = 0;
    }

    tlp->threshold.preempted = TIMER_LONGTERM_NONE;
    tlp->threshold.deadline = TIMER_LONGTERM_NONE;

    lck_attr_setdefault(&timer_longterm_lck_attr);
    lck_grp_attr_setdefault(&timer_longterm_lck_grp_attr);
    lck_grp_init(&timer_longterm_lck_grp,
        "timer_longterm", &timer_longterm_lck_grp_attr);
    mpqueue_init(&tlp->queue,
        &timer_longterm_lck_grp, &timer_longterm_lck_attr);

    timer_call_setup(&tlp->threshold.timer,
        timer_longterm_callout, (timer_call_param_t) tlp);

    timer_longterm_queue = &tlp->queue;
}
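/*
 * Example (per the boot-arg parsing above): booting with "longterm=5000"
 * sets the threshold to 5000 ms, so only timers more than ~5 s out are
 * parked on the longterm queue; "longterm=0" disables the longterm
 * mechanism entirely.
 */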
enum {
    THRESHOLD, QCOUNT,
    ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
    LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
};
uint64_t
timer_sysctl_get(int oid)
{
    timer_longterm_t    *tlp = &timer_longterm;

    switch (oid) {
    case THRESHOLD:
        return (tlp->threshold.interval == TIMER_LONGTERM_NONE) ?
            0 : tlp->threshold.interval / NSEC_PER_MSEC;
    case QCOUNT:
        return tlp->queue.count;
    case ENQUEUES:
        return tlp->enqueues;
    case DEQUEUES:
        return tlp->dequeues;
    case ESCALATES:
        return tlp->escalates;
    case SCANS:
        return tlp->threshold.scans;
    case PREEMPTS:
        return tlp->threshold.preempts;
    case LATENCY:
        return tlp->threshold.latency;
    case LATENCY_MIN:
        return tlp->threshold.latency_min;
    case LATENCY_MAX:
        return tlp->threshold.latency_max;
    case SCAN_LIMIT:
        return tlp->scan_limit;
    case SCAN_INTERVAL:
        return tlp->scan_interval;
    case PAUSES:
        return tlp->scan_pauses;
    default:
        return 0;
    }
}
/*
 * timer_master_scan() is the inverse of timer_longterm_scan()
 * since it un-escalates timers to the longterm queue.
 */
static void
timer_master_scan(timer_longterm_t      *tlp,
                  uint64_t              now)
{
    queue_entry_t       qe;
    timer_call_t        call;
    uint64_t            threshold;
    uint64_t            deadline;
    mpqueue_head_t      *timer_master_queue;

    if (tlp->threshold.interval != TIMER_LONGTERM_NONE)
        threshold = now + tlp->threshold.interval;
    else
        threshold = TIMER_LONGTERM_NONE;

    timer_master_queue = timer_queue_cpu(master_cpu);
    timer_queue_lock_spin(timer_master_queue);

    qe = queue_first(&timer_master_queue->head);
    while (!queue_end(&timer_master_queue->head, qe)) {
        call = TIMER_CALL(qe);
        deadline = TCE(call)->deadline;
        qe = queue_next(qe);
        if ((call->flags & TIMER_CALL_LOCAL) != 0)
            continue;
        if (!simple_lock_try(&call->lock)) {
            /* case (2c) lock order inversion, dequeue only */
            timer_call_entry_dequeue_async(call);
            continue;
        }
        if (deadline > threshold) {
            /* move from master to longterm */
            timer_call_entry_dequeue(call);
            timer_call_entry_enqueue_tail(call, timer_longterm_queue);
            if (deadline < tlp->threshold.deadline) {
                tlp->threshold.deadline = deadline;
                tlp->threshold.call = call;
            }
        }
        simple_unlock(&call->lock);
    }
    timer_queue_unlock(timer_master_queue);
}
static void
timer_sysctl_set_threshold(uint64_t value)
{
    timer_longterm_t    *tlp = &timer_longterm;
    spl_t               s = splclock();
    boolean_t           threshold_increase;

    timer_queue_lock_spin(timer_longterm_queue);

    timer_call_cancel(&tlp->threshold.timer);

    /*
     * Set the new threshold and note whether it's increasing.
     */
    if (value == 0) {
        tlp->threshold.interval = TIMER_LONGTERM_NONE;
        threshold_increase = TRUE;
        timer_call_cancel(&tlp->threshold.timer);
    } else {
        uint64_t        old_interval = tlp->threshold.interval;
        tlp->threshold.interval = value * NSEC_PER_MSEC;
        nanoseconds_to_absolutetime(tlp->threshold.interval,
            &tlp->threshold.interval);
        tlp->threshold.margin = tlp->threshold.interval / 10;
        if (old_interval == TIMER_LONGTERM_NONE)
            threshold_increase = FALSE;
        else
            threshold_increase = (tlp->threshold.interval > old_interval);
    }

    if (threshold_increase /* or removal */) {
        /* Escalate timers from the longterm queue */
        timer_longterm_scan(tlp, mach_absolute_time());
    } else /* decrease or addition */ {
        /*
         * We scan the local/master queue for timers now longterm.
         * To be strictly correct, we should scan all processor queues
         * but timer migration results in most timers gravitating to the
         * master processor in any case.
         */
        timer_master_scan(tlp, mach_absolute_time());
    }

    /* Set new timer accordingly */
    tlp->threshold.deadline_set = tlp->threshold.deadline;
    if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
        tlp->threshold.deadline_set -= tlp->threshold.margin;
        tlp->threshold.deadline_set -= tlp->threshold.latency;
        timer_call_enter(
            &tlp->threshold.timer,
            tlp->threshold.deadline_set,
            TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
    }

    /* Reset stats */
    tlp->scan_pauses = 0;
    tlp->threshold.scans = 0;
    tlp->threshold.preempts = 0;
    tlp->threshold.latency = 0;
    tlp->threshold.latency_min = EndOfAllTime;
    tlp->threshold.latency_max = 0;

    timer_queue_unlock(timer_longterm_queue);
    splx(s);
}
kern_return_t
timer_sysctl_set(int oid, uint64_t value)
{
    switch (oid) {
    case THRESHOLD:
        timer_call_cpu(
            master_cpu,
            (void (*)(void *)) timer_sysctl_set_threshold,
            (void *) value);
        return KERN_SUCCESS;
    case SCAN_LIMIT:
        timer_longterm.scan_limit = value;
        return KERN_SUCCESS;
    case SCAN_INTERVAL:
        timer_longterm.scan_interval = value;
        return KERN_SUCCESS;
    default:
        return KERN_INVALID_ARGUMENT;
    }
}
/* Select timer coalescing window based on per-task quality-of-service hints */
static boolean_t tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) {
    uint32_t latency_qos;
    boolean_t adjusted = FALSE;
    task_t ctask = t->task;

    if (ctask) {
        latency_qos = proc_get_effective_thread_policy(t, TASK_POLICY_LATENCY_QOS);

        assert(latency_qos <= NUM_LATENCY_QOS_TIERS);

        if (latency_qos) {
            *tshift = tcoal_prio_params.latency_qos_scale[latency_qos - 1];
            *tmax_abstime = tcoal_prio_params.latency_qos_abstime_max[latency_qos - 1];
            *pratelimited = tcoal_prio_params.latency_tier_rate_limited[latency_qos - 1];
            adjusted = TRUE;
        }
    }
    return adjusted;
}
1785 * urgency value provided at timeout establishment. With this mechanism,
1786 * timers are no longer necessarily sorted in order of soft deadline
1787 * on a given timer queue, i.e. they may be differentially skewed.
1788 * In the current scheme, this could lead to fewer pending timers
1789 * processed than is technically possible when the HW deadline arrives.
1792 timer_compute_leeway(thread_t cthread
, int32_t urgency
, int32_t *tshift
, uint64_t *tmax_abstime
, boolean_t
*pratelimited
) {
1793 int16_t tpri
= cthread
->sched_pri
;
1794 if ((urgency
& TIMER_CALL_USER_MASK
) != 0) {
1795 if (tpri
>= BASEPRI_RTQUEUES
||
1796 urgency
== TIMER_CALL_USER_CRITICAL
) {
1797 *tshift
= tcoal_prio_params
.timer_coalesce_rt_shift
;
1798 *tmax_abstime
= tcoal_prio_params
.timer_coalesce_rt_abstime_max
;
1799 TCOAL_PRIO_STAT(rt_tcl
);
1800 } else if (proc_get_effective_thread_policy(cthread
, TASK_POLICY_DARWIN_BG
) ||
1801 (urgency
== TIMER_CALL_USER_BACKGROUND
)) {
1802 /* Determine if timer should be subjected to a lower QoS */
1803 if (tcoal_qos_adjust(cthread
, tshift
, tmax_abstime
, pratelimited
)) {
1804 if (*tmax_abstime
> tcoal_prio_params
.timer_coalesce_bg_abstime_max
) {
1807 *pratelimited
= FALSE
;
1810 *tshift
= tcoal_prio_params
.timer_coalesce_bg_shift
;
1811 *tmax_abstime
= tcoal_prio_params
.timer_coalesce_bg_abstime_max
;
1812 TCOAL_PRIO_STAT(bg_tcl
);
1813 } else if (tpri
>= MINPRI_KERNEL
) {
1814 *tshift
= tcoal_prio_params
.timer_coalesce_kt_shift
;
1815 *tmax_abstime
= tcoal_prio_params
.timer_coalesce_kt_abstime_max
;
1816 TCOAL_PRIO_STAT(kt_tcl
);
1817 } else if (cthread
->sched_mode
== TH_MODE_FIXED
) {
1818 *tshift
= tcoal_prio_params
.timer_coalesce_fp_shift
;
1819 *tmax_abstime
= tcoal_prio_params
.timer_coalesce_fp_abstime_max
;
1820 TCOAL_PRIO_STAT(fp_tcl
);
1821 } else if (tcoal_qos_adjust(cthread
, tshift
, tmax_abstime
, pratelimited
)) {
1822 TCOAL_PRIO_STAT(qos_tcl
);
1823 } else if (cthread
->sched_mode
== TH_MODE_TIMESHARE
) {
1824 *tshift
= tcoal_prio_params
.timer_coalesce_ts_shift
;
1825 *tmax_abstime
= tcoal_prio_params
.timer_coalesce_ts_abstime_max
;
1826 TCOAL_PRIO_STAT(ts_tcl
);
1828 TCOAL_PRIO_STAT(nc_tcl
);
1830 } else if (urgency
== TIMER_CALL_SYS_BACKGROUND
) {
1831 *tshift
= tcoal_prio_params
.timer_coalesce_bg_shift
;
1832 *tmax_abstime
= tcoal_prio_params
.timer_coalesce_bg_abstime_max
;
1833 TCOAL_PRIO_STAT(bg_tcl
);
1835 *tshift
= tcoal_prio_params
.timer_coalesce_kt_shift
;
1836 *tmax_abstime
= tcoal_prio_params
.timer_coalesce_kt_abstime_max
;
1837 TCOAL_PRIO_STAT(kt_tcl
);
int timer_user_idle_level;
uint64_t
timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthread, boolean_t *pratelimited)
{
    int32_t tcs_shift = 0;
    uint64_t tcs_max_abstime = 0;
    uint64_t adjval;
    uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK);

    if (mach_timer_coalescing_enabled &&
        (deadline > now) && (urgency != TIMER_CALL_SYS_CRITICAL)) {
        timer_compute_leeway(cthread, urgency, &tcs_shift, &tcs_max_abstime, pratelimited);

        if (tcs_shift >= 0)
            adjval = MIN((deadline - now) >> tcs_shift, tcs_max_abstime);
        else
            adjval = MIN((deadline - now) << (-tcs_shift), tcs_max_abstime);
        /* Apply adjustments derived from "user idle level" heuristic */
        adjval += (adjval * timer_user_idle_level) >> 7;
        return adjval;
    } else {
        return 0;
    }
}
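/*
 * Worked example (illustrative): if timer_compute_leeway() yields a shift of
 * 10 and the deadline is 10 ms away, the slop is (10 ms >> 10) ~= 9.8 us,
 * capped at tcs_max_abstime; a negative shift instead scales the window up
 * ((deadline - now) << -shift). With timer_user_idle_level at 64, the result
 * is then grown by a further 64/128 = 50%.
 */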
int
timer_get_user_idle_level(void) {
    return timer_user_idle_level;
}

kern_return_t timer_set_user_idle_level(int ilevel) {
    boolean_t do_reeval = FALSE;

    if ((ilevel < 0) || (ilevel > 128))
        return KERN_INVALID_ARGUMENT;

    if (ilevel < timer_user_idle_level) {
        do_reeval = TRUE;
    }

    timer_user_idle_level = ilevel;

    if (do_reeval)
        ml_timer_evaluate();

    return KERN_SUCCESS;
}