Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
c910b4d9 | 2 | * Copyright (c) 1993-2008 Apple Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * Timer interrupt callout module. | |
1c79356b A |
30 | */ |
31 | ||
32 | #include <mach/mach_types.h> | |
33 | ||
34 | #include <kern/clock.h> | |
3e170ce0 | 35 | #include <kern/smp.h> |
9bccf70c | 36 | #include <kern/processor.h> |
1c79356b | 37 | #include <kern/timer_call.h> |
c910b4d9 | 38 | #include <kern/timer_queue.h> |
1c79356b | 39 | #include <kern/call_entry.h> |
39236c6e | 40 | #include <kern/thread.h> |
39037602 | 41 | #include <kern/policy_internal.h> |
1c79356b | 42 | |
0c530ab8 A |
43 | #include <sys/kdebug.h> |
44 | ||
4b17d6b6 | 45 | #if CONFIG_DTRACE |
2d21ac55 A |
46 | #include <mach/sdt.h> |
47 | #endif | |
1c79356b | 48 | |
1c79356b | 49 | |
6d2010ae A |
50 | #if DEBUG |
51 | #define TIMER_ASSERT 1 | |
52 | #endif | |
53 | ||
54 | //#define TIMER_ASSERT 1 | |
55 | //#define TIMER_DBG 1 | |
56 | ||
57 | #if TIMER_DBG | |
58 | #define DBG(x...) kprintf("DBG: " x); | |
59 | #else | |
60 | #define DBG(x...) | |
61 | #endif | |
62 | ||
39236c6e A |
63 | #if TIMER_TRACE |
64 | #define TIMER_KDEBUG_TRACE KERNEL_DEBUG_CONSTANT_IST | |
65 | #else | |
66 | #define TIMER_KDEBUG_TRACE(x...) | |
67 | #endif | |
68 | ||
69 | ||
6d2010ae A |
70 | lck_grp_t timer_call_lck_grp; |
71 | lck_attr_t timer_call_lck_attr; | |
72 | lck_grp_attr_t timer_call_lck_grp_attr; | |
73 | ||
39236c6e A |
74 | lck_grp_t timer_longterm_lck_grp; |
75 | lck_attr_t timer_longterm_lck_attr; | |
76 | lck_grp_attr_t timer_longterm_lck_grp_attr; | |
77 | ||
3e170ce0 A |
78 | /* Timer queue lock must be acquired with interrupts disabled (under splclock()) */ |
79 | #if __SMP__ | |
39236c6e | 80 | #define timer_queue_lock_spin(queue) \ |
6d2010ae A |
81 | lck_mtx_lock_spin_always(&queue->lock_data) |
82 | ||
39236c6e | 83 | #define timer_queue_unlock(queue) \ |
6d2010ae | 84 | lck_mtx_unlock_always(&queue->lock_data) |
3e170ce0 A |
85 | #else |
86 | #define timer_queue_lock_spin(queue) (void)1 | |
87 | #define timer_queue_unlock(queue) (void)1 | |
88 | #endif | |
6d2010ae A |
89 | |
90 | #define QUEUE(x) ((queue_t)(x)) | |
91 | #define MPQUEUE(x) ((mpqueue_head_t *)(x)) | |
92 | #define TIMER_CALL(x) ((timer_call_t)(x)) | |
fe8ab488 | 93 | #define TCE(x) (&(x->call_entry)) |
39236c6e A |
94 | /* |
95 | * The longterm timer object is a global structure holding all timers | |
96 | * beyond the short-term, local timer queue threshold. The boot processor | |
97 | * is responsible for moving each timer to its local timer queue | |
98 | * if and when that timer becomes due within the threshold. | |
99 | */ | |
5ba3f43e A |
100 | |
101 | /* Sentinel for "no time set": */ | |
39236c6e | 102 | #define TIMER_LONGTERM_NONE EndOfAllTime |
5ba3f43e | 103 | /* The default threshold is the delta above which a timer is "long-term" */
39236c6e | 104 | #if defined(__x86_64__) |
5ba3f43e | 105 | #define TIMER_LONGTERM_THRESHOLD (1ULL * NSEC_PER_SEC) /* 1 sec */ |
39236c6e | 106 | #else |
5ba3f43e | 107 | #define TIMER_LONGTERM_THRESHOLD TIMER_LONGTERM_NONE /* disabled */ |
39236c6e A |
108 | #endif |
109 | ||
5ba3f43e A |
110 | /* |
111 | * The scan limit throttles processing of the longterm queue. | |
112 | * If the scan time exceeds this limit, we terminate, unlock | |
113 | * and repeat after this same interval. This prevents unbounded holding of | |
114 | * timer queue locks with interrupts masked. | |
115 | */ | |
116 | #define TIMER_LONGTERM_SCAN_LIMIT (1ULL * NSEC_PER_MSEC) /* 1 msec */ | |
117 | /* Sentinel for "scan limit exceeded": */ | |
118 | #define TIMER_LONGTERM_SCAN_AGAIN 0 | |
119 | ||
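For orientation, the classification these constants imply can be written out as a tiny predicate (an illustrative sketch only; the helper name is hypothetical, and the real enqueue-time check lives in timer_longterm_enqueue_unlocked() below):

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper: a timer is "long-term" when its deadline lies more
 * than the threshold interval in the future. On non-x86_64 configurations
 * the interval is TIMER_LONGTERM_NONE and the mechanism is disabled, so
 * every timer stays on a per-CPU (short-term) queue.
 */
static bool
timer_deadline_is_longterm(uint64_t now, uint64_t deadline,
    uint64_t threshold_interval, uint64_t longterm_none)
{
	if (threshold_interval == longterm_none)
		return false;   /* long-term queueing disabled */
	return deadline > now + threshold_interval;
}
```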
39236c6e A |
120 | typedef struct { |
121 | uint64_t interval; /* longterm timer interval */ | |
122 | uint64_t margin; /* fudge factor (10% of interval) */ | |
123 | uint64_t deadline; /* first/soonest longterm deadline */ | |
124 | uint64_t preempted; /* sooner timer has pre-empted */ | |
125 | timer_call_t call; /* first/soonest longterm timer call */ | |
126 | uint64_t deadline_set; /* next timer set */ | |
127 | timer_call_data_t timer; /* timer used by threshold management */ | |
128 | /* Stats: */ | |
129 | uint64_t scans; /* num threshold timer scans */ | |
130 | uint64_t preempts; /* num threshold reductions */ | |
131 | uint64_t latency; /* average threshold latency */ | |
132 | uint64_t latency_min; /* minimum threshold latency */ | |
133 | uint64_t latency_max; /* maximum threshold latency */ | |
134 | } threshold_t; | |
135 | ||
136 | typedef struct { | |
137 | mpqueue_head_t queue; /* longterm timer list */ | |
138 | uint64_t enqueues; /* num timers queued */ | |
139 | uint64_t dequeues; /* num timers dequeued */ | |
140 | uint64_t escalates; /* num timers becoming shortterm */ | |
141 | uint64_t scan_time; /* last time the list was scanned */ | |
142 | threshold_t threshold; /* longterm timer threshold */ | |
5ba3f43e A |
143 | uint64_t scan_limit; /* maximum scan time */ |
144 | uint64_t scan_pauses; /* num scans exceeding time limit */ | |
39236c6e A |
145 | } timer_longterm_t; |
146 | ||
5ba3f43e A |
147 | timer_longterm_t timer_longterm = { |
148 | .scan_limit = TIMER_LONGTERM_SCAN_LIMIT, | |
149 | }; | |
39236c6e A |
150 | |
151 | static mpqueue_head_t *timer_longterm_queue = NULL; | |
152 | ||
153 | static void timer_longterm_init(void); | |
154 | static void timer_longterm_callout( | |
155 | timer_call_param_t p0, | |
156 | timer_call_param_t p1); | |
157 | extern void timer_longterm_scan( | |
158 | timer_longterm_t *tlp, | |
159 | uint64_t now); | |
160 | static void timer_longterm_update( | |
161 | timer_longterm_t *tlp); | |
162 | static void timer_longterm_update_locked( | |
163 | timer_longterm_t *tlp); | |
164 | static mpqueue_head_t * timer_longterm_enqueue_unlocked( | |
165 | timer_call_t call, | |
166 | uint64_t now, | |
167 | uint64_t deadline, | |
fe8ab488 A |
168 | mpqueue_head_t ** old_queue, |
169 | uint64_t soft_deadline, | |
170 | uint64_t ttd, | |
171 | timer_call_param_t param1, | |
172 | uint32_t callout_flags); | |
39236c6e A |
173 | static void timer_longterm_dequeued_locked( |
174 | timer_call_t call); | |
316670eb A |
175 | |
176 | uint64_t past_deadline_timers; | |
177 | uint64_t past_deadline_deltas; | |
178 | uint64_t past_deadline_longest; | |
179 | uint64_t past_deadline_shortest = ~0ULL; | |
180 | enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000}; | |
181 | ||
182 | uint64_t past_deadline_timer_adjustment; | |
183 | ||
39236c6e | 184 | static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint64_t leeway, uint32_t flags, boolean_t ratelimited); |
6d2010ae A |
185 | boolean_t mach_timer_coalescing_enabled = TRUE; |
186 | ||
187 | mpqueue_head_t *timer_call_enqueue_deadline_unlocked( | |
188 | timer_call_t call, | |
189 | mpqueue_head_t *queue, | |
fe8ab488 A |
190 | uint64_t deadline, |
191 | uint64_t soft_deadline, | |
192 | uint64_t ttd, | |
193 | timer_call_param_t param1, | |
194 | uint32_t flags); | |
6d2010ae A |
195 | |
196 | mpqueue_head_t *timer_call_dequeue_unlocked( | |
197 | timer_call_t call); | |
198 | ||
fe8ab488 A |
199 | timer_coalescing_priority_params_t tcoal_prio_params; |
200 | ||
201 | #if TCOAL_PRIO_STATS | |
202 | int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl; | |
203 | #define TCOAL_PRIO_STAT(x) (x++) | |
204 | #else | |
205 | #define TCOAL_PRIO_STAT(x) | |
206 | #endif | |
207 | ||
208 | static void | |
209 | timer_call_init_abstime(void) | |
210 | { | |
211 | int i; | |
212 | uint64_t result; | |
213 | timer_coalescing_priority_params_ns_t * tcoal_prio_params_init = timer_call_get_priority_params(); | |
214 | nanoseconds_to_absolutetime(PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment); | |
215 | nanoseconds_to_absolutetime(tcoal_prio_params_init->idle_entry_timer_processing_hdeadline_threshold_ns, &result); | |
216 | tcoal_prio_params.idle_entry_timer_processing_hdeadline_threshold_abstime = (uint32_t)result; | |
217 | nanoseconds_to_absolutetime(tcoal_prio_params_init->interrupt_timer_coalescing_ilat_threshold_ns, &result); | |
218 | tcoal_prio_params.interrupt_timer_coalescing_ilat_threshold_abstime = (uint32_t)result; | |
219 | nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_resort_threshold_ns, &result); | |
220 | tcoal_prio_params.timer_resort_threshold_abstime = (uint32_t)result; | |
221 | tcoal_prio_params.timer_coalesce_rt_shift = tcoal_prio_params_init->timer_coalesce_rt_shift; | |
222 | tcoal_prio_params.timer_coalesce_bg_shift = tcoal_prio_params_init->timer_coalesce_bg_shift; | |
223 | tcoal_prio_params.timer_coalesce_kt_shift = tcoal_prio_params_init->timer_coalesce_kt_shift; | |
224 | tcoal_prio_params.timer_coalesce_fp_shift = tcoal_prio_params_init->timer_coalesce_fp_shift; | |
225 | tcoal_prio_params.timer_coalesce_ts_shift = tcoal_prio_params_init->timer_coalesce_ts_shift; | |
226 | ||
227 | nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_rt_ns_max, | |
228 | &tcoal_prio_params.timer_coalesce_rt_abstime_max); | |
229 | nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_bg_ns_max, | |
230 | &tcoal_prio_params.timer_coalesce_bg_abstime_max); | |
231 | nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_kt_ns_max, | |
232 | &tcoal_prio_params.timer_coalesce_kt_abstime_max); | |
233 | nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_fp_ns_max, | |
234 | &tcoal_prio_params.timer_coalesce_fp_abstime_max); | |
235 | nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_ts_ns_max, | |
236 | &tcoal_prio_params.timer_coalesce_ts_abstime_max); | |
237 | ||
238 | for (i = 0; i < NUM_LATENCY_QOS_TIERS; i++) { | |
239 | tcoal_prio_params.latency_qos_scale[i] = tcoal_prio_params_init->latency_qos_scale[i]; | |
240 | nanoseconds_to_absolutetime(tcoal_prio_params_init->latency_qos_ns_max[i], | |
241 | &tcoal_prio_params.latency_qos_abstime_max[i]); | |
242 | tcoal_prio_params.latency_tier_rate_limited[i] = tcoal_prio_params_init->latency_tier_rate_limited[i]; | |
243 | } | |
244 | } | |
245 | ||
1c79356b A |
246 | |
247 | void | |
39236c6e | 248 | timer_call_init(void) |
1c79356b | 249 | { |
6d2010ae A |
250 | lck_attr_setdefault(&timer_call_lck_attr); |
251 | lck_grp_attr_setdefault(&timer_call_lck_grp_attr); | |
252 | lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr); | |
39236c6e A |
253 | |
254 | timer_longterm_init(); | |
fe8ab488 | 255 | timer_call_init_abstime(); |
1c79356b A |
256 | } |
257 | ||
6d2010ae A |
258 | |
259 | void | |
39236c6e | 260 | timer_call_queue_init(mpqueue_head_t *queue) |
6d2010ae | 261 | { |
39236c6e | 262 | DBG("timer_call_queue_init(%p)\n", queue); |
6d2010ae A |
263 | mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr); |
264 | } | |
265 | ||
266 | ||
1c79356b A |
267 | void |
268 | timer_call_setup( | |
269 | timer_call_t call, | |
270 | timer_call_func_t func, | |
271 | timer_call_param_t param0) | |
272 | { | |
6d2010ae | 273 | DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0); |
fe8ab488 | 274 | call_entry_setup(TCE(call), func, param0); |
6d2010ae A |
275 | simple_lock_init(&(call)->lock, 0); |
276 | call->async_dequeue = FALSE; | |
1c79356b | 277 | } |
6d2010ae A |
278 | #if TIMER_ASSERT |
279 | static __inline__ mpqueue_head_t * | |
280 | timer_call_entry_dequeue( | |
281 | timer_call_t entry) | |
282 | { | |
fe8ab488 | 283 | mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); |
6d2010ae A |
284 | |
285 | if (!hw_lock_held((hw_lock_t)&entry->lock)) | |
286 | panic("_call_entry_dequeue() " | |
287 | "entry %p is not locked\n", entry); | |
288 | /* | |
289 | * XXX The queue lock is actually a mutex in spin mode | |
290 | * but there's no way to test for it being held | |
291 | * so we pretend it's a spinlock! | |
292 | */ | |
293 | if (!hw_lock_held((hw_lock_t)&old_queue->lock_data)) | |
294 | panic("_call_entry_dequeue() " | |
295 | "queue %p is not locked\n", old_queue); | |
296 | ||
fe8ab488 | 297 | call_entry_dequeue(TCE(entry)); |
39236c6e | 298 | old_queue->count--; |
c910b4d9 | 299 | |
6d2010ae A |
300 | return (old_queue); |
301 | } | |
1c79356b | 302 | |
6d2010ae A |
303 | static __inline__ mpqueue_head_t * |
304 | timer_call_entry_enqueue_deadline( | |
305 | timer_call_t entry, | |
306 | mpqueue_head_t *queue, | |
307 | uint64_t deadline) | |
308 | { | |
fe8ab488 | 309 | mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); |
1c79356b | 310 | |
6d2010ae A |
311 | if (!hw_lock_held((hw_lock_t)&entry->lock)) |
312 | panic("_call_entry_enqueue_deadline() " | |
313 | "entry %p is not locked\n", entry); | |
314 | /* XXX More lock pretense: */ | |
315 | if (!hw_lock_held((hw_lock_t)&queue->lock_data)) | |
316 | panic("_call_entry_enqueue_deadline() " | |
317 | "queue %p is not locked\n", queue); | |
318 | if (old_queue != NULL && old_queue != queue) | |
319 | panic("_call_entry_enqueue_deadline() " | |
320 | "old_queue %p != queue", old_queue); | |
1c79356b | 321 | |
fe8ab488 | 322 | call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline); |
1c79356b | 323 | |
39236c6e A |
324 | /* For efficiency, track the earliest soft deadline on the queue, so that |
325 | * fuzzy decisions can be made without lock acquisitions. | |
326 | */ | |
fe8ab488 A |
327 | timer_call_t thead = (timer_call_t)queue_first(&queue->head); |
328 | ||
329 | queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline; | |
39236c6e A |
330 | |
331 | if (old_queue) | |
332 | old_queue->count--; | |
333 | queue->count++; | |
334 | ||
6d2010ae A |
335 | return (old_queue); |
336 | } | |
1c79356b | 337 | |
6d2010ae | 338 | #else |
1c79356b | 339 | |
6d2010ae A |
340 | static __inline__ mpqueue_head_t * |
341 | timer_call_entry_dequeue( | |
342 | timer_call_t entry) | |
343 | { | |
fe8ab488 | 344 | mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); |
39236c6e | 345 | |
fe8ab488 | 346 | call_entry_dequeue(TCE(entry)); |
39236c6e A |
347 | old_queue->count--; |
348 | ||
349 | return old_queue; | |
6d2010ae | 350 | } |
c910b4d9 | 351 | |
6d2010ae A |
352 | static __inline__ mpqueue_head_t * |
353 | timer_call_entry_enqueue_deadline( | |
354 | timer_call_t entry, | |
355 | mpqueue_head_t *queue, | |
356 | uint64_t deadline) | |
357 | { | |
fe8ab488 | 358 | mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); |
39236c6e | 359 | |
fe8ab488 | 360 | call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline); |
39236c6e A |
361 | |
362 | /* For efficiency, track the earliest soft deadline on the queue, | |
363 | * so that fuzzy decisions can be made without lock acquisitions. | |
364 | */ | |
fe8ab488 A |
365 | |
366 | timer_call_t thead = (timer_call_t)queue_first(&queue->head); | |
367 | queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline; | |
39236c6e A |
368 | |
369 | if (old_queue) | |
370 | old_queue->count--; | |
371 | queue->count++; | |
372 | ||
373 | return old_queue; | |
1c79356b A |
374 | } |
375 | ||
6d2010ae A |
376 | #endif |
377 | ||
39236c6e A |
378 | static __inline__ void |
379 | timer_call_entry_enqueue_tail( | |
380 | timer_call_t entry, | |
381 | mpqueue_head_t *queue) | |
382 | { | |
fe8ab488 | 383 | call_entry_enqueue_tail(TCE(entry), QUEUE(queue)); |
39236c6e A |
384 | queue->count++; |
385 | return; | |
386 | } | |
387 | ||
388 | /* | |
389 | * Remove timer entry from its queue but don't change the queue pointer | |
390 | * and set the async_dequeue flag. This is locking case 2b. | |
391 | */ | |
392 | static __inline__ void | |
393 | timer_call_entry_dequeue_async( | |
394 | timer_call_t entry) | |
395 | { | |
fe8ab488 | 396 | mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); |
39236c6e A |
397 | if (old_queue) { |
398 | old_queue->count--; | |
399 | (void) remque(qe(entry)); | |
400 | entry->async_dequeue = TRUE; | |
401 | } | |
402 | return; | |
403 | } | |
404 | ||
6d2010ae A |
405 | #if TIMER_ASSERT |
406 | unsigned timer_call_enqueue_deadline_unlocked_async1; | |
407 | unsigned timer_call_enqueue_deadline_unlocked_async2; | |
408 | #endif | |
409 | /* | |
410 | * Assumes call_entry and queues unlocked, interrupts disabled. | |
411 | */ | |
412 | __inline__ mpqueue_head_t * | |
413 | timer_call_enqueue_deadline_unlocked( | |
414 | timer_call_t call, | |
415 | mpqueue_head_t *queue, | |
fe8ab488 A |
416 | uint64_t deadline, |
417 | uint64_t soft_deadline, | |
418 | uint64_t ttd, | |
419 | timer_call_param_t param1, | |
420 | uint32_t callout_flags) | |
1c79356b | 421 | { |
fe8ab488 | 422 | call_entry_t entry = TCE(call); |
6d2010ae | 423 | mpqueue_head_t *old_queue; |
1c79356b | 424 | |
6d2010ae | 425 | DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue); |
1c79356b | 426 | |
6d2010ae | 427 | simple_lock(&call->lock); |
fe8ab488 | 428 | |
6d2010ae | 429 | old_queue = MPQUEUE(entry->queue); |
fe8ab488 | 430 | |
6d2010ae | 431 | if (old_queue != NULL) { |
39236c6e | 432 | timer_queue_lock_spin(old_queue); |
6d2010ae | 433 | if (call->async_dequeue) { |
39236c6e | 434 | /* collision (1c): timer already dequeued, clear flag */ |
6d2010ae | 435 | #if TIMER_ASSERT |
39236c6e A |
436 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, |
437 | DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, | |
4bd07ac2 | 438 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
39236c6e | 439 | call->async_dequeue, |
4bd07ac2 | 440 | VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), |
39236c6e | 441 | 0x1c, 0); |
6d2010ae A |
442 | timer_call_enqueue_deadline_unlocked_async1++; |
443 | #endif | |
39236c6e | 444 | call->async_dequeue = FALSE; |
6d2010ae | 445 | entry->queue = NULL; |
39236c6e A |
446 | } else if (old_queue != queue) { |
447 | timer_call_entry_dequeue(call); | |
6d2010ae A |
448 | #if TIMER_ASSERT |
449 | timer_call_enqueue_deadline_unlocked_async2++; | |
450 | #endif | |
451 | } | |
39236c6e A |
452 | if (old_queue == timer_longterm_queue) |
453 | timer_longterm_dequeued_locked(call); | |
6d2010ae | 454 | if (old_queue != queue) { |
39236c6e A |
455 | timer_queue_unlock(old_queue); |
456 | timer_queue_lock_spin(queue); | |
6d2010ae A |
457 | } |
458 | } else { | |
39236c6e | 459 | timer_queue_lock_spin(queue); |
6d2010ae | 460 | } |
1c79356b | 461 | |
fe8ab488 A |
462 | call->soft_deadline = soft_deadline; |
463 | call->flags = callout_flags; | |
464 | TCE(call)->param1 = param1; | |
465 | call->ttd = ttd; | |
466 | ||
6d2010ae | 467 | timer_call_entry_enqueue_deadline(call, queue, deadline); |
39236c6e | 468 | timer_queue_unlock(queue); |
6d2010ae | 469 | simple_unlock(&call->lock); |
1c79356b | 470 | |
c910b4d9 A |
471 | return (old_queue); |
472 | } | |
1c79356b | 473 | |
6d2010ae A |
474 | #if TIMER_ASSERT |
475 | unsigned timer_call_dequeue_unlocked_async1; | |
476 | unsigned timer_call_dequeue_unlocked_async2; | |
477 | #endif | |
478 | mpqueue_head_t * | |
479 | timer_call_dequeue_unlocked( | |
480 | timer_call_t call) | |
c910b4d9 | 481 | { |
fe8ab488 | 482 | call_entry_t entry = TCE(call); |
6d2010ae | 483 | mpqueue_head_t *old_queue; |
1c79356b | 484 | |
6d2010ae | 485 | DBG("timer_call_dequeue_unlocked(%p)\n", call); |
1c79356b | 486 | |
6d2010ae A |
487 | simple_lock(&call->lock); |
488 | old_queue = MPQUEUE(entry->queue); | |
39236c6e A |
489 | #if TIMER_ASSERT |
490 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
491 | DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, | |
4bd07ac2 | 492 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
39236c6e | 493 | call->async_dequeue, |
4bd07ac2 | 494 | VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), |
39236c6e A |
495 | 0, 0); |
496 | #endif | |
6d2010ae | 497 | if (old_queue != NULL) { |
39236c6e | 498 | timer_queue_lock_spin(old_queue); |
6d2010ae | 499 | if (call->async_dequeue) { |
39236c6e | 500 | /* collision (1c): timer already dequeued, clear flag */ |
6d2010ae | 501 | #if TIMER_ASSERT |
39236c6e A |
502 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, |
503 | DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, | |
4bd07ac2 | 504 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
39236c6e | 505 | call->async_dequeue, |
4bd07ac2 | 506 | VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), |
39236c6e | 507 | 0x1c, 0); |
6d2010ae A |
508 | timer_call_dequeue_unlocked_async1++; |
509 | #endif | |
39236c6e A |
510 | call->async_dequeue = FALSE; |
511 | entry->queue = NULL; | |
6d2010ae | 512 | } else { |
39236c6e | 513 | timer_call_entry_dequeue(call); |
6d2010ae | 514 | } |
39236c6e A |
515 | if (old_queue == timer_longterm_queue) |
516 | timer_longterm_dequeued_locked(call); | |
517 | timer_queue_unlock(old_queue); | |
6d2010ae A |
518 | } |
519 | simple_unlock(&call->lock); | |
c910b4d9 | 520 | return (old_queue); |
1c79356b A |
521 | } |
522 | ||
5ba3f43e A |
523 | static uint64_t |
524 | past_deadline_timer_handle(uint64_t deadline, uint64_t ctime) | |
525 | { | |
526 | uint64_t delta = (ctime - deadline); | |
527 | ||
528 | past_deadline_timers++; | |
529 | past_deadline_deltas += delta; | |
530 | if (delta > past_deadline_longest) | |
531 | past_deadline_longest = deadline; | |
532 | if (delta < past_deadline_shortest) | |
533 | past_deadline_shortest = delta; | |
534 | ||
535 | return (ctime + past_deadline_timer_adjustment); | |
536 | } | |
fe8ab488 A |
537 | |
538 | /* | |
539 | * Timer call entry locking model | |
540 | * ============================== | |
541 | * | |
542 | * Timer call entries are linked on per-cpu timer queues which are protected | |
543 | * by the queue lock and the call entry lock. The locking protocol is: | |
544 | * | |
545 | * 0) The canonical locking order is timer call entry followed by queue. | |
546 | * | |
547 | * 1) With only the entry lock held, entry.queue is valid: | |
548 | * 1a) NULL: the entry is not queued, or | |
549 | * 1b) non-NULL: this queue must be locked before the entry is modified. | |
550 | * After locking the queue, the call.async_dequeue flag must be checked: | |
551 | * 1c) TRUE: the entry was removed from the queue by another thread | |
552 | * and we must NULL the entry.queue and reset this flag, or | |
553 | * 1d) FALSE: (ie. queued), the entry can be manipulated. | |
554 | * | |
555 | * 2) If a queue lock is obtained first, the queue is stable: | |
556 | * 2a) If a try-lock of a queued entry succeeds, the call can be operated on | |
557 | * and dequeued. | |
558 | * 2b) If a try-lock fails, it indicates that another thread is attempting | |
559 | * to change the entry and move it to a different position in this queue | |
560 | * or to a different queue. The entry can be dequeued but it should not be | |
561 | * operated upon since it is being changed. Furthermore, we don't null | |
562 | * the entry.queue pointer (protected by the entry lock we don't own). | |
563 | * Instead, we set the async_dequeue flag -- see (1c). | |
564 | * 2c) Same as 2b but occurring when a longterm timer is matured. | |
565 | * 3) A callout's parameters (deadline, flags, parameters, soft deadline &c.) | |
566 | * should be manipulated with the appropriate timer queue lock held, | |
567 | * to prevent queue traversals from observing inconsistent | |
568 | * updates to an in-flight callout. | |
569 | */ | |
570 | ||
571 | /* | |
572 | * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline() | |
573 | * cast between pointer types (mpqueue_head_t *) and (queue_t) so that | |
574 | * we can use the call_entry_dequeue() and call_entry_enqueue_deadline() | |
575 | * methods to operate on timer_call structs as if they are call_entry structs. | |
576 | * These structures are identical except for their queue head pointer fields. | |
577 | * | |
578 | * In the debug case, we assert that the timer call locking protocol | |
579 | * is being obeyed. | |
580 | */ | |
581 | ||
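The subtlest part of the protocol above is the async-dequeue handshake between a queue-lock holder and an entry-lock holder; the two halves sit far apart in this file (timer_call_entry_dequeue_async() near the top, the flag check inside timer_call_enqueue_deadline_unlocked() and timer_call_dequeue_unlocked()), so here they are side by side as a condensed, illustrative sketch using the field names from this file:

```c
/* Queue-lock holder whose try-lock on the entry failed -- case (2b)/(2c). */
static void
async_dequeue_queue_side_sketch(timer_call_t call)
{
	/* Unlink and fix the count, but do NOT touch TCE(call)->queue... */
	(void) remque(qe(call));
	MPQUEUE(TCE(call)->queue)->count--;
	/* ...instead leave a note for whoever holds the entry lock. */
	call->async_dequeue = TRUE;
}

/* Entry-lock holder, after locking the (stale) queue it points at -- case (1c). */
static void
async_dequeue_entry_side_sketch(timer_call_t call)
{
	if (call->async_dequeue) {
		/* Already unlinked by the queue side; just reconcile the state. */
		call->async_dequeue = FALSE;
		TCE(call)->queue = NULL;
	}
}
```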
6d2010ae A |
582 | static boolean_t |
583 | timer_call_enter_internal( | |
584 | timer_call_t call, | |
585 | timer_call_param_t param1, | |
586 | uint64_t deadline, | |
39236c6e A |
587 | uint64_t leeway, |
588 | uint32_t flags, | |
589 | boolean_t ratelimited) | |
1c79356b | 590 | { |
39236c6e | 591 | mpqueue_head_t *queue = NULL; |
6d2010ae | 592 | mpqueue_head_t *old_queue; |
1c79356b | 593 | spl_t s; |
39236c6e A |
594 | uint64_t slop; |
595 | uint32_t urgency; | |
fe8ab488 | 596 | uint64_t sdeadline, ttd; |
1c79356b | 597 | |
39037602 | 598 | assert(call->call_entry.func != NULL); |
1c79356b | 599 | s = splclock(); |
6d2010ae | 600 | |
fe8ab488 | 601 | sdeadline = deadline; |
39236c6e A |
602 | uint64_t ctime = mach_absolute_time(); |
603 | ||
604 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
605 | DECR_TIMER_ENTER | DBG_FUNC_START, | |
4bd07ac2 | 606 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
5ba3f43e | 607 | VM_KERNEL_ADDRHIDE(param1), deadline, flags, 0); |
39236c6e A |
608 | |
609 | urgency = (flags & TIMER_CALL_URGENCY_MASK); | |
610 | ||
611 | boolean_t slop_ratelimited = FALSE; | |
612 | slop = timer_call_slop(deadline, ctime, urgency, current_thread(), &slop_ratelimited); | |
613 | ||
614 | if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop) | |
615 | slop = leeway; | |
616 | ||
617 | if (UINT64_MAX - deadline <= slop) { | |
618 | deadline = UINT64_MAX; | |
619 | } else { | |
6d2010ae A |
620 | deadline += slop; |
621 | } | |
1c79356b | 622 | |
316670eb | 623 | if (__improbable(deadline < ctime)) { |
5ba3f43e | 624 | deadline = past_deadline_timer_handle(deadline, ctime); |
fe8ab488 | 625 | sdeadline = deadline; |
316670eb | 626 | } |
39236c6e | 627 | |
39236c6e | 628 | if (ratelimited || slop_ratelimited) { |
fe8ab488 | 629 | flags |= TIMER_CALL_RATELIMITED; |
39236c6e | 630 | } else { |
fe8ab488 | 631 | flags &= ~TIMER_CALL_RATELIMITED; |
39236c6e A |
632 | } |
633 | ||
fe8ab488 | 634 | ttd = sdeadline - ctime; |
4b17d6b6 | 635 | #if CONFIG_DTRACE |
fe8ab488 A |
636 | DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func, |
637 | timer_call_param_t, TCE(call)->param0, uint32_t, flags, | |
638 | (deadline - sdeadline), | |
639 | (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call); | |
4b17d6b6 A |
640 | #endif |
641 | ||
fe8ab488 A |
642 | /* Program timer callout parameters under the appropriate per-CPU or |
643 | * longterm queue lock. The callout may have been previously enqueued | |
644 | * and in-flight on this or another timer queue. | |
645 | */ | |
39236c6e | 646 | if (!ratelimited && !slop_ratelimited) { |
fe8ab488 | 647 | queue = timer_longterm_enqueue_unlocked(call, ctime, deadline, &old_queue, sdeadline, ttd, param1, flags); |
39236c6e | 648 | } |
1c79356b | 649 | |
39236c6e A |
650 | if (queue == NULL) { |
651 | queue = timer_queue_assign(deadline); | |
fe8ab488 | 652 | old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline, sdeadline, ttd, param1, flags); |
39236c6e | 653 | } |
1c79356b | 654 | |
39236c6e | 655 | #if TIMER_TRACE |
fe8ab488 | 656 | TCE(call)->entry_time = ctime; |
39236c6e A |
657 | #endif |
658 | ||
659 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
660 | DECR_TIMER_ENTER | DBG_FUNC_END, | |
4bd07ac2 | 661 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
fe8ab488 | 662 | (old_queue != NULL), deadline, queue->count, 0); |
1c79356b | 663 | |
1c79356b A |
664 | splx(s); |
665 | ||
c910b4d9 | 666 | return (old_queue != NULL); |
1c79356b A |
667 | } |
668 | ||
39236c6e A |
669 | /* |
670 | * timer_call_*() | |
671 | * return boolean indicating whether the call was previously queued. | |
672 | */ | |
6d2010ae A |
673 | boolean_t |
674 | timer_call_enter( | |
675 | timer_call_t call, | |
676 | uint64_t deadline, | |
677 | uint32_t flags) | |
678 | { | |
39236c6e | 679 | return timer_call_enter_internal(call, NULL, deadline, 0, flags, FALSE); |
6d2010ae A |
680 | } |
681 | ||
1c79356b | 682 | boolean_t |
c910b4d9 A |
683 | timer_call_enter1( |
684 | timer_call_t call, | |
685 | timer_call_param_t param1, | |
6d2010ae A |
686 | uint64_t deadline, |
687 | uint32_t flags) | |
1c79356b | 688 | { |
39236c6e A |
689 | return timer_call_enter_internal(call, param1, deadline, 0, flags, FALSE); |
690 | } | |
691 | ||
692 | boolean_t | |
693 | timer_call_enter_with_leeway( | |
694 | timer_call_t call, | |
695 | timer_call_param_t param1, | |
696 | uint64_t deadline, | |
697 | uint64_t leeway, | |
698 | uint32_t flags, | |
699 | boolean_t ratelimited) | |
700 | { | |
701 | return timer_call_enter_internal(call, param1, deadline, leeway, flags, ratelimited); | |
1c79356b A |
702 | } |
703 | ||
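For kernel clients of this module, the enter/cancel API above is typically used as follows (an illustrative sketch; example_timer, example_timeout, and example_arm_timer are hypothetical names, while the calls, flags, and conversion helpers are the ones used in this file and kern/clock.h):

```c
#include <kern/clock.h>
#include <kern/timer_call.h>

/* Hypothetical callout state and handler. */
static timer_call_data_t example_timer;

static void
example_timeout(timer_call_param_t p0, timer_call_param_t p1)
{
	/* Runs when the deadline expires; p0/p1 are the values passed below. */
	(void) p0;
	(void) p1;
}

static void
example_arm_timer(void *context)
{
	uint64_t deadline, leeway;

	/* Associate handler and param0 with the callout (normally done once). */
	timer_call_setup(&example_timer, example_timeout, NULL);

	/* Fire roughly 100 ms from now, allowing ~5 ms of coalescing leeway. */
	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
	nanoseconds_to_absolutetime(5 * NSEC_PER_MSEC, &leeway);
	(void) timer_call_enter_with_leeway(&example_timer, context,
	    deadline, leeway, TIMER_CALL_LEEWAY, FALSE);

	/* Cancel later if needed; returns TRUE if the call was still queued. */
	(void) timer_call_cancel(&example_timer);
}
```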
5ba3f43e A |
704 | boolean_t |
705 | timer_call_quantum_timer_enter( | |
706 | timer_call_t call, | |
707 | timer_call_param_t param1, | |
708 | uint64_t deadline, | |
709 | uint64_t ctime) | |
710 | { | |
711 | assert(call->call_entry.func != NULL); | |
712 | assert(ml_get_interrupts_enabled() == FALSE); | |
713 | ||
714 | uint32_t flags = TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL; | |
715 | ||
716 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_START, | |
717 | VM_KERNEL_UNSLIDE_OR_PERM(call), | |
718 | VM_KERNEL_ADDRHIDE(param1), deadline, | |
719 | flags, 0); | |
720 | ||
721 | if (__improbable(deadline < ctime)) { | |
722 | deadline = past_deadline_timer_handle(deadline, ctime); | |
723 | } | |
724 | ||
725 | uint64_t ttd = deadline - ctime; | |
726 | #if CONFIG_DTRACE | |
727 | DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func, | |
728 | timer_call_param_t, TCE(call)->param0, uint32_t, flags, 0, | |
729 | (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call); | |
730 | #endif | |
731 | ||
732 | quantum_timer_set_deadline(deadline); | |
733 | TCE(call)->deadline = deadline; | |
734 | TCE(call)->param1 = param1; | |
735 | call->ttd = ttd; | |
736 | call->flags = flags; | |
737 | ||
738 | #if TIMER_TRACE | |
739 | TCE(call)->entry_time = ctime; | |
740 | #endif | |
741 | ||
742 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END, | |
743 | VM_KERNEL_UNSLIDE_OR_PERM(call), | |
744 | 1, deadline, 0, 0); | |
745 | ||
746 | return true; | |
747 | } | |
748 | ||
749 | ||
750 | boolean_t | |
751 | timer_call_quantum_timer_cancel( | |
752 | timer_call_t call) | |
753 | { | |
754 | assert(ml_get_interrupts_enabled() == FALSE); | |
755 | ||
756 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
757 | DECR_TIMER_CANCEL | DBG_FUNC_START, | |
758 | VM_KERNEL_UNSLIDE_OR_PERM(call), TCE(call)->deadline, | |
759 | 0, call->flags, 0); | |
760 | ||
761 | TCE(call)->deadline = 0; | |
762 | quantum_timer_set_deadline(0); | |
763 | ||
764 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
765 | DECR_TIMER_CANCEL | DBG_FUNC_END, | |
766 | VM_KERNEL_UNSLIDE_OR_PERM(call), 0, | |
767 | TCE(call)->deadline - mach_absolute_time(), | |
768 | TCE(call)->deadline - TCE(call)->entry_time, 0); | |
769 | ||
770 | #if CONFIG_DTRACE | |
771 | DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func, | |
772 | timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0, | |
773 | (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF)); | |
774 | #endif | |
775 | ||
776 | return true; | |
777 | } | |
778 | ||
1c79356b | 779 | boolean_t |
c910b4d9 A |
780 | timer_call_cancel( |
781 | timer_call_t call) | |
1c79356b | 782 | { |
6d2010ae | 783 | mpqueue_head_t *old_queue; |
1c79356b A |
784 | spl_t s; |
785 | ||
786 | s = splclock(); | |
1c79356b | 787 | |
39236c6e A |
788 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, |
789 | DECR_TIMER_CANCEL | DBG_FUNC_START, | |
4bd07ac2 | 790 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
fe8ab488 | 791 | TCE(call)->deadline, call->soft_deadline, call->flags, 0); |
39236c6e | 792 | |
6d2010ae | 793 | old_queue = timer_call_dequeue_unlocked(call); |
c910b4d9 A |
794 | |
795 | if (old_queue != NULL) { | |
39236c6e A |
796 | timer_queue_lock_spin(old_queue); |
797 | if (!queue_empty(&old_queue->head)) { | |
fe8ab488 A |
798 | timer_queue_cancel(old_queue, TCE(call)->deadline, CE(queue_first(&old_queue->head))->deadline); |
799 | timer_call_t thead = (timer_call_t)queue_first(&old_queue->head); | |
800 | old_queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline; | |
39236c6e A |
801 | } |
802 | else { | |
fe8ab488 | 803 | timer_queue_cancel(old_queue, TCE(call)->deadline, UINT64_MAX); |
39236c6e A |
804 | old_queue->earliest_soft_deadline = UINT64_MAX; |
805 | } | |
806 | timer_queue_unlock(old_queue); | |
1c79356b | 807 | } |
39236c6e A |
808 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, |
809 | DECR_TIMER_CANCEL | DBG_FUNC_END, | |
4bd07ac2 A |
810 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
811 | VM_KERNEL_UNSLIDE_OR_PERM(old_queue), | |
fe8ab488 A |
812 | TCE(call)->deadline - mach_absolute_time(), |
813 | TCE(call)->deadline - TCE(call)->entry_time, 0); | |
1c79356b A |
814 | splx(s); |
815 | ||
4b17d6b6 | 816 | #if CONFIG_DTRACE |
fe8ab488 A |
817 | DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func, |
818 | timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0, | |
4b17d6b6 A |
819 | (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF)); |
820 | #endif | |
821 | ||
c910b4d9 | 822 | return (old_queue != NULL); |
1c79356b A |
823 | } |
824 | ||
fe8ab488 A |
825 | static uint32_t timer_queue_shutdown_lock_skips; |
826 | static uint32_t timer_queue_shutdown_discarded; | |
827 | ||
9bccf70c | 828 | void |
c910b4d9 | 829 | timer_queue_shutdown( |
6d2010ae | 830 | mpqueue_head_t *queue) |
9bccf70c | 831 | { |
6d2010ae A |
832 | timer_call_t call; |
833 | mpqueue_head_t *new_queue; | |
c910b4d9 | 834 | spl_t s; |
9bccf70c | 835 | |
fe8ab488 | 836 | |
6d2010ae A |
837 | DBG("timer_queue_shutdown(%p)\n", queue); |
838 | ||
c910b4d9 | 839 | s = splclock(); |
9bccf70c | 840 | |
6d2010ae | 841 | /* Note comma operator in while expression re-locking each iteration */ |
39037602 | 842 | while ((void)timer_queue_lock_spin(queue), !queue_empty(&queue->head)) { |
6d2010ae | 843 | call = TIMER_CALL(queue_first(&queue->head)); |
fe8ab488 | 844 | |
6d2010ae A |
845 | if (!simple_lock_try(&call->lock)) { |
846 | /* | |
847 | * case (2b) lock order inversion, dequeue and skip | |
848 | * Don't change the call_entry queue back-pointer | |
849 | * but set the async_dequeue field. | |
850 | */ | |
851 | timer_queue_shutdown_lock_skips++; | |
39236c6e A |
852 | timer_call_entry_dequeue_async(call); |
853 | #if TIMER_ASSERT | |
854 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
855 | DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, | |
4bd07ac2 | 856 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
39236c6e | 857 | call->async_dequeue, |
4bd07ac2 | 858 | VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), |
39236c6e A |
859 | 0x2b, 0); |
860 | #endif | |
861 | timer_queue_unlock(queue); | |
6d2010ae A |
862 | continue; |
863 | } | |
9bccf70c | 864 | |
fe8ab488 A |
865 | boolean_t call_local = ((call->flags & TIMER_CALL_LOCAL) != 0); |
866 | ||
6d2010ae A |
867 | /* remove entry from old queue */ |
868 | timer_call_entry_dequeue(call); | |
39236c6e | 869 | timer_queue_unlock(queue); |
9bccf70c | 870 | |
fe8ab488 A |
871 | if (call_local == FALSE) { |
872 | /* and queue it on new, discarding LOCAL timers */ | |
873 | new_queue = timer_queue_assign(TCE(call)->deadline); | |
874 | timer_queue_lock_spin(new_queue); | |
875 | timer_call_entry_enqueue_deadline( | |
876 | call, new_queue, TCE(call)->deadline); | |
877 | timer_queue_unlock(new_queue); | |
878 | } else { | |
879 | timer_queue_shutdown_discarded++; | |
880 | } | |
881 | ||
5ba3f43e | 882 | assert(call_local == FALSE); |
6d2010ae | 883 | simple_unlock(&call->lock); |
9bccf70c A |
884 | } |
885 | ||
39236c6e | 886 | timer_queue_unlock(queue); |
c910b4d9 | 887 | splx(s); |
9bccf70c A |
888 | } |
889 | ||
5ba3f43e A |
890 | |
891 | void | |
892 | quantum_timer_expire( | |
893 | uint64_t deadline) | |
894 | { | |
895 | processor_t processor = current_processor(); | |
896 | timer_call_t call = TIMER_CALL(&(processor->quantum_timer)); | |
897 | ||
898 | if (__improbable(TCE(call)->deadline > deadline)) | |
899 | panic("CPU quantum timer deadline out of sync with timer call deadline"); | |
900 | ||
901 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
902 | DECR_TIMER_EXPIRE | DBG_FUNC_NONE, | |
903 | VM_KERNEL_UNSLIDE_OR_PERM(call), | |
904 | TCE(call)->deadline, | |
905 | TCE(call)->deadline, | |
906 | TCE(call)->entry_time, 0); | |
907 | ||
908 | timer_call_func_t func = TCE(call)->func; | |
909 | timer_call_param_t param0 = TCE(call)->param0; | |
910 | timer_call_param_t param1 = TCE(call)->param1; | |
911 | ||
912 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
913 | DECR_TIMER_CALLOUT | DBG_FUNC_START, | |
914 | VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), | |
915 | VM_KERNEL_ADDRHIDE(param0), | |
916 | VM_KERNEL_ADDRHIDE(param1), | |
917 | 0); | |
918 | ||
919 | #if CONFIG_DTRACE | |
920 | DTRACE_TMR7(callout__start, timer_call_func_t, func, | |
921 | timer_call_param_t, param0, unsigned, call->flags, | |
922 | 0, (call->ttd >> 32), | |
923 | (unsigned) (call->ttd & 0xFFFFFFFF), call); | |
924 | #endif | |
925 | (*func)(param0, param1); | |
926 | ||
927 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
928 | DECR_TIMER_CALLOUT | DBG_FUNC_END, | |
929 | VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), | |
930 | VM_KERNEL_ADDRHIDE(param0), | |
931 | VM_KERNEL_ADDRHIDE(param1), | |
932 | 0); | |
933 | } | |
934 | ||
fe8ab488 | 935 | static uint32_t timer_queue_expire_lock_skips; |
c910b4d9 | 936 | uint64_t |
39236c6e | 937 | timer_queue_expire_with_options( |
6d2010ae | 938 | mpqueue_head_t *queue, |
39236c6e A |
939 | uint64_t deadline, |
940 | boolean_t rescan) | |
1c79356b | 941 | { |
39236c6e A |
942 | timer_call_t call = NULL; |
943 | uint32_t tc_iterations = 0; | |
6d2010ae A |
944 | DBG("timer_queue_expire(%p,)\n", queue); |
945 | ||
39236c6e A |
946 | uint64_t cur_deadline = deadline; |
947 | timer_queue_lock_spin(queue); | |
1c79356b | 948 | |
6d2010ae | 949 | while (!queue_empty(&queue->head)) { |
39236c6e A |
950 | /* Upon processing one or more timer calls, refresh the |
951 | * deadline to account for time elapsed in the callout | |
952 | */ | |
953 | if (++tc_iterations > 1) | |
954 | cur_deadline = mach_absolute_time(); | |
955 | ||
956 | if (call == NULL) | |
957 | call = TIMER_CALL(queue_first(&queue->head)); | |
1c79356b | 958 | |
39236c6e | 959 | if (call->soft_deadline <= cur_deadline) { |
1c79356b A |
960 | timer_call_func_t func; |
961 | timer_call_param_t param0, param1; | |
962 | ||
39236c6e A |
963 | TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0); |
964 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
965 | DECR_TIMER_EXPIRE | DBG_FUNC_NONE, | |
4bd07ac2 | 966 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
39236c6e | 967 | call->soft_deadline, |
fe8ab488 A |
968 | TCE(call)->deadline, |
969 | TCE(call)->entry_time, 0); | |
39236c6e | 970 | |
fe8ab488 A |
971 | if ((call->flags & TIMER_CALL_RATELIMITED) && |
972 | (TCE(call)->deadline > cur_deadline)) { | |
39236c6e A |
973 | if (rescan == FALSE) |
974 | break; | |
975 | } | |
976 | ||
6d2010ae A |
977 | if (!simple_lock_try(&call->lock)) { |
978 | /* case (2b) lock inversion, dequeue and skip */ | |
979 | timer_queue_expire_lock_skips++; | |
39236c6e A |
980 | timer_call_entry_dequeue_async(call); |
981 | call = NULL; | |
6d2010ae A |
982 | continue; |
983 | } | |
984 | ||
985 | timer_call_entry_dequeue(call); | |
1c79356b | 986 | |
fe8ab488 A |
987 | func = TCE(call)->func; |
988 | param0 = TCE(call)->param0; | |
989 | param1 = TCE(call)->param1; | |
1c79356b | 990 | |
6d2010ae | 991 | simple_unlock(&call->lock); |
39236c6e | 992 | timer_queue_unlock(queue); |
1c79356b | 993 | |
39236c6e | 994 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, |
316670eb | 995 | DECR_TIMER_CALLOUT | DBG_FUNC_START, |
4bd07ac2 | 996 | VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), |
5ba3f43e A |
997 | VM_KERNEL_ADDRHIDE(param0), |
998 | VM_KERNEL_ADDRHIDE(param1), | |
4bd07ac2 | 999 | 0); |
2d21ac55 | 1000 | |
4b17d6b6 | 1001 | #if CONFIG_DTRACE |
39236c6e | 1002 | DTRACE_TMR7(callout__start, timer_call_func_t, func, |
4b17d6b6 A |
1003 | timer_call_param_t, param0, unsigned, call->flags, |
1004 | 0, (call->ttd >> 32), | |
39236c6e | 1005 | (unsigned) (call->ttd & 0xFFFFFFFF), call); |
2d21ac55 | 1006 | #endif |
4b17d6b6 A |
1007 | /* Maintain time-to-deadline in per-processor data |
1008 | * structure for thread wakeup deadline statistics. | |
1009 | */ | |
1010 | uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd)); | |
1011 | *ttdp = call->ttd; | |
1c79356b | 1012 | (*func)(param0, param1); |
4b17d6b6 | 1013 | *ttdp = 0; |
4b17d6b6 | 1014 | #if CONFIG_DTRACE |
39236c6e A |
1015 | DTRACE_TMR4(callout__end, timer_call_func_t, func, |
1016 | param0, param1, call); | |
2d21ac55 A |
1017 | #endif |
1018 | ||
39236c6e | 1019 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, |
316670eb | 1020 | DECR_TIMER_CALLOUT | DBG_FUNC_END, |
4bd07ac2 | 1021 | VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), |
5ba3f43e A |
1022 | VM_KERNEL_ADDRHIDE(param0), |
1023 | VM_KERNEL_ADDRHIDE(param1), | |
4bd07ac2 | 1024 | 0); |
39236c6e A |
1025 | call = NULL; |
1026 | timer_queue_lock_spin(queue); | |
1027 | } else { | |
1028 | if (__probable(rescan == FALSE)) { | |
1029 | break; | |
1030 | } else { | |
fe8ab488 A |
1031 | int64_t skew = TCE(call)->deadline - call->soft_deadline; |
1032 | assert(TCE(call)->deadline >= call->soft_deadline); | |
39236c6e A |
1033 | |
1034 | /* DRK: On a latency quality-of-service level change, | |
1035 | * re-sort potentially rate-limited timers. The platform | |
1036 | * layer determines which timers require | |
1037 | * this. In the absence of the per-callout | |
1038 | * synchronization requirement, a global resort could | |
1039 | * be more efficient. The re-sort effectively | |
1040 | * annuls all timer adjustments, i.e. the "soft | |
1041 | * deadline" is the sort key. | |
1042 | */ | |
1043 | ||
1044 | if (timer_resort_threshold(skew)) { | |
1045 | if (__probable(simple_lock_try(&call->lock))) { | |
1046 | timer_call_entry_dequeue(call); | |
1047 | timer_call_entry_enqueue_deadline(call, queue, call->soft_deadline); | |
1048 | simple_unlock(&call->lock); | |
1049 | call = NULL; | |
1050 | } | |
1051 | } | |
1052 | if (call) { | |
1053 | call = TIMER_CALL(queue_next(qe(call))); | |
1054 | if (queue_end(&queue->head, qe(call))) | |
1055 | break; | |
1056 | } | |
1057 | } | |
c910b4d9 | 1058 | } |
1c79356b A |
1059 | } |
1060 | ||
39236c6e A |
1061 | if (!queue_empty(&queue->head)) { |
1062 | call = TIMER_CALL(queue_first(&queue->head)); | |
fe8ab488 A |
1063 | cur_deadline = TCE(call)->deadline; |
1064 | queue->earliest_soft_deadline = (call->flags & TIMER_CALL_RATELIMITED) ? TCE(call)->deadline: call->soft_deadline; | |
39236c6e A |
1065 | } else { |
1066 | queue->earliest_soft_deadline = cur_deadline = UINT64_MAX; | |
1067 | } | |
1c79356b | 1068 | |
39236c6e | 1069 | timer_queue_unlock(queue); |
c910b4d9 | 1070 | |
39236c6e | 1071 | return (cur_deadline); |
1c79356b | 1072 | } |
6d2010ae | 1073 | |
39236c6e A |
1074 | uint64_t |
1075 | timer_queue_expire( | |
1076 | mpqueue_head_t *queue, | |
1077 | uint64_t deadline) | |
1078 | { | |
1079 | return timer_queue_expire_with_options(queue, deadline, FALSE); | |
1080 | } | |
6d2010ae A |
1081 | |
1082 | extern int serverperfmode; | |
fe8ab488 | 1083 | static uint32_t timer_queue_migrate_lock_skips; |
6d2010ae | 1084 | /* |
39236c6e | 1085 | * timer_queue_migrate() is called by timer_queue_migrate_cpu() |
6d2010ae A |
1086 | * to move timer requests from the local processor (queue_from) |
1087 | * to a target processor's (queue_to). | |
1088 | */ | |
1089 | int | |
1090 | timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to) | |
1091 | { | |
1092 | timer_call_t call; | |
1093 | timer_call_t head_to; | |
1094 | int timers_migrated = 0; | |
1095 | ||
1096 | DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to); | |
1097 | ||
1098 | assert(!ml_get_interrupts_enabled()); | |
1099 | assert(queue_from != queue_to); | |
1100 | ||
1101 | if (serverperfmode) { | |
1102 | /* | |
1103 | * if we're running a high end server | |
1104 | * avoid migrations... they add latency | |
1105 | * and don't save us power under typical | |
1106 | * server workloads | |
1107 | */ | |
1108 | return -4; | |
1109 | } | |
1110 | ||
1111 | /* | |
1112 | * Take both local (from) and target (to) timer queue locks while | |
1113 | * moving the timers from the local queue to the target processor. | |
1114 | * We assume that the target is always the boot processor. | |
1115 | * But only move if all of the following is true: | |
1116 | * - the target queue is non-empty | |
1117 | * - the local queue is non-empty | |
1118 | * - the local queue's first deadline is later than the target's | |
1119 | * - the local queue contains no non-migrateable "local" call | |
1120 | * so that we need not have the target resync. | |
1121 | */ | |
1122 | ||
39236c6e | 1123 | timer_queue_lock_spin(queue_to); |
6d2010ae A |
1124 | |
1125 | head_to = TIMER_CALL(queue_first(&queue_to->head)); | |
1126 | if (queue_empty(&queue_to->head)) { | |
1127 | timers_migrated = -1; | |
1128 | goto abort1; | |
1129 | } | |
1130 | ||
39236c6e | 1131 | timer_queue_lock_spin(queue_from); |
6d2010ae A |
1132 | |
1133 | if (queue_empty(&queue_from->head)) { | |
1134 | timers_migrated = -2; | |
1135 | goto abort2; | |
1136 | } | |
1137 | ||
1138 | call = TIMER_CALL(queue_first(&queue_from->head)); | |
fe8ab488 | 1139 | if (TCE(call)->deadline < TCE(head_to)->deadline) { |
6d2010ae A |
1140 | timers_migrated = 0; |
1141 | goto abort2; | |
1142 | } | |
1143 | ||
1144 | /* perform scan for non-migratable timers */ | |
1145 | do { | |
1146 | if (call->flags & TIMER_CALL_LOCAL) { | |
1147 | timers_migrated = -3; | |
1148 | goto abort2; | |
1149 | } | |
1150 | call = TIMER_CALL(queue_next(qe(call))); | |
1151 | } while (!queue_end(&queue_from->head, qe(call))); | |
1152 | ||
1153 | /* migration loop itself -- both queues are locked */ | |
1154 | while (!queue_empty(&queue_from->head)) { | |
1155 | call = TIMER_CALL(queue_first(&queue_from->head)); | |
1156 | if (!simple_lock_try(&call->lock)) { | |
1157 | /* case (2b) lock order inversion, dequeue only */ | |
39236c6e A |
1158 | #ifdef TIMER_ASSERT |
1159 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
1160 | DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, | |
4bd07ac2 A |
1161 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
1162 | VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), | |
1163 | VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data), | |
39236c6e A |
1164 | 0x2b, 0); |
1165 | #endif | |
6d2010ae | 1166 | timer_queue_migrate_lock_skips++; |
39236c6e | 1167 | timer_call_entry_dequeue_async(call); |
6d2010ae A |
1168 | continue; |
1169 | } | |
1170 | timer_call_entry_dequeue(call); | |
1171 | timer_call_entry_enqueue_deadline( | |
fe8ab488 | 1172 | call, queue_to, TCE(call)->deadline); |
6d2010ae A |
1173 | timers_migrated++; |
1174 | simple_unlock(&call->lock); | |
1175 | } | |
39236c6e | 1176 | queue_from->earliest_soft_deadline = UINT64_MAX; |
6d2010ae | 1177 | abort2: |
39236c6e | 1178 | timer_queue_unlock(queue_from); |
6d2010ae | 1179 | abort1: |
39236c6e | 1180 | timer_queue_unlock(queue_to); |
6d2010ae A |
1181 | |
1182 | return timers_migrated; | |
1183 | } | |
39236c6e A |
1184 | |
1185 | void | |
1186 | timer_queue_trace_cpu(int ncpu) | |
1187 | { | |
1188 | timer_call_nosync_cpu( | |
1189 | ncpu, | |
5ba3f43e | 1190 | (void(*)(void *))timer_queue_trace, |
39236c6e A |
1191 | (void*) timer_queue_cpu(ncpu)); |
1192 | } | |
1193 | ||
1194 | void | |
1195 | timer_queue_trace( | |
1196 | mpqueue_head_t *queue) | |
1197 | { | |
1198 | timer_call_t call; | |
1199 | spl_t s; | |
1200 | ||
1201 | if (!kdebug_enable) | |
1202 | return; | |
1203 | ||
1204 | s = splclock(); | |
1205 | timer_queue_lock_spin(queue); | |
1206 | ||
1207 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
1208 | DECR_TIMER_QUEUE | DBG_FUNC_START, | |
1209 | queue->count, mach_absolute_time(), 0, 0, 0); | |
1210 | ||
1211 | if (!queue_empty(&queue->head)) { | |
1212 | call = TIMER_CALL(queue_first(&queue->head)); | |
1213 | do { | |
1214 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
1215 | DECR_TIMER_QUEUE | DBG_FUNC_NONE, | |
1216 | call->soft_deadline, | |
fe8ab488 A |
1217 | TCE(call)->deadline, |
1218 | TCE(call)->entry_time, | |
4bd07ac2 | 1219 | VM_KERNEL_UNSLIDE(TCE(call)->func), |
39236c6e A |
1220 | 0); |
1221 | call = TIMER_CALL(queue_next(qe(call))); | |
1222 | } while (!queue_end(&queue->head, qe(call))); | |
1223 | } | |
1224 | ||
1225 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
1226 | DECR_TIMER_QUEUE | DBG_FUNC_END, | |
1227 | queue->count, mach_absolute_time(), 0, 0, 0); | |
1228 | ||
1229 | timer_queue_unlock(queue); | |
1230 | splx(s); | |
1231 | } | |
1232 | ||
1233 | void | |
1234 | timer_longterm_dequeued_locked(timer_call_t call) | |
1235 | { | |
1236 | timer_longterm_t *tlp = &timer_longterm; | |
1237 | ||
1238 | tlp->dequeues++; | |
1239 | if (call == tlp->threshold.call) | |
1240 | tlp->threshold.call = NULL; | |
1241 | } | |
1242 | ||
1243 | /* | |
1244 | * Place a timer call in the longterm list | |
1245 | * and adjust the next timer callout deadline if the new timer is first. | |
1246 | */ | |
1247 | mpqueue_head_t * | |
1248 | timer_longterm_enqueue_unlocked(timer_call_t call, | |
1249 | uint64_t now, | |
1250 | uint64_t deadline, | |
fe8ab488 A |
1251 | mpqueue_head_t **old_queue, |
1252 | uint64_t soft_deadline, | |
1253 | uint64_t ttd, | |
1254 | timer_call_param_t param1, | |
1255 | uint32_t callout_flags) | |
39236c6e A |
1256 | { |
1257 | timer_longterm_t *tlp = &timer_longterm; | |
1258 | boolean_t update_required = FALSE; | |
1259 | uint64_t longterm_threshold; | |
1260 | ||
1261 | longterm_threshold = now + tlp->threshold.interval; | |
1262 | ||
1263 | /* | |
1264 | * Return NULL without doing anything if: | |
1265 | * - this timer is local, or | |
1266 | * - the longterm mechanism is disabled, or | |
1267 | * - this deadline is too short. | |
1268 | */ | |
fe8ab488 | 1269 | if ((callout_flags & TIMER_CALL_LOCAL) != 0 || |
39236c6e | 1270 | (tlp->threshold.interval == TIMER_LONGTERM_NONE) || |
fe8ab488 | 1271 | (deadline <= longterm_threshold)) |
39236c6e A |
1272 | return NULL; |
1273 | ||
1274 | /* | |
1275 | * Remove timer from its current queue, if any. | |
1276 | */ | |
1277 | *old_queue = timer_call_dequeue_unlocked(call); | |
1278 | ||
1279 | /* | |
1280 | * Lock the longterm queue, queue timer and determine | |
1281 | * whether an update is necessary. | |
1282 | */ | |
1283 | assert(!ml_get_interrupts_enabled()); | |
1284 | simple_lock(&call->lock); | |
1285 | timer_queue_lock_spin(timer_longterm_queue); | |
fe8ab488 A |
1286 | TCE(call)->deadline = deadline; |
1287 | TCE(call)->param1 = param1; | |
1288 | call->ttd = ttd; | |
1289 | call->soft_deadline = soft_deadline; | |
1290 | call->flags = callout_flags; | |
39236c6e | 1291 | timer_call_entry_enqueue_tail(call, timer_longterm_queue); |
39236c6e A |
1292 | |
1293 | tlp->enqueues++; | |
1294 | ||
1295 | /* | |
1296 | * We'll need to update the currently set threshold timer | |
1297 | * if the new deadline is sooner and no sooner update is in flight. | |
1298 | */ | |
1299 | if (deadline < tlp->threshold.deadline && | |
1300 | deadline < tlp->threshold.preempted) { | |
1301 | tlp->threshold.preempted = deadline; | |
1302 | tlp->threshold.call = call; | |
1303 | update_required = TRUE; | |
1304 | } | |
1305 | timer_queue_unlock(timer_longterm_queue); | |
1306 | simple_unlock(&call->lock); | |
1307 | ||
1308 | if (update_required) { | |
fe8ab488 A |
1309 | /* |
1310 | * Note: this call expects that calling the master cpu | |
1311 | * alone does not involve locking the topo lock. | |
1312 | */ | |
39236c6e A |
1313 | timer_call_nosync_cpu( |
1314 | master_cpu, | |
1315 | (void (*)(void *)) timer_longterm_update, | |
1316 | (void *)tlp); | |
1317 | } | |
1318 | ||
1319 | return timer_longterm_queue; | |
1320 | } | |
1321 | ||
1322 | /* | |
1323 | * Scan for timers below the longterm threshold. | |
1324 | * Move these to the local timer queue (of the boot processor on which the | |
1325 | * calling thread is running). | |
1326 | * Both the local (boot) queue and the longterm queue are locked. | |
1327 | * The scan is similar to the timer migrate sequence but is performed by | |
1328 | * successively examining each timer on the longterm queue: | |
1329 | * - if within the short-term threshold | |
1330 | * - enter on the local queue (unless being deleted), | |
1331 | * - otherwise: | |
1332 | * - if sooner, deadline becomes the next threshold deadline. | |
5ba3f43e A |
1333 | * The total scan time is limited to TIMER_LONGTERM_SCAN_LIMIT. If this limit is
1334 | * exceeded, we abort and reschedule so that we don't lock others out of | |
1335 | * the timer queues. Longterm timers firing late is not critical. | |
39236c6e A |
1336 | */ |
1337 | void | |
1338 | timer_longterm_scan(timer_longterm_t *tlp, | |
5ba3f43e | 1339 | uint64_t time_start) |
39236c6e A |
1340 | { |
1341 | queue_entry_t qe; | |
1342 | timer_call_t call; | |
1343 | uint64_t threshold; | |
1344 | uint64_t deadline; | |
5ba3f43e | 1345 | uint64_t time_limit = time_start + tlp->scan_limit; |
39236c6e A |
1346 | mpqueue_head_t *timer_master_queue; |
1347 | ||
1348 | assert(!ml_get_interrupts_enabled()); | |
1349 | assert(cpu_number() == master_cpu); | |
1350 | ||
1351 | if (tlp->threshold.interval != TIMER_LONGTERM_NONE) | |
5ba3f43e | 1352 | threshold = time_start + tlp->threshold.interval; |
39236c6e A |
1353 | |
1354 | tlp->threshold.deadline = TIMER_LONGTERM_NONE; | |
1355 | tlp->threshold.call = NULL; | |
1356 | ||
1357 | if (queue_empty(&timer_longterm_queue->head)) | |
1358 | return; | |
1359 | ||
1360 | timer_master_queue = timer_queue_cpu(master_cpu); | |
1361 | timer_queue_lock_spin(timer_master_queue); | |
1362 | ||
1363 | qe = queue_first(&timer_longterm_queue->head); | |
1364 | while (!queue_end(&timer_longterm_queue->head, qe)) { | |
1365 | call = TIMER_CALL(qe); | |
1366 | deadline = call->soft_deadline; | |
1367 | qe = queue_next(qe); | |
1368 | if (!simple_lock_try(&call->lock)) { | |
1369 | /* case (2c) lock order inversion, dequeue only */ | |
1370 | #ifdef TIMER_ASSERT | |
1371 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
1372 | DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, | |
4bd07ac2 A |
1373 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
1374 | VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), | |
1375 | VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data), | |
39236c6e A |
1376 | 0x2c, 0); |
1377 | #endif | |
1378 | timer_call_entry_dequeue_async(call); | |
1379 | continue; | |
1380 | } | |
1381 | if (deadline < threshold) { | |
1382 | /* | |
1383 | * This timer needs moving (escalating) | |
1384 | * to the local (boot) processor's queue. | |
1385 | */ | |
1386 | #ifdef TIMER_ASSERT | |
5ba3f43e | 1387 | if (deadline < time_start) |
39236c6e A |
1388 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, |
1389 | DECR_TIMER_OVERDUE | DBG_FUNC_NONE, | |
4bd07ac2 | 1390 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
39236c6e | 1391 | deadline, |
5ba3f43e | 1392 | time_start, |
39236c6e A |
1393 | threshold, |
1394 | 0); | |
1395 | #endif | |
1396 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
1397 | DECR_TIMER_ESCALATE | DBG_FUNC_NONE, | |
4bd07ac2 | 1398 | VM_KERNEL_UNSLIDE_OR_PERM(call), |
fe8ab488 A |
1399 | TCE(call)->deadline, |
1400 | TCE(call)->entry_time, | |
4bd07ac2 | 1401 | VM_KERNEL_UNSLIDE(TCE(call)->func), |
39236c6e A |
1402 | 0); |
1403 | tlp->escalates++; | |
1404 | timer_call_entry_dequeue(call); | |
1405 | timer_call_entry_enqueue_deadline( | |
fe8ab488 | 1406 | call, timer_master_queue, TCE(call)->deadline); |
39236c6e A |
1407 | /* |
1408 | * A side-effect of the following call is to update | |
1409 | * the actual hardware deadline if required. | |
1410 | */ | |
1411 | (void) timer_queue_assign(deadline); | |
1412 | } else { | |
1413 | if (deadline < tlp->threshold.deadline) { | |
1414 | tlp->threshold.deadline = deadline; | |
1415 | tlp->threshold.call = call; | |
1416 | } | |
1417 | } | |
1418 | simple_unlock(&call->lock); | |
5ba3f43e A |
1419 | |
1420 | /* Abort scan if we're taking too long. */ | |
1421 | if (mach_absolute_time() > time_limit) { | |
1422 | tlp->threshold.deadline = TIMER_LONGTERM_SCAN_AGAIN; | |
1423 | tlp->scan_pauses++; | |
1424 | DBG("timer_longterm_scan() paused %llu, qlen: %llu\n", | |
1425 | time_limit, tlp->queue.count); | |
1426 | break; | |
1427 | } | |
39236c6e A |
1428 | } |
1429 | ||
1430 | timer_queue_unlock(timer_master_queue); | |
1431 | } | |
1432 | ||
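/*
 * timer_longterm_callout() is the handler installed on tlp->threshold.timer
 * by timer_longterm_init() below; when the threshold timer fires it simply
 * re-runs timer_longterm_update().
 */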
1433 | void | |
1434 | timer_longterm_callout(timer_call_param_t p0, __unused timer_call_param_t p1) | |
1435 | { | |
1436 | timer_longterm_t *tlp = (timer_longterm_t *) p0; | |
1437 | ||
1438 | timer_longterm_update(tlp); | |
1439 | } | |
1440 | ||
1441 | void | |
1442 | timer_longterm_update_locked(timer_longterm_t *tlp) | |
1443 | { | |
1444 | uint64_t latency; | |
1445 | ||
1446 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
1447 | DECR_TIMER_UPDATE | DBG_FUNC_START, | |
4bd07ac2 | 1448 | VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue), |
39236c6e A |
1449 | tlp->threshold.deadline, |
1450 | tlp->threshold.preempted, | |
1451 | tlp->queue.count, 0); | |
1452 | ||
1453 | tlp->scan_time = mach_absolute_time(); | |
1454 | if (tlp->threshold.preempted != TIMER_LONGTERM_NONE) { | |
1455 | tlp->threshold.preempts++; | |
1456 | tlp->threshold.deadline = tlp->threshold.preempted; | |
1457 | tlp->threshold.preempted = TIMER_LONGTERM_NONE; | |
1458 | /* | |
1459 | * Note: in the unlikely event that a pre-empted timer has | |
1460 | * itself been cancelled, we'll simply re-scan later at the | |
1461 | * time of the preempted/cancelled timer. | |
1462 | */ | |
1463 | } else { | |
1464 | tlp->threshold.scans++; | |
1465 | ||
1466 | /* | |
1467 | * Maintain a moving average of our wakeup latency. | |
1468 | * Clamp latency to 0 and ignore above threshold interval. | |
1469 | */ | |
1470 | if (tlp->scan_time > tlp->threshold.deadline_set) | |
1471 | latency = tlp->scan_time - tlp->threshold.deadline_set; | |
1472 | else | |
1473 | latency = 0; | |
1474 | if (latency < tlp->threshold.interval) { | |
1475 | tlp->threshold.latency_min = | |
1476 | MIN(tlp->threshold.latency_min, latency); | |
1477 | tlp->threshold.latency_max = | |
1478 | MAX(tlp->threshold.latency_max, latency); | |
1479 | tlp->threshold.latency = | |
1480 | (tlp->threshold.latency*99 + latency) / 100; | |
1481 | } | |
1482 | ||
1483 | timer_longterm_scan(tlp, tlp->scan_time); | |
1484 | } | |
1485 | ||
1486 | tlp->threshold.deadline_set = tlp->threshold.deadline; | |
1487 | /* The deadline used when the threshold timer is next set is adjusted below for margin and observed latency */
5ba3f43e A |
1488 | if (tlp->threshold.deadline != TIMER_LONGTERM_NONE && |
1489 | tlp->threshold.deadline != TIMER_LONGTERM_SCAN_AGAIN) { | |
39236c6e A |
1490 | tlp->threshold.deadline_set -= tlp->threshold.margin; |
1491 | tlp->threshold.deadline_set -= tlp->threshold.latency; | |
1492 | } | |
5ba3f43e A |
1493 | |
1494 | /* Throttle next scan time */ | |
1495 | uint64_t scan_clamp = mach_absolute_time() + tlp->scan_limit; | |
1496 | if (tlp->threshold.deadline_set < scan_clamp) | |
1497 | tlp->threshold.deadline_set = scan_clamp; | |
39236c6e A |
1498 | |
1499 | TIMER_KDEBUG_TRACE(KDEBUG_TRACE, | |
1500 | DECR_TIMER_UPDATE | DBG_FUNC_END, | |
4bd07ac2 | 1501 | VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue), |
39236c6e A |
1502 | tlp->threshold.deadline, |
1503 | tlp->threshold.scans, | |
1504 | tlp->queue.count, 0); | |
1505 | } | |
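/*
 * Illustrative example of the wakeup-latency moving average above (numbers
 * are made up): with the 99/100 weighting, each accepted sample moves the
 * running value by 1% of the difference, e.g. a running latency of 1000
 * absolute-time units and a new sample of 2000 gives
 *	(1000 * 99 + 2000) / 100 = 1010.
 * Samples at or above threshold.interval are ignored entirely, and
 * latency_min/latency_max track the extremes of the accepted samples only.
 */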
1506 | ||
1507 | void | |
1508 | timer_longterm_update(timer_longterm_t *tlp) | |
1509 | { | |
1510 | spl_t s = splclock(); | |
1511 | ||
1512 | timer_queue_lock_spin(timer_longterm_queue); | |
1513 | ||
1514 | if (cpu_number() != master_cpu) | |
1515 | panic("timer_longterm_update() on non-boot cpu");
1516 | ||
1517 | timer_longterm_update_locked(tlp); | |
1518 | ||
1519 | if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) | |
1520 | timer_call_enter( | |
1521 | &tlp->threshold.timer, | |
1522 | tlp->threshold.deadline_set, | |
1523 | TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL); | |
1524 | ||
1525 | timer_queue_unlock(timer_longterm_queue); | |
1526 | splx(s); | |
1527 | } | |
1528 | ||
1529 | void | |
1530 | timer_longterm_init(void) | |
1531 | { | |
1532 | uint32_t longterm; | |
1533 | timer_longterm_t *tlp = &timer_longterm; | |
1534 | ||
1535 | DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue); | |
1536 | ||
1537 | /* | |
15129b1c A |
1538 | * Set the longterm timer threshold. Defaults to TIMER_LONGTERM_THRESHOLD,
1539 | * or to TIMER_LONGTERM_NONE (disabled) when serverperfmode is set;
1540 | * may be overridden by the "longterm" boot-arg (in milliseconds).
39236c6e | 1541 | */ |
15129b1c A |
1542 | tlp->threshold.interval = serverperfmode ? TIMER_LONGTERM_NONE |
1543 | : TIMER_LONGTERM_THRESHOLD; | |
39236c6e A |
1544 | if (PE_parse_boot_argn("longterm", &longterm, sizeof (longterm))) { |
1545 | tlp->threshold.interval = (longterm == 0) ? | |
1546 | TIMER_LONGTERM_NONE : | |
1547 | longterm * NSEC_PER_MSEC; | |
1548 | } | |
1549 | if (tlp->threshold.interval != TIMER_LONGTERM_NONE) { | |
1550 | printf("Longterm timer threshold: %llu ms\n", | |
1551 | tlp->threshold.interval / NSEC_PER_MSEC); | |
1552 | kprintf("Longterm timer threshold: %llu ms\n", | |
1553 | tlp->threshold.interval / NSEC_PER_MSEC); | |
1554 | nanoseconds_to_absolutetime(tlp->threshold.interval, | |
1555 | &tlp->threshold.interval); | |
1556 | tlp->threshold.margin = tlp->threshold.interval / 10; | |
1557 | tlp->threshold.latency_min = EndOfAllTime; | |
1558 | tlp->threshold.latency_max = 0; | |
1559 | } | |
1560 | ||
1561 | tlp->threshold.preempted = TIMER_LONGTERM_NONE; | |
1562 | tlp->threshold.deadline = TIMER_LONGTERM_NONE; | |
1563 | ||
1564 | lck_attr_setdefault(&timer_longterm_lck_attr); | |
1565 | lck_grp_attr_setdefault(&timer_longterm_lck_grp_attr); | |
1566 | lck_grp_init(&timer_longterm_lck_grp, | |
1567 | "timer_longterm", &timer_longterm_lck_grp_attr); | |
1568 | mpqueue_init(&tlp->queue, | |
1569 | &timer_longterm_lck_grp, &timer_longterm_lck_attr); | |
1570 | ||
1571 | timer_call_setup(&tlp->threshold.timer, | |
1572 | timer_longterm_callout, (timer_call_param_t) tlp); | |
1573 | ||
1574 | timer_longterm_queue = &tlp->queue; | |
1575 | } | |
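/*
 * Usage note (example value only): per the parsing above, booting with
 * "longterm=5000" sets the threshold to 5000 ms (subsequently converted to
 * absolute time), while "longterm=0" maps to TIMER_LONGTERM_NONE, i.e. the
 * longterm threshold is disabled.
 */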
1576 | ||
1577 | enum { | |
1578 | THRESHOLD, QCOUNT, | |
1579 | ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS, | |
5ba3f43e | 1580 | LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, PAUSES |
39236c6e A |
1581 | }; |
1582 | uint64_t | |
1583 | timer_sysctl_get(int oid) | |
1584 | { | |
1585 | timer_longterm_t *tlp = &timer_longterm; | |
1586 | ||
1587 | switch (oid) { | |
1588 | case THRESHOLD: | |
1589 | return (tlp->threshold.interval == TIMER_LONGTERM_NONE) ? | |
1590 | 0 : tlp->threshold.interval / NSEC_PER_MSEC; | |
1591 | case QCOUNT: | |
1592 | return tlp->queue.count; | |
1593 | case ENQUEUES: | |
1594 | return tlp->enqueues; | |
1595 | case DEQUEUES: | |
1596 | return tlp->dequeues; | |
1597 | case ESCALATES: | |
1598 | return tlp->escalates; | |
1599 | case SCANS: | |
1600 | return tlp->threshold.scans; | |
1601 | case PREEMPTS: | |
1602 | return tlp->threshold.preempts; | |
1603 | case LATENCY: | |
1604 | return tlp->threshold.latency; | |
1605 | case LATENCY_MIN: | |
1606 | return tlp->threshold.latency_min; | |
1607 | case LATENCY_MAX: | |
1608 | return tlp->threshold.latency_max; | |
5ba3f43e A |
1609 | case SCAN_LIMIT: |
1610 | return tlp->scan_limit; | |
1611 | case PAUSES: | |
1612 | return tlp->scan_pauses; | |
39236c6e A |
1613 | default: |
1614 | return 0; | |
1615 | } | |
1616 | } | |
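/*
 * A minimal sketch of a consumer, assuming a sysctl handler elsewhere in the
 * tree wires these OIDs through (that handler is not part of this file):
 *
 *	uint64_t qlen    = timer_sysctl_get(QCOUNT);
 *	uint64_t lat_abs = timer_sysctl_get(LATENCY);
 *
 * THRESHOLD is reported in milliseconds, while LATENCY/LATENCY_MIN/LATENCY_MAX
 * are raw mach absolute-time units; a consumer wanting nanoseconds would
 * convert with absolutetime_to_nanoseconds().
 */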
1617 | ||
1618 | /* | |
1619 | * timer_master_scan() is the inverse of timer_longterm_scan() | |
1620 | * since it un-escalates timers to the longterm queue. | |
1621 | */ | |
1622 | static void | |
1623 | timer_master_scan(timer_longterm_t *tlp, | |
1624 | uint64_t now) | |
1625 | { | |
1626 | queue_entry_t qe; | |
1627 | timer_call_t call; | |
1628 | uint64_t threshold; | |
1629 | uint64_t deadline; | |
1630 | mpqueue_head_t *timer_master_queue; | |
1631 | ||
1632 | if (tlp->threshold.interval != TIMER_LONGTERM_NONE) | |
1633 | threshold = now + tlp->threshold.interval; | |
1634 | else | |
1635 | threshold = TIMER_LONGTERM_NONE; | |
1636 | ||
1637 | timer_master_queue = timer_queue_cpu(master_cpu); | |
1638 | timer_queue_lock_spin(timer_master_queue); | |
1639 | ||
1640 | qe = queue_first(&timer_master_queue->head); | |
1641 | while (!queue_end(&timer_master_queue->head, qe)) { | |
1642 | call = TIMER_CALL(qe); | |
fe8ab488 | 1643 | deadline = TCE(call)->deadline; |
39236c6e A |
1644 | qe = queue_next(qe); |
1645 | if ((call->flags & TIMER_CALL_LOCAL) != 0) | |
1646 | continue; | |
1647 | if (!simple_lock_try(&call->lock)) { | |
1648 | /* case (2c) lock order inversion, dequeue only */ | |
1649 | timer_call_entry_dequeue_async(call); | |
1650 | continue; | |
1651 | } | |
1652 | if (deadline > threshold) { | |
1653 | /* move from master to longterm */ | |
1654 | timer_call_entry_dequeue(call); | |
1655 | timer_call_entry_enqueue_tail(call, timer_longterm_queue); | |
1656 | if (deadline < tlp->threshold.deadline) { | |
1657 | tlp->threshold.deadline = deadline; | |
1658 | tlp->threshold.call = call; | |
1659 | } | |
1660 | } | |
1661 | simple_unlock(&call->lock); | |
1662 | } | |
1663 | timer_queue_unlock(timer_master_queue); | |
1664 | } | |
1665 | ||
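/*
 * timer_sysctl_set_threshold() applies a new longterm threshold, supplied in
 * milliseconds (0 meaning disable). It runs on the master CPU, dispatched via
 * timer_call_cpu() from timer_sysctl_set() below. Depending on whether the
 * threshold grew or shrank, the longterm or the master queue is rescanned so
 * that timers land on the correct side of the new threshold, and the
 * statistics counters are reset.
 */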
1666 | static void | |
1667 | timer_sysctl_set_threshold(uint64_t value) | |
1668 | { | |
1669 | timer_longterm_t *tlp = &timer_longterm; | |
1670 | spl_t s = splclock(); | |
1671 | boolean_t threshold_increase; | |
1672 | ||
1673 | timer_queue_lock_spin(timer_longterm_queue); | |
1674 | ||
1675 | timer_call_cancel(&tlp->threshold.timer); | |
1676 | ||
1677 | /* | |
1678 | * Set the new threshold and note whether it's increasing.
1679 | */ | |
1680 | if (value == 0) { | |
1681 | tlp->threshold.interval = TIMER_LONGTERM_NONE; | |
1682 | threshold_increase = TRUE; | |
1683 | timer_call_cancel(&tlp->threshold.timer); | |
1684 | } else { | |
1685 | uint64_t old_interval = tlp->threshold.interval; | |
1686 | tlp->threshold.interval = value * NSEC_PER_MSEC; | |
1687 | nanoseconds_to_absolutetime(tlp->threshold.interval, | |
1688 | &tlp->threshold.interval); | |
1689 | tlp->threshold.margin = tlp->threshold.interval / 10; | |
1690 | if (old_interval == TIMER_LONGTERM_NONE) | |
1691 | threshold_increase = FALSE; | |
1692 | else | |
1693 | threshold_increase = (tlp->threshold.interval > old_interval); | |
1694 | } | |
1695 | ||
1696 | if (threshold_increase /* or removal */) { | |
1697 | /* Escalate timers from the longterm queue */ | |
1698 | timer_longterm_scan(tlp, mach_absolute_time()); | |
1699 | } else /* decrease or addition */ { | |
1700 | /* | |
1701 | * We scan the local/master queue for timers that are now longterm.
1702 | * To be strictly correct, we should scan all processor queues,
1703 | * but timer migration results in most timers gravitating to the | |
1704 | * master processor in any case. | |
1705 | */ | |
1706 | timer_master_scan(tlp, mach_absolute_time()); | |
1707 | } | |
1708 | ||
1709 | /* Set new timer accordingly */ | |
1710 | tlp->threshold.deadline_set = tlp->threshold.deadline; | |
1711 | if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) { | |
1712 | tlp->threshold.deadline_set -= tlp->threshold.margin; | |
1713 | tlp->threshold.deadline_set -= tlp->threshold.latency; | |
1714 | timer_call_enter( | |
1715 | &tlp->threshold.timer, | |
1716 | tlp->threshold.deadline_set, | |
1717 | TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL); | |
1718 | } | |
1719 | ||
1720 | /* Reset stats */ | |
1721 | tlp->enqueues = 0; | |
1722 | tlp->dequeues = 0; | |
1723 | tlp->escalates = 0; | |
5ba3f43e | 1724 | tlp->scan_pauses = 0; |
39236c6e A |
1725 | tlp->threshold.scans = 0; |
1726 | tlp->threshold.preempts = 0; | |
1727 | tlp->threshold.latency = 0; | |
1728 | tlp->threshold.latency_min = EndOfAllTime; | |
1729 | tlp->threshold.latency_max = 0; | |
1730 | ||
1731 | timer_queue_unlock(timer_longterm_queue); | |
1732 | splx(s); | |
1733 | } | |
1734 | ||
1735 | int | |
1736 | timer_sysctl_set(int oid, uint64_t value) | |
1737 | { | |
1738 | switch (oid) { | |
1739 | case THRESHOLD: | |
1740 | timer_call_cpu( | |
1741 | master_cpu, | |
1742 | (void (*)(void *)) timer_sysctl_set_threshold, | |
1743 | (void *) value); | |
1744 | return KERN_SUCCESS; | |
5ba3f43e A |
1745 | case SCAN_LIMIT: |
1746 | timer_longterm.scan_limit = value; | |
1747 | return KERN_SUCCESS; | |
39236c6e A |
1748 | default: |
1749 | return KERN_INVALID_ARGUMENT; | |
1750 | } | |
1751 | } | |
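/*
 * Only THRESHOLD (dispatched to the master CPU) and SCAN_LIMIT are settable;
 * the remaining OIDs are read-only counters exposed via timer_sysctl_get()
 * above.
 */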
fe8ab488 A |
1752 | |
1753 | ||
1754 | /* Select timer coalescing window based on per-task quality-of-service hints */ | |
1755 | static boolean_t tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) { | |
1756 | uint32_t latency_qos; | |
1757 | boolean_t adjusted = FALSE; | |
1758 | task_t ctask = t->task; | |
1759 | ||
1760 | if (ctask) { | |
1761 | latency_qos = proc_get_effective_thread_policy(t, TASK_POLICY_LATENCY_QOS); | |
1762 | ||
1763 | assert(latency_qos <= NUM_LATENCY_QOS_TIERS); | |
1764 | ||
1765 | if (latency_qos) { | |
1766 | *tshift = tcoal_prio_params.latency_qos_scale[latency_qos - 1]; | |
1767 | *tmax_abstime = tcoal_prio_params.latency_qos_abstime_max[latency_qos - 1]; | |
1768 | *pratelimited = tcoal_prio_params.latency_tier_rate_limited[latency_qos - 1]; | |
1769 | adjusted = TRUE; | |
1770 | } | |
1771 | } | |
1772 | return adjusted; | |
1773 | } | |
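/*
 * Note on the table lookup above: latency_qos is a 1-based tier, so tier N
 * indexes entry N-1 of the latency_qos_scale/latency_qos_abstime_max/
 * latency_tier_rate_limited arrays; a tier of 0 leaves the caller's shift and
 * maximum untouched and the function returns FALSE.
 */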
1774 | ||
1775 | ||
1776 | /* Adjust timer deadlines based on priority of the thread and the | |
1777 | * urgency value provided at timeout establishment. With this mechanism, | |
1778 | * timers are no longer necessarily sorted in order of soft deadline | |
1779 | * on a given timer queue, i.e. they may be differentially skewed. | |
1780 | * In the current scheme, this could lead to fewer pending timers | |
1781 | * processed than is technically possible when the HW deadline arrives. | |
1782 | */ | |
1783 | static void | |
1784 | timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) { | |
1785 | int16_t tpri = cthread->sched_pri; | |
1786 | if ((urgency & TIMER_CALL_USER_MASK) != 0) { | |
1787 | if (tpri >= BASEPRI_RTQUEUES || | |
1788 | urgency == TIMER_CALL_USER_CRITICAL) { | |
1789 | *tshift = tcoal_prio_params.timer_coalesce_rt_shift; | |
1790 | *tmax_abstime = tcoal_prio_params.timer_coalesce_rt_abstime_max; | |
1791 | TCOAL_PRIO_STAT(rt_tcl); | |
1792 | } else if (proc_get_effective_thread_policy(cthread, TASK_POLICY_DARWIN_BG) || | |
1793 | (urgency == TIMER_CALL_USER_BACKGROUND)) { | |
1794 | /* Determine if timer should be subjected to a lower QoS */ | |
1795 | if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) { | |
1796 | if (*tmax_abstime > tcoal_prio_params.timer_coalesce_bg_abstime_max) { | |
1797 | return; | |
1798 | } else { | |
1799 | *pratelimited = FALSE; | |
1800 | } | |
1801 | } | |
1802 | *tshift = tcoal_prio_params.timer_coalesce_bg_shift; | |
1803 | *tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max; | |
1804 | TCOAL_PRIO_STAT(bg_tcl); | |
1805 | } else if (tpri >= MINPRI_KERNEL) { | |
1806 | *tshift = tcoal_prio_params.timer_coalesce_kt_shift; | |
1807 | *tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max; | |
1808 | TCOAL_PRIO_STAT(kt_tcl); | |
1809 | } else if (cthread->sched_mode == TH_MODE_FIXED) { | |
1810 | *tshift = tcoal_prio_params.timer_coalesce_fp_shift; | |
1811 | *tmax_abstime = tcoal_prio_params.timer_coalesce_fp_abstime_max; | |
1812 | TCOAL_PRIO_STAT(fp_tcl); | |
1813 | } else if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) { | |
1814 | TCOAL_PRIO_STAT(qos_tcl); | |
1815 | } else if (cthread->sched_mode == TH_MODE_TIMESHARE) { | |
1816 | *tshift = tcoal_prio_params.timer_coalesce_ts_shift; | |
1817 | *tmax_abstime = tcoal_prio_params.timer_coalesce_ts_abstime_max; | |
1818 | TCOAL_PRIO_STAT(ts_tcl); | |
1819 | } else { | |
1820 | TCOAL_PRIO_STAT(nc_tcl); | |
1821 | } | |
1822 | } else if (urgency == TIMER_CALL_SYS_BACKGROUND) { | |
1823 | *tshift = tcoal_prio_params.timer_coalesce_bg_shift; | |
1824 | *tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max; | |
1825 | TCOAL_PRIO_STAT(bg_tcl); | |
1826 | } else { | |
1827 | *tshift = tcoal_prio_params.timer_coalesce_kt_shift; | |
1828 | *tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max; | |
1829 | TCOAL_PRIO_STAT(kt_tcl); | |
1830 | } | |
1831 | } | |
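/*
 * For user-urgency timers the branches above are evaluated in order:
 * realtime priority or TIMER_CALL_USER_CRITICAL, then DARWIN_BG or
 * TIMER_CALL_USER_BACKGROUND, then kernel-priority threads, fixed-priority
 * threads, per-task latency QoS, and finally timeshare threads. Kernel-urgency
 * timers fall through to the SYS_BACKGROUND or default (kt) parameters at the
 * end.
 */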
1832 | ||
1833 | ||
1834 | int timer_user_idle_level; | |
1835 | ||
1836 | uint64_t | |
1837 | timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthread, boolean_t *pratelimited) | |
1838 | { | |
1839 | int32_t tcs_shift = 0; | |
1840 | uint64_t tcs_max_abstime = 0; | |
1841 | uint64_t adjval; | |
1842 | uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK); | |
1843 | ||
1844 | if (mach_timer_coalescing_enabled && | |
1845 | (deadline > now) && (urgency != TIMER_CALL_SYS_CRITICAL)) { | |
1846 | timer_compute_leeway(cthread, urgency, &tcs_shift, &tcs_max_abstime, pratelimited); | |
1847 | ||
1848 | if (tcs_shift >= 0) | |
1849 | adjval = MIN((deadline - now) >> tcs_shift, tcs_max_abstime); | |
1850 | else | |
1851 | adjval = MIN((deadline - now) << (-tcs_shift), tcs_max_abstime); | |
1852 | /* Apply adjustments derived from "user idle level" heuristic */ | |
1853 | adjval += (adjval * timer_user_idle_level) >> 7; | |
1854 | return adjval; | |
1855 | } else { | |
1856 | return 0; | |
1857 | } | |
1858 | } | |
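/*
 * Worked example (shift and cap values are illustrative, not the actual
 * tcoal_prio_params contents): with a deadline 8 ms out, a shift of 3 and a
 * 75 ms cap,
 *	adjval = MIN(8 ms >> 3, 75 ms) = 1 ms;
 * at timer_user_idle_level == 64 the idle heuristic then adds
 *	(1 ms * 64) >> 7 = 0.5 ms,
 * for a total coalescing window of 1.5 ms. A negative shift widens rather than
 * narrows the window, and TIMER_CALL_SYS_CRITICAL timers always get 0 slop.
 */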
1859 | ||
1860 | int | |
1861 | timer_get_user_idle_level(void) { | |
1862 | return timer_user_idle_level; | |
1863 | } | |
1864 | ||
1865 | kern_return_t timer_set_user_idle_level(int ilevel) { | |
1866 | boolean_t do_reeval = FALSE; | |
1867 | ||
1868 | if ((ilevel < 0) || (ilevel > 128)) | |
1869 | return KERN_INVALID_ARGUMENT; | |
1870 | ||
1871 | if (ilevel < timer_user_idle_level) { | |
1872 | do_reeval = TRUE; | |
1873 | } | |
1874 | ||
1875 | timer_user_idle_level = ilevel; | |
1876 | ||
1877 | if (do_reeval) | |
1878 | ml_timer_evaluate(); | |
1879 | ||
1880 | return KERN_SUCCESS; | |
1881 | } |
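/*
 * Usage note: the idle level ranges from 0 to 128. Per timer_call_slop()
 * above, a level of 128 adds (adjval * 128) >> 7 == adjval, i.e. it doubles
 * the coalescing window. Lowering the level triggers ml_timer_evaluate() so
 * that already-armed timers are re-evaluated under the tighter setting.
 */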