/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:    timer.c
 * Purpose: Routines for handling the machine independent timer.
 */

#include <mach/mach_types.h>

#include <kern/timer_queue.h>
#include <kern/timer_call.h>
#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/pms.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <i386/cpu_data.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>

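/* Count of timer interrupts that arrived with no expired timer work to do. */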
uint32_t spurious_timers;

/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
timer_intr(int user_mode, uint64_t rip)
{
    uint64_t         orig_abstime, abstime;
    rtclock_timer_t *mytimer;
    cpu_data_t      *pp;
    uint64_t         pmdeadline;
    uint64_t         min_deadline = EndOfAllTime;
    uint64_t         run_deadline = EndOfAllTime;
    bool             timer_processed = false;

    pp = current_cpu_datap();

    SCHED_STATS_INC(timer_pop_count);

    orig_abstime = abstime = mach_absolute_time();

    /*
     * Has a pending clock timer expired?
     */
    mytimer = &pp->rtclock_timer;
    timer_processed = (mytimer->deadline <= abstime ||
        abstime >= mytimer->queue.earliest_soft_deadline);
    if (timer_processed) {
        uint64_t rtclock_deadline = MAX(mytimer->deadline, mytimer->when_set);
        /*
         * When opportunistically processing coalesced timers, don't factor
         * their latency into the trace event.
         */
        if (abstime > rtclock_deadline) {
            TCOAL_DEBUG(0xEEEE0000, abstime,
                mytimer->queue.earliest_soft_deadline,
                abstime - mytimer->queue.earliest_soft_deadline, 0, 0);
        } else {
            min_deadline = rtclock_deadline;
        }

        mytimer->has_expired = TRUE;
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;

        /*
         * Get a more up-to-date current time after expiring the timer queue.
         */
        abstime = mach_absolute_time();
        mytimer->when_set = abstime;
    }

    /*
     * Has a per-CPU running timer expired?
     */
    run_deadline = running_timers_expire(pp->cpu_processor, abstime);
    if (run_deadline != EndOfAllTime) {
        if (run_deadline < min_deadline) {
            min_deadline = run_deadline;
        }
        timer_processed = true;
        abstime = mach_absolute_time();
    }

    /*
     * Log the timer latency *before* the power management events.
     */
    if (__probable(timer_processed)) {
        /*
         * Log the maximum interrupt service latency experienced by a timer.
         */
        int64_t latency = min_deadline == EndOfAllTime ? 0 :
            (int64_t)(abstime - min_deadline);
        /*
         * Log the interrupt service latency (the tool expects a negative
         * value); a non-PM event is expected next.
         * The requested deadline may be earlier than when it was set
         * - use MAX to avoid reporting bogus latencies.
         */
        KDBG_RELEASE(DECR_TRAP_LATENCY, -latency,
            user_mode != 0 ? rip : VM_KERNEL_UNSLIDE(rip), user_mode);
    }

    /*
     * Is it time for a power management state change?
     */
    if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
        KDBG_RELEASE(DECR_PM_DEADLINE | DBG_FUNC_START);
        pmCPUDeadline(pp);
        KDBG_RELEASE(DECR_PM_DEADLINE | DBG_FUNC_END);
        timer_processed = true;
        /*
         * XXX Nothing below needs an updated abstime, so omit the update.
         */
    }

    /*
     * Schedule the next deadline.
     */
    x86_lcpu()->rtcDeadline = EndOfAllTime;
    timer_resync_deadlines();

    if (__improbable(!timer_processed)) {
        spurious_timers++;
    }
}

/*
 * Set the clock deadline.
 */
void
timer_set_deadline(uint64_t deadline)
{
    rtclock_timer_t *mytimer;
    spl_t            s;
    cpu_data_t      *pp;

    s = splclock();                 /* no interruptions */
    pp = current_cpu_datap();

    mytimer = &pp->rtclock_timer;   /* Point to the timer itself */
    mytimer->deadline = deadline;   /* Set new expiration time */
    mytimer->when_set = mach_absolute_time();

    timer_resync_deadlines();

    splx(s);
}

/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
timer_resync_deadlines(void)
{
    uint64_t         deadline = EndOfAllTime;
    uint64_t         pmdeadline;
    rtclock_timer_t *mytimer;
    spl_t            s = splclock();
    cpu_data_t      *pp;
    uint32_t         decr;

    pp = current_cpu_datap();
    if (!pp->cpu_running) {
        /* There's really nothing to do if this processor is down */
        return;
    }

    /*
     * If we have a clock timer set, pick that.
     */
    mytimer = &pp->rtclock_timer;
    if (!mytimer->has_expired &&
        0 < mytimer->deadline && mytimer->deadline < EndOfAllTime) {
        deadline = mytimer->deadline;
    }

    /*
     * If we have a power management deadline, see if that's earlier.
     */
    pmdeadline = pmCPUGetDeadline(pp);
    if (0 < pmdeadline && pmdeadline < deadline) {
        deadline = pmdeadline;
    }

    uint64_t run_deadline = running_timers_deadline(pp->cpu_processor);
    if (run_deadline < deadline) {
        deadline = run_deadline;
    }

    /*
     * Go and set the "pop" event.
     */
    decr = (uint32_t) setPop(deadline);

    /* Record non-PM deadline for latency tool */
    if (decr != 0 && deadline != pmdeadline) {
        uint64_t queue_count = 0;
        if (deadline != run_deadline) {
            /*
             * For non-quantum timers, put the queue count
             * in the tracepoint.
             */
            queue_count = mytimer->queue.count;
        }
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            DECR_SET_DEADLINE | DBG_FUNC_NONE,
            decr, 2,
            deadline,
            queue_count, 0);
    }
    splx(s);
}

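/*
 * Expire the local CPU's timer queue and reprogram the hardware
 * deadline for whatever remains outstanding.
 */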
void
timer_queue_expire_local(
    __unused void           *arg)
{
    rtclock_timer_t *mytimer;
    uint64_t         abstime;
    cpu_data_t      *pp;

    pp = current_cpu_datap();

    mytimer = &pp->rtclock_timer;
    abstime = mach_absolute_time();

    mytimer->has_expired = TRUE;
    mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
    mytimer->has_expired = FALSE;
    mytimer->when_set = mach_absolute_time();

    timer_resync_deadlines();
}

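/*
 * As timer_queue_expire_local(), but expire with the rescan option
 * set. Must be called with interrupts disabled (asserted below).
 */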
void
timer_queue_expire_rescan(
    __unused void           *arg)
{
    rtclock_timer_t *mytimer;
    uint64_t         abstime;
    cpu_data_t      *pp;

    assert(ml_get_interrupts_enabled() == FALSE);
    pp = current_cpu_datap();

    mytimer = &pp->rtclock_timer;
    abstime = mach_absolute_time();

    mytimer->has_expired = TRUE;
    mytimer->deadline = timer_queue_expire_with_options(&mytimer->queue, abstime, TRUE);
    mytimer->has_expired = FALSE;
    mytimer->when_set = mach_absolute_time();

    timer_resync_deadlines();
}

#define TIMER_RESORT_THRESHOLD_ABSTIME (50 * NSEC_PER_MSEC)

#if TCOAL_PRIO_STATS
int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl;
#define TCOAL_PRIO_STAT(x) (x++)
#else
#define TCOAL_PRIO_STAT(x)
#endif

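/*
 * Report whether the given deadline skew is large enough
 * (>= TIMER_RESORT_THRESHOLD_ABSTIME) to warrant re-sorting
 * the timer's queue.
 */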
boolean_t
timer_resort_threshold(uint64_t skew)
{
    if (skew >= TIMER_RESORT_THRESHOLD_ABSTIME) {
        return TRUE;
    } else {
        return FALSE;
    }
}

/*
 * Return the local timer queue for a running processor;
 * otherwise return the boot processor's timer queue.
 */
mpqueue_head_t *
timer_queue_assign(
    uint64_t                deadline)
{
    cpu_data_t              *cdp = current_cpu_datap();
    mpqueue_head_t          *queue;

    if (cdp->cpu_running) {
        queue = &cdp->rtclock_timer.queue;

        if (deadline < cdp->rtclock_timer.deadline) {
            timer_set_deadline(deadline);
        }
    } else {
        queue = &cpu_datap(master_cpu)->rtclock_timer.queue;
    }

    return queue;
}

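/*
 * If the cancelled timer was ahead of the local queue's new earliest
 * deadline, push the hardware deadline out to match it.
 */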
void
timer_queue_cancel(
    mpqueue_head_t          *queue,
    uint64_t                deadline,
    uint64_t                new_deadline)
{
    if (queue == &current_cpu_datap()->rtclock_timer.queue) {
        if (deadline < new_deadline) {
            timer_set_deadline(new_deadline);
        }
    }
}

/*
 * timer_queue_migrate_cpu() is called from the Power-Management kext
 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline so that its timer queue can be moved to another processor.
 * This target processor should be the least idle (most busy) --
 * currently this is the primary processor for the calling thread's package.
 * Locking restrictions demand that the target cpu must be the boot cpu.
 */
uint32_t
timer_queue_migrate_cpu(int target_cpu)
{
    cpu_data_t      *target_cdp = cpu_datap(target_cpu);
    cpu_data_t      *cdp = current_cpu_datap();
    int              ntimers_moved;

    assert(!ml_get_interrupts_enabled());
    assert(target_cpu != cdp->cpu_number);
    assert(target_cpu == master_cpu);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        DECR_TIMER_MIGRATE | DBG_FUNC_START,
        target_cpu,
        cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32),
        0, 0);

    /*
     * Move timer requests from the local queue to the target processor's.
     * The return value is the number of requests moved. If this is 0,
     * it indicates that the first (i.e. earliest) timer is earlier than
     * the earliest for the target processor. Since this would force a
     * resync, the move of this and all later requests is aborted.
     */
    ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
        &target_cdp->rtclock_timer.queue);

    /*
     * Assuming we moved stuff, clear local deadline.
     */
    if (ntimers_moved > 0) {
        cdp->rtclock_timer.deadline = EndOfAllTime;
        setPop(EndOfAllTime);
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        DECR_TIMER_MIGRATE | DBG_FUNC_END,
        target_cpu, ntimers_moved, 0, 0, 0);

    return ntimers_moved;
}

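/* Return the timer queue belonging to the given cpu. */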
mpqueue_head_t *
timer_queue_cpu(int cpu)
{
    return &cpu_datap(cpu)->rtclock_timer.queue;
}

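/* Run fn(arg) on the given cpu via a synchronous cross-call. */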
void
timer_call_cpu(int cpu, void (*fn)(void *), void *arg)
{
    mp_cpus_call(cpu_to_cpumask(cpu), SYNC, fn, arg);
}

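/* As timer_call_cpu(), but do not wait for fn to complete on the target. */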
void
timer_call_nosync_cpu(int cpu, void (*fn)(void *), void *arg)
{
    /* XXX Needs error checking and retry */
    mp_cpus_call(cpu_to_cpumask(cpu), NOSYNC, fn, arg);
}

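/*
 * Default timer coalescing parameters, expressed in nanoseconds.
 * The per-class shift and *_ns_max fields bound how far timers may
 * be coalesced for realtime (rt), background (bg), kernel (kt),
 * fixed-priority (fp) and timeshare (ts) threads; the latency_qos_*
 * arrays provide per-QoS-tier equivalents.
 */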
static timer_coalescing_priority_params_ns_t tcoal_prio_params_init =
{
    .idle_entry_timer_processing_hdeadline_threshold_ns = 5000ULL * NSEC_PER_USEC,
    .interrupt_timer_coalescing_ilat_threshold_ns = 30ULL * NSEC_PER_USEC,
    .timer_resort_threshold_ns = 50 * NSEC_PER_MSEC,
    .timer_coalesce_rt_shift = 0,
    .timer_coalesce_bg_shift = -5,
    .timer_coalesce_kt_shift = 3,
    .timer_coalesce_fp_shift = 3,
    .timer_coalesce_ts_shift = 3,
    .timer_coalesce_rt_ns_max = 0ULL,
    .timer_coalesce_bg_ns_max = 100 * NSEC_PER_MSEC,
    .timer_coalesce_kt_ns_max = 1 * NSEC_PER_MSEC,
    .timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC,
    .timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC,
    .latency_qos_scale = {3, 2, 1, -2, -15, -15},
    .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
                           75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC},
    .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE},
};

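/* Return the platform's default timer coalescing parameters. */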
timer_coalescing_priority_params_ns_t *
timer_call_get_priority_params(void)
{
    return &tcoal_prio_params_init;
}