/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:	timer.c
 * Purpose:	Routines for handling the machine independent timer.
 */

#include <mach/mach_types.h>

#include <kern/timer_queue.h>
#include <kern/timer_call.h>
#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/timer_queue.h>
#include <kern/pms.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <i386/cpu_data.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>

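/*
 * Count of timer interrupts that found no expired timer, running timer,
 * or PM deadline to service (see timer_intr() below).
 */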
uint32_t spurious_timers;

/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
timer_intr(int user_mode, uint64_t rip)
{
	uint64_t orig_abstime, abstime;
	rtclock_timer_t *mytimer;
	cpu_data_t *pp;
	uint64_t pmdeadline;
	uint64_t min_deadline = EndOfAllTime;
	uint64_t run_deadline = EndOfAllTime;
	bool timer_processed = false;

	pp = current_cpu_datap();

	SCHED_STATS_INC(timer_pop_count);

	orig_abstime = abstime = mach_absolute_time();

	/*
	 * Has a pending clock timer expired?
	 */
	mytimer = &pp->rtclock_timer;
	timer_processed = (mytimer->deadline <= abstime ||
	    abstime >= mytimer->queue.earliest_soft_deadline);
	if (timer_processed) {
		uint64_t rtclock_deadline = MAX(mytimer->deadline, mytimer->when_set);
		/*
		 * When opportunistically processing coalesced timers, don't factor
		 * their latency into the trace event.
		 */
		if (abstime > rtclock_deadline) {
			TCOAL_DEBUG(0xEEEE0000, abstime,
			    mytimer->queue.earliest_soft_deadline,
			    abstime - mytimer->queue.earliest_soft_deadline, 0, 0);
		} else {
			min_deadline = rtclock_deadline;
		}

		mytimer->has_expired = TRUE;
		mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
		mytimer->has_expired = FALSE;

		/*
		 * Get a more up-to-date current time after expiring the timer queue.
		 */
		abstime = mach_absolute_time();
		mytimer->when_set = abstime;
	}

	/*
	 * Has a per-CPU running timer expired?
	 */
	run_deadline = running_timers_expire(pp->cpu_processor, abstime);
	if (run_deadline != EndOfAllTime) {
		if (run_deadline < min_deadline) {
			min_deadline = run_deadline;
		}
		timer_processed = true;
		abstime = mach_absolute_time();
	}

	/*
	 * Log the timer latency *before* the power management events.
	 */
	if (__probable(timer_processed)) {
		/*
		 * Log the maximum interrupt service latency experienced by a timer.
		 */
		int64_t latency = min_deadline == EndOfAllTime ? 0 :
		    (int64_t)(abstime - min_deadline);
		/*
		 * Log the interrupt service latency (a negative value is expected
		 * by the latency tool); a non-PM event is expected next.
		 * The requested deadline may be earlier than when it was set,
		 * so use MAX to avoid reporting bogus latencies.
		 */
		KDBG_RELEASE(DECR_TRAP_LATENCY, -latency,
		    user_mode != 0 ? rip : VM_KERNEL_UNSLIDE(rip), user_mode);
	}

	/*
	 * Is it time for power management state change?
	 */
	if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
		KDBG_RELEASE(DECR_PM_DEADLINE | DBG_FUNC_START);
		pmCPUDeadline(pp);
		KDBG_RELEASE(DECR_PM_DEADLINE | DBG_FUNC_END);
		timer_processed = true;
		/*
		 * XXX Nothing below needs an updated abstime, so omit the update.
		 */
	}

	/*
	 * Schedule the next deadline.
	 */
	x86_lcpu()->rtcDeadline = EndOfAllTime;
	timer_resync_deadlines();

	if (__improbable(!timer_processed)) {
		spurious_timers++;
	}
}

/*
 * Set the clock deadline.
 */
void
timer_set_deadline(uint64_t deadline)
{
	rtclock_timer_t *mytimer;
	spl_t s;
	cpu_data_t *pp;

	s = splclock();                 /* no interruptions */
	pp = current_cpu_datap();

	mytimer = &pp->rtclock_timer;   /* Point to the timer itself */
	mytimer->deadline = deadline;   /* Set new expiration time */
	mytimer->when_set = mach_absolute_time();

	timer_resync_deadlines();

	splx(s);
}

/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
timer_resync_deadlines(void)
{
	uint64_t deadline = EndOfAllTime;
	uint64_t pmdeadline;
	rtclock_timer_t *mytimer;
	spl_t s = splclock();
	cpu_data_t *pp;
	uint32_t decr;

	pp = current_cpu_datap();
	if (!pp->cpu_running) {
		/* There's really nothing to do if this processor is down */
		return;
	}

	/*
	 * If we have a clock timer set, pick that.
	 */
	mytimer = &pp->rtclock_timer;
	if (!mytimer->has_expired &&
	    0 < mytimer->deadline && mytimer->deadline < EndOfAllTime) {
		deadline = mytimer->deadline;
	}

	/*
	 * If we have a power management deadline, see if that's earlier.
	 */
	pmdeadline = pmCPUGetDeadline(pp);
	if (0 < pmdeadline && pmdeadline < deadline) {
		deadline = pmdeadline;
	}

	uint64_t run_deadline = running_timers_deadline(pp->cpu_processor);
	if (run_deadline < deadline) {
		deadline = run_deadline;
	}

	/*
	 * Go and set the "pop" event.
	 */
	decr = (uint32_t) setPop(deadline);

	/* Record non-PM deadline for latency tool */
	if (decr != 0 && deadline != pmdeadline) {
		uint64_t queue_count = 0;
		if (deadline != run_deadline) {
			/*
			 * For non-quantum timers, put the queue count
			 * in the tracepoint.
			 */
			queue_count = mytimer->queue.count;
		}
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    DECR_SET_DEADLINE | DBG_FUNC_NONE,
		    decr, 2,
		    deadline,
		    queue_count, 0);
	}
	splx(s);
}

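/*
 * Expire this CPU's clock timer queue at the current time and
 * re-evaluate the next hardware deadline.
 */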
void
timer_queue_expire_local(
	__unused void *arg)
{
	rtclock_timer_t *mytimer;
	uint64_t abstime;
	cpu_data_t *pp;

	pp = current_cpu_datap();

	mytimer = &pp->rtclock_timer;
	abstime = mach_absolute_time();

	mytimer->has_expired = TRUE;
	mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
	mytimer->has_expired = FALSE;
	mytimer->when_set = mach_absolute_time();

	timer_resync_deadlines();
}

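/*
 * As above, but expire the queue with the rescan option set so that
 * coalesced entries are re-sorted; must be called with interrupts disabled.
 */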
void
timer_queue_expire_rescan(
	__unused void *arg)
{
	rtclock_timer_t *mytimer;
	uint64_t abstime;
	cpu_data_t *pp;

	assert(ml_get_interrupts_enabled() == FALSE);
	pp = current_cpu_datap();

	mytimer = &pp->rtclock_timer;
	abstime = mach_absolute_time();

	mytimer->has_expired = TRUE;
	mytimer->deadline = timer_queue_expire_with_options(&mytimer->queue, abstime, TRUE);
	mytimer->has_expired = FALSE;
	mytimer->when_set = mach_absolute_time();

	timer_resync_deadlines();
}

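/*
 * Skew threshold (50 ms) beyond which timer_resort_threshold() reports that
 * a coalesced timer should be re-sorted; the TCOAL_PRIO_STAT counters track
 * coalescing by priority class when TCOAL_PRIO_STATS is enabled.
 */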
#define TIMER_RESORT_THRESHOLD_ABSTIME (50 * NSEC_PER_MSEC)

#if TCOAL_PRIO_STATS
int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl;
#define TCOAL_PRIO_STAT(x) (x++)
#else
#define TCOAL_PRIO_STAT(x)
#endif

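/*
 * Report whether a timer's deadline skew is large enough to warrant
 * re-sorting it in its queue.
 */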
boolean_t
timer_resort_threshold(uint64_t skew)
{
	if (skew >= TIMER_RESORT_THRESHOLD_ABSTIME) {
		return TRUE;
	} else {
		return FALSE;
	}
}

/*
 * Return the local timer queue for a running processor
 * else return the boot processor's timer queue.
 */
mpqueue_head_t *
timer_queue_assign(
	uint64_t deadline)
{
	cpu_data_t *cdp = current_cpu_datap();
	mpqueue_head_t *queue;

	if (cdp->cpu_running) {
		queue = &cdp->rtclock_timer.queue;

		if (deadline < cdp->rtclock_timer.deadline) {
			timer_set_deadline(deadline);
		}
	} else {
		queue = &cpu_datap(master_cpu)->rtclock_timer.queue;
	}

	return queue;
}

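/*
 * After a cancellation on the current CPU's queue, push the programmed
 * deadline out to the queue's new earliest deadline if the cancelled
 * deadline was earlier.
 */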
void
timer_queue_cancel(
	mpqueue_head_t *queue,
	uint64_t deadline,
	uint64_t new_deadline)
{
	if (queue == &current_cpu_datap()->rtclock_timer.queue) {
		if (deadline < new_deadline) {
			timer_set_deadline(new_deadline);
		}
	}
}

/*
 * timer_queue_migrate_cpu() is called from the Power-Management kext
 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline so that its timer queue can be moved to another processor.
 * This target processor should be the least idle (most busy) --
 * currently this is the primary processor for the calling thread's package.
 * Locking restrictions demand that the target cpu must be the boot cpu.
 */
uint32_t
timer_queue_migrate_cpu(int target_cpu)
{
	cpu_data_t *target_cdp = cpu_datap(target_cpu);
	cpu_data_t *cdp = current_cpu_datap();
	int ntimers_moved;

	assert(!ml_get_interrupts_enabled());
	assert(target_cpu != cdp->cpu_number);
	assert(target_cpu == master_cpu);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    DECR_TIMER_MIGRATE | DBG_FUNC_START,
	    target_cpu,
	    cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32),
	    0, 0);

	/*
	 * Move timer requests from the local queue to the target processor's.
	 * The return value is the number of requests moved. If this is 0,
	 * it indicates that the first (i.e. earliest) timer is earlier than
	 * the earliest for the target processor. Since this would force a
	 * resync, the move of this and all later requests is aborted.
	 */
	ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
	    &target_cdp->rtclock_timer.queue);

	/*
	 * Assuming we moved stuff, clear local deadline.
	 */
	if (ntimers_moved > 0) {
		cdp->rtclock_timer.deadline = EndOfAllTime;
		setPop(EndOfAllTime);
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    DECR_TIMER_MIGRATE | DBG_FUNC_END,
	    target_cpu, ntimers_moved, 0, 0, 0);

	return ntimers_moved;
}

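/* Return the clock timer queue belonging to the given CPU. */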
mpqueue_head_t *
timer_queue_cpu(int cpu)
{
	return &cpu_datap(cpu)->rtclock_timer.queue;
}

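/* Execute a function on the given CPU and wait for it to complete. */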
void
timer_call_cpu(int cpu, void (*fn)(void *), void *arg)
{
	mp_cpus_call(cpu_to_cpumask(cpu), SYNC, fn, arg);
}

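/*
 * Queue a function for execution on the given CPU without waiting
 * for it to complete.
 */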
void
timer_call_nosync_cpu(int cpu, void (*fn)(void *), void *arg)
{
	/* XXX Needs error checking and retry */
	mp_cpus_call(cpu_to_cpumask(cpu), NOSYNC, fn, arg);
}

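/*
 * Default timer coalescing parameters for this platform: per-class shift
 * and maximum-slop values plus the latency QoS tiers, handed back to the
 * machine-independent layer by timer_call_get_priority_params().
 */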
static timer_coalescing_priority_params_ns_t tcoal_prio_params_init =
{
	.idle_entry_timer_processing_hdeadline_threshold_ns = 5000ULL * NSEC_PER_USEC,
	.interrupt_timer_coalescing_ilat_threshold_ns = 30ULL * NSEC_PER_USEC,
	.timer_resort_threshold_ns = 50 * NSEC_PER_MSEC,
	.timer_coalesce_rt_shift = 0,
	.timer_coalesce_bg_shift = -5,
	.timer_coalesce_kt_shift = 3,
	.timer_coalesce_fp_shift = 3,
	.timer_coalesce_ts_shift = 3,
	.timer_coalesce_rt_ns_max = 0ULL,
	.timer_coalesce_bg_ns_max = 100 * NSEC_PER_MSEC,
	.timer_coalesce_kt_ns_max = 1 * NSEC_PER_MSEC,
	.timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC,
	.timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC,
	.latency_qos_scale = {3, 2, 1, -2, -15, -15},
	.latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
			       75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC},
	.latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE},
};

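/* Return the platform's timer coalescing parameters (see above). */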
timer_coalescing_priority_params_ns_t *
timer_call_get_priority_params(void)
{
	return &tcoal_prio_params_init;
}