/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		i386_timer.c
 *	Purpose:	Routines for handling the machine-independent timer.
 */

#include <mach/mach_types.h>

#include <kern/timer_queue.h>
#include <kern/timer_call.h>
#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/pms.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <i386/cpu_data.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>

/* Number of timer pops that found no expired deadline (see timer_intr()) */
uint32_t spurious_timers;
/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier
 *     must not set deadlines that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and
 *     iterate over them
 */
void
timer_intr(int user_mode, uint64_t rip)
{
        uint64_t        abstime;
        rtclock_timer_t *mytimer;
        cpu_data_t      *pp;
        int64_t         latency;
        uint64_t        pmdeadline;
        boolean_t       timer_processed = FALSE;

        pp = current_cpu_datap();

        SCHED_STATS_TIMER_POP(current_processor());

        abstime = mach_absolute_time();         /* Get the time now */

        /* has a pending clock timer expired? */
        mytimer = &pp->rtclock_timer;           /* Point to the event timer */

        if ((timer_processed = ((mytimer->deadline <= abstime) ||
            (abstime >= (mytimer->queue.earliest_soft_deadline))))) {
                /*
                 * Log the interrupt service latency (a negative value is
                 * expected by the latency tool); a non-PM event is expected
                 * next. The requested deadline may be earlier than when it
                 * was set - use MAX to avoid reporting bogus latencies.
                 */
                latency = (int64_t) (abstime - MAX(mytimer->deadline,
                                                   mytimer->when_set));
                /*
                 * Log zero timer latencies when opportunistically processing
                 * coalesced timers.
                 */
                if (latency < 0) {
                        TCOAL_DEBUG(0xEEEE0000, abstime,
                            mytimer->queue.earliest_soft_deadline,
                            abstime - mytimer->queue.earliest_soft_deadline, 0, 0);
                        latency = 0;
                }

                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                        DECR_TRAP_LATENCY | DBG_FUNC_NONE,
                        -latency,
                        ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
                        user_mode, 0, 0);

                mytimer->has_expired = TRUE;    /* Remember that we popped */
                mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
                mytimer->has_expired = FALSE;

                /* Get the time again since we ran a bit */
                abstime = mach_absolute_time();
                mytimer->when_set = abstime;
        }

        /* is it time for a power management state change? */
        if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                        DECR_PM_DEADLINE | DBG_FUNC_START,
                        0, 0, 0, 0, 0);
                pmCPUDeadline(pp);
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                        DECR_PM_DEADLINE | DBG_FUNC_END,
                        0, 0, 0, 0, 0);
                timer_processed = TRUE;
        }

        /* schedule our next deadline */
        x86_lcpu()->rtcDeadline = EndOfAllTime;
        timer_resync_deadlines();

        if (__improbable(timer_processed == FALSE))
                spurious_timers++;
}

/*
 * Set the clock deadline.
 */
void
timer_set_deadline(uint64_t deadline)
{
        rtclock_timer_t *mytimer;
        spl_t           s;
        cpu_data_t      *pp;

        s = splclock();                         /* no interruptions */
        pp = current_cpu_datap();

        mytimer = &pp->rtclock_timer;           /* Point to the timer itself */
        mytimer->deadline = deadline;           /* Set new expiration time */
        mytimer->when_set = mach_absolute_time();

        timer_resync_deadlines();

        splx(s);
}

/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
timer_resync_deadlines(void)
{
        uint64_t        deadline = EndOfAllTime;
        uint64_t        pmdeadline;
        rtclock_timer_t *mytimer;
        spl_t           s = splclock();
        cpu_data_t      *pp;
        uint32_t        decr;

        pp = current_cpu_datap();
        if (!pp->cpu_running) {
                /* There's really nothing to do if this processor is down;
                 * just restore the spl raised above before returning.
                 */
                splx(s);
                return;
        }

        /*
         * If we have a clock timer set, pick that.
         */
        mytimer = &pp->rtclock_timer;
        if (!mytimer->has_expired &&
            0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
                deadline = mytimer->deadline;

        /*
         * If we have a power management deadline, see if that's earlier.
         */
        pmdeadline = pmCPUGetDeadline(pp);
        if (0 < pmdeadline && pmdeadline < deadline)
                deadline = pmdeadline;

        /*
         * Go and set the "pop" event.
         */
        decr = (uint32_t) setPop(deadline);

        /* Record non-PM deadline for latency tool */
        if (decr != 0 && deadline != pmdeadline) {
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                        DECR_SET_DEADLINE | DBG_FUNC_NONE,
                        decr, 2,
                        deadline,
                        mytimer->queue.count, 0);
        }
        splx(s);
}

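/*
 * Expire the local CPU's timer queue as of "now" and reprogram the
 * hardware deadline. The __unused arg suggests this is invoked as a
 * cross-call callback (cf. timer_call_cpu()/timer_call_nosync_cpu()
 * below), so it runs on the CPU whose queue it drains.
 */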
void
timer_queue_expire_local(__unused void *arg)
{
        rtclock_timer_t *mytimer;
        uint64_t        abstime;
        cpu_data_t      *pp;

        pp = current_cpu_datap();

        mytimer = &pp->rtclock_timer;
        abstime = mach_absolute_time();

        mytimer->has_expired = TRUE;
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;
        mytimer->when_set = mach_absolute_time();

        timer_resync_deadlines();
}

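/*
 * As timer_queue_expire_local(), but passes TRUE as the third argument
 * of timer_queue_expire_with_options(); presumably this requests a full
 * rescan of coalesced ("soft") deadlines rather than servicing only the
 * hard head of the queue. Must be called with interrupts disabled, per
 * the assert below.
 */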
void
timer_queue_expire_rescan(__unused void *arg)
{
        rtclock_timer_t *mytimer;
        uint64_t        abstime;
        cpu_data_t      *pp;

        assert(ml_get_interrupts_enabled() == FALSE);
        pp = current_cpu_datap();

        mytimer = &pp->rtclock_timer;
        abstime = mach_absolute_time();

        mytimer->has_expired = TRUE;
        mytimer->deadline = timer_queue_expire_with_options(&mytimer->queue, abstime, TRUE);
        mytimer->has_expired = FALSE;
        mytimer->when_set = mach_absolute_time();

        timer_resync_deadlines();
}

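/*
 * Deadline skew beyond which a timer queue should be re-sorted. The
 * value is written in nanoseconds (50 ms); it works directly as an
 * abstime threshold because mach absolute time on x86 is maintained at
 * nanosecond scale (mach_timebase_info is 1/1 on Intel).
 */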
#define TIMER_RESORT_THRESHOLD_ABSTIME (50 * NSEC_PER_MSEC)

#if TCOAL_PRIO_STATS
int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl;
#define TCOAL_PRIO_STAT(x) (x++)
#else
#define TCOAL_PRIO_STAT(x)
#endif

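/*
 * Return TRUE if a timer's deadline has drifted far enough from its
 * sorted position that the queue should be re-sorted.
 */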
boolean_t
timer_resort_threshold(uint64_t skew)
{
        return (skew >= TIMER_RESORT_THRESHOLD_ABSTIME);
}

/*
 * Return the local timer queue for a running processor;
 * otherwise return the boot processor's timer queue.
 */
mpqueue_head_t *
timer_queue_assign(uint64_t deadline)
{
        cpu_data_t      *cdp = current_cpu_datap();
        mpqueue_head_t  *queue;

        if (cdp->cpu_running) {
                queue = &cdp->rtclock_timer.queue;

                if (deadline < cdp->rtclock_timer.deadline)
                        timer_set_deadline(deadline);
        } else {
                queue = &cpu_datap(master_cpu)->rtclock_timer.queue;
        }

        return (queue);
}

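/*
 * Called when a timer with the given deadline has been removed from a
 * queue. Only acts on the current CPU's own queue: if the cancelled
 * deadline was earlier than the queue's new earliest deadline, push the
 * local hardware deadline out to match.
 */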
void
timer_queue_cancel(mpqueue_head_t *queue, uint64_t deadline, uint64_t new_deadline)
{
        if (queue == &current_cpu_datap()->rtclock_timer.queue) {
                if (deadline < new_deadline)
                        timer_set_deadline(new_deadline);
        }
}

/*
 * timer_queue_migrate_cpu() is called from the Power-Management kext
 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline, so that its timer queue can be moved to another processor.
 * This target processor should be the least idle (most busy) --
 * currently this is the primary processor for the calling thread's package.
 * Locking restrictions demand that the target cpu must be the boot cpu.
 */
uint32_t
timer_queue_migrate_cpu(int target_cpu)
{
        cpu_data_t      *target_cdp = cpu_datap(target_cpu);
        cpu_data_t      *cdp = current_cpu_datap();
        int             ntimers_moved;

        assert(!ml_get_interrupts_enabled());
        assert(target_cpu != cdp->cpu_number);
        assert(target_cpu == master_cpu);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                DECR_TIMER_MIGRATE | DBG_FUNC_START,
                target_cpu,
                cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32),
                0, 0);

        /*
         * Move timer requests from the local queue to the target processor's.
         * The return value is the number of requests moved. If this is 0,
         * it indicates that the first (i.e. earliest) timer is earlier than
         * the earliest for the target processor. Since this would force a
         * resync, the move of this and all later requests is aborted.
         */
        ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
                                            &target_cdp->rtclock_timer.queue);

        /*
         * If we moved any timers, clear the local deadline and cancel
         * the pending pop.
         */
        if (ntimers_moved > 0) {
                cdp->rtclock_timer.deadline = EndOfAllTime;
                setPop(EndOfAllTime);
        }

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                DECR_TIMER_MIGRATE | DBG_FUNC_END,
                target_cpu, ntimers_moved, 0, 0, 0);

        return ntimers_moved;
}

mpqueue_head_t *
timer_queue_cpu(int cpu)
{
        return &cpu_datap(cpu)->rtclock_timer.queue;
}

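/*
 * Run fn(arg) on the given cpu and wait for it to finish: the SYNC mode
 * of mp_cpus_call() does not return until the handler has completed on
 * the target.
 */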
void
timer_call_cpu(int cpu, void (*fn)(void *), void *arg)
{
        mp_cpus_call(cpu_to_cpumask(cpu), SYNC, fn, arg);
}

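/*
 * Fire-and-forget variant: queue fn(arg) for the given cpu without
 * waiting for delivery or completion (hence the XXX below about the
 * missing error checking and retry).
 */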
void
timer_call_nosync_cpu(int cpu, void (*fn)(void *), void *arg)
{
        /* XXX Needs error checking and retry */
        mp_cpus_call(cpu_to_cpumask(cpu), NOSYNC, fn, arg);
}

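/*
 * Default timer-coalescing parameters, handed to the machine-independent
 * timer_call layer via timer_call_get_priority_params() below.
 *
 * The _shift fields scale a timer's time-to-deadline into a maximum
 * coalescing window, capped by the corresponding _ns_max field. Judging
 * by the signs used here (an assumption about the consumer in
 * timer_call.c, which is not visible in this file): a positive shift
 * right-shifts, so kernel/fixed-pri/timeshare timers may slip by
 * time-to-deadline >> 3 (12.5%), up to 1 ms; the negative background
 * shift left-shifts, so e.g. a background timer 8 ms out may slip by
 * MIN(8ms << 5, timer_coalesce_bg_ns_max) = 100 ms; and the real-time
 * shift of 0 with an _ns_max of 0 disables coalescing for real-time
 * timers entirely. latency_qos_scale / latency_qos_ns_max apply the same
 * scheme per latency-QoS tier; tiers flagged in
 * latency_tier_rate_limited are additionally rate-limited.
 */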
static timer_coalescing_priority_params_ns_t tcoal_prio_params_init =
{
        .idle_entry_timer_processing_hdeadline_threshold_ns = 5000ULL * NSEC_PER_USEC,
        .interrupt_timer_coalescing_ilat_threshold_ns = 30ULL * NSEC_PER_USEC,
        .timer_resort_threshold_ns = 50 * NSEC_PER_MSEC,
        .timer_coalesce_rt_shift = 0,
        .timer_coalesce_bg_shift = -5,
        .timer_coalesce_kt_shift = 3,
        .timer_coalesce_fp_shift = 3,
        .timer_coalesce_ts_shift = 3,
        .timer_coalesce_rt_ns_max = 0ULL,
        .timer_coalesce_bg_ns_max = 100 * NSEC_PER_MSEC,
        .timer_coalesce_kt_ns_max = 1 * NSEC_PER_MSEC,
        .timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC,
        .timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC,
        .latency_qos_scale = {3, 2, 1, -2, -15, -15},
        .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
                               75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC},
        .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE},
};

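/*
 * Accessor for the platform's default coalescing parameters; presumably
 * consumed by the machine-independent timer_call code (an assumption
 * based on the name -- the caller is not in this file).
 */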
timer_coalescing_priority_params_ns_t *
timer_call_get_priority_params(void)
{
        return &tcoal_prio_params_init;
}