]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/etimer.c
xnu-2050.24.15.tar.gz
[apple/xnu.git] / osfmk / i386 / etimer.c
CommitLineData
2d21ac55 1/*
6d2010ae 2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
2d21ac55
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * @APPLE_FREE_COPYRIGHT@
33 */
34/*
35 * File: etimer.c
36 * Purpose: Routines for handling the machine independent
37 * event timer.
38 */
39
40#include <mach/mach_types.h>
41
c910b4d9 42#include <kern/timer_queue.h>
2d21ac55
A
43#include <kern/clock.h>
44#include <kern/thread.h>
45#include <kern/processor.h>
46#include <kern/macro_help.h>
47#include <kern/spl.h>
48#include <kern/etimer.h>
49#include <kern/pms.h>
50
51#include <machine/commpage.h>
52#include <machine/machine_routines.h>
53
54#include <sys/kdebug.h>
55#include <i386/cpu_data.h>
56#include <i386/cpu_topology.h>
57#include <i386/cpu_threads.h>
58
2d21ac55
A
/*
 * Event timer interrupt.
 *
 * Called from the local (per-cpu) timer interrupt path with
 * user_mode non-zero if the interrupted context was user code,
 * and rip the interrupted instruction pointer (used only for
 * the latency tracepoint).
 *
 * Services, in order:
 *  1. the cpu's rtclock timer queue, if its deadline has passed;
 *  2. the power-management deadline, if one is pending and due;
 * then re-arms the hardware pop for the next earliest deadline.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 * that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
etimer_intr(int		user_mode,
	    uint64_t	rip)
{
	uint64_t		abstime;
	rtclock_timer_t		*mytimer;
	cpu_data_t		*pp;
	int32_t			latency;
	uint64_t		pmdeadline;

	pp = current_cpu_datap();

	SCHED_STATS_TIMER_POP(current_processor());

	abstime = mach_absolute_time();		/* Get the time now */

	/* has a pending clock timer expired? */
	mytimer = &pp->rtclock_timer;		/* Point to the event timer */
	if (mytimer->deadline <= abstime) {
		/*
		 * Log interrupt service latency (-ve value expected by tool)
		 * a non-PM event is expected next.
		 * The requested deadline may be earlier than when it was set
		 * - use MAX to avoid reporting bogus latencies.
		 */
		latency = (int32_t) (abstime - MAX(mytimer->deadline,
						   mytimer->when_set));
		/* Kernel rip values are unslid before tracing so the trace
		 * does not leak the KASLR slide; user rip is logged as-is. */
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_TRAP_LATENCY | DBG_FUNC_NONE,
			-latency,
			((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
			user_mode, 0, 0);

		mytimer->has_expired = TRUE;	/* Remember that we popped */
		mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
		mytimer->has_expired = FALSE;

		/* Get the time again since we ran a bit */
		abstime = mach_absolute_time();
		mytimer->when_set = abstime;
	}

	/* is it time for power management state change? */
	if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_PM_DEADLINE | DBG_FUNC_START,
			0, 0, 0, 0, 0);
		pmCPUDeadline(pp);
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_PM_DEADLINE | DBG_FUNC_END,
			0, 0, 0, 0, 0);
	}

	/* schedule our next deadline */
	etimer_resync_deadlines();
}
123
124/*
c910b4d9 125 * Set the clock deadline.
2d21ac55
A
126 */
127void etimer_set_deadline(uint64_t deadline)
128{
129 rtclock_timer_t *mytimer;
130 spl_t s;
131 cpu_data_t *pp;
132
060df5ea 133 s = splclock(); /* no interruptions */
2d21ac55
A
134 pp = current_cpu_datap();
135
060df5ea 136 mytimer = &pp->rtclock_timer; /* Point to the timer itself */
6d2010ae
A
137 mytimer->deadline = deadline; /* Set new expiration time */
138 mytimer->when_set = mach_absolute_time();
2d21ac55
A
139
140 etimer_resync_deadlines();
141
142 splx(s);
143}
144
145/*
146 * Re-evaluate the outstanding deadlines and select the most proximate.
147 *
148 * Should be called at splclock.
149 */
150void
151etimer_resync_deadlines(void)
152{
153 uint64_t deadline;
154 uint64_t pmdeadline;
155 rtclock_timer_t *mytimer;
156 spl_t s = splclock();
157 cpu_data_t *pp;
060df5ea 158 uint32_t decr;
2d21ac55
A
159
160 pp = current_cpu_datap();
060df5ea 161 deadline = EndOfAllTime;
2d21ac55
A
162
163 /*
060df5ea 164 * If we have a clock timer set, pick that.
2d21ac55
A
165 */
166 mytimer = &pp->rtclock_timer;
060df5ea
A
167 if (!mytimer->has_expired &&
168 0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
2d21ac55
A
169 deadline = mytimer->deadline;
170
171 /*
172 * If we have a power management deadline, see if that's earlier.
173 */
174 pmdeadline = pmCPUGetDeadline(pp);
060df5ea 175 if (0 < pmdeadline && pmdeadline < deadline)
6d2010ae 176 deadline = pmdeadline;
2d21ac55
A
177
178 /*
179 * Go and set the "pop" event.
180 */
060df5ea
A
181 decr = (uint32_t) setPop(deadline);
182
183 /* Record non-PM deadline for latency tool */
184 if (deadline != pmdeadline) {
316670eb 185 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6d2010ae
A
186 DECR_SET_DEADLINE | DBG_FUNC_NONE,
187 decr, 2,
188 deadline, (uint32_t)(deadline >> 32), 0);
2d21ac55
A
189 }
190 splx(s);
191}
c910b4d9
A
192
193void etimer_timer_expire(void *arg);
194
195void
196etimer_timer_expire(
197__unused void *arg)
198{
199 rtclock_timer_t *mytimer;
200 uint64_t abstime;
201 cpu_data_t *pp;
c910b4d9
A
202
203 pp = current_cpu_datap();
c910b4d9
A
204
205 mytimer = &pp->rtclock_timer;
206 abstime = mach_absolute_time();
207
208 mytimer->has_expired = TRUE;
209 mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
210 mytimer->has_expired = FALSE;
6d2010ae 211 mytimer->when_set = mach_absolute_time();
c910b4d9 212
c910b4d9
A
213 etimer_resync_deadlines();
214}
215
6d2010ae
A
216uint64_t
217timer_call_slop(
218 uint64_t deadline)
219{
220 uint64_t now = mach_absolute_time();
221 if (deadline > now) {
222 return MIN((deadline - now) >> 3, NSEC_PER_MSEC); /* Min of 12.5% and 1ms */
223 }
224
225 return 0;
226}
227
228mpqueue_head_t *
c910b4d9
A
229timer_queue_assign(
230 uint64_t deadline)
231{
316670eb 232 cpu_data_t *cdp = current_cpu_datap();
6d2010ae 233 mpqueue_head_t *queue;
c910b4d9
A
234
235 if (cdp->cpu_running) {
6d2010ae 236 queue = &cdp->rtclock_timer.queue;
c910b4d9 237
6d2010ae 238 if (deadline < cdp->rtclock_timer.deadline)
c910b4d9
A
239 etimer_set_deadline(deadline);
240 }
241 else
6d2010ae 242 queue = &cpu_datap(master_cpu)->rtclock_timer.queue;
c910b4d9 243
316670eb 244 return (queue);
c910b4d9
A
245}
246
247void
248timer_queue_cancel(
6d2010ae 249 mpqueue_head_t *queue,
c910b4d9
A
250 uint64_t deadline,
251 uint64_t new_deadline)
252{
253 if (queue == &current_cpu_datap()->rtclock_timer.queue) {
254 if (deadline < new_deadline)
255 etimer_set_deadline(new_deadline);
256 }
257}
6d2010ae
A
258
/*
 * etimer_queue_migrate() is called from the Power-Management kext
 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline so that its timer queue can be moved to another processor.
 * This target processor should be the least idle (most busy) --
 * currently this is the primary processor for the calling thread's package.
 * Locking restrictions demand that the target cpu must be the boot cpu.
 *
 * Returns the number of timer requests moved (0 means the move was
 * aborted -- see the comment at the timer_queue_migrate() call below).
 * Must be called with interrupts disabled.
 */
uint32_t
etimer_queue_migrate(int target_cpu)
{
	cpu_data_t	*target_cdp = cpu_datap(target_cpu);
	cpu_data_t	*cdp = current_cpu_datap();
	int		ntimers_moved;

	assert(!ml_get_interrupts_enabled());
	assert(target_cpu != cdp->cpu_number);
	assert(target_cpu == master_cpu);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		DECR_TIMER_MIGRATE | DBG_FUNC_START,
		target_cpu,
		cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >>32),
		0, 0);

	/*
	 * Move timer requests from the local queue to the target processor's.
	 * The return value is the number of requests moved. If this is 0,
	 * it indicates that the first (i.e. earliest) timer is earlier than
	 * the earliest for the target processor. Since this would force a
	 * resync, the move of this and all later requests is aborted.
	 */
	ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
					    &target_cdp->rtclock_timer.queue);

	/*
	 * Assuming we moved stuff, clear local deadline.
	 */
	if (ntimers_moved > 0) {
		cdp->rtclock_timer.deadline = EndOfAllTime;
		setPop(EndOfAllTime);	/* disarm the local hardware pop */
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		DECR_TIMER_MIGRATE | DBG_FUNC_END,
		target_cpu, ntimers_moved, 0, 0, 0);

	return ntimers_moved;
}