/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * File:	etimer.c
 * Purpose:	Routines for handling the machine independent
 *		event timer.
 */

#include <mach/mach_types.h>

#include <kern/timer_queue.h>
#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/etimer.h>
#include <kern/pms.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <i386/cpu_data.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>

/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
etimer_intr(int         user_mode,
            uint64_t    rip)
{
        uint64_t                abstime;
        rtclock_timer_t         *mytimer;
        cpu_data_t              *pp;
        int32_t                 latency;
        uint64_t                pmdeadline;

        pp = current_cpu_datap();

        SCHED_STATS_TIMER_POP(current_processor());

        abstime = mach_absolute_time();         /* Get the time now */

        /* has a pending clock timer expired? */
        mytimer = &pp->rtclock_timer;           /* Point to the event timer */
        if (mytimer->deadline <= abstime) {
                /*
                 * Log interrupt service latency (-ve value expected by tool):
                 * a non-PM event is expected next.
                 * The requested deadline may be earlier than when it was set
                 * - use MAX to avoid reporting bogus latencies.
                 */
                latency = (int32_t) (abstime - MAX(mytimer->deadline,
                                                   mytimer->when_set));
                KERNEL_DEBUG_CONSTANT(
                        DECR_TRAP_LATENCY | DBG_FUNC_NONE,
                        -latency, rip, user_mode, 0, 0);

                mytimer->has_expired = TRUE;    /* Remember that we popped */
                mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
                mytimer->has_expired = FALSE;

                /* Get the time again since we ran a bit */
                abstime = mach_absolute_time();
                mytimer->when_set = abstime;
        }

        /* is it time for a power management state change? */
        if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
                KERNEL_DEBUG_CONSTANT(
                        DECR_PM_DEADLINE | DBG_FUNC_START,
                        0, 0, 0, 0, 0);
                pmCPUDeadline(pp);
                KERNEL_DEBUG_CONSTANT(
                        DECR_PM_DEADLINE | DBG_FUNC_END,
                        0, 0, 0, 0, 0);
        }

        /* schedule our next deadline */
        etimer_resync_deadlines();
}

/*
 * Set the clock deadline.
 */
void
etimer_set_deadline(uint64_t deadline)
{
        rtclock_timer_t         *mytimer;
        spl_t                   s;
        cpu_data_t              *pp;

        s = splclock();                         /* no interruptions */
        pp = current_cpu_datap();

        mytimer = &pp->rtclock_timer;           /* Point to the timer itself */
        mytimer->deadline = deadline;           /* Set new expiration time */
        mytimer->when_set = mach_absolute_time();

        etimer_resync_deadlines();

        splx(s);
}

/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
etimer_resync_deadlines(void)
{
        uint64_t                deadline;
        uint64_t                pmdeadline;
        rtclock_timer_t         *mytimer;
        spl_t                   s = splclock();
        cpu_data_t              *pp;
        uint32_t                decr;

        pp = current_cpu_datap();
        deadline = EndOfAllTime;

        /*
         * If we have a clock timer set, pick that.
         */
        mytimer = &pp->rtclock_timer;
        if (!mytimer->has_expired &&
            0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
                deadline = mytimer->deadline;

        /*
         * If we have a power management deadline, see if that's earlier.
         */
        pmdeadline = pmCPUGetDeadline(pp);
        if (0 < pmdeadline && pmdeadline < deadline)
                deadline = pmdeadline;

        /*
         * Go and set the "pop" event.
         */
        decr = (uint32_t) setPop(deadline);

        /* Record non-PM deadline for latency tool */
        if (deadline != pmdeadline) {
                KERNEL_DEBUG_CONSTANT(
                        DECR_SET_DEADLINE | DBG_FUNC_NONE,
                        decr, 2,
                        deadline, (uint32_t)(deadline >> 32), 0);
        }
        splx(s);
}

void etimer_timer_expire(void *arg);
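
/*
 * Expire the current CPU's timer queue: service any timer calls whose
 * deadlines have passed, then re-evaluate and re-arm the next deadline.
 */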
void
etimer_timer_expire(
        __unused void           *arg)
{
        rtclock_timer_t         *mytimer;
        uint64_t                abstime;
        cpu_data_t              *pp;

        pp = current_cpu_datap();

        mytimer = &pp->rtclock_timer;
        abstime = mach_absolute_time();

        mytimer->has_expired = TRUE;
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;
        mytimer->when_set = mach_absolute_time();

        etimer_resync_deadlines();
}
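
/*
 * Return the timer coalescing slop allowed for a deadline: the lesser of
 * 12.5% of the interval until the deadline and 1 ms, or 0 if the deadline
 * has already passed.
 */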
uint64_t
timer_call_slop(
        uint64_t        deadline)
{
        uint64_t now = mach_absolute_time();

        if (deadline > now) {
                return MIN((deadline - now) >> 3, NSEC_PER_MSEC); /* Min of 12.5% and 1ms */
        }

        return 0;
}
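
/*
 * Select the queue on which a timer call with the given deadline should
 * be enqueued: the current CPU's queue if that CPU is running (pulling in
 * the hardware deadline when the new deadline is earlier), otherwise the
 * master CPU's queue.
 */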
mpqueue_head_t *
timer_queue_assign(
        uint64_t        deadline)
{
        cpu_data_t              *cdp = current_cpu_datap();
        mpqueue_head_t          *queue;

        if (cdp->cpu_running) {
                queue = &cdp->rtclock_timer.queue;

                if (deadline < cdp->rtclock_timer.deadline)
                        etimer_set_deadline(deadline);
        }
        else
                queue = &cpu_datap(master_cpu)->rtclock_timer.queue;

        return queue;
}
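
/*
 * Cancellation hook for a timer queue: if the queue belongs to the current
 * CPU and the cancelled deadline was earlier than the queue's new earliest
 * deadline, push the hardware deadline out to the new value.
 */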
void
timer_queue_cancel(
        mpqueue_head_t  *queue,
        uint64_t        deadline,
        uint64_t        new_deadline)
{
        if (queue == &current_cpu_datap()->rtclock_timer.queue) {
                if (deadline < new_deadline)
                        etimer_set_deadline(new_deadline);
        }
}

/*
 * etimer_queue_migrate() is called from the Power-Management kext
 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline so that its timer queue can be moved to another processor.
 * This target processor should be the least idle (most busy) --
 * currently this is the primary processor for the calling thread's package.
 * Locking restrictions demand that the target cpu must be the boot cpu.
 */
uint32_t
etimer_queue_migrate(int target_cpu)
{
        cpu_data_t      *target_cdp = cpu_datap(target_cpu);
        cpu_data_t      *cdp = current_cpu_datap();
        int             ntimers_moved;

        assert(!ml_get_interrupts_enabled());
        assert(target_cpu != cdp->cpu_number);
        assert(target_cpu == master_cpu);

        KERNEL_DEBUG_CONSTANT(
                DECR_TIMER_MIGRATE | DBG_FUNC_START,
                target_cpu,
                cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32),
                0, 0);

        /*
         * Move timer requests from the local queue to the target processor's.
         * The return value is the number of requests moved. If this is 0,
         * it indicates that the first (i.e. earliest) timer is earlier than
         * the earliest for the target processor. Since this would force a
         * resync, the move of this and all later requests is aborted.
         */
        ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
                                            &target_cdp->rtclock_timer.queue);

        /*
         * Assuming we moved stuff, clear local deadline.
         */
        if (ntimers_moved > 0) {
                cdp->rtclock_timer.deadline = EndOfAllTime;
                setPop(EndOfAllTime);
        }

        KERNEL_DEBUG_CONSTANT(
                DECR_TIMER_MIGRATE | DBG_FUNC_END,
                target_cpu, ntimers_moved, 0, 0, 0);

        return ntimers_moved;
}