/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *	File:		etimer.c
 *	Purpose:	Routines for handling the machine independent
 *			event timer.
 */
#include <mach/mach_types.h>

#include <kern/timer_queue.h>
#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/macro_help.h>
#include <kern/spl.h>
#include <kern/etimer.h>
#include <kern/pms.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <i386/cpu_data.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
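/*
 * Note: as an example of the drawback above, if a callout serviced at the
 * head of the chain re-arms itself for a time that falls before the rest
 * of the chain finishes, that new deadline has already passed by the time
 * the trailing etimer_resync_deadlines() call re-arms the hardware, so the
 * event fires late.
 */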
void
etimer_intr(int		user_mode,
	    uint64_t	rip)
{
	uint64_t	abstime;
	rtclock_timer_t	*mytimer;
	cpu_data_t	*pp;
	int32_t		latency;
	uint64_t	pmdeadline;

	pp = current_cpu_datap();

	SCHED_STATS_TIMER_POP(current_processor());

	abstime = mach_absolute_time();		/* Get the time now */

	/* has a pending clock timer expired? */
	mytimer = &pp->rtclock_timer;		/* Point to the event timer */
	if (mytimer->deadline <= abstime) {
		/*
		 * Log interrupt service latency (-ve value expected by tool)
		 * a non-PM event is expected next.
		 * The requested deadline may be earlier than when it was set
		 * - use MAX to avoid reporting bogus latencies.
		 */
		latency = (int32_t) (abstime - MAX(mytimer->deadline,
						   mytimer->when_set));
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_TRAP_LATENCY | DBG_FUNC_NONE,
			-latency,
			((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
			user_mode, 0, 0);

		mytimer->has_expired = TRUE;	/* Remember that we popped */
		mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
		mytimer->has_expired = FALSE;

		/* Get the time again since we ran a bit */
		abstime = mach_absolute_time();
		mytimer->when_set = abstime;
	}

	/* is it time for power management state change? */
	if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_PM_DEADLINE | DBG_FUNC_START,
			0, 0, 0, 0, 0);
		pmCPUDeadline(pp);
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_PM_DEADLINE | DBG_FUNC_END,
			0, 0, 0, 0, 0);
	}

	/* schedule our next deadline */
	etimer_resync_deadlines();
}
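/*
 * In summary, each timer interrupt (1) drains any expired entries from the
 * local rtclock timer queue, (2) lets power management act on its own
 * deadline if that too has passed, and (3) re-arms the hardware for the
 * nearest remaining deadline via etimer_resync_deadlines().
 */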
/*
 * Set the clock deadline.
 */
void etimer_set_deadline(uint64_t deadline)
{
	rtclock_timer_t	*mytimer;
	spl_t		s;
	cpu_data_t	*pp;

	s = splclock();				/* no interruptions */
	pp = current_cpu_datap();

	mytimer = &pp->rtclock_timer;		/* Point to the timer itself */
	mytimer->deadline = deadline;		/* Set new expiration time */
	mytimer->when_set = mach_absolute_time();

	etimer_resync_deadlines();

	splx(s);
}
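/*
 * Usage note: timer_queue_assign() below calls etimer_set_deadline() when a
 * newly queued timer is earlier than this CPU's current queue deadline, so
 * the hardware pop is pulled forward immediately rather than at the next
 * interrupt-driven resync.
 */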
/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
etimer_resync_deadlines(void)
{
	uint64_t	deadline;
	uint64_t	pmdeadline;
	rtclock_timer_t	*mytimer;
	spl_t		s = splclock();
	cpu_data_t	*pp;
	uint32_t	decr;

	pp = current_cpu_datap();
	deadline = EndOfAllTime;

	/*
	 * If we have a clock timer set, pick that.
	 */
	mytimer = &pp->rtclock_timer;
	if (!mytimer->has_expired &&
	    0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
		deadline = mytimer->deadline;

	/*
	 * If we have a power management deadline, see if that's earlier.
	 */
	pmdeadline = pmCPUGetDeadline(pp);
	if (0 < pmdeadline && pmdeadline < deadline)
		deadline = pmdeadline;

	/*
	 * Go and set the "pop" event.
	 */
	decr = (uint32_t) setPop(deadline);

	/* Record non-PM deadline for latency tool */
	if (deadline != pmdeadline) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_SET_DEADLINE | DBG_FUNC_NONE,
			decr, 2,
			deadline, (uint32_t)(deadline >> 32), 0);
	}
	splx(s);
}
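/*
 * EndOfAllTime serves as the "no deadline" sentinel here: if neither the
 * clock timer nor power management has a pending deadline, setPop() is
 * handed EndOfAllTime and the pop is effectively pushed out indefinitely.
 * etimer_queue_migrate() below relies on the same convention after it
 * empties the local queue.
 */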
void etimer_timer_expire(void *arg);

/*
 * Expire the local timer queue and reset the hardware deadline.
 */
void
etimer_timer_expire(
	__unused void	*arg)
{
	rtclock_timer_t	*mytimer;
	uint64_t	abstime;
	cpu_data_t	*pp;

	pp = current_cpu_datap();

	mytimer = &pp->rtclock_timer;
	abstime = mach_absolute_time();

	mytimer->has_expired = TRUE;
	mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
	mytimer->has_expired = FALSE;
	mytimer->when_set = mach_absolute_time();

	etimer_resync_deadlines();
}
/*
 * Compute a coalescing tolerance ("slop") for a given deadline:
 * one-eighth of the time remaining, capped at one millisecond.
 */
uint64_t
timer_call_slop(
	uint64_t	deadline)
{
	uint64_t now = mach_absolute_time();

	if (deadline > now) {
		return MIN((deadline - now) >> 3, NSEC_PER_MSEC); /* Min of 12.5% and 1ms */
	}

	return 0;
}
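/*
 * Worked example (assuming absolute-time ticks in nanoseconds, as on x86):
 * a deadline 800us away yields a slop of 800000 >> 3 = 100000 ns (100us);
 * any deadline more than 8ms away hits the NSEC_PER_MSEC cap of 1ms.
 */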
mpqueue_head_t *
timer_queue_assign(
	uint64_t	deadline)
{
	cpu_data_t	*cdp = current_cpu_datap();
	mpqueue_head_t	*queue;

	if (cdp->cpu_running) {
		queue = &cdp->rtclock_timer.queue;

		if (deadline < cdp->rtclock_timer.deadline)
			etimer_set_deadline(deadline);
	}
	else
		queue = &cpu_datap(master_cpu)->rtclock_timer.queue;

	return (queue);
}
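/*
 * Note on the fallback above: a CPU that is not running cannot service its
 * own timer pops, so timers assigned while it is offline land on the boot
 * processor's queue instead.
 */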
void
timer_queue_cancel(
	mpqueue_head_t	*queue,
	uint64_t	deadline,
	uint64_t	new_deadline)
{
	if (queue == &current_cpu_datap()->rtclock_timer.queue) {
		if (deadline < new_deadline)
			etimer_set_deadline(new_deadline);
	}
}
/*
 * etimer_queue_migrate() is called from the Power-Management kext
 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline so that its timer queue can be moved to another processor.
 * This target processor should be the least idle (most busy) --
 * currently this is the primary processor for the calling thread's package.
 * Locking restrictions demand that the target cpu must be the boot cpu.
 */
uint32_t
etimer_queue_migrate(int target_cpu)
{
	cpu_data_t	*target_cdp = cpu_datap(target_cpu);
	cpu_data_t	*cdp = current_cpu_datap();
	int		ntimers_moved;

	assert(!ml_get_interrupts_enabled());
	assert(target_cpu != cdp->cpu_number);
	assert(target_cpu == master_cpu);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		DECR_TIMER_MIGRATE | DBG_FUNC_START,
		target_cpu,
		cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32),
		0, 0);

	/*
	 * Move timer requests from the local queue to the target processor's.
	 * The return value is the number of requests moved. If this is 0,
	 * it indicates that the first (i.e. earliest) timer is earlier than
	 * the earliest for the target processor. Since this would force a
	 * resync, the move of this and all later requests is aborted.
	 */
	ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
					    &target_cdp->rtclock_timer.queue);

	/*
	 * Assuming we moved stuff, clear local deadline.
	 */
	if (ntimers_moved > 0) {
		cdp->rtclock_timer.deadline = EndOfAllTime;
		setPop(EndOfAllTime);
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		DECR_TIMER_MIGRATE | DBG_FUNC_END,
		target_cpu, ntimers_moved, 0, 0, 0);

	return ntimers_moved;
}
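/*
 * Example of the abort case described above: if the idling CPU's earliest
 * timer is due before the boot CPU's earliest, moving it would make the
 * target's queue head earlier and force a resync there, so
 * timer_queue_migrate() moves nothing and returns 0; the caller keeps its
 * queue and remains responsible for the near deadline.
 */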