/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * Purpose: Routines for handling the machine independent
 *          event timer.
 */
#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/macro_help.h>

#include <kern/timer_queue.h>
#include <kern/timer_call.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
timer_intr(__unused int inuser, __unused uint64_t iaddr)
{
        uint64_t                abstime, new_idle_timeout_ticks;
        rtclock_timer_t         *mytimer;
        cpu_data_t              *cpu_data_ptr;

        cpu_data_ptr = getCpuDatap();
        mytimer = &cpu_data_ptr->rtclock_timer;         /* Point to the event timer */
        abstime = mach_absolute_time();                 /* Get the time now */
        /* is it time for an idle timer event? */
        if ((cpu_data_ptr->idle_timer_deadline > 0) && (cpu_data_ptr->idle_timer_deadline <= abstime)) {
                cpu_data_ptr->idle_timer_deadline = 0x0ULL;
                new_idle_timeout_ticks = 0x0ULL;

                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, MACHDBG_CODE(DBG_MACH_EXCP_DECI, 3) | DBG_FUNC_START, 0, 0, 0, 0, 0);
                ((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, MACHDBG_CODE(DBG_MACH_EXCP_DECI, 3) | DBG_FUNC_END, 0, 0, 0, 0, 0);
                /* if a new idle timeout was requested set the new idle timer deadline */
                if (new_idle_timeout_ticks != 0x0ULL) {
                        clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
                }

                abstime = mach_absolute_time();         /* Get the time again since we ran a bit */
        }
        /* has a pending clock timer expired? */
        if (mytimer->deadline <= abstime) {             /* Have we expired the deadline? */
                mytimer->has_expired = TRUE;            /* Remember that we popped */
                mytimer->deadline = EndOfAllTime;       /* Set timer request to the end of all time in case we have no more events */
                mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
                mytimer->has_expired = FALSE;
                abstime = mach_absolute_time();         /* Get the time again since we ran a bit */
        }
        uint64_t quantum_deadline = cpu_data_ptr->quantum_timer_deadline;
        /* is it the quantum timer expiration? */
        if ((quantum_deadline <= abstime) && (quantum_deadline > 0)) {
                cpu_data_ptr->quantum_timer_deadline = 0;
                quantum_timer_expire(abstime);
        }
        /* Force reload our next deadline */
        cpu_data_ptr->rtcPop = EndOfAllTime;
        /* schedule our next deadline */
        timer_resync_deadlines();
}
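/*
 * A hedged sketch of the "set of generic callouts" alternative the XXX
 * comment above suggests: iterate a table of handlers, each of which
 * services its event and reports its next deadline, and keep the minimum.
 * All names here are hypothetical illustrations, not part of xnu.
 */
#if 0   /* illustrative only */
typedef uint64_t (*timer_callout_fn_t)(uint64_t now);

static uint64_t
timer_intr_generic(timer_callout_fn_t *callouts, int ncallouts)
{
        uint64_t now = mach_absolute_time();
        uint64_t soonest = EndOfAllTime;

        for (int i = 0; i < ncallouts; i++) {
                /* service this callout; it returns its next deadline */
                uint64_t next = callouts[i](now);
                if (next < soonest)
                        soonest = next;
        }
        return soonest;         /* caller programs the decrementer from this */
}
#endif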
/*
 * Set the clock deadline.
 */
void
timer_set_deadline(uint64_t deadline)
{
        rtclock_timer_t         *mytimer;
        spl_t                   s;
        cpu_data_t              *cpu_data_ptr;

        s = splclock();                                 /* no interruptions */
        cpu_data_ptr = getCpuDatap();

        mytimer = &cpu_data_ptr->rtclock_timer;         /* Point to the timer itself */
        mytimer->deadline = deadline;                   /* Set the new expiration time */

        timer_resync_deadlines();

        splx(s);
}
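/*
 * Illustrative usage (editorial sketch, not from the original file):
 * callers pass an absolute deadline, typically derived from a relative
 * interval, e.g. 10ms from now:
 *
 *      uint64_t interval_abs;
 *      clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &interval_abs);
 *      timer_set_deadline(mach_absolute_time() + interval_abs);
 */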
void
quantum_timer_set_deadline(uint64_t deadline)
{
        cpu_data_t              *cpu_data_ptr;

        /* We should've only come into this path with interrupts disabled */
        assert(ml_get_interrupts_enabled() == FALSE);

        cpu_data_ptr = getCpuDatap();
        cpu_data_ptr->quantum_timer_deadline = deadline;
        timer_resync_deadlines();
}
/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
timer_resync_deadlines(void)
{
        uint64_t                deadline;
        rtclock_timer_t         *mytimer;
        spl_t                   s = splclock();         /* No interruptions please */
        cpu_data_t              *cpu_data_ptr;

        cpu_data_ptr = getCpuDatap();

        deadline = 0;

        /* if we have a clock timer set sooner, pop on that */
        mytimer = &cpu_data_ptr->rtclock_timer;         /* Point to the timer itself */
        if ((!mytimer->has_expired) && (mytimer->deadline > 0))
                deadline = mytimer->deadline;

        /* if we have an idle timer event coming up, how about that? */
        if ((cpu_data_ptr->idle_timer_deadline > 0)
            && (cpu_data_ptr->idle_timer_deadline < deadline))
                deadline = cpu_data_ptr->idle_timer_deadline;

        /* If we have the quantum timer setup, check that */
        if ((cpu_data_ptr->quantum_timer_deadline > 0)
            && (cpu_data_ptr->quantum_timer_deadline < deadline))
                deadline = cpu_data_ptr->quantum_timer_deadline;

        if ((deadline == EndOfAllTime)
            || ((deadline > 0) && (cpu_data_ptr->rtcPop != deadline))) {
                int             decr;

                decr = setPop(deadline);

                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                    MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE,
                    decr, 2, 0, 0, 0);
        }
        splx(s);
}
boolean_t
timer_resort_threshold(__unused uint64_t skew)
{
        return FALSE;
}
mpqueue_head_t *
timer_queue_assign(uint64_t deadline)
{
        cpu_data_t              *cpu_data_ptr = getCpuDatap();
        mpqueue_head_t          *queue;

        if (cpu_data_ptr->cpu_running) {
                queue = &cpu_data_ptr->rtclock_timer.queue;

                if (deadline < cpu_data_ptr->rtclock_timer.deadline)
                        timer_set_deadline(deadline);
        }
        else
                queue = &cpu_datap(master_cpu)->rtclock_timer.queue;

        return (queue);
}
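/*
 * Design note on timer_queue_assign() above (editorial): timers targeted
 * at a CPU that is not running are parked on the master CPU's queue, so
 * they can still be serviced while the local CPU remains offline.
 */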
void
timer_queue_cancel(mpqueue_head_t *queue, uint64_t deadline, uint64_t new_deadline)
{
        if (queue == &getCpuDatap()->rtclock_timer.queue) {
                if (deadline < new_deadline)
                        timer_set_deadline(new_deadline);
        }
}
mpqueue_head_t *
timer_queue_cpu(int cpu)
{
        return &cpu_datap(cpu)->rtclock_timer.queue;
}
void
timer_call_cpu(int cpu, void (*fn)(void *), void *arg)
{
        cpu_signal(cpu_datap(cpu), SIGPxcall, (void *) fn, arg);
}
void
timer_call_nosync_cpu(int cpu, void (*fn)(void *), void *arg)
{
        /* XXX Needs error checking and retry */
        cpu_signal(cpu_datap(cpu), SIGPxcall, (void *) fn, arg);
}
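/*
 * Editorial sketch of the retry the XXX above asks for, assuming
 * cpu_signal() reports failure via kern_return_t; the bound of 10 tries
 * is a hypothetical choice, not from xnu.
 */
#if 0   /* illustrative only */
static void
timer_call_cpu_retry(int cpu, void (*fn)(void *), void *arg)
{
        int tries = 10;         /* hypothetical retry bound */

        while (cpu_signal(cpu_datap(cpu), SIGPxcall, (void *) fn, arg) != KERN_SUCCESS
            && --tries > 0)
                continue;       /* retry until delivered or bound exhausted */
}
#endif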
static timer_coalescing_priority_params_ns_t tcoal_prio_params_init =
{
        .idle_entry_timer_processing_hdeadline_threshold_ns = 5000ULL * NSEC_PER_USEC,
        .interrupt_timer_coalescing_ilat_threshold_ns = 30ULL * NSEC_PER_USEC,
        .timer_resort_threshold_ns = 50 * NSEC_PER_MSEC,
        .timer_coalesce_rt_shift = 0,
        .timer_coalesce_bg_shift = -5,
        .timer_coalesce_kt_shift = 3,
        .timer_coalesce_fp_shift = 3,
        .timer_coalesce_ts_shift = 3,
        .timer_coalesce_rt_ns_max = 0ULL,
        .timer_coalesce_bg_ns_max = 100 * NSEC_PER_MSEC,
        .timer_coalesce_kt_ns_max = 1 * NSEC_PER_MSEC,
        .timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC,
        .timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC,
        .latency_qos_scale = {3, 2, 1, -2, -15, -15},
        .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
            75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC},
        .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE},
};
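/*
 * Editorial note on the *_shift values above: as consumed by the timer
 * coalescing code in kern/timer_call.c, a positive shift scales the
 * allowable slop down from the time remaining to the deadline
 * (slop = remaining >> shift) and a negative shift scales it up
 * (slop = remaining << -shift), capped by the matching *_ns_max entry.
 * For example, with .timer_coalesce_kt_shift = 3, a kernel timer due in
 * 4ms may slip by up to 4ms / 8 = 500us; with .timer_coalesce_bg_shift
 * = -5, background timers coalesce aggressively and quickly hit the
 * 100ms cap.
 */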
timer_coalescing_priority_params_ns_t *
timer_call_get_priority_params(void)
{
        return &tcoal_prio_params_init;
}