/*
 * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *      File:           rtclock_asm.h
 *      Purpose:        Assembly routines for handling the machine dependent
 *                      real-time clock.
 */

#ifndef _I386_RTCLOCK_H_
#define _I386_RTCLOCK_H_

#include <i386/pal_rtclock_asm.h>

#if defined(__i386__)

/*
 * Nanotime returned in %edx:%eax.
 * Computed from tsc based on the scale factor
 * and an implicit 32 bit shift.
 *
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 */
#define NANOTIME                                                          \
        mov     %gs:CPU_NANOTIME,%edi                                   ; \
        PAL_RTC_NANOTIME_READ_FAST()
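
/*
 * For reference, the conversion the fast-path read performs is in effect a
 * 32.32 fixed-point multiply. A minimal C sketch, illustrative only (the
 * function and parameter names below are hypothetical, not kernel API):
 *
 *	#include <stdint.h>
 *
 *	// ns = ns_base + ((tsc - tsc_base) * scale) >> 32
 *	static inline uint64_t
 *	tsc_to_nanotime(uint64_t tsc, uint64_t tsc_base,
 *	                uint64_t ns_base, uint32_t scale)
 *	{
 *		uint64_t delta = tsc - tsc_base;
 *		// widen to 128 bits so the multiply cannot overflow before
 *		// the implicit 32-bit shift mentioned above
 *		return ns_base +
 *		    (uint64_t)(((__uint128_t)delta * scale) >> 32);
 *	}
 */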

/*
 * Add 64-bit delta in register dreg : areg to timer pointed to by register treg.
 */
#define TIMER_UPDATE(treg,dreg,areg,offset)                                \
        addl    (TIMER_LOW+(offset))(treg),areg     /* add low bits */   ; \
        adcl    dreg,(TIMER_HIGH+(offset))(treg)    /* carry high bits */; \
        movl    areg,(TIMER_LOW+(offset))(treg)     /* update low bits */; \
        movl    (TIMER_HIGH+(offset))(treg),dreg    /* copy high bits */ ; \
        movl    dreg,(TIMER_HIGHCHK+(offset))(treg) /* to high check */
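
/*
 * The TIMER_HIGHCHK store is what makes this 64-bit update safe to read
 * without a lock on a 32-bit CPU: the writer updates high, then low, then
 * mirrors high into the check word, so a reader that sees high == highchk
 * knows low belongs to the same update. A rough C sketch of the reader
 * side, with assumed field names (illustrative, not the kernel's types):
 *
 *	#include <stdint.h>
 *
 *	struct timer32 {
 *		volatile uint32_t low;      // TIMER_LOW
 *		volatile uint32_t high;     // TIMER_HIGH
 *		volatile uint32_t highchk;  // TIMER_HIGHCHK
 *	};
 *
 *	static inline uint64_t
 *	timer_grab32(struct timer32 *t)
 *	{
 *		uint32_t high, low;
 *		do {                        // retry if torn by a writer
 *			high = t->high;
 *			low  = t->low;
 *		} while (high != t->highchk);
 *		return ((uint64_t)high << 32) | low;
 *	}
 */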

/*
 * Add time delta to old timer and start new.
 */
#define TIMER_EVENT(old,new)                                               \
        NANOTIME                            /* edx:eax nanosecs */       ; \
        movl    %eax,%esi                   /* save timestamp */         ; \
        movl    %edx,%edi                   /* save timestamp */         ; \
        movl    %gs:CPU_ACTIVE_THREAD,%ecx  /* get current thread */     ; \
        subl    (old##_TIMER)+TIMER_TSTAMP(%ecx),%eax   /* elapsed */    ; \
        sbbl    (old##_TIMER)+TIMER_TSTAMP+4(%ecx),%edx /* time */       ; \
        TIMER_UPDATE(%ecx,%edx,%eax,old##_TIMER) /* update timer */      ; \
        movl    %esi,(new##_TIMER)+TIMER_TSTAMP(%ecx)   /* set timestamp */ ; \
        movl    %edi,(new##_TIMER)+TIMER_TSTAMP+4(%ecx) /* set timestamp */ ; \
        leal    (new##_TIMER)(%ecx),%ecx    /* compute new timer pointer */ ; \
        movl    %gs:CPU_PROCESSOR,%ebx      /* get current processor */  ; \
        movl    %ecx,THREAD_TIMER(%ebx)     /* set current timer */      ; \
        movl    %esi,%eax                   /* restore timestamp */      ; \
        movl    %edi,%edx                   /* restore timestamp */      ; \
        subl    (old##_STATE)+TIMER_TSTAMP(%ebx),%eax   /* elapsed */    ; \
        sbbl    (old##_STATE)+TIMER_TSTAMP+4(%ebx),%edx /* time */       ; \
        TIMER_UPDATE(%ebx,%edx,%eax,old##_STATE) /* update timer */      ; \
        leal    (new##_STATE)(%ebx),%ecx    /* new state pointer */      ; \
        movl    %ecx,CURRENT_STATE(%ebx)    /* set current state */      ; \
        movl    %esi,TIMER_TSTAMP(%ecx)     /* set timestamp */          ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */
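
/*
 * For orientation: ignoring the 32-bit low/high split, each
 * TIMER_UPDATE/timestamp pair above amounts to the following C, applied
 * once to the thread's old/new timers and once to the processor's old/new
 * state timers (struct and function names are illustrative, not kernel API):
 *
 *	#include <stdint.h>
 *
 *	struct timer { uint64_t all; uint64_t tstamp; };
 *
 *	static inline void
 *	timer_switch(struct timer *old_t, struct timer *new_t,
 *	             struct timer **current, uint64_t now)
 *	{
 *		old_t->all += now - old_t->tstamp;  // charge elapsed time
 *		new_t->tstamp = now;                // start the new timer
 *		*current = new_t;                   // make it current
 *	}
 */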

/*
 * Update time on user trap entry.
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 */
#define TIME_TRAP_UENTRY        TIMER_EVENT(USER,SYSTEM)

/*
 * Update time on user trap exit.
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 */
#define TIME_TRAP_UEXIT         TIMER_EVENT(SYSTEM,USER)

/*
 * Update time on interrupt entry.
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 * Saves processor state info on stack.
 */
#define TIME_INT_ENTRY                                                     \
        NANOTIME                            /* edx:eax nanosecs */       ; \
        movl    %eax,%gs:CPU_INT_EVENT_TIME   /* save in cpu data */     ; \
        movl    %edx,%gs:CPU_INT_EVENT_TIME+4 /* save in cpu data */     ; \
        movl    %eax,%esi                   /* save timestamp */         ; \
        movl    %edx,%edi                   /* save timestamp */         ; \
        movl    %gs:CPU_PROCESSOR,%ebx      /* get current processor */  ; \
        movl    THREAD_TIMER(%ebx),%ecx     /* get current timer */      ; \
        subl    TIMER_TSTAMP(%ecx),%eax     /* compute elapsed time */   ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */   ; \
        TIMER_UPDATE(%ecx,%edx,%eax,0)      /* update timer */           ; \
        movl    KERNEL_TIMER(%ebx),%ecx     /* point to kernel timer */  ; \
        movl    %esi,TIMER_TSTAMP(%ecx)     /* set timestamp */          ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */          ; \
        movl    %esi,%eax                   /* restore timestamp */      ; \
        movl    %edi,%edx                   /* restore timestamp */      ; \
        movl    CURRENT_STATE(%ebx),%ecx    /* get current state */      ; \
        pushl   %ecx                        /* save state */             ; \
        subl    TIMER_TSTAMP(%ecx),%eax     /* compute elapsed time */   ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */   ; \
        TIMER_UPDATE(%ecx,%edx,%eax,0)      /* update timer */           ; \
        leal    IDLE_STATE(%ebx),%eax       /* get idle state */         ; \
        cmpl    %eax,%ecx                   /* compare current state */  ; \
        je      0f                          /* skip if equal */          ; \
        leal    SYSTEM_STATE(%ebx),%ecx     /* get system state */       ; \
        movl    %ecx,CURRENT_STATE(%ebx)    /* set current state */      ; \
0:      movl    %esi,TIMER_TSTAMP(%ecx)     /* set timestamp */          ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */

/*
 * Update time on interrupt exit.
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 * Restores processor state info from stack.
 */
#define TIME_INT_EXIT                                                      \
        NANOTIME                            /* edx:eax nanosecs */       ; \
        movl    %eax,%gs:CPU_INT_EVENT_TIME   /* save in cpu data */     ; \
        movl    %edx,%gs:CPU_INT_EVENT_TIME+4 /* save in cpu data */     ; \
        movl    %eax,%esi                   /* save timestamp */         ; \
        movl    %edx,%edi                   /* save timestamp */         ; \
        movl    %gs:CPU_PROCESSOR,%ebx      /* get current processor */  ; \
        movl    KERNEL_TIMER(%ebx),%ecx     /* point to kernel timer */  ; \
        subl    TIMER_TSTAMP(%ecx),%eax     /* compute elapsed time */   ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */   ; \
        TIMER_UPDATE(%ecx,%edx,%eax,0)      /* update timer */           ; \
        movl    THREAD_TIMER(%ebx),%ecx     /* interrupted timer */      ; \
        movl    %esi,TIMER_TSTAMP(%ecx)     /* set timestamp */          ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */          ; \
        movl    %esi,%eax                   /* restore timestamp */      ; \
        movl    %edi,%edx                   /* restore timestamp */      ; \
        movl    CURRENT_STATE(%ebx),%ecx    /* get current state */      ; \
        subl    TIMER_TSTAMP(%ecx),%eax     /* compute elapsed time */   ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */   ; \
        TIMER_UPDATE(%ecx,%edx,%eax,0)      /* update timer */           ; \
        popl    %ecx                        /* restore state */          ; \
        movl    %ecx,CURRENT_STATE(%ebx)    /* set current state */      ; \
        movl    %esi,TIMER_TSTAMP(%ecx)     /* set timestamp */          ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */
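
/*
 * Taken together, TIME_INT_ENTRY and TIME_INT_EXIT bracket an interrupt:
 * entry stops the interrupted thread's timer, starts the kernel timer,
 * saves the current state pointer on the stack and (unless the CPU was
 * idle) switches to SYSTEM_STATE; exit undoes this in reverse. A hedged
 * C sketch of the pair (all names here are illustrative stand-ins for
 * the %gs-relative cpu data reached above):
 *
 *	#include <stdint.h>
 *
 *	struct timer { uint64_t all; uint64_t tstamp; };
 *	struct cpu {
 *		struct timer *thread_timer, *kernel_timer, *current_state;
 *		struct timer idle_state, system_state;
 *	};
 *
 *	// entry: returns the interrupted state, which the caller keeps
 *	// (the assembly keeps it on the stack) for int_exit()
 *	static struct timer *
 *	int_entry(struct cpu *cpu, uint64_t now)
 *	{
 *		struct timer *state = cpu->current_state;
 *		cpu->thread_timer->all += now - cpu->thread_timer->tstamp;
 *		cpu->kernel_timer->tstamp = now;
 *		state->all += now - state->tstamp;
 *		if (state != &cpu->idle_state)  // idle time keeps accruing
 *			cpu->current_state = &cpu->system_state;
 *		cpu->current_state->tstamp = now;
 *		return state;
 *	}
 *
 *	static void
 *	int_exit(struct cpu *cpu, struct timer *saved, uint64_t now)
 *	{
 *		cpu->kernel_timer->all += now - cpu->kernel_timer->tstamp;
 *		cpu->thread_timer->tstamp = now; // restart interrupted timer
 *		cpu->current_state->all += now - cpu->current_state->tstamp;
 *		cpu->current_state = saved;      // pop the saved state
 *		saved->tstamp = now;
 *	}
 */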

#elif defined(__x86_64__)

/*
 * Nanotime returned in %rax.
 * Computed from tsc based on the scale factor and an implicit 32 bit shift.
 * This code must match what _rtc_nanotime_read does in
 * machine_routines_asm.s.  Failure to do so can
 * result in "weird" timing results.
 *
 * Uses: %rsi, %rdi, %rdx, %rcx
 */
#define NANOTIME                                                           \
        movq    %gs:CPU_NANOTIME,%rdi                                    ; \
        PAL_RTC_NANOTIME_READ_FAST()
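
/*
 * The "must match _rtc_nanotime_read" requirement exists because the
 * nanotime data is updated concurrently with readers; a generation counter
 * lets a reader detect and retry a mid-update snapshot. A hedged C sketch
 * of that read loop (field names assumed for illustration, not the
 * kernel's own structure):
 *
 *	#include <stdint.h>
 *
 *	struct nanotime_data {
 *		volatile uint64_t tsc_base, ns_base;
 *		volatile uint32_t scale;
 *		volatile uint32_t generation; // 0 while an update is in flight
 *	};
 *
 *	static inline uint64_t
 *	rdtsc64(void)
 *	{
 *		uint32_t lo, hi;
 *		__asm__ volatile ("rdtsc" : "=a" (lo), "=d" (hi));
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 *
 *	static uint64_t
 *	nanotime_read(struct nanotime_data *d)
 *	{
 *		uint64_t ns;
 *		uint32_t gen;
 *		do {
 *			gen = d->generation;
 *			ns  = d->ns_base +
 *			    (uint64_t)(((__uint128_t)(rdtsc64() - d->tsc_base)
 *			        * d->scale) >> 32);
 *		} while (gen == 0 || gen != d->generation); // retry if torn
 *		return ns;
 *	}
 */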

/*
 * Add 64-bit delta in register reg to timer pointed to by register treg.
 */
#define TIMER_UPDATE(treg,reg,offset)                                      \
        addq    reg,(offset)+TIMER_ALL(treg)        /* add timer */
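
/*
 * Note the contrast with the 32-bit TIMER_UPDATE above: here the whole
 * 64-bit counter is updated with a single addq, so no TIMER_HIGHCHK check
 * word is needed; an aligned 64-bit load on x86_64 observes either the old
 * or the new value, never a torn one. In C terms the update is simply:
 *
 *	t->all += delta;
 */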

/*
 * Add time delta to old timer and start new.
 * Uses: %rsi, %rdi, %rdx, %rcx, %rax
 */
#define TIMER_EVENT(old,new)                                               \
        NANOTIME                            /* %rax := nanosecs */       ; \
        movq    %rax,%rsi                   /* save timestamp */         ; \
        movq    %gs:CPU_ACTIVE_THREAD,%rcx  /* get thread */             ; \
        subq    (old##_TIMER)+TIMER_TSTAMP(%rcx),%rax /* compute elapsed */ ; \
        TIMER_UPDATE(%rcx,%rax,old##_TIMER) /* update timer */           ; \
        leaq    (new##_TIMER)(%rcx),%rcx    /* point to new timer */     ; \
        movq    %rsi,TIMER_TSTAMP(%rcx)     /* set timestamp */          ; \
        movq    %gs:CPU_PROCESSOR,%rdx      /* get processor */          ; \
        movq    %rcx,THREAD_TIMER(%rdx)     /* set current timer */      ; \
        movq    %rsi,%rax                   /* restore timestamp */      ; \
        subq    (old##_STATE)+TIMER_TSTAMP(%rdx),%rax /* compute elapsed */ ; \
        TIMER_UPDATE(%rdx,%rax,old##_STATE) /* update timer */           ; \
        leaq    (new##_STATE)(%rdx),%rcx    /* point to new state */     ; \
        movq    %rcx,CURRENT_STATE(%rdx)    /* set current state */      ; \
        movq    %rsi,TIMER_TSTAMP(%rcx)     /* set timestamp */

/*
 * Update time on user trap entry.
 * Uses: %rsi, %rdi, %rdx, %rcx, %rax
 */
#define TIME_TRAP_UENTRY        TIMER_EVENT(USER,SYSTEM)

/*
 * Update time on user trap exit.
 * Uses: %rsi, %rdi, %rdx, %rcx, %rax
 */
#define TIME_TRAP_UEXIT         TIMER_EVENT(SYSTEM,USER)

/*
 * Update time on interrupt entry.
 * Uses: %rsi, %rdi, %rdx, %rcx, %rax
 * Saves processor state info on stack.
 */
#define TIME_INT_ENTRY                                                     \
        NANOTIME                            /* %rax := nanosecs */       ; \
        movq    %rax,%gs:CPU_INT_EVENT_TIME /* save in cpu data */       ; \
        movq    %rax,%rsi                   /* save timestamp */         ; \
        movq    %gs:CPU_PROCESSOR,%rdx      /* get processor */          ; \
        movq    THREAD_TIMER(%rdx),%rcx     /* get current timer */      ; \
        subq    TIMER_TSTAMP(%rcx),%rax     /* compute elapsed */        ; \
        TIMER_UPDATE(%rcx,%rax,0)           /* update timer */           ; \
        movq    KERNEL_TIMER(%rdx),%rcx     /* get kernel timer */       ; \
        movq    %rsi,TIMER_TSTAMP(%rcx)     /* set timestamp */          ; \
        movq    %rsi,%rax                   /* restore timestamp */      ; \
        movq    CURRENT_STATE(%rdx),%rcx    /* get current state */      ; \
        pushq   %rcx                        /* save state */             ; \
        subq    TIMER_TSTAMP(%rcx),%rax     /* compute elapsed */        ; \
        TIMER_UPDATE(%rcx,%rax,0)           /* update timer */           ; \
        leaq    IDLE_STATE(%rdx),%rax       /* get idle state */         ; \
        cmpq    %rax,%rcx                   /* compare current */        ; \
        je      0f                          /* skip if equal */          ; \
        leaq    SYSTEM_STATE(%rdx),%rcx     /* get system state */       ; \
        movq    %rcx,CURRENT_STATE(%rdx)    /* set current state */      ; \
0:      movq    %rsi,TIMER_TSTAMP(%rcx)     /* set timestamp */

/*
 * Update time on interrupt exit.
 * Uses: %rsi, %rdi, %rdx, %rcx, %rax
 * Restores processor state info from stack.
 */
#define TIME_INT_EXIT                                                      \
        NANOTIME                            /* %rax := nanosecs */       ; \
        movq    %rax,%gs:CPU_INT_EVENT_TIME /* save in cpu data */       ; \
        movq    %rax,%rsi                   /* save timestamp */         ; \
        movq    %gs:CPU_PROCESSOR,%rdx      /* get processor */          ; \
        movq    KERNEL_TIMER(%rdx),%rcx     /* get kernel timer */       ; \
        subq    TIMER_TSTAMP(%rcx),%rax     /* compute elapsed */        ; \
        TIMER_UPDATE(%rcx,%rax,0)           /* update timer */           ; \
        movq    THREAD_TIMER(%rdx),%rcx     /* interrupted timer */      ; \
        movq    %rsi,TIMER_TSTAMP(%rcx)     /* set timestamp */          ; \
        movq    %rsi,%rax                   /* restore timestamp */      ; \
        movq    CURRENT_STATE(%rdx),%rcx    /* get current state */      ; \
        subq    TIMER_TSTAMP(%rcx),%rax     /* compute elapsed */        ; \
        TIMER_UPDATE(%rcx,%rax,0)           /* update timer */           ; \
        popq    %rcx                        /* restore state */          ; \
        movq    %rcx,CURRENT_STATE(%rdx)    /* set current state */      ; \
        movq    %rsi,TIMER_TSTAMP(%rcx)     /* set timestamp */

#endif

/*
 * Check for vtimers for task.
 *   task_reg   is register pointing to current task
 *   thread_reg is register pointing to current thread
 */
#define TASK_VTIMER_CHECK(task_reg,thread_reg)                             \
        cmpl    $0,TASK_VTIMERS(task_reg)                                ; \
        jz      1f                                                       ; \
        orl     $(AST_BSD),%gs:CPU_PENDING_AST  /* Set pending AST */    ; \
        lock                                                             ; \
        orl     $(AST_BSD),TH_AST(thread_reg)   /* Set thread AST */     ; \
1:
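
/*
 * In C, the check amounts to the following (field names assumed for
 * illustration; TASK_VTIMERS and TH_AST above are assembly offsets for
 * the corresponding task/thread fields):
 *
 *	if (task->vtimers != 0) {            // any BSD vtimers armed?
 *		cpu_pending_ast |= AST_BSD;  // request an AST on this CPU
 *		thread->ast     |= AST_BSD;  // and mark the thread itself
 *	}
 */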

#endif /* _I386_RTCLOCK_H_ */