/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_kdp.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>
#include <config_dtrace.h>

#define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

/*
 * Fault recovery.
 */

#ifdef __MACHO__
#define RECOVERY_SECTION        .section __VECTORS, __recover
#else
#define RECOVERY_SECTION        .text
#endif

#define RECOVER_TABLE_START     \
        .align 3                ;\
        .globl EXT(recover_table) ;\
LEXT(recover_table)             ;\
        .text

#define RECOVER(addr)           \
        .align 3                ;\
        .quad 9f                ;\
        .quad addr              ;\
        .text                   ;\
9:

#define RECOVER_TABLE_END       \
        .align 3                ;\
        .globl EXT(recover_table_end) ;\
LEXT(recover_table_end)         ;\
        .text

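/*
 * How the table built by these macros is consumed (illustrative sketch, not
 * part of this file): each RECOVER(addr) entry pairs the address of the
 * instruction that may fault (the "9:" label) with a recovery address.  When
 * the kernel trap handler takes a fault, it can scan the region between
 * recover_table and recover_table_end and, if the faulting RIP matches an
 * entry, resume at the recovery address instead of panicking.  A hedged
 * C-style sketch of that lookup (the struct layout and set_recovery_ip name
 * below are assumptions for illustration, not necessarily the identifiers
 * used by the real trap handler):
 *
 *	struct recovery { uintptr_t fault_addr, recover_addr; };
 *	extern struct recovery recover_table[], recover_table_end[];
 *
 *	for (struct recovery *rp = recover_table; rp < recover_table_end; rp++)
 *		if (rp->fault_addr == fault_rip) {
 *			set_recovery_ip(state, rp->recover_addr);
 *			return;		// resume at the recovery stub
 *		}
 */
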
/*
 * Allocate the recovery table.
 */
        RECOVERY_SECTION
        RECOVER_TABLE_START

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
        movl    %edi, %ecx              /* MSR number into %ecx for rdmsr */
        movq    %rdx, %rdi              /* save hi pointer; rdmsr clobbers %edx */
        RECOVERY_SECTION
        RECOVER(rdmsr_fail)
        rdmsr
        movl    %eax, (%rsi)            /* low 32 bits to *lo */
        movl    %edx, (%rdi)            /* high 32 bits to *hi */
        xorl    %eax, %eax              /* return 0: success */
        ret

rdmsr_fail:
        movq    $1, %rax                /* rdmsr faulted: return non-zero */
        ret
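/*
 * Hedged sketch of a caller of rdmsr_carefully(), shown only as a comment;
 * MSR_TEST is a placeholder name, not a real MSR constant:
 *
 *	uint32_t lo, hi;
 *	if (rdmsr_carefully(MSR_TEST, &lo, &hi) != 0) {
 *		// MSR not present: the #GP was absorbed by the recovery
 *		// entry above and a non-zero value was returned instead.
 *	}
 */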
/*
 * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
 */

ENTRY(rdmsr64_carefully)
        movl    %edi, %ecx              /* MSR number into %ecx for rdmsr */
        RECOVERY_SECTION
        RECOVER(rdmsr64_carefully_fail)
        rdmsr
        movl    %eax, (%rsi)            /* low 32 bits of *val */
        movl    %edx, 4(%rsi)           /* high 32 bits of *val */
        xorl    %eax, %eax              /* return 0: success */
        ret
rdmsr64_carefully_fail:
        movl    $1, %eax                /* rdmsr faulted: return non-zero */
        ret
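/*
 * Sketch of a caller of rdmsr64_carefully(); as above, MSR_TEST is a
 * placeholder used only for illustration:
 *
 *	uint64_t val;
 *	if (rdmsr64_carefully(MSR_TEST, &val) == 0) {
 *		// val now holds EDX:EAX from the successful rdmsr
 *	}
 */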
/*
 * int wrmsr_carefully(uint32_t msr, uint64_t val);
 */

ENTRY(wrmsr_carefully)
        movl    %edi, %ecx              /* MSR number into %ecx for wrmsr */
        movl    %esi, %eax              /* low 32 bits of val */
        shr     $32, %rsi
        movl    %esi, %edx              /* high 32 bits of val */
        RECOVERY_SECTION
        RECOVER(wrmsr_fail)
        wrmsr
        xorl    %eax, %eax              /* return 0: success */
        ret
wrmsr_fail:
        movl    $1, %eax                /* wrmsr faulted: return non-zero */
        ret
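/*
 * Sketch of a caller of wrmsr_carefully(); a faulting write is reported to
 * the caller rather than panicking the machine (MSR_TEST and new_value are
 * placeholders for illustration):
 *
 *	if (wrmsr_carefully(MSR_TEST, new_value) != 0) {
 *		// wrmsr faulted; nothing was written
 *	}
 */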

        .globl  EXT(thread_exception_return)
        .globl  EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
        call    EXT(dtrace_thread_bootstrap)
#endif

LEXT(thread_exception_return)
        cli
        xorl    %ecx, %ecx              /* don't check if we're in the PFZ */
        jmp     EXT(return_from_trap)

/*
 * Copyin/out from user/kernel address space.
 * rdi: source address
 * rsi: destination address
 * rdx: byte count (in fact, always < 64MB -- see copyio)
 */
Entry(_bcopy)
        xchg    %rdi, %rsi              /* source %rsi, dest %rdi */

        cld                             /* count up */
        mov     %rdx, %rcx              /* move by longwords first */
        shr     $3, %rcx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        rep
        movsq                           /* move longwords */

        movl    %edx, %ecx              /* now move remaining bytes */
        andl    $7, %ecx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        rep
        movsb

        xorl    %eax,%eax               /* return 0 for success */
        ret                             /* and return */

_bcopy_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        ret
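
/*
 * Hedged sketch of how a copyio()-style wrapper invokes _bcopy(); the
 * argument names are illustrative, and the real wrapper performs address
 * validation and bounds the count before getting here:
 *
 *	// 0 on success, EFAULT if either address faulted mid-copy
 *	int err = _bcopy(src, dst, nbytes);	// nbytes already < 64MB
 */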

Entry(pmap_safe_read)
        RECOVERY_SECTION
        RECOVER(_pmap_safe_read_fail)
        movq    (%rdi), %rcx            /* attempt the 8-byte load */
        mov     %rcx, (%rsi)            /* store it through the result pointer */
        mov     $1, %eax                /* return 1: read succeeded */
        ret
_pmap_safe_read_fail:
        xor     %eax, %eax              /* return 0: the load faulted */
        ret
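/*
 * The register usage above implies a C-level shape roughly like
 * boolean pmap_safe_read(const void *src, uint64_t *dst); the exact
 * prototype is declared elsewhere and is not shown in this file.
 * Hedged usage sketch (variable names are illustrative only):
 *
 *	uint64_t word;
 *	if (pmap_safe_read(candidate_ptr, &word)) {
 *		// word is valid; candidate_ptr was mapped and readable
 *	}
 */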

/*
 * 2-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy2)
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        movw    (%rdi), %cx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        movw    %cx, (%rsi)

        xorl    %eax,%eax               /* return 0 for success */
        ret                             /* and return */

/*
 * 4-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy4)
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        movl    (%rdi), %ecx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        mov     %ecx, (%rsi)

        xorl    %eax,%eax               /* return 0 for success */
        ret                             /* and return */

/*
 * 8-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy8)
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        movq    (%rdi), %rcx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        mov     %rcx, (%rsi)

        xorl    %eax,%eax               /* return 0 for success */
        ret                             /* and return */
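
/*
 * The three fixed-size copies above let ml_copy_phys() move exactly 2, 4 or
 * 8 bytes while still getting EFAULT back if either mapping is bad.  Hedged
 * sketch of the size dispatch a caller might use (the real ml_copy_phys()
 * lives in C elsewhere; this is illustration only):
 *
 *	switch (bytes) {
 *	case 2:  err = _bcopy2(src, dst);        break;
 *	case 4:  err = _bcopy4(src, dst);        break;
 *	case 8:  err = _bcopy8(src, dst);        break;
 *	default: err = _bcopy(src, dst, bytes);  break;
 *	}
 */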


/*
 * Copyin string from user/kern address space.
 * rdi: source address
 * rsi: destination address
 * rdx: max byte count
 * rcx: actual byte count (OUT)
 */
Entry(_bcopystr)
        pushq   %rdi                    /* save source address for length calculation */
        xchgq   %rdi, %rsi              /* source %rsi, dest %rdi */

        xorl    %eax,%eax               /* set to 0 here so that high 24 bits */
                                        /* are 0 for the test against 0 */
2:
        RECOVERY_SECTION
        RECOVER(_bcopystr_fail)         /* copy bytes... */
        movb    (%rsi),%al
        incq    %rsi
        testq   %rdi,%rdi               /* if kernel address is NULL ... */
        jz      3f                      /* ... skip the store */
        movb    %al,(%rdi)              /* copy the byte */
        incq    %rdi
3:
        testl   %eax,%eax               /* did we just stuff the 0-byte? */
        jz      4f                      /* yes, return 0 already in %eax */
        decq    %rdx                    /* decrement #bytes left in buffer */
        jnz     2b                      /* buffer not full, copy another byte */
        movl    $(ENAMETOOLONG),%eax    /* buffer full, no \0: ENAMETOOLONG */
4:
        cmpq    $0,%rcx                 /* get OUT len ptr */
        jz      _bcopystr_ret           /* if null, just return */
        subq    (%rsp),%rsi
        movq    %rsi,(%rcx)             /* else set OUT arg to xfer len */
        popq    %rdi                    /* restore registers */
_bcopystr_ret:
        ret                             /* and return */

_bcopystr_fail:
        popq    %rdi                    /* restore registers */
        movl    $(EFAULT),%eax          /* return error for failure */
        ret
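
/*
 * Sketch of a copyinstr-style caller of _bcopystr(); names are illustrative
 * and the real wrapper performs additional address validation first:
 *
 *	vm_size_t done = 0;
 *	int err = _bcopystr(user_src, kern_dst, maxlen, &done);
 *	// err == 0           : NUL copied; 'done' counts bytes read, incl. NUL
 *	// err == ENAMETOOLONG: buffer filled before a NUL was found
 *	// err == EFAULT      : a byte could not be read from user_src
 */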

/*
 * Done with recovery table.
 */
        RECOVERY_SECTION
        RECOVER_TABLE_END