/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <debug.h>
#include <mach_kdp.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>
#include <config_dtrace.h>

#define	_ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

/*
 * Fault recovery.
 */

#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section __VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif

#define	RECOVER_TABLE_START	\
	.align	3		;\
	.globl	EXT(recover_table) ;\
LEXT(recover_table)		;\
	.text

#define	RECOVER(addr)		\
	.align	3		;\
	.quad	9f		;\
	.quad	addr		;\
	.text			;\
9:

#define	RECOVER_TABLE_END	\
	.align	3		;\
	.globl	EXT(recover_table_end) ;\
LEXT(recover_table_end)		;\
	.text
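
/*
 * How the table is consulted -- a hedged sketch of the C-side lookup,
 * not the exact kernel code: on a kernel-mode fault, the trap handler
 * scans recover_table for an entry whose first quad matches the
 * faulting RIP and, if one is found, resumes execution at the paired
 * recovery address instead of panicking:
 *
 *	struct recovery { uintptr_t fault_ip, recover_ip; };
 *	extern struct recovery recover_table[], recover_table_end[];
 *
 *	for (struct recovery *rp = recover_table;
 *	     rp < recover_table_end; rp++) {
 *		if (rp->fault_ip == saved_rip) {
 *			saved_rip = rp->recover_ip;	// resume at handler
 *			return;				// fault absorbed
 *		}
 *	}
 */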

/*
 * Allocate the recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl	%edi, %ecx		/* MSR number -> %ecx for rdmsr */
	movq	%rdx, %rdi		/* rdmsr clobbers %edx: park hi ptr in %rdi */
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr				/* may #GP if MSR is unimplemented */
	movl	%eax, (%rsi)		/* store low 32 bits */
	movl	%edx, (%rdi)		/* store high 32 bits */
	xorl	%eax, %eax		/* return 0 for success */
	ret

rdmsr_fail:
	movq	$1, %rax		/* return 1 for failure */
	ret
/*
 * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
 */

ENTRY(rdmsr64_carefully)
	movl	%edi, %ecx		/* MSR number -> %ecx for rdmsr */
	RECOVERY_SECTION
	RECOVER(rdmsr64_carefully_fail)
	rdmsr				/* may #GP if MSR is unimplemented */
	movl	%eax, (%rsi)		/* store low 32 bits */
	movl	%edx, 4(%rsi)		/* store high 32 bits */
	xorl	%eax, %eax		/* return 0 for success */
	ret
rdmsr64_carefully_fail:
	movl	$1, %eax		/* return 1 for failure */
	ret
/*
 * int wrmsr_carefully(uint32_t msr, uint64_t val);
 */

ENTRY(wrmsr_carefully)
	movl	%edi, %ecx		/* MSR number -> %ecx for wrmsr */
	movl	%esi, %eax		/* low 32 bits of value */
	shr	$32, %rsi
	movl	%esi, %edx		/* high 32 bits of value */
	RECOVERY_SECTION
	RECOVER(wrmsr_fail)
	wrmsr				/* may #GP on illegal MSR or value */
	xorl	%eax, %eax		/* return 0 for success */
	ret
wrmsr_fail:
	movl	$1, %eax		/* return 1 for failure */
	ret
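
/*
 * Typical use from C -- a hedged sketch; the caller and error handling
 * are illustrative, not lifted from the kernel sources:
 *
 *	uint64_t val;
 *	if (rdmsr64_carefully(msr, &val) != 0) {
 *		// rdmsr #GP'd (e.g. the MSR is not implemented on this
 *		// CPU) and the recovery table absorbed the fault
 *	}
 *	if (wrmsr_carefully(msr, val) != 0) {
 *		// wrmsr faulted; nothing was written
 *	}
 */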

#if DEBUG
.globl EXT(thread_exception_return_internal)
#else
.globl EXT(thread_exception_return)
#endif
.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	call EXT(dtrace_thread_bootstrap)
#endif

#if DEBUG
LEXT(thread_exception_return_internal)
#else
LEXT(thread_exception_return)
#endif
	cli
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
					/* (commpage preemption-free zone) */
	jmp	EXT(return_from_trap)

/*
 * Copyin/out from user/kernel address space.
 * rdi: source address
 * rsi: destination address
 * rdx: byte count (in fact, always < 64MB -- see copyio)
 */
Entry(_bcopy)
	xchg	%rdi, %rsi		/* source %rsi, dest %rdi */

	cld				/* count up */
	mov	%rdx, %rcx		/* move by longwords first */
	shr	$3, %rcx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsq				/* move longwords */

	movl	%edx, %ecx		/* now move remaining bytes */
	andl	$7, %ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsb

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

_bcopy_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	ret
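
/*
 * _bcopy is not called directly: the C copyio() layer (see
 * osfmk/x86_64/copyio.c) validates and bounds the user range and flags
 * the thread as copy-active before dispatching here. A hedged sketch,
 * with the bookkeeping reduced to its essentials:
 *
 *	thread->machine.specFlags |= CopyIOActive;	// window open
 *	error = _bcopy(user_src, kernel_dst, nbytes);	// 0 or EFAULT
 *	thread->machine.specFlags &= ~CopyIOActive;	// window closed
 */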

/*
 * Safe 8-byte read used by the pmap layer.
 * rdi: source address
 * rsi: destination address
 * Returns 1 on success, 0 if the read faults -- note: the opposite
 * convention from the EFAULT-returning copy routines in this file.
 */
Entry(pmap_safe_read)
	RECOVERY_SECTION
	RECOVER(_pmap_safe_read_fail)
	movq	(%rdi), %rcx		/* read the quad... */
	mov	%rcx, (%rsi)		/* ...and store it */
	mov	$1, %eax		/* return 1 for success */
	ret
_pmap_safe_read_fail:
	xor	%eax, %eax		/* return 0 for failure */
	ret

/*
 * 2-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy2)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movw	(%rdi), %cx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movw	%cx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

/*
 * 4-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy4)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movl	(%rdi), %ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movl	%ecx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

/*
 * 8-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy8)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movq	(%rdi), %rcx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movq	%rcx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */
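
/*
 * ml_copy_phys() picks one of the fixed-width copies above so that a
 * 2-, 4- or 8-byte physical access is performed as a single, naturally
 * aligned transaction rather than a byte loop. A hedged sketch of the
 * dispatch (the real routine also translates the physical addresses
 * before copying):
 *
 *	switch (size) {
 *	case 2:		err = _bcopy2(src, dst);	break;
 *	case 4:		err = _bcopy4(src, dst);	break;
 *	case 8:		err = _bcopy8(src, dst);	break;
 *	default:	err = _bcopy(src, dst, size);	break;
 *	}
 */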


/*
 * Copyin string from user/kern address space.
 * rdi: source address
 * rsi: destination address
 * rdx: max byte count
 * rcx: actual byte count (OUT)
 */
Entry(_bcopystr)
	pushq	%rdi			/* save original source pointer */
	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */

	xorl	%eax,%eax		/* set to 0 here so that the high 24 bits */
					/* are 0 for the testl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(_bcopystr_fail)		/* copy bytes... */
	movb	(%rsi),%al
	incq	%rsi
	testq	%rdi,%rdi		/* if kernel address is NULL, */
	jz	3f			/* skip the store (count only) */
	movb	%al,(%rdi)		/* copy the byte */
	incq	%rdi
3:
	testl	%eax,%eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 already in %eax */
	decq	%rdx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full, copy another byte */
	movl	$(ENAMETOOLONG),%eax	/* buffer full, no \0: ENAMETOOLONG */
4:
	cmpq	$0,%rcx			/* OUT len pointer may be NULL... */
	jz	_bcopystr_ret		/* ...if so, skip storing the length */
	subq	(%rsp),%rsi		/* length = current - original source */
	movq	%rsi,(%rcx)		/* set OUT arg to xfer len (incl. NUL) */
_bcopystr_ret:
	popq	%rdi			/* restore registers */
	ret				/* and return */

_bcopystr_fail:
	popq	%rdi			/* restore registers */
	movl	$(EFAULT),%eax		/* return error for failure */
	ret
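
/*
 * Typical use from C -- a hedged sketch in the style of copyinstr():
 * a NULL destination counts the string without storing it; on return
 * the OUT count (if its pointer is non-NULL) is the number of bytes
 * transferred, including the terminating NUL:
 *
 *	size_t done;
 *	int err = _bcopystr(user_src, kbuf, maxlen, &done);
 *	// err: 0, ENAMETOOLONG (no NUL within maxlen) or EFAULT
 */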

/*
 * Copyin 32 or 64 bit aligned word as a single transaction
 * rdi: source address (user)
 * rsi: destination address (kernel)
 * rdx: size (4 or 8)
 */
Entry(_copyin_word)
	pushq	%rbp			/* Save registers */
	movq	%rsp, %rbp
	cmpl	$0x4, %edx		/* If size = 4 */
	je	L_copyin_word_4		/* handle 32-bit load */
	movl	$(EINVAL), %eax		/* Set up error status */
	cmpl	$0x8, %edx		/* If size != 8 */
	jne	L_copyin_word_exit	/* exit with error */
	RECOVERY_SECTION
	RECOVER(L_copyin_word_fail)	/* Set up recovery handler for next instruction */
	movq	(%rdi), %rax		/* Load quad from user */
	jmp	L_copyin_word_store
L_copyin_word_4:
	RECOVERY_SECTION
	RECOVER(L_copyin_word_fail)	/* Set up recovery handler for next instruction */
	movl	(%rdi), %eax		/* Load long from user (zero-extends) */
L_copyin_word_store:
	movq	%rax, (%rsi)		/* Store to kernel */
	xorl	%eax, %eax		/* Return success */
L_copyin_word_exit:
	popq	%rbp			/* Restore registers */
	retq				/* Return */

L_copyin_word_fail:
	movl	$(EFAULT), %eax		/* Return error for failure */
	popq	%rbp			/* Restore registers */
	retq				/* Return */
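
/*
 * Typical use from C -- a hedged sketch: the copy must be 4 or 8 bytes
 * and naturally aligned so that it issues as a single load; note that
 * even the 4-byte path stores a zero-extended quad, so the destination
 * must be a full uint64_t:
 *
 *	uint64_t value;
 *	int err = _copyin_word(user_addr, &value, sizeof(uint64_t));
 *	// err: 0, EINVAL (size not 4 or 8) or EFAULT
 */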


/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END


/*
 * Vector here on any exception at startup prior to switching to
 * the kernel's idle page-tables and installing the kernel master IDT.
 */
Entry(vstart_trap_handler)
	POSTCODE(BOOT_TRAP_HLT)
	hlt