/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <debug.h>
#include <mach_kdp.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>
#include <config_dtrace.h>

#define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

/*
 * Fault recovery.
 */

#ifdef __MACHO__
#define RECOVERY_SECTION        .section __VECTORS, __recover
#else
#define RECOVERY_SECTION        .text
#endif

#define RECOVER_TABLE_START     \
        .align 3                ; \
        .globl EXT(recover_table) ;\
LEXT(recover_table)             ;\
        .text

#define RECOVER(addr)           \
        .align 3;               \
        .quad 9f                ;\
        .quad addr              ;\
        .text                   ;\
9:

#define RECOVER_TABLE_END       \
        .align 3                ;\
        .globl EXT(recover_table_end) ;\
LEXT(recover_table_end)         ;\
        .text

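/*
 * Each RECOVER() use emits an 8-byte-aligned pair of pointers into the
 * __VECTORS,__recover section: the address of the instruction that follows
 * it (the local "9:" label) and the address to resume at if that
 * instruction faults.  A rough C view of one table entry and of how a
 * kernel trap handler might consult it; this is a sketch only, and the
 * names recovery_t/fault_addr/recover_addr/set_rip are illustrative rather
 * than taken from this file:
 *
 *      typedef struct {
 *              uintptr_t fault_addr;    // address of the guarded instruction
 *              uintptr_t recover_addr;  // where to resume after a fault
 *      } recovery_t;
 *
 *      extern recovery_t recover_table[], recover_table_end[];
 *
 *      for (recovery_t *rp = recover_table; rp < recover_table_end; rp++) {
 *              if (faulting_rip == rp->fault_addr) {
 *                      set_rip(state, rp->recover_addr);  // resume at handler
 *                      return;                            // fault absorbed
 *              }
 *      }
 */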
/*
 * Allocate recovery and table.
 */
        RECOVERY_SECTION
        RECOVER_TABLE_START

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
        movl %edi, %ecx
        movq %rdx, %rdi
        RECOVERY_SECTION
        RECOVER(rdmsr_fail)
        rdmsr
        movl %eax, (%rsi)
        movl %edx, (%rdi)
        xorl %eax, %eax
        ret

rdmsr_fail:
        movq $1, %rax
        ret
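
/*
 * Callers use the nonzero return to detect an MSR access that faults
 * (e.g. a #GP on an unimplemented MSR) instead of taking an unhandled
 * trap.  A hedged usage sketch; the surrounding policy is illustrative:
 *
 *      uint32_t lo, hi;
 *      if (rdmsr_carefully(msr, &lo, &hi) != 0) {
 *              // MSR could not be read on this CPU; fall back gracefully
 *      }
 */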
/*
 * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
 */

ENTRY(rdmsr64_carefully)
        movl %edi, %ecx
        RECOVERY_SECTION
        RECOVER(rdmsr64_carefully_fail)
        rdmsr
        movl %eax, (%rsi)
        movl %edx, 4(%rsi)
        xorl %eax, %eax
        ret
rdmsr64_carefully_fail:
        movl $1, %eax
        ret
/*
 * int wrmsr_carefully(uint32_t msr, uint64_t val);
 */

ENTRY(wrmsr_carefully)
        movl %edi, %ecx
        movl %esi, %eax
        shr $32, %rsi
        movl %esi, %edx
        RECOVERY_SECTION
        RECOVER(wrmsr_fail)
        wrmsr
        xorl %eax, %eax
        ret
wrmsr_fail:
        movl $1, %eax
        ret
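
/*
 * The 64-bit variants follow the same convention: 0 on success, nonzero if
 * the rdmsr/wrmsr itself faulted.  A hedged sketch of probing and then
 * updating an MSR; the bit manipulated and the error policy are
 * illustrative only:
 *
 *      uint64_t val;
 *      if (rdmsr64_carefully(msr, &val) == 0) {
 *              val |= feature_bit;                // hypothetical bit
 *              (void) wrmsr_carefully(msr, val);  // failure ignored here
 *      }
 */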

#if DEBUG
#ifndef TERI
#define TERI 1
#endif
#endif

#if TERI
        .globl EXT(thread_exception_return_internal)
#else
        .globl EXT(thread_exception_return)
#endif
        .globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
        call EXT(dtrace_thread_bootstrap)
#endif

#if TERI
LEXT(thread_exception_return_internal)
#else
LEXT(thread_exception_return)
#endif
        cli
        xorl %ecx, %ecx                 /* don't check if we're in the PFZ */
        jmp EXT(return_from_trap)
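
/*
 * When TERI is defined (always the case on DEBUG kernels, per the block
 * above) the entry above is exported as thread_exception_return_internal;
 * the conditional suggests that a checking wrapper named
 * thread_exception_return is then provided elsewhere in C and enters
 * through this internal label.  That wrapper is not part of this file, so
 * this description is inferred from the #if TERI plumbing alone.
 */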

/*
 * Copyin/out from user/kernel address space.
 * rdi: source address
 * rsi: destination address
 * rdx: byte count (in fact, always < 64MB -- see copyio)
 */
Entry(_bcopy)
        xchg %rdi, %rsi                 /* source %rsi, dest %rdi */

        cld                             /* count up */
        mov %rdx, %rcx                  /* move by longwords first */
        shr $3, %rcx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        rep
        movsq                           /* move longwords */

        movl %edx, %ecx                 /* now move remaining bytes */
        andl $7, %ecx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        rep
        movsb

        xorl %eax,%eax                  /* return 0 for success */
        ret                             /* and return */

_bcopy_fail:
        movl $(EFAULT),%eax             /* return error for failure */
        ret
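
/*
 * With the argument order documented above (%rdi = source, %rsi =
 * destination, %rdx = count), _bcopy moves 8-byte chunks with rep movsq,
 * then the 0-7 leftover bytes with rep movsb, and returns 0 on success or
 * EFAULT if either guarded string move faults.  In C terms it behaves
 * roughly like the sketch below (a plain memcpy cannot report the fault):
 *
 *      int _bcopy(const void *src, void *dst, size_t len) {
 *              memcpy(dst, src, len);  // every access guarded by RECOVER()
 *              return 0;               // _bcopy_fail returns EFAULT instead
 *      }
 */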

Entry(pmap_safe_read)
        RECOVERY_SECTION
        RECOVER(_pmap_safe_read_fail)
        movq (%rdi), %rcx
        mov %rcx, (%rsi)
        mov $1, %eax
        ret
_pmap_safe_read_fail:
        xor %eax, %eax
        ret
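
/*
 * Note that pmap_safe_read uses the opposite return convention from the
 * EFAULT-returning copy routines in this file: it reads one 64-bit word
 * from the address in %rdi into the buffer at %rsi and returns 1 (TRUE)
 * on success, 0 (FALSE) if the read faulted.
 */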

/*
 * 2-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy2)
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        movw (%rdi), %cx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        movw %cx, (%rsi)

        xorl %eax,%eax                  /* return 0 for success */
        ret                             /* and return */

/*
 * 4-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy4)
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        movl (%rdi), %ecx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        mov %ecx, (%rsi)

        xorl %eax,%eax                  /* return 0 for success */
        ret                             /* and return */

/*
 * 8-byte copy used by ml_copy_phys().
 * rdi: source address
 * rsi: destination address
 */
Entry(_bcopy8)
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        movq (%rdi), %rcx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        mov %rcx, (%rsi)

        xorl %eax,%eax                  /* return 0 for success */
        ret                             /* and return */
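
/*
 * These fixed-width helpers let ml_copy_phys() perform a 2-, 4- or 8-byte
 * transfer as a single guarded load and store rather than a byte loop.
 * A hedged sketch of the size dispatch one would expect in the C caller;
 * the real ml_copy_phys lives in the machine-dependent C code and this is
 * illustrative only:
 *
 *      switch (nbytes) {
 *      case 2:  err = _bcopy2(src, dst);        break;
 *      case 4:  err = _bcopy4(src, dst);        break;
 *      case 8:  err = _bcopy8(src, dst);        break;
 *      default: err = _bcopy(src, dst, nbytes); break;
 *      }
 */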


/*
 * Copyin string from user/kern address space.
 * rdi: source address
 * rsi: destination address
 * rdx: max byte count
 * rcx: actual byte count (OUT)
 */
Entry(_bcopystr)
        pushq %rdi
        xchgq %rdi, %rsi                /* source %rsi, dest %rdi */

        xorl %eax,%eax                  /* set to 0 here so that the high 24 bits */
                                        /* are 0 for the testl against 0 */
2:
        RECOVERY_SECTION
        RECOVER(_bcopystr_fail)         /* copy bytes... */
        movb (%rsi),%al
        incq %rsi
        testq %rdi,%rdi                 /* if kernel address is NULL... */
        jz 3f                           /* ...skip the store */
        movb %al,(%rdi)                 /* copy the byte */
        incq %rdi
3:
        testl %eax,%eax                 /* did we just stuff the 0-byte? */
        jz 4f                           /* yes, return 0 already in %eax */
        decq %rdx                       /* decrement #bytes left in buffer */
        jnz 2b                          /* buffer not full, copy another byte */
        movl $(ENAMETOOLONG),%eax       /* buffer full, no \0: ENAMETOOLONG */
4:
        cmpq $0,%rcx                    /* get OUT len ptr */
        jz _bcopystr_ret                /* if null, just return */
        subq (%rsp),%rsi
        movq %rsi,(%rcx)                /* else set OUT arg to xfer len */
        popq %rdi                       /* restore registers */
_bcopystr_ret:
        ret                             /* and return */

_bcopystr_fail:
        popq %rdi                       /* restore registers */
        movl $(EFAULT),%eax             /* return error for failure */
        ret
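
/*
 * In C terms _bcopystr behaves roughly like the sketch below: copy bytes
 * (including the terminating NUL) until the NUL has been copied or maxlen
 * bytes have been consumed, optionally report the number of bytes
 * transferred, and return 0, ENAMETOOLONG, or EFAULT if a guarded access
 * faults.  A NULL destination means the source is only scanned.  Also note
 * that the early-return path taken when the OUT pointer is NULL returns
 * without popping the %rdi saved at entry, so callers appear to be
 * expected to always pass a non-NULL count pointer.  Sketch only:
 *
 *      int _bcopystr(const char *src, char *dst, size_t maxlen, size_t *done) {
 *              size_t n = 0;
 *              for (;;) {
 *                      char c = src[n];                // guarded load
 *                      if (dst != NULL) dst[n] = c;    // guarded store
 *                      n++;
 *                      if (c == 0) break;              // NUL copied: success
 *                      if (n == maxlen) {              // buffer full, no NUL
 *                              if (done != NULL) *done = n;
 *                              return ENAMETOOLONG;
 *                      }
 *              }
 *              if (done != NULL) *done = n;
 *              return 0;
 *      }
 */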

/*
 * Copyin 32 bit aligned word as a single transaction
 * rdi: source address (user)
 * rsi: destination address (kernel)
 */
Entry(_copyin_atomic32)
        pushq %rbp                      /* Save registers */
        movq %rsp, %rbp
        RECOVERY_SECTION
        RECOVER(L_copyin_atomic32_fail) /* Set up recovery handler for next instruction */
        movl (%rdi), %eax               /* Load long from user */
        movl %eax, (%rsi)               /* Store to kernel */
        xorl %eax, %eax                 /* Return success */
        popq %rbp                       /* Restore registers */
        retq                            /* Return */

L_copyin_atomic32_fail:
        movl $(EFAULT), %eax            /* Return error for failure */
        popq %rbp                       /* Restore registers */
        retq                            /* Return */

/*
 * Copyin 64 bit aligned word as a single transaction
 * rdi: source address (user)
 * rsi: destination address (kernel)
 */
Entry(_copyin_atomic64)
        pushq %rbp                      /* Save registers */
        movq %rsp, %rbp
        RECOVERY_SECTION
        RECOVER(L_copyin_atomic64_fail) /* Set up recovery handler for next instruction */
        movq (%rdi), %rax               /* Load quad from user */
        movq %rax, (%rsi)               /* Store to kernel */
        xorl %eax, %eax                 /* Return success */
        popq %rbp                       /* Restore registers */
        retq                            /* Return */

L_copyin_atomic64_fail:
        movl $(EFAULT), %eax            /* Return error for failure */
        popq %rbp                       /* Restore registers */
        retq                            /* Return */
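
/*
 * The _copyin_atomic* routines perform the user access as a single
 * naturally-aligned load, so the kernel can never observe a torn value
 * from a concurrently-written user word; only that single user load,
 * the instruction following RECOVER(), is covered by a recovery entry.
 * A hedged usage sketch through a C wrapper (the wrapper name is
 * illustrative):
 *
 *      uint64_t value;
 *      if (copyin_atomic64(user_addr, &value) == EFAULT) {
 *              // user address was unmapped or not readable
 *      }
 */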

/*
 * Copyout 32 bit aligned word as a single transaction
 * rdi: source address (kernel)
 * rsi: destination address (user)
 */
Entry(_copyout_atomic32)
        pushq %rbp                      /* Save registers */
        movq %rsp, %rbp
        movl (%rdi), %eax               /* Load long from kernel */
        RECOVERY_SECTION
        RECOVER(L_copyout_atomic32_fail) /* Set up recovery handler for next instruction */
        movl %eax, (%rsi)               /* Store long to user */
        xorl %eax, %eax                 /* Return success */
        popq %rbp                       /* Restore registers */
        retq                            /* Return */

L_copyout_atomic32_fail:
        movl $(EFAULT), %eax            /* Return error for failure */
        popq %rbp                       /* Restore registers */
        retq                            /* Return */

/*
 * Copyout 64 bit aligned word as a single transaction
 * rdi: source address (kernel)
 * rsi: destination address (user)
 */
Entry(_copyout_atomic64)
        pushq %rbp                      /* Save registers */
        movq %rsp, %rbp
        movq (%rdi), %rax               /* Load quad from kernel */
        RECOVERY_SECTION
        RECOVER(L_copyout_atomic64_fail) /* Set up recovery handler for next instruction */
        movq %rax, (%rsi)               /* Store quad to user */
        xorl %eax, %eax                 /* Return success */
        popq %rbp                       /* Restore registers */
        retq                            /* Return */

L_copyout_atomic64_fail:
        movl $(EFAULT), %eax            /* Return error for failure */
        popq %rbp                       /* Restore registers */
        retq                            /* Return */
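
/*
 * In the copyout variants the kernel-side load is performed before the
 * RECOVER() entry is emitted: only the store to the user address can
 * legitimately fault, so only that single instruction is covered by the
 * recovery table.
 */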


/*
 * Done with recovery table.
 */
        RECOVERY_SECTION
        RECOVER_TABLE_END


/*
 * Vector here on any exception at startup prior to switching to
 * the kernel's idle page-tables and installing the kernel master IDT.
 */
Entry(vstart_trap_handler)
        POSTCODE(BOOT_TRAP_HLT)
        hlt