/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdp.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>

#include <config_dtrace.h>

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
        .globl  _PTmap,_PTD,_PTDpde
        .set    _PTmap,(PTDPTDI << PDESHIFT)
        .set    _PTD,_PTmap + (PTDPTDI * NBPG)
        .set    _PTDpde,_PTD + (PTDPTDI * PDESIZE)
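/*
 * A sketch of how the recursive slot is used, assuming the usual
 * recursive-mapping layout (the names are the ones defined above):
 * because one page-directory entry points back at the page directory
 * itself, every page table appears as ordinary data inside the PTmap
 * window, the page directory itself appears at _PTD, and _PTDpde is the
 * directory entry that maps the directory.  The PTE covering a virtual
 * address va can therefore be read at roughly
 * _PTmap + (va / page size) * (size of a PTE); the exact constants come
 * from the pmap headers (PTDPTDI, PDESHIFT, NBPG, PDESIZE).
 */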

#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define ETEXT_ADDR      (EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define ETEXT_ADDR      $ EXT(etext)
#endif


        .text
locore_start:

/*
 * Fault recovery.
 */
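/*
 * In outline: each RECOVER(addr) below emits a two-word entry into the
 * recovery section, pairing the address of the instruction that may fault
 * (the "9:" label that follows the macro expansion) with the label to
 * resume at.  When one of the copy routines faults, the kernel trap
 * handler can search recover_table through recover_table_end for the
 * faulting EIP and, on a match, resume execution at the paired recovery
 * address instead of treating the fault as fatal.
 */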

#ifdef __MACHO__
#define RECOVERY_SECTION        .section __VECTORS, __recover
#else
#define RECOVERY_SECTION        .text
#endif

#define RECOVER_TABLE_START     \
        .align  2               ; \
        .globl  EXT(recover_table) ;\
LEXT(recover_table)             ;\
        .text

#define RECOVER(addr)           \
        .align  2;              \
        .long   9f              ;\
        .long   addr            ;\
        .text                   ;\
9:

#define RECOVER_TABLE_END       \
        .align  2               ;\
        .globl  EXT(recover_table_end) ;\
LEXT(recover_table_end)         ;\
        .long 0 /* workaround see comment below */ ;\
        .text                   ;

/* TODO FIXME
 * The .long 0 is to work around a linker bug (insert radar# here).
 * Basically, recover_table_end has zero size and bumps up right against
 * saved_esp in acpi_wakeup.s.  recover_table_end is in __VECTORS,__recover
 * and saved_esp is in __SLEEP,__data, but they are right next to each
 * other, so the linker combines them and incorrectly relocates everything
 * referencing recover_table_end to point into the SLEEP section.
 */

/*
 * Allocate the recovery table.
 */
        RECOVERY_SECTION
        RECOVER_TABLE_START


/*
 * Called as a function, this makes the current thread
 * return from the kernel as if from an exception.
 * If this is a newly created thread, we consult
 * DTrace so that it can fire its probe.
 */

        .globl  EXT(thread_exception_return)
        .globl  EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
        call    EXT(dtrace_thread_bootstrap)
#endif

LEXT(thread_exception_return)
        cli
        xorl    %ecx,%ecx               /* don't check if in the PFZ */
        cmpl    $0, %gs:CPU_IS64BIT
        je      EXT(return_from_trap32)
        jmp     EXT(return_from_trap)


/*
 * Utility routines.
 */

/*
 * Copy from user/kernel address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: byte count
 */
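/*
 * A sketch of the calling convention these entry points expect, as read
 * from the code below (the C-level wrappers live elsewhere): three 32-bit
 * arguments are passed on the stack, cdecl style -- source, destination,
 * count -- and %eax returns 0 on success or EFAULT if a fault was taken
 * while copying (caught through the recovery table).
 */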
Entry(copyinphys_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%ds

Entry(copyinphys_kern)
        movl    $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
        mov     %cx,%es
        jmp     copyin_common

Entry(copyin_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%ds

Entry(copyin_kern)

copyin_common:
        pushl   %esi
        pushl   %edi                    /* save registers */

        movl    8+S_ARG0,%esi           /* get source - window offset or kernel address */
        movl    8+S_ARG1,%edi           /* get destination - kernel address */
        movl    8+S_ARG2,%edx           /* get count */

        cld                             /* count up */
        movl    %edx,%ecx               /* move by longwords first */
        shrl    $2,%ecx
        RECOVERY_SECTION
        RECOVER(copyin_fail)
        rep
        movsl                           /* move longwords */
        movl    %edx,%ecx               /* now move remaining bytes */
        andl    $3,%ecx
        RECOVERY_SECTION
        RECOVER(copyin_fail)
        rep
        movsb
        xorl    %eax,%eax               /* return 0 for success */
copyin_ret:
        mov     %ss,%cx                 /* restore kernel data and extended segments */
        mov     %cx,%ds
        mov     %cx,%es

        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copyin_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        jmp     copyin_ret              /* pop frame and return */



/*
 * Copy string from user/kern address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: max byte count
 * arg3: actual byte count (OUT)
 */
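/*
 * Return conventions, as read from the code below: %eax is 0 on success,
 * EFAULT if the source faults, or ENAMETOOLONG if the buffer fills before
 * a terminating zero byte is found.  When arg3 is non-NULL it receives the
 * number of bytes scanned (on success this includes the terminating zero
 * byte).  A NULL arg1 makes the routine scan the string without storing it.
 */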
Entry(copyinstr_kern)
        mov     %ds,%cx
        jmp     copyinstr_common

Entry(copyinstr_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */

copyinstr_common:
        mov     %cx,%fs

        pushl   %esi
        pushl   %edi                    /* save registers */

        movl    8+S_ARG0,%esi           /* get source - window offset or kernel address */
        movl    8+S_ARG1,%edi           /* get destination - kernel address */
        movl    8+S_ARG2,%edx           /* get count */

        xorl    %eax,%eax               /* set to 0 here so that the high 24 bits */
                                        /* are 0 for the zero test below */
2:
        RECOVERY_SECTION
        RECOVER(copystr_fail)           /* copy bytes... */
        movb    %fs:(%esi),%al          /* get a byte from the source */
        incl    %esi
        testl   %edi,%edi               /* if the kernel address is NULL... */
        jz      3f                      /* ...skip the store */
        movb    %al,(%edi)              /* copy the byte */
        incl    %edi
3:
        testl   %eax,%eax               /* did we just stuff the 0-byte? */
        jz      4f                      /* yes, return 0 status already in %eax */
        decl    %edx                    /* decrement #bytes left in buffer */
        jnz     2b                      /* buffer not full so copy in another byte */
        movl    $(ENAMETOOLONG),%eax    /* buffer full but no 0-byte: ENAMETOOLONG */
4:
        movl    8+S_ARG3,%edi           /* get OUT len ptr */
        cmpl    $0,%edi
        jz      copystr_ret             /* if null, just return */
        subl    8+S_ARG0,%esi           /* compute number of bytes transferred */
        movl    %esi,(%edi)             /* else set OUT arg to xfer len */
copystr_ret:
        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copystr_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        jmp     copystr_ret             /* pop frame and return */


/*
 * Copy to user/kern address space.
 * arg0: kernel address
 * arg1: window offset or kernel address
 * arg2: byte count
 */
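/*
 * Note on the segment setup below: movs reads through %ds:(%esi) and
 * writes through %es:(%edi), so each entry point loads the window
 * selector it needs into the segment register for the side of the copy
 * that goes through a window, and copyout_ret restores both from %ss.
 * %eax returns 0 on success or EFAULT on a fault caught through the
 * recovery table.
 */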
ENTRY(copyoutphys_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%es

ENTRY(copyoutphys_kern)
        movl    $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
        mov     %cx,%ds
        jmp     copyout_common

ENTRY(copyout_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%es

ENTRY(copyout_kern)

copyout_common:
        pushl   %esi
        pushl   %edi                    /* save registers */

        movl    8+S_ARG0,%esi           /* get source - kernel address */
        movl    8+S_ARG1,%edi           /* get destination - window offset or kernel address */
        movl    8+S_ARG2,%edx           /* get count */

        cld                             /* count up */
        movl    %edx,%ecx               /* move by longwords first */
        shrl    $2,%ecx
        RECOVERY_SECTION
        RECOVER(copyout_fail)
        rep
        movsl
        movl    %edx,%ecx               /* now move remaining bytes */
        andl    $3,%ecx
        RECOVERY_SECTION
        RECOVER(copyout_fail)
        rep
        movsb                           /* move */
        xorl    %eax,%eax               /* return 0 for success */
copyout_ret:
        mov     %ss,%cx                 /* restore kernel segment */
        mov     %cx,%es
        mov     %cx,%ds

        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copyout_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        jmp     copyout_ret             /* pop frame and return */


/*
 * I/O registers must not be used on slave processors (no AT bus).
 */
#define ILL_ON_SLAVE


#if MACH_ASSERT

#define ARG0            B_ARG0
#define ARG1            B_ARG1
#define ARG2            B_ARG2
#define PUSH_FRAME      FRAME
#define POP_FRAME       EMARF

#else /* MACH_ASSERT */

#define ARG0            S_ARG0
#define ARG1            S_ARG1
#define ARG2            S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif /* MACH_ASSERT */


/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
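/*
 * Reads the given MSR with any fault absorbed through the recovery table:
 * on success the low and high halves are stored through *lo and *hi and 0
 * is returned; if the RDMSR faults (for example, #GP on a nonexistent
 * MSR), 1 is returned and the output words are left untouched.
 */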
ENTRY(rdmsr_carefully)
        movl    S_ARG0, %ecx
        RECOVERY_SECTION
        RECOVER(rdmsr_fail)
        rdmsr
        movl    S_ARG1, %ecx
        movl    %eax, (%ecx)
        movl    S_ARG2, %ecx
        movl    %edx, (%ecx)
        movl    $0, %eax
        ret

rdmsr_fail:
        movl    $1, %eax
        ret

/*
 * Done with recovery table.
 */
        RECOVERY_SECTION
        RECOVER_TABLE_END

/*
 * ffs(mask)
 */
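/*
 * Returns the 1-based index of the least significant set bit in the mask,
 * or 0 if the mask is zero (the standard ffs() convention, matching the
 * bsfl/incl sequence below).
 */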
ENTRY(ffs)
        bsfl    S_ARG0, %eax
        jz      0f
        incl    %eax
        ret
0:      xorl    %eax, %eax
        ret

/*
 * cpu_shutdown()
 * Force reboot
 */
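/*
 * With a null IDT loaded, the divide-by-zero fault below cannot be
 * delivered; the resulting double and then triple fault resets the
 * processor, which is what forces the reboot.
 */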

null_idtr:
        .word   0
        .long   0

Entry(cpu_shutdown)
        lidt    null_idtr               /* disable the interrupt handler */
        xor     %ecx,%ecx               /* generate a divide by zero */
        div     %ecx,%eax               /* reboot now */
        ret                             /* this will "never" be executed */


/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
        movl    S_ARG0, %ecx            /* bit number */
        movl    S_ARG1, %eax            /* address */
        btsl    %ecx, (%eax)            /* set bit */
        ret

/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
        movl    S_ARG0, %ecx            /* bit number */
        movl    S_ARG1, %eax            /* address */
        btrl    %ecx, (%eax)            /* clear bit */
        ret

/*
 * ffsbit(int *s) - find first set bit in bit string
 */
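/*
 * Unlike ffs() above, this returns a 0-based bit offset and keeps scanning
 * successive 32-bit words with no length check, so the caller must
 * guarantee that at least one bit is set somewhere in the string.
 */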
ENTRY(ffsbit)
        movl    S_ARG0, %ecx            /* address */
        movl    $0, %edx                /* base offset */
0:
        bsfl    (%ecx), %eax            /* check argument bits */
        jnz     1f                      /* found bit, return */
        addl    $4, %ecx                /* increment address */
        addl    $32, %edx               /* increment offset */
        jmp     0b                      /* try again */
1:
        addl    %edx, %eax              /* return offset */
        ret

/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string
 */
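/*
 * Returns 0 if the bit is clear and -1 (all ones) if it is set: btl copies
 * the selected bit into the carry flag, and sbbl %eax,%eax then yields
 * either 0 or 0xffffffff from that flag.
 */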

ENTRY(testbit)
        movl    S_ARG0,%eax             /* Get the bit to test */
        movl    S_ARG1,%ecx             /* get the array string */
        btl     %eax,(%ecx)
        sbbl    %eax,%eax
        ret
