[apple/xnu.git] / osfmk / i386 / locore.s
/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <stat_time.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>

#include <config_dtrace.h>

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDESHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
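
/*
 * Illustrative note (not part of the original source): the three .set
 * symbols above are pure address arithmetic on the recursive page-table
 * slot.  A minimal C sketch of the same arithmetic follows; the constant
 * values chosen here are placeholders for illustration only, not the
 * values this configuration of xnu actually uses.
 *
 *	#include <stdio.h>
 *
 *	// Placeholder values -- assumptions for the sketch, not xnu's.
 *	#define PTDPTDI  0x3ffu   // index of the self-referencing PDE slot
 *	#define PDESHIFT 22       // log2(bytes mapped by one PDE)
 *	#define NBPG     4096u    // bytes per page
 *	#define PDESIZE  4u       // bytes per page-directory entry
 *
 *	int main(void)
 *	{
 *		unsigned PTmap  = PTDPTDI << PDESHIFT;      // window of all PTEs
 *		unsigned PTD    = PTmap + PTDPTDI * NBPG;   // page directory viewed as PTEs
 *		unsigned PTDpde = PTD + PTDPTDI * PDESIZE;  // PDE that points at the PD itself
 *		printf("PTmap=%#x PTD=%#x PTDpde=%#x\n", PTmap, PTD, PTDpde);
 *		return 0;
 *	}
 */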

#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define	ETEXT_ADDR	(EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define	ETEXT_ADDR	$ EXT(etext)
#endif


	.text
locore_start:

/*
 * Fault recovery.
 */

#ifdef __MACHO__
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif

#define	RECOVER_TABLE_START		\
	.align	2			;	\
	.globl	EXT(recover_table)	;\
LEXT(recover_table)			;\
	.text

#define	RECOVER(addr)		\
	.align	2;		\
	.long	9f		;\
	.long	addr		;\
	.text			;\
9:

#define	RECOVER_TABLE_END		\
	.align	2			;\
	.globl	EXT(recover_table_end)	;\
LEXT(recover_table_end)			;\
	.long	0	/* workaround see comment below */ ;\
	.text	;

/* TODO FIXME
 * The .long 0 is to work around a linker bug (insert radar# here).
 * Basically, recover_table_end has zero size and bumps up right against saved_esp in acpi_wakeup.s.
 * recover_table_end is in __RECOVER,__vectors and saved_esp is in __SLEEP,__data, but they're right next to each
 * other, so the linker combines them and incorrectly relocates everything referencing recover_table_end to point
 * into the SLEEP section.
 */
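
/*
 * Illustrative note (not part of the original source): each RECOVER(addr)
 * use emits a { fault_addr, recover_addr } pair into the recovery section,
 * where fault_addr is the address of the instruction that immediately
 * follows the macro and recover_addr is the label to resume at if that
 * instruction faults.  A hedged C sketch of how a trap handler could
 * consult such a table (names here are illustrative, not xnu's exact code):
 *
 *	#include <stdint.h>
 *	#include <stdbool.h>
 *
 *	struct recovery {
 *		uint32_t fault_addr;    // address of the potentially faulting instruction
 *		uint32_t recover_addr;  // where to resume if it faults
 *	};
 *
 *	extern struct recovery recover_table[], recover_table_end[];
 *
 *	// Returns true and rewrites the saved EIP if the fault is recoverable.
 *	static bool try_fault_recovery(uint32_t *saved_eip)
 *	{
 *		for (struct recovery *rp = recover_table; rp < recover_table_end; rp++) {
 *			if (*saved_eip == rp->fault_addr) {
 *				*saved_eip = rp->recover_addr;
 *				return true;
 *			}
 *		}
 *		return false;
 *	}
 */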

/*
 * Allocate the fault-recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START


/*
 * Called as a function, makes the current thread
 * return from the kernel as if from an exception.
 * We will consult with DTrace if this is a
 * newly created thread and we need to fire a probe.
 */

	.globl	EXT(thread_exception_return)
	.globl	EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	call	EXT(dtrace_thread_bootstrap)
#endif

LEXT(thread_exception_return)
	cli
	xorl	%ecx,%ecx		/* don't check if in the PFZ */
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(return_from_trap32)
	jmp	EXT(return_from_trap)


/*
 * Utility routines.
 */

/*
 * Copy from user/kernel address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: byte count
 */
Entry(copyinphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%ds

Entry(copyinphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%es
	jmp	copyin_common

Entry(copyin_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%ds

Entry(copyin_kern)

copyin_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb
	xorl	%eax,%eax		/* return 0 for success */
copyin_ret:
	mov	%ss,%cx			/* restore kernel data and extended segments */
	mov	%cx,%ds
	mov	%cx,%es

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyin_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyin_ret		/* pop frame and return */
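
/*
 * Illustrative note (not part of the original source): copyin_common moves
 * the bulk of the buffer as longwords (count >> 2 iterations of "rep movsl")
 * and then the remaining count & 3 bytes with "rep movsb"; if either string
 * move faults, the recovery table redirects execution to copyin_fail, which
 * returns EFAULT.  A hedged C sketch of just the length-splitting step
 * (window/segment selection and fault recovery are omitted):
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *
 *	static void copy_split(void *dst, const void *src, size_t count)
 *	{
 *		size_t nlongs = count >> 2;     // whole 32-bit words, like rep movsl
 *		size_t tail   = count & 3;      // remaining 0..3 bytes, like rep movsb
 *		memcpy(dst, src, nlongs * 4);
 *		memcpy((char *)dst + nlongs * 4,
 *		       (const char *)src + nlongs * 4, tail);
 *	}
 */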



/*
 * Copy string from user/kern address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: max byte count
 * arg3: actual byte count (OUT)
 */
Entry(copyinstr_kern)
	mov	%ds,%cx
	jmp	copyinstr_common

Entry(copyinstr_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */

copyinstr_common:
	mov	%cx,%fs

	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	xorl	%eax,%eax		/* set to 0 here so that the high 24 bits */
					/* are 0 for the testl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(copystr_fail)		/* copy bytes... */
	movb	%fs:(%esi),%al
	incl	%esi
	testl	%edi,%edi		/* if kernel address is NULL... */
	jz	3f			/* ...skip storing the byte */
	movb	%al,(%edi)		/* copy the byte */
	incl	%edi
3:
	testl	%eax,%eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 status already in %eax */
	decl	%edx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full so copy in another byte */
	movl	$(ENAMETOOLONG),%eax	/* buffer full but no 0-byte: ENAMETOOLONG */
4:
	movl	8+S_ARG3,%edi		/* get OUT len ptr */
	cmpl	$0,%edi
	jz	copystr_ret		/* if null, just return */
	subl	8+S_ARG0,%esi
	movl	%esi,(%edi)		/* else set OUT arg to xfer len */
copystr_ret:
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copystr_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copystr_ret		/* pop frame and return */
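
/*
 * Illustrative note (not part of the original source): the loop above copies
 * one byte at a time until it stores the terminating 0-byte (return 0), runs
 * out of buffer (return ENAMETOOLONG), or takes a fault (return EFAULT); the
 * optional OUT count includes the terminating NUL, and a NULL destination
 * just scans without storing.  A hedged C sketch of the same contract
 * (names are assumptions, not xnu's):
 *
 *	#include <errno.h>
 *	#include <stddef.h>
 *
 *	static int copystr_sketch(const char *src, char *dst, size_t max, size_t *done)
 *	{
 *		size_t n = 0;
 *		int err = ENAMETOOLONG;         // assume overflow until the NUL is seen
 *		while (n < max) {
 *			char c = src[n];        // a fault here would become EFAULT
 *			if (dst != NULL)
 *				dst[n] = c;
 *			n++;
 *			if (c == '\0') {        // copied the terminator: success
 *				err = 0;
 *				break;
 *			}
 *		}
 *		if (done != NULL)
 *			*done = n;              // bytes transferred, including the NUL
 *		return err;
 *	}
 */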


/*
 * Copy to user/kern address space.
 * arg0: kernel address
 * arg1: window offset or kernel address
 * arg2: byte count
 */
ENTRY(copyoutphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyoutphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%ds
	jmp	copyout_common

ENTRY(copyout_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyout_kern)

copyout_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - kernel address */
	movl	8+S_ARG1,%edi		/* get destination - window offset or kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsl
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsb				/* move */
	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%cx			/* restore kernel segment */
	mov	%cx,%es
	mov	%cx,%ds

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */
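
/*
 * Illustrative note (not part of the original source): from C, these copy
 * entry points behave as plain functions that return 0 on success or EFAULT
 * if the copy touched an inaccessible address.  A hedged usage sketch -- the
 * prototypes below are illustrative stand-ins, not the exact xnu
 * declarations:
 *
 *	#include <errno.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	// Illustrative prototypes only; not xnu's actual declarations.
 *	extern int copyin_user(uintptr_t window_offset, void *kaddr, size_t len);
 *	extern int copyout_user(const void *kaddr, uintptr_t window_offset, size_t len);
 *
 *	// Copy a user buffer in, transform it, and copy the result back out.
 *	static int double_each_byte(uintptr_t uaddr, size_t len)
 *	{
 *		uint8_t buf[64];
 *		if (len > sizeof(buf))
 *			return EINVAL;
 *		int err = copyin_user(uaddr, buf, len);    // 0 or EFAULT
 *		if (err != 0)
 *			return err;
 *		for (size_t i = 0; i < len; i++)
 *			buf[i] = (uint8_t)(buf[i] * 2);
 *		return copyout_user(buf, uaddr, len);      // 0 or EFAULT
 *	}
 */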


/*
 * I/O registers must not be used on slave processors (no AT bus).
 */
#define	ILL_ON_SLAVE


#if	MACH_ASSERT

#define	ARG0		B_ARG0
#define	ARG1		B_ARG1
#define	ARG2		B_ARG2
#define	PUSH_FRAME	FRAME
#define	POP_FRAME	EMARF

#else	/* MACH_ASSERT */

#define	ARG0		S_ARG0
#define	ARG1		S_ARG1
#define	ARG2		S_ARG2
#define	PUSH_FRAME
#define	POP_FRAME

#endif	/* MACH_ASSERT */


/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl	S_ARG0, %ecx
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr
	movl	S_ARG1, %ecx
	movl	%eax, (%ecx)
	movl	S_ARG2, %ecx
	movl	%edx, (%ecx)
	movl	$0, %eax
	ret

rdmsr_fail:
	movl	$1, %eax
	ret
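
/*
 * Illustrative note (not part of the original source): rdmsr_carefully reads
 * the MSR selected by its first argument and stores EAX/EDX (the low and high
 * 32 bits) through the two pointers, returning 0; if the rdmsr instruction
 * faults (for example, on an unimplemented MSR), the recovery table lands on
 * rdmsr_fail and the routine returns 1 instead.  A hedged caller-side sketch:
 *
 *	#include <stdint.h>
 *
 *	extern int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi);
 *
 *	static int read_msr_or_default(uint32_t msr, uint64_t *value)
 *	{
 *		uint32_t lo, hi;
 *		if (rdmsr_carefully(msr, &lo, &hi) != 0) {
 *			*value = 0;             // MSR not readable on this CPU
 *			return 1;
 *		}
 *		*value = ((uint64_t)hi << 32) | lo;
 *		return 0;
 *	}
 */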

/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END

	.data
dr_msk:
	.long	~0x000f0003
	.long	~0x00f0000c
	.long	~0x0f000030
	.long	~0xf00000c0
ENTRY(dr_addr)
	.long	0,0,0,0
	.long	0,0,0,0

	.text

/*
 * ffs(mask)
 */
ENTRY(ffs)
	bsfl	S_ARG0, %eax
	jz	0f
	incl	%eax
	ret
0:	xorl	%eax, %eax
	ret
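
/*
 * Illustrative note (not part of the original source): ffs() returns the
 * 1-based index of the least significant set bit, or 0 when the mask is 0
 * (bsfl sets ZF and the zero path is taken).  A hedged C sketch of the same
 * contract:
 *
 *	static int ffs_sketch(unsigned int mask)
 *	{
 *		if (mask == 0)
 *			return 0;               // no bit set
 *		int index = 1;                  // bit 0 reports as 1
 *		while ((mask & 1u) == 0) {
 *			mask >>= 1;
 *			index++;
 *		}
 *		return index;
 *	}
 */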

/*
 * cpu_shutdown()
 * Force reboot
 */

null_idtr:
	.word	0
	.long	0

Entry(cpu_shutdown)
	lidt	null_idtr	/* disable the interrupt handler */
	xor	%ecx,%ecx	/* generate a divide by zero */
	div	%ecx,%eax	/* reboot now */
	ret			/* this will "never" be executed */


/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btsl	%ecx, (%eax)		/* set bit */
	ret

/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btrl	%ecx, (%eax)		/* clear bit */
	ret

/*
 * ffsbit(int *s) - find first set bit in bit string
 */
ENTRY(ffsbit)
	movl	S_ARG0, %ecx		/* address */
	movl	$0, %edx		/* base offset */
0:
	bsfl	(%ecx), %eax		/* check argument bits */
	jnz	1f			/* found bit, return */
	addl	$4, %ecx		/* increment address */
	addl	$32, %edx		/* increment offset */
	jmp	0b			/* try again */
1:
	addl	%edx, %eax		/* return offset */
	ret

/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string
 */

ENTRY(testbit)
	movl	S_ARG0,%eax		/* Get the bit to test */
	movl	S_ARG1,%ecx		/* get the array string */
	btl	%eax,(%ecx)
	sbbl	%eax,%eax
	ret

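/*
 * Illustrative note (not part of the original source): setbit, clrbit,
 * testbit and ffsbit all treat memory as one long bit string made of 32-bit
 * words; bts/btr/bt with a register bit-offset index past the addressed word,
 * so the bit number is not limited to 0..31.  testbit's btl/sbbl pair yields
 * 0 if the bit was clear and -1 if it was set, and ffsbit assumes at least
 * one bit is set somewhere (it keeps scanning otherwise).  A hedged C sketch
 * of the same semantics:
 *
 *	#include <stdint.h>
 *
 *	static void setbit_sketch(int bitno, uint32_t *s)
 *	{
 *		s[bitno / 32] |= 1u << (bitno % 32);
 *	}
 *
 *	static void clrbit_sketch(int bitno, uint32_t *s)
 *	{
 *		s[bitno / 32] &= ~(1u << (bitno % 32));
 *	}
 *
 *	static int testbit_sketch(int bitno, const uint32_t *s)
 *	{
 *		// 0 if clear, -1 if set, matching the btl/sbbl idiom
 *		return (s[bitno / 32] & (1u << (bitno % 32))) ? -1 : 0;
 *	}
 *
 *	static int ffsbit_sketch(const uint32_t *s)
 *	{
 *		// Caller must guarantee some bit is set, as the assembly does.
 *		int base = 0;
 *		while (*s == 0) {
 *			s++;
 *			base += 32;
 *		}
 *		uint32_t w = *s;
 *		int bit = 0;
 *		while ((w & 1u) == 0) {
 *			w >>= 1;
 *			bit++;
 *		}
 *		return base + bit;	// 0-based, unlike ffs() above
 *	}
 */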