1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_rt.h>
58 #include <platforms.h>
59 #include <mach_kdb.h>
60 #include <mach_kgdb.h>
61 #include <mach_kdp.h>
62 #include <stat_time.h>
63 #include <mach_assert.h>
64
65 #include <sys/errno.h>
66 #include <i386/asm.h>
67 #include <i386/cpuid.h>
68 #include <i386/eflags.h>
69 #include <i386/lapic.h>
70 #include <i386/rtclock.h>
71 #include <i386/proc_reg.h>
72 #include <i386/trap.h>
73 #include <assym.s>
74 #include <mach/exception_types.h>
75 #include <config_dtrace.h>
76
77 #define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
78 #include <mach/i386/syscall_sw.h>
79
80 #include <i386/mp.h>
81
82
83 #define CLI cli
84 #define STI sti
85
86 /*
87 * PTmap is the recursive pagemap at the top of the virtual address space.
88 * Within PTmap, the page directory can be found (third indirection).
89 */
90 .globl _PTmap,_PTD,_PTDpde
91 .set _PTmap,(PTDPTDI << PDESHIFT)
92 .set _PTD,_PTmap + (PTDPTDI * NBPG)
93 .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
94
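/*
 * Illustrative sketch (not part of the build): with the recursive slot in
 * place, the PTE and PDE mapping any virtual address can be reached through
 * the windows defined above, roughly:
 *
 *	pt_entry_t *pte = &((pt_entry_t *)_PTmap)[va >> PAGE_SHIFT];
 *	pd_entry_t *pde = &((pd_entry_t *)_PTD)[va >> PDESHIFT];
 */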
95 /*
96 * APTmap, APTD is the alternate recursive pagemap.
97 * It's used when modifying another process's page tables.
98 */
99 .globl _APTmap,_APTD,_APTDpde
100 .set _APTmap,(APTDPTDI << PDESHIFT)
101 .set _APTD,_APTmap + (APTDPTDI * NBPG)
102 .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
103
104 #if __MACHO__
105 /* Under Mach-O, etext is a variable which contains
106 * the last text address
107 */
108 #define ETEXT_ADDR (EXT(etext))
109 #else
110 /* Under ELF and other non-Mach-O formats, the address of
111 * etext represents the last text address
112 */
113 #define ETEXT_ADDR $ EXT(etext)
114 #endif
115
116 #define CX(addr,reg) addr(,reg,4)
117
118 /*
119 * The following macros make calls into C code.
120 * They dynamically align the stack to 16 bytes.
121 * Arguments are moved (not pushed) onto the correctly aligned stack.
122 * NOTE: EDI is destroyed in the process, and hence cannot
123 * be directly used as a parameter. Users of this macro must
124 * independently preserve EDI (a non-volatile) if the routine is
125 * intended to be called from C, for instance.
126 */
127
128 #define CCALL(fn) \
129 movl %esp, %edi ;\
130 andl $0xFFFFFFF0, %esp ;\
131 call EXT(fn) ;\
132 movl %edi, %esp
133
134 #define CCALL1(fn, arg1) \
135 movl %esp, %edi ;\
136 subl $4, %esp ;\
137 andl $0xFFFFFFF0, %esp ;\
138 movl arg1, 0(%esp) ;\
139 call EXT(fn) ;\
140 movl %edi, %esp
141
142 #define CCALL2(fn, arg1, arg2) \
143 movl %esp, %edi ;\
144 subl $8, %esp ;\
145 andl $0xFFFFFFF0, %esp ;\
146 movl arg2, 4(%esp) ;\
147 movl arg1, 0(%esp) ;\
148 call EXT(fn) ;\
149 movl %edi, %esp
150
151 /*
152 * CCALL5 is used for callee functions with 3 arguments but
153 * where arg2 (a3:a2) and arg3 (a5:a4) are 64-bit values.
154 */
155 #define CCALL5(fn, a1, a2, a3, a4, a5) \
156 movl %esp, %edi ;\
157 subl $20, %esp ;\
158 andl $0xFFFFFFF0, %esp ;\
159 movl a5, 16(%esp) ;\
160 movl a4, 12(%esp) ;\
161 movl a3, 8(%esp) ;\
162 movl a2, 4(%esp) ;\
163 movl a1, 0(%esp) ;\
164 call EXT(fn) ;\
165 movl %edi, %esp
166
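/*
 * Usage sketch (illustrative, not part of the build): a call such as
 * CCALL1(user_trap, %ebx), as used below, expands to roughly the following
 * C-level sequence; %edi holds the only copy of the original stack pointer,
 * hence the warning above about preserving it.
 *
 *	saved_sp = sp;			// movl %esp, %edi
 *	sp -= 4;			// room for one argument
 *	sp &= ~0xF;			// 16-byte align for the callee
 *	*(uint32_t *)sp = arg1;		// movl arg1, 0(%esp)
 *	user_trap(arg1);		// call EXT(fn)
 *	sp = saved_sp;			// movl %edi, %esp
 *
 * CCALL5 follows the same pattern but passes two of its C arguments as
 * 64-bit values split across adjacent 32-bit slots.
 */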
167 .text
168 locore_start:
169
170 /*
171 * Fault recovery.
172 */
173
174 #ifdef __MACHO__
175 #define RECOVERY_SECTION .section __VECTORS, __recover
176 #else
177 #define RECOVERY_SECTION .text
179 #endif
180
181 #define RECOVER_TABLE_START \
182 .align 2 ; \
183 .globl EXT(recover_table) ;\
184 LEXT(recover_table) ;\
185 .text
186
187 #define RECOVER(addr) \
188 .align 2; \
189 .long 9f ;\
190 .long addr ;\
191 .text ;\
192 9:
193
194 #define RECOVER_TABLE_END \
195 .align 2 ;\
196 .globl EXT(recover_table_end) ;\
197 LEXT(recover_table_end) ;\
198 .long 0 /* workaround see comment below */ ;\
199 .text ;
200
201 /* TODO FIXME
202 * The .long 0 above works around a linker bug (insert radar# here):
203 * recover_table_end has zero size and bumps up right against saved_esp in acpi_wakeup.s.
204 * recover_table_end is in __RECOVER,__vectors and saved_esp is in __SLEEP,__data, but they sit right next to each
205 * other, so the linker combines them and incorrectly relocates everything referencing recover_table_end to point
206 * into the __SLEEP section.
207 */
208
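/*
 * How the table is consumed (illustrative sketch; the actual walk lives in
 * the C trap handler): each RECOVER(addr) pairs the address of the
 * instruction that may fault (the 9: label) with the label to resume at,
 * and on a kernel fault the handler scans for a match before panicking,
 * roughly:
 *
 *	struct recovery { uint32_t fault_addr, recover_addr; };
 *	extern struct recovery recover_table[], recover_table_end[];
 *
 *	for (struct recovery *rp = recover_table; rp < recover_table_end; rp++)
 *		if (rp->fault_addr == fault_eip) {
 *			saved_state->eip = rp->recover_addr;
 *			return;			// resume at e.g. copyin_fail
 *		}
 */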
209 /*
210 * Allocate recovery and table.
211 */
212 RECOVERY_SECTION
213 RECOVER_TABLE_START
214
215 /*
216 * Timing routines.
217 */
218 Entry(timer_update)
219 movl 4(%esp),%ecx
220 movl 8(%esp),%eax
221 movl 12(%esp),%edx
222 movl %eax,TIMER_HIGHCHK(%ecx)
223 movl %edx,TIMER_LOW(%ecx)
224 movl %eax,TIMER_HIGH(%ecx)
225 ret
226
227 Entry(timer_grab)
228 movl 4(%esp),%ecx
229 0: movl TIMER_HIGH(%ecx),%edx
230 movl TIMER_LOW(%ecx),%eax
231 cmpl TIMER_HIGHCHK(%ecx),%edx
232 jne 0b
233 ret
234
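/*
 * Illustrative C equivalent (not part of the build; field names follow the
 * TIMER_* offsets used above) of the lock-free protocol implemented by
 * timer_update/timer_grab: the writer publishes the high word twice
 * (HIGHCHK first, HIGH last) around the low word, and the reader retries
 * until the two high words agree, so a 64-bit value is never read torn.
 *
 *	void timer_update(timer, hi, lo) {
 *		timer->high_bits_check = hi;
 *		timer->low_bits        = lo;
 *		timer->high_bits       = hi;
 *	}
 *
 *	uint64_t timer_grab(timer) {
 *		uint32_t hi, lo;
 *		do {
 *			hi = timer->high_bits;
 *			lo = timer->low_bits;
 *		} while (hi != timer->high_bits_check);
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 */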
235 #if STAT_TIME
236
237 #define TIME_TRAP_UENTRY
238 #define TIME_TRAP_UEXIT
239 #define TIME_INT_ENTRY
240 #define TIME_INT_EXIT
241
242 #else
243 /*
244 * Nanosecond timing.
245 */
246
247 /*
248 * Nanotime returned in %edx:%eax.
249 * Computed from tsc based on the scale factor
250 * and an implicit 32 bit shift.
251 *
252 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
253 */
254 #define NANOTIME \
255 mov %gs:CPU_NANOTIME,%edi ; \
256 RTC_NANOTIME_READ_FAST()
257
258
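/*
 * Editorial note (illustrative): RTC_NANOTIME_READ_FAST converts the TSC to
 * nanoseconds using a pre-computed scale factor with an implicit 32-bit
 * shift, roughly:
 *
 *	ns = base_ns + (((uint64_t)(tsc - base_tsc) * scale) >> 32);
 *
 * where base_tsc, base_ns and scale live in the per-CPU nanotime info
 * referenced through CPU_NANOTIME above.
 */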
259 /*
260 * Add 64-bit delta in register dreg : areg to timer pointed to by register treg.
261 */
262 #define TIMER_UPDATE(treg,dreg,areg,offset) \
263 addl (TIMER_LOW+(offset))(treg),areg /* add low bits */ ;\
264 adcl dreg,(TIMER_HIGH+(offset))(treg) /* add carry high bits */ ;\
265 movl areg,(TIMER_LOW+(offset))(treg) /* store updated low bits */ ;\
266 movl (TIMER_HIGH+(offset))(treg),dreg /* copy high bits */ ;\
267 movl dreg,(TIMER_HIGHCHK+(offset))(treg) /* to high check */
268
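/*
 * Illustrative C equivalent (not part of the build) of TIMER_UPDATE above:
 * a 64-bit accumulate done with an addl/adcl pair, after which the high
 * word is re-published into HIGHCHK so that timer_grab (above) continues to
 * see a consistent value.
 *
 *	uint64_t sum = (((uint64_t)timer->high_bits << 32) | timer->low_bits) + delta;
 *	timer->high_bits       = (uint32_t)(sum >> 32);
 *	timer->low_bits        = (uint32_t)sum;
 *	timer->high_bits_check = timer->high_bits;
 */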
269 /*
270 * Add time delta to old timer and start new.
271 */
272 #define TIMER_EVENT(old,new) \
273 NANOTIME /* edx:eax nanosecs */ ; \
274 movl %eax,%esi /* save timestamp */ ; \
275 movl %edx,%edi /* save timestamp */ ; \
276 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ; \
277 subl (old##_TIMER)+TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ; \
278 sbbl (old##_TIMER)+TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */ ; \
279 TIMER_UPDATE(%ecx,%edx,%eax,old##_TIMER) /* update timer */ ; \
280 movl %esi,(new##_TIMER)+TIMER_TSTAMP(%ecx) /* set timestamp */ ; \
281 movl %edi,(new##_TIMER)+TIMER_TSTAMP+4(%ecx) /* set timestamp */ ; \
282 leal (new##_TIMER)(%ecx), %ecx /* compute new timer pointer */ ; \
283 movl %gs:CPU_PROCESSOR,%ebx /* get current processor */ ; \
284 movl %ecx,THREAD_TIMER(%ebx) /* set current timer */ ; \
285 movl %esi,%eax /* restore timestamp */ ; \
286 movl %edi,%edx /* restore timestamp */ ; \
287 subl (old##_STATE)+TIMER_TSTAMP(%ebx),%eax /* compute elapsed time */ ; \
288 sbbl (old##_STATE)+TIMER_TSTAMP+4(%ebx),%edx /* compute elapsed time */ ; \
289 TIMER_UPDATE(%ebx,%edx,%eax,old##_STATE) /* update timer */ ; \
290 leal (new##_STATE)(%ebx),%ecx /* compute new state pointer */ ; \
291 movl %ecx,CURRENT_STATE(%ebx) /* set current state */ ; \
292 movl %esi,TIMER_TSTAMP(%ecx) /* set timestamp */ ; \
293 movl %edi,TIMER_TSTAMP+4(%ecx) /* set timestamp */
294
295 /*
296 * Update time on user trap entry.
297 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
298 */
299 #define TIME_TRAP_UENTRY TIMER_EVENT(USER,SYSTEM)
300
301 /*
302 * update time on user trap exit.
303 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
304 */
305 #define TIME_TRAP_UEXIT TIMER_EVENT(SYSTEM,USER)
306
307 /*
308 * update time on interrupt entry.
309 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
310 * Saves processor state info on stack.
311 */
312 #define TIME_INT_ENTRY \
313 NANOTIME /* edx:eax nanosecs */ ; \
314 movl %eax,%gs:CPU_INT_EVENT_TIME /* save in cpu data */ ; \
315 movl %edx,%gs:CPU_INT_EVENT_TIME+4 /* save in cpu data */ ; \
316 movl %eax,%esi /* save timestamp */ ; \
317 movl %edx,%edi /* save timestamp */ ; \
318 movl %gs:CPU_PROCESSOR,%ebx /* get current processor */ ; \
319 movl THREAD_TIMER(%ebx),%ecx /* get current timer */ ; \
320 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ; \
321 sbbl TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */ ; \
322 TIMER_UPDATE(%ecx,%edx,%eax,0) /* update timer */ ; \
323 movl KERNEL_TIMER(%ebx),%ecx /* point to kernel timer */ ; \
324 movl %esi,TIMER_TSTAMP(%ecx) /* set timestamp */ ; \
325 movl %edi,TIMER_TSTAMP+4(%ecx) /* set timestamp */ ; \
326 movl %esi,%eax /* restore timestamp */ ; \
327 movl %edi,%edx /* restore timestamp */ ; \
328 movl CURRENT_STATE(%ebx),%ecx /* get current state */ ; \
329 pushl %ecx /* save state */ ; \
330 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ; \
331 sbbl TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */ ; \
332 TIMER_UPDATE(%ecx,%edx,%eax,0) /* update timer */ ; \
333 leal IDLE_STATE(%ebx),%eax /* get idle state */ ; \
334 cmpl %eax,%ecx /* compare current state */ ; \
335 je 0f /* skip if equal */ ; \
336 leal SYSTEM_STATE(%ebx),%ecx /* get system state */ ; \
337 movl %ecx,CURRENT_STATE(%ebx) /* set current state */ ; \
338 0: movl %esi,TIMER_TSTAMP(%ecx) /* set timestamp */ ; \
339 movl %edi,TIMER_TSTAMP+4(%ecx) /* set timestamp */
340
341 /*
342 * update time on interrupt exit.
343 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
344 * Restores processor state info from stack.
345 */
346 #define TIME_INT_EXIT \
347 NANOTIME /* edx:eax nanosecs */ ; \
348 movl %eax,%gs:CPU_INT_EVENT_TIME /* save in cpu data */ ; \
349 movl %edx,%gs:CPU_INT_EVENT_TIME+4 /* save in cpu data */ ; \
350 movl %eax,%esi /* save timestamp */ ; \
351 movl %edx,%edi /* save timestamp */ ; \
352 movl %gs:CPU_PROCESSOR,%ebx /* get current processor */ ; \
353 movl KERNEL_TIMER(%ebx),%ecx /* point to kernel timer */ ; \
354 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ; \
355 sbbl TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */ ; \
356 TIMER_UPDATE(%ecx,%edx,%eax,0) /* update timer */ ; \
357 movl THREAD_TIMER(%ebx),%ecx /* interrupted timer */ ; \
358 movl %esi,TIMER_TSTAMP(%ecx) /* set timestamp */ ; \
359 movl %edi,TIMER_TSTAMP+4(%ecx) /* set timestamp */ ; \
360 movl %esi,%eax /* restore timestamp */ ; \
361 movl %edi,%edx /* restore timestamp */ ; \
362 movl CURRENT_STATE(%ebx),%ecx /* get current state */ ; \
363 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ; \
364 sbbl TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */ ; \
365 TIMER_UPDATE(%ecx,%edx,%eax,0) /* update timer */ ; \
366 popl %ecx /* restore state */ ; \
367 movl %ecx,CURRENT_STATE(%ebx) /* set current state */ ; \
368 movl %esi,TIMER_TSTAMP(%ecx) /* set timestamp */ ; \
369 movl %edi,TIMER_TSTAMP+4(%ecx) /* set timestamp */
370
371 #endif /* STAT_TIME */
372
373 #undef PDEBUG
374
375 #ifdef PDEBUG
376
377 /*
378 * Traditional, not ANSI.
379 */
380 #define CAH(label) \
381 .data ;\
382 .globl label/**/count ;\
383 label/**/count: ;\
384 .long 0 ;\
385 .globl label/**/limit ;\
386 label/**/limit: ;\
387 .long 0 ;\
388 .text ;\
389 addl $1,%ss:label/**/count ;\
390 cmpl $0,label/**/limit ;\
391 jz label/**/exit ;\
392 pushl %eax ;\
393 label/**/loop: ;\
394 movl %ss:label/**/count,%eax ;\
395 cmpl %eax,%ss:label/**/limit ;\
396 je label/**/loop ;\
397 popl %eax ;\
398 label/**/exit:
399
400 #else /* PDEBUG */
401
402 #define CAH(label)
403
404 #endif /* PDEBUG */
405
406 #if MACH_KDB
407 /*
408 * Last-ditch debug code to handle faults that might result
409 * from entering kernel (from collocated server) on an invalid
410 * stack. On collocated entry, there's no hardware-initiated
411 * stack switch, so a valid stack must be in place when an
412 * exception occurs, or we may double-fault.
413 *
414 * In case of a double-fault, our only recourse is to switch
415 * hardware "tasks", so that we avoid using the current stack.
416 *
417 * The idea here is just to get the processor into the debugger,
418 * post-haste. No attempt is made to fix up whatever error got
419 * us here, so presumably continuing from the debugger will
420 * simply land us here again -- at best.
421 */
422 #if 0
423 /*
424 * Note that the per-fault entry points are not currently
425 * functional. The only way to make them work would be to
426 * set up separate TSS's for each fault type, which doesn't
427 * currently seem worthwhile. (The offset part of a task
428 * gate is always ignored.) So all faults that task switch
429 * currently resume at db_task_start.
430 */
431 /*
432 * Double fault (Murphy's point) - error code (0) on stack
433 */
434 Entry(db_task_dbl_fault)
435 popl %eax
436 movl $(T_DOUBLE_FAULT),%ebx
437 jmp db_task_start
438 /*
439 * Segment not present - error code on stack
440 */
441 Entry(db_task_seg_np)
442 popl %eax
443 movl $(T_SEGMENT_NOT_PRESENT),%ebx
444 jmp db_task_start
445 /*
446 * Stack fault - error code on (current) stack
447 */
448 Entry(db_task_stk_fault)
449 popl %eax
450 movl $(T_STACK_FAULT),%ebx
451 jmp db_task_start
452 /*
453 * General protection fault - error code on stack
454 */
455 Entry(db_task_gen_prot)
456 popl %eax
457 movl $(T_GENERAL_PROTECTION),%ebx
458 jmp db_task_start
459 #endif /* 0 */
460 /*
461 * The entry point where execution resumes after last-ditch debugger task
462 * switch.
463 */
464 Entry(db_task_start)
465 movl %esp,%edx
466 subl $(ISS32_SIZE),%edx
467 movl %edx,%esp /* allocate x86_saved_state on stack */
468 movl %eax,R32_ERR(%esp)
469 movl %ebx,R32_TRAPNO(%esp)
470 pushl %edx
471 CPU_NUMBER(%edx)
472 movl CX(EXT(master_dbtss),%edx),%edx
473 movl TSS_LINK(%edx),%eax
474 pushl %eax /* pass along selector of previous TSS */
475 call EXT(db_tss_to_frame)
476 popl %eax /* get rid of TSS selector */
477 call EXT(db_trap_from_asm)
478 addl $0x4,%esp
479 /*
480 * And now...?
481 */
482 iret /* ha, ha, ha... */
483 #endif /* MACH_KDB */
484
485 /*
486 * Called as a function, makes the current thread
487 * return from the kernel as if from an exception.
488 * We will consult with DTrace if this is a
489 * newly created thread and we need to fire a probe.
490 */
491
492 .globl EXT(thread_exception_return)
493 .globl EXT(thread_bootstrap_return)
494 LEXT(thread_bootstrap_return)
495 #if CONFIG_DTRACE
496 call EXT(dtrace_thread_bootstrap)
497 #endif
498
499 LEXT(thread_exception_return)
500 CLI
501 movl %gs:CPU_KERNEL_STACK,%ecx
502
503 movl (%ecx),%esp /* switch back to PCB stack */
504 xorl %ecx,%ecx /* don't check if we're in the PFZ */
505 jmp EXT(return_from_trap)
506
507 Entry(call_continuation)
508 movl S_ARG0,%eax /* get continuation */
509 movl S_ARG1,%edx /* continuation param */
510 movl S_ARG2,%ecx /* wait result */
511 movl %gs:CPU_KERNEL_STACK,%esp /* pop the stack */
512 xorl %ebp,%ebp /* zero frame pointer */
513 subl $8,%esp /* align the stack */
514 pushl %ecx
515 pushl %edx
516 call *%eax /* call continuation */
517 addl $16,%esp
518 movl %gs:CPU_ACTIVE_THREAD,%eax
519 pushl %eax
520 call EXT(thread_terminate)
521
522
523
524 /*******************************************************************************************************
525 *
526 * All task 'exceptions' (32-bit and 64-bit) enter lo_alltraps:
527 * esp -> x86_saved_state_t
528 *
529 * The rest of the state is set up as:
530 * cr3 -> kernel directory
531 * esp -> low based stack
532 * gs -> CPU_DATA_GS
533 * cs -> KERNEL32_CS
534 * ss/ds/es -> KERNEL_DS
535 *
536 * interrupts disabled
537 * direction flag cleared
538 */
539 Entry(lo_alltraps)
540 movl R32_CS(%esp),%eax /* assume 32-bit state */
541 cmpl $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
542 jne 1f
543 movl R64_CS(%esp),%eax /* 64-bit user mode */
544 1:
545 testb $3,%al
546 jz trap_from_kernel
547 /* user mode trap */
548 TIME_TRAP_UENTRY
549
550 movl %gs:CPU_ACTIVE_THREAD,%ecx
551 movl ACT_TASK(%ecx),%ebx
552
553 /* Check for active vtimers in the current task */
554 cmpl $0,TASK_VTIMERS(%ebx)
555 jz 1f
556
557 /* Set a pending AST */
558 orl $(AST_BSD),%gs:CPU_PENDING_AST
559
560 /* Set a thread AST (atomic) */
561 lock
562 orl $(AST_BSD),ACT_AST(%ecx)
563
564 1:
565 movl %gs:CPU_KERNEL_STACK,%ebx
566 xchgl %ebx,%esp /* switch to kernel stack */
567 sti
568
569 CCALL1(user_trap, %ebx) /* call user trap routine */
570 cli /* hold off intrs - critical section */
571 popl %esp /* switch back to PCB stack */
572 xorl %ecx,%ecx /* don't check if we're in the PFZ */
573
574 /*
575 * Return from trap or system call, checking for ASTs.
576 * On lowbase PCB stack with intrs disabled
577 */
578 LEXT(return_from_trap)
579 movl %gs:CPU_PENDING_AST, %eax
580 testl %eax, %eax
581 je EXT(return_to_user) /* branch if no AST */
582
583 LEXT(return_from_trap_with_ast)
584 movl %gs:CPU_KERNEL_STACK, %ebx
585 xchgl %ebx, %esp /* switch to kernel stack */
586
587 testl %ecx, %ecx /* see if we need to check for an EIP in the PFZ */
588 je 2f /* no, go handle the AST */
589 cmpl $(SS_64), SS_FLAVOR(%ebx) /* are we a 64-bit task? */
590 je 1f
591 /* no... 32-bit user mode */
592 movl R32_EIP(%ebx), %eax
593 pushl %ebx /* save PCB stack */
594 xorl %ebp, %ebp /* clear frame pointer */
595 CCALL1(commpage_is_in_pfz32, %eax)
596 popl %ebx /* retrieve pointer to PCB stack */
597 testl %eax, %eax
598 je 2f /* not in the PFZ... go service AST */
599 movl %eax, R32_EBX(%ebx) /* let the PFZ know we've pended an AST */
600 xchgl %ebx, %esp /* switch back to PCB stack */
601 jmp EXT(return_to_user)
602 1: /* 64-bit user mode */
603 movl R64_RIP(%ebx), %ecx
604 movl R64_RIP+4(%ebx), %eax
605 pushl %ebx /* save PCB stack */
606 xorl %ebp, %ebp /* clear frame pointer */
607 CCALL2(commpage_is_in_pfz64, %ecx, %eax)
608 popl %ebx /* retrieve pointer to PCB stack */
609 testl %eax, %eax
610 je 2f /* not in the PFZ... go service AST */
611 movl %eax, R64_RBX(%ebx) /* let the PFZ know we've pended an AST */
612 xchgl %ebx, %esp /* switch back to PCB stack */
613 jmp EXT(return_to_user)
614 2:
615 STI /* interrupts always enabled on return to user mode */
616 pushl %ebx /* save PCB stack */
617 xorl %ebp, %ebp /* Clear framepointer */
618 CCALL1(i386_astintr, $0) /* take the AST */
619 CLI
620
621 popl %esp /* switch back to PCB stack (w/exc link) */
622
623 xorl %ecx, %ecx /* don't check if we're in the PFZ */
624 jmp EXT(return_from_trap) /* and check again (rare) */
625
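/*
 * Editorial sketch of the PFZ handling above: user code executing inside
 * the commpage "preemption-free zone" must not be diverted for an AST, so
 * the AST is left pending and the PFZ code is told (via the saved
 * %ebx / %rbx) to re-enter the kernel when it leaves the zone.  Roughly:
 *
 *	if (check_pfz && (in_pfz = commpage_is_in_pfz32(regs->eip)) != 0) {
 *		regs->ebx = in_pfz;	// flag the pending AST to the PFZ
 *		return_to_user();	// deliver the AST later
 *	} else {
 *		i386_astintr(0);	// take the AST now
 *	}
 */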
626 LEXT(return_to_user)
627 TIME_TRAP_UEXIT
628
629 LEXT(ret_to_user)
630 cmpl $0, %gs:CPU_IS64BIT
631 je EXT(lo_ret_to_user)
632 jmp EXT(lo64_ret_to_user)
633
634
635
636 /*
637 * Trap from kernel mode. No need to switch stacks.
638 * Interrupts must be off here; we will restore them to their state at the time of the trap
639 * as soon as it is safe to do so without recursing on preemption.
640 */
641 trap_from_kernel:
642 movl %esp, %eax /* saved state addr */
643 pushl R32_EIP(%esp) /* Simulate a CALL from fault point */
644 pushl %ebp /* Extend framepointer chain */
645 movl %esp, %ebp
646 CCALL1(kernel_trap, %eax) /* Call kernel trap handler */
647 popl %ebp
648 addl $4, %esp
649 cli
650
651 movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
652 testl $ AST_URGENT,%eax /* any urgent preemption? */
653 je ret_to_kernel /* no, nothing to do */
654 cmpl $ T_PREEMPT,R32_TRAPNO(%esp)
655 je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */
656 testl $ EFL_IF,R32_EFLAGS(%esp) /* interrupts disabled? */
657 je ret_to_kernel
658 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
659 jne ret_to_kernel
660 movl %gs:CPU_KERNEL_STACK,%eax
661 movl %esp,%ecx
662 xorl %eax,%ecx
663 and EXT(kernel_stack_mask),%ecx
664 testl %ecx,%ecx /* are we on the kernel stack? */
665 jne ret_to_kernel /* no, skip it */
666
667 CCALL1(i386_astintr, $1) /* take the AST */
668
669 ret_to_kernel:
670 cmpl $0, %gs:CPU_IS64BIT
671 je EXT(lo_ret_to_kernel)
672 jmp EXT(lo64_ret_to_kernel)
673
674
675
676 /*******************************************************************************************************
677 *
678 * All interrupts on all tasks enter here with:
679 * esp -> x86_saved_state_t
680 *
681 * cr3 -> kernel directory
682 * esp -> low based stack
683 * gs -> CPU_DATA_GS
684 * cs -> KERNEL32_CS
685 * ss/ds/es -> KERNEL_DS
686 *
687 * interrupts disabled
688 * direction flag cleared
689 */
690 Entry(lo_allintrs)
691 /*
692 * test whether already on interrupt stack
693 */
694 movl %gs:CPU_INT_STACK_TOP,%ecx
695 cmpl %esp,%ecx
696 jb 1f
697 leal -INTSTACK_SIZE(%ecx),%edx
698 cmpl %esp,%edx
699 jb int_from_intstack
700 1:
701 xchgl %ecx,%esp /* switch to interrupt stack */
702
703 movl %cr0,%eax /* get cr0 */
704 orl $(CR0_TS),%eax /* or in TS bit */
705 movl %eax,%cr0 /* set cr0 */
706
707 subl $8, %esp /* for 16-byte stack alignment */
708 pushl %ecx /* save pointer to old stack */
709 movl %ecx,%gs:CPU_INT_STATE /* save intr state */
710
711 TIME_INT_ENTRY /* do timing */
712
713 movl %gs:CPU_ACTIVE_THREAD,%ecx
714 movl ACT_TASK(%ecx),%ebx
715
716 /* Check for active vtimers in the current task */
717 cmpl $0,TASK_VTIMERS(%ebx)
718 jz 1f
719
720 /* Set a pending AST */
721 orl $(AST_BSD),%gs:CPU_PENDING_AST
722
723 /* Set a thread AST (atomic) */
724 lock
725 orl $(AST_BSD),ACT_AST(%ecx)
726
727 1:
728 incl %gs:CPU_PREEMPTION_LEVEL
729 incl %gs:CPU_INTERRUPT_LEVEL
730
731 movl %gs:CPU_INT_STATE, %eax
732 CCALL1(interrupt, %eax) /* call generic interrupt routine */
733
734 cli /* just in case we returned with intrs enabled */
735 xorl %eax,%eax
736 movl %eax,%gs:CPU_INT_STATE /* clear intr state pointer */
737
738 decl %gs:CPU_INTERRUPT_LEVEL
739 decl %gs:CPU_PREEMPTION_LEVEL
740
741 TIME_INT_EXIT /* do timing */
742
743 movl %gs:CPU_ACTIVE_THREAD,%eax
744 movl ACT_PCB(%eax),%eax /* get act`s PCB */
745 movl PCB_FPS(%eax),%eax /* get pcb's ims.ifps */
746 cmpl $0,%eax /* Is there a context */
747 je 1f /* Branch if not */
748 movl FP_VALID(%eax),%eax /* Load fp_valid */
749 cmpl $0,%eax /* Check if valid */
750 jne 1f /* Branch if valid */
751 clts /* Clear TS */
752 jmp 2f
753 1:
754 movl %cr0,%eax /* get cr0 */
755 orl $(CR0_TS),%eax /* or in TS bit */
756 movl %eax,%cr0 /* set cr0 */
757 2:
758 popl %esp /* switch back to old stack */
759
760 /* Load interrupted code segment into %eax */
761 movl R32_CS(%esp),%eax /* assume 32-bit state */
762 cmpl $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
763 jne 3f
764 movl R64_CS(%esp),%eax /* 64-bit user mode */
765 3:
766 testb $3,%al /* user mode, */
767 jnz ast_from_interrupt_user /* go handle potential ASTs */
768 /*
769 * we only want to handle preemption requests if
770 * the interrupt fell in the kernel context
771 * and preemption isn't disabled
772 */
773 movl %gs:CPU_PENDING_AST,%eax
774 testl $ AST_URGENT,%eax /* any urgent requests? */
775 je ret_to_kernel /* no, nothing to do */
776
777 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
778 jne ret_to_kernel /* yes, skip it */
779
780 movl %gs:CPU_KERNEL_STACK,%eax
781 movl %esp,%ecx
782 xorl %eax,%ecx
783 and EXT(kernel_stack_mask),%ecx
784 testl %ecx,%ecx /* are we on the kernel stack? */
785 jne ret_to_kernel /* no, skip it */
786
787 /*
788 * Take an AST from kernel space. We don't need (and don't want)
789 * to do as much as the case where the interrupt came from user
790 * space.
791 */
792 CCALL1(i386_astintr, $1)
793
794 jmp ret_to_kernel
795
796
797 /*
798 * nested int - simple path, can't preempt etc on way out
799 */
800 int_from_intstack:
801 incl %gs:CPU_PREEMPTION_LEVEL
802 incl %gs:CPU_INTERRUPT_LEVEL
803
804 movl %esp, %edx /* x86_saved_state */
805 CCALL1(interrupt, %edx)
806
807 decl %gs:CPU_INTERRUPT_LEVEL
808 decl %gs:CPU_PREEMPTION_LEVEL
809
810 jmp ret_to_kernel
811
812 /*
813 * Take an AST from an interrupted user
814 */
815 ast_from_interrupt_user:
816 movl %gs:CPU_PENDING_AST,%eax
817 testl %eax,%eax /* pending ASTs? */
818 je EXT(ret_to_user) /* no, nothing to do */
819
820 TIME_TRAP_UENTRY
821
822 movl $1, %ecx /* check if we're in the PFZ */
823 jmp EXT(return_from_trap_with_ast) /* return */
824
825
826 /*******************************************************************************************************
827 *
828 * 32bit Tasks
829 * System call entries via INTR_GATE or sysenter:
830 *
831 * esp -> x86_saved_state32_t
832 * cr3 -> kernel directory
833 * esp -> low based stack
834 * gs -> CPU_DATA_GS
835 * cs -> KERNEL32_CS
836 * ss/ds/es -> KERNEL_DS
837 *
838 * interrupts disabled
839 * direction flag cleared
840 */
841
842 Entry(lo_sysenter)
843 /*
844 * We can be here either for a mach syscall or a unix syscall,
845 * as indicated by the sign of the code:
846 */
847 movl R32_EAX(%esp),%eax
848 testl %eax,%eax
849 js EXT(lo_mach_scall) /* < 0 => mach */
850 /* > 0 => unix */
851
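/*
 * Editorial example of the sign convention used above: a Mach trap such as
 * mach_msg_trap is invoked with the negative of its trap number in %eax
 * (e.g. -31), while a BSD syscall such as write(2) uses its positive sysent
 * index (e.g. 4); lo_sysenter only inspects the sign.
 */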
852 Entry(lo_unix_scall)
853 TIME_TRAP_UENTRY
854
855 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
856 movl ACT_TASK(%ecx),%ebx /* point to current task */
857 addl $1,TASK_SYSCALLS_UNIX(%ebx) /* increment call count */
858
859 /* Check for active vtimers in the current task */
860 cmpl $0,TASK_VTIMERS(%ebx)
861 jz 1f
862
863 /* Set a pending AST */
864 orl $(AST_BSD),%gs:CPU_PENDING_AST
865
866 /* Set a thread AST (atomic) */
867 lock
868 orl $(AST_BSD),ACT_AST(%ecx)
869
870 1:
871 movl %gs:CPU_KERNEL_STACK,%ebx
872 xchgl %ebx,%esp /* switch to kernel stack */
873
874 sti
875
876 CCALL1(unix_syscall, %ebx)
877 /*
878 * always returns through thread_exception_return
879 */
880
881
882 Entry(lo_mach_scall)
883 TIME_TRAP_UENTRY
884
885 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
886 movl ACT_TASK(%ecx),%ebx /* point to current task */
887 addl $1,TASK_SYSCALLS_MACH(%ebx) /* increment call count */
888
889 /* Check for active vtimers in the current task */
890 cmpl $0,TASK_VTIMERS(%ebx)
891 jz 1f
892
893 /* Set a pending AST */
894 orl $(AST_BSD),%gs:CPU_PENDING_AST
895
896 /* Set a thread AST (atomic) */
897 lock
898 orl $(AST_BSD),ACT_AST(%ecx)
899
900 1:
901 movl %gs:CPU_KERNEL_STACK,%ebx
902 xchgl %ebx,%esp /* switch to kernel stack */
903
904 sti
905
906 CCALL1(mach_call_munger, %ebx)
907 /*
908 * always returns through thread_exception_return
909 */
910
911
912 Entry(lo_mdep_scall)
913 TIME_TRAP_UENTRY
914
915 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
916 movl ACT_TASK(%ecx),%ebx /* point to current task */
917
918 /* Check for active vtimers in the current task */
919 cmpl $0,TASK_VTIMERS(%ebx)
920 jz 1f
921
922 /* Set a pending AST */
923 orl $(AST_BSD),%gs:CPU_PENDING_AST
924
925 /* Set a thread AST (atomic) */
926 lock
927 orl $(AST_BSD),ACT_AST(%ecx)
928
929 1:
930 movl %gs:CPU_KERNEL_STACK,%ebx
931 xchgl %ebx,%esp /* switch to kernel stack */
932
933 sti
934
935 CCALL1(machdep_syscall, %ebx)
936 /*
937 * always returns through thread_exception_return
938 */
939
940
941 Entry(lo_diag_scall)
942 TIME_TRAP_UENTRY
943
944 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
945 movl ACT_TASK(%ecx),%ebx /* point to current task */
946
947 /* Check for active vtimers in the current task */
948 cmpl $0,TASK_VTIMERS(%ebx)
949 jz 1f
950
951 /* Set a pending AST */
952 orl $(AST_BSD),%gs:CPU_PENDING_AST
953
954 /* Set a thread AST (atomic) */
955 lock
956 orl $(AST_BSD),ACT_AST(%ecx)
957
958 1:
959 movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
960 xchgl %ebx,%esp // Switch to it, saving the previous
961
962 CCALL1(diagCall, %ebx) // Call diagnostics
963
964 cmpl $0,%eax // What kind of return is this?
965 je 2f
966 cli // Disable interruptions just in case they were enabled
967 popl %esp // Get back the original stack
968 jmp EXT(return_to_user) // Normal return, do not check asts...
969 2:
970 CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
971 // pass what would be the diag syscall
972 // error return - cause an exception
973 /* no return */
974
975
976
977 /*******************************************************************************************************
978 *
979 * 64bit Tasks
980 * System call entries via syscall only:
981 *
982 * esp -> x86_saved_state64_t
983 * cr3 -> kernel directory
984 * esp -> low based stack
985 * gs -> CPU_DATA_GS
986 * cs -> KERNEL32_CS
987 * ss/ds/es -> KERNEL_DS
988 *
989 * interrupts disabled
990 * direction flag cleared
991 */
992
993 Entry(lo_syscall)
994 TIME_TRAP_UENTRY
995
996 /*
997 * We can be here either for a mach, unix, machdep or diag syscall,
998 * as indicated by the syscall class:
999 */
1000 movl R64_RAX(%esp), %eax /* syscall number/class */
1001 movl %eax, %ebx
1002 andl $(SYSCALL_CLASS_MASK), %ebx /* syscall class */
1003 cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
1004 je EXT(lo64_mach_scall)
1005 cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
1006 je EXT(lo64_unix_scall)
1007 cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
1008 je EXT(lo64_mdep_scall)
1009 cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
1010 je EXT(lo64_diag_scall)
1011
1012 movl %gs:CPU_KERNEL_STACK,%ebx
1013 xchgl %ebx,%esp /* switch to kernel stack */
1014
1015 sti
1016
1017 /* Syscall class unknown */
1018 CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
1019 /* no return */
1020
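/*
 * Editorial sketch (illustrative) of the dispatch just performed: the class
 * bits of the 64-bit syscall number select the handler, using the constants
 * from mach/i386/syscall_sw.h:
 *
 *	switch (rax & SYSCALL_CLASS_MASK) {
 *	case SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT: mach_call_munger64(state); break;
 *	case SYSCALL_CLASS_UNIX << SYSCALL_CLASS_SHIFT: unix_syscall64(state);     break;
 *	case SYSCALL_CLASS_MDEP << SYSCALL_CLASS_SHIFT: machdep_syscall64(state);  break;
 *	case SYSCALL_CLASS_DIAG << SYSCALL_CLASS_SHIFT: diagCall64(state);         break;
 *	default: i386_exception(EXC_SYSCALL, rax, 0);	// unknown class
 *	}
 */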
1021
1022 Entry(lo64_unix_scall)
1023 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
1024 movl ACT_TASK(%ecx),%ebx /* point to current task */
1025 addl $1,TASK_SYSCALLS_UNIX(%ebx) /* increment call count */
1026
1027 /* Check for active vtimers in the current task */
1028 cmpl $0,TASK_VTIMERS(%ebx)
1029 jz 1f
1030
1031 /* Set a pending AST */
1032 orl $(AST_BSD),%gs:CPU_PENDING_AST
1033
1034 /* Set a thread AST (atomic) */
1035 lock
1036 orl $(AST_BSD),ACT_AST(%ecx)
1037
1038 1:
1039 movl %gs:CPU_KERNEL_STACK,%ebx
1040 xchgl %ebx,%esp /* switch to kernel stack */
1041
1042 sti
1043
1044 CCALL1(unix_syscall64, %ebx)
1045 /*
1046 * always returns through thread_exception_return
1047 */
1048
1049
1050 Entry(lo64_mach_scall)
1051 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
1052 movl ACT_TASK(%ecx),%ebx /* point to current task */
1053 addl $1,TASK_SYSCALLS_MACH(%ebx) /* increment call count */
1054
1055 /* Check for active vtimers in the current task */
1056 cmpl $0,TASK_VTIMERS(%ebx)
1057 jz 1f
1058
1059 /* Set a pending AST */
1060 orl $(AST_BSD),%gs:CPU_PENDING_AST
1061
1062 lock
1063 orl $(AST_BSD),ACT_AST(%ecx)
1064
1065 1:
1066 movl %gs:CPU_KERNEL_STACK,%ebx
1067 xchgl %ebx,%esp /* switch to kernel stack */
1068
1069 sti
1070
1071 CCALL1(mach_call_munger64, %ebx)
1072 /*
1073 * always returns through thread_exception_return
1074 */
1075
1076
1077
1078 Entry(lo64_mdep_scall)
1079 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
1080 movl ACT_TASK(%ecx),%ebx /* point to current task */
1081
1082 /* Check for active vtimers in the current task */
1083 cmpl $0,TASK_VTIMERS(%ebx)
1084 jz 1f
1085
1086 /* Set a pending AST */
1087 orl $(AST_BSD),%gs:CPU_PENDING_AST
1088
1089 /* Set a thread AST (atomic) */
1090 lock
1091 orl $(AST_BSD),ACT_AST(%ecx)
1092
1093 1:
1094 movl %gs:CPU_KERNEL_STACK,%ebx
1095 xchgl %ebx,%esp /* switch to kernel stack */
1096
1097 sti
1098
1099 CCALL1(machdep_syscall64, %ebx)
1100 /*
1101 * always returns through thread_exception_return
1102 */
1103
1104
1105 Entry(lo64_diag_scall)
1106 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
1107 movl ACT_TASK(%ecx),%ebx /* point to current task */
1108
1109 /* Check for active vtimers in the current task */
1110 cmpl $0,TASK_VTIMERS(%ebx)
1111 jz 1f
1112
1113 /* Set a pending AST */
1114 orl $(AST_BSD),%gs:CPU_PENDING_AST
1115
1116 /* Set a thread AST (atomic) */
1117 lock
1118 orl $(AST_BSD),ACT_AST(%ecx)
1119
1120 1:
1121 movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
1122 xchgl %ebx,%esp // Switch to it, saving the previous
1123
1124 CCALL1(diagCall64, %ebx) // Call diagnostics
1125
1126 cmpl $0,%eax // What kind of return is this?
1127 je 2f
1128 cli // Disable interruptions just in case they were enabled
1129 popl %esp // Get back the original stack
1130 jmp EXT(return_to_user) // Normal return, do not check asts...
1131 2:
1132 CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
1133 // pass what would be the diag syscall
1134 // error return - cause an exception
1135 /* no return */
1136
1138 /*
1139 * Utility routines.
1140 */
1141
1142
1143 /*
1144 * Copy from user/kernel address space.
1145 * arg0: window offset or kernel address
1146 * arg1: kernel address
1147 * arg2: byte count
1148 */
1149 Entry(copyinphys_user)
1150 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
1151 mov %cx,%ds
1152
1153 Entry(copyinphys_kern)
1154 movl $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
1155 mov %cx,%es
1156 jmp copyin_common
1157
1158 Entry(copyin_user)
1159 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
1160 mov %cx,%ds
1161
1162 Entry(copyin_kern)
1163
1164 copyin_common:
1165 pushl %esi
1166 pushl %edi /* save registers */
1167
1168 movl 8+S_ARG0,%esi /* get source - window offset or kernel address */
1169 movl 8+S_ARG1,%edi /* get destination - kernel address */
1170 movl 8+S_ARG2,%edx /* get count */
1171
1172 cld /* count up */
1173 movl %edx,%ecx /* move by longwords first */
1174 shrl $2,%ecx
1175 RECOVERY_SECTION
1176 RECOVER(copyin_fail)
1177 rep
1178 movsl /* move longwords */
1179 movl %edx,%ecx /* now move remaining bytes */
1180 andl $3,%ecx
1181 RECOVERY_SECTION
1182 RECOVER(copyin_fail)
1183 rep
1184 movsb
1185 xorl %eax,%eax /* return 0 for success */
1186 copyin_ret:
1187 mov %ss,%cx /* restore kernel data and extended segments */
1188 mov %cx,%ds
1189 mov %cx,%es
1190
1191 popl %edi /* restore registers */
1192 popl %esi
1193 ret /* and return */
1194
1195 copyin_fail:
1196 movl $(EFAULT),%eax /* return error for failure */
1197 jmp copyin_ret /* pop frame and return */
1198
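/*
 * Illustrative C equivalent (not part of the build) of copyin_common above:
 * copy whole 32-bit words first, then the 0-3 trailing bytes; a fault in
 * either loop is caught through the recovery table and turned into an
 * EFAULT return.
 *
 *	int copyin_common(const char *src, char *dst, size_t n) {
 *		size_t i;
 *		for (i = 0; i + 4 <= n; i += 4)		// rep movsl
 *			*(uint32_t *)(dst + i) = *(const uint32_t *)(src + i);
 *		for (; i < n; i++)			// rep movsb
 *			dst[i] = src[i];
 *		return 0;				// EFAULT instead, on a fault
 *	}
 */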
1199
1200
1201 /*
1202 * Copy string from user/kern address space.
1203 * arg0: window offset or kernel address
1204 * arg1: kernel address
1205 * arg2: max byte count
1206 * arg3: actual byte count (OUT)
1207 */
1208 Entry(copyinstr_kern)
1209 mov %ds,%cx
1210 jmp copyinstr_common
1211
1212 Entry(copyinstr_user)
1213 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
1214
1215 copyinstr_common:
1216 mov %cx,%fs
1217
1218 pushl %esi
1219 pushl %edi /* save registers */
1220
1221 movl 8+S_ARG0,%esi /* get source - window offset or kernel address */
1222 movl 8+S_ARG1,%edi /* get destination - kernel address */
1223 movl 8+S_ARG2,%edx /* get count */
1224
1225 xorl %eax,%eax /* set to 0 here so that the high 24 bits */
1226 /* are 0 for the cmpl against 0 */
1227 2:
1228 RECOVERY_SECTION
1229 RECOVER(copystr_fail) /* copy bytes... */
1230 movb %fs:(%esi),%al
1231 incl %esi
1232 testl %edi,%edi /* if kernel address is NULL... */
1233 jz 3f /* ...skip the store, just scan the string */
1234 movb %al,(%edi) /* copy the byte */
1235 incl %edi
1236 3:
1237 testl %eax,%eax /* did we just stuff the 0-byte? */
1238 jz 4f /* yes, return 0 status already in %eax */
1239 decl %edx /* decrement #bytes left in buffer */
1240 jnz 2b /* buffer not full so copy in another byte */
1241 movl $(ENAMETOOLONG),%eax /* buffer full but no 0-byte: ENAMETOOLONG */
1242 4:
1243 movl 8+S_ARG3,%edi /* get OUT len ptr */
1244 cmpl $0,%edi
1245 jz copystr_ret /* if null, just return */
1246 subl 8+S_ARG0,%esi
1247 movl %esi,(%edi) /* else set OUT arg to xfer len */
1248 copystr_ret:
1249 popl %edi /* restore registers */
1250 popl %esi
1251 ret /* and return */
1252
1253 copystr_fail:
1254 movl $(EFAULT),%eax /* return error for failure */
1255 jmp copystr_ret /* pop frame and return */
1256
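/*
 * Illustrative C equivalent (not part of the build) of copyinstr_common
 * above: dst may be NULL to merely scan, and *lencopied (when supplied)
 * receives the number of bytes consumed including the terminating NUL.
 *
 *	int copyinstr_common(const char *src, char *dst, size_t max, size_t *lencopied) {
 *		size_t i = 0;
 *		int error = ENAMETOOLONG;	// assume no NUL fits
 *		while (max != 0) {
 *			char c = src[i++];	// EFAULT on a fault, via RECOVER()
 *			if (dst != NULL)
 *				dst[i - 1] = c;
 *			if (c == '\0') { error = 0; break; }
 *			max--;
 *		}
 *		if (lencopied != NULL)
 *			*lencopied = i;
 *		return error;
 *	}
 */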
1257
1258 /*
1259 * Copy to user/kern address space.
1260 * arg0: kernel address
1261 * arg1: window offset or kernel address
1262 * arg2: byte count
1263 */
1264 ENTRY(copyoutphys_user)
1265 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
1266 mov %cx,%es
1267
1268 ENTRY(copyoutphys_kern)
1269 movl $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
1270 mov %cx,%ds
1271 jmp copyout_common
1272
1273 ENTRY(copyout_user)
1274 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
1275 mov %cx,%es
1276
1277 ENTRY(copyout_kern)
1278
1279 copyout_common:
1280 pushl %esi
1281 pushl %edi /* save registers */
1282
1283 movl 8+S_ARG0,%esi /* get source - kernel address */
1284 movl 8+S_ARG1,%edi /* get destination - window offset or kernel address */
1285 movl 8+S_ARG2,%edx /* get count */
1286
1287 cld /* count up */
1288 movl %edx,%ecx /* move by longwords first */
1289 shrl $2,%ecx
1290 RECOVERY_SECTION
1291 RECOVER(copyout_fail)
1292 rep
1293 movsl
1294 movl %edx,%ecx /* now move remaining bytes */
1295 andl $3,%ecx
1296 RECOVERY_SECTION
1297 RECOVER(copyout_fail)
1298 rep
1299 movsb /* move */
1300 xorl %eax,%eax /* return 0 for success */
1301 copyout_ret:
1302 mov %ss,%cx /* restore kernel segment */
1303 mov %cx,%es
1304 mov %cx,%ds
1305
1306 popl %edi /* restore registers */
1307 popl %esi
1308 ret /* and return */
1309
1310 copyout_fail:
1311 movl $(EFAULT),%eax /* return error for failure */
1312 jmp copyout_ret /* pop frame and return */
1313
1314 /*
1315 * io register must not be used on slaves (no AT bus)
1316 */
1317 #define ILL_ON_SLAVE
1318
1319
1320 #if MACH_ASSERT
1321
1322 #define ARG0 B_ARG0
1323 #define ARG1 B_ARG1
1324 #define ARG2 B_ARG2
1325 #define PUSH_FRAME FRAME
1326 #define POP_FRAME EMARF
1327
1328 #else /* MACH_ASSERT */
1329
1330 #define ARG0 S_ARG0
1331 #define ARG1 S_ARG1
1332 #define ARG2 S_ARG2
1333 #define PUSH_FRAME
1334 #define POP_FRAME
1335
1336 #endif /* MACH_ASSERT */
1337
1338 /*
1339 * void loutb(unsigned byte *io_port,
1340 * unsigned byte *data,
1341 * unsigned int count)
1342 *
1343 * Output an array of bytes to an IO port.
1344 */
1345 ENTRY(loutb)
1346 ENTRY(outsb)
1347 PUSH_FRAME
1348 ILL_ON_SLAVE
1349 movl %esi,%eax /* save register */
1350 movl ARG0,%edx /* get io port number */
1351 movl ARG1,%esi /* get data address */
1352 movl ARG2,%ecx /* get count */
1353 cld /* count up */
1354 rep
1355 outsb /* output */
1356 movl %eax,%esi /* restore register */
1357 POP_FRAME
1358 ret
1359
1360
1361 /*
1362 * void loutw(unsigned short *io_port,
1363 * unsigned short *data,
1364 * unsigned int count)
1365 *
1366 * Output an array of shorts to an IO port.
1367 */
1368 ENTRY(loutw)
1369 ENTRY(outsw)
1370 PUSH_FRAME
1371 ILL_ON_SLAVE
1372 movl %esi,%eax /* save register */
1373 movl ARG0,%edx /* get io port number */
1374 movl ARG1,%esi /* get data address */
1375 movl ARG2,%ecx /* get count */
1376 cld /* count up */
1377 rep
1378 outsw /* output */
1379 movl %eax,%esi /* restore register */
1380 POP_FRAME
1381 ret
1382
1383 /*
1384 * void loutl(unsigned short io_port,
1385 * unsigned int *data,
1386 * unsigned int count)
1387 *
1388 * Output an array of longs to an IO port.
1389 */
1390 ENTRY(loutl)
1391 ENTRY(outsl)
1392 PUSH_FRAME
1393 ILL_ON_SLAVE
1394 movl %esi,%eax /* save register */
1395 movl ARG0,%edx /* get io port number */
1396 movl ARG1,%esi /* get data address */
1397 movl ARG2,%ecx /* get count */
1398 cld /* count up */
1399 rep
1400 outsl /* output */
1401 movl %eax,%esi /* restore register */
1402 POP_FRAME
1403 ret
1404
1405
1406 /*
1407 * void linb(unsigned char *io_port,
1408 * unsigned char *data,
1409 * unsigned int count)
1410 *
1411 * Input an array of bytes from an IO port.
1412 */
1413 ENTRY(linb)
1414 ENTRY(insb)
1415 PUSH_FRAME
1416 ILL_ON_SLAVE
1417 movl %edi,%eax /* save register */
1418 movl ARG0,%edx /* get io port number */
1419 movl ARG1,%edi /* get data address */
1420 movl ARG2,%ecx /* get count */
1421 cld /* count up */
1422 rep
1423 insb /* input */
1424 movl %eax,%edi /* restore register */
1425 POP_FRAME
1426 ret
1427
1428
1429 /*
1430 * void linw(unsigned short *io_port,
1431 * unsigned short *data,
1432 * unsigned int count)
1433 *
1434 * Input an array of shorts from an IO port.
1435 */
1436 ENTRY(linw)
1437 ENTRY(insw)
1438 PUSH_FRAME
1439 ILL_ON_SLAVE
1440 movl %edi,%eax /* save register */
1441 movl ARG0,%edx /* get io port number */
1442 movl ARG1,%edi /* get data address */
1443 movl ARG2,%ecx /* get count */
1444 cld /* count up */
1445 rep
1446 insw /* input */
1447 movl %eax,%edi /* restore register */
1448 POP_FRAME
1449 ret
1450
1451
1452 /*
1453 * void linl(unsigned short io_port,
1454 * unsigned int *data,
1455 * unsigned int count)
1456 *
1457 * Input an array of longs from an IO port.
1458 */
1459 ENTRY(linl)
1460 ENTRY(insl)
1461 PUSH_FRAME
1462 ILL_ON_SLAVE
1463 movl %edi,%eax /* save register */
1464 movl ARG0,%edx /* get io port number */
1465 movl ARG1,%edi /* get data address */
1466 movl ARG2,%ecx /* get count */
1467 cld /* count up */
1468 rep
1469 insl /* input */
1470 movl %eax,%edi /* restore register */
1471 POP_FRAME
1472 ret
1473
1474 /*
1475 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
1476 */
1477 ENTRY(rdmsr_carefully)
1478 movl S_ARG0, %ecx
1479 RECOVERY_SECTION
1480 RECOVER(rdmsr_fail)
1481 rdmsr
1482 movl S_ARG1, %ecx
1483 movl %eax, (%ecx)
1484 movl S_ARG2, %ecx
1485 movl %edx, (%ecx)
1486 movl $0, %eax
1487 ret
1488
1489 rdmsr_fail:
1490 movl $1, %eax
1491 ret
1492
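/*
 * Editorial usage sketch: rdmsr_carefully lets a caller probe an MSR that
 * may not exist on the running CPU; the #GP a bad rdmsr raises is absorbed
 * by the recovery table and reported as a non-zero return.  (The MSR number
 * and use_msr() below are illustrative only.)
 *
 *	uint32_t lo, hi;
 *	if (rdmsr_carefully(0x17, &lo, &hi) == 0)
 *		use_msr(((uint64_t)hi << 32) | lo);	// MSR is present
 *	// else: MSR faulted, skip it
 */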
1493 /*
1494 * Done with recovery table.
1495 */
1496 RECOVERY_SECTION
1497 RECOVER_TABLE_END
1498
1499 .data
1500 dr_msk:
1501 .long ~0x000f0003
1502 .long ~0x00f0000c
1503 .long ~0x0f000030
1504 .long ~0xf00000c0
1505 ENTRY(dr_addr)
1506 .long 0,0,0,0
1507 .long 0,0,0,0
1508
1509 .text
1510
1511 #ifndef SYMMETRY
1512
1513 /*
1514 * ffs(mask)
1515 */
1516 ENTRY(ffs)
1517 bsfl S_ARG0, %eax
1518 jz 0f
1519 incl %eax
1520 ret
1521 0: xorl %eax, %eax
1522 ret
1523
1524 /*
1525 * cpu_shutdown()
1526 * Force reboot
1527 */
1528
1529 null_idtr:
1530 .word 0
1531 .long 0
1532
1533 Entry(cpu_shutdown)
1534 lidt null_idtr /* disable the interrupt handler */
1535 xor %ecx,%ecx /* generate a divide by zero */
1536 div %ecx,%eax /* reboot now */
1537 ret /* this will "never" be executed */
1538
1539 #endif /* SYMMETRY */
1540
1541
1542 /*
1543 * setbit(int bitno, int *s) - set bit in bit string
1544 */
1545 ENTRY(setbit)
1546 movl S_ARG0, %ecx /* bit number */
1547 movl S_ARG1, %eax /* address */
1548 btsl %ecx, (%eax) /* set bit */
1549 ret
1550
1551 /*
1552 * clrbit(int bitno, int *s) - clear bit in bit string
1553 */
1554 ENTRY(clrbit)
1555 movl S_ARG0, %ecx /* bit number */
1556 movl S_ARG1, %eax /* address */
1557 btrl %ecx, (%eax) /* clear bit */
1558 ret
1559
1560 /*
1561 * ffsbit(int *s) - find first set bit in bit string
1562 */
1563 ENTRY(ffsbit)
1564 movl S_ARG0, %ecx /* address */
1565 movl $0, %edx /* base offset */
1566 0:
1567 bsfl (%ecx), %eax /* check argument bits */
1568 jnz 1f /* found bit, return */
1569 addl $4, %ecx /* increment address */
1570 addl $32, %edx /* increment offset */
1571 jmp 0b /* try again */
1572 1:
1573 addl %edx, %eax /* return offset */
1574 ret
1575
1576 /*
1577 * testbit(int nr, volatile void *array)
1578 *
1579 * Test to see if the bit is set within the bit string
1580 */
1581
1582 ENTRY(testbit)
1583 movl S_ARG0,%eax /* Get the bit to test */
1584 movl S_ARG1,%ecx /* get the array string */
1585 btl %eax,(%ecx)
1586 sbbl %eax,%eax
1587 ret
1588
1589 ENTRY(get_pc)
1590 movl 4(%ebp),%eax
1591 ret
1592
1593 ENTRY(minsecurity)
1594 pushl %ebp
1595 movl %esp,%ebp
1596 /*
1597 * jail: set the EIP to "jail" to block a kernel thread.
1598 * Useful to debug synchronization problems on MPs.
1599 */
1600 ENTRY(jail)
1601 jmp EXT(jail)
1602
1603 /*
1604 * unsigned int
1605 * div_scale(unsigned int dividend,
1606 * unsigned int divisor,
1607 * unsigned int *scale)
1608 *
1609 * This function returns (dividend << *scale) / divisor where *scale
1610 * is the largest possible value before overflow. This is used in
1611 * computation where precision must be achieved in order to avoid
1612 * floating point usage.
1613 *
1614 * Algorithm:
1615 * *scale = 0;
1616 * while (((dividend >> *scale) >= divisor))
1617 * (*scale)++;
1618 * *scale = 32 - *scale;
1619 * return ((dividend << *scale) / divisor);
1620 */
1621 ENTRY(div_scale)
1622 PUSH_FRAME
1623 xorl %ecx, %ecx /* *scale = 0 */
1624 xorl %eax, %eax
1625 movl ARG0, %edx /* get dividend */
1626 0:
1627 cmpl ARG1, %edx /* if (divisor > dividend) */
1628 jle 1f /* goto 1f */
1629 addl $1, %ecx /* (*scale)++ */
1630 shrdl $1, %edx, %eax /* dividend >> 1 */
1631 shrl $1, %edx /* dividend >> 1 */
1632 jmp 0b /* goto 0b */
1633 1:
1634 divl ARG1 /* (dividend << (32 - *scale)) / divisor */
1635 movl ARG2, %edx /* get scale */
1636 movl $32, (%edx) /* *scale = 32 */
1637 subl %ecx, (%edx) /* *scale -= %ecx */
1638 POP_FRAME
1639 ret
1640
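/*
 * Editorial worked example for div_scale above: div_scale(10, 3, &s) leaves
 * s == 30 and returns (10 << 30) / 3 == 3579139413, i.e. the quotient 10/3
 * as a 2.30 fixed-point value; callers keep the scale alongside the result
 * to interpret it.
 */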
1641 /*
1642 * unsigned int
1643 * mul_scale(unsigned int multiplicand,
1644 * unsigned int multiplier,
1645 * unsigned int *scale)
1646 *
1647 * This function returns ((multiplicand * multiplier) >> *scale) where
1648 * scale is the largest possible value before overflow. This is used in
1649 * computation where precision must be achieved in order to avoid
1650 * floating point usage.
1651 *
1652 * Algorithm:
1653 * *scale = 0;
1654 * while (overflow((multiplicand * multiplier) >> *scale))
1655 * (*scale)++;
1656 * return ((multiplicand * multiplier) >> *scale);
1657 */
1658 ENTRY(mul_scale)
1659 PUSH_FRAME
1660 xorl %ecx, %ecx /* *scale = 0 */
1661 movl ARG0, %eax /* get multiplicand */
1662 mull ARG1 /* multiplicand * multiplier */
1663 0:
1664 cmpl $0, %edx /* if (!overflow()) */
1665 je 1f /* goto 1 */
1666 addl $1, %ecx /* (*scale)++ */
1667 shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */
1668 shrl $1, %edx /* (multiplicand * multiplier) >> 1 */
1669 jmp 0b
1670 1:
1671 movl ARG2, %edx /* get scale */
1672 movl %ecx, (%edx) /* set *scale */
1673 POP_FRAME
1674 ret
1675
1676
1677
1678 /*
1679 * Double-fault exception handler task. The last gasp...
1680 */
1681 Entry(df_task_start)
1682 CCALL1(panic_double_fault32, $(T_DOUBLE_FAULT))
1683 hlt
1684
1685
1686 /*
1687 * machine-check handler task. The last gasp...
1688 */
1689 Entry(mc_task_start)
1690 CCALL1(panic_machine_check32, $(T_MACHINE_CHECK))
1691 hlt
1692
1693 /*
1694 * Compatibility mode's last gasp...
1695 */
1696 Entry(lo_df64)
1697 movl %esp, %eax
1698 CCALL1(panic_double_fault64, %eax)
1699 hlt
1700
1701 Entry(lo_mc64)
1702 movl %esp, %eax
1703 CCALL1(panic_machine_check64, %eax)
1704 hlt
1705