/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <platforms.h>
#include <mach_kgdb.h>
#include <stat_time.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <mach/exception_types.h>

#define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
        .globl  _PTmap,_PTD,_PTDpde
        .set    _PTmap,(PTDPTDI << PDESHIFT)
        .set    _PTD,_PTmap + (PTDPTDI * NBPG)
        .set    _PTDpde,_PTD + (PTDPTDI * PDESIZE)
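
/*
 * Illustrative consequence of the self-mapping (a sketch; PTESHIFT and
 * PTESIZE are assumed from the companion pmap headers, not defined in
 * this file): because the page directory maps itself at slot PTDPTDI,
 * the PTE covering any virtual address va is itself addressable at
 *
 *      _PTmap + (va >> PTESHIFT) * PTESIZE
 *
 * and the PDE for va at
 *
 *      _PTD + (va >> PDESHIFT) * PDESIZE
 *
 * which is exactly how _PTDpde above is formed.
 */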
/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
        .globl  _APTmap,_APTD,_APTDpde
        .set    _APTmap,(APTDPTDI << PDESHIFT)
        .set    _APTD,_APTmap + (APTDPTDI * NBPG)
        .set    _APTDpde,_PTD + (APTDPTDI * PDESIZE) /* note: the alternate PDE slot lives in the regular PTD */
#if defined(__MACHO__)
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define ETEXT_ADDR      (EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define ETEXT_ADDR      $ EXT(etext)
#endif
/*
 * CX(addr,reg): index the per-CPU array `addr' by the CPU number held
 * in `reg' (4-byte elements).
 */
#define CX(addr,reg)    addr(,reg,4)
/*
 * The following macros make calls into C code.
 * They dynamically align the stack to 16 bytes.
 * Arguments are moved (not pushed) onto the correctly aligned stack.
 * NOTE: EDI is destroyed in the process, and hence cannot
 * be directly used as a parameter. Users of this macro must
 * independently preserve EDI (a non-volatile) if the routine is
 * intended to be called from C, for instance.
 */
#define CCALL(fn)                        \
        movl    %esp, %edi              ;\
        andl    $0xFFFFFFF0, %esp       ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp

#define CCALL1(fn, arg1)                 \
        movl    %esp, %edi              ;\
        subl    $4, %esp                ;\
        andl    $0xFFFFFFF0, %esp       ;\
        movl    arg1, 0(%esp)           ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp

#define CCALL2(fn, arg1, arg2)           \
        movl    %esp, %edi              ;\
        subl    $8, %esp                ;\
        andl    $0xFFFFFFF0, %esp       ;\
        movl    arg2, 4(%esp)           ;\
        movl    arg1, 0(%esp)           ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp

#define CCALL3(fn, arg1, arg2, arg3)     \
        movl    %esp, %edi              ;\
        subl    $12, %esp               ;\
        andl    $0xFFFFFFF0, %esp       ;\
        movl    arg3, 8(%esp)           ;\
        movl    arg2, 4(%esp)           ;\
        movl    arg1, 0(%esp)           ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp
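
/*
 * Usage sketch (mirrors calls made later in this file): invoking the C
 * routine user_trap() with the saved-state pointer held in %ebx:
 *
 *      CCALL1(user_trap, %ebx)
 *
 * The macro saves %esp in %edi, aligns to 16 bytes, stores the argument
 * at 0(%esp), calls the routine, then restores the original %esp.
 */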
#if defined(__MACHO__)
#define RECOVERY_SECTION        .section __VECTORS, __recover
#else
#define RECOVERY_SECTION        .text
#endif

#define RECOVER_TABLE_START      \
        .align  2               ;\
        .globl  EXT(recover_table) ;\
LEXT(recover_table)             ;\
        .text

#define RECOVER(addr)            \
        .align  2               ;\
        .long   9f              ;\
        .long   addr            ;\
        .text                   ;\
9:

#define RECOVER_TABLE_END        \
        .align  2               ;\
        .globl  EXT(recover_table_end) ;\
LEXT(recover_table_end)         ;\
        .text
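
/*
 * Usage sketch (the copyin/copyout routines below follow this pattern):
 * each RECOVER() emits a (fault address, handler) pair into the recovery
 * table, so a fault on the next instruction resumes at the handler:
 *
 *      RECOVERY_SECTION
 *      RECOVER(copyin_fail)
 *      rep
 *      movsl                   // a fault here jumps to copyin_fail
 */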
/*
 * Allocate recovery and retry tables.
 */
        RECOVERY_SECTION
        RECOVER_TABLE_START

/*
 * Timing routines: 64-bit timer values are kept consistent without
 * locking by keeping a check copy of the high word; a writer updates
 * highchk, low, high in that order:
 */
        movl    %eax,TIMER_HIGHCHK(%ecx)
        movl    %edx,TIMER_LOW(%ecx)
        movl    %eax,TIMER_HIGH(%ecx)

/*
 * ...and a reader retries until the high and check words agree:
 */
0:      movl    TIMER_HIGH(%ecx),%edx
        movl    TIMER_LOW(%ecx),%eax
        cmpl    TIMER_HIGHCHK(%ecx),%edx
        jne     0b

#if STAT_TIME
/*
 * Statistical timing: no per-event timestamps are needed.
 */
#define TIME_TRAP_UENTRY
#define TIME_TRAP_UEXIT
#define TIME_INT_ENTRY
#define TIME_INT_EXIT

#else /* STAT_TIME */
/*
 * Low 32-bits of nanotime returned in %eax.
 * Computed from tsc based on the scale factor
 * and an implicit 32 bit shift.
 * Uses %esi, %edi, %ebx, %ecx and %edx.
 */
#define RNT_INFO        _rtc_nanotime_info
#define NANOTIME32                                                       \
0:      movl    RNT_INFO+RNT_TSC_BASE,%esi                              ;\
        movl    RNT_INFO+RNT_TSC_BASE+4,%edi                            ;\
        rdtsc                                                           ;\
        subl    %esi,%eax               /* tsc - tsc_base */            ;\
        sbbl    %edi,%edx                                               ;\
        movl    RNT_INFO+RNT_SCALE,%ecx                                 ;\
        movl    %edx,%ebx               /* delta * scale */             ;\
        mull    %ecx                                                    ;\
        movl    %ebx,%eax                                               ;\
        movl    %edx,%ebx                                               ;\
        mull    %ecx                                                    ;\
        addl    %ebx,%eax                                               ;\
        addl    RNT_INFO+RNT_NS_BASE,%eax /* add ns_base */             ;\
        cmpl    RNT_INFO+RNT_TSC_BASE,%esi /* tsc_base changed? */      ;\
        jne     0b                                                      ;\
        cmpl    RNT_INFO+RNT_TSC_BASE+4,%edi                            ;\
        jne     0b
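
/*
 * Illustrative C equivalent of NANOTIME32 (field names follow the
 * RNT_* offsets above; a sketch, not the kernel's rtc code):
 *
 *      do {
 *          base  = info->tsc_base;            // 64-bit snapshot
 *          delta = rdtsc() - base;
 *          ns    = info->ns_base +
 *                  (uint32_t)((delta * (uint64_t)info->scale) >> 32);
 *      } while (base != info->tsc_base);      // retry if it moved
 */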
/*
 * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
 */
#define TIMER_UPDATE(treg,dreg)                                          \
        addl    TIMER_LOW(treg),dreg            /* add delta low bits */ ;\
        adcl    $0,TIMER_HIGHCHK(treg)          /* add carry check bits */ ;\
        movl    dreg,TIMER_LOW(treg)            /* store updated low bit */ ;\
        movl    TIMER_HIGHCHK(treg),dreg        /* copy high check bits */ ;\
        movl    dreg,TIMER_HIGH(treg)           /* to high bits */
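
/*
 * Illustrative C equivalent (a sketch of the lock-free scheme: the
 * carry goes into the check word first, so a concurrent reader who
 * sees high == highchk gets a consistent 64-bit value):
 *
 *      uint64_t sum = timer->low + delta;
 *      timer->highchk += (uint32_t)(sum >> 32);  // carry, if any
 *      timer->low      = (uint32_t)sum;
 *      timer->high     = timer->highchk;
 */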
/*
 * Add time delta to old timer and start new.
 */
#define TIMER_EVENT(old,new)                                             \
        NANOTIME32                      /* eax low bits nanosecs */     ;\
        movl    %gs:CPU_PROCESSOR,%ecx  /* get current processor */     ;\
        movl    CURRENT_TIMER(%ecx),%ecx /* get current timer */        ;\
        movl    %eax,%edx               /* save timestamp in %edx */    ;\
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */      ;\
        TIMER_UPDATE(%ecx,%eax)         /* update timer struct */       ;\
        addl    $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\
        movl    %edx,TIMER_TSTAMP(%ecx) /* set timestamp */             ;\
        movl    %gs:CPU_PROCESSOR,%edx  /* get current processor */     ;\
        movl    %ecx,CURRENT_TIMER(%edx) /* set current timer */
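
/*
 * Illustrative C equivalent of TIMER_EVENT(old,new) (a sketch):
 *
 *      now = nanotime32();
 *      timer = processor->current_timer;
 *      timer_update(timer, now - timer->tstamp); // close out old timer
 *      timer += (new - old);                     // adjacent in struct
 *      timer->tstamp = now;                      // start new timer
 *      processor->current_timer = timer;
 */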
/*
 * Update time on user trap entry.
 * Uses %eax,%ecx,%edx,%esi.
 */
#define TIME_TRAP_UENTRY        TIMER_EVENT(USER,SYSTEM)

/*
 * Update time on user trap exit.
 * Uses %eax,%ecx,%edx,%esi.
 */
#define TIME_TRAP_UEXIT         TIMER_EVENT(SYSTEM,USER)
/*
 * Update time on interrupt entry.
 * Uses %eax,%ecx,%edx,%esi.
 */
#define TIME_INT_ENTRY                                                   \
        NANOTIME32                      /* eax low bits nanosecs */     ;\
        movl    %gs:CPU_PROCESSOR,%ecx  /* get current processor */     ;\
        movl    CURRENT_TIMER(%ecx),%ecx /* get current timer */        ;\
        movl    %eax,%edx               /* save timestamp in %edx */    ;\
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */      ;\
        TIMER_UPDATE(%ecx,%eax)         /* update timer struct */       ;\
        movl    %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */     ;\
        addl    $(SYSTEM_TIMER),%ecx    /* point to sys timer */        ;\
        movl    %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
/*
 * Update time on interrupt exit.
 * Uses %eax, %ecx, %edx, %esi.
 */
#define TIME_INT_EXIT                                                    \
        NANOTIME32                      /* eax low bits nanosecs */     ;\
        movl    %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */     ;\
        addl    $(SYSTEM_TIMER),%ecx    /* point to sys timer */        ;\
        movl    %eax,%edx               /* save timestamp in %edx */    ;\
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */      ;\
        TIMER_UPDATE(%ecx,%eax)         /* update timer struct */       ;\
        movl    %gs:CPU_PROCESSOR,%ecx  /* get current processor */     ;\
        movl    CURRENT_TIMER(%ecx),%ecx /* interrupted timer */        ;\
        movl    %edx,TIMER_TSTAMP(%ecx) /* set timestamp */

#endif /* STAT_TIME */
/*
 * Traditional, not ANSI.
 */
        .globl  label/**/count          ;\
        .globl  label/**/limit          ;\
        addl    $1,%ss:label/**/count   ;\
        cmpl    $0,label/**/limit       ;\
        movl    %ss:label/**/count,%eax ;\
        cmpl    %eax,%ss:label/**/limit ;\
#if MACH_KDB
/*
 * Last-ditch debug code to handle faults that might result
 * from entering kernel (from collocated server) on an invalid
 * stack.  On collocated entry, there's no hardware-initiated
 * stack switch, so a valid stack must be in place when an
 * exception occurs, or we may double-fault.
 *
 * In case of a double-fault, our only recourse is to switch
 * hardware "tasks", so that we avoid using the current stack.
 *
 * The idea here is just to get the processor into the debugger,
 * post-haste.  No attempt is made to fix up whatever error got
 * us here, so presumably continuing from the debugger will
 * simply land us here again -- at best.
 *
 * Note that the per-fault entry points are not currently
 * functional.  The only way to make them work would be to
 * set up separate TSS's for each fault type, which doesn't
 * currently seem worthwhile.  (The offset part of a task
 * gate is always ignored.)  So all faults that task switch
 * currently resume at db_task_start.
 */
/*
 * Double fault (Murphy's point) - error code (0) on stack
 */
Entry(db_task_dbl_fault)
        popl    %eax
        movl    $(T_DOUBLE_FAULT),%ebx
        jmp     db_task_start

/*
 * Segment not present - error code on stack
 */
Entry(db_task_seg_np)
        popl    %eax
        movl    $(T_SEGMENT_NOT_PRESENT),%ebx
        jmp     db_task_start

/*
 * Stack fault - error code on (current) stack
 */
Entry(db_task_stk_fault)
        popl    %eax
        movl    $(T_STACK_FAULT),%ebx
        jmp     db_task_start

/*
 * General protection fault - error code on stack
 */
Entry(db_task_gen_prot)
        popl    %eax
        movl    $(T_GENERAL_PROTECTION),%ebx
        jmp     db_task_start
/*
 * The entry point where execution resumes after last-ditch debugger task
 * switch.
 */
Entry(db_task_start)
        movl    %esp,%edx
        subl    $(ISS32_SIZE),%edx
        movl    %edx,%esp               /* allocate i386_saved_state on stack */
        movl    %eax,R_ERR(%esp)
        movl    %ebx,R_TRAPNO(%esp)
        pushl   %edx
        CPU_NUMBER(%edx)
        movl    CX(EXT(master_dbtss),%edx),%edx
        movl    TSS_LINK(%edx),%eax
        pushl   %eax                    /* pass along selector of previous TSS */
        call    EXT(db_tss_to_frame)
        popl    %eax                    /* get rid of TSS selector */
        call    EXT(db_trap_from_asm)
        addl    $0x4,%esp
        /*
         * And now...?
         */
        iret                            /* ha, ha, ha... */
#endif /* MACH_KDB */
/*
 * Called as a function, makes the current thread
 * return from the kernel as if from an exception.
 */
        .globl  EXT(thread_exception_return)
        .globl  EXT(thread_bootstrap_return)
LEXT(thread_exception_return)
LEXT(thread_bootstrap_return)
        cli
        movl    %gs:CPU_KERNEL_STACK,%ecx
        movl    (%ecx),%esp             /* switch back to PCB stack */
        jmp     EXT(return_from_trap)
Entry(call_continuation)
        movl    S_ARG0,%eax             /* get continuation */
        movl    S_ARG1,%edx             /* continuation param */
        movl    S_ARG2,%ecx             /* wait result */
        movl    %gs:CPU_KERNEL_STACK,%esp /* pop the stack */
        xorl    %ebp,%ebp               /* zero frame pointer */
        subl    $8,%esp                 /* align the stack */
        pushl   %ecx
        pushl   %edx                    /* push the args */
        call    *%eax                   /* call continuation */

        movl    %gs:CPU_ACTIVE_THREAD,%eax
        pushl   %eax
        call    EXT(thread_terminate)
/*******************************************************************************************************
 *
 * All 64 bit task 'exceptions' enter lo_alltraps:
 *      esp     -> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *      cr3      -> kernel directory
 *      esp      -> low based stack
 *      ss/ds/es -> KERNEL_DS
 *
 *      interrupts disabled
 *      direction flag cleared
 */
Entry(lo_alltraps)
        movl    R_CS(%esp),%eax         /* assume 32-bit state */
        cmpl    $(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
        jne     1f
        movl    R64_CS(%esp),%eax       /* 64-bit user mode */
1:
        testb   $3,%eax                 /* user mode? */
        jz      trap_from_kernel        /* no, trap from kernel */

        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti

        CCALL1(user_trap, %ebx)         /* call user trap routine */
        cli                             /* hold off intrs - critical section */
        popl    %esp                    /* switch back to PCB stack */
/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled
 */
LEXT(return_from_trap)
        movl    %gs:CPU_PENDING_AST,%eax
        testl   %eax,%eax
        je      EXT(return_to_user)     /* branch if no AST */

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti                             /* interrupts always enabled on return to user mode */

        pushl   %ebx                    /* save PCB stack */
        CCALL1(i386_astintr, $0)        /* take the AST */
        cli
        popl    %esp                    /* switch back to PCB stack (w/exc link) */
        jmp     EXT(return_from_trap)   /* and check again (rare) */

LEXT(return_to_user)
        TIME_TRAP_UEXIT

LEXT(ret_to_user)
        cmpl    $0, %gs:CPU_IS64BIT
        je      EXT(lo_ret_to_user)
        jmp     EXT(lo64_ret_to_user)
/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to state at time of trap
 * as soon as it's safe for us to do so and not recurse doing preemption
 */
trap_from_kernel:
        movl    %esp, %eax              /* saved state addr */
        CCALL1(kernel_trap, %eax)       /* to kernel trap routine */
        cli

        movl    %gs:CPU_PENDING_AST,%eax /* get pending asts */
        testl   $ AST_URGENT,%eax       /* any urgent preemption? */
        je      ret_to_kernel           /* no, nothing to do */
        cmpl    $ T_PREEMPT,R_TRAPNO(%esp)
        je      ret_to_kernel           /* T_PREEMPT handled in kernel_trap() */
        testl   $ EFL_IF,R_EFLAGS(%esp) /* interrupts disabled? */
        je      ret_to_kernel
        cmpl    $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
        jne     ret_to_kernel
        movl    %gs:CPU_KERNEL_STACK,%eax
        movl    %esp,%ecx
        xorl    %eax,%ecx
        andl    $(-KERNEL_STACK_SIZE),%ecx
        testl   %ecx,%ecx               /* are we on the kernel stack? */
        jne     ret_to_kernel           /* no, skip it */

        CCALL1(i386_astintr, $1)        /* take the AST */

ret_to_kernel:
        cmpl    $0, %gs:CPU_IS64BIT
        je      EXT(lo_ret_to_kernel)
        jmp     EXT(lo64_ret_to_kernel)
/*******************************************************************************************************
 *
 * All interrupts on all tasks enter here with:
 *      esp      -> x86_saved_state_t
 *
 *      cr3      -> kernel directory
 *      esp      -> low based stack
 *      ss/ds/es -> KERNEL_DS
 *
 *      interrupts disabled
 *      direction flag cleared
 */
Entry(lo_allintrs)
        /*
         * test whether already on interrupt stack
         */
        movl    %gs:CPU_INT_STACK_TOP,%ecx
        cmpl    %esp,%ecx
        jb      1f
        leal    -INTSTACK_SIZE(%ecx),%edx
        cmpl    %esp,%edx
        jb      int_from_intstack
1:
        xchgl   %ecx,%esp               /* switch to interrupt stack */

        movl    %cr0,%eax               /* get cr0 */
        orl     $(CR0_TS),%eax          /* or in TS bit */
        movl    %eax,%cr0               /* set cr0 */

        subl    $8, %esp                /* for 16-byte stack alignment */
        pushl   %ecx                    /* save pointer to old stack */
        movl    %ecx,%gs:CPU_INT_STATE  /* save intr state */

        TIME_INT_ENTRY                  /* do timing */

        incl    %gs:CPU_PREEMPTION_LEVEL
        incl    %gs:CPU_INTERRUPT_LEVEL

        movl    %gs:CPU_INT_STATE, %eax
        CCALL1(PE_incoming_interrupt, %eax) /* call generic interrupt routine */

        cli                             /* just in case we returned with intrs enabled */
        xorl    %eax,%eax
        movl    %eax,%gs:CPU_INT_STATE  /* clear intr state pointer */
        .globl  EXT(return_to_iret)
LEXT(return_to_iret)                    /* (label for kdb_kintr and hardclock) */

        decl    %gs:CPU_INTERRUPT_LEVEL
        decl    %gs:CPU_PREEMPTION_LEVEL

        TIME_INT_EXIT                   /* do timing */

        movl    %gs:CPU_ACTIVE_THREAD,%eax
        movl    ACT_PCB(%eax),%eax      /* get act`s PCB */
        movl    PCB_FPS(%eax),%eax      /* get pcb's ims.ifps */
        cmpl    $0,%eax                 /* Is there a context */
        je      1f                      /* Branch if not */
        movl    FP_VALID(%eax),%eax     /* Load fp_valid */
        cmpl    $0,%eax                 /* Check if valid */
        jne     1f                      /* Branch if valid */
        clts                            /* Clear TS */
        jmp     2f
1:
        movl    %cr0,%eax               /* get cr0 */
        orl     $(CR0_TS),%eax          /* or in TS bit */
        movl    %eax,%cr0               /* set cr0 */
2:
        popl    %esp                    /* switch back to old stack */
        /* Load interrupted code segment into %eax */
        movl    R_CS(%esp),%eax         /* assume 32-bit state */
        cmpl    $(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
        jne     3f
        movl    R64_CS(%esp),%eax       /* 64-bit user mode */
3:
        testb   $3,%eax                 /* user mode, */
        jnz     ast_from_interrupt_user /* go handle potential ASTs */
        /*
         * we only want to handle preemption requests if
         * the interrupt fell in the kernel context
         * and preemption isn't disabled
         */
        movl    %gs:CPU_PENDING_AST,%eax
        testl   $ AST_URGENT,%eax       /* any urgent requests? */
        je      ret_to_kernel           /* no, nothing to do */

        cmpl    $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
        jne     ret_to_kernel           /* yes, skip it */

        movl    %gs:CPU_KERNEL_STACK,%eax
        movl    %esp,%ecx
        xorl    %eax,%ecx
        andl    $(-KERNEL_STACK_SIZE),%ecx
        testl   %ecx,%ecx               /* are we on the kernel stack? */
        jne     ret_to_kernel           /* no, skip it */

        /*
         * Take an AST from kernel space.  We don't need (and don't want)
         * to do as much as the case where the interrupt came from user
         * space.
         */
        CCALL1(i386_astintr, $1)

        jmp     ret_to_kernel
/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
        incl    %gs:CPU_PREEMPTION_LEVEL
        incl    %gs:CPU_INTERRUPT_LEVEL

        movl    %esp, %edx              /* i386_saved_state */
        CCALL1(PE_incoming_interrupt, %edx)

        decl    %gs:CPU_INTERRUPT_LEVEL
        decl    %gs:CPU_PREEMPTION_LEVEL

        jmp     ret_to_kernel
/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
        movl    %gs:CPU_PENDING_AST,%eax
        testl   %eax,%eax               /* pending ASTs? */
        je      EXT(ret_to_user)        /* no, nothing to do */

        TIME_TRAP_UENTRY

        jmp     EXT(return_from_trap)   /* return */
/*******************************************************************************************************
 *
 * System call entries via INTR_GATE or sysenter:
 *
 *      esp      -> i386_saved_state_t
 *      cr3      -> kernel directory
 *      esp      -> low based stack
 *      ss/ds/es -> KERNEL_DS
 *
 *      interrupts disabled
 *      direction flag cleared
 */
Entry(lo_sysenter)
        /*
         * We can be here either for a mach syscall or a unix syscall,
         * as indicated by the sign of the code:
         */
        movl    R_EAX(%esp),%eax
        testl   %eax,%eax
        js      EXT(lo_mach_scall)      /* < 0 => mach */
                                        /* > 0 => unix */
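
/*
 * Illustrative examples (trap numbers live in the syscall tables, not
 * in this file): %eax = -31 (mach_msg_trap) takes the mach path above,
 * while %eax = 4 (BSD write) falls through to the unix path below.
 */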
Entry(lo_unix_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti

        movl    %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
        movl    ACT_TASK(%ecx),%ecx     /* point to current task */
        addl    $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */

        CCALL1(unix_syscall, %ebx)
        /*
         * always returns through thread_exception_return
         */
Entry(lo_mach_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti

        movl    %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
        movl    ACT_TASK(%ecx),%ecx     /* point to current task */
        addl    $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */

        CCALL1(mach_call_munger, %ebx)
        /*
         * always returns through thread_exception_return
         */
Entry(lo_mdep_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti

        CCALL1(machdep_syscall, %ebx)
        /*
         * always returns through thread_exception_return
         */
Entry(lo_diag_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
        xchgl   %ebx,%esp               // Switch to it, saving the previous

        pushl   %ebx                    // Push the previous stack
        CCALL1(diagCall, %ebx)          // Call diagnostics
        cli                             // Disable interruptions just in case they were enabled
        popl    %esp                    // Get back the original stack

        cmpl    $0,%eax                 // What kind of return is this?
        jne     EXT(return_to_user)     // Normal return, do not check asts...

        CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
                                        // pass what would be the diag syscall
                                        // error return - cause an exception
        /* no return */
/*******************************************************************************************************
 *
 * System call entries via syscall only:
 *
 *      esp      -> x86_saved_state64_t
 *      cr3      -> kernel directory
 *      esp      -> low based stack
 *      ss/ds/es -> KERNEL_DS
 *
 *      interrupts disabled
 *      direction flag cleared
 */
Entry(lo_syscall)
        /*
         * We can be here either for a mach, unix, machdep or diag syscall,
         * as indicated by the syscall class:
         */
        movl    R64_RAX(%esp), %eax     /* syscall number/class */
        movl    %eax, %ebx
        andl    $(SYSCALL_CLASS_MASK), %ebx /* syscall class */
        cmpl    $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
        je      EXT(lo64_mach_scall)
        cmpl    $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
        je      EXT(lo64_unix_scall)
        cmpl    $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
        je      EXT(lo64_mdep_scall)
        cmpl    $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
        je      EXT(lo64_diag_scall)

        /* Syscall class unknown */
        CCALL3(i386_exception, $(EXC_SYSCALL), %eax, $1)
        /* no return */
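
/*
 * Illustrative encoding example (classes per syscall_sw.h, with
 * SYSCALL_CLASS_SHIFT assumed to be 24): a 64-bit BSD write is
 * %rax = 0x2000004 (class 2, number 4), and mach_msg_trap is
 * %rax = 0x100001f (class 1, number 31).
 */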
Entry(lo64_unix_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti

        movl    %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
        movl    ACT_TASK(%ecx),%ecx     /* point to current task */
        addl    $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */

        CCALL1(unix_syscall64, %ebx)
        /*
         * always returns through thread_exception_return
         */
Entry(lo64_mach_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti

        movl    %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
        movl    ACT_TASK(%ecx),%ecx     /* point to current task */
        addl    $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */

        CCALL1(mach_call_munger64, %ebx)
        /*
         * always returns through thread_exception_return
         */
Entry(lo64_mdep_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti

        CCALL1(machdep_syscall64, %ebx)
        /*
         * always returns through thread_exception_return
         */
Entry(lo64_diag_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
        xchgl   %ebx,%esp               // Switch to it, saving the previous

        pushl   %ebx                    // Push the previous stack
        CCALL1(diagCall64, %ebx)        // Call diagnostics
        cli                             // Disable interruptions just in case they were enabled
        popl    %esp                    // Get back the original stack

        cmpl    $0,%eax                 // What kind of return is this?
        jne     EXT(return_to_user)     // Normal return, do not check asts...

        CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
                                        // error return - cause an exception
        /* no return */
/******************************************************************************************************
 *
 * Copy from user/kernel address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: byte count
 */
Entry(copyinphys_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%ds

Entry(copyinphys_kern)
        movl    $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
        mov     %cx,%ds
        jmp     copyin_common

Entry(copyin_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%ds

Entry(copyin_kern)
copyin_common:
        pushl   %esi
        pushl   %edi                    /* save registers */

        movl    8+S_ARG0,%esi           /* get source - window offset or kernel address */
        movl    8+S_ARG1,%edi           /* get destination - kernel address */
        movl    8+S_ARG2,%edx           /* get count */

        cld                             /* count up */
        movl    %edx,%ecx               /* move by longwords first */
        shrl    $2,%ecx
        RECOVERY_SECTION
        RECOVER(copyin_fail)
        rep
        movsl                           /* move longwords */
        movl    %edx,%ecx               /* now move remaining bytes */
        andl    $3,%ecx
        RECOVERY_SECTION
        RECOVER(copyin_fail)
        rep
        movsb
        xorl    %eax,%eax               /* return 0 for success */

copyin_ret:
        mov     %ss,%cx                 /* restore kernel data and extended segments */
        mov     %cx,%ds
        mov     %cx,%es

        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copyin_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        jmp     copyin_ret              /* pop frame and return */
/*
 * Copy string from user/kern address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: max byte count
 * arg3: actual byte count (OUT)
 */
Entry(copyinstr_kern)
        mov     %ds,%cx                 /* kernel data segment for source */
        jmp     copyinstr_common

Entry(copyinstr_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */

copyinstr_common:
        mov     %cx,%fs                 /* source segment */

        pushl   %esi
        pushl   %edi                    /* save registers */

        movl    8+S_ARG0,%esi           /* get source - window offset or kernel address */
        movl    8+S_ARG1,%edi           /* get destination - kernel address */
        movl    8+S_ARG2,%edx           /* get count */

        xorl    %eax,%eax               /* set to 0 here so that the high 24 bits */
                                        /* are 0 for the cmpl against 0 */
2:
        RECOVERY_SECTION
        RECOVER(copystr_fail)           /* copy bytes... */
        movb    %fs:(%esi),%al
        incl    %esi
        testl   %edi,%edi               /* if kernel address is ... */
        jz      3f                      /* not NULL */
        movb    %al,(%edi)              /* copy the byte */
        incl    %edi
3:
        testl   %eax,%eax               /* did we just stuff the 0-byte? */
        jz      4f                      /* yes, return 0 status already in %eax */
        decl    %edx                    /* decrement #bytes left in buffer */
        jnz     2b                      /* buffer not full so copy in another byte */
        movl    $(ENAMETOOLONG),%eax    /* buffer full but no 0-byte: ENAMETOOLONG */
4:
        movl    8+S_ARG3,%edi           /* get OUT len ptr */
        cmpl    $0,%edi
        jz      copystr_ret             /* if null, just return */
        subl    8+S_ARG0,%esi
        movl    %esi,(%edi)             /* else set OUT arg to xfer len */

copystr_ret:
        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copystr_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        jmp     copystr_ret             /* pop frame and return */
/*
 * Copy to user/kern address space.
 * arg0: kernel address
 * arg1: window offset or kernel address
 * arg2: byte count
 */
Entry(copyoutphys_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%es

Entry(copyoutphys_kern)
        movl    $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
        mov     %cx,%es
        jmp     copyout_common

Entry(copyout_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%es

Entry(copyout_kern)

copyout_common:
        pushl   %esi
        pushl   %edi                    /* save registers */

        movl    8+S_ARG0,%esi           /* get source - kernel address */
        movl    8+S_ARG1,%edi           /* get destination - window offset or kernel address */
        movl    8+S_ARG2,%edx           /* get count */

        cld                             /* count up */
        movl    %edx,%ecx               /* move by longwords first */
        shrl    $2,%ecx
        RECOVERY_SECTION
        RECOVER(copyout_fail)
        rep
        movsl
        movl    %edx,%ecx               /* now move remaining bytes */
        andl    $3,%ecx
        RECOVERY_SECTION
        RECOVER(copyout_fail)
        rep
        movsb
        xorl    %eax,%eax               /* return 0 for success */

copyout_ret:
        mov     %ss,%cx                 /* restore kernel segment */
        mov     %cx,%es

        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copyout_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        jmp     copyout_ret             /* pop frame and return */
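
/*
 * C-level view (a sketch; prototypes as in the BSD/Mach headers of
 * this era): callers reach these routines through wrappers such as
 *
 *      int copyin(user_addr_t uaddr, void *kaddr, vm_size_t len);
 *      int copyout(const void *kaddr, user_addr_t uaddr, vm_size_t len);
 *
 * A nonzero return (EFAULT above) means the user range faulted; the
 * RECOVER() entries are what turn a page fault into that error return.
 */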
/*
 * io register must not be used on slaves (no AT bus)
 */
#define ILL_ON_SLAVE

#if MACH_ASSERT

#define ARG0            B_ARG0
#define ARG1            B_ARG1
#define ARG2            B_ARG2
#define PUSH_FRAME      FRAME
#define POP_FRAME       EMARF

#else /* MACH_ASSERT */

#define ARG0            S_ARG0
#define ARG1            S_ARG1
#define ARG2            S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif /* MACH_ASSERT */

#if MACH_KDB || MACH_ASSERT
/*
 * Following routines are also defined as macros in i386/pio.h
 * Compile them when MACH_KDB is configured so that they
 * can be invoked from the debugger.
 */

/*
 * void outb(unsigned char *io_port,
 *           unsigned char byte)
 *
 * Output a byte to an IO port.
 */
ENTRY(outb)
        PUSH_FRAME
        movl    ARG0,%edx               /* IO port address */
        movl    ARG1,%eax               /* data to output */
        outb    %al,%dx                 /* send it out */
        POP_FRAME
        ret

/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Input a byte from an IO port.
 */
ENTRY(inb)
        PUSH_FRAME
        movl    ARG0,%edx               /* IO port address */
        xor     %eax,%eax               /* clear high bits of register */
        inb     %dx,%al                 /* get the byte */
        POP_FRAME
        ret

/*
 * void outw(unsigned short *io_port,
 *           unsigned short word)
 *
 * Output a word to an IO port.
 */
ENTRY(outw)
        PUSH_FRAME
        movl    ARG0,%edx               /* IO port address */
        movl    ARG1,%eax               /* data to output */
        outw    %ax,%dx                 /* send it out */
        POP_FRAME
        ret

/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Input a word from an IO port.
 */
ENTRY(inw)
        PUSH_FRAME
        movl    ARG0,%edx               /* IO port address */
        xor     %eax,%eax               /* clear high bits of register */
        inw     %dx,%ax                 /* get the word */
        POP_FRAME
        ret

/*
 * void outl(unsigned int *io_port,
 *           unsigned int value)
 *
 * Output an int to an IO port.
 */
ENTRY(outl)
        PUSH_FRAME
        movl    ARG0,%edx               /* IO port address */
        movl    ARG1,%eax               /* data to output */
        outl    %eax,%dx                /* send it out */
        POP_FRAME
        ret

/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Input an int from an IO port.
 */
ENTRY(inl)
        PUSH_FRAME
        movl    ARG0,%edx               /* IO port address */
        inl     %dx,%eax                /* get the int */
        POP_FRAME
        ret

#endif /* MACH_KDB || MACH_ASSERT */
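
/*
 * Usage sketch (illustrative; the CMOS index/data ports are standard
 * PC hardware, not defined in this file): reading CMOS register 0
 * (the seconds counter) from the debugger:
 *
 *      outb(0x70, 0x00)        // select CMOS register 0
 *      seconds = inb(0x71)     // read its value
 */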
/*
 * void loutb(unsigned byte *io_port,
 *            unsigned byte *data,
 *            unsigned int count)
 *
 * Output an array of bytes to an IO port.
 */
ENTRY(loutb)
        PUSH_FRAME
        movl    %esi,%eax               /* save register */
        movl    ARG0,%edx               /* get io port number */
        movl    ARG1,%esi               /* get data address */
        movl    ARG2,%ecx               /* get count */
        cld                             /* count up */
        rep
        outsb                           /* output */
        movl    %eax,%esi               /* restore register */
        POP_FRAME
        ret

/*
 * void loutw(unsigned short *io_port,
 *            unsigned short *data,
 *            unsigned int count)
 *
 * Output an array of shorts to an IO port.
 */
ENTRY(loutw)
        PUSH_FRAME
        movl    %esi,%eax               /* save register */
        movl    ARG0,%edx               /* get io port number */
        movl    ARG1,%esi               /* get data address */
        movl    ARG2,%ecx               /* get count */
        cld                             /* count up */
        rep
        outsw                           /* output */
        movl    %eax,%esi               /* restore register */
        POP_FRAME
        ret

/*
 * void loutl(unsigned short io_port,
 *            unsigned int *data,
 *            unsigned int count)
 *
 * Output an array of longs to an IO port.
 */
ENTRY(loutl)
        PUSH_FRAME
        movl    %esi,%eax               /* save register */
        movl    ARG0,%edx               /* get io port number */
        movl    ARG1,%esi               /* get data address */
        movl    ARG2,%ecx               /* get count */
        cld                             /* count up */
        rep
        outsl                           /* output */
        movl    %eax,%esi               /* restore register */
        POP_FRAME
        ret

/*
 * void linb(unsigned char *io_port,
 *           unsigned char *data,
 *           unsigned int count)
 *
 * Input an array of bytes from an IO port.
 */
ENTRY(linb)
        PUSH_FRAME
        movl    %edi,%eax               /* save register */
        movl    ARG0,%edx               /* get io port number */
        movl    ARG1,%edi               /* get data address */
        movl    ARG2,%ecx               /* get count */
        cld                             /* count up */
        rep
        insb                            /* input */
        movl    %eax,%edi               /* restore register */
        POP_FRAME
        ret

/*
 * void linw(unsigned short *io_port,
 *           unsigned short *data,
 *           unsigned int count)
 *
 * Input an array of shorts from an IO port.
 */
ENTRY(linw)
        PUSH_FRAME
        movl    %edi,%eax               /* save register */
        movl    ARG0,%edx               /* get io port number */
        movl    ARG1,%edi               /* get data address */
        movl    ARG2,%ecx               /* get count */
        cld                             /* count up */
        rep
        insw                            /* input */
        movl    %eax,%edi               /* restore register */
        POP_FRAME
        ret

/*
 * void linl(unsigned short io_port,
 *           unsigned int *data,
 *           unsigned int count)
 *
 * Input an array of longs from an IO port.
 */
ENTRY(linl)
        PUSH_FRAME
        movl    %edi,%eax               /* save register */
        movl    ARG0,%edx               /* get io port number */
        movl    ARG1,%edi               /* get data address */
        movl    ARG2,%ecx               /* get count */
        cld                             /* count up */
        rep
        insl                            /* input */
        movl    %eax,%edi               /* restore register */
        POP_FRAME
        ret
/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
        movl    S_ARG0,%ecx             /* msr number */
        RECOVERY_SECTION
        RECOVER(rdmsr_fail)
        rdmsr
        movl    S_ARG1,%ecx
        movl    %eax,(%ecx)             /* store low word */
        movl    S_ARG2,%ecx
        movl    %edx,(%ecx)             /* store high word */
        movl    $0,%eax                 /* successful read */
        ret

rdmsr_fail:
        movl    $1,%eax                 /* #GP on rdmsr: report failure */
        ret

/*
 * Done with recovery table.
 */
        RECOVERY_SECTION
        RECOVER_TABLE_END
/* dr<i>(address, type, len, persistence)
 */
        movl    %eax,EXT(dr_addr)
        movl    %eax,EXT(dr_addr)+1*4
        movl    %eax,EXT(dr_addr)+2*4
        movl    %eax,EXT(dr_addr)+3*4
        movl    %edx,EXT(dr_addr)+4*4
        andl    dr_msk(,%ecx,2),%edx    /* clear out new entry */
        movl    %edx,EXT(dr_addr)+5*4
        movl    %edx,EXT(dr_addr)+7*4
        lidt    null_idtr               /* disable the interrupt handler */
        xor     %ecx,%ecx               /* generate a divide by zero */
        div     %ecx,%eax               /* reboot now */
        ret                             /* this will "never" be executed */

#endif /* SYMMETRY */
/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
        movl    S_ARG0, %ecx            /* bit number */
        movl    S_ARG1, %eax            /* address */
        btsl    %ecx, (%eax)            /* set bit */
        ret

/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
        movl    S_ARG0, %ecx            /* bit number */
        movl    S_ARG1, %eax            /* address */
        btrl    %ecx, (%eax)            /* clear bit */
        ret

/*
 * ffsbit(int *s) - find first set bit in bit string
 */
ENTRY(ffsbit)
        movl    S_ARG0, %ecx            /* address */
        movl    $0, %edx                /* base offset */
0:
        bsfl    (%ecx), %eax            /* check argument bits */
        jnz     1f                      /* found bit, return */
        addl    $4, %ecx                /* increment address */
        addl    $32, %edx               /* increment offset */
        jmp     0b                      /* try again */
1:
        addl    %edx, %eax              /* return offset */
        ret

/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string
 */
ENTRY(testbit)
        movl    S_ARG0,%eax             /* Get the bit to test */
        movl    S_ARG1,%ecx             /* get the array string */
        btl     %eax,(%ecx)             /* test the bit */
        sbbl    %eax,%eax               /* 0 if clear, -1 if set */
        ret
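
/*
 * Worked example (illustrative): with s[] = { 0x00000000, 0x00000010 },
 * ffsbit(s) skips the zero word (offset becomes 32), finds bit 4 in the
 * next word, and returns 36.
 */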
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 */
Entry(jail)
        jmp     EXT(jail)
/*
 * div_scale(unsigned int dividend,
 *           unsigned int divisor,
 *           unsigned int *scale)
 *
 * This function returns (dividend << *scale) / divisor where *scale
 * is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *      *scale = 0;
 *      while (((dividend >> *scale) >= divisor))
 *              (*scale)++;
 *      *scale = 32 - *scale;
 *      return ((dividend << *scale) / divisor);
 */
ENTRY(div_scale)
        PUSH_FRAME
        xorl    %ecx, %ecx              /* *scale = 0 */
        xorl    %eax, %eax
        movl    ARG0, %edx              /* get dividend */
0:
        cmpl    ARG1, %edx              /* if (divisor > dividend) */
        jle     1f                      /* goto 1f */
        addl    $1, %ecx                /* (*scale)++ */
        shrdl   $1, %edx, %eax          /* dividend >> 1 */
        shrl    $1, %edx                /* dividend >> 1 */
        jmp     0b                      /* goto 0b */
1:
        divl    ARG1                    /* (dividend << (32 - *scale)) / divisor */
        movl    ARG2, %edx              /* get scale */
        movl    $32, (%edx)             /* *scale = 32 */
        subl    %ecx, (%edx)            /* *scale -= %ecx */
        POP_FRAME
        ret
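
/*
 * Worked example (illustrative): div_scale(10, 3, &s) shifts twice
 * (10 -> 5 -> 2 < 3), so s = 32 - 2 = 30 and the routine returns
 * (10 << 30) / 3 = 3579139413, the full-precision quotient scaled
 * by 2^30.
 */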
/*
 * mul_scale(unsigned int multiplicand,
 *           unsigned int multiplier,
 *           unsigned int *scale)
 *
 * This function returns ((multiplicand * multiplier) >> *scale) where
 * scale is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *      *scale = 0;
 *      while (overflow((multiplicand * multiplier) >> *scale))
 *              (*scale)++;
 *      return ((multiplicand * multiplier) >> *scale);
 */
ENTRY(mul_scale)
        PUSH_FRAME
        xorl    %ecx, %ecx              /* *scale = 0 */
        movl    ARG0, %eax              /* get multiplicand */
        mull    ARG1                    /* multiplicand * multiplier */
0:
        cmpl    $0, %edx                /* if (!overflow()) */
        je      1f                      /* goto 1f */
        addl    $1, %ecx                /* (*scale)++ */
        shrdl   $1, %edx, %eax          /* (multiplicand * multiplier) >> 1 */
        shrl    $1, %edx                /* (multiplicand * multiplier) >> 1 */
        jmp     0b
1:
        movl    ARG2, %edx              /* get scale */
        movl    %ecx, (%edx)            /* set *scale */
        POP_FRAME
        ret
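
/*
 * Worked example (illustrative): mul_scale(0x80000000, 16, &s) first
 * forms the 64-bit product 0x800000000; four right shifts clear the
 * high word, so s = 4 and the routine returns 0x80000000, i.e.
 * (0x80000000 * 16) >> 4.
 */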
/*
 * Double-fault exception handler task. The last gasp...
 */
Entry(df_task_start)
        CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))
        hlt

/*
 * machine-check handler task. The last gasp...
 */
Entry(mc_task_start)
        CCALL1(panic_machine_check, $(T_MACHINE_CHECK))
        hlt

/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
        movl    %esp, %eax
        CCALL1(panic_double_fault64, %eax)
        hlt

Entry(lo_mc64)
        movl    %esp, %eax
        CCALL1(panic_machine_check64, %eax)
        hlt