/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <platforms.h>

#include <mach_kgdb.h>

#include <stat_time.h>
#include <mach_assert.h>

#include <sys/errno.h>

#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>

#include <mach/exception_types.h>

#define	_ARCH_I386_ASM_HELP_H_		/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDESHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,(APTDPTDI << PDESHIFT)
	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)
#if defined(__MACHO__)
/* Under Mach-O, etext is a variable which contains
 * the last text address.
 */
#define	ETEXT_ADDR	(EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address.
 */
#define ETEXT_ADDR	$ EXT(etext)
#endif
#define	CX(addr,reg)	addr(,reg,4)

/*
 * The following macros make calls into C code.
 * They dynamically align the stack to 16 bytes.
 * Arguments are moved (not pushed) onto the correctly aligned stack.
 * NOTE: EDI is destroyed in the process, and hence cannot
 * be directly used as a parameter. Users of this macro must
 * independently preserve EDI (a non-volatile) if the routine is
 * intended to be called from C, for instance.
 */
#define CCALL(fn)			\
	movl	%esp, %edi		;\
	andl	$0xFFFFFFF0, %esp	;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL1(fn, arg1)		\
	movl	%esp, %edi		;\
	subl	$4, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL2(fn, arg1, arg2)		\
	movl	%esp, %edi		;\
	subl	$8, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg2, 4(%esp)		;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL3(fn, arg1, arg2, arg3)	\
	movl	%esp, %edi		;\
	subl	$12, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg3, 8(%esp)		;\
	movl	arg2, 4(%esp)		;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp
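/*
 * Usage sketch (illustrative only): to call the C routine
 * user_trap(saved_state) with the state pointer held in %ebx,
 * as done on the trap path below:
 *
 *	CCALL1(user_trap, %ebx)
 *
 * %ebx is a safe argument register here; %edi would not be, since
 * the macro uses it to preserve the original stack pointer across
 * the call.
 */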
#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif
#define	RECOVER_TABLE_START		\
	.globl	EXT(recover_table)	;\
LEXT(recover_table)

#define	RECOVER(addr)			\

#define	RECOVER_TABLE_END		\
	.globl	EXT(recover_table_end)	;\
LEXT(recover_table_end)
/*
 * Allocate the recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START
	movl	%eax,TIMER_HIGHCHK(%ecx)
	movl	%edx,TIMER_LOW(%ecx)
	movl	%eax,TIMER_HIGH(%ecx)

0:	movl	TIMER_HIGH(%ecx),%edx
	movl	TIMER_LOW(%ecx),%eax
	cmpl	TIMER_HIGHCHK(%ecx),%edx
#define	TIME_TRAP_UENTRY
#define	TIME_TRAP_UEXIT
#define	TIME_INT_ENTRY
#define	TIME_INT_EXIT
/*
 * Low 32-bits of nanotime returned in %eax.
 * Computed from tsc based on the scale factor
 * and an implicit 32 bit shift.
 *
 * Uses %esi, %edi, %ebx, %ecx and %edx.
 */
#define RNT_INFO	_rtc_nanotime_info
0:	movl	RNT_INFO+RNT_TSC_BASE,%esi			;\
	movl	RNT_INFO+RNT_TSC_BASE+4,%edi			;\
	subl	%esi,%eax	/* tsc - tsc_base */		;\
	movl	RNT_INFO+RNT_SCALE,%ecx				;\
	movl	%edx,%ebx	/* delta * scale */		;\
	addl	RNT_INFO+RNT_NS_BASE,%eax /* add ns_base */	;\
	cmpl	RNT_INFO+RNT_TSC_BASE,%esi			;\
	jne	0b		/* retry if tsc_base moved */	;\
	cmpl	RNT_INFO+RNT_TSC_BASE+4,%edi			;\
	jne	0b		/* retry */
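/*
 * Sketch of the computation above in C terms (using the field names
 * referenced by the macro):
 *
 *	ns = (uint32_t)(((tsc - rnt_tsc_base) * rnt_scale) >> 32)
 *	     + rnt_ns_base;
 *
 * The trailing compares re-read RNT_TSC_BASE and loop back to 0b if
 * rtc_nanotime_info changed mid-computation, making the read
 * effectively atomic without taking a lock.
 */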
/*
 * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
 */
#define TIMER_UPDATE(treg,dreg)						 \
	addl	TIMER_LOW(treg),dreg		/* add delta low bits */ ;\
	adcl	$0,TIMER_HIGHCHK(treg)		/* add carry check bits */ ;\
	movl	dreg,TIMER_LOW(treg)		/* store updated low bits */ ;\
	movl	TIMER_HIGHCHK(treg),dreg	/* copy high check bits */ ;\
	movl	dreg,TIMER_HIGH(treg)		/* to high bits */
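/*
 * TIMER_HIGH and TIMER_HIGHCHK carry duplicate copies of the high
 * word: the writer updates HIGHCHK first and HIGH last, so a
 * lock-free reader (see the 0: loop earlier, which loads HIGH, then
 * LOW, then compares against HIGHCHK) that sees HIGH == HIGHCHK
 * knows the 64-bit low/high pair it read was not torn by a
 * concurrent update.
 */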
/*
 * Add time delta to old timer and start new.
 */
#define TIMER_EVENT(old,new)						 \
	NANOTIME32			/* eax low bits nanosecs */	;\
	movl	%gs:CPU_PROCESSOR,%ecx	/* get current processor */	;\
	movl	CURRENT_TIMER(%ecx),%ecx /* get current timer */	;\
	movl	%eax,%edx		/* save timestamp in %edx */	;\
	subl	TIMER_TSTAMP(%ecx),%eax	/* compute elapsed time */	;\
	TIMER_UPDATE(%ecx,%eax)		/* update timer struct */	;\
	addl	$(new##_TIMER-old##_TIMER),%ecx	/* point to new timer */ ;\
	movl	%edx,TIMER_TSTAMP(%ecx)	/* set timestamp */		;\
	movl	%gs:CPU_PROCESSOR,%edx	/* get current processor */	;\
	movl	%ecx,CURRENT_TIMER(%edx) /* set current timer */
/*
 * Update time on user trap entry.
 * Uses %eax, %ecx, %edx, %esi.
 */
#define	TIME_TRAP_UENTRY	TIMER_EVENT(USER,SYSTEM)

/*
 * Update time on user trap exit.
 * Uses %eax, %ecx, %edx, %esi.
 */
#define	TIME_TRAP_UEXIT		TIMER_EVENT(SYSTEM,USER)

/*
 * Update time on interrupt entry.
 * Uses %eax, %ecx, %edx, %esi.
 */
#define TIME_INT_ENTRY							 \
	NANOTIME32			/* eax low bits nanosecs */	;\
	movl	%gs:CPU_PROCESSOR,%ecx	/* get current processor */	;\
	movl	CURRENT_TIMER(%ecx),%ecx /* get current timer */	;\
	movl	%eax,%edx		/* save timestamp in %edx */	;\
	subl	TIMER_TSTAMP(%ecx),%eax	/* compute elapsed time */	;\
	TIMER_UPDATE(%ecx,%eax)		/* update timer struct */	;\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */	;\
	addl	$(SYSTEM_TIMER),%ecx	/* point to sys timer */	;\
	movl	%edx,TIMER_TSTAMP(%ecx)	/* set timestamp */
/*
 * Update time on interrupt exit.
 * Uses %eax, %ecx, %edx, %esi.
 */
#define TIME_INT_EXIT							 \
	NANOTIME32			/* eax low bits nanosecs */	;\
	movl	%gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */	;\
	addl	$(SYSTEM_TIMER),%ecx	/* point to sys timer */	;\
	movl	%eax,%edx		/* save timestamp in %edx */	;\
	subl	TIMER_TSTAMP(%ecx),%eax	/* compute elapsed time */	;\
	TIMER_UPDATE(%ecx,%eax)		/* update timer struct */	;\
	movl	%gs:CPU_PROCESSOR,%ecx	/* get current processor */	;\
	movl	CURRENT_TIMER(%ecx),%ecx /* interrupted timer */	;\
	movl	%edx,TIMER_TSTAMP(%ecx)	/* set timestamp */

#endif	/* STAT_TIME */
/*
 * Traditional, not ANSI.
 */
	.globl	label/**/count				;\

	.globl	label/**/limit				;\

	addl	$1,%ss:label/**/count			;\
	cmpl	$0,label/**/limit			;\

	movl	%ss:label/**/count,%eax		;\
	cmpl	%eax,%ss:label/**/limit
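/*
 * Note on label/**/count: this relies on the traditional (pre-ANSI)
 * cpp behavior of deleting a comment without inserting a space, so
 * the tokens paste together into "labelcount". An ANSI preprocessor
 * would instead spell this label##count, as the TIMER_EVENT macro
 * above does.
 */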
/*
 * Last-ditch debug code to handle faults that might result
 * from entering kernel (from collocated server) on an invalid
 * stack.  On collocated entry, there's no hardware-initiated
 * stack switch, so a valid stack must be in place when an
 * exception occurs, or we may double-fault.
 *
 * In case of a double-fault, our only recourse is to switch
 * hardware "tasks", so that we avoid using the current stack.
 *
 * The idea here is just to get the processor into the debugger,
 * post-haste.  No attempt is made to fix up whatever error got
 * us here, so presumably continuing from the debugger will
 * simply land us here again -- at best.
 *
 * Note that the per-fault entry points are not currently
 * functional.  The only way to make them work would be to
 * set up separate TSS's for each fault type, which doesn't
 * currently seem worthwhile.  (The offset part of a task
 * gate is always ignored.)  So all faults that task switch
 * currently resume at db_task_start.
 */
/*
 * Double fault (Murphy's point) - error code (0) on stack
 */
Entry(db_task_dbl_fault)
	movl	$(T_DOUBLE_FAULT),%ebx

/*
 * Segment not present - error code on stack
 */
Entry(db_task_seg_np)
	movl	$(T_SEGMENT_NOT_PRESENT),%ebx

/*
 * Stack fault - error code on (current) stack
 */
Entry(db_task_stk_fault)
	movl	$(T_STACK_FAULT),%ebx

/*
 * General protection fault - error code on stack
 */
Entry(db_task_gen_prot)
	movl	$(T_GENERAL_PROTECTION),%ebx
/*
 * The entry point where execution resumes after last-ditch debugger task
 * switch.
 */
Entry(db_task_start)
	subl	$(ISS32_SIZE),%edx
	movl	%edx,%esp		/* allocate i386_saved_state on stack */
	movl	%eax,R_ERR(%esp)
	movl	%ebx,R_TRAPNO(%esp)

	movl	CX(EXT(master_dbtss),%edx),%edx
	movl	TSS_LINK(%edx),%eax
	pushl	%eax			/* pass along selector of previous TSS */
	call	EXT(db_tss_to_frame)
	popl	%eax			/* get rid of TSS selector */
	call	EXT(db_trap_from_asm)

	iret				/* ha, ha, ha... */
#endif	/* MACH_KDB */
/*
 * Called as a function, makes the current thread
 * return from the kernel as if from an exception.
 */
	.globl	EXT(thread_exception_return)
	.globl	EXT(thread_bootstrap_return)
LEXT(thread_exception_return)
LEXT(thread_bootstrap_return)
	cli
	movl	%gs:CPU_KERNEL_STACK,%ecx
	movl	(%ecx),%esp		/* switch back to PCB stack */
	jmp	EXT(return_from_trap)
Entry(call_continuation)
	movl	S_ARG0,%eax		/* get continuation */
	movl	S_ARG1,%edx		/* continuation param */
	movl	S_ARG2,%ecx		/* wait result */
	movl	%gs:CPU_KERNEL_STACK,%esp /* pop the stack */
	xorl	%ebp,%ebp		/* zero frame pointer */
	subl	$8,%esp			/* align the stack */
	pushl	%ecx
	pushl	%edx
	call	*%eax			/* call continuation */
	addl	$16,%esp
	movl	%gs:CPU_ACTIVE_THREAD,%eax
	pushl	%eax
	call	EXT(thread_terminate)
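/*
 * As seen from C (sketch; parameter names assumed):
 *
 *	void call_continuation(void (*continuation)(void *, int),
 *			       void *parameter, int wait_result);
 *
 * A continuation must never return: the stack it was started from
 * has been discarded, so if the call does come back the only sane
 * option left is to terminate the thread.
 */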
/*******************************************************************************************************
 *
 * All task 'exceptions' (32- and 64-bit) enter lo_alltraps:
 *	esp	 -> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_alltraps)
	movl	R_CS(%esp),%eax		/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	CCALL1(user_trap, %ebx)	/* call user trap routine */
	cli				/* hold off intrs - critical section */
	popl	%esp			/* switch back to PCB stack */
/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled.
 */
LEXT(return_from_trap)
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* any pending ASTs? */
	je	EXT(return_to_user)	/* branch if no AST */

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */
	sti				/* interrupts always enabled on return to user mode */

	pushl	%ebx			/* save PCB stack */
	CCALL1(i386_astintr, $0)	/* take the AST */
	cli
	popl	%esp			/* switch back to PCB stack (w/exc link) */
	jmp	EXT(return_from_trap)	/* and check again (rare) */
LEXT(return_to_user)
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(lo_ret_to_user)
	jmp	EXT(lo64_ret_to_user)
/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will restore them to the state at
 * the time of the trap as soon as it is safe to do so without
 * recursing on preemption.
 */
	movl	%esp, %eax		/* saved state addr */
	CCALL1(kernel_trap, %eax)	/* to kernel trap routine */

	movl	%gs:CPU_PENDING_AST,%eax /* get pending asts */
	testl	$ AST_URGENT,%eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$ T_PREEMPT,R_TRAPNO(%esp)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$ EFL_IF,R_EFLAGS(%esp)	/* interrupts disabled? */
	je	ret_to_kernel		/* yes, skip the AST */
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	andl	$(-KERNEL_STACK_SIZE),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */
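/*
 * The test above exploits KERNEL_STACK_SIZE alignment:
 * (%esp ^ CPU_KERNEL_STACK) & -KERNEL_STACK_SIZE is zero exactly
 * when both pointers lie in the same KERNEL_STACK_SIZE-aligned
 * block, i.e. when we are already running on the kernel stack.
 */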
	CCALL1(i386_astintr, $1)	/* take the AST */

ret_to_kernel:
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(lo_ret_to_kernel)
	jmp	EXT(lo64_ret_to_kernel)
/*******************************************************************************************************
 *
 * All interrupts on all tasks enter here with:
 *	esp	 -> x86_saved_state_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
/*
 * test whether already on interrupt stack
 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	leal	-INTSTACK_SIZE(%ecx),%edx

	xchgl	%ecx,%esp		/* switch to interrupt stack */

	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
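/*
 * With CR0.TS set, any floating-point use inside the interrupt
 * handler raises a device-not-available fault instead of silently
 * clobbering the interrupted thread's FPU state; the fp_valid check
 * on the exit path below decides whether TS must stay set.
 */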
	subl	$8, %esp		/* for 16-byte stack alignment */
	pushl	%ecx			/* save pointer to old stack */
	movl	%ecx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%gs:CPU_INT_STATE, %eax
	CCALL1(PE_incoming_interrupt, %eax) /* call generic interrupt routine */

	cli				/* just in case we returned with intrs enabled */
	xorl	%eax,%eax
	movl	%eax,%gs:CPU_INT_STATE	/* clear intr state pointer */
	.globl	EXT(return_to_iret)
LEXT(return_to_iret)			/* (label for kdb_kintr and hardclock) */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */
	movl	%gs:CPU_ACTIVE_THREAD,%eax
	movl	ACT_PCB(%eax),%eax	/* get act's PCB */
	movl	PCB_FPS(%eax),%eax	/* get pcb's ims.ifps */
	cmpl	$0,%eax			/* Is there a context? */
	je	1f			/* Branch if not */
	movl	FP_VALID(%eax),%eax	/* Load fp_valid */
	cmpl	$0,%eax			/* Check if valid */
	jne	1f			/* Branch if valid */
	clts				/* state live in FPU: leave TS clear */
	jmp	2f
1:
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
2:
	popl	%esp			/* switch back to old stack */
	/* Load interrupted code segment into %eax */
	movl	R_CS(%esp),%eax		/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:
	testb	$3,%eax			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
/*
 * we only want to handle preemption requests if
 * the interrupt fell in the kernel context
 * and preemption isn't disabled
 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$ AST_URGENT,%eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	andl	$(-KERNEL_STACK_SIZE),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */
/*
 * Take an AST from kernel space.  We don't need (and don't want)
 * to do as much as the case where the interrupt came from user
 * space.
 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel
/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%esp, %edx		/* i386_saved_state */
	CCALL1(PE_incoming_interrupt, %edx)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	EXT(ret_to_user)	/* no, nothing to do */

	jmp	EXT(return_from_trap)	/* return */
/*******************************************************************************************************
 *
 * System call entries via INTR_GATE or sysenter:
 *
 *	esp	 -> i386_saved_state_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
/*
 * We can be here either for a mach syscall or a unix syscall,
 * as indicated by the sign of the code:
 */
	movl	R_EAX(%esp),%eax
	testl	%eax,%eax
	js	EXT(lo_mach_scall)	/* < 0 => mach */
					/* > 0 => unix */
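/*
 * Convention (illustrative): Mach trap numbers are passed as
 * negative values in %eax while BSD syscall numbers are positive,
 * so the sign test alone selects the dispatcher.
 */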
Entry(lo_unix_scall)
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
	movl	ACT_TASK(%ecx),%ecx	/* point to current task */
	addl	$1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */

	CCALL1(unix_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */
Entry(lo_mach_scall)
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
	movl	ACT_TASK(%ecx),%ecx	/* point to current task */
	addl	$1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */

	CCALL1(mach_call_munger, %ebx)
	/*
	 * always returns through thread_exception_return
	 */
Entry(lo_mdep_scall)
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	CCALL1(machdep_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */
Entry(lo_diag_scall)
	movl	%gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
	xchgl	%ebx,%esp		// Switch to it, saving the previous

	pushl	%ebx			// Push the previous stack
	CCALL1(diagCall, %ebx)		// Call diagnostics
	cli				// Disable interruptions just in case they were enabled
	popl	%esp			// Get back the original stack

	cmpl	$0,%eax			// What kind of return is this?
	jne	EXT(return_to_user)	// Normal return, do not check asts...

	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
					// pass what would be the diag syscall
					// error return - cause an exception
/*******************************************************************************************************
 *
 * System call entries via syscall only:
 *
 *	esp	 -> x86_saved_state64_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
/*
 * We can be here for a mach, unix, machdep or diag syscall,
 * as indicated by the syscall class:
 */
	movl	R64_RAX(%esp), %eax	/* syscall number/class */
	movl	%eax, %ebx
	andl	$(SYSCALL_CLASS_MASK), %ebx /* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_mach_scall)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_unix_scall)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_mdep_scall)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_diag_scall)
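/*
 * Sketch of the encoding assumed above: the class field occupies the
 * top bits of the 64-bit syscall number, so a value formed as
 * (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT) | 31 would dispatch to
 * lo64_mach_scall with Mach trap number 31 (number illustrative).
 */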
	/* Syscall class unknown */
	CCALL3(i386_exception, $(EXC_SYSCALL), %eax, $1)
Entry(lo64_unix_scall)
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
	movl	ACT_TASK(%ecx),%ecx	/* point to current task */
	addl	$1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */

	CCALL1(unix_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */
Entry(lo64_mach_scall)
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
	movl	ACT_TASK(%ecx),%ecx	/* point to current task */
	addl	$1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */

	CCALL1(mach_call_munger64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */
Entry(lo64_mdep_scall)
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	CCALL1(machdep_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */
Entry(lo64_diag_scall)
	movl	%gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
	xchgl	%ebx,%esp		// Switch to it, saving the previous

	pushl	%ebx			// Push the previous stack
	CCALL1(diagCall64, %ebx)	// Call diagnostics
	cli				// Disable interruptions just in case they were enabled
	popl	%esp			// Get back the original stack

	cmpl	$0,%eax			// What kind of return is this?
	jne	EXT(return_to_user)	// Normal return, do not check asts...

	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
/******************************************************************************************************
 *
 * Copy from user/kernel address space.
 * arg0:	window offset or kernel address
 * arg1:	kernel address
 * arg2:	byte count
 */
ENTRY(copyinphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */

ENTRY(copyinphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */

	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb

	xorl	%eax,%eax		/* return 0 for success */
copyin_ret:
	mov	%ss,%cx			/* restore kernel data and extended segments */
	mov	%cx,%ds
	mov	%cx,%es

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyin_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyin_ret		/* pop frame and return */
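/*
 * Net effect, as seen from C (sketch): 0 on success, EFAULT if a
 * fault is taken inside the copy loop; the recovery table redirects
 * such faults to copyin_fail rather than panicking, which is what
 * makes it safe to touch unverified user addresses here.
 */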
/*
 * Copy string from user/kern address space.
 * arg0:	window offset or kernel address
 * arg1:	kernel address
 * arg2:	max byte count
 * arg3:	actual byte count (OUT)
 */
Entry(copyinstr_kern)

Entry(copyinstr_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	xorl	%eax,%eax		/* set to 0 here so that the high 24 bits */
					/* are 0 for the cmpl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(copystr_fail)		/* copy bytes... */
	movb	(%esi),%al
	incl	%esi
	testl	%edi,%edi		/* if kernel address is ... */
	jz	3f
	movb	%al,(%edi)		/* copy the byte */
	incl	%edi
3:
	testl	%eax,%eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 status already in %eax */
	decl	%edx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full so copy in another byte */
	movl	$(ENAMETOOLONG),%eax	/* buffer full but no 0-byte: ENAMETOOLONG */
4:
	movl	8+S_ARG3,%edi		/* get OUT len ptr */
	cmpl	$0,%edi
	jz	copystr_ret		/* if null, just return */
	subl	8+S_ARG0,%esi
	movl	%esi,(%edi)		/* else set OUT arg to xfer len */

copystr_ret:
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copystr_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copystr_ret		/* pop frame and return */
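/*
 * Return values, as seen from C (sketch): 0 on success with the
 * terminating 0-byte copied, ENAMETOOLONG if the buffer fills before
 * a 0-byte is found, EFAULT on a fault; the OUT byte count covers
 * everything transferred, including the terminator.
 */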
/*
 * Copy to user/kern address space.
 * arg0:	kernel address
 * arg1:	window offset or kernel address
 * arg2:	byte count
 */
ENTRY(copyoutphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */

ENTRY(copyoutphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */

	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - kernel address */
	movl	8+S_ARG1,%edi		/* get destination - window offset or kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsl
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsb

	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%cx			/* restore kernel segment */
	mov	%cx,%ds
	mov	%cx,%es

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */
/*
 * io register must not be used on slaves (no AT bus)
 */
#define	ILL_ON_SLAVE

#if	MACH_ASSERT

#define PUSH_FRAME	FRAME
#define POP_FRAME	EMARF

#else	/* MACH_ASSERT */

#endif	/* MACH_ASSERT */
#if	MACH_KDB || MACH_ASSERT

/*
 * Following routines are also defined as macros in i386/pio.h.
 * Compile them when MACH_KDB is configured so that they
 * can be invoked from the debugger.
 */
/*
 * void outb(unsigned char *io_port,
 *	     unsigned char byte)
 *
 * Output a byte to an IO port.
 */
ENTRY(outb)
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outb	%al,%dx			/* send it out */
	ret
/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Input a byte from an IO port.
 */
ENTRY(inb)
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inb	%dx,%al			/* get the byte */
	ret
/*
 * void outw(unsigned short *io_port,
 *	     unsigned short word)
 *
 * Output a word to an IO port.
 */
ENTRY(outw)
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outw	%ax,%dx			/* send it out */
	ret
/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Input a word from an IO port.
 */
ENTRY(inw)
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inw	%dx,%ax			/* get the word */
	ret
/*
 * void outl(unsigned int *io_port,
 *	     unsigned int value)
 *
 * Output an int to an IO port.
 */
ENTRY(outl)
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outl	%eax,%dx		/* send it out */
	ret
/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Input an int from an IO port.
 */
ENTRY(inl)
	movl	ARG0,%edx		/* IO port address */
	inl	%dx,%eax		/* get the int */
	ret

#endif	/* MACH_KDB || MACH_ASSERT */
/*
 * void loutb(unsigned char *io_port,
 *	      unsigned char *data,
 *	      unsigned int count)
 *
 * Output an array of bytes to an IO port.
 */
ENTRY(loutb)
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsb				/* output */
	movl	%eax,%esi		/* restore register */
	ret
/*
 * void loutw(unsigned short *io_port,
 *	      unsigned short *data,
 *	      unsigned int count)
 *
 * Output an array of shorts to an IO port.
 */
ENTRY(loutw)
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsw				/* output */
	movl	%eax,%esi		/* restore register */
	ret
/*
 * void loutl(unsigned short io_port,
 *	      unsigned int *data,
 *	      unsigned int count)
 *
 * Output an array of longs to an IO port.
 */
ENTRY(loutl)
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsl				/* output */
	movl	%eax,%esi		/* restore register */
	ret
/*
 * void linb(unsigned char *io_port,
 *	     unsigned char *data,
 *	     unsigned int count)
 *
 * Input an array of bytes from an IO port.
 */
ENTRY(linb)
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insb				/* input */
	movl	%eax,%edi		/* restore register */
	ret
/*
 * void linw(unsigned short *io_port,
 *	     unsigned short *data,
 *	     unsigned int count)
 *
 * Input an array of shorts from an IO port.
 */
ENTRY(linw)
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insw				/* input */
	movl	%eax,%edi		/* restore register */
	ret
/*
 * void linl(unsigned short io_port,
 *	     unsigned int *data,
 *	     unsigned int count)
 *
 * Input an array of longs from an IO port.
 */
ENTRY(linl)
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insl				/* input */
	movl	%eax,%edi		/* restore register */
	ret
/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
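	/*
	 * Illustrative sketch of a body consistent with the prototype
	 * and the RECOVER() fault-recovery mechanism defined earlier
	 * (a sketch under those assumptions, not necessarily the
	 * shipped sequence):
	 */
	movl	S_ARG0, %ecx		/* MSR number */
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)		/* land at rdmsr_fail on #GP */
	rdmsr				/* %edx:%eax = MSR contents */
	movl	S_ARG1, %ecx
	movl	%eax, (%ecx)		/* store low word through *lo */
	movl	S_ARG2, %ecx
	movl	%edx, (%ecx)		/* store high word through *hi */
	xorl	%eax, %eax		/* return 0: success */
	ret

rdmsr_fail:
	movl	$1, %eax		/* return non-zero: rdmsr faulted */
	ret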
/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END
/*
 * dr<i>(address, type, len, persistence)
 */
	movl	%eax,EXT(dr_addr)

	movl	%eax,EXT(dr_addr)+1*4

	movl	%eax,EXT(dr_addr)+2*4

	movl	%eax,EXT(dr_addr)+3*4

	movl	%edx,EXT(dr_addr)+4*4
	andl	dr_msk(,%ecx,2),%edx	/* clear out new entry */
	movl	%edx,EXT(dr_addr)+5*4

	movl	%edx,EXT(dr_addr)+7*4
	lidt	null_idtr		/* disable the interrupt handler */
	xor	%ecx,%ecx		/* generate a divide by zero */
	div	%ecx,%eax		/* reboot now */
	ret				/* this will "never" be executed */

#endif	/* SYMMETRY */
/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btsl	%ecx, (%eax)		/* set bit */
	ret
/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btrl	%ecx, (%eax)		/* clear bit */
	ret
/*
 * ffsbit(int *s) - find first set bit in bit string
 */
ENTRY(ffsbit)
	movl	S_ARG0, %ecx		/* address */
	movl	$0, %edx		/* base offset */
0:
	bsfl	(%ecx), %eax		/* check argument bits */
	jnz	1f			/* found bit, return */
	addl	$4, %ecx		/* increment address */
	addl	$32, %edx		/* increment offset */
	jmp	0b			/* try again */
1:
	addl	%edx, %eax		/* return offset */
	ret
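/*
 * Example (illustrative): for the two-word bit string {0x0, 0x10},
 * bsfl on the first word sets ZF, so the scan advances 32 bits;
 * bsfl on 0x10 then yields bit index 4, and the result returned is
 * 32 + 4 = 36.
 */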
/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string
 */
ENTRY(testbit)
	movl	S_ARG0,%eax		/* Get the bit to test */
	movl	S_ARG1,%ecx		/* get the array string */
	btl	%eax,(%ecx)		/* test the bit */
	sbbl	%eax,%eax		/* 0 if clear, -1 if set */
	ret
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 */
/*
 * div_scale(unsigned int dividend,
 *	     unsigned int divisor,
 *	     unsigned int *scale)
 *
 * This function returns (dividend << *scale) / divisor where *scale
 * is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while ((dividend >> *scale) >= divisor)
 *		(*scale)++;
 *	*scale = 32 - *scale;
 *	return (dividend << *scale) / divisor;
 */
ENTRY(div_scale)
	xorl	%ecx, %ecx		/* *scale = 0 */
	xorl	%eax, %eax
	movl	ARG0, %edx		/* get dividend */
0:
	cmpl	ARG1, %edx		/* if (divisor > dividend) */
	jle	1f			/* goto 1f */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* dividend >> 1 */
	shrl	$1, %edx		/* dividend >> 1 */
	jmp	0b			/* goto 0b */
1:
	divl	ARG1			/* (dividend << (32 - *scale)) / divisor */
	movl	ARG2, %edx		/* get scale */
	movl	$32, (%edx)		/* *scale = 32 */
	subl	%ecx, (%edx)		/* *scale -= %ecx */
	ret
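/*
 * Worked example (illustrative): div_scale(10, 3, &s) shifts until
 * 10 >> 2 = 2 < 3, so s = 32 - 2 = 30 and the return value is
 * (10 << 30) / 3 = 3579139413, i.e. 10/3 represented in 2.30 fixed
 * point, with the caller dividing by 2^s to recover the quotient.
 */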
/*
 * mul_scale(unsigned int multiplicand,
 *	     unsigned int multiplier,
 *	     unsigned int *scale)
 *
 * This function returns ((multiplicand * multiplier) >> *scale) where
 * *scale is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (overflow((multiplicand * multiplier) >> *scale))
 *		(*scale)++;
 *	return ((multiplicand * multiplier) >> *scale);
 */
ENTRY(mul_scale)
	xorl	%ecx, %ecx		/* *scale = 0 */
	movl	ARG0, %eax		/* get multiplicand */
	mull	ARG1			/* multiplicand * multiplier */
0:
	cmpl	$0, %edx		/* if (!overflow()) */
	je	1f			/* goto 1f */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* (multiplicand * multiplier) >> 1 */
	shrl	$1, %edx		/* (multiplicand * multiplier) >> 1 */
	jmp	0b			/* goto 0b */
1:
	movl	ARG2, %edx		/* get scale */
	movl	%ecx, (%edx)		/* set *scale */
	ret
/*
 * Double-fault exception handler task. The last gasp...
 */
Entry(df_task_start)
	CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))

/*
 * Machine-check handler task. The last gasp...
 */
Entry(mc_task_start)
	CCALL1(panic_machine_check, $(T_MACHINE_CHECK))

/*
 * Compatibility mode's last gasp...
 */
	CCALL1(panic_double_fault64, %eax)

	CCALL1(panic_machine_check64, %eax)