2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 #include <etap_event_monitor.h>
55 #include <platforms.h>
57 #include <mach_kgdb.h>
59 #include <stat_time.h>
60 #include <mach_assert.h>
62 #include <sys/errno.h>
64 #include <i386/cpuid.h>
65 #include <i386/eflags.h>
66 #include <i386/proc_reg.h>
67 #include <i386/trap.h>
69 #include <mach/exception_types.h>
71 #include <i386/AT386/mp/mp.h>
73 #define PREEMPT_DEBUG_LOG 0
76 /* Under Mach-O, etext is a variable which contains
77 * the last text address
79 #define ETEXT_ADDR (EXT(etext))
81 /* Under ELF and other non-Mach-O formats, the address of
82 * etext represents the last text address
84 #define ETEXT_ADDR $ EXT(etext)
89 #define CX(addr,reg) addr(,reg,4)
92 #define CPU_NUMBER(reg)
93 #define CX(addr,reg) addr
95 #endif /* NCPUS > 1 */
/*
 * Recovery and retry tables are emitted into dedicated __VECTORS
 * sections so the debugger build can locate them.
 */
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#define	RETRY_SECTION		.section	__VECTORS, __retries
/*
 * Non-debug build: recovery and retry tables simply live in .text.
 * BUGFIX: the second define previously duplicated RECOVERY_SECTION,
 * leaving RETRY_SECTION undefined in this configuration; any use of
 * RETRY_SECTION here would have failed to preprocess.
 */
#define	RECOVERY_SECTION	.text
#define	RETRY_SECTION		.text
112 #define RECOVER_TABLE_START \
114 .globl EXT(recover_table) ;\
115 LEXT(recover_table) ;\
118 #define RECOVER(addr) \
125 #define RECOVER_TABLE_END \
127 .globl EXT(recover_table_end) ;\
128 LEXT(recover_table_end) ;\
132 * Retry table for certain successful faults.
134 #define RETRY_TABLE_START \
136 .globl EXT(retry_table) ;\
140 #define RETRY(addr) \
147 #define RETRY_TABLE_END \
149 .globl EXT(retry_table_end) ;\
150 LEXT(retry_table_end) ;\
154 * Allocate recovery and retry tables.
166 #define TIME_TRAP_UENTRY
167 #define TIME_TRAP_UEXIT
168 #define TIME_INT_ENTRY
169 #define TIME_INT_EXIT
171 #else /* microsecond timing */
174 * Microsecond timing.
175 * Assumes a free-running microsecond counter.
176 * no TIMER_MAX check needed.
180 * There is only one current time-stamp per CPU, since only
181 * the time-stamp in the current timer is used.
182 * To save time, we allocate the current time-stamps here.
184 .comm EXT(current_tstamp), 4*NCPUS
187 * Update time on user trap entry.
188 * 11 instructions (including cli on entry)
189 * Assumes CPU number in %edx.
/*
 * TIME_TRAP_UENTRY -- on entry to a trap from user mode, charge the
 * elapsed microseconds to the current (user) timer, then switch the
 * CPU over to the system timer.
 * In:     %edx = CPU number
 * Clobbers: %ebx, %ecx, flags.  Runs with interrupts blocked.
 */
#define TIME_TRAP_UENTRY \
	cli				/* no interrupts while timers juggle */	;\
	movl	VA_ETC,%ebx		/* read free-running usec counter */	;\
	movl	CX(EXT(current_tstamp),%edx),%ecx /* previous stamp */	;\
	movl	%ebx,CX(EXT(current_tstamp),%edx) /* record new stamp */;\
	subl	%ecx,%ebx		/* %ebx = elapsed = new - old */	;\
	movl	CX(EXT(current_timer),%edx),%ecx /* active timer */	;\
	addl	%ebx,LOW_BITS(%ecx)	/* accumulate into low word */	;\
	jns	0f			/* sign clear: no overflow, skip */	;\
	call	timer_normalize		/* fold overflow into high word */	;\
0:	addl	$(TH_SYS_TIMER-TH_USER_TIMER),%ecx /* user -> sys timer */	;\
	movl	%ecx,CX(EXT(current_timer),%edx) /* make it current */	;\
	sti				/* interrupts back on */
208 * update time on user trap exit.
210 * Assumes CPU number in %edx.
/*
 * TIME_TRAP_UEXIT -- on return to user mode, charge elapsed time to
 * the system timer and switch the CPU back to the user timer.
 * In:     %edx = CPU number
 * Clobbers: %ebx, %ecx, flags.  Leaves interrupts disabled
 * (presumably re-enabled by the subsequent iret path -- confirm).
 */
#define TIME_TRAP_UEXIT \
	cli				/* no interrupts while timers juggle */	;\
	movl	VA_ETC,%ebx		/* read free-running usec counter */	;\
	movl	CX(EXT(current_tstamp),%edx),%ecx /* previous stamp */	;\
	movl	%ebx,CX(EXT(current_tstamp),%edx) /* record new stamp */;\
	subl	%ecx,%ebx		/* %ebx = elapsed = new - old */	;\
	movl	CX(EXT(current_timer),%edx),%ecx /* active timer */	;\
	addl	%ebx,LOW_BITS(%ecx)	/* accumulate into low word */	;\
	jns	0f			/* sign clear: no overflow, skip */	;\
	call	timer_normalize		/* fold overflow into high word */	;\
0:	addl	$(TH_USER_TIMER-TH_SYS_TIMER),%ecx /* sys -> user timer */	;\
	movl	%ecx,CX(EXT(current_timer),%edx) /* make it current */
228 * update time on interrupt entry.
230 * Assumes CPU number in %edx.
231 * Leaves old timer in %ebx.
/*
 * TIME_INT_ENTRY -- on interrupt entry, charge elapsed time to the
 * interrupted timer and switch the CPU to the kernel (interrupt) timer.
 * In:     %edx = CPU number
 * Out:    %ebx = previously-current timer (needed by TIME_INT_EXIT)
 * Clobbers: %ecx, flags.  No overflow check here -- overflow of the
 * old timer is caught later by TIME_INT_EXIT.
 */
#define TIME_INT_ENTRY \
	movl	VA_ETC,%ecx		/* read free-running usec counter */	;\
	movl	CX(EXT(current_tstamp),%edx),%ebx /* previous stamp */	;\
	movl	%ecx,CX(EXT(current_tstamp),%edx) /* record new stamp */;\
	subl	%ebx,%ecx		/* %ecx = elapsed = new - old */	;\
	movl	CX(EXT(current_timer),%edx),%ebx /* active timer */	;\
	addl	%ecx,LOW_BITS(%ebx)	/* accumulate into low word */	;\
	leal	CX(0,%edx),%ecx		/* NOTE(review): dead -- %ecx is */	;\
	lea	CX(EXT(kernel_timer),%edx),%ecx	/* rewritten here */	;\
	movl	%ecx,CX(EXT(current_timer),%edx) /* interrupt timer now current */
246 * update time on interrupt exit.
248 * Assumes CPU number in %edx, old timer in %ebx.
/*
 * TIME_INT_EXIT -- on interrupt exit, charge elapsed time to the
 * kernel (interrupt) timer, normalize any pending overflow in the
 * old timer, and reinstate the old timer as current.
 * In:     %edx = CPU number, %ebx = timer saved by TIME_INT_ENTRY
 * Clobbers: %eax, %ecx, flags.
 */
#define TIME_INT_EXIT \
	movl	VA_ETC,%eax		/* read free-running usec counter */	;\
	movl	CX(EXT(current_tstamp),%edx),%ecx /* previous stamp */	;\
	movl	%eax,CX(EXT(current_tstamp),%edx) /* record new stamp */;\
	subl	%ecx,%eax		/* %eax = elapsed = new - old */	;\
	movl	CX(EXT(current_timer),%edx),%ecx /* active (int) timer */	;\
	addl	%eax,LOW_BITS(%ecx)	/* accumulate into low word */	;\
	jns	0f			/* sign clear: no overflow, skip */	;\
	call	timer_normalize		/* fold overflow into high word */	;\
0:	testb	$0x80,LOW_BITS+3(%ebx)	/* old timer overflowed earlier? */	;\
	jz	0f			/* high bit clear: no, skip */	;\
	movl	%ebx,%ecx		/* normalize expects timer in %ecx */	;\
	call	timer_normalize		/* fold overflow into high word */	;\
0:	movl	%ebx,CX(EXT(current_timer),%edx) /* old timer current again */
268 * Normalize timer in ecx.
269 * Preserves edx; clobbers eax.
273 .long TIMER_HIGH_UNIT /* div has no immediate opnd */
276 pushl %edx /* save registersz */
278 xorl %edx,%edx /* clear divisor high */
279 movl LOW_BITS(%ecx),%eax /* get divisor low */
280 divl timer_high_unit,%eax /* quotient in eax */
281 /* remainder in edx */
282 addl %eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */
283 movl %edx,LOW_BITS(%ecx) /* remainder to low_bits */
284 addl %eax,HIGH_BITS(%ecx) /* add high_inc to high bits */
285 popl %eax /* restore register */
290 * Switch to a new timer.
293 CPU_NUMBER(%edx) /* get this CPU */
294 movl VA_ETC,%ecx /* get timer */
295 movl CX(EXT(current_tstamp),%edx),%eax /* get old time stamp */
296 movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */
297 subl %ecx,%eax /* elapsed = new - old */
298 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */
299 addl %eax,LOW_BITS(%ecx) /* add to low bits */
300 jns 0f /* if overflow, */
301 call timer_normalize /* normalize timer */
303 movl S_ARG0,%ecx /* get new timer */
304 movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
308 * Initialize the first timer for a CPU.
311 CPU_NUMBER(%edx) /* get this CPU */
312 movl VA_ETC,%ecx /* get timer */
313 movl %ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */
314 movl S_ARG0,%ecx /* get timer */
315 movl %ecx,CX(EXT(current_timer),%edx) /* set initial timer */
318 #endif /* accurate timing */
321 * Encapsulate the transfer of exception stack frames between a PCB
322 * and a thread stack. Since the whole point of these is to emulate
323 * a call or exception that changes privilege level, both macros
324 * assume that there is no user esp or ss stored in the source
325 * frame (because there was no change of privilege to generate them).
329 * Transfer a stack frame from a thread's user stack to its PCB.
330 * We assume the thread and stack addresses have been loaded into
331 * registers (our arguments).
333 * The macro overwrites edi, esi, ecx and whatever registers hold the
334 * thread and stack addresses (which can't be one of the above three).
335 * The thread address is overwritten with the address of its saved state
336 * (where the frame winds up).
338 * Must be called on kernel stack.
340 #define FRAME_STACK_TO_PCB(thread, stkp) ;\
341 movl ACT_PCB(thread),thread /* get act`s PCB */ ;\
342 leal PCB_ISS(thread),%edi /* point to PCB`s saved state */;\
343 movl %edi,thread /* save for later */ ;\
344 movl stkp,%esi /* point to start of frame */ ;\
345 movl $ R_UESP,%ecx ;\
346 sarl $2,%ecx /* word count for transfer */ ;\
347 cld /* we`re incrementing */ ;\
349 movsl /* transfer the frame */ ;\
350 addl $ R_UESP,stkp /* derive true "user" esp */ ;\
351 movl stkp,R_UESP(thread) /* store in PCB */ ;\
353 mov %ss,%cx /* get current ss */ ;\
354 movl %ecx,R_SS(thread) /* store in PCB */
357 * Transfer a stack frame from a thread's PCB to the stack pointed
358 * to by the PCB. We assume the thread address has been loaded into
359 * a register (our argument).
361 * The macro overwrites edi, esi, ecx and whatever register holds the
362 * thread address (which can't be one of the above three). The
363 * thread address is overwritten with the address of its saved state
364 * (where the frame winds up).
366 * Must be called on kernel stack.
368 #define FRAME_PCB_TO_STACK(thread) ;\
369 movl ACT_PCB(thread),%esi /* get act`s PCB */ ;\
370 leal PCB_ISS(%esi),%esi /* point to PCB`s saved state */;\
371 movl R_UESP(%esi),%edi /* point to end of dest frame */;\
372 movl ACT_MAP(thread),%ecx /* get act's map */ ;\
373 movl MAP_PMAP(%ecx),%ecx /* get map's pmap */ ;\
374 cmpl EXT(kernel_pmap), %ecx /* If kernel loaded task */ ;\
375 jz 1f /* use kernel data segment */ ;\
376 movl $ USER_DS,%cx /* else use user data segment */;\
379 movl $ R_UESP,%ecx ;\
380 subl %ecx,%edi /* derive start of frame */ ;\
381 movl %edi,thread /* save for later */ ;\
382 sarl $2,%ecx /* word count for transfer */ ;\
383 cld /* we`re incrementing */ ;\
385 movsl /* transfer the frame */ ;\
386 mov %ss,%cx /* restore kernel segments */ ;\
394 * Traditional, not ANSI.
398 .globl label/**/count ;\
401 .globl label/**/limit ;\
405 addl $1,%ss:label/**/count ;\
406 cmpl $0,label/**/limit ;\
410 movl %ss:label/**/count,%eax ;\
411 cmpl %eax,%ss:label/**/limit ;\
424 * Last-ditch debug code to handle faults that might result
425 * from entering kernel (from collocated server) on an invalid
426 * stack. On collocated entry, there's no hardware-initiated
427 * stack switch, so a valid stack must be in place when an
428 * exception occurs, or we may double-fault.
430 * In case of a double-fault, our only recourse is to switch
431 * hardware "tasks", so that we avoid using the current stack.
433 * The idea here is just to get the processor into the debugger,
434 * post-haste. No attempt is made to fix up whatever error got
435 * us here, so presumably continuing from the debugger will
436 * simply land us here again -- at best.
440 * Note that the per-fault entry points are not currently
441 * functional. The only way to make them work would be to
442 * set up separate TSS's for each fault type, which doesn't
443 * currently seem worthwhile. (The offset part of a task
444 * gate is always ignored.) So all faults that task switch
445 * currently resume at db_task_start.
448 * Double fault (Murphy's point) - error code (0) on stack
450 Entry(db_task_dbl_fault)
452 movl $(T_DOUBLE_FAULT),%ebx
455 * Segment not present - error code on stack
457 Entry(db_task_seg_np)
459 movl $(T_SEGMENT_NOT_PRESENT),%ebx
462 * Stack fault - error code on (current) stack
464 Entry(db_task_stk_fault)
466 movl $(T_STACK_FAULT),%ebx
469 * General protection fault - error code on stack
471 Entry(db_task_gen_prot)
473 movl $(T_GENERAL_PROTECTION),%ebx
477 * The entry point where execution resumes after last-ditch debugger task
483 movl %edx,%esp /* allocate i386_saved_state on stack */
484 movl %eax,R_ERR(%esp)
485 movl %ebx,R_TRAPNO(%esp)
489 movl CX(EXT(mp_dbtss),%edx),%edx
490 movl TSS_LINK(%edx),%eax
492 movl EXT(dbtss)+TSS_LINK,%eax
494 pushl %eax /* pass along selector of previous TSS */
495 call EXT(db_tss_to_frame)
496 popl %eax /* get rid of TSS selector */
497 call EXT(db_trap_from_asm)
502 iret /* ha, ha, ha... */
503 #endif /* MACH_KDB */
506 * Trap/interrupt entry points.
508 * All traps must create the following save area on the PCB "stack":
517 * cr2 if page fault - otherwise unused
527 * user esp - if from user
528 * user ss - if from user
529 * es - if from V86 thread
530 * ds - if from V86 thread
531 * fs - if from V86 thread
532 * gs - if from V86 thread
537 * General protection or segment-not-present fault.
538 * Check for a GP/NP fault in the kernel_return
539 * sequence; if there, report it as a GP/NP fault on the user's instruction.
541 * esp-> 0: trap code (NP or GP)
542 * 4: segment number in error
546 * 20 old registers (trap is from kernel)
549 pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
550 jmp trap_check_kernel_exit /* check for kernel exit sequence */
553 pushl $(T_SEGMENT_NOT_PRESENT)
554 /* indicate fault type */
556 trap_check_kernel_exit:
557 testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
558 jnz EXT(alltraps) /* isn`t kernel trap if so */
559 testl $3,12(%esp) /* is trap from kernel mode? */
560 jne EXT(alltraps) /* if so: */
561 /* check for the kernel exit sequence */
562 cmpl $ EXT(kret_iret),8(%esp) /* on IRET? */
564 cmpl $ EXT(kret_popl_ds),8(%esp) /* popping DS? */
566 cmpl $ EXT(kret_popl_es),8(%esp) /* popping ES? */
568 cmpl $ EXT(kret_popl_fs),8(%esp) /* popping FS? */
570 cmpl $ EXT(kret_popl_gs),8(%esp) /* popping GS? */
572 take_fault: /* if none of the above: */
573 jmp EXT(alltraps) /* treat as normal trap. */
576 * GP/NP fault on IRET: CS or SS is in error.
577 * All registers contain the user's values.
592 movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
593 popl %eax /* get trap number */
594 movl %eax,12-4(%esp) /* put in user trap number */
595 popl %eax /* get error code */
596 movl %eax,16-8(%esp) /* put in user errcode */
597 popl %eax /* restore eax */
599 jmp EXT(alltraps) /* take fault */
602 * Fault restoring a segment register. The user's registers are still
603 * saved on the stack. The offending segment register has not been
607 popl %eax /* get trap number */
608 popl %edx /* get error code */
609 addl $12,%esp /* pop stack to user regs */
610 jmp push_es /* (DS on top of stack) */
612 popl %eax /* get trap number */
613 popl %edx /* get error code */
614 addl $12,%esp /* pop stack to user regs */
615 jmp push_fs /* (ES on top of stack) */
617 popl %eax /* get trap number */
618 popl %edx /* get error code */
619 addl $12,%esp /* pop stack to user regs */
620 jmp push_gs /* (FS on top of stack) */
622 popl %eax /* get trap number */
623 popl %edx /* get error code */
624 addl $12,%esp /* pop stack to user regs */
625 jmp push_segregs /* (GS on top of stack) */
628 pushl %es /* restore es, */
630 pushl %fs /* restore fs, */
632 pushl %gs /* restore gs. */
634 movl %eax,R_TRAPNO(%esp) /* set trap number */
635 movl %edx,R_ERR(%esp) /* set error code */
637 jmp trap_set_segs /* take trap */
640 * Debug trap. Check for single-stepping across system call into
641 * kernel. If this is the case, taking the debug trap has turned
642 * off single-stepping - save the flags register with the trace
646 testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
647 jnz 0f /* isn`t kernel trap if so */
648 testl $3,4(%esp) /* is trap from kernel mode? */
650 cmpl $syscall_entry,(%esp) /* system call entry? */
652 /* flags are sitting where syscall */
654 addl $8,%esp /* remove eip/cs */
655 jmp syscall_entry_2 /* continue system call entry */
657 0: pushl $0 /* otherwise: */
658 pushl $(T_DEBUG) /* handle as normal */
659 jmp EXT(alltraps) /* debug fault */
662 * Page fault traps save cr2.
665 pushl $(T_PAGE_FAULT) /* mark a page fault trap */
666 pusha /* save the general registers */
667 movl %cr2,%eax /* get the faulting address */
668 movl %eax,12(%esp) /* save in esp save slot */
669 jmp trap_push_segs /* continue fault */
672 * All 'exceptions' enter here with:
678 * old esp if trapped from user
679 * old ss if trapped from user
681 * NB: below use of CPU_NUMBER assumes that macro will use correct
682 * segment register for any kernel data accesses.
685 pusha /* save the general registers */
687 pushl %ds /* save the segment registers */
695 movl %ax,%es /* switch to kernel data seg */
696 cld /* clear direction flag */
697 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
698 jnz trap_from_user /* user mode trap if so */
699 testb $3,R_CS(%esp) /* user mode trap? */
702 cmpl $0,CX(EXT(active_kloaded),%edx)
703 je trap_from_kernel /* if clear, truly in kernel */
705 cmpl ETEXT_ADDR,R_EIP(%esp) /* pc within kernel? */
710 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
711 * so transfer the stack frame into the PCB explicitly, then
712 * start running on resulting "PCB stack". We have to set
713 * up a simulated "uesp" manually, since there's none in the
720 movl CX(EXT(active_kloaded),%edx),%ebx
721 movl CX(EXT(kernel_stack),%edx),%eax
723 FRAME_STACK_TO_PCB(%ebx,%eax)
734 movl CX(EXT(kernel_stack),%edx),%ebx
735 xchgl %ebx,%esp /* switch to kernel stack */
736 /* user regs pointer already set */
738 pushl %ebx /* record register save area */
739 pushl %ebx /* pass register save area to trap */
740 call EXT(user_trap) /* call user trap routine */
741 movl 4(%esp),%esp /* switch back to PCB stack */
744 * Return from trap or system call, checking for ASTs.
748 LEXT(return_from_trap)
750 cmpl $0,CX(EXT(need_ast),%edx)
751 je EXT(return_to_user) /* if we need an AST: */
753 movl CX(EXT(kernel_stack),%edx),%esp
754 /* switch to kernel stack */
755 pushl $0 /* push preemption flag */
756 call EXT(i386_astintr) /* take the AST */
757 addl $4,%esp /* pop preemption flag */
758 popl %esp /* switch back to PCB stack (w/exc link) */
759 jmp EXT(return_from_trap) /* and check again (rare) */
760 /* ASTs after this point will */
764 * Arrange the checks needed for kernel-loaded (or kernel-loading)
765 * threads so that branch is taken in kernel-loaded case.
770 cmpl $0,CX(EXT(active_kloaded),%eax)
771 jnz EXT(return_xfer_stack)
772 movl $ CPD_ACTIVE_THREAD,%ebx
773 movl %gs:(%ebx),%ebx /* get active thread */
774 movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */
775 cmpl $0,ACT_KLOADING(%ebx) /* check if kernel-loading */
776 jnz EXT(return_kernel_loading)
780 movl $ CPD_PREEMPTION_LEVEL,%ebx
782 je EXT(return_from_kernel)
784 #endif /* MACH_ASSERT */
788 * Return from kernel mode to interrupted thread.
791 LEXT(return_from_kernel)
793 popl %gs /* restore segment registers */
800 popa /* restore general registers */
801 addl $8,%esp /* discard trap number and error code */
804 iret /* return from interrupt */
807 LEXT(return_xfer_stack)
809 * If we're on PCB stack in a kernel-loaded task, we have
810 * to transfer saved state back to thread stack and swap
811 * stack pointers here, because the hardware's not going
816 movl CX(EXT(kernel_stack),%eax),%esp
817 movl CX(EXT(active_kloaded),%eax),%eax
818 FRAME_PCB_TO_STACK(%eax)
821 jmp EXT(return_from_kernel)
824 * Hate to put this here, but setting up a separate swap_func for
825 * kernel-loaded threads no longer works, since thread executes
826 * "for a while" (i.e., until it reaches glue code) when first
827 * created, even if it's nominally suspended. Hence we can't
828 * transfer the PCB when the thread first resumes, because we
829 * haven't initialized it yet.
832 * Have to force transfer to new stack "manually". Use a string
833 * move to transfer all of our saved state to the stack pointed
834 * to by iss.uesp, then install a pointer to it as our current
837 LEXT(return_kernel_loading)
839 movl CX(EXT(kernel_stack),%eax),%esp
840 movl $ CPD_ACTIVE_THREAD,%ebx
841 movl %gs:(%ebx),%ebx /* get active thread */
842 movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */
843 movl %ebx,%edx /* save for later */
844 movl $0,ACT_KLOADING(%edx) /* clear kernel-loading bit */
845 FRAME_PCB_TO_STACK(%ebx)
846 movl %ebx,%esp /* start running on new stack */
847 movl $1,ACT_KLOADED(%edx) /* set kernel-loaded bit */
848 movl %edx,CX(EXT(active_kloaded),%eax) /* set cached indicator */
849 jmp EXT(return_from_kernel)
852 * Trap from kernel mode. No need to switch stacks or load segment registers.
855 #if MACH_KDB || MACH_KGDB
858 movl %esp,%ebx /* save current stack */
860 cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
864 cmpl $0,EXT(kgdb_active) /* Unexpected trap in kgdb */
867 pushl %esp /* Already on kgdb stack */
871 jmp EXT(return_from_kernel)
872 0: /* should kgdb handle this exception? */
873 cmpl $(T_NO_FPU),R_TRAPNO(%esp) /* FPU disabled? */
875 cmpl $(T_PAGE_FAULT),R_TRAPNO(%esp) /* page fault? */
878 cli /* disable interrupts */
879 CPU_NUMBER(%edx) /* get CPU number */
880 movl CX(EXT(kgdb_stacks),%edx),%ebx
881 xchgl %ebx,%esp /* switch to kgdb stack */
882 pushl %ebx /* pass old sp as an arg */
883 call EXT(kgdb_from_kernel)
884 popl %esp /* switch back to kernel stack */
885 jmp EXT(return_from_kernel)
887 #endif /* MACH_KGDB */
890 cmpl $0,EXT(db_active) /* could trap be from ddb? */
893 CPU_NUMBER(%edx) /* see if this CPU is in ddb */
894 cmpl $0,CX(EXT(kdb_active),%edx)
896 #endif /* NCPUS > 1 */
898 call EXT(db_trap_from_asm)
900 jmp EXT(return_from_kernel)
904 * Dilemma: don't want to switch to kernel_stack if trap
905 * "belongs" to ddb; don't want to switch to db_stack if
906 * trap "belongs" to kernel. So have to duplicate here the
907 * set of trap types that kernel_trap() handles. Note that
908 * "unexpected" page faults will not be handled by kernel_trap().
909 * In this panic-worthy case, we fall into the debugger with
910 * kernel_stack containing the call chain that led to the
913 movl R_TRAPNO(%esp),%edx
914 cmpl $(T_PAGE_FAULT),%edx
916 cmpl $(T_NO_FPU),%edx
918 cmpl $(T_FPU_FAULT),%edx
920 cmpl $(T_FLOATING_POINT_ERROR),%edx
922 cmpl $(T_PREEMPT),%edx
925 #endif /* MACH_KDB */
927 CPU_NUMBER(%edx) /* get CPU number */
928 cmpl CX(EXT(kernel_stack),%edx),%esp
929 /* if not already on kernel stack, */
930 ja 5f /* check some more */
931 cmpl CX(EXT(active_stacks),%edx),%esp
932 ja 6f /* on kernel stack: no switch */
934 movl CX(EXT(kernel_stack),%edx),%esp
936 pushl %ebx /* save old stack */
937 pushl %ebx /* pass as parameter */
938 call EXT(kernel_trap) /* to kernel trap routine */
939 addl $4,%esp /* pop parameter */
943 * If kernel_trap returns false, trap wasn't handled.
948 movl CX(EXT(db_stacks),%edx),%esp
949 pushl %ebx /* pass old stack as parameter */
950 call EXT(db_trap_from_asm)
951 #endif /* MACH_KDB */
953 cli /* disable interrupts */
954 CPU_NUMBER(%edx) /* get CPU number */
955 movl CX(EXT(kgdb_stacks),%edx),%esp
956 pushl %ebx /* pass old stack as parameter */
957 call EXT(kgdb_from_kernel)
958 #endif /* MACH_KGDB */
959 addl $4,%esp /* pop parameter */
963 * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap
966 pushl %ebx /* pass old stack as parameter */
968 addl $4,%esp /* pop parameter */
970 movl %ebx,%esp /* get old stack (from callee-saves reg) */
971 #else /* MACH_KDB || MACH_KGDB */
972 pushl %esp /* pass parameter */
973 call EXT(kernel_trap) /* to kernel trap routine */
974 addl $4,%esp /* pop parameter */
975 #endif /* MACH_KDB || MACH_KGDB */
980 movl CX(EXT(need_ast),%edx),%eax /* get pending asts */
981 testl $ AST_URGENT,%eax /* any urgent preemption? */
982 je EXT(return_from_kernel) /* no, nothing to do */
983 cmpl $0,EXT(preemptable) /* kernel-mode, preemption enabled? */
984 je EXT(return_from_kernel) /* no, skip it */
985 cmpl $ T_PREEMPT,48(%esp) /* preempt request? */
986 jne EXT(return_from_kernel) /* no, nothing to do */
987 movl CX(EXT(kernel_stack),%edx),%eax
990 andl $(-KERNEL_STACK_SIZE),%ecx
991 testl %ecx,%ecx /* are we on the kernel stack? */
992 jne EXT(return_from_kernel) /* no, skip it */
994 #if PREEMPT_DEBUG_LOG
995 pushl 28(%esp) /* stack pointer */
996 pushl 24+4(%esp) /* frame pointer */
997 pushl 56+8(%esp) /* stack pointer */
999 call EXT(log_thread_action)
1002 0: String "trap preempt eip"
1004 #endif /* PREEMPT_DEBUG_LOG */
1006 pushl $1 /* push preemption flag */
1007 call EXT(i386_astintr) /* take the AST */
1008 addl $4,%esp /* pop preemption flag */
1009 #endif /* MACH_RT */
1011 jmp EXT(return_from_kernel)
1014 * Called as a function, makes the current thread
1015 * return from the kernel as if from an exception.
1018 .globl EXT(thread_exception_return)
1019 .globl EXT(thread_bootstrap_return)
1020 LEXT(thread_exception_return)
1021 LEXT(thread_bootstrap_return)
1022 movl %esp,%ecx /* get kernel stack */
1023 or $(KERNEL_STACK_SIZE-1),%ecx
1024 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1025 jmp EXT(return_from_trap)
1027 Entry(call_continuation)
1028 movl S_ARG0,%eax /* get continuation */
1029 movl %esp,%ecx /* get kernel stack */
1030 or $(KERNEL_STACK_SIZE-1),%ecx
1031 addl $(-3-IKS_SIZE),%ecx
1032 movl %ecx,%esp /* pop the stack */
1033 xorl %ebp,%ebp /* zero frame pointer */
1034 jmp *%eax /* goto continuation */
1037 #define LOG_INTERRUPT(info,msg) \
1041 call EXT(log_thread_action) ; \
1044 #define CHECK_INTERRUPT_TIME(n) \
1047 call EXT(check_thread_time) ; \
1051 #define LOG_INTERRUPT(info,msg)
1052 #define CHECK_INTERRUPT_TIME(n)
1056 String "interrupt start"
1058 String "interrupt end"
1061 * All interrupts enter here.
1062 * old %eax on stack; interrupt number in %eax.
1065 pushl %ecx /* save registers */
1067 cld /* clear direction flag */
1069 cmpl %ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
1070 jb int_from_intstack /* if not: */
1072 pushl %ds /* save segment registers */
1074 mov %ss,%dx /* switch to kernel segments */
1082 movl CX(EXT(int_stack_top),%edx),%ecx
1083 movl 20(%esp),%edx /* get eip */
1084 xchgl %ecx,%esp /* switch to interrupt stack */
1087 pushl %ecx /* save pointer to old stack */
1089 pushl %ebx /* save %ebx - out of the way */
1090 /* so stack looks the same */
1091 pushl %ecx /* save pointer to old stack */
1092 TIME_INT_ENTRY /* do timing */
1095 pushl %edx /* pass eip to pe_incoming_interrupt */
1098 movl $ CPD_PREEMPTION_LEVEL,%edx
1100 #endif /* MACH_RT */
1102 movl $ CPD_INTERRUPT_LEVEL,%edx
1105 pushl %eax /* Push trap number */
1106 call EXT(PE_incoming_interrupt) /* call generic interrupt routine */
1107 addl $8,%esp /* Pop trap number and eip */
1109 .globl EXT(return_to_iret)
1110 LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
1112 movl $ CPD_INTERRUPT_LEVEL,%edx
1116 movl $ CPD_PREEMPTION_LEVEL,%edx
1118 #endif /* MACH_RT */
1122 TIME_INT_EXIT /* do timing */
1123 movl 4(%esp),%ebx /* restore the extra reg we saved */
1126 popl %esp /* switch back to old stack */
1129 movl CX(EXT(need_ast),%edx),%eax
1130 testl %eax,%eax /* any pending asts? */
1131 je 1f /* no, nothing to do */
1132 testl $(EFL_VM),I_EFL(%esp) /* if in V86 */
1133 jnz ast_from_interrupt /* take it */
1134 testb $3,I_CS(%esp) /* user mode, */
1135 jnz ast_from_interrupt /* take it */
1137 cmpl ETEXT_ADDR,I_EIP(%esp) /* if within kernel-loaded task, */
1138 jnb ast_from_interrupt /* take it */
1142 cmpl $0,EXT(preemptable) /* kernel-mode, preemption enabled? */
1143 je 1f /* no, skip it */
1144 movl $ CPD_PREEMPTION_LEVEL,%ecx
1145 cmpl $0,%gs:(%ecx) /* preemption masked? */
1146 jne 1f /* yes, skip it */
1147 testl $ AST_URGENT,%eax /* any urgent requests? */
1148 je 1f /* no, skip it */
1149 cmpl $ EXT(locore_end),I_EIP(%esp) /* are we in locore code? */
1150 jb 1f /* yes, skip it */
1151 movl CX(EXT(kernel_stack),%edx),%eax
1154 andl $(-KERNEL_STACK_SIZE),%ecx
1155 testl %ecx,%ecx /* are we on the kernel stack? */
1156 jne 1f /* no, skip it */
1159 * Take an AST from kernel space. We don't need (and don't want)
1160 * to do as much as the case where the interrupt came from user
1163 #if PREEMPT_DEBUG_LOG
1168 call EXT(log_thread_action)
1171 0: String "intr preempt eip"
1173 #endif /* PREEMPT_DEBUG_LOG */
1176 pushl $1 /* push preemption flag */
1177 call EXT(i386_astintr) /* take the AST */
1178 addl $4,%esp /* pop preemption flag */
1179 #endif /* MACH_RT */
1182 pop %es /* restore segment regs */
1187 iret /* return to caller */
1191 movl $ CPD_PREEMPTION_LEVEL,%edx
1193 #endif /* MACH_RT */
1195 movl $ CPD_INTERRUPT_LEVEL,%edx
1199 pushl %edx /* push eip */
1201 pushl %eax /* Push trap number */
1203 call EXT(PE_incoming_interrupt)
1204 addl $4,%esp /* pop eip */
1206 LEXT(return_to_iret_i) /* ( label for kdb_kintr) */
1208 addl $4,%esp /* pop trap number */
1210 movl $ CPD_INTERRUPT_LEVEL,%edx
1214 movl $ CPD_PREEMPTION_LEVEL,%edx
1216 #endif /* MACH_RT */
1218 pop %edx /* must have been on kernel segs */
1220 pop %eax /* no ASTs */
1224 * Take an AST from an interrupt.
1238 pop %es /* restore all registers ... */
1243 sti /* Reenable interrupts */
1244 pushl $0 /* zero code */
1245 pushl $0 /* zero trap number */
1246 pusha /* save general registers */
1247 push %ds /* save segment registers */
1251 mov %ss,%dx /* switch to kernel segments */
1258 * See if we interrupted a kernel-loaded thread executing
1262 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
1263 jnz 0f /* user mode trap if so */
1265 jnz 0f /* user mode, back to normal */
1267 cmpl ETEXT_ADDR,R_EIP(%esp)
1268 jb 0f /* not kernel-loaded, back to normal */
1272 * Transfer the current stack frame by hand into the PCB.
1275 movl CX(EXT(active_kloaded),%edx),%eax
1276 movl CX(EXT(kernel_stack),%edx),%ebx
1278 FRAME_STACK_TO_PCB(%eax,%ebx)
1285 movl CX(EXT(kernel_stack),%edx),%eax
1286 /* switch to kernel stack */
1290 pushl $0 /* push preemption flag */
1291 call EXT(i386_astintr) /* take the AST */
1292 addl $4,%esp /* pop preemption flag */
1293 popl %esp /* back to PCB stack */
1294 jmp EXT(return_from_trap) /* return */
/*
 * NOTE(review): kdb/kgdb keyboard-interrupt entry machinery.  This is a
 * fragment — the ENTRY() label for kdb_kintr, the numeric branch labels,
 * and several register save/restore lines are elided by extraction.
 * kdb_kintr walks saved %ebp frames until it finds the frame that returns
 * into the interrupt handler (return_to_iret or return_to_iret_i), then
 * patches that return address to kdb_from_iret / kdb_from_iret_i so the
 * debugger is entered when the interrupt unwinds.
 */
1296 #if	MACH_KDB || MACH_KGDB
1298 * kdb_kintr:	enter kdb from keyboard interrupt.
1299 * Chase down the stack frames until we find one whose return
1300 * address is the interrupt handler.   At that point, we have:
1302 * frame->	saved %ebp
1303 *		return address in interrupt handler
1306 *		return address == return_to_iret_i
1315 * frame->	saved %ebp
1316 *		return address in interrupt handler
1319 *		return address == return_to_iret
1320 *		pointer to save area on old stack
1321 *		[ saved %ebx, if accurate timing ]
1323 * old stack:	saved %es
1332 * Call kdb, passing it that register save area.
1337 #endif	/* MACH_KGDB */
1340 #endif	/* MACH_KDB */
1341	movl	%ebp,%eax		/* save caller`s frame pointer */
1342	movl	$ EXT(return_to_iret),%ecx /* interrupt return address 1 */
1343	movl	$ EXT(return_to_iret_i),%edx /* interrupt return address 2 */
1345 0:	cmpl	16(%eax),%ecx		/* does this frame return to */
1346					/* interrupt handler (1)? */
1348	cmpl	$kdb_from_iret,16(%eax)
1350	cmpl	16(%eax),%edx		/* interrupt handler (2)? */
1352	cmpl	$kdb_from_iret_i,16(%eax)
1354	movl	(%eax),%eax		/* try next frame */
1357 1:	movl	$kdb_from_iret,16(%eax)	/* returns to kernel/user stack */
1360 2:	movl	$kdb_from_iret_i,16(%eax)
1361					/* returns to interrupt stack */
1365 * On return from keyboard interrupt, we will execute
1367 *	if returning to an interrupt on the interrupt stack
1369 *	if returning to an interrupt on the user or kernel stack
/*
 * kdb_from_iret: entered in place of return_to_iret when the interrupt
 * returns to a user/kernel stack.  Saves registers, calls the debugger
 * entry (kgdb_kentry/kdb_kentry), restores and resumes the normal return.
 * Label lines themselves are elided here.
 */
1372	/* save regs in known locations */
1374	pushl	%ebx			/* caller`s %ebx is in reg */
1376	movl	4(%esp),%eax		/* get caller`s %ebx */
1377	pushl	%eax			/* push on stack */
1386	pushl	%esp			/* pass regs */
1387	call	EXT(kgdb_kentry)	/* to kgdb */
1388	addl	$4,%esp			/* pop parameters */
1389 #endif	/* MACH_KGDB */
1391	pushl	%esp			/* pass regs */
1392	call	EXT(kdb_kentry)		/* to kdb */
1393	addl	$4,%esp			/* pop parameters */
1394 #endif	/* MACH_KDB */
1395	pop	%gs			/* restore registers */
1406	jmp	EXT(return_to_iret)	/* normal interrupt return */
/*
 * kdb_from_iret_i: same idea, but for an interrupt taken on the interrupt
 * stack — fakes a trap frame (zero trapno/error), saves state, switches to
 * the per-cpu kgdb stack if MACH_KGDB, then calls into kdb with type -1.
 */
1408 kdb_from_iret_i:			/* on interrupt stack */
1409	pop	%edx			/* restore saved registers */
1412	pushl	$0			/* zero error code */
1413	pushl	$0			/* zero trap number */
1414	pusha				/* save general registers */
1415	push	%ds			/* save segment registers */
1420	cli				/* disable interrupts */
1421	CPU_NUMBER(%edx)		/* get CPU number */
1422	movl	CX(EXT(kgdb_stacks),%edx),%ebx
1423	xchgl	%ebx,%esp		/* switch to kgdb stack */
1424	pushl	%ebx			/* pass old sp as an arg */
1425	call	EXT(kgdb_from_kernel)
1426	popl	%esp			/* switch back to interrupt stack */
1427 #endif	/* MACH_KGDB */
1429	pushl	%esp			/* pass regs, */
1430	pushl	$0			/* code, */
1431	pushl	$-1			/* type to kdb */
1434 #endif	/* MACH_KDB */
1435	pop	%gs			/* restore segment registers */
1439	popa				/* restore general registers */
1443 #endif	/* MACH_KDB || MACH_KGDB */
/*
 * NOTE(review): mach_rpc call-gate entry (fragment — the Entry() line,
 * numeric labels, and some pushes are elided).  Saves a trap-style frame,
 * switches to the kernel data segment, un-shuffles eflags/eip/cs (a call
 * gate stacks them in a different order than a trap), indexes the mach
 * trap table, handles the kernel-loaded-task case by hand-copying the
 * frame into the PCB, copies user arguments, and dispatches the trap
 * routine before returning through return_from_trap.
 */
1447 * Mach RPC enters through a call gate, like a system call.
1451	pushf				/* save flags as soon as possible */
1452	pushl	%eax			/* save system call number */
1453	pushl	$0			/* clear trap number slot */
1455	pusha				/* save the general registers */
1456	pushl	%ds			/* and the segment registers */
1461	mov	%ss,%dx			/* switch to kernel data segment */
1468 * Shuffle eflags,eip,cs into proper places
1471	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
1472	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
1473	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
1474	movl	%ecx,R_EIP(%esp)	/* fix eip */
1475	movl	%edx,R_CS(%esp)		/* fix cs */
1476	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */
/* trap numbers are negative; each mach_trap_table entry is 16 bytes */
1481	negl	%eax			/* get system call number */
1482	shll	$4,%eax			/* manual indexing */
1485 * Check here for mach_rpc from kernel-loaded task --
1486 *  - Note that kernel-loaded task returns via real return.
1487 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1488 * so transfer the stack frame into the PCB explicitly, then
1489 * start running on resulting "PCB stack".  We have to set
1490 * up a simulated "uesp" manually, since there's none in the
1493	cmpl	$0,CX(EXT(active_kloaded),%edx)
1496	movl	CX(EXT(active_kloaded),%edx),%ebx
1497	movl	CX(EXT(kernel_stack),%edx),%edx
1500	FRAME_STACK_TO_PCB(%ebx,%edx)
1508	movl	CX(EXT(kernel_stack),%edx),%ebx
1509					/* get current kernel stack */
1510	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
1511					/* user registers. */
1516 * Register use on entry:
1517 *   eax contains syscall number
1518 *   ebx contains user regs pointer
1520 #undef	RPC_TRAP_REGISTERS
1521 #ifdef	RPC_TRAP_REGISTERS
/* copy arguments from the user stack; %fs selects user or kernel DS */
1527	movl	EXT(mach_trap_table)(%eax),%ecx
1528					/* get number of arguments */
1529	jecxz	2f			/* skip argument copy if none */
1530	movl	R_UESP(%ebx),%esi	/* get user stack pointer */
1531	lea	4(%esi,%ecx,4),%esi	/* skip user return address, */
1532					/* and point past last argument */
1533					/* edx holds cpu number from above */
1534	movl	CX(EXT(active_kloaded),%edx),%edx
1535					/* point to current thread */
1536	orl	%edx,%edx		/* if ! kernel-loaded, check addr */
1538	mov	%ds,%dx			/* kernel data segment access */
1541	cmpl	$(VM_MAX_ADDRESS),%esi	/* in user space? */
1542	ja	mach_call_addr		/* address error if not */
1543	movl	$ USER_DS,%edx		/* user data segment access */
1546	movl	%esp,%edx		/* save kernel ESP for error recovery */
1550	RECOVER(mach_call_addr_push)
1551	pushl	%fs:(%esi)		/* push argument on stack */
1552	loop	1b			/* loop for all arguments */
1556 * Register use on entry:
1557 *   eax contains syscall number
1558 *   ebx contains user regs pointer
1562	call	*EXT(mach_trap_table)+4(%eax)
1563					/* call procedure */
/* locate the PCB stack pointer saved at the base of the kernel stack */
1564	movl	%esp,%ecx		/* get kernel stack */
1565	or	$(KERNEL_STACK_SIZE-1),%ecx
1566	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
1567	movl	%eax,R_EAX(%esp)	/* save return value */
1568	jmp	EXT(return_from_trap)	/* return to user */
/*
 * NOTE(review): "int 0x80" system-call entry (fragment — the fall-through
 * into the common syscall path is past elided lines).  Unlike the call-gate
 * path, the trap already pushed eflags in the right slot, so no shuffling
 * is needed; this just builds the rest of the trap-style save area.
 */
1572 * Special system call entry for "int 0x80", which has the "eflags"
1573 * register saved at the right place already.
1574 * Fall back to the common syscall path after saving the registers.
1579 *	old esp		if trapped from user
1580 *	old ss		if trapped from user
1582 * XXX: for the moment, we don't check for int 0x80 from kernel mode.
1584 Entry(syscall_int80)
1585	pushl	%eax			/* save system call number */
1586	pushl	$0			/* clear trap number slot */
1588	pusha				/* save the general registers */
1589	pushl	%ds			/* and the segment registers */
1594	mov	%ss,%dx			/* switch to kernel data segment */
/*
 * NOTE(review): common system-call entry via call gate (fragment — the
 * Entry() line and numeric labels are elided).  Builds a trap-style save
 * area, fixes the eflags/eip/cs ordering, handles kernel-loaded tasks by
 * copying the frame into the PCB, then decides between a native Mach
 * syscall and a user-space emulated syscall by consulting the task's
 * emulation vector.
 */
1603 * System call enters through a call gate.  Flags are not saved -
1604 * we must shuffle stack to look like trap save area.
1611 * eax contains system call number.
1613 * NB: below use of CPU_NUMBER assumes that macro will use correct
1614 * correct segment register for any kernel data accesses.
1618	pushf				/* save flags as soon as possible */
1620	pushl	%eax			/* save system call number */
1621	pushl	$0			/* clear trap number slot */
1623	pusha				/* save the general registers */
1624	pushl	%ds			/* and the segment registers */
1629	mov	%ss,%dx			/* switch to kernel data segment */
1636 * Shuffle eflags,eip,cs into proper places
1639	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
1640	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
1641	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
1642	movl	%ecx,R_EIP(%esp)	/* fix eip */
1643	movl	%edx,R_CS(%esp)		/* fix cs */
1644	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */
1649 * Check here for syscall from kernel-loaded task --
1650 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1651 * so transfer the stack frame into the PCB explicitly, then
1652 * start running on resulting "PCB stack".  We have to set
1653 * up a simulated "uesp" manually, since there's none in the
1656	cmpl	$0,CX(EXT(active_kloaded),%edx)
1659	movl	CX(EXT(active_kloaded),%edx),%ebx
1660	movl	CX(EXT(kernel_stack),%edx),%edx
1662	FRAME_STACK_TO_PCB(%ebx,%edx)
1672	movl	CX(EXT(kernel_stack),%edx),%ebx
1673					/* get current kernel stack */
1674	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
1675					/* user registers. */
1676					/* user regs pointer already set */
1679 * Check for MACH or emulated system call
1680 * Register use (from here till we begin processing call):
1681 * eax contains system call number
1682 * ebx points to user regs
/* walk thread -> top_act -> task -> emulation vector */
1685	movl	$ CPD_ACTIVE_THREAD,%edx
1686	movl	%gs:(%edx),%edx		/* get active thread */
1687					/* point to current thread */
1688	movl	TH_TOP_ACT(%edx),%edx	/* get thread->top_act */
1689	movl	ACT_TASK(%edx),%edx	/* point to task */
1690	movl	TASK_EMUL(%edx),%edx	/* get emulation vector */
1691	orl	%edx,%edx		/* if none, */
1692	je	syscall_native		/* do native system call */
1693	movl	%eax,%ecx		/* copy system call number */
1694	subl	DISP_MIN(%edx),%ecx	/* get displacement into syscall */
1696	jl	syscall_native		/* too low - native system call */
1697	cmpl	DISP_COUNT(%edx),%ecx	/* check range */
1698	jnl	syscall_native		/* too high - native system call */
1699	movl	DISP_VECTOR(%edx,%ecx,4),%edx
1700					/* get the emulation vector */
1701	orl	%edx,%edx		/* emulated system call if not zero */
/*
 * NOTE(review): native Mach system-call dispatch (fragment — labels such
 * as syscall_native:, mach_call:, mach_call_call: and some loop labels are
 * elided).  Validates the (negated) syscall number against
 * mach_trap_count, indexes the 16-byte-per-entry mach_trap_table, copies
 * user arguments onto the kernel stack via %fs, optionally wraps the call
 * in ETAP event probes, calls the trap procedure, and returns through
 * return_from_trap after stashing %eax in the saved frame.
 */
1705 * Native system call.
1706 * Register use on entry:
1707 *   eax contains syscall number
1708 *   ebx points to user regs
1711	negl	%eax			/* get system call number */
1712	jl	mach_call_range		/* out of range if it was positive */
1714	cmpl	EXT(mach_trap_count),%eax /* check system call table bounds */
1715	jg	mach_call_range		/* error if out of range */
1716	shll	$4,%eax			/* manual indexing */
1718	movl	EXT(mach_trap_table)+4(%eax),%edx
/* kern_invalid entries may still be "server" syscalls if an emulator exists */
1720	cmpl	$ EXT(kern_invalid),%edx	/* if not "kern_invalid" */
1721	jne	do_native_call		/* go on with Mach syscall */
1723	movl	$ CPD_ACTIVE_THREAD,%edx
1724	movl	%gs:(%edx),%edx		/* get active thread */
1725					/* point to current thread */
1726	movl	TH_TOP_ACT(%edx),%edx	/* get thread->top_act */
1727	movl	ACT_TASK(%edx),%edx	/* point to task */
1728	movl	TASK_EMUL(%edx),%edx	/* get emulation vector */
1729	orl	%edx,%edx		/* if it exists, */
1730	jne	do_native_call		/* do native system call */
1731	shrl	$4,%eax			/* restore syscall number */
1732	jmp	mach_call_range		/* try it as a "server" syscall */
1735 * Register use on entry:
1736 *   eax contains syscall number
1737 *   ebx contains user regs pointer
1740	movl	EXT(mach_trap_table)(%eax),%ecx
1741					/* get number of arguments */
1742	jecxz	mach_call_call		/* skip argument copy if none */
1743	movl	R_UESP(%ebx),%esi	/* get user stack pointer */
1744	lea	4(%esi,%ecx,4),%esi	/* skip user return address, */
1745					/* and point past last argument */
1747	movl	CX(EXT(active_kloaded),%edx),%edx
1748					/* point to current thread */
1749	orl	%edx,%edx		/* if kernel-loaded, skip addr check */
1751	mov	%ds,%dx			/* kernel data segment access */
1754	cmpl	$(VM_MAX_ADDRESS),%esi	/* in user space? */
1755	ja	mach_call_addr		/* address error if not */
1756	movl	$ USER_DS,%edx		/* user data segment access */
1759	movl	%esp,%edx		/* save kernel ESP for error recovery */
1763	RECOVER(mach_call_addr_push)
1764	pushl	%fs:(%esi)		/* push argument on stack */
1765	loop	2b			/* loop for all arguments */
1768 * Register use on entry:
1769 *   eax contains syscall number
1770 *   ebx contains user regs pointer
1776 #if	ETAP_EVENT_MONITOR
/* 0x200 presumably is mach_msg's trap index — elided context, TODO confirm */
1777	cmpl	$0x200, %eax		/* is this mach_msg? */
1778	jz	make_syscall		/* if yes, don't record event */
1780	pushal				/* Otherwise: save registers */
1781	pushl	%eax			/* push syscall number on stack*/
1782	call	EXT(etap_machcall_probe1) /* call event begin probe */
1783	add	$4,%esp			/* restore stack */
1784	popal				/* restore registers */
1786	call	*EXT(mach_trap_table)+4(%eax) /* call procedure */
1788	call	EXT(etap_machcall_probe2) /* call event end probe */
1790	jmp	skip_syscall		/* syscall already made */
1791 #endif	/* ETAP_EVENT_MONITOR */
1794	call	*EXT(mach_trap_table)+4(%eax)	/* call procedure */
/* locate the PCB stack pointer saved at the base of the kernel stack */
1797	movl	%esp,%ecx		/* get kernel stack */
1798	or	$(KERNEL_STACK_SIZE-1),%ecx
1799	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
1800	movl	%eax,R_EAX(%esp)	/* save return value */
1801	jmp	EXT(return_from_trap)	/* return to user */
/*
 * NOTE(review): syscall error paths (fragment — some labels and the
 * exception_triage call are elided).
 *  - mach_call_addr_push / mach_call_addr: user argument address was bad;
 *    rewrite the saved frame as a T_PAGE_FAULT and re-enter via take_trap.
 *  - mach_call_range: syscall number out of table range; either hand it to
 *    the task's emulator or raise a syscall exception.
 *  - syscall_failed: emulator path failed; convert to T_INVALID_OPCODE.
 */
1804 * Address out of range.  Change to page fault.
1805 * %esi holds failing address.
1806 * Register use on entry:
1807 *   ebx contains user regs pointer
1809 mach_call_addr_push:
1810	movl	%edx,%esp		/* clean parameters from stack */
1812	movl	%esi,R_CR2(%ebx)	/* set fault address */
1813	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
1814					/* set page-fault trap */
1815	movl	$(T_PF_USER),R_ERR(%ebx)
1816					/* set error code - read user space */
1818	jmp	EXT(take_trap)		/* treat as a trap */
1821 * System call out of range.  Treat as invalid-instruction trap.
1822 * (? general protection?)
1823 * Register use on entry:
1824 *   eax contains syscall number
1827	movl	$ CPD_ACTIVE_THREAD,%edx
1828	movl	%gs:(%edx),%edx		/* get active thread */
1830	movl	TH_TOP_ACT(%edx),%edx	/* get thread->top_act */
1831	movl	ACT_TASK(%edx),%edx	/* point to task */
1832	movl	TASK_EMUL(%edx),%edx	/* get emulation vector */
1833	orl	%edx,%edx		/* if emulator, */
1834	jne	EXT(syscall_failed)	/* handle as illegal instruction */
1835					/* else generate syscall exception: */
1838	push	$1			/* code_cnt = 1 */
1839	push	%edx			/* exception_type_t (see i/f docky) */
1845	.globl	EXT(syscall_failed)
1846 LEXT(syscall_failed)
1847	movl	%esp,%ecx		/* get kernel stack */
1848	or	$(KERNEL_STACK_SIZE-1),%ecx
1849	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
1851	movl	CX(EXT(kernel_stack),%edx),%ebx
1852					/* get current kernel stack */
1853	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
1854					/* user registers. */
1855					/* user regs pointer already set */
1857	movl	$(T_INVALID_OPCODE),R_TRAPNO(%ebx)
1858					/* set invalid-operation trap */
1859	movl	$0,R_ERR(%ebx)		/* clear error code */
1861	jmp	EXT(take_trap)		/* treat as a trap */
/*
 * NOTE(review): user-space syscall emulation (fragment — labels 0:/1: and
 * the syscall_addr: label are elided).  Pushes the saved eflags and eip
 * onto the *user* stack (via %fs), points the saved EIP at the emulator
 * vector, and resumes the thread there; syscall_addr converts a bad user
 * stack address into a page fault.
 */
1864 * User space emulation of system calls.
1865 * edx - user address to handle syscall
1867 * User stack will become:
1870 * Register use on entry:
1871 *   ebx contains user regs pointer
1872 *   edx contains emulator vector address
1875	movl	R_UESP(%ebx),%edi	/* get user stack pointer */
1877	movl	CX(EXT(active_kloaded),%eax),%eax
1878	orl	%eax,%eax		/* if thread not kernel-loaded, */
1879	jz	0f			/* do address checks */
1881	mov	%ds,%ax			/* kernel data segment access */
1882	jmp	1f			/* otherwise, skip them */
1884	cmpl	$(VM_MAX_ADDRESS),%edi	/* in user space? */
1885	ja	syscall_addr		/* address error if not */
1886	subl	$8,%edi			/* push space for new arguments */
1887	cmpl	$(VM_MIN_ADDRESS),%edi	/* still in user space? */
1888	jb	syscall_addr		/* error if not */
1889	movl	$ USER_DS,%ax		/* user data segment access */
1892	movl	R_EFLAGS(%ebx),%eax	/* move flags */
1894	RECOVER(syscall_addr)
1895	movl	%eax,%fs:0(%edi)	/* to user stack */
1896	movl	R_EIP(%ebx),%eax	/* move eip */
1898	RECOVER(syscall_addr)
1899	movl	%eax,%fs:4(%edi)	/* to user stack */
1900	movl	%edi,R_UESP(%ebx)	/* set new user stack pointer */
1901	movl	%edx,R_EIP(%ebx)	/* change return address to trap */
1902	movl	%ebx,%esp		/* back to PCB stack */
1904	jmp	EXT(return_from_trap)	/* return to user */
1908 * Address error - address is in %edi.
1909 * Register use on entry:
1910 *   ebx contains user regs pointer
1913	movl	%edi,R_CR2(%ebx)	/* set fault address */
1914	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
1915					/* set page-fault trap */
1916	movl	$(T_PF_USER),R_ERR(%ebx)
1917					/* set error code - read user space */
1919	jmp	EXT(take_trap)		/* treat as a trap */
/*
 * NOTE(review): copyin + copyinstr (fragments — ENTRY() lines, the %fs
 * setup, rep prefixes, and some labels are elided).
 *  copyin(user_src, kernel_dst, count): selects USER_DS or kernel DS for
 *  %fs depending on whether the active pmap is the kernel pmap, checks for
 *  address wrap, copies longwords then bytes with fault RECOVER hooks, and
 *  returns 0 on success or EFAULT on fault.
 */
1928 * Copy from user address space.
1929 * arg0:	user address
1930 * arg1:	kernel address
1936	pushl	%edi			/* save registers */
1938	movl	8+S_ARG0,%esi		/* get user start address */
1939	movl	8+S_ARG1,%edi		/* get kernel destination address */
1940	movl	8+S_ARG2,%edx		/* get count */
1942	lea	0(%esi,%edx),%eax	/* get user end address + 1 */
1944	movl	$ CPD_ACTIVE_THREAD,%ecx
1945	movl	%gs:(%ecx),%ecx		/* get active thread */
1946	movl	TH_TOP_ACT(%ecx),%ecx	/* get thread->top_act */
1947	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
1948	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
1949	cmpl	EXT(kernel_pmap), %ecx
1951	movl	$ USER_DS,%cx		/* user data segment access */
1955	jb	copyin_fail		/* fail if wrap-around */
1957	movl	%edx,%ecx		/* move by longwords first */
1960	RECOVER(copyin_fail)
1962	movsl				/* move longwords */
1963	movl	%edx,%ecx		/* now move remaining bytes */
1966	RECOVER(copyin_fail)
1969	xorl	%eax,%eax		/* return 0 for success */
1971	mov	%ss,%di			/* restore kernel data segment */
1974	popl	%edi			/* restore registers */
1976	ret				/* and return */
1979	movl	$ EFAULT,%eax		/* return error for failure */
1980	jmp	copy_ret		/* pop frame and return */
/*
 * copyinstr(user_src, kernel_dst, max, &actual): byte-at-a-time NUL-
 * terminated copy with the same %fs segment selection; returns 0,
 * EFAULT on fault, or ENAMETOOLONG when max is exhausted, and stores the
 * transferred length through arg3 when non-null.
 */
1983 * Copy string from user address space.
1984 * arg0:	user address
1985 * arg1:	kernel address
1986 * arg2:	max byte count
1987 * arg3:	actual byte count (OUT)
1991	pushl	%edi			/* save registers */
1993	movl	8+S_ARG0,%esi		/* get user start address */
1994	movl	8+S_ARG1,%edi		/* get kernel destination address */
1995	movl	8+S_ARG2,%edx		/* get count */
1997	lea	0(%esi,%edx),%eax	/* get user end address + 1 */
1999	movl	$ CPD_ACTIVE_THREAD,%ecx
2000	movl	%gs:(%ecx),%ecx		/* get active thread */
2001	movl	TH_TOP_ACT(%ecx),%ecx	/* get thread->top_act */
2002	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
2003	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
2004	cmpl	EXT(kernel_pmap), %ecx
2006	mov	%ds,%cx			/* kernel data segment access */
2009	movl	$ USER_DS,%cx		/* user data segment access */
2017	RECOVER(copystr_fail)		/* copy bytes... */
2018	movb	%fs:(%esi),%eax
2020	testl	%edi,%edi		/* if kernel address is ... */
2021	jz	3f			/* not NULL */
2022	movb	%eax,(%edi)		/* copy the byte */
2026	je	5f			/* Zero count.. error out */
2028	jne	2b			/* .. a NUL found? */
2031	movl	$ ENAMETOOLONG,%eax	/* String is too long.. */
2033	xorl	%eax,%eax		/* return zero for success */
2034	movl	8+S_ARG3,%edi		/* get OUT len ptr */
2036	jz	copystr_ret		/* if null, just return */
2038	movl	%esi,(%edi)		/* else set OUT arg to xfer len */
2040	popl	%edi			/* restore registers */
2042	ret				/* and return */
2045	movl	$ EFAULT,%eax		/* return error for failure */
2046	jmp	copy_ret		/* pop frame and return */
/*
 * NOTE(review): copyout (fragment — ENTRY() line, copyout_retry label,
 * rep prefixes, and invlpg/cr3-reload tail are elided).  Copies kernel ->
 * user, one destination page at a time.  On 386-family CPUs (which ignore
 * page write protection in ring 0) it walks the page tables by hand and
 * fakes a write fault by clearing the PTE valid bit when the page is not
 * writable, so that user COW/read-only semantics are honored.
 */
2049 * Copy to user address space.
2050 * arg0:	kernel address
2051 * arg1:	user address
2057	pushl	%edi			/* save registers */
2060	movl	12+S_ARG0,%esi		/* get kernel start address */
2061	movl	12+S_ARG1,%edi		/* get user start address */
2062	movl	12+S_ARG2,%edx		/* get count */
2064	leal	0(%edi,%edx),%eax	/* get user end address + 1 */
2066	movl	$ CPD_ACTIVE_THREAD,%ecx
2067	movl	%gs:(%ecx),%ecx		/* get active thread */
2068	movl	TH_TOP_ACT(%ecx),%ecx	/* get thread->top_act */
2069	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
2070	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
2071	cmpl	EXT(kernel_pmap), %ecx
2073	mov	%ds,%cx			/* else kernel data segment access */
2081 * Check whether user address space is writable
2082 * before writing to it - hardware is broken.
2084 * Skip check if "user" address is really in
2085 * kernel space (i.e., if it's in a kernel-loaded
2089 *	esi/edi	source/dest pointers for rep/mov
2090 *	ecx	counter for rep/mov
2091 *	edx	counts down from 3rd arg
2092 *	eax	count of bytes for each (partial) page copy
2093 *	ebx	shadows edi, used to adjust edx
2095	movl	%edi,%ebx		/* copy edi for syncing up */
2097 /* if restarting after a partial copy, put edx back in sync, */
2098	addl	%ebx,%edx		/* edx -= (edi - ebx); */
2100	movl	%edi,%ebx		/* ebx = edi; */
2103	cmpl	$ USER_DS,%cx		/* If kernel data segment */
2104	jnz	0f			/* skip check */
2106	cmpb	$(CPUID_FAMILY_386), EXT(cpuid_family)
2109	movl	%cr3,%ecx		/* point to page directory */
2111	andl	$(~0x7), %ecx		/* remove cpu number */
2112 #endif	/* NCPUS > 1 && AT386 */
2113	movl	%edi,%eax		/* get page directory bits */
2114	shrl	$(PDESHIFT),%eax	/* from user address */
2115	movl	KERNELBASE(%ecx,%eax,4),%ecx
2116					/* get page directory pointer */
2117	testl	$(PTE_V),%ecx		/* present? */
2118	jz	0f			/* if not, fault is OK */
2119	andl	$(PTE_PFN),%ecx		/* isolate page frame address */
2120	movl	%edi,%eax		/* get page table bits */
2121	shrl	$(PTESHIFT),%eax
2122	andl	$(PTEMASK),%eax		/* from user address */
2123	leal	KERNELBASE(%ecx,%eax,4),%ecx
2124					/* point to page table entry */
2125	movl	(%ecx),%eax		/* get it */
2126	testl	$(PTE_V),%eax		/* present? */
2127	jz	0f			/* if not, fault is OK */
2128	testl	$(PTE_W),%eax		/* writable? */
2129	jnz	0f			/* OK if so */
2131 * Not writable - must fake a fault.  Turn off access to the page.
2133	andl	$(PTE_INVALID),(%ecx)	/* turn off valid bit */
2134	movl	%cr3,%eax		/* invalidate TLB */
2138 * Copy only what fits on the current destination page.
2139 * Check for write-fault again on the next page.
2141	leal	NBPG(%edi),%eax		/* point to */
2142	andl	$(-NBPG),%eax		/* start of next page */
2143	subl	%edi,%eax		/* get number of bytes to that point */
2144	cmpl	%edx,%eax		/* bigger than count? */
2146	movl	%edx,%eax		/* use count */
2149	movl	%eax,%ecx		/* move by longwords first */
2152	RECOVER(copyout_fail)
2154	RETRY(copyout_retry)
2157	movl	%eax,%ecx		/* now move remaining bytes */
2160	RECOVER(copyout_fail)
2162	RETRY(copyout_retry)
2165	movl	%edi,%ebx		/* copy edi for syncing up */
2166	subl	%eax,%edx		/* and decrement count */
2167	jg	copyout_retry		/* restart on next page if not done */
2168	xorl	%eax,%eax		/* return 0 for success */
2170	mov	%ss,%di			/* restore kernel segment */
2174	popl	%edi			/* restore registers */
2176	ret				/* and return */
2179	movl	$ EFAULT,%eax		/* return error for failure */
2180	jmp	copyout_ret		/* pop frame and return */
/*
 * NOTE(review): assorted tiny hardware helpers (fragments — the ENTRY()
 * lines and most bodies are elided; only scattered instructions survive):
 * FP status read (fnstsw), FPU save/restore (fnsave/frstor), set_cr3 with
 * a redundant-load guard, TLB flush via CR3 reload, raw CR4 access
 * encoded as byte sequences (assembler of the era lacked the mnemonic),
 * set_tr (patches the TSS descriptor busy bit before ltr), and set_ts
 * (sets CR0.TS).
 */
2197	pushl	%eax			/* get stack space */
2213	xor	%eax,%eax		/* clear high 16 bits of eax */
2214	fnstsw	%ax			/* read FP status */
2218 * Clear FPU exceptions
2225 * Clear task-switched flag.
2232 * Save complete FPU state.  Save error for later.
2235	movl	4(%esp),%eax		/* get save area pointer */
2236	fnsave	(%eax)			/* save complete state, including */
2241 * Restore FPU state.
2244	movl	4(%esp),%eax		/* get save area pointer */
2245	frstor	(%eax)			/* restore complete state */
2255 #else	/* NCPUS > 1 && AT386 */
2256	movl	4(%esp),%eax		/* get new cr3 value */
2257 #endif	/* NCPUS > 1 && AT386 */
2259 * Don't set PDBR to a new value (hence invalidating the
2260 * "paging cache") if the new value matches the current one.
2262	movl	%cr3,%edx		/* get current cr3 value */
2264	je	0f			/* if two are equal, don't set */
2265	movl	%eax,%cr3		/* load it (and flush cache) */
2275	andl	$(~0x7), %eax		/* remove cpu number */
2276 #endif	/* NCPUS > 1 && AT386 */
2283	movl	%cr3,%eax		/* flush tlb by reloading CR3 */
2284	movl	%eax,%cr3		/* with itself */
2298	.byte	0x0f,0x20,0xe0		/* movl %cr4, %eax */
2306	.byte	0x0f,0x22,0xe0		/* movl %eax, %cr4 */
2325 * Read task register.
2333 * Set task register.  Also clears busy bit of task descriptor.
2336	movl	S_ARG0,%eax		/* get task segment number */
2337	subl	$8,%esp			/* push space for SGDT */
2338	sgdt	2(%esp)			/* store GDT limit and base (linear) */
2339	movl	4(%esp),%edx		/* address GDT */
2340	movb	$(K_TSS),5(%edx,%eax)	/* fix access byte in task descriptor */
2341	ltr	%ax			/* load task register */
2342	addl	$8,%esp			/* clear stack */
2343	ret				/* and return */
2346 * Set task-switched flag.
2349	movl	%cr0,%eax		/* get cr0 */
2350	orl	$(CR0_TS),%eax		/* or in TS bit */
2351	movl	%eax,%cr0		/* set cr0 */
/*
 * NOTE(review): single-datum programmed-I/O helpers, compiled when
 * MACH_KDB/MACH_ASSERT so the debugger can call them (they otherwise exist
 * as macros in i386/pio.h).  ENTRY() lines and ret instructions are elided
 * by extraction.  Each routine loads the port into %dx, the data into
 * %eax, and performs the in/out of the appropriate width.
 */
2355 * io register must not be used on slaves (no AT bus)
2357 #define	ILL_ON_SLAVE
2365 #define PUSH_FRAME	FRAME
2366 #define POP_FRAME	EMARF
2368 #else	/* MACH_ASSERT */
2376 #endif	/* MACH_ASSERT */
2379 #if	MACH_KDB || MACH_ASSERT
2382 * Following routines are also defined as macros in i386/pio.h
2383 * Compile then when MACH_KDB is configured so that they
2384 * can be invoked from the debugger.
2388 * void outb(unsigned char *io_port,
2389 *	  unsigned char byte)
2391 * Output a byte to an IO port.
2396	movl	ARG0,%edx		/* IO port address */
2397	movl	ARG1,%eax		/* data to output */
2398	outb	%al,%dx			/* send it out */
2403 * unsigned char inb(unsigned char *io_port)
2405 * Input a byte from an IO port.
2410	movl	ARG0,%edx		/* IO port address */
2411	xor	%eax,%eax		/* clear high bits of register */
2412	inb	%dx,%al			/* get the byte */
2417 * void outw(unsigned short *io_port,
2418 *	  unsigned short word)
2420 * Output a word to an IO port.
2425	movl	ARG0,%edx		/* IO port address */
2426	movl	ARG1,%eax		/* data to output */
2427	outw	%ax,%dx			/* send it out */
2432 * unsigned short inw(unsigned short *io_port)
2434 * Input a word from an IO port.
2439	movl	ARG0,%edx		/* IO port address */
2440	xor	%eax,%eax		/* clear high bits of register */
2441	inw	%dx,%ax			/* get the word */
2446 * void outl(unsigned int *io_port,
2447 *	  unsigned int byte)
2449 * Output an int to an IO port.
2454	movl	ARG0,%edx		/* IO port address*/
2455	movl	ARG1,%eax		/* data to output */
2456	outl	%eax,%dx		/* send it out */
2461 * unsigned int inl(unsigned int *io_port)
2463 * Input an int from an IO port.
2468	movl	ARG0,%edx		/* IO port address */
2469	inl	%dx,%eax		/* get the int */
2473 #endif	/* MACH_KDB || MACH_ASSERT*/
/*
 * NOTE(review): string (array) programmed-I/O helpers.  ENTRY() lines,
 * the rep outsb/outsw/outsl / rep insb/insw/insl instructions, and ret
 * lines are elided by extraction — only the register setup survives.
 * Output routines preserve %esi in %eax across the transfer; input
 * routines preserve %edi the same way.
 */
2476 * void loutb(unsigned byte *io_port,
2477 *	  unsigned byte *data,
2478 *	  unsigned int count)
2480 * Output an array of bytes to an IO port.
2486	movl	%esi,%eax		/* save register */
2487	movl	ARG0,%edx		/* get io port number */
2488	movl	ARG1,%esi		/* get data address */
2489	movl	ARG2,%ecx		/* get count */
2493	movl	%eax,%esi		/* restore register */
2499 * void loutw(unsigned short *io_port,
2500 *	  unsigned short *data,
2501 *	  unsigned int count)
2503 * Output an array of shorts to an IO port.
2509	movl	%esi,%eax		/* save register */
2510	movl	ARG0,%edx		/* get io port number */
2511	movl	ARG1,%esi		/* get data address */
2512	movl	ARG2,%ecx		/* get count */
2516	movl	%eax,%esi		/* restore register */
/* NOTE(review): comment says loutw but signature suggests loutl — same in
 * the original source; left as found. */
2521 * void loutw(unsigned short io_port,
2522 *	  unsigned int *data,
2523 *	  unsigned int count)
2525 * Output an array of longs to an IO port.
2531	movl	%esi,%eax		/* save register */
2532	movl	ARG0,%edx		/* get io port number */
2533	movl	ARG1,%esi		/* get data address */
2534	movl	ARG2,%ecx		/* get count */
2538	movl	%eax,%esi		/* restore register */
2544 * void linb(unsigned char *io_port,
2545 *	  unsigned char *data,
2546 *	  unsigned int count)
2548 * Input an array of bytes from an IO port.
2554	movl	%edi,%eax		/* save register */
2555	movl	ARG0,%edx		/* get io port number */
2556	movl	ARG1,%edi		/* get data address */
2557	movl	ARG2,%ecx		/* get count */
2561	movl	%eax,%edi		/* restore register */
2567 * void linw(unsigned short *io_port,
2568 *	  unsigned short *data,
2569 *	  unsigned int count)
2571 * Input an array of shorts from an IO port.
2577	movl	%edi,%eax		/* save register */
2578	movl	ARG0,%edx		/* get io port number */
2579	movl	ARG1,%edi		/* get data address */
2580	movl	ARG2,%ecx		/* get count */
2584	movl	%eax,%edi		/* restore register */
2590 * void linl(unsigned short io_port,
2591 *	  unsigned int *data,
2592 *	  unsigned int count)
2594 * Input an array of longs from an IO port.
2600	movl	%edi,%eax		/* save register */
2601	movl	ARG0,%edx		/* get io port number */
2602	movl	ARG1,%edi		/* get data address */
2603	movl	ARG2,%ecx		/* get count */
2607	movl	%eax,%edi		/* restore register */
/*
 * NOTE(review): inst_fetch + kdp_copy_kmem (fragments — rep prefixes and
 * some epilogue lines elided).
 *  inst_fetch(eip, cs): loads one instruction byte through %fs with fault
 *  recovery; the fault handler returns -1.
 *  kdp_copy_kmem(src, dst, count): copyin-style kernel-to-kernel copy for
 *  the kernel debugger; returns number of bytes transferred, 0 on fault.
 */
2613 * int inst_fetch(int eip, int cs);
2615 * Fetch instruction byte.  Return -1 if invalid address.
2617	.globl	EXT(inst_fetch)
2619	movl	S_ARG1, %eax		/* get segment */
2620	movw	%ax,%fs			/* into FS */
2621	movl	S_ARG0, %eax		/* get offset */
2623	RETRY(EXT(inst_fetch))		/* re-load FS on retry */
2625	RECOVER(EXT(inst_fetch_fault))
2626	movzbl	%fs:(%eax),%eax		/* load instruction byte */
2629 LEXT(inst_fetch_fault)
2630	movl	$-1,%eax		/* return -1 if error */
2636 * kdp_copy_kmem(char *src, char *dst, int count)
2638 * Similar to copyin except that both addresses are kernel addresses.
2641 ENTRY(kdp_copy_kmem)
2643	pushl	%edi			/* save registers */
2645	movl	8+S_ARG0,%esi		/* get kernel start address */
2646	movl	8+S_ARG1,%edi		/* get kernel destination address */
2648	movl	8+S_ARG2,%edx		/* get count */
2650	lea	0(%esi,%edx),%eax	/* get kernel end address + 1 */
2653	jb	kdp_vm_read_fail	/* fail if wrap-around */
2655	movl	%edx,%ecx		/* move by longwords first */
2658	RECOVER(kdp_vm_read_fail)
2660	movsl				/* move longwords */
2661	movl	%edx,%ecx		/* now move remaining bytes */
2664	RECOVER(kdp_vm_read_fail)
2668	movl	8+S_ARG2,%edx		/* get count */
2669	subl	%ecx,%edx		/* Return number of bytes transfered */
2672	popl	%edi			/* restore registers */
2674	ret				/* and return */
2677	xorl	%eax,%eax		/* didn't copy a thing. */
/*
 * NOTE(review): hardware debug-register hook entries dr0..dr3 (fragment —
 * entry labels and most of the dr_entry logic are elided; only the stores
 * of breakpoint addresses into the dr_addr array survive), followed by the
 * `preemptable` data word, disabled (zero) in both configurations.
 */
2686 * Done with recovery and retry tables.
2699 /* dr<i>(address, type, len, persistence)
2703	movl	%eax,EXT(dr_addr)
2709	movl	%eax,EXT(dr_addr)+1*4
2715	movl	%eax,EXT(dr_addr)+2*4
2722	movl	%eax,EXT(dr_addr)+3*4
2731	movl	%edx,EXT(dr_addr)+4*4
2732	andl	dr_msk(,%ecx,2),%edx		/* clear out new entry */
2733	movl	%edx,EXT(dr_addr)+5*4
2752	movl	%edx,EXT(dr_addr)+7*4
2759 DATA(preemptable)	/* Not on an MP (makes cpu_number() usage unsafe) */
2760 #if	MACH_RT && (NCPUS == 1)
2761	.long	0	/* FIXME -- Currently disabled */
2763	.long	0	/* FIX ME -- Currently disabled */
2764 #endif	/* MACH_RT && (NCPUS == 1) */
/*
 * NOTE(review): set_cpu_model (fragment — the 0: labels and the final ret
 * are elided).  Distinguishes 386 / 486 / CPUID-capable CPUs by toggling
 * EFLAGS.AC and EFLAGS.ID: a 386 cannot toggle AC, a 486 without CPUID
 * cannot toggle ID.  On CPUID-capable parts it stores vendor string,
 * signature fields (stepping/model/family/type) and, when supported, the
 * cache-configuration words.  CPUID is emitted as `.word 0xA20F` because
 * the assembler of the era lacked the mnemonic.
 */
2777 * Determine cpu model and set global cpuid_xxx variables
2779 * Relies on 386 eflags bit 18 (AC) always being zero & 486 preserving it.
2780 * Relies on 486 eflags bit 21 (ID) always being zero & 586 preserving it.
2781 * Relies on CPUID instruction for next x86 generations
2782 * (assumes cpuid-family-homogenous MPs; else convert to per-cpu array)
2785 ENTRY(set_cpu_model)
2787	pushl	%ebx			/* save ebx */
2788	andl	$~0x3,%esp		/* Align stack to avoid AC fault */
2789	pushfl				/* push EFLAGS */
2790	popl	%eax			/* pop into eax */
2791	movl	%eax,%ecx		/* Save original EFLAGS */
2792	xorl	$(EFL_AC+EFL_ID),%eax	/* toggle ID,AC bits */
2793	pushl	%eax			/* push new value */
2794	popfl				/* through the EFLAGS register */
2795	pushfl				/* and back */
2796	popl	%eax			/* into eax */
2797	movb	$(CPUID_FAMILY_386),EXT(cpuid_family)
2798	pushl	%ecx			/* push original EFLAGS */
2799	popfl				/* restore EFLAGS */
2800	xorl	%ecx,%eax		/* see what changed */
2801	testl	$ EFL_AC,%eax		/* test AC bit */
2802	jz	0f			/* if AC toggled (486 or higher) */
2804	movb	$(CPUID_FAMILY_486),EXT(cpuid_family)
2805	testl	$ EFL_ID,%eax		/* test ID bit */
2806	jz	0f			/* if ID toggled use cpuid instruction */
2808	xorl	%eax,%eax		/* get vendor identification string */
2809	.word	0xA20F			/* cpuid instruction */
2810	movl	%eax,EXT(cpuid_value)	/* Store high value */
2811	movl	%ebx,EXT(cpuid_vid)	/* Store byte 0-3 of Vendor ID */
2812	movl	%edx,EXT(cpuid_vid)+4	/* Store byte 4-7 of Vendor ID */
2813	movl	%ecx,EXT(cpuid_vid)+8	/* Store byte 8-B of Vendor ID */
2814	movl	$1,%eax			/* get processor signature */
2815	.word	0xA20F			/* cpuid instruction */
2816	movl	%edx,EXT(cpuid_feature)	/* Store feature flags */
/* peel 4-bit fields off the signature: stepping, model, family, type */
2817	movl	%eax,%ecx		/* Save original signature */
2818	andb	$0xF,%al		/* Get Stepping ID */
2819	movb	%al,EXT(cpuid_stepping)	/* Save Stepping ID */
2820	movl	%ecx,%eax		/* Get original signature */
2821	shrl	$4,%eax			/* Shift Stepping ID */
2822	movl	%eax,%ecx		/* Save original signature */
2823	andb	$0xF,%al		/* Get Model */
2824	movb	%al,EXT(cpuid_model)	/* Save Model */
2825	movl	%ecx,%eax		/* Get original signature */
2826	shrl	$4,%eax			/* Shift Stepping ID */
2827	movl	%eax,%ecx		/* Save original signature */
2828	andb	$0xF,%al		/* Get Family */
2829	movb	%al,EXT(cpuid_family)	/* Save Family */
2830	movl	%ecx,%eax		/* Get original signature */
2831	shrl	$4,%eax			/* Shift Stepping ID */
2832	andb	$0x3,%al		/* Get Type */
2833	movb	%al,EXT(cpuid_type)	/* Save Type */
2835	movl	EXT(cpuid_value),%eax	/* Get high value */
2836	cmpl	$2,%eax			/* Test if processor configuration */
2837	jle	0f			/* is present */
2838	movl	$2,%eax			/* get processor configuration */
2839	.word	0xA20F			/* cpuid instruction */
2840	movl	%eax,EXT(cpuid_cache)	/* Store byte 0-3 of configuration */
2841	movl	%ebx,EXT(cpuid_cache)+4	/* Store byte 4-7 of configuration */
2842	movl	%ecx,EXT(cpuid_cache)+8	/* Store byte 8-B of configuration */
2843	movl	%edx,EXT(cpuid_cache)+12 /* Store byte C-F of configuration */
2845	popl	%ebx			/* restore ebx */
/*
 * NOTE(review): reboot-by-triple-fault fragment — the IDT is disabled and
 * a divide-by-zero forces an unhandled exception, resetting the machine.
 */
2881	lidt	null_idtr		/* disable the interrupt handler */
2882	xor	%ecx,%ecx		/* generate a divide by zero */
2883	div	%ecx,%eax		/* reboot now */
2884	ret				/* this will "never" be executed */
2886 #endif	/* SYMMETRY */
/*
 * NOTE(review): bit-string primitives (fragments — ENTRY() and ret lines
 * elided).  setbit/clrbit use bts/btr; ffsbit scans 32-bit words until
 * bsfl finds a set bit, accumulating the word offset in %edx; testbit
 * presumably uses bt (the instruction itself is elided).  Followed by
 * fragments of etap helpers: a caller-PC fetch, tvals_to_etap (nanosecond
 * scale constant), and etap_time_sub — a 64-bit subtract via sub/sbb.
 */
2890 * setbit(int bitno, int *s) - set bit in bit string
2893	movl	S_ARG0, %ecx		/* bit number */
2894	movl	S_ARG1, %eax		/* address */
2895	btsl	%ecx, (%eax)		/* set bit */
2899 * clrbit(int bitno, int *s) - clear bit in bit string
2902	movl	S_ARG0, %ecx		/* bit number */
2903	movl	S_ARG1, %eax		/* address */
2904	btrl	%ecx, (%eax)		/* clear bit */
2908 * ffsbit(int *s) - find first set bit in bit string
2911	movl	S_ARG0, %ecx		/* address */
2912	movl	$0, %edx		/* base offset */
2914	bsfl	(%ecx), %eax		/* check argument bits */
2915	jnz	1f			/* found bit, return */
2916	addl	$4, %ecx		/* increment address */
2917	addl	$32, %edx		/* increment offset */
2918	jmp	0b			/* try again */
2920	addl	%edx, %eax		/* return offset */
2924 * testbit(int nr, volatile void *array)
2926 * Test to see if the bit is set within the bit string
2930	movl	S_ARG0,%eax	/* Get the bit to test */
2931	movl	S_ARG1,%ecx	/* get the array string */
2943	movl	4(%ebp), %eax	/* fetch pc of caller */
2946 ENTRY(tvals_to_etap)
2948	movl	$1000000000, %ecx
2955 * etap_time_sub(etap_time_t stop, etap_time_t start)
2957 * 64bit subtract, returns stop - start
2959 ENTRY(etap_time_sub)
2960	movl	S_ARG0, %eax		/* stop.low */
2961	movl	S_ARG1, %edx		/* stop.hi */
2962	subl	S_ARG2, %eax		/* stop.lo - start.lo */
2963	sbbl	S_ARG3, %edx		/* stop.hi - start.hi */
2974 * jail: set the EIP to "jail" to block a kernel thread.
2975 * Useful to debug synchronization problems on MPs.
2980 #endif /* NCPUS > 1 */
2983 * delay(microseconds)
/*
 * Fragment of the calibrated software delay loop: only the load of
 * the loop-count calibration variable (delaycount) is visible here;
 * the multiply/loop body lies outside this view.
 */
2990 movl EXT(delaycount), %ecx
3003 * div_scale(unsigned int dividend,
3004 * unsigned int divisor,
3005 * unsigned int *scale)
3007 * This function returns (dividend << *scale) //divisor where *scale
3008 * is the largest possible value before overflow. This is used in
3009 * computation where precision must be achieved in order to avoid
3010 * floating point usage.
3014 * while (((dividend >> *scale) >= divisor))
3016 * *scale = 32 - *scale;
3017 * return ((dividend << *scale) / divisor);
/*
 * Implementation (fragment: the ENTRY label, the 0:/1: loop labels
 * and the final ret lie outside this view):
 *   %ecx counts right-shifts.  The dividend is kept in %edx:%eax and
 *   shifted right one bit per iteration until it drops below the
 *   divisor; after the loop, divl of %edx:%eax by the divisor yields
 *   (dividend << (32 - shifts)) / divisor without overflowing, and
 *   *scale is reported as 32 - %ecx.
 */
3021 xorl %ecx, %ecx /* *scale = 0 */
3023 movl ARG0, %edx /* get dividend */
3025 cmpl ARG1, %edx /* if (divisor > dividend) */
3026 jle 1f /* goto 1f */
3027 addl $1, %ecx /* (*scale)++ */
3028 shrdl $1, %edx, %eax /* dividend >> 1 */
3029 shrl $1, %edx /* dividend >> 1 */
3030 jmp 0b /* goto 0b */
/*
 * NOTE(review): jle is a *signed* condition applied to unsigned
 * operands -- values with the sign bit set would exit early.
 * Preserved as-is; flagging only, since callers' value ranges are
 * not visible here.
 */
3032 divl ARG1 /* (dividend << (32 - *scale)) / divisor */
3033 movl ARG2, %edx /* get scale */
3034 movl $32, (%edx) /* *scale = 32 */
3035 subl %ecx, (%edx) /* *scale -= %ecx */
3041 * mul_scale(unsigned int multiplicand,
3042 * unsigned int multiplier,
3043 * unsigned int *scale)
3045 * This function returns ((multiplicand * multiplier) >> *scale) where
3046 * scale is the largest possible value before overflow. This is used in
3047 * computation where precision must be achieved in order to avoid
3048 * floating point usage.
3052 * while (overflow((multiplicand * multiplier) >> *scale))
3054 * return ((multiplicand * multiplier) >> *scale);
/*
 * Implementation (fragment: the ENTRY label, the loop/exit labels,
 * the conditional jump paired with the "if (!overflow())" test, and
 * the final ret lie outside this view):
 *   mull leaves the 64-bit product in %edx:%eax.  While the high
 *   word is nonzero (overflow of 32 bits), the product is shifted
 *   right one bit and %ecx -- the shift count, i.e. *scale -- is
 *   incremented.  The low 32 bits of the scaled product remain in
 *   %eax as the return value.
 */
3058 xorl %ecx, %ecx /* *scale = 0 */
3059 movl ARG0, %eax /* get multiplicand */
3060 mull ARG1 /* multiplicand * multiplier */
3062 cmpl $0, %edx /* if (!overflow()) */
3064 addl $1, %ecx /* (*scale)++ */
3065 shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */
3066 shrl $1, %edx /* (multiplicand * multiplier) >> 1 */
3069 movl ARG2, %edx /* get scale */
3070 movl %ecx, (%edx) /* set *scale */
3078 #endif /* NCPUS > 1 */
3082 * BSD System call entry point..
/*
 * trap_unix_syscall -- software-interrupt entry for BSD (unix)
 * system calls.  On entry %eax carries the system call number.  The
 * code hand-builds the register save frame an exception would
 * create: flags, the syscall number, a zeroed trap-number slot, the
 * general registers (pusha) and the segment registers.  Several
 * setup lines (segment-register reloads, the per-cpu %edx
 * computation feeding CX()) are not visible in this fragment.
 */
3085 Entry(trap_unix_syscall)
3086 pushf /* save flags as soon as possible */
3087 pushl %eax /* save system call number */
3088 pushl $0 /* clear trap number slot */
3090 pusha /* save the general registers */
3091 pushl %ds /* and the segment registers */
3096 mov %ss,%dx /* switch to kernel data segment */
/*
 * Because this path pushed eflags itself (pushf above) rather than
 * having the CPU push cs:eip:eflags, the three values sit rotated
 * within the frame; rotate them into their proper R_* slots.
 */
3103 * Shuffle eflags,eip,cs into proper places
3106 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
3107 movl R_CS(%esp),%ecx /* eip is in CS slot */
3108 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
3109 movl %ecx,R_EIP(%esp) /* fix eip */
3110 movl %edx,R_CS(%esp) /* fix cs */
3111 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
/*
 * Syscall numbers arrive negated; negl recovers the positive index
 * and shll $4 scales by 16 -- presumably the size of a syscall
 * table entry (confirm against the table definition elsewhere).
 */
3116 negl %eax /* get system call number */
3117 shll $4,%eax /* manual indexing */
3120 movl CX(EXT(kernel_stack),%edx),%ebx
3121 /* get current kernel stack */
3122 xchgl %ebx,%esp /* switch stacks - %ebx points to */
3123 /* user registers. */
3126 * Register use on entry:
3127 * eax contains syscall number
3128 * ebx contains user regs pointer
3131 pushl %ebx /* Push the regs set onto stack */
3132 call EXT(unix_syscall)
/*
 * Recover the PCB save area: or-ing in KERNEL_STACK_SIZE-1 rounds
 * %esp up to the last byte of the aligned, power-of-two kernel
 * stack; the saved PCB stack pointer appears to live at offset
 * -3-IKS_SIZE from that top (NOTE(review): offset taken on faith --
 * confirm against the interrupt-stack layout definitions).
 */
3134 movl %esp,%ecx /* get kernel stack */
3135 or $(KERNEL_STACK_SIZE-1),%ecx
3136 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
3137 movl %eax,R_EAX(%esp) /* save return value */
3138 jmp EXT(return_from_trap) /* return to user */
3141 * Entry point for machdep system calls..
/*
 * trap_machdep_syscall -- software-interrupt entry for
 * machine-dependent system calls.  On entry %eax carries the system
 * call number.  The code hand-builds the register save frame an
 * exception would create: flags, the syscall number, a zeroed
 * trap-number slot, the general registers (pusha) and the segment
 * registers.  Several setup lines (segment-register reloads, the
 * per-cpu %edx computation feeding CX(), and the push of the regs
 * pointer before the C call) are not visible in this fragment.
 */
3144 Entry(trap_machdep_syscall)
3145 pushf /* save flags as soon as possible */
3146 pushl %eax /* save system call number */
3147 pushl $0 /* clear trap number slot */
3149 pusha /* save the general registers */
3150 pushl %ds /* and the segment registers */
3155 mov %ss,%dx /* switch to kernel data segment */
/*
 * pushf placed eflags where the CPU would have put eip, rotating
 * eflags/eip/cs within the frame; rotate them into their proper
 * R_* slots.
 */
3162 * Shuffle eflags,eip,cs into proper places
3165 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
3166 movl R_CS(%esp),%ecx /* eip is in CS slot */
3167 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
3168 movl %ecx,R_EIP(%esp) /* fix eip */
3169 movl %edx,R_CS(%esp) /* fix cs */
3170 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
/*
 * Syscall numbers arrive negated; negl recovers the positive index
 * and shll $4 scales by 16 -- presumably the size of a syscall
 * table entry (confirm against the table definition elsewhere).
 */
3175 negl %eax /* get system call number */
3176 shll $4,%eax /* manual indexing */
3179 movl CX(EXT(kernel_stack),%edx),%ebx
3180 /* get current kernel stack */
3181 xchgl %ebx,%esp /* switch stacks - %ebx points to */
3182 /* user registers. */
3185 * Register use on entry:
3186 * eax contains syscall number
3187 * ebx contains user regs pointer
3191 call EXT(machdep_syscall)
/*
 * Recover the PCB save area: or-ing in KERNEL_STACK_SIZE-1 rounds
 * %esp up to the last byte of the aligned, power-of-two kernel
 * stack; the saved PCB stack pointer appears to live at offset
 * -3-IKS_SIZE from that top (NOTE(review): offset taken on faith --
 * confirm against the interrupt-stack layout definitions).
 */
3193 movl %esp,%ecx /* get kernel stack */
3194 or $(KERNEL_STACK_SIZE-1),%ecx
3195 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
3196 movl %eax,R_EAX(%esp) /* save return value */
3197 jmp EXT(return_from_trap) /* return to user */
/*
 * trap_mach25_syscall -- software-interrupt entry for Mach 2.5
 * compatibility system calls.  On entry %eax carries the system
 * call number.  The code hand-builds the register save frame an
 * exception would create: flags, the syscall number, a zeroed
 * trap-number slot, the general registers (pusha) and the segment
 * registers.  Several setup lines (segment-register reloads, the
 * per-cpu %edx computation feeding CX(), and the push of the regs
 * pointer before the C call) are not visible in this fragment.
 */
3199 Entry(trap_mach25_syscall)
3200 pushf /* save flags as soon as possible */
3201 pushl %eax /* save system call number */
3202 pushl $0 /* clear trap number slot */
3204 pusha /* save the general registers */
3205 pushl %ds /* and the segment registers */
3210 mov %ss,%dx /* switch to kernel data segment */
/*
 * pushf placed eflags where the CPU would have put eip, rotating
 * eflags/eip/cs within the frame; rotate them into their proper
 * R_* slots.
 */
3217 * Shuffle eflags,eip,cs into proper places
3220 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
3221 movl R_CS(%esp),%ecx /* eip is in CS slot */
3222 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
3223 movl %ecx,R_EIP(%esp) /* fix eip */
3224 movl %edx,R_CS(%esp) /* fix cs */
3225 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
/*
 * Syscall numbers arrive negated; negl recovers the positive index
 * and shll $4 scales by 16 -- presumably the size of a syscall
 * table entry (confirm against the table definition elsewhere).
 */
3230 negl %eax /* get system call number */
3231 shll $4,%eax /* manual indexing */
3234 movl CX(EXT(kernel_stack),%edx),%ebx
3235 /* get current kernel stack */
3236 xchgl %ebx,%esp /* switch stacks - %ebx points to */
3237 /* user registers. */
3240 * Register use on entry:
3241 * eax contains syscall number
3242 * ebx contains user regs pointer
3246 call EXT(mach25_syscall)
/*
 * Recover the PCB save area: or-ing in KERNEL_STACK_SIZE-1 rounds
 * %esp up to the last byte of the aligned, power-of-two kernel
 * stack; the saved PCB stack pointer appears to live at offset
 * -3-IKS_SIZE from that top (NOTE(review): offset taken on faith --
 * confirm against the interrupt-stack layout definitions).
 */
3248 movl %esp,%ecx /* get kernel stack */
3249 or $(KERNEL_STACK_SIZE-1),%ecx
3250 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
3251 movl %eax,R_EAX(%esp) /* save return value */
3252 jmp EXT(return_from_trap) /* return to user */