2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
29 * Mach Operating System
30 * Copyright (c) 1991,1990 Carnegie Mellon University
31 * All Rights Reserved.
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
43 * Carnegie Mellon requests users of this software to return to
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
56 #include <etap_event_monitor.h>
58 #include <platforms.h>
60 #include <mach_kgdb.h>
62 #include <stat_time.h>
63 #include <mach_assert.h>
65 #include <sys/errno.h>
67 #include <i386/cpuid.h>
68 #include <i386/eflags.h>
69 #include <i386/proc_reg.h>
70 #include <i386/trap.h>
72 #include <mach/exception_types.h>
74 #include <i386/AT386/mp/mp.h>
76 #define PREEMPT_DEBUG_LOG 0
79 /* Under Mach-O, etext is a variable which contains
80 * the last text address
82 #define ETEXT_ADDR (EXT(etext))
84 /* Under ELF and other non-Mach-O formats, the address of
85 * etext represents the last text address
87 #define ETEXT_ADDR $ EXT(etext)
92 #define CX(addr,reg) addr(,reg,4)
95 #define CPU_NUMBER(reg)
96 #define CX(addr,reg) addr
98 #endif /* NCPUS > 1 */
108 #define RECOVERY_SECTION .section __VECTORS, __recover
109 #define RETRY_SECTION .section __VECTORS, __retries
/*
 * Non-Mach-O object formats have no custom __VECTORS segment, so both
 * the recovery and retry tables simply live in .text.  The second
 * define must name RETRY_SECTION (it was a duplicate of
 * RECOVERY_SECTION): RETRY_TABLE_START/RETRY/RETRY_TABLE_END expand
 * RETRY_SECTION, which would otherwise be undefined in this branch.
 */
111 #define RECOVERY_SECTION .text
112 #define RETRY_SECTION .text
115 #define RECOVER_TABLE_START \
117 .globl EXT(recover_table) ;\
118 LEXT(recover_table) ;\
121 #define RECOVER(addr) \
128 #define RECOVER_TABLE_END \
130 .globl EXT(recover_table_end) ;\
131 LEXT(recover_table_end) ;\
135 * Retry table for certain successful faults.
137 #define RETRY_TABLE_START \
139 .globl EXT(retry_table) ;\
143 #define RETRY(addr) \
150 #define RETRY_TABLE_END \
152 .globl EXT(retry_table_end) ;\
153 LEXT(retry_table_end) ;\
157 * Allocate recovery and retry tables.
169 #define TIME_TRAP_UENTRY
170 #define TIME_TRAP_UEXIT
171 #define TIME_INT_ENTRY
172 #define TIME_INT_EXIT
174 #else /* microsecond timing */
177 * Microsecond timing.
178 * Assumes a free-running microsecond counter.
179 * no TIMER_MAX check needed.
183 * There is only one current time-stamp per CPU, since only
184 * the time-stamp in the current timer is used.
185 * To save time, we allocate the current time-stamps here.
187 .comm EXT(current_tstamp), 4*NCPUS
190 * Update time on user trap entry.
191 * 11 instructions (including cli on entry)
192 * Assumes CPU number in %edx.
/*
 * TIME_TRAP_UENTRY -- on entry to a trap from user space, charge the
 * elapsed microseconds to the thread's user timer and make the system
 * timer current.  Interrupts are blocked around the per-CPU
 * time-stamp/timer update (cli ... sti).
 * Assumes: CPU number in %edx.  Clobbers: %ebx, %ecx, flags.
 * A negative LOW_BITS after the add means the low word overflowed;
 * timer_normalize folds the excess into the high word.
 */
195 #define TIME_TRAP_UENTRY \
196 cli /* block interrupts */ ;\
197 movl VA_ETC,%ebx /* get timer value */ ;\
198 movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
199 movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
200 subl %ecx,%ebx /* elapsed = new-old */ ;\
201 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
202 addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
203 jns 0f /* skip if no overflow */ ;\
204 call timer_normalize /* else normalize timer */ ;\
205 0: addl $(TH_SYS_TIMER-TH_USER_TIMER),%ecx ;\
206 /* switch to sys timer */;\
207 movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
208 sti /* allow interrupts */
211 * update time on user trap exit.
213 * Assumes CPU number in %edx.
/*
 * TIME_TRAP_UEXIT -- mirror image of TIME_TRAP_UENTRY: on return to
 * user space, charge elapsed time to the system timer and make the
 * user timer current again.
 * Assumes: CPU number in %edx.  Clobbers: %ebx, %ecx, flags.
 * NOTE(review): interrupts are left disabled here (cli with no
 * matching sti) -- presumably re-enabled on the return path; confirm.
 */
216 #define TIME_TRAP_UEXIT \
217 cli /* block interrupts */ ;\
218 movl VA_ETC,%ebx /* get timer */ ;\
219 movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
220 movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
221 subl %ecx,%ebx /* elapsed = new-old */ ;\
222 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
223 addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
224 jns 0f /* skip if no overflow */ ;\
225 call timer_normalize /* else normalize timer */ ;\
226 0: addl $(TH_USER_TIMER-TH_SYS_TIMER),%ecx ;\
227 /* switch to user timer */;\
228 movl %ecx,CX(EXT(current_timer),%edx) /* make it current */
231 * update time on interrupt entry.
233 * Assumes CPU number in %edx.
234 * Leaves old timer in %ebx.
/*
 * TIME_INT_ENTRY -- on interrupt entry, charge elapsed time to the
 * current timer and switch to the per-CPU kernel (interrupt) timer.
 * Assumes: CPU number in %edx.
 * Leaves the previous timer pointer in %ebx for TIME_INT_EXIT.
 * Clobbers: %ecx, flags.  No overflow check here; TIME_INT_EXIT
 * normalizes the old timer if its low word overflowed.
 */
237 #define TIME_INT_ENTRY \
238 movl VA_ETC,%ecx /* get timer */ ;\
239 movl CX(EXT(current_tstamp),%edx),%ebx /* get old time stamp */;\
240 movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
241 subl %ebx,%ecx /* elapsed = new-old */ ;\
242 movl CX(EXT(current_timer),%edx),%ebx /* get current timer */;\
243 addl %ecx,LOW_BITS(%ebx) /* add to low bits */ ;\
244 leal CX(0,%edx),%ecx /* timer is 16 bytes; NOTE(review): %ecx is overwritten by the next lea -- looks dead, confirm */ ;\
245 lea CX(EXT(kernel_timer),%edx),%ecx /* get interrupt timer*/;\
246 movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
249 * update time on interrupt exit.
251 * Assumes CPU number in %edx, old timer in %ebx.
/*
 * TIME_INT_EXIT -- on interrupt exit, charge elapsed time to the
 * interrupt timer, then restore the timer saved by TIME_INT_ENTRY.
 * Normalizes the interrupt timer if its add overflowed, and the old
 * timer if its low-word sign bit (LOW_BITS+3 bit 7) is set.
 * Assumes: CPU number in %edx, old timer pointer in %ebx.
 * Clobbers: %eax, %ecx, flags.
 */
254 #define TIME_INT_EXIT \
255 movl VA_ETC,%eax /* get timer */ ;\
256 movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
257 movl %eax,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
258 subl %ecx,%eax /* elapsed = new-old */ ;\
259 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
260 addl %eax,LOW_BITS(%ecx) /* add to low bits */ ;\
261 jns 0f /* skip if no overflow */ ;\
262 call timer_normalize /* else normalize timer */ ;\
263 0: testb $0x80,LOW_BITS+3(%ebx) /* old timer overflow? */;\
264 jz 0f /* skip if no overflow */ ;\
265 movl %ebx,%ecx /* get old timer */ ;\
266 call timer_normalize /* normalize timer */ ;\
267 0: movl %ebx,CX(EXT(current_timer),%edx) /* set timer */
271 * Normalize timer in ecx.
272 * Preserves edx; clobbers eax.
/*
 * timer_normalize (visible fragment): fold an overflowed low word
 * into the timer's high word.  EDX:EAX / timer_high_unit -- the
 * quotient bumps HIGH_BITS and HIGH_BITS_CHECK, the remainder
 * becomes the new LOW_BITS.  Timer to normalize is in %ecx
 * (preserved); per the surrounding commentary, %edx is preserved
 * and %eax clobbered.
 */
276 .long TIMER_HIGH_UNIT /* div has no immediate opnd */
279 pushl %edx /* save registers */
281 xorl %edx,%edx /* clear dividend high (edx:eax) */
282 movl LOW_BITS(%ecx),%eax /* get dividend low */
283 divl timer_high_unit,%eax /* quotient in eax */
284 /* remainder in edx */
285 addl %eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */
286 movl %edx,LOW_BITS(%ecx) /* remainder to low_bits */
287 addl %eax,HIGH_BITS(%ecx) /* add high_inc to high bits */
288 popl %eax /* restore register */
293 * Switch to a new timer.
296 CPU_NUMBER(%edx) /* get this CPU */
297 movl VA_ETC,%ecx /* get timer */
298 movl CX(EXT(current_tstamp),%edx),%eax /* get old time stamp */
299 movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */
300 subl %ecx,%eax /* elapsed = new - old */
301 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */
302 addl %eax,LOW_BITS(%ecx) /* add to low bits */
303 jns 0f /* if overflow, */
304 call timer_normalize /* normalize timer */
306 movl S_ARG0,%ecx /* get new timer */
307 movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
311 * Initialize the first timer for a CPU.
314 CPU_NUMBER(%edx) /* get this CPU */
315 movl VA_ETC,%ecx /* get timer */
316 movl %ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */
317 movl S_ARG0,%ecx /* get timer */
318 movl %ecx,CX(EXT(current_timer),%edx) /* set initial timer */
321 #endif /* accurate timing */
324 * Encapsulate the transfer of exception stack frames between a PCB
325 * and a thread stack. Since the whole point of these is to emulate
326 * a call or exception that changes privilege level, both macros
327 * assume that there is no user esp or ss stored in the source
328 * frame (because there was no change of privilege to generate them).
332 * Transfer a stack frame from a thread's user stack to its PCB.
333 * We assume the thread and stack addresses have been loaded into
334 * registers (our arguments).
336 * The macro overwrites edi, esi, ecx and whatever registers hold the
337 * thread and stack addresses (which can't be one of the above three).
338 * The thread address is overwritten with the address of its saved state
339 * (where the frame winds up).
341 * Must be called on kernel stack.
343 #define FRAME_STACK_TO_PCB(thread, stkp) ;\
344 movl ACT_PCB(thread),thread /* get act`s PCB */ ;\
345 leal PCB_ISS(thread),%edi /* point to PCB`s saved state */;\
346 movl %edi,thread /* save for later */ ;\
347 movl stkp,%esi /* point to start of frame */ ;\
348 movl $ R_UESP,%ecx ;\
349 sarl $2,%ecx /* word count for transfer */ ;\
350 cld /* we`re incrementing */ ;\
352 movsl /* transfer the frame */ ;\
353 addl $ R_UESP,stkp /* derive true "user" esp */ ;\
354 movl stkp,R_UESP(thread) /* store in PCB */ ;\
356 mov %ss,%cx /* get current ss */ ;\
357 movl %ecx,R_SS(thread) /* store in PCB */
360 * Transfer a stack frame from a thread's PCB to the stack pointed
361 * to by the PCB. We assume the thread address has been loaded into
362 * a register (our argument).
364 * The macro overwrites edi, esi, ecx and whatever register holds the
365 * thread address (which can't be one of the above three). The
366 * thread address is overwritten with the address of its saved state
367 * (where the frame winds up).
369 * Must be called on kernel stack.
371 #define FRAME_PCB_TO_STACK(thread) ;\
372 movl ACT_PCB(thread),%esi /* get act`s PCB */ ;\
373 leal PCB_ISS(%esi),%esi /* point to PCB`s saved state */;\
374 movl R_UESP(%esi),%edi /* point to end of dest frame */;\
375 movl ACT_MAP(thread),%ecx /* get act's map */ ;\
376 movl MAP_PMAP(%ecx),%ecx /* get map's pmap */ ;\
377 cmpl EXT(kernel_pmap), %ecx /* If kernel loaded task */ ;\
378 jz 1f /* use kernel data segment */ ;\
379 movl $ USER_DS,%cx /* else use user data segment */;\
382 movl $ R_UESP,%ecx ;\
383 subl %ecx,%edi /* derive start of frame */ ;\
384 movl %edi,thread /* save for later */ ;\
385 sarl $2,%ecx /* word count for transfer */ ;\
386 cld /* we`re incrementing */ ;\
388 movsl /* transfer the frame */ ;\
389 mov %ss,%cx /* restore kernel segments */ ;\
397 * Traditional, not ANSI.
401 .globl label/**/count ;\
404 .globl label/**/limit ;\
408 addl $1,%ss:label/**/count ;\
409 cmpl $0,label/**/limit ;\
413 movl %ss:label/**/count,%eax ;\
414 cmpl %eax,%ss:label/**/limit ;\
427 * Last-ditch debug code to handle faults that might result
428 * from entering kernel (from collocated server) on an invalid
429 * stack. On collocated entry, there's no hardware-initiated
430 * stack switch, so a valid stack must be in place when an
431 * exception occurs, or we may double-fault.
433 * In case of a double-fault, our only recourse is to switch
434 * hardware "tasks", so that we avoid using the current stack.
436 * The idea here is just to get the processor into the debugger,
437 * post-haste. No attempt is made to fix up whatever error got
438 * us here, so presumably continuing from the debugger will
439 * simply land us here again -- at best.
443 * Note that the per-fault entry points are not currently
444 * functional. The only way to make them work would be to
445 * set up separate TSS's for each fault type, which doesn't
446 * currently seem worthwhile. (The offset part of a task
447 * gate is always ignored.) So all faults that task switch
448 * currently resume at db_task_start.
451 * Double fault (Murphy's point) - error code (0) on stack
453 Entry(db_task_dbl_fault)
455 movl $(T_DOUBLE_FAULT),%ebx
458 * Segment not present - error code on stack
460 Entry(db_task_seg_np)
462 movl $(T_SEGMENT_NOT_PRESENT),%ebx
465 * Stack fault - error code on (current) stack
467 Entry(db_task_stk_fault)
469 movl $(T_STACK_FAULT),%ebx
472 * General protection fault - error code on stack
474 Entry(db_task_gen_prot)
476 movl $(T_GENERAL_PROTECTION),%ebx
480 * The entry point where execution resumes after last-ditch debugger task
486 movl %edx,%esp /* allocate i386_saved_state on stack */
487 movl %eax,R_ERR(%esp)
488 movl %ebx,R_TRAPNO(%esp)
492 movl CX(EXT(mp_dbtss),%edx),%edx
493 movl TSS_LINK(%edx),%eax
495 movl EXT(dbtss)+TSS_LINK,%eax
497 pushl %eax /* pass along selector of previous TSS */
498 call EXT(db_tss_to_frame)
499 popl %eax /* get rid of TSS selector */
500 call EXT(db_trap_from_asm)
505 iret /* ha, ha, ha... */
506 #endif /* MACH_KDB */
509 * Trap/interrupt entry points.
511 * All traps must create the following save area on the PCB "stack":
520 * cr2 if page fault - otherwise unused
530 * user esp - if from user
531 * user ss - if from user
532 * es - if from V86 thread
533 * ds - if from V86 thread
534 * fs - if from V86 thread
535 * gs - if from V86 thread
540 * General protection or segment-not-present fault.
541 * Check for a GP/NP fault in the kernel_return
542 * sequence; if there, report it as a GP/NP fault on the user's instruction.
544 * esp-> 0: trap code (NP or GP)
545 * 4: segment number in error
549 * 20 old registers (trap is from kernel)
552 pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
553 jmp trap_check_kernel_exit /* check for kernel exit sequence */
556 pushl $(T_SEGMENT_NOT_PRESENT)
557 /* indicate fault type */
559 trap_check_kernel_exit:
560 testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
561 jnz EXT(alltraps) /* isn`t kernel trap if so */
562 testl $3,12(%esp) /* is trap from kernel mode? */
563 jne EXT(alltraps) /* if so: */
564 /* check for the kernel exit sequence */
565 cmpl $ EXT(kret_iret),8(%esp) /* on IRET? */
567 cmpl $ EXT(kret_popl_ds),8(%esp) /* popping DS? */
569 cmpl $ EXT(kret_popl_es),8(%esp) /* popping ES? */
571 cmpl $ EXT(kret_popl_fs),8(%esp) /* popping FS? */
573 cmpl $ EXT(kret_popl_gs),8(%esp) /* popping GS? */
575 take_fault: /* if none of the above: */
576 jmp EXT(alltraps) /* treat as normal trap. */
579 * GP/NP fault on IRET: CS or SS is in error.
580 * All registers contain the user's values.
595 movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
596 popl %eax /* get trap number */
597 movl %eax,12-4(%esp) /* put in user trap number */
598 popl %eax /* get error code */
599 movl %eax,16-8(%esp) /* put in user errcode */
600 popl %eax /* restore eax */
602 jmp EXT(alltraps) /* take fault */
605 * Fault restoring a segment register. The user's registers are still
606 * saved on the stack. The offending segment register has not been
610 popl %eax /* get trap number */
611 popl %edx /* get error code */
612 addl $12,%esp /* pop stack to user regs */
613 jmp push_es /* (DS on top of stack) */
615 popl %eax /* get trap number */
616 popl %edx /* get error code */
617 addl $12,%esp /* pop stack to user regs */
618 jmp push_fs /* (ES on top of stack) */
620 popl %eax /* get trap number */
621 popl %edx /* get error code */
622 addl $12,%esp /* pop stack to user regs */
623 jmp push_gs /* (FS on top of stack) */
625 popl %eax /* get trap number */
626 popl %edx /* get error code */
627 addl $12,%esp /* pop stack to user regs */
628 jmp push_segregs /* (GS on top of stack) */
631 pushl %es /* restore es, */
633 pushl %fs /* restore fs, */
635 pushl %gs /* restore gs. */
637 movl %eax,R_TRAPNO(%esp) /* set trap number */
638 movl %edx,R_ERR(%esp) /* set error code */
640 jmp trap_set_segs /* take trap */
643 * Debug trap. Check for single-stepping across system call into
644 * kernel. If this is the case, taking the debug trap has turned
645 * off single-stepping - save the flags register with the trace
649 testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
650 jnz 0f /* isn`t kernel trap if so */
651 testl $3,4(%esp) /* is trap from kernel mode? */
653 cmpl $syscall_entry,(%esp) /* system call entry? */
655 /* flags are sitting where syscall */
657 addl $8,%esp /* remove eip/cs */
658 jmp syscall_entry_2 /* continue system call entry */
660 0: pushl $0 /* otherwise: */
661 pushl $(T_DEBUG) /* handle as normal */
662 jmp EXT(alltraps) /* debug fault */
665 * Page fault traps save cr2.
668 pushl $(T_PAGE_FAULT) /* mark a page fault trap */
669 pusha /* save the general registers */
670 movl %cr2,%eax /* get the faulting address */
671 movl %eax,12(%esp) /* save in esp save slot */
672 jmp trap_push_segs /* continue fault */
675 * All 'exceptions' enter here with:
681 * old esp if trapped from user
682 * old ss if trapped from user
684 * NB: below use of CPU_NUMBER assumes that macro will use correct
685 * segment register for any kernel data accesses.
688 pusha /* save the general registers */
690 pushl %ds /* save the segment registers */
698 movl %ax,%es /* switch to kernel data seg */
699 cld /* clear direction flag */
700 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
701 jnz trap_from_user /* user mode trap if so */
702 testb $3,R_CS(%esp) /* user mode trap? */
705 cmpl $0,CX(EXT(active_kloaded),%edx)
706 je trap_from_kernel /* if clear, truly in kernel */
708 cmpl ETEXT_ADDR,R_EIP(%esp) /* pc within kernel? */
713 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
714 * so transfer the stack frame into the PCB explicitly, then
715 * start running on resulting "PCB stack". We have to set
716 * up a simulated "uesp" manually, since there's none in the
723 movl CX(EXT(active_kloaded),%edx),%ebx
724 movl CX(EXT(kernel_stack),%edx),%eax
726 FRAME_STACK_TO_PCB(%ebx,%eax)
737 movl CX(EXT(kernel_stack),%edx),%ebx
738 xchgl %ebx,%esp /* switch to kernel stack */
739 /* user regs pointer already set */
741 pushl %ebx /* record register save area */
742 pushl %ebx /* pass register save area to trap */
743 call EXT(user_trap) /* call user trap routine */
744 movl 4(%esp),%esp /* switch back to PCB stack */
747 * Return from trap or system call, checking for ASTs.
751 LEXT(return_from_trap)
753 cmpl $0,CX(EXT(need_ast),%edx)
754 je EXT(return_to_user) /* if we need an AST: */
756 movl CX(EXT(kernel_stack),%edx),%esp
757 /* switch to kernel stack */
758 pushl $0 /* push preemption flag */
759 call EXT(i386_astintr) /* take the AST */
760 addl $4,%esp /* pop preemption flag */
761 popl %esp /* switch back to PCB stack (w/exc link) */
762 jmp EXT(return_from_trap) /* and check again (rare) */
763 /* ASTs after this point will */
767 * Arrange the checks needed for kernel-loaded (or kernel-loading)
768 * threads so that branch is taken in kernel-loaded case.
773 cmpl $0,CX(EXT(active_kloaded),%eax)
774 jnz EXT(return_xfer_stack)
775 movl $ CPD_ACTIVE_THREAD,%ebx
776 movl %gs:(%ebx),%ebx /* get active thread */
777 movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */
778 cmpl $0,ACT_KLOADING(%ebx) /* check if kernel-loading */
779 jnz EXT(return_kernel_loading)
783 movl $ CPD_PREEMPTION_LEVEL,%ebx
785 je EXT(return_from_kernel)
787 #endif /* MACH_ASSERT */
791 * Return from kernel mode to interrupted thread.
794 LEXT(return_from_kernel)
796 popl %gs /* restore segment registers */
803 popa /* restore general registers */
804 addl $8,%esp /* discard trap number and error code */
807 iret /* return from interrupt */
810 LEXT(return_xfer_stack)
812 * If we're on PCB stack in a kernel-loaded task, we have
813 * to transfer saved state back to thread stack and swap
814 * stack pointers here, because the hardware's not going
819 movl CX(EXT(kernel_stack),%eax),%esp
820 movl CX(EXT(active_kloaded),%eax),%eax
821 FRAME_PCB_TO_STACK(%eax)
824 jmp EXT(return_from_kernel)
827 * Hate to put this here, but setting up a separate swap_func for
828 * kernel-loaded threads no longer works, since thread executes
829 * "for a while" (i.e., until it reaches glue code) when first
830 * created, even if it's nominally suspended. Hence we can't
831 * transfer the PCB when the thread first resumes, because we
832 * haven't initialized it yet.
835 * Have to force transfer to new stack "manually". Use a string
836 * move to transfer all of our saved state to the stack pointed
837 * to by iss.uesp, then install a pointer to it as our current
840 LEXT(return_kernel_loading)
842 movl CX(EXT(kernel_stack),%eax),%esp
843 movl $ CPD_ACTIVE_THREAD,%ebx
844 movl %gs:(%ebx),%ebx /* get active thread */
845 movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */
846 movl %ebx,%edx /* save for later */
847 movl $0,ACT_KLOADING(%edx) /* clear kernel-loading bit */
848 FRAME_PCB_TO_STACK(%ebx)
849 movl %ebx,%esp /* start running on new stack */
850 movl $1,ACT_KLOADED(%edx) /* set kernel-loaded bit */
851 movl %edx,CX(EXT(active_kloaded),%eax) /* set cached indicator */
852 jmp EXT(return_from_kernel)
855 * Trap from kernel mode. No need to switch stacks or load segment registers.
858 #if MACH_KDB || MACH_KGDB
861 movl %esp,%ebx /* save current stack */
863 cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
867 cmpl $0,EXT(kgdb_active) /* Unexpected trap in kgdb */
870 pushl %esp /* Already on kgdb stack */
874 jmp EXT(return_from_kernel)
875 0: /* should kgdb handle this exception? */
876 cmpl $(T_NO_FPU),R_TRAPNO(%esp) /* FPU disabled? */
878 cmpl $(T_PAGE_FAULT),R_TRAPNO(%esp) /* page fault? */
881 cli /* disable interrupts */
882 CPU_NUMBER(%edx) /* get CPU number */
883 movl CX(EXT(kgdb_stacks),%edx),%ebx
884 xchgl %ebx,%esp /* switch to kgdb stack */
885 pushl %ebx /* pass old sp as an arg */
886 call EXT(kgdb_from_kernel)
887 popl %esp /* switch back to kernel stack */
888 jmp EXT(return_from_kernel)
890 #endif /* MACH_KGDB */
893 cmpl $0,EXT(db_active) /* could trap be from ddb? */
896 CPU_NUMBER(%edx) /* see if this CPU is in ddb */
897 cmpl $0,CX(EXT(kdb_active),%edx)
899 #endif /* NCPUS > 1 */
901 call EXT(db_trap_from_asm)
903 jmp EXT(return_from_kernel)
907 * Dilemma: don't want to switch to kernel_stack if trap
908 * "belongs" to ddb; don't want to switch to db_stack if
909 * trap "belongs" to kernel. So have to duplicate here the
910 * set of trap types that kernel_trap() handles. Note that
911 * "unexpected" page faults will not be handled by kernel_trap().
912 * In this panic-worthy case, we fall into the debugger with
913 * kernel_stack containing the call chain that led to the
916 movl R_TRAPNO(%esp),%edx
917 cmpl $(T_PAGE_FAULT),%edx
919 cmpl $(T_NO_FPU),%edx
921 cmpl $(T_FPU_FAULT),%edx
923 cmpl $(T_FLOATING_POINT_ERROR),%edx
925 cmpl $(T_PREEMPT),%edx
928 #endif /* MACH_KDB */
930 CPU_NUMBER(%edx) /* get CPU number */
931 cmpl CX(EXT(kernel_stack),%edx),%esp
932 /* if not already on kernel stack, */
933 ja 5f /* check some more */
934 cmpl CX(EXT(active_stacks),%edx),%esp
935 ja 6f /* on kernel stack: no switch */
937 movl CX(EXT(kernel_stack),%edx),%esp
939 pushl %ebx /* save old stack */
940 pushl %ebx /* pass as parameter */
941 call EXT(kernel_trap) /* to kernel trap routine */
942 addl $4,%esp /* pop parameter */
946 * If kernel_trap returns false, trap wasn't handled.
951 movl CX(EXT(db_stacks),%edx),%esp
952 pushl %ebx /* pass old stack as parameter */
953 call EXT(db_trap_from_asm)
954 #endif /* MACH_KDB */
956 cli /* disable interrupts */
957 CPU_NUMBER(%edx) /* get CPU number */
958 movl CX(EXT(kgdb_stacks),%edx),%esp
959 pushl %ebx /* pass old stack as parameter */
960 call EXT(kgdb_from_kernel)
961 #endif /* MACH_KGDB */
962 addl $4,%esp /* pop parameter */
966 * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap
969 pushl %ebx /* pass old stack as parameter */
971 addl $4,%esp /* pop parameter */
973 movl %ebx,%esp /* get old stack (from callee-saves reg) */
974 #else /* MACH_KDB || MACH_KGDB */
975 pushl %esp /* pass parameter */
976 call EXT(kernel_trap) /* to kernel trap routine */
977 addl $4,%esp /* pop parameter */
978 #endif /* MACH_KDB || MACH_KGDB */
983 movl CX(EXT(need_ast),%edx),%eax /* get pending asts */
984 testl $ AST_URGENT,%eax /* any urgent preemption? */
985 je EXT(return_from_kernel) /* no, nothing to do */
986 cmpl $0,EXT(preemptable) /* kernel-mode, preemption enabled? */
987 je EXT(return_from_kernel) /* no, skip it */
988 cmpl $ T_PREEMPT,48(%esp) /* preempt request? */
989 jne EXT(return_from_kernel) /* no, nothing to do */
990 movl CX(EXT(kernel_stack),%edx),%eax
993 andl $(-KERNEL_STACK_SIZE),%ecx
994 testl %ecx,%ecx /* are we on the kernel stack? */
995 jne EXT(return_from_kernel) /* no, skip it */
997 #if PREEMPT_DEBUG_LOG
998 pushl 28(%esp) /* stack pointer */
999 pushl 24+4(%esp) /* frame pointer */
1000 pushl 56+8(%esp) /* stack pointer */
1002 call EXT(log_thread_action)
1005 0: String "trap preempt eip"
1007 #endif /* PREEMPT_DEBUG_LOG */
1009 pushl $1 /* push preemption flag */
1010 call EXT(i386_astintr) /* take the AST */
1011 addl $4,%esp /* pop preemption flag */
1012 #endif /* MACH_RT */
1014 jmp EXT(return_from_kernel)
1017 * Called as a function, makes the current thread
1018 * return from the kernel as if from an exception.
1021 .globl EXT(thread_exception_return)
1022 .globl EXT(thread_bootstrap_return)
1023 LEXT(thread_exception_return)
1024 LEXT(thread_bootstrap_return)
1025 movl %esp,%ecx /* get kernel stack */
1026 or $(KERNEL_STACK_SIZE-1),%ecx
1027 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1028 jmp EXT(return_from_trap)
1030 Entry(call_continuation)
1031 movl S_ARG0,%eax /* get continuation */
1032 movl %esp,%ecx /* get kernel stack */
1033 or $(KERNEL_STACK_SIZE-1),%ecx
1034 addl $(-3-IKS_SIZE),%ecx
1035 movl %ecx,%esp /* pop the stack */
1036 xorl %ebp,%ebp /* zero frame pointer */
1037 jmp *%eax /* goto continuation */
1040 #define LOG_INTERRUPT(info,msg) \
1044 call EXT(log_thread_action) ; \
1047 #define CHECK_INTERRUPT_TIME(n) \
1050 call EXT(check_thread_time) ; \
1054 #define LOG_INTERRUPT(info,msg)
1055 #define CHECK_INTERRUPT_TIME(n)
1059 String "interrupt start"
1061 String "interrupt end"
1064 * All interrupts enter here.
1065 * old %eax on stack; interrupt number in %eax.
1068 pushl %ecx /* save registers */
1070 cld /* clear direction flag */
1072 cmpl %ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
1073 jb int_from_intstack /* if not: */
1075 pushl %ds /* save segment registers */
1077 mov %ss,%dx /* switch to kernel segments */
1085 movl CX(EXT(int_stack_top),%edx),%ecx
1086 movl 20(%esp),%edx /* get eip */
1087 xchgl %ecx,%esp /* switch to interrupt stack */
1090 pushl %ecx /* save pointer to old stack */
1092 pushl %ebx /* save %ebx - out of the way */
1093 /* so stack looks the same */
1094 pushl %ecx /* save pointer to old stack */
1095 TIME_INT_ENTRY /* do timing */
1098 pushl %edx /* pass eip to pe_incoming_interrupt */
1101 movl $ CPD_PREEMPTION_LEVEL,%edx
1103 #endif /* MACH_RT */
1105 movl $ CPD_INTERRUPT_LEVEL,%edx
1108 pushl %eax /* Push trap number */
1109 call EXT(PE_incoming_interrupt) /* call generic interrupt routine */
1110 addl $8,%esp /* Pop trap number and eip */
1112 .globl EXT(return_to_iret)
1113 LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
1115 movl $ CPD_INTERRUPT_LEVEL,%edx
1119 movl $ CPD_PREEMPTION_LEVEL,%edx
1121 #endif /* MACH_RT */
1125 TIME_INT_EXIT /* do timing */
1126 movl 4(%esp),%ebx /* restore the extra reg we saved */
1129 popl %esp /* switch back to old stack */
1132 movl CX(EXT(need_ast),%edx),%eax
1133 testl %eax,%eax /* any pending asts? */
1134 je 1f /* no, nothing to do */
1135 testl $(EFL_VM),I_EFL(%esp) /* if in V86 */
1136 jnz ast_from_interrupt /* take it */
1137 testb $3,I_CS(%esp) /* user mode, */
1138 jnz ast_from_interrupt /* take it */
1140 cmpl ETEXT_ADDR,I_EIP(%esp) /* if within kernel-loaded task, */
1141 jnb ast_from_interrupt /* take it */
1145 cmpl $0,EXT(preemptable) /* kernel-mode, preemption enabled? */
1146 je 1f /* no, skip it */
1147 movl $ CPD_PREEMPTION_LEVEL,%ecx
1148 cmpl $0,%gs:(%ecx) /* preemption masked? */
1149 jne 1f /* yes, skip it */
1150 testl $ AST_URGENT,%eax /* any urgent requests? */
1151 je 1f /* no, skip it */
1152 cmpl $ EXT(locore_end),I_EIP(%esp) /* are we in locore code? */
1153 jb 1f /* yes, skip it */
1154 movl CX(EXT(kernel_stack),%edx),%eax
1157 andl $(-KERNEL_STACK_SIZE),%ecx
1158 testl %ecx,%ecx /* are we on the kernel stack? */
1159 jne 1f /* no, skip it */
1162 * Take an AST from kernel space. We don't need (and don't want)
1163 * to do as much as the case where the interrupt came from user
1166 #if PREEMPT_DEBUG_LOG
1171 call EXT(log_thread_action)
1174 0: String "intr preempt eip"
1176 #endif /* PREEMPT_DEBUG_LOG */
1179 pushl $1 /* push preemption flag */
1180 call EXT(i386_astintr) /* take the AST */
1181 addl $4,%esp /* pop preemption flag */
1182 #endif /* MACH_RT */
1185 pop %es /* restore segment regs */
1190 iret /* return to caller */
1194 movl $ CPD_PREEMPTION_LEVEL,%edx
1196 #endif /* MACH_RT */
1198 movl $ CPD_INTERRUPT_LEVEL,%edx
1202 pushl %edx /* push eip */
1204 pushl %eax /* Push trap number */
1206 call EXT(PE_incoming_interrupt)
1207 addl $4,%esp /* pop eip */
1209 LEXT(return_to_iret_i) /* ( label for kdb_kintr) */
1211 addl $4,%esp /* pop trap number */
1213 movl $ CPD_INTERRUPT_LEVEL,%edx
1217 movl $ CPD_PREEMPTION_LEVEL,%edx
1219 #endif /* MACH_RT */
1221 pop %edx /* must have been on kernel segs */
1223 pop %eax /* no ASTs */
1227 * Take an AST from an interrupt.
1241 pop %es /* restore all registers ... */
1246 sti /* Reenable interrupts */
1247 pushl $0 /* zero code */
1248 pushl $0 /* zero trap number */
1249 pusha /* save general registers */
1250 push %ds /* save segment registers */
1254 mov %ss,%dx /* switch to kernel segments */
1261 * See if we interrupted a kernel-loaded thread executing
1265 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
1266 jnz 0f /* user mode trap if so */
1268 jnz 0f /* user mode, back to normal */
1270 cmpl ETEXT_ADDR,R_EIP(%esp)
1271 jb 0f /* not kernel-loaded, back to normal */
1275 * Transfer the current stack frame by hand into the PCB.
1278 movl CX(EXT(active_kloaded),%edx),%eax
1279 movl CX(EXT(kernel_stack),%edx),%ebx
1281 FRAME_STACK_TO_PCB(%eax,%ebx)
1288 movl CX(EXT(kernel_stack),%edx),%eax
1289 /* switch to kernel stack */
1293 pushl $0 /* push preemption flag */
1294 call EXT(i386_astintr) /* take the AST */
1295 addl $4,%esp /* pop preemption flag */
1296 popl %esp /* back to PCB stack */
1297 jmp EXT(return_from_trap) /* return */
1299 #if MACH_KDB || MACH_KGDB
/*
 * NOTE(review): non-contiguous excerpt -- the Entry() label for
 * kdb_kintr and several intervening lines are not visible here.
 */
1301 * kdb_kintr: enter kdb from keyboard interrupt.
1302 * Chase down the stack frames until we find one whose return
1303 * address is the interrupt handler. At that point, we have:
1305 * frame-> saved %ebp
1306 * return address in interrupt handler
1309 * return address == return_to_iret_i
1318 * frame-> saved %ebp
1319 * return address in interrupt handler
1322 * return address == return_to_iret
1323 * pointer to save area on old stack
1324 * [ saved %ebx, if accurate timing ]
1326 * old stack: saved %es
1335 * Call kdb, passing it that register save area.
1340 #endif /* MACH_KGDB */
1343 #endif /* MACH_KDB */
/* Walk the saved-%ebp frame chain looking for a frame whose return
   address is one of the two interrupt-return stubs. */
1344 movl %ebp,%eax /* save caller`s frame pointer */
1345 movl $ EXT(return_to_iret),%ecx /* interrupt return address 1 */
1346 movl $ EXT(return_to_iret_i),%edx /* interrupt return address 2 */
1348 0: cmpl 16(%eax),%ecx /* does this frame return to */
1349 /* interrupt handler (1)? */
1351 cmpl $kdb_from_iret,16(%eax)
1353 cmpl 16(%eax),%edx /* interrupt handler (2)? */
1355 cmpl $kdb_from_iret_i,16(%eax)
1357 movl (%eax),%eax /* try next frame */
/* Found it: patch the frame's return address so the pending interrupt
   return re-routes through the appropriate kdb entry stub below. */
1360 1: movl $kdb_from_iret,16(%eax) /* returns to kernel/user stack */
1363 2: movl $kdb_from_iret_i,16(%eax)
1364 /* returns to interrupt stack */
1368 * On return from keyboard interrupt, we will execute
1370 * if returning to an interrupt on the interrupt stack
1372 * if returning to an interrupt on the user or kernel stack
1375 /* save regs in known locations */
1377 pushl %ebx /* caller`s %ebx is in reg */
1379 movl 4(%esp),%eax /* get caller`s %ebx */
1380 pushl %eax /* push on stack */
1389 pushl %esp /* pass regs */
1390 call EXT(kgdb_kentry) /* to kgdb */
1391 addl $4,%esp /* pop parameters */
1392 #endif /* MACH_KGDB */
1394 pushl %esp /* pass regs */
1395 call EXT(kdb_kentry) /* to kdb */
1396 addl $4,%esp /* pop parameters */
1397 #endif /* MACH_KDB */
1398 pop %gs /* restore registers */
1409 jmp EXT(return_to_iret) /* normal interrupt return */
1411 kdb_from_iret_i: /* on interrupt stack */
/* Build a standard trap-style save area (error code, trap number,
   general registers, segment registers) for the debugger to inspect. */
1412 pop %edx /* restore saved registers */
1415 pushl $0 /* zero error code */
1416 pushl $0 /* zero trap number */
1417 pusha /* save general registers */
1418 push %ds /* save segment registers */
1423 cli /* disable interrupts */
1424 CPU_NUMBER(%edx) /* get CPU number */
1425 movl CX(EXT(kgdb_stacks),%edx),%ebx
1426 xchgl %ebx,%esp /* switch to kgdb stack */
1427 pushl %ebx /* pass old sp as an arg */
1428 call EXT(kgdb_from_kernel)
1429 popl %esp /* switch back to interrupt stack */
1430 #endif /* MACH_KGDB */
1432 pushl %esp /* pass regs, */
1433 pushl $0 /* code, */
1434 pushl $-1 /* type to kdb */
1437 #endif /* MACH_KDB */
1438 pop %gs /* restore segment registers */
1442 popa /* restore general registers */
1446 #endif /* MACH_KDB || MACH_KGDB */
1450 * Mach RPC enters through a call gate, like a system call.
/*
 * NOTE(review): non-contiguous excerpt -- the Entry() label and some
 * lines of this routine are not visible here.
 */
1454 pushf /* save flags as soon as possible */
1455 pushl %eax /* save system call number */
1456 pushl $0 /* clear trap number slot */
1458 pusha /* save the general registers */
1459 pushl %ds /* and the segment registers */
1464 mov %ss,%dx /* switch to kernel data segment */
/*
 * A call gate does not push eflags; the saved slots are rotated
 * relative to a real trap frame, so rotate them back into place.
 */
1471 * Shuffle eflags,eip,cs into proper places
1474 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
1475 movl R_CS(%esp),%ecx /* eip is in CS slot */
1476 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
1477 movl %ecx,R_EIP(%esp) /* fix eip */
1478 movl %edx,R_CS(%esp) /* fix cs */
1479 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
1484 negl %eax /* get system call number */
1485 shll $4,%eax /* manual indexing */
1488 * Check here for mach_rpc from kernel-loaded task --
1489 * - Note that kernel-loaded task returns via real return.
1490 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1491 * so transfer the stack frame into the PCB explicitly, then
1492 * start running on resulting "PCB stack". We have to set
1493 * up a simulated "uesp" manually, since there's none in the
1496 cmpl $0,CX(EXT(active_kloaded),%edx)
1499 movl CX(EXT(active_kloaded),%edx),%ebx
1500 movl CX(EXT(kernel_stack),%edx),%edx
1503 FRAME_STACK_TO_PCB(%ebx,%edx)
1511 movl CX(EXT(kernel_stack),%edx),%ebx
1512 /* get current kernel stack */
1513 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1514 /* user registers. */
1519 * Register use on entry:
1520 * eax contains syscall number
1521 * ebx contains user regs pointer
1523 #undef RPC_TRAP_REGISTERS
1524 #ifdef RPC_TRAP_REGISTERS
/* Copy the user's syscall arguments from the user stack onto the
   kernel stack, with address validation unless kernel-loaded. */
1530 movl EXT(mach_trap_table)(%eax),%ecx
1531 /* get number of arguments */
1532 jecxz 2f /* skip argument copy if none */
1533 movl R_UESP(%ebx),%esi /* get user stack pointer */
1534 lea 4(%esi,%ecx,4),%esi /* skip user return address, */
1535 /* and point past last argument */
1536 /* edx holds cpu number from above */
1537 movl CX(EXT(active_kloaded),%edx),%edx
1538 /* point to current thread */
1539 orl %edx,%edx /* if ! kernel-loaded, check addr */
1541 mov %ds,%dx /* kernel data segment access */
1544 cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */
1545 ja mach_call_addr /* address error if not */
1546 movl $ USER_DS,%edx /* user data segment access */
1549 movl %esp,%edx /* save kernel ESP for error recovery */
1553 RECOVER(mach_call_addr_push)
1554 pushl %fs:(%esi) /* push argument on stack */
1555 loop 1b /* loop for all arguments */
1559 * Register use on entry:
1560 * eax contains syscall number
1561 * ebx contains user regs pointer
1565 call *EXT(mach_trap_table)+4(%eax)
1566 /* call procedure */
/* Locate the PCB pointer stored at the base of the kernel stack
   (stack is KERNEL_STACK_SIZE-aligned) and return to user mode. */
1567 movl %esp,%ecx /* get kernel stack */
1568 or $(KERNEL_STACK_SIZE-1),%ecx
1569 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1570 movl %eax,R_EAX(%esp) /* save return value */
1571 jmp EXT(return_from_trap) /* return to user */
1575 * Special system call entry for "int 0x80", which has the "eflags"
1576 * register saved at the right place already.
1577 * Fall back to the common syscall path after saving the registers.
1582 * old esp if trapped from user
1583 * old ss if trapped from user
1585 * XXX: for the moment, we don't check for int 0x80 from kernel mode.
1587 Entry(syscall_int80)
/* int 0x80 pushed a real trap frame (eflags included), so no slot
   shuffle is needed -- just complete the save area and fall through. */
1588 pushl %eax /* save system call number */
1589 pushl $0 /* clear trap number slot */
1591 pusha /* save the general registers */
1592 pushl %ds /* and the segment registers */
1597 mov %ss,%dx /* switch to kernel data segment */
1606 * System call enters through a call gate. Flags are not saved -
1607 * we must shuffle stack to look like trap save area.
1614 * eax contains system call number.
1616 * NB: below use of CPU_NUMBER assumes that macro will use correct
1617 * correct segment register for any kernel data accesses.
/*
 * NOTE(review): non-contiguous excerpt -- the Entry() label and some
 * intervening lines/labels of this path are not visible here.
 */
1621 pushf /* save flags as soon as possible */
1623 pushl %eax /* save system call number */
1624 pushl $0 /* clear trap number slot */
1626 pusha /* save the general registers */
1627 pushl %ds /* and the segment registers */
1632 mov %ss,%dx /* switch to kernel data segment */
1639 * Shuffle eflags,eip,cs into proper places
1642 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
1643 movl R_CS(%esp),%ecx /* eip is in CS slot */
1644 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
1645 movl %ecx,R_EIP(%esp) /* fix eip */
1646 movl %edx,R_CS(%esp) /* fix cs */
1647 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
1652 * Check here for syscall from kernel-loaded task --
1653 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1654 * so transfer the stack frame into the PCB explicitly, then
1655 * start running on resulting "PCB stack". We have to set
1656 * up a simulated "uesp" manually, since there's none in the
1659 cmpl $0,CX(EXT(active_kloaded),%edx)
1662 movl CX(EXT(active_kloaded),%edx),%ebx
1663 movl CX(EXT(kernel_stack),%edx),%edx
1665 FRAME_STACK_TO_PCB(%ebx,%edx)
1675 movl CX(EXT(kernel_stack),%edx),%ebx
1676 /* get current kernel stack */
1677 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1678 /* user registers. */
1679 /* user regs pointer already set */
1682 * Check for MACH or emulated system call
1683 * Register use (from here till we begin processing call):
1684 * eax contains system call number
1685 * ebx points to user regs
/* Look up the task's emulation vector via the per-cpu active thread;
   a syscall number inside the vector's range is emulated in user space. */
1688 movl $ CPD_ACTIVE_THREAD,%edx
1689 movl %gs:(%edx),%edx /* get active thread */
1690 /* point to current thread */
1691 movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */
1692 movl ACT_TASK(%edx),%edx /* point to task */
1693 movl TASK_EMUL(%edx),%edx /* get emulation vector */
1694 orl %edx,%edx /* if none, */
1695 je syscall_native /* do native system call */
1696 movl %eax,%ecx /* copy system call number */
1697 subl DISP_MIN(%edx),%ecx /* get displacement into syscall */
1699 jl syscall_native /* too low - native system call */
1700 cmpl DISP_COUNT(%edx),%ecx /* check range */
1701 jnl syscall_native /* too high - native system call */
1702 movl DISP_VECTOR(%edx,%ecx,4),%edx
1703 /* get the emulation vector */
1704 orl %edx,%edx /* emulated system call if not zero */
1708 * Native system call.
1709 * Register use on entry:
1710 * eax contains syscall number
1711 * ebx points to user regs
/* Mach trap numbers are negative; negate and bounds-check against
   the trap table (16 bytes per entry, hence the shift by 4). */
1714 negl %eax /* get system call number */
1715 jl mach_call_range /* out of range if it was positive */
1717 cmpl EXT(mach_trap_count),%eax /* check system call table bounds */
1718 jg mach_call_range /* error if out of range */
1719 shll $4,%eax /* manual indexing */
1721 movl EXT(mach_trap_table)+4(%eax),%edx
1723 cmpl $ EXT(kern_invalid),%edx /* if not "kern_invalid" */
1724 jne do_native_call /* go on with Mach syscall */
1726 movl $ CPD_ACTIVE_THREAD,%edx
1727 movl %gs:(%edx),%edx /* get active thread */
1728 /* point to current thread */
1729 movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */
1730 movl ACT_TASK(%edx),%edx /* point to task */
1731 movl TASK_EMUL(%edx),%edx /* get emulation vector */
1732 orl %edx,%edx /* if it exists, */
1733 jne do_native_call /* do native system call */
1734 shrl $4,%eax /* restore syscall number */
1735 jmp mach_call_range /* try it as a "server" syscall */
1738 * Register use on entry:
1739 * eax contains syscall number
1740 * ebx contains user regs pointer
/* Copy the arguments from the user stack to the kernel stack,
   validating addresses unless the task is kernel-loaded. */
1743 movl EXT(mach_trap_table)(%eax),%ecx
1744 /* get number of arguments */
1745 jecxz mach_call_call /* skip argument copy if none */
1746 movl R_UESP(%ebx),%esi /* get user stack pointer */
1747 lea 4(%esi,%ecx,4),%esi /* skip user return address, */
1748 /* and point past last argument */
1750 movl CX(EXT(active_kloaded),%edx),%edx
1751 /* point to current thread */
1752 orl %edx,%edx /* if kernel-loaded, skip addr check */
1754 mov %ds,%dx /* kernel data segment access */
1757 cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */
1758 ja mach_call_addr /* address error if not */
1759 movl $ USER_DS,%edx /* user data segment access */
1762 movl %esp,%edx /* save kernel ESP for error recovery */
1766 RECOVER(mach_call_addr_push)
1767 pushl %fs:(%esi) /* push argument on stack */
1768 loop 2b /* loop for all arguments */
1771 * Register use on entry:
1772 * eax contains syscall number
1773 * ebx contains user regs pointer
1779 #if ETAP_EVENT_MONITOR
1780 cmpl $0x200, %eax /* is this mach_msg? */
1781 jz make_syscall /* if yes, don't record event */
1783 pushal /* Otherwise: save registers */
1784 pushl %eax /* push syscall number on stack*/
1785 call EXT(etap_machcall_probe1) /* call event begin probe */
1786 add $4,%esp /* restore stack */
1787 popal /* restore registers */
1789 call *EXT(mach_trap_table)+4(%eax) /* call procedure */
1791 call EXT(etap_machcall_probe2) /* call event end probe */
1793 jmp skip_syscall /* syscall already made */
1794 #endif /* ETAP_EVENT_MONITOR */
1797 call *EXT(mach_trap_table)+4(%eax) /* call procedure */
/* Recover the PCB pointer stored at the base of the aligned kernel
   stack, store the result, and return to user mode. */
1800 movl %esp,%ecx /* get kernel stack */
1801 or $(KERNEL_STACK_SIZE-1),%ecx
1802 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1803 movl %eax,R_EAX(%esp) /* save return value */
1804 jmp EXT(return_from_trap) /* return to user */
1807 * Address out of range. Change to page fault.
1808 * %esi holds failing address.
1809 * Register use on entry:
1810 * ebx contains user regs pointer
1812 mach_call_addr_push:
1813 movl %edx,%esp /* clean parameters from stack */
/* Rewrite the saved trap state so the generic trap path handles this
   as a user-space page fault at the failing address. */
1815 movl %esi,R_CR2(%ebx) /* set fault address */
1816 movl $(T_PAGE_FAULT),R_TRAPNO(%ebx)
1817 /* set page-fault trap */
1818 movl $(T_PF_USER),R_ERR(%ebx)
1819 /* set error code - read user space */
1821 jmp EXT(take_trap) /* treat as a trap */
1824 * System call out of range. Treat as invalid-instruction trap.
1825 * (? general protection?)
1826 * Register use on entry:
1827 * eax contains syscall number
1830 movl $ CPD_ACTIVE_THREAD,%edx
1831 movl %gs:(%edx),%edx /* get active thread */
1833 movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */
1834 movl ACT_TASK(%edx),%edx /* point to task */
1835 movl TASK_EMUL(%edx),%edx /* get emulation vector */
1836 orl %edx,%edx /* if emulator, */
1837 jne EXT(syscall_failed) /* handle as illegal instruction */
1838 /* else generate syscall exception: */
1841 push $1 /* code_cnt = 1 */
1842 push %edx /* exception_type_t (see i/f docky) */
1848 .globl EXT(syscall_failed)
1849 LEXT(syscall_failed)
/* Recover the PCB pointer from the base of the aligned kernel stack,
   then fake an invalid-opcode trap so the thread gets an exception. */
1850 movl %esp,%ecx /* get kernel stack */
1851 or $(KERNEL_STACK_SIZE-1),%ecx
1852 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1854 movl CX(EXT(kernel_stack),%edx),%ebx
1855 /* get current kernel stack */
1856 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1857 /* user registers. */
1858 /* user regs pointer already set */
1860 movl $(T_INVALID_OPCODE),R_TRAPNO(%ebx)
1861 /* set invalid-operation trap */
1862 movl $0,R_ERR(%ebx) /* clear error code */
1864 jmp EXT(take_trap) /* treat as a trap */
1867 * User space emulation of system calls.
1868 * edx - user address to handle syscall
1870 * User stack will become:
1873 * Register use on entry:
1874 * ebx contains user regs pointer
1875 * edx contains emulator vector address
/* Push the saved eflags and eip onto the user stack and redirect the
   saved eip to the emulator, so "return" lands in the emulation code. */
1878 movl R_UESP(%ebx),%edi /* get user stack pointer */
1880 movl CX(EXT(active_kloaded),%eax),%eax
1881 orl %eax,%eax /* if thread not kernel-loaded, */
1882 jz 0f /* do address checks */
1884 mov %ds,%ax /* kernel data segment access */
1885 jmp 1f /* otherwise, skip them */
1887 cmpl $(VM_MAX_ADDRESS),%edi /* in user space? */
1888 ja syscall_addr /* address error if not */
1889 subl $8,%edi /* push space for new arguments */
1890 cmpl $(VM_MIN_ADDRESS),%edi /* still in user space? */
1891 jb syscall_addr /* error if not */
1892 movl $ USER_DS,%ax /* user data segment access */
1895 movl R_EFLAGS(%ebx),%eax /* move flags */
1897 RECOVER(syscall_addr)
1898 movl %eax,%fs:0(%edi) /* to user stack */
1899 movl R_EIP(%ebx),%eax /* move eip */
1901 RECOVER(syscall_addr)
1902 movl %eax,%fs:4(%edi) /* to user stack */
1903 movl %edi,R_UESP(%ebx) /* set new user stack pointer */
1904 movl %edx,R_EIP(%ebx) /* change return address to trap */
1905 movl %ebx,%esp /* back to PCB stack */
1907 jmp EXT(return_from_trap) /* return to user */
1911 * Address error - address is in %edi.
1912 * Register use on entry:
1913 * ebx contains user regs pointer
1916 movl %edi,R_CR2(%ebx) /* set fault address */
1917 movl $(T_PAGE_FAULT),R_TRAPNO(%ebx)
1918 /* set page-fault trap */
1919 movl $(T_PF_USER),R_ERR(%ebx)
1920 /* set error code - read user space */
1922 jmp EXT(take_trap) /* treat as a trap */
1931 * Copy from user address space.
1932 * arg0: user address
1933 * arg1: kernel address
/* Returns 0 on success, EFAULT on fault/wrap-around (see copyin_fail).
   Uses %fs to address the source; RECOVER arms fault recovery. */
1939 pushl %edi /* save registers */
1941 movl 8+S_ARG0,%esi /* get user start address */
1942 movl 8+S_ARG1,%edi /* get kernel destination address */
1943 movl 8+S_ARG2,%edx /* get count */
1945 lea 0(%esi,%edx),%eax /* get user end address + 1 */
/* Select segment: USER_DS unless the thread's map is the kernel pmap. */
1947 movl $ CPD_ACTIVE_THREAD,%ecx
1948 movl %gs:(%ecx),%ecx /* get active thread */
1949 movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */
1950 movl ACT_MAP(%ecx),%ecx /* get act->map */
1951 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
1952 cmpl EXT(kernel_pmap), %ecx
1954 movl $ USER_DS,%cx /* user data segment access */
1958 jb copyin_fail /* fail if wrap-around */
1960 movl %edx,%ecx /* move by longwords first */
1963 RECOVER(copyin_fail)
1965 movsl /* move longwords */
1966 movl %edx,%ecx /* now move remaining bytes */
1969 RECOVER(copyin_fail)
1972 xorl %eax,%eax /* return 0 for success */
1974 mov %ss,%di /* restore kernel data segment */
1977 popl %edi /* restore registers */
1979 ret /* and return */
1982 movl $ EFAULT,%eax /* return error for failure */
1983 jmp copy_ret /* pop frame and return */
1986 * Copy string from user address space.
1987 * arg0: user address
1988 * arg1: kernel address
1989 * arg2: max byte count
1990 * arg3: actual byte count (OUT)
/* Returns 0 on success, ENAMETOOLONG if no NUL within the max count,
   EFAULT on fault. A NULL kernel address skips the store (length probe). */
1994 pushl %edi /* save registers */
1996 movl 8+S_ARG0,%esi /* get user start address */
1997 movl 8+S_ARG1,%edi /* get kernel destination address */
1998 movl 8+S_ARG2,%edx /* get count */
2000 lea 0(%esi,%edx),%eax /* get user end address + 1 */
2002 movl $ CPD_ACTIVE_THREAD,%ecx
2003 movl %gs:(%ecx),%ecx /* get active thread */
2004 movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */
2005 movl ACT_MAP(%ecx),%ecx /* get act->map */
2006 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
2007 cmpl EXT(kernel_pmap), %ecx
2009 mov %ds,%cx /* kernel data segment access */
2012 movl $ USER_DS,%cx /* user data segment access */
2020 RECOVER(copystr_fail) /* copy bytes... */
/* NOTE(review): movb with a 32-bit register operand relies on the
   assembler accepting the size mismatch (historic cctools behavior);
   %al would be the strictly correct operand -- confirm before changing. */
2021 movb %fs:(%esi),%eax
2023 testl %edi,%edi /* if kernel address is ... */
2024 jz 3f /* not NULL */
2025 movb %eax,(%edi) /* copy the byte */
2029 je 5f /* Zero count.. error out */
2031 jne 2b /* .. a NUL found? */
2034 movl $ ENAMETOOLONG,%eax /* String is too long.. */
2036 xorl %eax,%eax /* return zero for success */
2037 movl 8+S_ARG3,%edi /* get OUT len ptr */
2039 jz copystr_ret /* if null, just return */
2041 movl %esi,(%edi) /* else set OUT arg to xfer len */
2043 popl %edi /* restore registers */
2045 ret /* and return */
2048 movl $ EFAULT,%eax /* return error for failure */
2049 jmp copy_ret /* pop frame and return */
2052 * Copy to user address space.
2053 * arg0: kernel address
2054 * arg1: user address
/* Returns 0 on success, EFAULT on fault. On 386-family CPUs the
   hardware does not honor page write protection in ring 0, so this
   routine walks the page tables by hand and fakes a fault for
   read-only destination pages (see the check below). */
2060 pushl %edi /* save registers */
2063 movl 12+S_ARG0,%esi /* get kernel start address */
2064 movl 12+S_ARG1,%edi /* get user start address */
2065 movl 12+S_ARG2,%edx /* get count */
2067 leal 0(%edi,%edx),%eax /* get user end address + 1 */
2069 movl $ CPD_ACTIVE_THREAD,%ecx
2070 movl %gs:(%ecx),%ecx /* get active thread */
2071 movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */
2072 movl ACT_MAP(%ecx),%ecx /* get act->map */
2073 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
2074 cmpl EXT(kernel_pmap), %ecx
2076 mov %ds,%cx /* else kernel data segment access */
2084 * Check whether user address space is writable
2085 * before writing to it - hardware is broken.
2087 * Skip check if "user" address is really in
2088 * kernel space (i.e., if it's in a kernel-loaded
2092 * esi/edi source/dest pointers for rep/mov
2093 * ecx counter for rep/mov
2094 * edx counts down from 3rd arg
2095 * eax count of bytes for each (partial) page copy
2096 * ebx shadows edi, used to adjust edx
2098 movl %edi,%ebx /* copy edi for syncing up */
2100 /* if restarting after a partial copy, put edx back in sync, */
2101 addl %ebx,%edx /* edx -= (edi - ebx); */
2103 movl %edi,%ebx /* ebx = edi; */
2106 cmpl $ USER_DS,%cx /* If kernel data segment */
2107 jnz 0f /* skip check */
2109 cmpb $(CPUID_FAMILY_386), EXT(cpuid_family)
/* Manual page-table walk: PDE, then PTE, for the destination page. */
2112 movl %cr3,%ecx /* point to page directory */
2114 andl $(~0x7), %ecx /* remove cpu number */
2115 #endif /* NCPUS > 1 && AT386 */
2116 movl %edi,%eax /* get page directory bits */
2117 shrl $(PDESHIFT),%eax /* from user address */
2118 movl KERNELBASE(%ecx,%eax,4),%ecx
2119 /* get page directory pointer */
2120 testl $(PTE_V),%ecx /* present? */
2121 jz 0f /* if not, fault is OK */
2122 andl $(PTE_PFN),%ecx /* isolate page frame address */
2123 movl %edi,%eax /* get page table bits */
2124 shrl $(PTESHIFT),%eax
2125 andl $(PTEMASK),%eax /* from user address */
2126 leal KERNELBASE(%ecx,%eax,4),%ecx
2127 /* point to page table entry */
2128 movl (%ecx),%eax /* get it */
2129 testl $(PTE_V),%eax /* present? */
2130 jz 0f /* if not, fault is OK */
2131 testl $(PTE_W),%eax /* writable? */
2132 jnz 0f /* OK if so */
2134 * Not writable - must fake a fault. Turn off access to the page.
2136 andl $(PTE_INVALID),(%ecx) /* turn off valid bit */
2137 movl %cr3,%eax /* invalidate TLB */
2141 * Copy only what fits on the current destination page.
2142 * Check for write-fault again on the next page.
2144 leal NBPG(%edi),%eax /* point to */
2145 andl $(-NBPG),%eax /* start of next page */
2146 subl %edi,%eax /* get number of bytes to that point */
2147 cmpl %edx,%eax /* bigger than count? */
2149 movl %edx,%eax /* use count */
2152 movl %eax,%ecx /* move by longwords first */
2155 RECOVER(copyout_fail)
2157 RETRY(copyout_retry)
2160 movl %eax,%ecx /* now move remaining bytes */
2163 RECOVER(copyout_fail)
2165 RETRY(copyout_retry)
2168 movl %edi,%ebx /* copy edi for syncing up */
2169 subl %eax,%edx /* and decrement count */
2170 jg copyout_retry /* restart on next page if not done */
2171 xorl %eax,%eax /* return 0 for success */
2173 mov %ss,%di /* restore kernel segment */
2177 popl %edi /* restore registers */
2179 ret /* and return */
2182 movl $ EFAULT,%eax /* return error for failure */
2183 jmp copyout_ret /* pop frame and return */
/*
 * NOTE(review): this region is a set of short FPU / control-register
 * routine fragments whose ENTRY labels are not visible in this excerpt
 * (FP status read, FP save/restore, set_cr3, TLB flush, CR4 access,
 * set_tr, and TS-flag set).
 */
2200 pushl %eax /* get stack space */
2216 xor %eax,%eax /* clear high 16 bits of eax */
2217 fnstsw %ax /* read FP status */
2221 * Clear FPU exceptions
2228 * Clear task-switched flag.
2235 * Save complete FPU state. Save error for later.
2238 movl 4(%esp),%eax /* get save area pointer */
2239 fnsave (%eax) /* save complete state, including */
2244 * Restore FPU state.
2247 movl 4(%esp),%eax /* get save area pointer */
2248 frstor (%eax) /* restore complete state */
2258 #else /* NCPUS > 1 && AT386 */
2259 movl 4(%esp),%eax /* get new cr3 value */
2260 #endif /* NCPUS > 1 && AT386 */
2262 * Don't set PDBR to a new value (hence invalidating the
2263 * "paging cache") if the new value matches the current one.
2265 movl %cr3,%edx /* get current cr3 value */
2267 je 0f /* if two are equal, don't set */
2268 movl %eax,%cr3 /* load it (and flush cache) */
2278 andl $(~0x7), %eax /* remove cpu number */
2279 #endif /* NCPUS > 1 && AT386 */
2286 movl %cr3,%eax /* flush tlb by reloading CR3 */
2287 movl %eax,%cr3 /* with itself */
/* CR4 access encoded by hand -- contemporary assemblers lacked the
   mnemonic for %cr4. */
2301 .byte 0x0f,0x20,0xe0 /* movl %cr4, %eax */
2309 .byte 0x0f,0x22,0xe0 /* movl %eax, %cr4 */
2328 * Read task register.
2336 * Set task register. Also clears busy bit of task descriptor.
2339 movl S_ARG0,%eax /* get task segment number */
2340 subl $8,%esp /* push space for SGDT */
2341 sgdt 2(%esp) /* store GDT limit and base (linear) */
2342 movl 4(%esp),%edx /* address GDT */
2343 movb $(K_TSS),5(%edx,%eax) /* fix access byte in task descriptor */
2344 ltr %ax /* load task register */
2345 addl $8,%esp /* clear stack */
2346 ret /* and return */
2349 * Set task-switched flag.
2352 movl %cr0,%eax /* get cr0 */
2353 orl $(CR0_TS),%eax /* or in TS bit */
2354 movl %eax,%cr0 /* set cr0 */
2358 * io register must not be used on slaves (no AT bus)
2360 #define ILL_ON_SLAVE
2368 #define PUSH_FRAME FRAME
2369 #define POP_FRAME EMARF
2371 #else /* MACH_ASSERT */
2379 #endif /* MACH_ASSERT */
2382 #if MACH_KDB || MACH_ASSERT
2385 * Following routines are also defined as macros in i386/pio.h
2386 * Compile then when MACH_KDB is configured so that they
2387 * can be invoked from the debugger.
2391 * void outb(unsigned char *io_port,
2392 * unsigned char byte)
2394 * Output a byte to an IO port.
2399 movl ARG0,%edx /* IO port address */
2400 movl ARG1,%eax /* data to output */
2401 outb %al,%dx /* send it out */
2406 * unsigned char inb(unsigned char *io_port)
2408 * Input a byte from an IO port.
2413 movl ARG0,%edx /* IO port address */
2414 xor %eax,%eax /* clear high bits of register */
2415 inb %dx,%al /* get the byte */
2420 * void outw(unsigned short *io_port,
2421 * unsigned short word)
2423 * Output a word to an IO port.
2428 movl ARG0,%edx /* IO port address */
2429 movl ARG1,%eax /* data to output */
2430 outw %ax,%dx /* send it out */
2435 * unsigned short inw(unsigned short *io_port)
2437 * Input a word from an IO port.
2442 movl ARG0,%edx /* IO port address */
2443 xor %eax,%eax /* clear high bits of register */
2444 inw %dx,%ax /* get the word */
2449 * void outl(unsigned int *io_port,
2450 * unsigned int value)
2452 * Output an int to an IO port.
2457 movl ARG0,%edx /* IO port address*/
2458 movl ARG1,%eax /* data to output */
2459 outl %eax,%dx /* send it out */
2464 * unsigned int inl(unsigned int *io_port)
2466 * Input an int from an IO port.
2471 movl ARG0,%edx /* IO port address */
2472 inl %dx,%eax /* get the int */
2476 #endif /* MACH_KDB || MACH_ASSERT*/
/*
 * NOTE(review): the rep-outsb/outsw/outsl and rep-insb/insw/insl lines
 * of these string-I/O routines are not visible in this excerpt; only
 * the argument setup and register save/restore remain.
 */
2479 * void loutb(unsigned byte *io_port,
2480 * unsigned byte *data,
2481 * unsigned int count)
2483 * Output an array of bytes to an IO port.
2489 movl %esi,%eax /* save register */
2490 movl ARG0,%edx /* get io port number */
2491 movl ARG1,%esi /* get data address */
2492 movl ARG2,%ecx /* get count */
2496 movl %eax,%esi /* restore register */
2502 * void loutw(unsigned short *io_port,
2503 * unsigned short *data,
2504 * unsigned int count)
2506 * Output an array of shorts to an IO port.
2512 movl %esi,%eax /* save register */
2513 movl ARG0,%edx /* get io port number */
2514 movl ARG1,%esi /* get data address */
2515 movl ARG2,%ecx /* get count */
2519 movl %eax,%esi /* restore register */
2524 * void loutl(unsigned short io_port,
2525 * unsigned int *data,
2526 * unsigned int count)
2528 * Output an array of longs to an IO port.
2534 movl %esi,%eax /* save register */
2535 movl ARG0,%edx /* get io port number */
2536 movl ARG1,%esi /* get data address */
2537 movl ARG2,%ecx /* get count */
2541 movl %eax,%esi /* restore register */
2547 * void linb(unsigned char *io_port,
2548 * unsigned char *data,
2549 * unsigned int count)
2551 * Input an array of bytes from an IO port.
2557 movl %edi,%eax /* save register */
2558 movl ARG0,%edx /* get io port number */
2559 movl ARG1,%edi /* get data address */
2560 movl ARG2,%ecx /* get count */
2564 movl %eax,%edi /* restore register */
2570 * void linw(unsigned short *io_port,
2571 * unsigned short *data,
2572 * unsigned int count)
2574 * Input an array of shorts from an IO port.
2580 movl %edi,%eax /* save register */
2581 movl ARG0,%edx /* get io port number */
2582 movl ARG1,%edi /* get data address */
2583 movl ARG2,%ecx /* get count */
2587 movl %eax,%edi /* restore register */
2593 * void linl(unsigned short io_port,
2594 * unsigned int *data,
2595 * unsigned int count)
2597 * Input an array of longs from an IO port.
2603 movl %edi,%eax /* save register */
2604 movl ARG0,%edx /* get io port number */
2605 movl ARG1,%edi /* get data address */
2606 movl ARG2,%ecx /* get count */
2610 movl %eax,%edi /* restore register */
2616 * int inst_fetch(int eip, int cs);
2618 * Fetch instruction byte. Return -1 if invalid address.
2620 .globl EXT(inst_fetch)
/* Loads the given code segment into %fs and reads one byte from it;
   a fault during the read recovers to inst_fetch_fault (returns -1). */
2622 movl S_ARG1, %eax /* get segment */
2623 movw %ax,%fs /* into FS */
2624 movl S_ARG0, %eax /* get offset */
2626 RETRY(EXT(inst_fetch)) /* re-load FS on retry */
2628 RECOVER(EXT(inst_fetch_fault))
2629 movzbl %fs:(%eax),%eax /* load instruction byte */
2632 LEXT(inst_fetch_fault)
2633 movl $-1,%eax /* return -1 if error */
2639 * kdp_copy_kmem(char *src, char *dst, int count)
2641 * Similar to copyin except that both addresses are kernel addresses.
2644 ENTRY(kdp_copy_kmem)
2646 pushl %edi /* save registers */
2648 movl 8+S_ARG0,%esi /* get kernel start address */
2649 movl 8+S_ARG1,%edi /* get kernel destination address */
2651 movl 8+S_ARG2,%edx /* get count */
2653 lea 0(%esi,%edx),%eax /* get kernel end address + 1 */
2656 jb kdp_vm_read_fail /* fail if wrap-around */
2658 movl %edx,%ecx /* move by longwords first */
2661 RECOVER(kdp_vm_read_fail)
2663 movsl /* move longwords */
2664 movl %edx,%ecx /* now move remaining bytes */
2667 RECOVER(kdp_vm_read_fail)
2671 movl 8+S_ARG2,%edx /* get count */
2672 subl %ecx,%edx /* Return number of bytes transferred */
2675 popl %edi /* restore registers */
2677 ret /* and return */
2680 xorl %eax,%eax /* didn't copy a thing. */
2689 * Done with recovery and retry tables.
/*
 * NOTE(review): debug-register helper stubs (dr0..dr3); the ENTRY
 * labels and the instructions that load the hardware %dr registers are
 * not visible in this excerpt -- only the dr_addr bookkeeping stores.
 */
2702 /* dr<i>(address, type, len, persistence)
2706 movl %eax,EXT(dr_addr)
2712 movl %eax,EXT(dr_addr)+1*4
2718 movl %eax,EXT(dr_addr)+2*4
2725 movl %eax,EXT(dr_addr)+3*4
2734 movl %edx,EXT(dr_addr)+4*4
2735 andl dr_msk(,%ecx,2),%edx /* clear out new entry */
2736 movl %edx,EXT(dr_addr)+5*4
2755 movl %edx,EXT(dr_addr)+7*4
2762 DATA(preemptable) /* Not on an MP (makes cpu_number() usage unsafe) */
2763 #if MACH_RT && (NCPUS == 1)
2764 .long 0 /* FIXME -- Currently disabled */
2766 .long 0 /* FIX ME -- Currently disabled */
2767 #endif /* MACH_RT && (NCPUS == 1) */
2780 * Determine cpu model and set global cpuid_xxx variables
2782 * Relies on 386 eflags bit 18 (AC) always being zero & 486 preserving it.
2783 * Relies on 486 eflags bit 21 (ID) always being zero & 586 preserving it.
2784 * Relies on CPUID instruction for next x86 generations
2785 * (assumes cpuid-family-homogenous MPs; else convert to per-cpu array)
2788 ENTRY(set_cpu_model)
2790 pushl %ebx /* save ebx */
2791 andl $~0x3,%esp /* Align stack to avoid AC fault */
/* Try to flip AC and ID in EFLAGS; which bits actually stick tells
   the CPU generation (386 can't flip AC, 486 can't flip ID). */
2792 pushfl /* push EFLAGS */
2793 popl %eax /* pop into eax */
2794 movl %eax,%ecx /* Save original EFLAGS */
2795 xorl $(EFL_AC+EFL_ID),%eax /* toggle ID,AC bits */
2796 pushl %eax /* push new value */
2797 popfl /* through the EFLAGS register */
2798 pushfl /* and back */
2799 popl %eax /* into eax */
2800 movb $(CPUID_FAMILY_386),EXT(cpuid_family)
2801 pushl %ecx /* push original EFLAGS */
2802 popfl /* restore EFLAGS */
2803 xorl %ecx,%eax /* see what changed */
2804 testl $ EFL_AC,%eax /* test AC bit */
2805 jz 0f /* AC did not toggle: 386, done */
2807 movb $(CPUID_FAMILY_486),EXT(cpuid_family)
2808 testl $ EFL_ID,%eax /* test ID bit */
2809 jz 0f /* ID did not toggle: 486, done */
/* CPUID is available: record vendor string, signature and features.
   (CPUID is emitted as raw opcode bytes for old assemblers.) */
2811 xorl %eax,%eax /* get vendor identification string */
2812 .word 0xA20F /* cpuid instruction */
2813 movl %eax,EXT(cpuid_value) /* Store high value */
2814 movl %ebx,EXT(cpuid_vid) /* Store byte 0-3 of Vendor ID */
2815 movl %edx,EXT(cpuid_vid)+4 /* Store byte 4-7 of Vendor ID */
2816 movl %ecx,EXT(cpuid_vid)+8 /* Store byte 8-B of Vendor ID */
2817 movl $1,%eax /* get processor signature */
2818 .word 0xA20F /* cpuid instruction */
2819 movl %edx,EXT(cpuid_feature) /* Store feature flags */
2820 movl %eax,%ecx /* Save original signature */
2821 andb $0xF,%al /* Get Stepping ID */
2822 movb %al,EXT(cpuid_stepping) /* Save Stepping ID */
2823 movl %ecx,%eax /* Get original signature */
2824 shrl $4,%eax /* Shift Stepping ID */
2825 movl %eax,%ecx /* Save original signature */
2826 andb $0xF,%al /* Get Model */
2827 movb %al,EXT(cpuid_model) /* Save Model */
2828 movl %ecx,%eax /* Get original signature */
2829 shrl $4,%eax /* Shift Stepping ID */
2830 movl %eax,%ecx /* Save original signature */
2831 andb $0xF,%al /* Get Family */
2832 movb %al,EXT(cpuid_family) /* Save Family */
2833 movl %ecx,%eax /* Get original signature */
2834 shrl $4,%eax /* Shift Stepping ID */
2835 andb $0x3,%al /* Get Type */
2836 movb %al,EXT(cpuid_type) /* Save Type */
2838 movl EXT(cpuid_value),%eax /* Get high value */
2839 cmpl $2,%eax /* Test if processor configuration */
2840 jle 0f /* is present */
2841 movl $2,%eax /* get processor configuration */
2842 .word 0xA20F /* cpuid instruction */
2843 movl %eax,EXT(cpuid_cache) /* Store byte 0-3 of configuration */
2844 movl %ebx,EXT(cpuid_cache)+4 /* Store byte 4-7 of configuration */
2845 movl %ecx,EXT(cpuid_cache)+8 /* Store byte 8-B of configuration */
2846 movl %edx,EXT(cpuid_cache)+12 /* Store byte C-F of configuration */
2848 popl %ebx /* restore ebx */
/* Reboot by triple fault: with a null IDT, the divide-by-zero exception
   cannot be delivered, which resets the processor. */
2884 lidt null_idtr /* disable the interrupt handler */
2885 xor %ecx,%ecx /* generate a divide by zero */
2886 div %ecx,%eax /* reboot now */
2887 ret /* this will "never" be executed */
2889 #endif /* SYMMETRY */
2893 * setbit(int bitno, int *s) - set bit in bit string
2896 movl S_ARG0, %ecx /* bit number */
2897 movl S_ARG1, %eax /* address */
2898 btsl %ecx, (%eax) /* set bit */
2902 * clrbit(int bitno, int *s) - clear bit in bit string
2905 movl S_ARG0, %ecx /* bit number */
2906 movl S_ARG1, %eax /* address */
2907 btrl %ecx, (%eax) /* clear bit */
2911 * ffsbit(int *s) - find first set bit in bit string
/* Scans 32 bits at a time with bsfl; %edx accumulates the bit offset
   of each word scanned so far. No terminating bound is visible here --
   presumably the caller guarantees a set bit exists. */
2914 movl S_ARG0, %ecx /* address */
2915 movl $0, %edx /* base offset */
2917 bsfl (%ecx), %eax /* check argument bits */
2918 jnz 1f /* found bit, return */
2919 addl $4, %ecx /* increment address */
2920 addl $32, %edx /* increment offset */
2921 jmp 0b /* try again */
2923 addl %edx, %eax /* return offset */
2927 * testbit(int nr, volatile void *array)
2929 * Test to see if the bit is set within the bit string
2933 movl S_ARG0,%eax /* Get the bit to test */
2934 movl S_ARG1,%ecx /* get the array string */
2946 movl 4(%ebp), %eax /* fetch pc of caller */
2949 ENTRY(tvals_to_etap)
2951 movl $1000000000, %ecx
2958 * etap_time_sub(etap_time_t stop, etap_time_t start)
2960 * 64bit subtract, returns stop - start
/*
 * Returns the 64-bit difference in %edx:%eax.  Low words are subtracted
 * first; sbbl then propagates the borrow (carry flag) into the high-word
 * subtraction.  (The ret is in an elided line.)
 */
2962 ENTRY(etap_time_sub)
2963 movl S_ARG0, %eax /* stop.low */
2964 movl S_ARG1, %edx /* stop.hi */
2965 subl S_ARG2, %eax /* stop.lo - start.lo */
2966 sbbl S_ARG3, %edx /* stop.hi - start.hi */
2977 * jail: set the EIP to "jail" to block a kernel thread.
2978 * Useful to debug synchronization problems on MPs.
2983 #endif /* NCPUS > 1 */
2986 * delay(microseconds)
/* (fragment) Software busy-wait: delaycount is presumably the calibrated
   loop-iterations-per-microsecond factor — the multiply/loop body is in
   elided lines; TODO confirm units against the calibration code. */
2993 movl EXT(delaycount), %ecx
3006 * div_scale(unsigned int dividend,
3007 * unsigned int divisor,
3008 * unsigned int *scale)
3010 * This function returns (dividend << *scale) //divisor where *scale
3011 * is the largest possible value before overflow. This is used in
3012 * computation where precision must be achieved in order to avoid
3013 * floating point usage.
3017 * while (((dividend >> *scale) >= divisor))
3019 * *scale = 32 - *scale;
3020 * return ((dividend << *scale) / divisor);
/*
 * Register roles: %edx:%eax = 64-bit working dividend (the load of the
 * low half is in an elided line), %ecx = shift count so far.
 * Loop: shift the dividend right one bit at a time until it is smaller
 * than the divisor, counting shifts in %ecx; then divl leaves the
 * quotient in %eax, and *scale is written as 32 - shift count.
 */
3024 xorl %ecx, %ecx /* *scale = 0 */
3026 movl ARG0, %edx /* get dividend */
/* NOTE(review): signed jle used to compare unsigned ints — misbehaves
   when either operand has bit 31 set; verify whether jbe was intended. */
3028 cmpl ARG1, %edx /* if (divisor > dividend) */
3029 jle 1f /* goto 1f */
3030 addl $1, %ecx /* (*scale)++ */
/* shrdl shifts the low bit of %edx into the top of %eax, keeping the
   64-bit value edx:eax consistent across the 1-bit right shift. */
3031 shrdl $1, %edx, %eax /* dividend >> 1 */
3032 shrl $1, %edx /* dividend >> 1 */
3033 jmp 0b /* goto 0b */
/* divl divides %edx:%eax by the operand: quotient -> %eax. */
3035 divl ARG1 /* (dividend << (32 - *scale)) / divisor */
3036 movl ARG2, %edx /* get scale */
3037 movl $32, (%edx) /* *scale = 32 */
3038 subl %ecx, (%edx) /* *scale -= %ecx */
3044 * mul_scale(unsigned int multiplicand,
3045 * unsigned int multiplier,
3046 * unsigned int *scale)
3048 * This function returns ((multiplicand * multiplier) >> *scale) where
3049 * scale is the largest possible value before overflow. This is used in
3050 * computation where precision must be achieved in order to avoid
3051 * floating point usage.
3055 * while (overflow((multiplicand * multiplier) >> *scale))
3057 * return ((multiplicand * multiplier) >> *scale);
/*
 * mull leaves the full 64-bit product in %edx:%eax; "overflow" here
 * means the high half (%edx) is non-zero.  The product is shifted right
 * one bit at a time (counting shifts in %ecx) until it fits in 32 bits;
 * the result stays in %eax and the shift count is stored to *scale.
 * (The loop labels and the conditional jump after the cmpl are in
 * elided lines.)
 */
3061 xorl %ecx, %ecx /* *scale = 0 */
3062 movl ARG0, %eax /* get multiplicand */
3063 mull ARG1 /* multiplicand * multiplier */
3065 cmpl $0, %edx /* if (!overflow()) */
3067 addl $1, %ecx /* (*scale)++ */
/* shrdl feeds the low bit of %edx into the top of %eax: a true 64-bit
   right shift of the product. */
3068 shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */
3069 shrl $1, %edx /* (multiplicand * multiplier) >> 1 */
3072 movl ARG2, %edx /* get scale */
3073 movl %ecx, (%edx) /* set *scale */
3081 #endif /* NCPUS > 1 */
3085 * BSD System call entry point..
/*
 * Entered from a user-mode trap gate.  Builds a full saved-register
 * frame (flags, syscall number, general and segment registers — some
 * segment pushes are in elided lines), repairs the frame layout,
 * switches to the per-CPU kernel stack, and dispatches to the C
 * handler unix_syscall().  Does not return here in the normal sense:
 * exits through return_from_trap.
 */
3088 Entry(trap_unix_syscall)
3089 pushf /* save flags as soon as possible */
3090 pushl %eax /* save system call number */
3091 pushl $0 /* clear trap number slot */
3093 pusha /* save the general registers */
3094 pushl %ds /* and the segment registers */
3099 mov %ss,%dx /* switch to kernel data segment */
3106 * Shuffle eflags,eip,cs into proper places
/* Because of the order the entry pushes happened in, eflags/eip/cs sit
   in each other's slots of the saved-state frame; rotate them into the
   slots the generic trap-return code expects. */
3109 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
3110 movl R_CS(%esp),%ecx /* eip is in CS slot */
3111 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
3112 movl %ecx,R_EIP(%esp) /* fix eip */
3113 movl %edx,R_CS(%esp) /* fix cs */
3114 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
/* Syscall numbers arrive negated; negl recovers the positive number.
   The shll $4 scales it by 16 — presumably the size of a dispatch
   table entry; TODO confirm against the table definition. */
3119 negl %eax /* get system call number */
3120 shll $4,%eax /* manual indexing */
/* CX() indexes the per-CPU kernel_stack array by CPU number (%edx,
   loaded in an elided line); swap onto the kernel stack, leaving %ebx
   pointing at the saved user registers on the PCB stack. */
3123 movl CX(EXT(kernel_stack),%edx),%ebx
3124 /* get current kernel stack */
3125 xchgl %ebx,%esp /* switch stacks - %ebx points to */
3126 /* user registers. */
3129 * Register use on entry:
3130 * eax contains syscall number
3131 * ebx contains user regs pointer
3134 pushl %ebx /* Push the regs set onto stack */
3135 call EXT(unix_syscall)
/* Recover the PCB stack: ORing with (KERNEL_STACK_SIZE-1) rounds %ecx
   up to the last byte of the (size-aligned, presumably) kernel stack;
   the saved PCB stack pointer lives at a fixed offset below that. */
3137 movl %esp,%ecx /* get kernel stack */
3138 or $(KERNEL_STACK_SIZE-1),%ecx
3139 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
3140 movl %eax,R_EAX(%esp) /* save return value */
3141 jmp EXT(return_from_trap) /* return to user */
3144 * Entry point for machdep system calls..
/*
 * Machine-dependent syscall entry.  Same frame-building, frame-repair
 * and stack-switch sequence as trap_unix_syscall above (several segment
 * and CPU-number setup lines are elided), but dispatches to
 * machdep_syscall().  Exits through return_from_trap.
 */
3147 Entry(trap_machdep_syscall)
3148 pushf /* save flags as soon as possible */
3149 pushl %eax /* save system call number */
3150 pushl $0 /* clear trap number slot */
3152 pusha /* save the general registers */
3153 pushl %ds /* and the segment registers */
3158 mov %ss,%dx /* switch to kernel data segment */
3165 * Shuffle eflags,eip,cs into proper places
/* eflags/eip/cs were pushed into each other's frame slots; rotate them
   into the positions the trap-return code expects. */
3168 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
3169 movl R_CS(%esp),%ecx /* eip is in CS slot */
3170 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
3171 movl %ecx,R_EIP(%esp) /* fix eip */
3172 movl %edx,R_CS(%esp) /* fix cs */
3173 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
/* Negated syscall number -> positive; scale by 16 for table indexing. */
3178 negl %eax /* get system call number */
3179 shll $4,%eax /* manual indexing */
/* Switch to the per-CPU kernel stack; %ebx ends up pointing at the
   saved user registers. */
3182 movl CX(EXT(kernel_stack),%edx),%ebx
3183 /* get current kernel stack */
3184 xchgl %ebx,%esp /* switch stacks - %ebx points to */
3185 /* user registers. */
3188 * Register use on entry:
3189 * eax contains syscall number
3190 * ebx contains user regs pointer
/* (The push of the regs pointer, if any, is in an elided line.) */
3194 call EXT(machdep_syscall)
/* Round up to the top of the size-aligned kernel stack and reload the
   saved PCB stack pointer from its fixed offset. */
3196 movl %esp,%ecx /* get kernel stack */
3197 or $(KERNEL_STACK_SIZE-1),%ecx
3198 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
3199 movl %eax,R_EAX(%esp) /* save return value */
3200 jmp EXT(return_from_trap) /* return to user */
/*
 * Mach 2.5 compatibility syscall entry.  Identical skeleton to the two
 * entries above (frame build, eflags/eip/cs slot repair, per-CPU kernel
 * stack switch — several setup lines are elided), dispatching to
 * mach25_syscall().  Exits through return_from_trap.
 */
3202 Entry(trap_mach25_syscall)
3203 pushf /* save flags as soon as possible */
3204 pushl %eax /* save system call number */
3205 pushl $0 /* clear trap number slot */
3207 pusha /* save the general registers */
3208 pushl %ds /* and the segment registers */
3213 mov %ss,%dx /* switch to kernel data segment */
3220 * Shuffle eflags,eip,cs into proper places
/* Rotate eflags/eip/cs out of each other's saved-frame slots. */
3223 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
3224 movl R_CS(%esp),%ecx /* eip is in CS slot */
3225 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
3226 movl %ecx,R_EIP(%esp) /* fix eip */
3227 movl %edx,R_CS(%esp) /* fix cs */
3228 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
/* Negated syscall number -> positive; scale by 16 for table indexing. */
3233 negl %eax /* get system call number */
3234 shll $4,%eax /* manual indexing */
/* Swap onto the per-CPU kernel stack; %ebx points at the saved user
   registers afterwards. */
3237 movl CX(EXT(kernel_stack),%edx),%ebx
3238 /* get current kernel stack */
3239 xchgl %ebx,%esp /* switch stacks - %ebx points to */
3240 /* user registers. */
3243 * Register use on entry:
3244 * eax contains syscall number
3245 * ebx contains user regs pointer
/* (The push of the regs pointer, if any, is in an elided line.) */
3249 call EXT(mach25_syscall)
/* Round up to the top of the size-aligned kernel stack, reload the
   saved PCB stack pointer, store the return value in the saved frame. */
3251 movl %esp,%ecx /* get kernel stack */
3252 or $(KERNEL_STACK_SIZE-1),%ecx
3253 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
3254 movl %eax,R_EAX(%esp) /* save return value */
3255 jmp EXT(return_from_trap) /* return to user */