/*
 * Source: osfmk/i386/locore.s, Apple xnu-201.42.3.
 * (Provenance header reconstructed from git-blame viewer residue;
 * original scrape referenced git.saurik.com apple/xnu.git.)
 */
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50
51#include <cpus.h>
52#include <etap.h>
53#include <etap_event_monitor.h>
54#include <mach_rt.h>
55#include <platforms.h>
56#include <mach_kdb.h>
57#include <mach_kgdb.h>
58#include <mach_kdp.h>
59#include <stat_time.h>
60#include <mach_assert.h>
61
62#include <sys/errno.h>
63#include <i386/asm.h>
64#include <i386/cpuid.h>
65#include <i386/eflags.h>
66#include <i386/proc_reg.h>
67#include <i386/trap.h>
68#include <assym.s>
69#include <mach/exception_types.h>
70
71#include <i386/AT386/mp/mp.h>
72
#define PREEMPT_DEBUG_LOG 0

#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define ETEXT_ADDR (EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define ETEXT_ADDR $EXT(etext)
#endif

#if	NCPUS > 1

/* CX(addr,reg): index a per-CPU array of 4-byte entries by the CPU
 * number held in reg.  CPU_NUMBER() itself comes from an MP header
 * in this configuration (presumably i386/AT386/mp/mp.h -- not
 * visible here; confirm against that header).
 */
#define	CX(addr,reg)	addr(,reg,4)

#else
/* Uniprocessor: CPU number is always 0, so CPU_NUMBER is a no-op
 * and CX degenerates to a plain (non-indexed) memory reference.
 */
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr

#endif	/* NCPUS > 1 */
96
97 .text
98locore_start:
99
100/*
101 * Fault recovery.
102 */
103
104#ifdef __MACHO__
105#define RECOVERY_SECTION .section __VECTORS, __recover
106#define RETRY_SECTION .section __VECTORS, __retries
107#else
108#define RECOVERY_SECTION .text
109#define RECOVERY_SECTION .text
110#endif
111
112#define RECOVER_TABLE_START \
113 .align 2 ; \
114 .globl EXT(recover_table) ;\
115LEXT(recover_table) ;\
116 .text
117
118#define RECOVER(addr) \
119 .align 2; \
120 .long 9f ;\
121 .long addr ;\
122 .text ;\
1239:
124
125#define RECOVER_TABLE_END \
126 .align 2 ;\
127 .globl EXT(recover_table_end) ;\
128LEXT(recover_table_end) ;\
129 .text
130
131/*
132 * Retry table for certain successful faults.
133 */
134#define RETRY_TABLE_START \
135 .align 3; \
136 .globl EXT(retry_table) ;\
137LEXT(retry_table) ;\
138 .text
139
140#define RETRY(addr) \
141 .align 3 ;\
142 .long 9f ;\
143 .long addr ;\
144 .text ;\
1459:
146
147#define RETRY_TABLE_END \
148 .align 3; \
149 .globl EXT(retry_table_end) ;\
150LEXT(retry_table_end) ;\
151 .text
152
153/*
154 * Allocate recovery and retry tables.
155 */
156 RECOVERY_SECTION
157 RECOVER_TABLE_START
158 RETRY_SECTION
159 RETRY_TABLE_START
160
161/*
162 * Timing routines.
163 */
#if	STAT_TIME

/* Statistical timing: time is accounted in the clock interrupt,
 * so the fine-grained timing macros compile away to nothing.
 */
#define	TIME_TRAP_UENTRY
#define	TIME_TRAP_UEXIT
#define	TIME_INT_ENTRY
#define	TIME_INT_EXIT

#else	/* microsecond timing */

/*
 * Microsecond timing.
 * Assumes a free-running microsecond counter.
 * no TIMER_MAX check needed.
 */

/*
 * There is only one current time-stamp per CPU, since only
 * the time-stamp in the current timer is used.
 * To save time, we allocate the current time-stamps here.
 */
	.comm	EXT(current_tstamp), 4*NCPUS

/*
 * Update time on user trap entry.
 * 11 instructions (including cli on entry)
 * Assumes CPU number in %edx.
 * Uses %ebx, %ecx.
 *
 * Charges elapsed time to the current (user) timer, then switches
 * the current timer to the thread's system timer.  The sign bit of
 * LOW_BITS acts as the overflow flag (hence jns skips normalize).
 */
#define	TIME_TRAP_UENTRY \
	cli			/* block interrupts */	;\
	movl	VA_ETC,%ebx	/* get timer value */	;\
	movl	CX(EXT(current_tstamp),%edx),%ecx	/* get old time stamp */;\
	movl	%ebx,CX(EXT(current_tstamp),%edx)	/* set new time stamp */;\
	subl	%ecx,%ebx	/* elapsed = new-old */	;\
	movl	CX(EXT(current_timer),%edx),%ecx	/* get current timer */;\
	addl	%ebx,LOW_BITS(%ecx)	/* add to low bits */	;\
	jns	0f			/* if overflow, */	;\
	call	timer_normalize		/* normalize timer */	;\
0:	addl	$(TH_SYS_TIMER-TH_USER_TIMER),%ecx ;\
					/* switch to sys timer */;\
	movl	%ecx,CX(EXT(current_timer),%edx)	/* make it current */ ;\
	sti			/* allow interrupts */

/*
 * update time on user trap exit.
 * 10 instructions.
 * Assumes CPU number in %edx.
 * Uses %ebx, %ecx.
 *
 * Mirror image of TIME_TRAP_UENTRY: charge the system timer and
 * switch back to the thread's user timer.  Caller is expected to
 * hold interrupts blocked through the subsequent return path (no
 * sti here).
 */
#define	TIME_TRAP_UEXIT \
	cli			/* block interrupts */	;\
	movl	VA_ETC,%ebx	/* get timer */		;\
	movl	CX(EXT(current_tstamp),%edx),%ecx	/* get old time stamp */;\
	movl	%ebx,CX(EXT(current_tstamp),%edx)	/* set new time stamp */;\
	subl	%ecx,%ebx	/* elapsed = new-old */	;\
	movl	CX(EXT(current_timer),%edx),%ecx	/* get current timer */;\
	addl	%ebx,LOW_BITS(%ecx)	/* add to low bits */	;\
	jns	0f			/* if overflow, */	;\
	call	timer_normalize		/* normalize timer */	;\
0:	addl	$(TH_USER_TIMER-TH_SYS_TIMER),%ecx ;\
					/* switch to user timer */;\
	movl	%ecx,CX(EXT(current_timer),%edx)	/* make it current */

/*
 * update time on interrupt entry.
 * 9 instructions.
 * Assumes CPU number in %edx.
 * Leaves old timer in %ebx.
 * Uses %ecx.
 *
 * NOTE(review): the first leal below computes CX(0,%edx) into %ecx
 * and is immediately overwritten by the following lea -- it appears
 * to be dead code (its comment suggests a 16-byte scaling that is
 * not actually performed).  Left untouched; confirm against the
 * kernel_timer element size before removing.
 */
#define	TIME_INT_ENTRY \
	movl	VA_ETC,%ecx	/* get timer */		;\
	movl	CX(EXT(current_tstamp),%edx),%ebx	/* get old time stamp */;\
	movl	%ecx,CX(EXT(current_tstamp),%edx)	/* set new time stamp */;\
	subl	%ebx,%ecx	/* elapsed = new-old */	;\
	movl	CX(EXT(current_timer),%edx),%ebx	/* get current timer */;\
	addl	%ecx,LOW_BITS(%ebx)	/* add to low bits */	;\
	leal	CX(0,%edx),%ecx	/* timer is 16 bytes */	;\
	lea	CX(EXT(kernel_timer),%edx),%ecx	/* get interrupt timer*/;\
	movl	%ecx,CX(EXT(current_timer),%edx)	/* set timer */

/*
 * update time on interrupt exit.
 * 11 instructions
 * Assumes CPU number in %edx, old timer in %ebx.
 * Uses %eax, %ecx.
 *
 * Charges the interrupt time to the kernel timer, normalizes the
 * interrupted (old) timer too if its low bits overflowed while we
 * were away, then reinstates the old timer as current.
 */
#define	TIME_INT_EXIT \
	movl	VA_ETC,%eax	/* get timer */		;\
	movl	CX(EXT(current_tstamp),%edx),%ecx	/* get old time stamp */;\
	movl	%eax,CX(EXT(current_tstamp),%edx)	/* set new time stamp */;\
	subl	%ecx,%eax	/* elapsed = new-old */	;\
	movl	CX(EXT(current_timer),%edx),%ecx	/* get current timer */;\
	addl	%eax,LOW_BITS(%ecx)	/* add to low bits */	;\
	jns	0f			/* if overflow, */	;\
	call	timer_normalize		/* normalize timer */	;\
0:	testb	$0x80,LOW_BITS+3(%ebx)	/* old timer overflow? */;\
	jz	0f			/* no overflow: skip */	;\
	movl	%ebx,%ecx		/* get old timer */	;\
	call	timer_normalize		/* normalize timer */	;\
0:	movl	%ebx,CX(EXT(current_timer),%edx)	/* set timer */
266
267/*
268 * Normalize timer in ecx.
269 * Preserves edx; clobbers eax.
270 */
	.align	ALIGN
timer_high_unit:
	.long	TIMER_HIGH_UNIT		/* div has no immediate opnd */

/*
 * timer_normalize: fold the accumulated LOW_BITS of the timer
 * pointed to by %ecx into its HIGH_BITS (and the HIGH_BITS_CHECK
 * shadow used by readers to detect torn reads), leaving only the
 * sub-unit remainder in LOW_BITS.
 * In:  %ecx = timer to normalize.
 * Preserves %edx and %eax (saved/restored); clobbers flags.
 */
timer_normalize:
	pushl	%edx			/* save registers */
	pushl	%eax
	xorl	%edx,%edx		/* clear divisor high */
	movl	LOW_BITS(%ecx),%eax	/* get divisor low */
	divl	timer_high_unit,%eax	/* quotient in eax */
					/* remainder in edx */
	addl	%eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */
	movl	%edx,LOW_BITS(%ecx)	/* remainder to low_bits */
	addl	%eax,HIGH_BITS(%ecx)	/* add high_inc to high bits */
	popl	%eax			/* restore register */
	popl	%edx
	ret
288
289/*
290 * Switch to a new timer.
291 */
/*
 * Switch to a new timer.
 * In:  S_ARG0 = new timer.
 * Charges elapsed time to the outgoing current timer, then installs
 * the new one.  Clobbers %eax, %ecx, %edx.
 *
 * NOTE(review): "subl %ecx,%eax" yields old-stamp minus new-stamp
 * (%ecx holds the fresh VA_ETC reading, %eax the old stamp), i.e.
 * the NEGATED elapsed time -- the opposite operand order from the
 * analogous sequence in TIME_TRAP_UENTRY above.  The comment claims
 * "new - old".  Left unchanged (cannot test here); verify against
 * upstream before relying on this path's accounting.
 */
Entry(timer_switch)
	CPU_NUMBER(%edx)		/* get this CPU */
	movl	VA_ETC,%ecx		/* get timer */
	movl	CX(EXT(current_tstamp),%edx),%eax	/* get old time stamp */
	movl	%ecx,CX(EXT(current_tstamp),%edx)	/* set new time stamp */
	subl	%ecx,%eax		/* elapsed = new - old */
	movl	CX(EXT(current_timer),%edx),%ecx	/* get current timer */
	addl	%eax,LOW_BITS(%ecx)	/* add to low bits */
	jns	0f			/* if overflow, */
	call	timer_normalize		/* normalize timer */
0:
	movl	S_ARG0,%ecx		/* get new timer */
	movl	%ecx,CX(EXT(current_timer),%edx)	/* set timer */
	ret
306
307/*
308 * Initialize the first timer for a CPU.
309 */
/*
 * Initialize the first timer for a CPU.
 * In:  S_ARG0 = timer to install as current.
 * Takes an initial time stamp from the free-running counter so the
 * first timer_switch/TIME_* charge starts from "now".
 * Clobbers %ecx, %edx.
 */
Entry(start_timer)
	CPU_NUMBER(%edx)		/* get this CPU */
	movl	VA_ETC,%ecx		/* get timer */
	movl	%ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */
	movl	S_ARG0,%ecx		/* get timer */
	movl	%ecx,CX(EXT(current_timer),%edx) /* set initial timer */
	ret

#endif	/* accurate timing */
319
320/*
321 * Encapsulate the transfer of exception stack frames between a PCB
322 * and a thread stack. Since the whole point of these is to emulate
323 * a call or exception that changes privilege level, both macros
324 * assume that there is no user esp or ss stored in the source
325 * frame (because there was no change of privilege to generate them).
326 */
327
328/*
329 * Transfer a stack frame from a thread's user stack to its PCB.
330 * We assume the thread and stack addresses have been loaded into
331 * registers (our arguments).
332 *
333 * The macro overwrites edi, esi, ecx and whatever registers hold the
334 * thread and stack addresses (which can't be one of the above three).
335 * The thread address is overwritten with the address of its saved state
336 * (where the frame winds up).
337 *
338 * Must be called on kernel stack.
339 */
/* Copy R_UESP bytes of exception frame from the thread stack (stkp)
 * into the act's PCB saved-state area, then synthesize the uesp/ss
 * fields that hardware would have pushed on a privilege change.
 * On exit: thread = &pcb->iss (the copied frame); stkp = simulated
 * user esp.  Clobbers %edi, %esi, %ecx, direction flag.
 */
#define	FRAME_STACK_TO_PCB(thread, stkp)	;\
	movl	ACT_PCB(thread),thread	/* get act`s PCB */	;\
	leal	PCB_ISS(thread),%edi	/* point to PCB`s saved state */;\
	movl	%edi,thread	/* save for later */		;\
	movl	stkp,%esi	/* point to start of frame */	;\
	movl	$R_UESP,%ecx					;\
	sarl	$2,%ecx		/* word count for transfer */	;\
	cld			/* we`re incrementing */	;\
	rep							;\
	movsl			/* transfer the frame */	;\
	addl	$R_UESP,stkp	/* derive true "user" esp */	;\
	movl	stkp,R_UESP(thread)	/* store in PCB */	;\
	movl	$0,%ecx						;\
	mov	%ss,%cx		/* get current ss */		;\
	movl	%ecx,R_SS(thread)	/* store in PCB */
355
356/*
357 * Transfer a stack frame from a thread's PCB to the stack pointed
358 * to by the PCB. We assume the thread address has been loaded into
359 * a register (our argument).
360 *
361 * The macro overwrites edi, esi, ecx and whatever register holds the
362 * thread address (which can't be one of the above three). The
363 * thread address is overwritten with the address of its saved state
364 * (where the frame winds up).
365 *
366 * Must be called on kernel stack.
367 */
/* Copy the saved-state frame out of the act's PCB to the stack
 * addressed by its saved uesp.  %es is temporarily switched to the
 * user data segment unless the act runs in the kernel pmap
 * (kernel-loaded task), and restored afterwards.  On exit: thread =
 * start of the copied frame (new stack pointer for the caller to
 * install).  Clobbers %edi, %esi, %ecx, %es (restored), direction
 * flag.
 */
#define FRAME_PCB_TO_STACK(thread)	;\
	movl	ACT_PCB(thread),%esi	/* get act`s PCB */	;\
	leal	PCB_ISS(%esi),%esi	/* point to PCB`s saved state */;\
	movl	R_UESP(%esi),%edi	/* point to end of dest frame */;\
	movl	ACT_MAP(thread),%ecx	/* get act's map */	;\
	movl	MAP_PMAP(%ecx),%ecx	/* get map's pmap */	;\
	cmpl	EXT(kernel_pmap), %ecx	/* If kernel loaded task */ ;\
	jz	1f			/* use kernel data segment */ ;\
	movl	$USER_DS,%cx		/* else use user data segment */;\
	mov	%cx,%es					;\
1:							;\
	movl	$R_UESP,%ecx				;\
	subl	%ecx,%edi	/* derive start of frame */	;\
	movl	%edi,thread	/* save for later */		;\
	sarl	$2,%ecx		/* word count for transfer */	;\
	cld			/* we`re incrementing */	;\
	rep							;\
	movsl			/* transfer the frame */	;\
	mov	%ss,%cx		/* restore kernel segments */	;\
	mov	%cx,%es
388
#undef PDEBUG

#ifdef PDEBUG

/*
 * Traditional, not ANSI.
 *
 * CAH(label): debug checkpoint macro.  Emits a per-site hit counter
 * and a settable "limit" word in .data, then inline code that bumps
 * the counter and, if the limit is nonzero, spins while the counter
 * equals the limit -- letting a debugger trap execution at a chosen
 * hit count by writing the limit word.  label/**/x is the K&R
 * (pre-ANSI) token-pasting idiom.
 */
#define CAH(label) \
	.data ;\
	.globl label/**/count ;\
label/**/count: ;\
	.long 0 ;\
	.globl label/**/limit ;\
label/**/limit: ;\
	.long 0 ;\
	.text ;\
	addl $1,%ss:label/**/count ;\
	cmpl $0,label/**/limit ;\
	jz label/**/exit ;\
	pushl %eax ;\
label/**/loop: ;\
	movl %ss:label/**/count,%eax ;\
	cmpl %eax,%ss:label/**/limit ;\
	je label/**/loop ;\
	popl %eax ;\
label/**/exit:

#else	/* PDEBUG */

#define CAH(label)

#endif	/* PDEBUG */
421
#if	MACH_KDB
/*
 * Last-ditch debug code to handle faults that might result
 * from entering kernel (from collocated server) on an invalid
 * stack.  On collocated entry, there's no hardware-initiated
 * stack switch, so a valid stack must be in place when an
 * exception occurs, or we may double-fault.
 *
 * In case of a double-fault, our only recourse is to switch
 * hardware "tasks", so that we avoid using the current stack.
 *
 * The idea here is just to get the processor into the debugger,
 * post-haste.  No attempt is made to fix up whatever error got
 * us here, so presumably continuing from the debugger will
 * simply land us here again -- at best.
 */
#if	0
/*
 * Note that the per-fault entry points are not currently
 * functional.  The only way to make them work would be to
 * set up separate TSS's for each fault type, which doesn't
 * currently seem worthwhile.  (The offset part of a task
 * gate is always ignored.)  So all faults that task switch
 * currently resume at db_task_start.
 */
/*
 * Double fault (Murphy's point) - error code (0) on stack
 */
Entry(db_task_dbl_fault)
	popl	%eax
	movl	$(T_DOUBLE_FAULT),%ebx
	jmp	db_task_start
/*
 * Segment not present - error code on stack
 */
Entry(db_task_seg_np)
	popl	%eax
	movl	$(T_SEGMENT_NOT_PRESENT),%ebx
	jmp	db_task_start
/*
 * Stack fault - error code on (current) stack
 */
Entry(db_task_stk_fault)
	popl	%eax
	movl	$(T_STACK_FAULT),%ebx
	jmp	db_task_start
/*
 * General protection fault - error code on stack
 */
Entry(db_task_gen_prot)
	popl	%eax
	movl	$(T_GENERAL_PROTECTION),%ebx
	jmp	db_task_start
#endif	/* 0 */
/*
 * The entry point where execution resumes after last-ditch debugger task
 * switch.
 * In: %eax = error code, %ebx = trap number (when entered from the
 * disabled per-fault stubs above; via a task gate both are whatever
 * the TSS restores).  Builds an i386_saved_state on the fresh stack,
 * reconstructs register state from the previous TSS, and enters ddb.
 */
Entry(db_task_start)
	movl	%esp,%edx
	subl	$ISS_SIZE,%edx
	movl	%edx,%esp		/* allocate i386_saved_state on stack */
	movl	%eax,R_ERR(%esp)
	movl	%ebx,R_TRAPNO(%esp)
	pushl	%edx
#if	NCPUS > 1
	CPU_NUMBER(%edx)
	movl	CX(EXT(mp_dbtss),%edx),%edx
	movl	TSS_LINK(%edx),%eax
#else
	movl	EXT(dbtss)+TSS_LINK,%eax
#endif
	pushl	%eax			/* pass along selector of previous TSS */
	call	EXT(db_tss_to_frame)
	popl	%eax			/* get rid of TSS selector */
	call	EXT(db_trap_from_asm)
	addl	$0x4,%esp
	/*
	 * And now...?  (iret here relies on the frame/TSS state set up
	 * above; in practice this path is not expected to resume.)
	 */
	iret				/* ha, ha, ha... */
#endif	/* MACH_KDB */
504
505/*
506 * Trap/interrupt entry points.
507 *
508 * All traps must create the following save area on the PCB "stack":
509 *
510 * gs
511 * fs
512 * es
513 * ds
514 * edi
515 * esi
516 * ebp
517 * cr2 if page fault - otherwise unused
518 * ebx
519 * edx
520 * ecx
521 * eax
522 * trap number
523 * error code
524 * eip
525 * cs
526 * eflags
527 * user esp - if from user
528 * user ss - if from user
529 * es - if from V86 thread
530 * ds - if from V86 thread
531 * fs - if from V86 thread
532 * gs - if from V86 thread
533 *
534 */
535
536/*
537 * General protection or segment-not-present fault.
538 * Check for a GP/NP fault in the kernel_return
539 * sequence; if there, report it as a GP/NP fault on the user's instruction.
540 *
541 * esp-> 0: trap code (NP or GP)
542 * 4: segment number in error
543 * 8 eip
544 * 12 cs
545 * 16 eflags
546 * 20 old registers (trap is from kernel)
547 */
/* GP fault entry: push the trap type, then decide whether the fault
 * hit one of the kernel-exit instructions below (kret_*), in which
 * case it is really a fault on the USER state being restored and is
 * re-reported against the user's instruction.
 */
Entry(t_gen_prot)
	pushl	$(T_GENERAL_PROTECTION)	/* indicate fault type */
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(t_segnp)
	pushl	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */

trap_check_kernel_exit:
	testl	$(EFL_VM),16(%esp)	/* is trap from V86 mode? */
	jnz	EXT(alltraps)		/* isn`t kernel trap if so */
	testl	$3,12(%esp)		/* is trap from kernel mode? */
	jne	EXT(alltraps)		/* if so: */
					/* check for the kernel exit sequence */
	cmpl	$EXT(kret_iret),8(%esp)		/* on IRET? */
	je	fault_iret
	cmpl	$EXT(kret_popl_ds),8(%esp)	/* popping DS? */
	je	fault_popl_ds
	cmpl	$EXT(kret_popl_es),8(%esp)	/* popping ES? */
	je	fault_popl_es
	cmpl	$EXT(kret_popl_fs),8(%esp)	/* popping FS? */
	je	fault_popl_fs
	cmpl	$EXT(kret_popl_gs),8(%esp)	/* popping GS? */
	je	fault_popl_gs
take_fault:				/* if none of the above: */
	jmp	EXT(alltraps)		/* treat as normal trap. */

/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *  0	trap number
 *  4	errcode
 *  8	eip
 * 12	cs		--> trapno
 * 16	efl		--> errcode
 * 20	user eip
 * 24	user cs
 * 28	user eflags
 * 32	user esp
 * 36	user ss
 *
 * The kernel-level eip/cs/efl (8..16) are discarded by overwriting
 * 12/16 with the trap number/errcode, turning the stack into a
 * normal user-fault frame starting at 12.
 */
fault_iret:
	movl	%eax,8(%esp)		/* save eax (we don`t need saved eip) */
	popl	%eax			/* get trap number */
	movl	%eax,12-4(%esp)		/* put in user trap number */
	popl	%eax			/* get error code */
	movl	%eax,16-8(%esp)		/* put in user errcode */
	popl	%eax			/* restore eax */
	CAH(fltir)
	jmp	EXT(alltraps)		/* take fault */

/*
 * Fault restoring a segment register.  The user's registers are still
 * saved on the stack.  The offending segment register has not been
 * popped.  Each entry discards the kernel eip/cs/efl (addl $12) and
 * falls into the push_* chain to re-save the segment registers that
 * HAD already been popped before the fault.
 */
fault_popl_ds:
	popl	%eax			/* get trap number */
	popl	%edx			/* get error code */
	addl	$12,%esp		/* pop stack to user regs */
	jmp	push_es			/* (DS on top of stack) */
fault_popl_es:
	popl	%eax			/* get trap number */
	popl	%edx			/* get error code */
	addl	$12,%esp		/* pop stack to user regs */
	jmp	push_fs			/* (ES on top of stack) */
fault_popl_fs:
	popl	%eax			/* get trap number */
	popl	%edx			/* get error code */
	addl	$12,%esp		/* pop stack to user regs */
	jmp	push_gs			/* (FS on top of stack) */
fault_popl_gs:
	popl	%eax			/* get trap number */
	popl	%edx			/* get error code */
	addl	$12,%esp		/* pop stack to user regs */
	jmp	push_segregs		/* (GS on top of stack) */

push_es:
	pushl	%es			/* restore es, */
push_fs:
	pushl	%fs			/* restore fs, */
push_gs:
	pushl	%gs			/* restore gs. */
push_segregs:
	movl	%eax,R_TRAPNO(%esp)	/* set trap number */
	movl	%edx,R_ERR(%esp)	/* set error code */
	CAH(fltpp)
	jmp	trap_set_segs		/* take trap */
638
639/*
640 * Debug trap. Check for single-stepping across system call into
641 * kernel. If this is the case, taking the debug trap has turned
642 * off single-stepping - save the flags register with the trace
643 * bit set.
644 */
/*
 * Debug trap.  Check for single-stepping across system call into
 * kernel.  If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(t_debug)
	testl	$(EFL_VM),8(%esp)	/* is trap from V86 mode? */
	jnz	0f			/* isn`t kernel trap if so */
	testl	$3,4(%esp)		/* is trap from kernel mode? */
	jnz	0f			/* if so: */
	cmpl	$syscall_entry,(%esp)	/* system call entry? */
	jne	0f			/* if not, handle as a normal trap */
					/* flags are sitting where syscall */
					/* wants them */
	addl	$8,%esp			/* remove eip/cs */
	jmp	syscall_entry_2		/* continue system call entry */

0:	pushl	$0			/* otherwise: */
	pushl	$(T_DEBUG)		/* handle as normal */
	jmp	EXT(alltraps)		/* debug fault */
660
661/*
662 * Page fault traps save cr2.
663 */
/*
 * Page fault traps save cr2.
 * After pusha, 12(%esp) is the (useless) saved-%esp slot; it is
 * reused to carry the faulting address, matching the "cr2 if page
 * fault - otherwise unused" slot of the trap frame layout above.
 */
Entry(t_page_fault)
	pushl	$(T_PAGE_FAULT)		/* mark a page fault trap */
	pusha				/* save the general registers */
	movl	%cr2,%eax		/* get the faulting address */
	movl	%eax,12(%esp)		/* save in esp save slot */
	jmp	trap_push_segs		/* continue fault */
670
671/*
672 * All 'exceptions' enter here with:
673 * esp-> trap number
674 * error code
675 * old eip
676 * old cs
677 * old eflags
678 * old esp if trapped from user
679 * old ss if trapped from user
680 *
681 * NB: below use of CPU_NUMBER assumes that macro will use correct
682 * segment register for any kernel data accesses.
683 */
/* Common trap entry: complete the save area, switch to kernel
 * segments, then dispatch by origin (V86/user, kernel-loaded task,
 * or true kernel).
 */
Entry(alltraps)
	pusha				/* save the general registers */
trap_push_segs:
	pushl	%ds			/* save the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

trap_set_segs:
	movl	%ss,%ax
	movl	%ax,%ds
	movl	%ax,%es			/* switch to kernel data seg */
	cld				/* clear direction flag */
	testl	$(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
	jnz	trap_from_user		/* user mode trap if so */
	testb	$3,R_CS(%esp)		/* user mode trap? */
	jnz	trap_from_user
	CPU_NUMBER(%edx)
	cmpl	$0,CX(EXT(active_kloaded),%edx)
	je	trap_from_kernel	/* if clear, truly in kernel */
#ifdef FIXME
	cmpl	ETEXT_ADDR,R_EIP(%esp)	/* pc within kernel? */
	jb	trap_from_kernel
#endif
trap_from_kloaded:
	/*
	 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
	 * so transfer the stack frame into the PCB explicitly, then
	 * start running on resulting "PCB stack".  We have to set
	 * up a simulated "uesp" manually, since there's none in the
	 * frame.
	 */
	mov	$CPU_DATA,%dx
	mov	%dx,%gs
	CAH(atstart)
	CPU_NUMBER(%edx)
	movl	CX(EXT(active_kloaded),%edx),%ebx
	movl	CX(EXT(kernel_stack),%edx),%eax
	xchgl	%esp,%eax		/* %esp = kernel stack; %eax = old frame */
	FRAME_STACK_TO_PCB(%ebx,%eax)
	CAH(atend)
	jmp	EXT(take_trap)

trap_from_user:
	mov	$CPU_DATA,%ax
	mov	%ax,%gs

	CPU_NUMBER(%edx)
	TIME_TRAP_UENTRY		/* charge user time (needs %edx = cpu) */

	movl	CX(EXT(kernel_stack),%edx),%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */
					/* user regs pointer already set */
LEXT(take_trap)
	pushl	%ebx			/* record register save area */
	pushl	%ebx			/* pass register save area to trap */
	call	EXT(user_trap)		/* call user trap routine */
	movl	4(%esp),%esp		/* switch back to PCB stack */
742
743/*
744 * Return from trap or system call, checking for ASTs.
745 * On PCB stack.
746 */
747
/* Loop until no ASTs are pending, servicing each on the kernel
 * stack, then fall into the appropriate return path.  Runs on the
 * PCB stack whose top word links back to itself (popl %esp below).
 */
LEXT(return_from_trap)
	CPU_NUMBER(%edx)
	cmpl	$0,CX(EXT(need_ast),%edx)
	je	EXT(return_to_user)	/* if we need an AST: */

	movl	CX(EXT(kernel_stack),%edx),%esp
					/* switch to kernel stack */
	pushl	$0			/* push preemption flag */
	call	EXT(i386_astintr)	/* take the AST */
	addl	$4,%esp			/* pop preemption flag */
	popl	%esp			/* switch back to PCB stack (w/exc link) */
	jmp	EXT(return_from_trap)	/* and check again (rare) */
					/* ASTs after this point will */
					/* have to wait */

/*
 * Arrange the checks needed for kernel-loaded (or kernel-loading)
 * threads so that branch is taken in kernel-loaded case.
 */
LEXT(return_to_user)
	TIME_TRAP_UEXIT
	CPU_NUMBER(%eax)
	cmpl	$0,CX(EXT(active_kloaded),%eax)
	jnz	EXT(return_xfer_stack)
	movl	$CPD_ACTIVE_THREAD,%ebx
	movl	%gs:(%ebx),%ebx		/* get active thread */
	movl	TH_TOP_ACT(%ebx),%ebx	/* get thread->top_act */
	cmpl	$0,ACT_KLOADING(%ebx)	/* check if kernel-loading */
	jnz	EXT(return_kernel_loading)

#if	MACH_RT
#if	MACH_ASSERT
	/* Returning to user with a nonzero preemption level is a bug:
	 * trap into the debugger via int3.
	 */
	movl	$CPD_PREEMPTION_LEVEL,%ebx
	cmpl	$0,%gs:(%ebx)
	je	EXT(return_from_kernel)
	int	$3
#endif	/* MACH_ASSERT */
#endif	/* MACH_RT */

/*
 * Return from kernel mode to interrupted thread.
 * The kret_* labels mark each restore instruction so that GP/NP
 * faults here can be recognized by trap_check_kernel_exit above.
 */

LEXT(return_from_kernel)
LEXT(kret_popl_gs)
	popl	%gs			/* restore segment registers */
LEXT(kret_popl_fs)
	popl	%fs
LEXT(kret_popl_es)
	popl	%es
LEXT(kret_popl_ds)
	popl	%ds
	popa				/* restore general registers */
	addl	$8,%esp			/* discard trap number and error code */

LEXT(kret_iret)
	iret				/* return from interrupt */


LEXT(return_xfer_stack)
	/*
	 * If we're on PCB stack in a kernel-loaded task, we have
	 * to transfer saved state back to thread stack and swap
	 * stack pointers here, because the hardware's not going
	 * to do so for us.
	 */
	CAH(rxsstart)
	CPU_NUMBER(%eax)
	movl	CX(EXT(kernel_stack),%eax),%esp
	movl	CX(EXT(active_kloaded),%eax),%eax
	FRAME_PCB_TO_STACK(%eax)
	movl	%eax,%esp		/* run on the transferred frame */
	CAH(rxsend)
	jmp	EXT(return_from_kernel)

/*
 * Hate to put this here, but setting up a separate swap_func for
 * kernel-loaded threads no longer works, since thread executes
 * "for a while" (i.e., until it reaches glue code) when first
 * created, even if it's nominally suspended.  Hence we can't
 * transfer the PCB when the thread first resumes, because we
 * haven't initialized it yet.
 */
/*
 * Have to force transfer to new stack "manually".  Use a string
 * move to transfer all of our saved state to the stack pointed
 * to by iss.uesp, then install a pointer to it as our current
 * stack pointer.
 */
LEXT(return_kernel_loading)
	CPU_NUMBER(%eax)
	movl	CX(EXT(kernel_stack),%eax),%esp
	movl	$CPD_ACTIVE_THREAD,%ebx
	movl	%gs:(%ebx),%ebx		/* get active thread */
	movl	TH_TOP_ACT(%ebx),%ebx	/* get thread->top_act */
	movl	%ebx,%edx		/* save for later */
	movl	$0,ACT_KLOADING(%edx)	/* clear kernel-loading bit */
	FRAME_PCB_TO_STACK(%ebx)
	movl	%ebx,%esp		/* start running on new stack */
	movl	$1,ACT_KLOADED(%edx)	/* set kernel-loaded bit */
	movl	%edx,CX(EXT(active_kloaded),%eax) /* set cached indicator */
	jmp	EXT(return_from_kernel)
850
851/*
852 * Trap from kernel mode. No need to switch stacks or load segment registers.
853 */
/*
 * Trap from kernel mode.  No need to switch stacks or load segment
 * registers.  With KDB/KGDB configured, first route the trap to the
 * appropriate debugger stack if a debugger is (or should be) in
 * charge; otherwise hand it to kernel_trap(), falling back to the
 * debugger(s) and finally panic_trap() if unhandled.  %ebx holds
 * the original stack pointer throughout (callee-saved across calls).
 */
trap_from_kernel:
#if	MACH_KDB || MACH_KGDB
	mov	$CPU_DATA,%ax
	mov	%ax,%gs
	movl	%esp,%ebx		/* save current stack */

	cmpl	EXT(int_stack_high),%esp /* on an interrupt stack? */
	jb	6f			/* OK if so */

#if	MACH_KGDB
	cmpl	$0,EXT(kgdb_active)	/* Unexpected trap in kgdb */
	je	0f			/* no */

	pushl	%esp			/* Already on kgdb stack */
	cli
	call	EXT(kgdb_trap)
	addl	$4,%esp
	jmp	EXT(return_from_kernel)
0:	/* should kgdb handle this exception? */
	cmpl	$(T_NO_FPU),R_TRAPNO(%esp)	/* FPU disabled? */
	je	2f			/* yes */
	cmpl	$(T_PAGE_FAULT),R_TRAPNO(%esp)	/* page fault? */
	je	2f			/* yes */
1:
	cli				/* disable interrupts */
	CPU_NUMBER(%edx)		/* get CPU number */
	movl	CX(EXT(kgdb_stacks),%edx),%ebx
	xchgl	%ebx,%esp		/* switch to kgdb stack */
	pushl	%ebx			/* pass old sp as an arg */
	call	EXT(kgdb_from_kernel)
	popl	%esp			/* switch back to kernel stack */
	jmp	EXT(return_from_kernel)
2:
#endif	/* MACH_KGDB */

#if	MACH_KDB
	cmpl	$0,EXT(db_active)	/* could trap be from ddb? */
	je	3f			/* no */
#if	NCPUS > 1
	CPU_NUMBER(%edx)		/* see if this CPU is in ddb */
	cmpl	$0,CX(EXT(kdb_active),%edx)
	je	3f			/* no */
#endif	/* NCPUS > 1 */
	pushl	%esp
	call	EXT(db_trap_from_asm)
	addl	$0x4,%esp
	jmp	EXT(return_from_kernel)

3:
	/*
	 * Dilemma: don't want to switch to kernel_stack if trap
	 * "belongs" to ddb; don't want to switch to db_stack if
	 * trap "belongs" to kernel.  So have to duplicate here the
	 * set of trap types that kernel_trap() handles.  Note that
	 * "unexpected" page faults will not be handled by kernel_trap().
	 * In this panic-worthy case, we fall into the debugger with
	 * kernel_stack containing the call chain that led to the
	 * bogus fault.
	 */
	movl	R_TRAPNO(%esp),%edx
	cmpl	$(T_PAGE_FAULT),%edx
	je	4f
	cmpl	$(T_NO_FPU),%edx
	je	4f
	cmpl	$(T_FPU_FAULT),%edx
	je	4f
	cmpl	$(T_FLOATING_POINT_ERROR),%edx
	je	4f
	cmpl	$(T_PREEMPT),%edx
	jne	7f
4:
#endif	/* MACH_KDB */

	CPU_NUMBER(%edx)		/* get CPU number */
	cmpl	CX(EXT(kernel_stack),%edx),%esp
					/* if not already on kernel stack, */
	ja	5f			/* check some more */
	cmpl	CX(EXT(active_stacks),%edx),%esp
	ja	6f			/* on kernel stack: no switch */
5:
	movl	CX(EXT(kernel_stack),%edx),%esp
6:
	pushl	%ebx			/* save old stack */
	pushl	%ebx			/* pass as parameter */
	call	EXT(kernel_trap)	/* to kernel trap routine */
	addl	$4,%esp			/* pop parameter */
	testl	%eax,%eax
	jne	8f
	/*
	 * If kernel_trap returns false, trap wasn't handled.
	 */
7:
#if	MACH_KDB
	CPU_NUMBER(%edx)
	movl	CX(EXT(db_stacks),%edx),%esp
	pushl	%ebx			/* pass old stack as parameter */
	call	EXT(db_trap_from_asm)
#endif	/* MACH_KDB */
#if	MACH_KGDB
	cli				/* disable interrupts */
	CPU_NUMBER(%edx)		/* get CPU number */
	movl	CX(EXT(kgdb_stacks),%edx),%esp
	pushl	%ebx			/* pass old stack as parameter */
	call	EXT(kgdb_from_kernel)
#endif	/* MACH_KGDB */
	addl	$4,%esp			/* pop parameter */
	testl	%eax,%eax
	jne	8f
	/*
	 * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap
	 * wasn't handled.
	 */
	pushl	%ebx			/* pass old stack as parameter */
	call	EXT(panic_trap)
	addl	$4,%esp			/* pop parameter */
8:
	movl	%ebx,%esp		/* get old stack (from callee-saves reg) */
#else	/* MACH_KDB || MACH_KGDB */
	pushl	%esp			/* pass parameter */
	call	EXT(kernel_trap)	/* to kernel trap routine */
	addl	$4,%esp			/* pop parameter */
#endif	/* MACH_KDB || MACH_KGDB */

#if	MACH_RT
	CPU_NUMBER(%edx)

	movl	CX(EXT(need_ast),%edx),%eax	/* get pending asts */
	testl	$AST_URGENT,%eax	/* any urgent preemption? */
	je	EXT(return_from_kernel)	/* no, nothing to do */
	cmpl	$0,EXT(preemptable)	/* kernel-mode, preemption enabled? */
	je	EXT(return_from_kernel)	/* no, skip it */
	cmpl	$T_PREEMPT,48(%esp)	/* preempt request? */
	jne	EXT(return_from_kernel)	/* no, nothing to do */
	/* xor/and of the two pointers is zero iff %esp lies in the
	 * same KERNEL_STACK_SIZE-aligned region as kernel_stack.
	 */
	movl	CX(EXT(kernel_stack),%edx),%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	andl	$(-KERNEL_STACK_SIZE),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	EXT(return_from_kernel)	/* no, skip it */

#if	PREEMPT_DEBUG_LOG
	pushl	28(%esp)		/* stack pointer */
	pushl	24+4(%esp)		/* frame pointer */
	pushl	56+8(%esp)		/* stack pointer */
	pushl	$0f
	call	EXT(log_thread_action)
	addl	$16, %esp
	.data
0:	String	"trap preempt eip"
	.text
#endif	/* PREEMPT_DEBUG_LOG */

	pushl	$1			/* push preemption flag */
	call	EXT(i386_astintr)	/* take the AST */
	addl	$4,%esp			/* pop preemption flag */
#endif	/* MACH_RT */

	jmp	EXT(return_from_kernel)
1012
1013/*
1014 * Called as a function, makes the current thread
1015 * return from the kernel as if from an exception.
1016 */
1017
	.globl	EXT(thread_exception_return)
	.globl	EXT(thread_bootstrap_return)
LEXT(thread_exception_return)
LEXT(thread_bootstrap_return)
	/* Round %esp up to the top of the current kernel stack, then
	 * load the saved PCB-stack pointer stored just below the
	 * i386_kernel_state area at the stack top.
	 */
	movl	%esp,%ecx			/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp		/* switch back to PCB stack */
	jmp	EXT(return_from_trap)
1026
/* void call_continuation(void (*continuation)(void))
 * Reset %esp to the base of the current kernel stack (discarding the
 * entire call chain) and jump to the continuation; it must never
 * return.  Frame pointer is zeroed to terminate backtraces.
 */
Entry(call_continuation)
	movl	S_ARG0,%eax		/* get continuation */
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	addl	$(-3-IKS_SIZE),%ecx
	movl	%ecx,%esp		/* pop the stack */
	xorl	%ebp,%ebp		/* zero frame pointer */
	jmp	*%eax			/* goto continuation */
1035
/*
 * Debug-only interrupt instrumentation.  Normally compiled out
 * (#if 0): the empty definitions in the #else arm make
 * LOG_INTERRUPT/CHECK_INTERRUPT_TIME disappear from the interrupt
 * path.  Both macros preserve all general registers via pushal/popal.
 */
#if 0
#define LOG_INTERRUPT(info,msg) \
	pushal ; \
	pushl msg ; \
	pushl info ; \
	call EXT(log_thread_action) ; \
	add $8,%esp ; \
	popal
#define CHECK_INTERRUPT_TIME(n) \
	pushal ; \
	pushl $n ; \
	call EXT(check_thread_time) ; \
	add $4,%esp ; \
	popal
#else
#define LOG_INTERRUPT(info,msg)
#define CHECK_INTERRUPT_TIME(n)
#endif
1054
/* message strings for the (disabled) interrupt logging macros above */
imsg_start:
	String "interrupt start"
imsg_end:
	String "interrupt end"
1059
/*
 * All interrupts enter here.
 * old %eax on stack; interrupt number in %eax.
 *
 * Saves scratch registers, switches to this CPU's interrupt stack
 * (unless already on it -- see int_from_intstack), bumps the
 * interrupt/preemption nesting counters, and dispatches through
 * PE_incoming_interrupt.  On the way out it checks for pending ASTs
 * and, under MACH_RT, for a kernel preemption opportunity.
 */
Entry(all_intrs)
	pushl	%ecx			/* save registers */
	pushl	%edx
	cld				/* clear direction flag */

	cmpl	%ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
	jb	int_from_intstack	/* if not: */

	pushl	%ds			/* save segment registers */
	pushl	%es
	mov	%ss,%dx			/* switch to kernel segments */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$CPU_DATA,%dx
	mov	%dx,%gs			/* %gs -> per-cpu data */

	CPU_NUMBER(%edx)

	movl	CX(EXT(int_stack_top),%edx),%ecx
	xchgl	%ecx,%esp		/* switch to interrupt stack */
					/* %ecx now holds old stack pointer */

#if	STAT_TIME
	pushl	%ecx			/* save pointer to old stack */
#else
	pushl	%ebx			/* save %ebx - out of the way */
					/* so stack looks the same */
	pushl	%ecx			/* save pointer to old stack */
	TIME_INT_ENTRY			/* do timing */
#endif

#if	MACH_RT
	movl	$CPD_PREEMPTION_LEVEL,%edx
	incl	%gs:(%edx)		/* no preemption inside an interrupt */
#endif	/* MACH_RT */

	movl	$CPD_INTERRUPT_LEVEL,%edx
	incl	%gs:(%edx)		/* one more interrupt level */

	pushl	%eax			/* Push trap number */
	call	EXT(PE_incoming_interrupt) /* call generic interrupt routine */
	addl	$4,%esp			/* Pop trap number */

	.globl	EXT(return_to_iret)
LEXT(return_to_iret)			/* (label for kdb_kintr and hardclock) */

	movl	$CPD_INTERRUPT_LEVEL,%edx
	decl	%gs:(%edx)

#if	MACH_RT
	movl	$CPD_PREEMPTION_LEVEL,%edx
	decl	%gs:(%edx)
#endif	/* MACH_RT */

#if	STAT_TIME
#else
	TIME_INT_EXIT			/* do timing */
	movl	4(%esp),%ebx		/* restore the extra reg we saved */
#endif

	popl	%esp			/* switch back to old stack */

	CPU_NUMBER(%edx)
	movl	CX(EXT(need_ast),%edx),%eax
	testl	%eax,%eax		/* any pending asts? */
	je	1f			/* no, nothing to do */
	testl	$(EFL_VM),I_EFL(%esp)	/* if in V86 */
	jnz	ast_from_interrupt	/* take it */
	testb	$3,I_CS(%esp)		/* user mode, */
	jnz	ast_from_interrupt	/* take it */
#ifdef FIXME
	cmpl	ETEXT_ADDR,I_EIP(%esp)	/* if within kernel-loaded task, */
	jnb	ast_from_interrupt	/* take it */
#endif

#if	MACH_RT
	cmpl	$0,EXT(preemptable)	/* kernel-mode, preemption enabled? */
	je	1f			/* no, skip it */
	movl	$CPD_PREEMPTION_LEVEL,%ecx
	cmpl	$0,%gs:(%ecx)		/* preemption masked? */
	jne	1f			/* yes, skip it */
	testl	$AST_URGENT,%eax	/* any urgent requests? */
	je	1f			/* no, skip it */
	cmpl	$LEXT(locore_end),I_EIP(%esp)	/* are we in locore code? */
	jb	1f			/* yes, skip it */
	movl	CX(EXT(kernel_stack),%edx),%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx		/* xor/mask: %ecx == 0 iff %esp is */
	andl	$(-KERNEL_STACK_SIZE),%ecx /* within this cpu's kernel stack */
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	1f			/* no, skip it */

/*
 * Take an AST from kernel space.  We don't need (and don't want)
 * to do as much as the case where the interrupt came from user
 * space.
 */
#if	PREEMPT_DEBUG_LOG
	pushl	$0
	pushl	$0
	pushl	I_EIP+8(%esp)		/* +8 compensates for the two pushes */
	pushl	$0f			/* address of message string below */
	call	EXT(log_thread_action)
	addl	$16, %esp
	.data
0:	String	"trap preempt eip"
	.text
#endif	/* PREEMPT_DEBUG_LOG */

	sti				/* interrupts on while preempting */
	pushl	$1			/* push preemption flag */
	call	EXT(i386_astintr)	/* take the AST */
	addl	$4,%esp			/* pop preemption flag */
#endif	/* MACH_RT */

1:
	pop	%es			/* restore segment regs */
	pop	%ds
	pop	%edx
	pop	%ecx
	pop	%eax
	iret				/* return to caller */
1185
/*
 * Interrupt arrived while already running on the interrupt stack:
 * stay on this stack, bump the nesting counters and dispatch.
 * No AST/preemption check on the way out -- we return to the
 * interrupted interrupt handler, not to a thread.
 */
int_from_intstack:
#if	MACH_RT
	movl	$CPD_PREEMPTION_LEVEL,%edx
	incl	%gs:(%edx)		/* preemption stays masked while nested */
#endif	/* MACH_RT */

	movl	$CPD_INTERRUPT_LEVEL,%edx
	incl	%gs:(%edx)		/* one more interrupt level */

	pushl	%eax			/* Push trap number */

	call	EXT(PE_incoming_interrupt)

LEXT(return_to_iret_i)			/* ( label for kdb_kintr) */

	addl	$4,%esp			/* pop trap number */

	movl	$CPD_INTERRUPT_LEVEL,%edx
	decl	%gs:(%edx)

#if	MACH_RT
	movl	$CPD_PREEMPTION_LEVEL,%edx
	decl	%gs:(%edx)
#endif	/* MACH_RT */

	pop	%edx			/* must have been on kernel segs */
	pop	%ecx
	pop	%eax			/* no ASTs */
	iret
1215
/*
 * Take an AST from an interrupt.
 * On PCB stack.
 * sp->	es	-> edx
 *	ds	-> ecx
 *	edx	-> eax
 *	ecx	-> trapno
 *	eax	-> code
 *	eip
 *	cs
 *	efl
 *	esp
 *	ss
 *
 * Pops the light-weight interrupt frame, then rebuilds a full
 * trap-style save area (zero trapno/code + pusha + segments) so the
 * AST can be handled exactly like a trap from user space.
 */
ast_from_interrupt:
	pop	%es			/* restore all registers ... */
	pop	%ds
	popl	%edx
	popl	%ecx
	popl	%eax
	sti				/* Reenable interrupts */
	pushl	$0			/* zero code */
	pushl	$0			/* zero trap number */
	pusha				/* save general registers */
	push	%ds			/* save segment registers */
	push	%es
	push	%fs
	push	%gs
	mov	%ss,%dx			/* switch to kernel segments */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$CPU_DATA,%dx
	mov	%dx,%gs

	/*
	 * See if we interrupted a kernel-loaded thread executing
	 * in its own task.
	 */
	CPU_NUMBER(%edx)
	testl	$(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
	jnz	0f			/* user mode trap if so */
	testb	$3,R_CS(%esp)
	jnz	0f			/* user mode, back to normal */
#ifdef	FIXME
	cmpl	ETEXT_ADDR,R_EIP(%esp)
	jb	0f			/* not kernel-loaded, back to normal */
#endif

	/*
	 * Transfer the current stack frame by hand into the PCB.
	 */
	CAH(afistart)
	movl	CX(EXT(active_kloaded),%edx),%eax
	movl	CX(EXT(kernel_stack),%edx),%ebx
	xchgl	%ebx,%esp		/* run on kernel stack from here */
	FRAME_STACK_TO_PCB(%eax,%ebx)
	CAH(afiend)
	TIME_TRAP_UENTRY
	jmp	3f
0:
	TIME_TRAP_UENTRY

	movl	CX(EXT(kernel_stack),%edx),%eax
					/* switch to kernel stack */
	xchgl	%eax,%esp		/* %eax -> saved PCB frame */
3:
	pushl	%eax			/* save PCB stack ptr across the call */
	pushl	$0			/* push preemption flag */
	call	EXT(i386_astintr)	/* take the AST */
	addl	$4,%esp			/* pop preemption flag */
	popl	%esp			/* back to PCB stack */
	jmp	EXT(return_from_trap)	/* return */
1288
1289#if MACH_KDB || MACH_KGDB
1290/*
1291 * kdb_kintr: enter kdb from keyboard interrupt.
1292 * Chase down the stack frames until we find one whose return
1293 * address is the interrupt handler. At that point, we have:
1294 *
1295 * frame-> saved %ebp
1296 * return address in interrupt handler
1297 * ivect
1298 * saved SPL
1299 * return address == return_to_iret_i
1300 * saved %edx
1301 * saved %ecx
1302 * saved %eax
1303 * saved %eip
1304 * saved %cs
1305 * saved %efl
1306 *
1307 * OR:
1308 * frame-> saved %ebp
1309 * return address in interrupt handler
1310 * ivect
1311 * saved SPL
1312 * return address == return_to_iret
1313 * pointer to save area on old stack
1314 * [ saved %ebx, if accurate timing ]
1315 *
1316 * old stack: saved %es
1317 * saved %ds
1318 * saved %edx
1319 * saved %ecx
1320 * saved %eax
1321 * saved %eip
1322 * saved %cs
1323 * saved %efl
1324 *
1325 * Call kdb, passing it that register save area.
1326 */
1327
#if	MACH_KGDB
Entry(kgdb_kintr)
#endif	/* MACH_KGDB */
#if	MACH_KDB
Entry(kdb_kintr)
#endif	/* MACH_KDB */
	movl	%ebp,%eax		/* save caller`s frame pointer */
	movl	$EXT(return_to_iret),%ecx /* interrupt return address 1 */
	movl	$EXT(return_to_iret_i),%edx /* interrupt return address 2 */

	/* walk the %ebp frame chain; 16(%eax) is each frame's return */
	/* address (above saved ebp, handler return, ivect, SPL slots) */
0:	cmpl	16(%eax),%ecx		/* does this frame return to */
					/* interrupt handler (1)? */
	je	1f
	cmpl	$kdb_from_iret,16(%eax)	/* already redirected to kdb? */
	je	1f
	cmpl	16(%eax),%edx		/* interrupt handler (2)? */
	je	2f			/* if not: */
	cmpl	$kdb_from_iret_i,16(%eax) /* already redirected (2)? */
	je	2f
	movl	(%eax),%eax		/* try next frame */
	jmp	0b

1:	movl	$kdb_from_iret,16(%eax)	/* returns to kernel/user stack */
	ret

2:	movl	$kdb_from_iret_i,16(%eax)
					/* returns to interrupt stack */
	ret
1356
/*
 * On return from keyboard interrupt, we will execute
 * kdb_from_iret_i
 *	if returning to an interrupt on the interrupt stack
 * kdb_from_iret
 *	if returning to an interrupt on the user or kernel stack
 *
 * Here: complete the partial interrupt save area with the remaining
 * registers, hand the whole area to kdb/kgdb, then undo and resume
 * the normal interrupt return path.
 */
kdb_from_iret:
	/* save regs in known locations */
#if	STAT_TIME
	pushl	%ebx			/* caller`s %ebx is in reg */
#else
	movl	4(%esp),%eax		/* get caller`s %ebx */
	pushl	%eax			/* push on stack */
#endif
	pushl	%ebp
	pushl	%esi
	pushl	%edi
	push	%fs
	push	%gs
#if	MACH_KGDB
	cli				/* no interrupts while in kgdb */
	pushl	%esp			/* pass regs */
	call	EXT(kgdb_kentry)	/* to kgdb */
	addl	$4,%esp			/* pop parameters */
#endif	/* MACH_KGDB */
#if	MACH_KDB
	pushl	%esp			/* pass regs */
	call	EXT(kdb_kentry)		/* to kdb */
	addl	$4,%esp			/* pop parameters */
#endif	/* MACH_KDB */
	pop	%gs			/* restore registers */
	pop	%fs
	popl	%edi
	popl	%esi
	popl	%ebp
#if	STAT_TIME
	popl	%ebx
#else
	popl	%eax
	movl	%eax,4(%esp)		/* put %ebx back in its save slot */
#endif
	jmp	EXT(return_to_iret)	/* normal interrupt return */
1400
/*
 * Debugger entry when the interrupted context was itself on the
 * interrupt stack: rebuild a full trap-style save area (zero
 * trapno/code + pusha + segment registers), call the debugger,
 * then restore everything and iret back.
 */
kdb_from_iret_i:			/* on interrupt stack */
	pop	%edx			/* restore saved registers */
	pop	%ecx
	pop	%eax
	pushl	$0			/* zero error code */
	pushl	$0			/* zero trap number */
	pusha				/* save general registers */
	push	%ds			/* save segment registers */
	push	%es
	push	%fs
	push	%gs
#if	MACH_KGDB
	cli				/* disable interrupts */
	CPU_NUMBER(%edx)		/* get CPU number */
	movl	CX(EXT(kgdb_stacks),%edx),%ebx
	xchgl	%ebx,%esp		/* switch to kgdb stack */
	pushl	%ebx			/* pass old sp as an arg */
	call	EXT(kgdb_from_kernel)
	popl	%esp			/* switch back to interrupt stack */
#endif	/* MACH_KGDB */
#if	MACH_KDB
	pushl	%esp			/* pass regs, */
	pushl	$0			/* code, */
	pushl	$-1			/* type to kdb */
	call	EXT(kdb_trap)
	addl	$12,%esp
#endif	/* MACH_KDB */
	pop	%gs			/* restore segment registers */
	pop	%fs
	pop	%es
	pop	%ds
	popa				/* restore general registers */
	addl	$8,%esp			/* discard trapno/error code */
	iret
1435
1436#endif /* MACH_KDB || MACH_KGDB */
1437
1438
/*
 * Mach RPC enters through a call gate, like a system call.
 * Builds a trap-style save area, switches to the kernel stack,
 * copies the user arguments in, and dispatches through
 * mach_trap_table (16 bytes per entry: count at +0, proc at +4).
 */

Entry(mach_rpc)
	pushf				/* save flags as soon as possible */
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$CPU_DATA,%dx
	mov	%dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places
 * (the gate pushed eip/cs before our pushf, so the three slots
 * are rotated relative to a hardware trap frame)
 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

	CPU_NUMBER(%edx)
	TIME_TRAP_UENTRY

	negl	%eax			/* get system call number */
	shll	$4,%eax			/* manual indexing */

/*
 * Check here for mach_rpc from kernel-loaded task --
 *  - Note that kernel-loaded task returns via real return.
 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
 * so transfer the stack frame into the PCB explicitly, then
 * start running on resulting "PCB stack".  We have to set
 * up a simulated "uesp" manually, since there's none in the
 * frame.
 */
	cmpl	$0,CX(EXT(active_kloaded),%edx)
	jz	2f
	CAH(mrstart)
	movl	CX(EXT(active_kloaded),%edx),%ebx
	movl	CX(EXT(kernel_stack),%edx),%edx
	xchgl	%edx,%esp

	FRAME_STACK_TO_PCB(%ebx,%edx)
	CAH(mrend)

	CPU_NUMBER(%edx)
	jmp	3f

2:
	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */

3:

/*
 * Register use on entry:
 *	eax contains syscall number (scaled by 16 above)
 *	ebx contains user regs pointer
 */
#undef RPC_TRAP_REGISTERS
#ifdef RPC_TRAP_REGISTERS
	pushl	R_ESI(%ebx)
	pushl	R_EDI(%ebx)
	pushl	R_ECX(%ebx)
	pushl	R_EDX(%ebx)
#else
	movl	EXT(mach_trap_table)(%eax),%ecx
					/* get number of arguments */
	jecxz	2f			/* skip argument copy if none */
	movl	R_UESP(%ebx),%esi	/* get user stack pointer */
	lea	4(%esi,%ecx,4),%esi	/* skip user return address, */
					/* and point past last argument */
					/* edx holds cpu number from above */
	movl	CX(EXT(active_kloaded),%edx),%edx
					/* point to current thread */
	orl	%edx,%edx		/* if ! kernel-loaded, check addr */
	jz	4f			/* else */
	mov	%ds,%dx			/* kernel data segment access */
	jmp	5f
4:
	cmpl	$(VM_MAX_ADDRESS),%esi	/* in user space? */
	ja	mach_call_addr		/* address error if not */
	movl	$USER_DS,%edx		/* user data segment access */
5:
	mov	%dx,%fs			/* arguments are read through %fs */
	movl	%esp,%edx		/* save kernel ESP for error recovery */
1:
	subl	$4,%esi
	RECOVERY_SECTION
	RECOVER(mach_call_addr_push)
	pushl	%fs:(%esi)		/* push argument on stack */
	loop	1b			/* loop for all arguments */
#endif

/*
 * Register use on entry:
 *	eax contains syscall number (scaled by 16)
 *	ebx contains user regs pointer
 */
2:
	CAH(call_call)
	call	*EXT(mach_trap_table)+4(%eax)
					/* call procedure */
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
1562
1563
/*
 * Special system call entry for "int 0x80", which has the "eflags"
 * register saved at the right place already.
 * Fall back to the common syscall path after saving the registers.
 *
 * esp ->	old eip
 *		old cs
 *		old eflags
 *		old esp		if trapped from user
 *		old ss		if trapped from user
 *
 * XXX: for the moment, we don't check for int 0x80 from kernel mode.
 */
Entry(syscall_int80)
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$CPU_DATA,%dx
	mov	%dx,%gs

	jmp	syscall_entry_3		/* frame already trap-shaped: skip */
					/* the eflags/eip/cs shuffle */
1594
/*
 * System call enters through a call gate.  Flags are not saved -
 * we must shuffle stack to look like trap save area.
 *
 * esp->	old eip
 *		old cs
 *		old esp
 *		old ss
 *
 * eax contains system call number.
 *
 * NB: below use of CPU_NUMBER assumes that macro will use correct
 * correct segment register for any kernel data accesses.
 */
Entry(syscall)
syscall_entry:
	pushf				/* save flags as soon as possible */
syscall_entry_2:
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$CPU_DATA,%dx
	mov	%dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places
 * (our pushf landed after the gate's eip/cs, rotating the 3 slots)
 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

syscall_entry_3:
	CPU_NUMBER(%edx)
/*
 * Check here for syscall from kernel-loaded task --
 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
 * so transfer the stack frame into the PCB explicitly, then
 * start running on resulting "PCB stack".  We have to set
 * up a simulated "uesp" manually, since there's none in the
 * frame.
 */
	cmpl	$0,CX(EXT(active_kloaded),%edx)
	jz	0f
	CAH(scstart)
	movl	CX(EXT(active_kloaded),%edx),%ebx
	movl	CX(EXT(kernel_stack),%edx),%edx
	xchgl	%edx,%esp
	FRAME_STACK_TO_PCB(%ebx,%edx)
	CAH(scend)
	TIME_TRAP_UENTRY
	CPU_NUMBER(%edx)
	jmp	1f

0:
	TIME_TRAP_UENTRY

	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */
					/* user regs pointer already set */

/*
 * Check for MACH or emulated system call
 * Register use (from here till we begin processing call):
 *	eax contains system call number
 *	ebx points to user regs
 */
1:
	movl	$CPD_ACTIVE_THREAD,%edx
	movl	%gs:(%edx),%edx		/* get active thread */
					/* point to current thread */
	movl	TH_TOP_ACT(%edx),%edx	/* get thread->top_act */
	movl	ACT_TASK(%edx),%edx	/* point to task */
	movl	TASK_EMUL(%edx),%edx	/* get emulation vector */
	orl	%edx,%edx		/* if none, */
	je	syscall_native		/* do native system call */
	movl	%eax,%ecx		/* copy system call number */
	subl	DISP_MIN(%edx),%ecx	/* get displacement into syscall */
					/* vector table */
	jl	syscall_native		/* too low - native system call */
	cmpl	DISP_COUNT(%edx),%ecx	/* check range */
	jnl	syscall_native		/* too high - native system call */
	movl	DISP_VECTOR(%edx,%ecx,4),%edx
					/* get the emulation vector */
	orl	%edx,%edx		/* emulated system call if not zero */
	jnz	syscall_emul
1696
/*
 * Native system call.
 * Register use on entry:
 *	eax contains syscall number
 *	ebx points to user regs
 * Mach traps are negative numbers; negate and bounds-check against
 * mach_trap_table (16 bytes per entry).
 */
syscall_native:
	negl	%eax			/* get system call number */
	jl	mach_call_range		/* out of range if it was positive */

	cmpl	EXT(mach_trap_count),%eax /* check system call table bounds */
	jg	mach_call_range		/* error if out of range */
	shll	$4,%eax			/* manual indexing */

	movl	EXT(mach_trap_table)+4(%eax),%edx
					/* get procedure */
	cmpl	$EXT(kern_invalid),%edx	/* if not "kern_invalid" */
	jne	mach_syscall_native	/* go on with Mach syscall */

	movl	$CPD_ACTIVE_THREAD,%edx
	movl	%gs:(%edx),%edx		/* get active thread */
					/* point to current thread */
	movl	TH_TOP_ACT(%edx),%edx	/* get thread->top_act */
	movl	ACT_TASK(%edx),%edx	/* point to task */
	movl	TASK_EMUL(%edx),%edx	/* get emulation vector */
	orl	%edx,%edx		/* if it exists, */
	jne	mach_syscall_native	/* do native system call */
	shrl	$4,%eax			/* restore syscall number */
	jmp	mach_call_range		/* try it as a "server" syscall */
1726
/*
 * Decide whether the Mach syscall goes to the kernel or is raised
 * as an exception to a user server: if the activation's Mach
 * exception port is the host name port (or, failing that, the
 * task's -- see try_task), the kernel handles it directly.
 * Register use on entry:
 *	eax contains syscall number (scaled by 16)
 */
mach_syscall_native:
	movl	$CPD_ACTIVE_THREAD,%edx
	movl	%gs:(%edx),%edx		/* get active thread */

	movl	TH_TOP_ACT(%edx),%edx	/* get thread->top_act */
	movl	ACT_MACH_EXC_PORT(%edx),%edx
	movl	$EXT(realhost),%ecx
	movl	HOST_NAME(%ecx),%ecx
	cmpl	%edx,%ecx		/* act->mach_exc_port = host_name ? */
	je	do_native_call		/* -> send to kernel, do not collect $200 */
	cmpl	$0,%edx			/* thread->mach_exc_port = null ? */
	je	try_task		/* try task */
	jmp	mach_syscall_exception
					/* NOT REACHED */
1741
/*
 * Thread-level Mach exception port was null: repeat the check with
 * the task's Mach exception port.  Host name port -> kernel call;
 * null -> fail; anything else -> raise exception to the server.
 */
try_task:
	movl	$CPD_ACTIVE_THREAD,%edx
	movl	%gs:(%edx),%edx		/* get active thread */

	movl	TH_TOP_ACT(%edx),%edx	/* get thread->top_act */
	movl	ACT_TASK(%edx),%edx	/* point to task */
	movl	TASK_MACH_EXC_PORT(%edx),%edx
	movl	$EXT(realhost),%ecx
	movl	HOST_NAME(%ecx),%ecx
	cmpl	%edx,%ecx		/* task->mach_exc_port = host_name ? */
	je	do_native_call		/* -> send to kernel */
	cmpl	$0,%edx			/* task->mach_exc_port = null ? */
	je	EXT(syscall_failed)	/* no handler anywhere: fail */
	jmp	mach_syscall_exception
					/* NOT REACHED */
1757
/*
 * Register use on entry:
 *	eax contains syscall number (scaled by 16)
 *	ebx contains user regs pointer
 * Copy the arguments from the user stack onto the kernel stack
 * (through %fs), then call the trap procedure.  Bad user addresses
 * divert to mach_call_addr via the RECOVER fault-recovery tables.
 */
do_native_call:
	movl	EXT(mach_trap_table)(%eax),%ecx
					/* get number of arguments */
	jecxz	mach_call_call		/* skip argument copy if none */
	movl	R_UESP(%ebx),%esi	/* get user stack pointer */
	lea	4(%esi,%ecx,4),%esi	/* skip user return address, */
					/* and point past last argument */
	CPU_NUMBER(%edx)
	movl	CX(EXT(active_kloaded),%edx),%edx
					/* point to current thread */
	orl	%edx,%edx		/* if kernel-loaded, skip addr check */
	jz	0f			/* else */
	mov	%ds,%dx			/* kernel data segment access */
	jmp	1f
0:
	cmpl	$(VM_MAX_ADDRESS),%esi	/* in user space? */
	ja	mach_call_addr		/* address error if not */
	movl	$USER_DS,%edx		/* user data segment access */
1:
	mov	%dx,%fs			/* arguments are read through %fs */
	movl	%esp,%edx		/* save kernel ESP for error recovery */
2:
	subl	$4,%esi
	RECOVERY_SECTION
	RECOVER(mach_call_addr_push)
	pushl	%fs:(%esi)		/* push argument on stack */
	loop	2b			/* loop for all arguments */

/*
 * Register use on entry:
 *	eax contains syscall number (scaled by 16)
 *	ebx contains user regs pointer
 */
mach_call_call:

	CAH(call_call)

#if	ETAP_EVENT_MONITOR
	cmpl	$0x200, %eax		/* is this mach_msg? */
	jz	make_syscall		/* if yes, don't record event */

	pushal				/* Otherwise: save registers */
	pushl	%eax			/* push syscall number on stack*/
	call	EXT(etap_machcall_probe1) /* call event begin probe */
	add	$4,%esp			/* restore stack */
	popal				/* restore registers */

	call	*EXT(mach_trap_table)+4(%eax) /* call procedure */
	pushal
	call	EXT(etap_machcall_probe2) /* call event end probe */
	popal
	jmp	skip_syscall		/* syscall already made */
#endif	/* ETAP_EVENT_MONITOR */

make_syscall:
	call	*EXT(mach_trap_table)+4(%eax) /* call procedure */
skip_syscall:

	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
1826
/*
 * Address out of range.  Change to page fault.
 * %esi holds failing address.
 * Register use on entry:
 *	ebx contains user regs pointer
 *	edx holds the kernel ESP saved before the argument-copy loop
 *	    (mach_call_addr_push entry only)
 */
mach_call_addr_push:
	movl	%edx,%esp		/* clean parameters from stack */
mach_call_addr:
	movl	%esi,R_CR2(%ebx)	/* set fault address */
	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
					/* set page-fault trap */
	movl	$(T_PF_USER),R_ERR(%ebx)
					/* set error code - read user space */
	CAH(call_addr)
	jmp	EXT(take_trap)		/* treat as a trap */
1843
/*
 * try sending mach system call exception to server
 * Register use on entry:
 *	eax contains syscall number
 * Builds exception(EXC_MACH_SYSCALL, &code, 1) on the stack;
 * exception() does not return here.
 */
mach_syscall_exception:
	push	%eax			/* code (syscall no.) */
	movl	%esp,%edx		/* remember &code */
	push	$1			/* code_cnt = 1 */
	push	%edx			/* exception_type_t (see i/f docky) */
	push	$EXC_MACH_SYSCALL	/* exception */

	CAH(exception)
	call	EXT(exception)
					/* no return */
1859
/*
 * System call out of range.  Treat as invalid-instruction trap.
 * (? general protection?)
 * Register use on entry:
 *	eax contains syscall number
 * With an emulator present, fail like an illegal instruction;
 * otherwise raise EXC_SYSCALL to the task's exception server.
 */
mach_call_range:
	movl	$CPD_ACTIVE_THREAD,%edx
	movl	%gs:(%edx),%edx		/* get active thread */

	movl	TH_TOP_ACT(%edx),%edx	/* get thread->top_act */
	movl	ACT_TASK(%edx),%edx	/* point to task */
	movl	TASK_EMUL(%edx),%edx	/* get emulation vector */
	orl	%edx,%edx		/* if emulator, */
	jne	EXT(syscall_failed)	/* handle as illegal instruction */
					/* else generate syscall exception: */
	push	%eax			/* code (syscall no.) */
	movl	%esp,%edx		/* remember &code */
	push	$1			/* code_cnt = 1 */
	push	%edx			/* exception_type_t (see i/f docky) */
	push	$EXC_SYSCALL
	CAH(call_range)
	call	EXT(exception)
					/* no return */
1884
/*
 * Syscall could not be handled: reflect it to the thread as an
 * invalid-opcode trap.  Resets to the PCB stack, then re-acquires
 * the kernel stack with %ebx pointing at the saved user registers.
 * NOTE(review): the initial PCB-stack switch is immediately undone
 * by the xchgl below -- looks redundant; confirm against the
 * original return_from_trap/take_trap conventions before changing.
 */
	.globl	EXT(syscall_failed)
LEXT(syscall_failed)
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */
					/* user regs pointer already set */

	movl	$(T_INVALID_OPCODE),R_TRAPNO(%ebx)
					/* set invalid-operation trap */
	movl	$0,R_ERR(%ebx)		/* clear error code */
	CAH(failed)
	jmp	EXT(take_trap)		/* treat as a trap */
1902
/*
 * User space emulation of system calls.
 * edx - user address to handle syscall
 *
 * User stack will become:
 * uesp->	eflags
 *		eip
 * Register use on entry:
 *	ebx contains user regs pointer
 *	edx contains emulator vector address
 * Pushes the saved eflags/eip onto the user stack (through %fs,
 * fault-protected) and redirects the user eip to the emulator.
 */
syscall_emul:
	movl	R_UESP(%ebx),%edi	/* get user stack pointer */
	CPU_NUMBER(%eax)
	movl	CX(EXT(active_kloaded),%eax),%eax
	orl	%eax,%eax		/* if thread not kernel-loaded, */
	jz	0f			/* do address checks */
	subl	$8,%edi			/* push space for new arguments */
	mov	%ds,%ax			/* kernel data segment access */
	jmp	1f			/* otherwise, skip them */
0:
	cmpl	$(VM_MAX_ADDRESS),%edi	/* in user space? */
	ja	syscall_addr		/* address error if not */
	subl	$8,%edi			/* push space for new arguments */
	cmpl	$(VM_MIN_ADDRESS),%edi	/* still in user space? */
	jb	syscall_addr		/* error if not */
	movl	$USER_DS,%ax		/* user data segment access */
1:
	mov	%ax,%fs
	movl	R_EFLAGS(%ebx),%eax	/* move flags */
	RECOVERY_SECTION
	RECOVER(syscall_addr)
	movl	%eax,%fs:0(%edi)	/* to user stack */
	movl	R_EIP(%ebx),%eax	/* move eip */
	RECOVERY_SECTION
	RECOVER(syscall_addr)
	movl	%eax,%fs:4(%edi)	/* to user stack */
	movl	%edi,R_UESP(%ebx)	/* set new user stack pointer */
	movl	%edx,R_EIP(%ebx)	/* change return address to trap */
	movl	%ebx,%esp		/* back to PCB stack */
	CAH(emul)
	jmp	EXT(return_from_trap)	/* return to user */
1945
1946
/*
 * Address error - address is in %edi.
 * Register use on entry:
 *	ebx contains user regs pointer
 * Converts the bad user-stack access into a user page fault and
 * re-enters the trap machinery.
 */
syscall_addr:
	movl	%edi,R_CR2(%ebx)	/* set fault address */
	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
					/* set page-fault trap */
	movl	$(T_PF_USER),R_ERR(%ebx)
					/* set error code - read user space */
	CAH(addr)
	jmp	EXT(take_trap)		/* treat as a trap */
1960
/*\f*/
/*
 * Utility routines.
 */


/*
 * Copy from user address space.
 *	arg0: user address
 *	arg1: kernel address
 *	arg2: byte count
 * Returns 0 on success, EFAULT on a user-space fault or address
 * wrap-around.  Reads the source through %ds, which is switched to
 * USER_DS unless the current activation runs on the kernel pmap;
 * %ds is restored from %ss on exit.
 */
Entry(copyinmsg)
ENTRY(copyin)
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get user start address */
	movl	8+S_ARG1,%edi		/* get kernel destination address */
	movl	8+S_ARG2,%edx		/* get count */

	lea	0(%esi,%edx),%eax	/* get user end address + 1 */

	movl	$CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx		/* get active thread */
	movl	TH_TOP_ACT(%ecx),%ecx	/* get thread->top_act */
	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
	cmpl	EXT(kernel_pmap), %ecx
	jz	1f			/* kernel pmap: keep kernel %ds */
	movl	$USER_DS,%cx		/* user data segment access */
	mov	%cx,%ds
1:
	cmpl	%esi,%eax
	jb	copyin_fail		/* fail if wrap-around */
	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb
	xorl	%eax,%eax		/* return 0 for success */
copy_ret:
	mov	%ss,%di			/* restore kernel data segment */
	mov	%di,%ds

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyin_fail:
	movl	$EFAULT,%eax		/* return error for failure */
	jmp	copy_ret		/* pop frame and return */
2021
/*
 * Copy string from user address space.
 *	arg0: user address
 *	arg1: kernel address (may be 0 to only measure the string)
 *	arg2: max byte count, including the trailing NUL
 *	arg3: actual byte count (OUT; may be 0 to skip)
 * Returns 0 on success, ENAMETOOLONG if no NUL was found within the
 * max count, EFAULT on a user-space fault.  The source is read
 * through %fs (USER_DS unless the activation runs on the kernel
 * pmap).
 */
Entry(copyinstr)
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get user start address */
	movl	8+S_ARG1,%edi		/* get kernel destination address */
	movl	8+S_ARG2,%edx		/* get count */

	lea	0(%esi,%edx),%eax	/* get user end address + 1 */

	movl	$CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx		/* get active thread */
	movl	TH_TOP_ACT(%ecx),%ecx	/* get thread->top_act */
	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
	cmpl	EXT(kernel_pmap), %ecx
	jne	0f
	mov	%ds,%cx			/* kernel data segment access */
	jmp	1f
0:
	movl	$USER_DS,%cx		/* user data segment access */
1:
	mov	%cx,%fs
	xorl	%eax,%eax		/* clear %eax (byte loads below */
					/* only touch %al) */
	cmpl	$0,%edx			/* zero-length buffer: */
	je	4f			/* nothing to copy, succeed */
2:
	RECOVERY_SECTION
	RECOVER(copystr_fail)		/* copy bytes... */
	movb	%fs:(%esi),%eax		/* NOTE(review): byte op spelled with */
					/* %eax; assembler narrows to %al -- */
					/* confirm */
	incl	%esi
	testl	%edi,%edi		/* if kernel address is ... */
	jz	3f			/* not NULL */
	movb	%eax,(%edi)		/* copy the byte */
	incl	%edi
3:
	decl	%edx
	je	5f			/* count exhausted, no NUL: too long */
	cmpl	$0,%eax
	jne	2b			/* .. a NUL found? */
	jmp	4f
5:
	movl	$ENAMETOOLONG,%eax	/* String is too long.. */
	jmp	6f			/* keep the error code (previously */
					/* fell through into 4: below, which */
					/* cleared it and reported success) */
4:
	xorl	%eax,%eax		/* return zero for success */
6:
	movl	8+S_ARG3,%edi		/* get OUT len ptr */
	cmpl	$0,%edi
	jz	copystr_ret		/* if null, just return */
	subl	8+S_ARG0,%esi
	movl	%esi,(%edi)		/* else set OUT arg to xfer len */
copystr_ret:
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copystr_fail:
	movl	$EFAULT,%eax		/* return error for failure */
	jmp	copy_ret		/* pop frame, reload %ds, and return */
2087
2088/*
2089 * Copy to user address space.
2090 * arg0: kernel address
2091 * arg1: user address
2092 * arg2: byte count
2093 */
2094Entry(copyoutmsg)
2095ENTRY(copyout)
2096 pushl %esi
2097 pushl %edi /* save registers */
2098 pushl %ebx
2099
2100 movl 12+S_ARG0,%esi /* get kernel start address */
2101 movl 12+S_ARG1,%edi /* get user start address */
2102 movl 12+S_ARG2,%edx /* get count */
2103
2104 leal 0(%edi,%edx),%eax /* get user end address + 1 */
2105
2106 movl $CPD_ACTIVE_THREAD,%ecx
2107 movl %gs:(%ecx),%ecx /* get active thread */
2108 movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */
2109 movl ACT_MAP(%ecx),%ecx /* get act->map */
2110 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
2111 cmpl EXT(kernel_pmap), %ecx
2112 jne 0f
2113 mov %ds,%cx /* else kernel data segment access */
2114 jmp 1f
21150:
2116 movl $USER_DS,%cx
21171:
2118 mov %cx,%es
2119
2120/*
2121 * Check whether user address space is writable
2122 * before writing to it - hardware is broken.
2123 *
2124 * Skip check if "user" address is really in
2125 * kernel space (i.e., if it's in a kernel-loaded
2126 * task).
2127 *
2128 * Register usage:
2129 * esi/edi source/dest pointers for rep/mov
2130 * ecx counter for rep/mov
2131 * edx counts down from 3rd arg
2132 * eax count of bytes for each (partial) page copy
2133 * ebx shadows edi, used to adjust edx
2134 */
2135 movl %edi,%ebx /* copy edi for syncing up */
2136copyout_retry:
2137 /* if restarting after a partial copy, put edx back in sync, */
2138 addl %ebx,%edx /* edx -= (edi - ebx); */
2139 subl %edi,%edx /
2140 movl %edi,%ebx /* ebx = edi; */
2141
2142 mov %es,%cx
2143 cmpl $USER_DS,%cx /* If kernel data segment */
2144 jnz 0f /* skip check */
2145
2146 cmpb $(CPUID_FAMILY_386), EXT(cpuid_family)
2147 ja 0f
2148
2149 movl %cr3,%ecx /* point to page directory */
2150#if NCPUS > 1
2151 andl $(~0x7), %ecx /* remove cpu number */
2152#endif /* NCPUS > 1 && AT386 */
2153 movl %edi,%eax /* get page directory bits */
2154 shrl $(PDESHIFT),%eax /* from user address */
2155 movl KERNELBASE(%ecx,%eax,4),%ecx
2156 /* get page directory pointer */
2157 testl $(PTE_V),%ecx /* present? */
2158 jz 0f /* if not, fault is OK */
2159 andl $(PTE_PFN),%ecx /* isolate page frame address */
2160 movl %edi,%eax /* get page table bits */
2161 shrl $(PTESHIFT),%eax
2162 andl $(PTEMASK),%eax /* from user address */
2163 leal KERNELBASE(%ecx,%eax,4),%ecx
2164 /* point to page table entry */
2165 movl (%ecx),%eax /* get it */
2166 testl $(PTE_V),%eax /* present? */
2167 jz 0f /* if not, fault is OK */
2168 testl $(PTE_W),%eax /* writable? */
2169 jnz 0f /* OK if so */
2170/*
2171 * Not writable - must fake a fault. Turn off access to the page.
2172 */
2173 andl $(PTE_INVALID),(%ecx) /* turn off valid bit */
2174 movl %cr3,%eax /* invalidate TLB */
2175 movl %eax,%cr3
21760:
2177/*
2178 * Copy only what fits on the current destination page.
2179 * Check for write-fault again on the next page.
2180 */
2181 leal NBPG(%edi),%eax /* point to */
2182 andl $(-NBPG),%eax /* start of next page */
2183 subl %edi,%eax /* get number of bytes to that point */
2184 cmpl %edx,%eax /* bigger than count? */
2185 jle 1f /* if so, */
2186 movl %edx,%eax /* use count */
21871:
2188 cld /* count up */
2189 movl %eax,%ecx /* move by longwords first */
2190 shrl $2,%ecx
2191 RECOVERY_SECTION
2192 RECOVER(copyout_fail)
2193 RETRY_SECTION
2194 RETRY(copyout_retry)
2195 rep
2196 movsl
2197 movl %eax,%ecx /* now move remaining bytes */
2198 andl $3,%ecx
2199 RECOVERY_SECTION
2200 RECOVER(copyout_fail)
2201 RETRY_SECTION
2202 RETRY(copyout_retry)
2203 rep
2204 movsb /* move */
2205 movl %edi,%ebx /* copy edi for syncing up */
2206 subl %eax,%edx /* and decrement count */
2207 jg copyout_retry /* restart on next page if not done */
2208 xorl %eax,%eax /* return 0 for success */
2209copyout_ret:
2210 mov %ss,%di /* restore kernel segment */
2211 mov %di,%es
2212
2213 popl %ebx
2214 popl %edi /* restore registers */
2215 popl %esi
2216 ret /* and return */
2217
2218copyout_fail:
2219 movl $EFAULT,%eax /* return error for failure */
2220 jmp copyout_ret /* pop frame and return */
2221
2222/*
2223 * FPU routines.
2224 */
2225
2226/*
2227 * Initialize FPU.
2228 */
2229ENTRY(_fninit)
2230 fninit
2231 ret
2232
2233/*
2234 * Read control word
2235 */
2236ENTRY(_fstcw)
2237 pushl %eax /* get stack space */
2238 fstcw (%esp)
2239 popl %eax
2240 ret
2241
2242/*
2243 * Set control word
2244 */
2245ENTRY(_fldcw)
2246 fldcw 4(%esp)
2247 ret
2248
2249/*
2250 * Read status word
2251 */
2252ENTRY(_fnstsw)
2253 xor %eax,%eax /* clear high 16 bits of eax */
2254 fnstsw %ax /* read FP status */
2255 ret
2256
2257/*
2258 * Clear FPU exceptions
2259 */
2260ENTRY(_fnclex)
2261 fnclex
2262 ret
2263
2264/*
2265 * Clear task-switched flag.
2266 */
2267ENTRY(_clts)
2268 clts
2269 ret
2270
2271/*
2272 * Save complete FPU state. Save error for later.
2273 */
2274ENTRY(_fpsave)
2275 movl 4(%esp),%eax /* get save area pointer */
2276 fnsave (%eax) /* save complete state, including */
2277 /* errors */
2278 ret
2279
2280/*
2281 * Restore FPU state.
2282 */
2283ENTRY(_fprestore)
2284 movl 4(%esp),%eax /* get save area pointer */
2285 frstor (%eax) /* restore complete state */
2286 ret
2287
2288/*
2289 * Set cr3
2290 */
2291ENTRY(set_cr3)
2292#if NCPUS > 1
2293 CPU_NUMBER(%eax)
2294 orl 4(%esp), %eax
2295#else /* NCPUS > 1 && AT386 */
2296 movl 4(%esp),%eax /* get new cr3 value */
2297#endif /* NCPUS > 1 && AT386 */
2298 /*
2299 * Don't set PDBR to a new value (hence invalidating the
2300 * "paging cache") if the new value matches the current one.
2301 */
2302 movl %cr3,%edx /* get current cr3 value */
2303 cmpl %eax,%edx
2304 je 0f /* if two are equal, don't set */
2305 movl %eax,%cr3 /* load it (and flush cache) */
23060:
2307 ret
2308
2309/*
2310 * Read cr3
2311 */
2312ENTRY(get_cr3)
2313 movl %cr3,%eax
2314#if NCPUS > 1
2315 andl $(~0x7), %eax /* remove cpu number */
2316#endif /* NCPUS > 1 && AT386 */
2317 ret
2318
2319/*
2320 * Flush TLB
2321 */
2322ENTRY(flush_tlb)
2323 movl %cr3,%eax /* flush tlb by reloading CR3 */
2324 movl %eax,%cr3 /* with itself */
2325 ret
2326
2327/*
2328 * Read cr2
2329 */
2330ENTRY(get_cr2)
2331 movl %cr2,%eax
2332 ret
2333
2334/*
2335 * Read cr4
2336 */
2337ENTRY(get_cr4)
2338 .byte 0x0f,0x20,0xe0 /* movl %cr4, %eax */
2339 ret
2340
2341/*
2342 * Write cr4
2343 */
2344ENTRY(set_cr4)
2345 movl 4(%esp), %eax
2346 .byte 0x0f,0x22,0xe0 /* movl %eax, %cr4 */
2347 ret
2348
2349/*
2350 * Read ldtr
2351 */
2352Entry(get_ldt)
2353 xorl %eax,%eax
2354 sldt %ax
2355 ret
2356
2357/*
2358 * Set ldtr
2359 */
2360Entry(set_ldt)
2361 lldt 4(%esp)
2362 ret
2363
2364/*
2365 * Read task register.
2366 */
2367ENTRY(get_tr)
2368 xorl %eax,%eax
2369 str %ax
2370 ret
2371
2372/*
2373 * Set task register. Also clears busy bit of task descriptor.
2374 */
2375ENTRY(set_tr)
2376 movl S_ARG0,%eax /* get task segment number */
2377 subl $8,%esp /* push space for SGDT */
2378 sgdt 2(%esp) /* store GDT limit and base (linear) */
2379 movl 4(%esp),%edx /* address GDT */
2380 movb $(K_TSS),5(%edx,%eax) /* fix access byte in task descriptor */
2381 ltr %ax /* load task register */
2382 addl $8,%esp /* clear stack */
2383 ret /* and return */
2384
2385/*
2386 * Set task-switched flag.
2387 */
2388ENTRY(_setts)
2389 movl %cr0,%eax /* get cr0 */
2390 orl $(CR0_TS),%eax /* or in TS bit */
2391 movl %eax,%cr0 /* set cr0 */
2392 ret
2393
2394/*
2395 * io register must not be used on slaves (no AT bus)
2396 */
2397#define ILL_ON_SLAVE
2398
2399
2400#if MACH_ASSERT
2401
2402#define ARG0 B_ARG0
2403#define ARG1 B_ARG1
2404#define ARG2 B_ARG2
2405#define PUSH_FRAME FRAME
2406#define POP_FRAME EMARF
2407
2408#else /* MACH_ASSERT */
2409
2410#define ARG0 S_ARG0
2411#define ARG1 S_ARG1
2412#define ARG2 S_ARG2
2413#define PUSH_FRAME
2414#define POP_FRAME
2415
2416#endif /* MACH_ASSERT */
2417
2418
2419#if MACH_KDB || MACH_ASSERT
2420
2421/*
2422 * Following routines are also defined as macros in i386/pio.h
 * Compile them when MACH_KDB is configured so that they
2424 * can be invoked from the debugger.
2425 */
2426
2427/*
2428 * void outb(unsigned char *io_port,
2429 * unsigned char byte)
2430 *
2431 * Output a byte to an IO port.
2432 */
2433ENTRY(outb)
2434 PUSH_FRAME
2435 ILL_ON_SLAVE
2436 movl ARG0,%edx /* IO port address */
2437 movl ARG1,%eax /* data to output */
2438 outb %al,%dx /* send it out */
2439 POP_FRAME
2440 ret
2441
2442/*
2443 * unsigned char inb(unsigned char *io_port)
2444 *
2445 * Input a byte from an IO port.
2446 */
2447ENTRY(inb)
2448 PUSH_FRAME
2449 ILL_ON_SLAVE
2450 movl ARG0,%edx /* IO port address */
2451 xor %eax,%eax /* clear high bits of register */
2452 inb %dx,%al /* get the byte */
2453 POP_FRAME
2454 ret
2455
2456/*
2457 * void outw(unsigned short *io_port,
2458 * unsigned short word)
2459 *
2460 * Output a word to an IO port.
2461 */
2462ENTRY(outw)
2463 PUSH_FRAME
2464 ILL_ON_SLAVE
2465 movl ARG0,%edx /* IO port address */
2466 movl ARG1,%eax /* data to output */
2467 outw %ax,%dx /* send it out */
2468 POP_FRAME
2469 ret
2470
2471/*
2472 * unsigned short inw(unsigned short *io_port)
2473 *
2474 * Input a word from an IO port.
2475 */
2476ENTRY(inw)
2477 PUSH_FRAME
2478 ILL_ON_SLAVE
2479 movl ARG0,%edx /* IO port address */
2480 xor %eax,%eax /* clear high bits of register */
2481 inw %dx,%ax /* get the word */
2482 POP_FRAME
2483 ret
2484
2485/*
2486 * void outl(unsigned int *io_port,
2487 * unsigned int byte)
2488 *
2489 * Output an int to an IO port.
2490 */
2491ENTRY(outl)
2492 PUSH_FRAME
2493 ILL_ON_SLAVE
2494 movl ARG0,%edx /* IO port address*/
2495 movl ARG1,%eax /* data to output */
2496 outl %eax,%dx /* send it out */
2497 POP_FRAME
2498 ret
2499
2500/*
2501 * unsigned int inl(unsigned int *io_port)
2502 *
2503 * Input an int from an IO port.
2504 */
2505ENTRY(inl)
2506 PUSH_FRAME
2507 ILL_ON_SLAVE
2508 movl ARG0,%edx /* IO port address */
2509 inl %dx,%eax /* get the int */
2510 POP_FRAME
2511 ret
2512
2513#endif /* MACH_KDB || MACH_ASSERT*/
2514
2515/*
2516 * void loutb(unsigned byte *io_port,
2517 * unsigned byte *data,
2518 * unsigned int count)
2519 *
2520 * Output an array of bytes to an IO port.
2521 */
2522ENTRY(loutb)
2523ENTRY(outsb)
2524 PUSH_FRAME
2525 ILL_ON_SLAVE
2526 movl %esi,%eax /* save register */
2527 movl ARG0,%edx /* get io port number */
2528 movl ARG1,%esi /* get data address */
2529 movl ARG2,%ecx /* get count */
2530 cld /* count up */
2531 rep
2532 outsb /* output */
2533 movl %eax,%esi /* restore register */
2534 POP_FRAME
2535 ret
2536
2537
2538/*
2539 * void loutw(unsigned short *io_port,
2540 * unsigned short *data,
2541 * unsigned int count)
2542 *
2543 * Output an array of shorts to an IO port.
2544 */
2545ENTRY(loutw)
2546ENTRY(outsw)
2547 PUSH_FRAME
2548 ILL_ON_SLAVE
2549 movl %esi,%eax /* save register */
2550 movl ARG0,%edx /* get io port number */
2551 movl ARG1,%esi /* get data address */
2552 movl ARG2,%ecx /* get count */
2553 cld /* count up */
2554 rep
2555 outsw /* output */
2556 movl %eax,%esi /* restore register */
2557 POP_FRAME
2558 ret
2559
2560/*
2561 * void loutw(unsigned short io_port,
2562 * unsigned int *data,
2563 * unsigned int count)
2564 *
2565 * Output an array of longs to an IO port.
2566 */
2567ENTRY(loutl)
2568ENTRY(outsl)
2569 PUSH_FRAME
2570 ILL_ON_SLAVE
2571 movl %esi,%eax /* save register */
2572 movl ARG0,%edx /* get io port number */
2573 movl ARG1,%esi /* get data address */
2574 movl ARG2,%ecx /* get count */
2575 cld /* count up */
2576 rep
2577 outsl /* output */
2578 movl %eax,%esi /* restore register */
2579 POP_FRAME
2580 ret
2581
2582
2583/*
2584 * void linb(unsigned char *io_port,
2585 * unsigned char *data,
2586 * unsigned int count)
2587 *
2588 * Input an array of bytes from an IO port.
2589 */
2590ENTRY(linb)
2591ENTRY(insb)
2592 PUSH_FRAME
2593 ILL_ON_SLAVE
2594 movl %edi,%eax /* save register */
2595 movl ARG0,%edx /* get io port number */
2596 movl ARG1,%edi /* get data address */
2597 movl ARG2,%ecx /* get count */
2598 cld /* count up */
2599 rep
2600 insb /* input */
2601 movl %eax,%edi /* restore register */
2602 POP_FRAME
2603 ret
2604
2605
2606/*
2607 * void linw(unsigned short *io_port,
2608 * unsigned short *data,
2609 * unsigned int count)
2610 *
2611 * Input an array of shorts from an IO port.
2612 */
2613ENTRY(linw)
2614ENTRY(insw)
2615 PUSH_FRAME
2616 ILL_ON_SLAVE
2617 movl %edi,%eax /* save register */
2618 movl ARG0,%edx /* get io port number */
2619 movl ARG1,%edi /* get data address */
2620 movl ARG2,%ecx /* get count */
2621 cld /* count up */
2622 rep
2623 insw /* input */
2624 movl %eax,%edi /* restore register */
2625 POP_FRAME
2626 ret
2627
2628
2629/*
2630 * void linl(unsigned short io_port,
2631 * unsigned int *data,
2632 * unsigned int count)
2633 *
2634 * Input an array of longs from an IO port.
2635 */
2636ENTRY(linl)
2637ENTRY(insl)
2638 PUSH_FRAME
2639 ILL_ON_SLAVE
2640 movl %edi,%eax /* save register */
2641 movl ARG0,%edx /* get io port number */
2642 movl ARG1,%edi /* get data address */
2643 movl ARG2,%ecx /* get count */
2644 cld /* count up */
2645 rep
2646 insl /* input */
2647 movl %eax,%edi /* restore register */
2648 POP_FRAME
2649 ret
2650
2651
2652/*
2653 * int inst_fetch(int eip, int cs);
2654 *
2655 * Fetch instruction byte. Return -1 if invalid address.
2656 */
2657 .globl EXT(inst_fetch)
2658LEXT(inst_fetch)
2659 movl S_ARG1, %eax /* get segment */
2660 movw %ax,%fs /* into FS */
2661 movl S_ARG0, %eax /* get offset */
2662 RETRY_SECTION
2663 RETRY(EXT(inst_fetch)) /* re-load FS on retry */
2664 RECOVERY_SECTION
2665 RECOVER(EXT(inst_fetch_fault))
2666 movzbl %fs:(%eax),%eax /* load instruction byte */
2667 ret
2668
2669LEXT(inst_fetch_fault)
2670 movl $-1,%eax /* return -1 if error */
2671 ret
2672
2673
#if	MACH_KDP
/*
 * kdp_copy_kmem(char *src, char *dst, int count)
 *
 * Similar to copyin except that both addresses are kernel addresses.
 * Returns the number of bytes copied (the full count on success),
 * or 0 if the source range wraps or a fault occurs — note no
 * partial count is reported on a mid-copy fault.
 */

ENTRY(kdp_copy_kmem)
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get kernel start address */
	movl	8+S_ARG1,%edi		/* get kernel destination address */

	movl	8+S_ARG2,%edx		/* get count */

	lea	0(%esi,%edx),%eax	/* get kernel end address + 1 */

	cmpl	%esi,%eax
	jb	kdp_vm_read_fail	/* fail if wrap-around */
	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(kdp_vm_read_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(kdp_vm_read_fail)
	rep
	movsb
kdp_vm_read_done:
	movl	8+S_ARG2,%edx		/* get count */
	subl	%ecx,%edx		/* Return number of bytes transferred */
	movl	%edx,%eax

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

kdp_vm_read_fail:
	xorl	%eax,%eax		/* didn't copy a thing. */

	popl	%edi
	popl	%esi
	ret
#endif
2723
2724
2725/*
2726 * Done with recovery and retry tables.
2727 */
2728 RECOVERY_SECTION
2729 RECOVER_TABLE_END
2730 RETRY_SECTION
2731 RETRY_TABLE_END
2732
2733
2734
/* Return the debug status register DR6 in %eax. */
ENTRY(dr6)
	movl	%db6, %eax
	ret
2738
/* dr<i>(address, type, len, persistence)
 *
 * Program hardware breakpoint register DR<i> and the matching
 * control fields in DR7.  Each entry stores the address in
 * dr_addr[i] and DR<i>, loads %ecx with 2*i (the bit offset of
 * this breakpoint's enable field in DR7), then falls into the
 * shared tail at 0:.  The old and new DR7 values are mirrored
 * into dr_addr[4..7] for debugging.  Returns the new DR7 in %eax.
 */
ENTRY(dr0)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)
	movl	%eax, %db0
	movl	$0, %ecx
	jmp	0f
ENTRY(dr1)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+1*4
	movl	%eax, %db1
	movl	$2, %ecx
	jmp	0f
ENTRY(dr2)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+2*4
	movl	%eax, %db2
	movl	$4, %ecx
	jmp	0f

ENTRY(dr3)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+3*4
	movl	%eax, %db3
	movl	$6, %ecx
	/* falls through to the shared tail */

0:
	pushl	%ebp			/* frame so B_ARGn offsets work */
	movl	%esp, %ebp

	movl	%db7, %edx
	movl	%edx,EXT(dr_addr)+4*4	/* record old DR7 */
	andl	dr_msk(,%ecx,2),%edx	/* clear out new entry */
	movl	%edx,EXT(dr_addr)+5*4	/* record masked DR7 */
	movzbl	B_ARG3, %eax		/* persistence -> L/G enable bits */
	andb	$3, %al
	shll	%cl, %eax		/* at bit 2*i */
	orl	%eax, %edx

	movzbl	B_ARG1, %eax		/* type -> R/W bits */
	andb	$3, %al
	addl	$0x10, %ecx		/* R/W field at bit 16+4*i */
					/* (was addb $0x10,%ecx: byte suffix */
					/* with 32-bit register is invalid) */
	shll	%cl, %eax
	orl	%eax, %edx

	movzbl	B_ARG2, %eax		/* len -> LEN bits */
	andb	$3, %al
	addl	$0x2, %ecx		/* LEN field at bit 18+4*i */
					/* (was addb $0x2,%ecx: same fix) */
	shll	%cl, %eax
	orl	%eax, %edx

	movl	%edx, %db7		/* install new DR7 */
	movl	%edx,EXT(dr_addr)+7*4	/* record it */
	movl	%edx, %eax		/* and return it */
	leave
	ret
2796
	.data

DATA(preemptable)	/* Not on an MP (makes cpu_number() usage unsafe) */
#if	MACH_RT && (NCPUS == 1)
	.long	0	/* FIXME -- Currently disabled */
#else
	.long	0	/* FIX ME -- Currently disabled */
#endif	/* MACH_RT && (NCPUS == 1) */

/*
 * dr_msk[i]: DR7 mask that clears breakpoint i's enable bits
 * (bits 2i,2i+1) and its R/W+LEN fields (bits 16+4i..19+4i).
 * Indexed as dr_msk(,%ecx,2) with %ecx = 2*i from dr0..dr3.
 */
dr_msk:
	.long	~0x000f0003
	.long	~0x00f0000c
	.long	~0x0f000030
	.long	~0xf00000c0
/*
 * dr_addr[0..3]: addresses programmed into DR0..DR3;
 * dr_addr[4..7]: DR7 snapshots recorded by the dr<i> routines.
 */
ENTRY(dr_addr)
	.long	0,0,0,0
	.long	0,0,0,0
	.text
2815
2816/*
2817 * Determine cpu model and set global cpuid_xxx variables
2818 *
2819 * Relies on 386 eflags bit 18 (AC) always being zero & 486 preserving it.
2820 * Relies on 486 eflags bit 21 (ID) always being zero & 586 preserving it.
2821 * Relies on CPUID instruction for next x86 generations
2822 * (assumes cpuid-family-homogenous MPs; else convert to per-cpu array)
2823 */
2824
2825ENTRY(set_cpu_model)
2826 FRAME
2827 pushl %ebx /* save ebx */
2828 andl $~0x3,%esp /* Align stack to avoid AC fault */
2829 pushfl /* push EFLAGS */
2830 popl %eax /* pop into eax */
2831 movl %eax,%ecx /* Save original EFLAGS */
2832 xorl $(EFL_AC+EFL_ID),%eax /* toggle ID,AC bits */
2833 pushl %eax /* push new value */
2834 popfl /* through the EFLAGS register */
2835 pushfl /* and back */
2836 popl %eax /* into eax */
2837 movb $(CPUID_FAMILY_386),EXT(cpuid_family)
2838 pushl %ecx /* push original EFLAGS */
2839 popfl /* restore EFLAGS */
2840 xorl %ecx,%eax /* see what changed */
2841 testl $EFL_AC,%eax /* test AC bit */
2842 jz 0f /* if AC toggled (486 or higher) */
2843
2844 movb $(CPUID_FAMILY_486),EXT(cpuid_family)
2845 testl $EFL_ID,%eax /* test ID bit */
2846 jz 0f /* if ID toggled use cpuid instruction */
2847
2848 xorl %eax,%eax /* get vendor identification string */
2849 .word 0xA20F /* cpuid instruction */
2850 movl %eax,EXT(cpuid_value) /* Store high value */
2851 movl %ebx,EXT(cpuid_vid) /* Store byte 0-3 of Vendor ID */
2852 movl %edx,EXT(cpuid_vid)+4 /* Store byte 4-7 of Vendor ID */
2853 movl %ecx,EXT(cpuid_vid)+8 /* Store byte 8-B of Vendor ID */
2854 movl $1,%eax /* get processor signature */
2855 .word 0xA20F /* cpuid instruction */
2856 movl %edx,EXT(cpuid_feature) /* Store feature flags */
2857 movl %eax,%ecx /* Save original signature */
2858 andb $0xF,%al /* Get Stepping ID */
2859 movb %al,EXT(cpuid_stepping) /* Save Stepping ID */
2860 movl %ecx,%eax /* Get original signature */
2861 shrl $4,%eax /* Shift Stepping ID */
2862 movl %eax,%ecx /* Save original signature */
2863 andb $0xF,%al /* Get Model */
2864 movb %al,EXT(cpuid_model) /* Save Model */
2865 movl %ecx,%eax /* Get original signature */
2866 shrl $4,%eax /* Shift Stepping ID */
2867 movl %eax,%ecx /* Save original signature */
2868 andb $0xF,%al /* Get Family */
2869 movb %al,EXT(cpuid_family) /* Save Family */
2870 movl %ecx,%eax /* Get original signature */
2871 shrl $4,%eax /* Shift Stepping ID */
2872 andb $0x3,%al /* Get Type */
2873 movb %al,EXT(cpuid_type) /* Save Type */
2874
2875 movl EXT(cpuid_value),%eax /* Get high value */
2876 cmpl $2,%eax /* Test if processor configuration */
2877 jle 0f /* is present */
2878 movl $2,%eax /* get processor configuration */
2879 .word 0xA20F /* cpuid instruction */
2880 movl %eax,EXT(cpuid_cache) /* Store byte 0-3 of configuration */
2881 movl %ebx,EXT(cpuid_cache)+4 /* Store byte 4-7 of configuration */
2882 movl %ecx,EXT(cpuid_cache)+8 /* Store byte 8-B of configuration */
2883 movl %edx,EXT(cpuid_cache)+12 /* Store byte C-F of configuration */
28840:
2885 popl %ebx /* restore ebx */
2886 EMARF
2887 ret /* return */
2888
/* Return cr0 in %eax. */
ENTRY(get_cr0)
	movl	%cr0, %eax
	ret

/* Load cr0 from arg0.  Privileged; caller is responsible for
 * supplying a sane PE/PG/WP combination. */
ENTRY(set_cr0)
	movl	4(%esp), %eax
	movl	%eax, %cr0
	ret
2897
2898#ifndef SYMMETRY
2899
2900/*
2901 * ffs(mask)
2902 */
2903ENTRY(ffs)
2904 bsfl S_ARG0, %eax
2905 jz 0f
2906 incl %eax
2907 ret
29080: xorl %eax, %eax
2909 ret
2910
2911/*
2912 * cpu_shutdown()
2913 * Force reboot
2914 */
2915
2916null_idtr:
2917 .word 0
2918 .long 0
2919
2920Entry(cpu_shutdown)
2921 lidt null_idtr /* disable the interrupt handler */
2922 xor %ecx,%ecx /* generate a divide by zero */
2923 div %ecx,%eax /* reboot now */
2924 ret /* this will "never" be executed */
2925
2926#endif /* SYMMETRY */
2927
2928
2929/*
2930 * setbit(int bitno, int *s) - set bit in bit string
2931 */
2932ENTRY(setbit)
2933 movl S_ARG0, %ecx /* bit number */
2934 movl S_ARG1, %eax /* address */
2935 btsl %ecx, (%eax) /* set bit */
2936 ret
2937
2938/*
2939 * clrbit(int bitno, int *s) - clear bit in bit string
2940 */
2941ENTRY(clrbit)
2942 movl S_ARG0, %ecx /* bit number */
2943 movl S_ARG1, %eax /* address */
2944 btrl %ecx, (%eax) /* clear bit */
2945 ret
2946
2947/*
2948 * ffsbit(int *s) - find first set bit in bit string
2949 */
2950ENTRY(ffsbit)
2951 movl S_ARG0, %ecx /* address */
2952 movl $0, %edx /* base offset */
29530:
2954 bsfl (%ecx), %eax /* check argument bits */
2955 jnz 1f /* found bit, return */
2956 addl $4, %ecx /* increment address */
2957 addl $32, %edx /* increment offset */
2958 jmp 0b /* try again */
29591:
2960 addl %edx, %eax /* return offset */
2961 ret
2962
2963/*
2964 * testbit(int nr, volatile void *array)
2965 *
2966 * Test to see if the bit is set within the bit string
2967 */
2968
2969ENTRY(testbit)
2970 movl S_ARG0,%eax /* Get the bit to test */
2971 movl S_ARG1,%ecx /* get the array string */
2972 btl %eax,(%ecx)
2973 sbbl %eax,%eax
2974 ret
2975
/* Return the caller's saved return address.  Assumes the caller
 * built a standard %ebp frame — reads 4(%ebp), not this leaf's
 * own return slot. */
ENTRY(get_pc)
	movl	4(%ebp),%eax
	ret
2979
#if	ETAP

/* Return the caller's return address (same caveat as get_pc:
 * requires the caller's %ebp frame). */
ENTRY(etap_get_pc)
	movl	4(%ebp), %eax		/* fetch pc of caller */
	ret

/* Convert a (seconds, nanoseconds) pair to a 64-bit nanosecond
 * count: edx:eax = arg0 * 1e9 + arg1. */
ENTRY(tvals_to_etap)
	movl	S_ARG0, %eax
	movl	$1000000000, %ecx
	mull	%ecx			/* edx:eax = seconds * 1e9 */
	addl	S_ARG1, %eax
	adc	$0, %edx		/* propagate carry into high word */
	ret

/* etap_time_t
 * etap_time_sub(etap_time_t stop, etap_time_t start)
 *
 * 64bit subtract, returns stop - start in edx:eax.
 */
ENTRY(etap_time_sub)
	movl	S_ARG0, %eax		/* stop.low */
	movl	S_ARG1, %edx		/* stop.hi */
	subl	S_ARG2, %eax		/* stop.lo - start.lo */
	sbbl	S_ARG3, %edx		/* stop.hi - start.hi */
	ret

#endif	/* ETAP */
3007
#if	NCPUS > 1

/* Builds a frame and deliberately falls through into jail below
 * (no ret): used to park a thread for MP debugging. */
ENTRY(minsecurity)
	pushl	%ebp
	movl	%esp,%ebp
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 * Spins forever jumping to itself.
 */
ENTRY(jail)
	jmp	EXT(jail)

#endif	/* NCPUS > 1 */
3021
3022/*
3023 * delay(microseconds)
3024 */
3025
3026ENTRY(delay)
3027 movl 4(%esp),%eax
3028 testl %eax, %eax
3029 jle 3f
3030 movl EXT(delaycount), %ecx
30311:
3032 movl %ecx, %edx
30332:
3034 decl %edx
3035 jne 2b
3036 decl %eax
3037 jne 1b
30383:
3039 ret
3040
3041/*
3042 * unsigned int
3043 * div_scale(unsigned int dividend,
3044 * unsigned int divisor,
3045 * unsigned int *scale)
3046 *
3047 * This function returns (dividend << *scale) //divisor where *scale
3048 * is the largest possible value before overflow. This is used in
3049 * computation where precision must be achieved in order to avoid
3050 * floating point usage.
3051 *
3052 * Algorithm:
3053 * *scale = 0;
3054 * while (((dividend >> *scale) >= divisor))
3055 * (*scale)++;
3056 * *scale = 32 - *scale;
3057 * return ((dividend << *scale) / divisor);
3058 */
3059ENTRY(div_scale)
3060 PUSH_FRAME
3061 xorl %ecx, %ecx /* *scale = 0 */
3062 xorl %eax, %eax
3063 movl ARG0, %edx /* get dividend */
30640:
3065 cmpl ARG1, %edx /* if (divisor > dividend) */
3066 jle 1f /* goto 1f */
3067 addl $1, %ecx /* (*scale)++ */
3068 shrdl $1, %edx, %eax /* dividend >> 1 */
3069 shrl $1, %edx /* dividend >> 1 */
3070 jmp 0b /* goto 0b */
30711:
3072 divl ARG1 /* (dividend << (32 - *scale)) / divisor */
3073 movl ARG2, %edx /* get scale */
3074 movl $32, (%edx) /* *scale = 32 */
3075 subl %ecx, (%edx) /* *scale -= %ecx */
3076 POP_FRAME
3077 ret
3078
3079/*
3080 * unsigned int
3081 * mul_scale(unsigned int multiplicand,
3082 * unsigned int multiplier,
3083 * unsigned int *scale)
3084 *
3085 * This function returns ((multiplicand * multiplier) >> *scale) where
3086 * scale is the largest possible value before overflow. This is used in
3087 * computation where precision must be achieved in order to avoid
3088 * floating point usage.
3089 *
3090 * Algorithm:
3091 * *scale = 0;
3092 * while (overflow((multiplicand * multiplier) >> *scale))
3093 * (*scale)++;
3094 * return ((multiplicand * multiplier) >> *scale);
3095 */
3096ENTRY(mul_scale)
3097 PUSH_FRAME
3098 xorl %ecx, %ecx /* *scale = 0 */
3099 movl ARG0, %eax /* get multiplicand */
3100 mull ARG1 /* multiplicand * multiplier */
31010:
3102 cmpl $0, %edx /* if (!overflow()) */
3103 je 1f /* goto 1 */
3104 addl $1, %ecx /* (*scale)++ */
3105 shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */
3106 shrl $1, %edx /* (multiplicand * multiplier) >> 1 */
3107 jmp 0b
31081:
3109 movl ARG2, %edx /* get scale */
3110 movl %ecx, (%edx) /* set *scale */
3111 POP_FRAME
3112 ret
3113
#if	NCPUS > 1
/* Return the executing cpu's number in %eax (C-callable wrapper
 * around the CPU_NUMBER macro). */
ENTRY(_cpu_number)
	CPU_NUMBER(%eax)
	ret
#endif	/* NCPUS > 1 */
3119
3120#ifdef MACH_BSD
3121/*
3122 * BSD System call entry point..
3123 */
3124
/*
 * Entered from user mode with the syscall number in %eax.
 * Builds a full saved-state frame on the PCB stack, switches to
 * this cpu's kernel stack, and calls unix_syscall() with a
 * pointer to the saved registers.  Returns to user mode through
 * return_from_trap with the call's result in the saved %eax.
 */
Entry(trap_unix_syscall)
	pushf				/* save flags as soon as possible */
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$CPU_DATA,%dx
	mov	%dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places
 * (the pushf/eax/$0 pushes above landed them in the wrong
 * saved-state slots relative to the trap-frame layout).
 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

	CPU_NUMBER(%edx)
	TIME_TRAP_UENTRY

	negl	%eax			/* get system call number */
					/* NOTE(review): number arrives
					 * negated; presumably the shifted
					 * index below is historical — the
					 * handler takes only the regs ptr */
	shll	$4,%eax			/* manual indexing */

	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */

/*
 * Register use on entry:
 *	eax contains syscall number
 *	ebx contains user regs pointer
 */
	CAH(call_call)
	pushl	%ebx			/* Push the regs set onto stack */
	call	EXT(unix_syscall)
	popl	%ebx
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
3179
3180/*
3181 * Entry point for machdep system calls..
3182 */
3183
/*
 * Entered from user mode with the syscall number in %eax.
 * Identical in structure to trap_unix_syscall, but dispatches
 * to machdep_syscall().
 */
Entry(trap_machdep_syscall)
	pushf				/* save flags as soon as possible */
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$CPU_DATA,%dx
	mov	%dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places
 * (see trap_unix_syscall: the early pushes put them in the
 * wrong saved-state slots).
 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

	CPU_NUMBER(%edx)
	TIME_TRAP_UENTRY

	negl	%eax			/* get system call number */
	shll	$4,%eax			/* manual indexing */

	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */

/*
 * Register use on entry:
 *	eax contains syscall number
 *	ebx contains user regs pointer
 */
	CAH(call_call)
	pushl	%ebx
	call	EXT(machdep_syscall)
	popl	%ebx
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
3238
/*
 * Entered from user mode with the syscall number in %eax.
 * Identical in structure to trap_unix_syscall, but dispatches
 * to mach25_syscall() (Mach 2.5 compatibility).
 */
Entry(trap_mach25_syscall)
	pushf				/* save flags as soon as possible */
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$CPU_DATA,%dx
	mov	%dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places
 * (see trap_unix_syscall: the early pushes put them in the
 * wrong saved-state slots).
 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

	CPU_NUMBER(%edx)
	TIME_TRAP_UENTRY

	negl	%eax			/* get system call number */
	shll	$4,%eax			/* manual indexing */

	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */

/*
 * Register use on entry:
 *	eax contains syscall number
 *	ebx contains user regs pointer
 */
	CAH(call_call)
	pushl	%ebx
	call	EXT(mach25_syscall)
	popl	%ebx
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
3293
3294#endif