]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/locore.s
xnu-344.49.tar.gz
[apple/xnu.git] / osfmk / i386 / locore.s
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
43866e37 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
1c79356b 7 *
43866e37
A
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
43866e37
A
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
1c79356b
A
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28/*
29 * Mach Operating System
30 * Copyright (c) 1991,1990 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53
54#include <cpus.h>
55#include <etap.h>
56#include <etap_event_monitor.h>
57#include <mach_rt.h>
58#include <platforms.h>
59#include <mach_kdb.h>
60#include <mach_kgdb.h>
61#include <mach_kdp.h>
62#include <stat_time.h>
63#include <mach_assert.h>
64
65#include <sys/errno.h>
66#include <i386/asm.h>
67#include <i386/cpuid.h>
68#include <i386/eflags.h>
69#include <i386/proc_reg.h>
70#include <i386/trap.h>
71#include <assym.s>
72#include <mach/exception_types.h>
73
74#include <i386/AT386/mp/mp.h>
75
76#define PREEMPT_DEBUG_LOG 0
77
/*
 * ETEXT_ADDR: assembler operand naming the end-of-text address.
 * Mach-O exports `etext' as a variable holding the address (memory
 * operand); other formats make `etext' itself the address (immediate).
 */
78#if __MACHO__
79/* Under Mach-O, etext is a variable which contains
80 * the last text address
 */
82#define ETEXT_ADDR (EXT(etext))
83#else
84/* Under ELF and other non-Mach-O formats, the address of
85 * etext represents the last text address
 */
9bccf70c 87#define ETEXT_ADDR $ EXT(etext)
1c79356b
A
88#endif
89
90#if NCPUS > 1
91
/* CX(addr,reg): index a per-CPU array of 4-byte entries by CPU number. */
/* NOTE(review): in the NCPUS > 1 case CPU_NUMBER is presumably supplied */
/* by an included MP header — confirm; only the UP stub is defined here. */
92#define CX(addr,reg) addr(,reg,4)
93
94#else
95#define CPU_NUMBER(reg)
96#define CX(addr,reg) addr
97
98#endif /* NCPUS > 1 */
99
100 .text
101locore_start:
102
103/*
104 * Fault recovery.
105 */
106
#ifdef __MACHO__
#define RECOVERY_SECTION .section __VECTORS, __recover
#define RETRY_SECTION .section __VECTORS, __retries
#else
/*
 * Non-Mach-O object formats keep both tables in .text.
 * BUG FIX: this branch previously defined RECOVERY_SECTION twice and
 * never defined RETRY_SECTION, so non-Mach-O builds would fail at the
 * RETRY_SECTION use below (retry table allocation).
 */
#define RECOVERY_SECTION .text
#define RETRY_SECTION .text
#endif
114
/*
 * Fault-recovery and retry table construction.  Each RECOVER(addr) /
 * RETRY(addr) use emits a pair of 32-bit words { fault EIP, handler EIP }
 * into the corresponding table, where the fault EIP is the local 9:
 * label bound immediately after the macro expansion in .text.
 */
115#define RECOVER_TABLE_START \
116 .align 2 ; \
117 .globl EXT(recover_table) ;\
118LEXT(recover_table) ;\
119 .text
120
121#define RECOVER(addr) \
122 .align 2; \
123 .long 9f ;\
124 .long addr ;\
125 .text ;\
1269:
127
128#define RECOVER_TABLE_END \
129 .align 2 ;\
130 .globl EXT(recover_table_end) ;\
131LEXT(recover_table_end) ;\
132 .text
133
134/*
135 * Retry table for certain successful faults.
136 */
137#define RETRY_TABLE_START \
138 .align 3; \
139 .globl EXT(retry_table) ;\
140LEXT(retry_table) ;\
141 .text
142
143#define RETRY(addr) \
144 .align 3 ;\
145 .long 9f ;\
146 .long addr ;\
147 .text ;\
1489:
149
150#define RETRY_TABLE_END \
151 .align 3; \
152 .globl EXT(retry_table_end) ;\
153LEXT(retry_table_end) ;\
154 .text
155
156/*
157 * Allocate recovery and retry tables.
158 */
159 RECOVERY_SECTION
160 RECOVER_TABLE_START
161 RETRY_SECTION
162 RETRY_TABLE_START
163
164/*
165 * Timing routines.
166 */
/*
 * Trap/interrupt timing macros.  Under STAT_TIME (statistical timing)
 * they expand to nothing.  Otherwise they read a free-running
 * microsecond counter at VA_ETC, charge the elapsed time since the
 * per-CPU time stamp to the current timer's LOW_BITS (normalizing on
 * overflow), and switch the current timer between user/system/interrupt
 * timers as control crosses those boundaries.
 */
167#if STAT_TIME
168
169#define TIME_TRAP_UENTRY
170#define TIME_TRAP_UEXIT
171#define TIME_INT_ENTRY
172#define TIME_INT_EXIT
173
174#else /* microsecond timing */
175
176/*
177 * Microsecond timing.
178 * Assumes a free-running microsecond counter.
179 * no TIMER_MAX check needed.
 */
181
182/*
183 * There is only one current time-stamp per CPU, since only
184 * the time-stamp in the current timer is used.
185 * To save time, we allocate the current time-stamps here.
 */
187 .comm EXT(current_tstamp), 4*NCPUS
188
189/*
190 * Update time on user trap entry.
191 * 11 instructions (including cli on entry)
192 * Assumes CPU number in %edx.
193 * Uses %ebx, %ecx.
 */
195#define TIME_TRAP_UENTRY \
196 cli /* block interrupts */ ;\
197 movl VA_ETC,%ebx /* get timer value */ ;\
198 movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
199 movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
200 subl %ecx,%ebx /* elapsed = new-old */ ;\
201 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
202 addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
203 jns 0f /* if overflow, */ ;\
204 call timer_normalize /* normalize timer */ ;\
2050: addl $(TH_SYS_TIMER-TH_USER_TIMER),%ecx ;\
206 /* switch to sys timer */;\
207 movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
208 sti /* allow interrupts */
209
210/*
211 * update time on user trap exit.
212 * 10 instructions.
213 * Assumes CPU number in %edx.
214 * Uses %ebx, %ecx.
 */
216#define TIME_TRAP_UEXIT \
217 cli /* block interrupts */ ;\
218 movl VA_ETC,%ebx /* get timer */ ;\
219 movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
220 movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
221 subl %ecx,%ebx /* elapsed = new-old */ ;\
222 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
223 addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
224 jns 0f /* if overflow, */ ;\
225 call timer_normalize /* normalize timer */ ;\
2260: addl $(TH_USER_TIMER-TH_SYS_TIMER),%ecx ;\
227 /* switch to user timer */;\
228 movl %ecx,CX(EXT(current_timer),%edx) /* make it current */
229
230/*
231 * update time on interrupt entry.
232 * 9 instructions.
233 * Assumes CPU number in %edx.
234 * Leaves old timer in %ebx.
235 * Uses %ecx.
 */
237#define TIME_INT_ENTRY \
238 movl VA_ETC,%ecx /* get timer */ ;\
239 movl CX(EXT(current_tstamp),%edx),%ebx /* get old time stamp */;\
240 movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
241 subl %ebx,%ecx /* elapsed = new-old */ ;\
242 movl CX(EXT(current_timer),%edx),%ebx /* get current timer */;\
243 addl %ecx,LOW_BITS(%ebx) /* add to low bits */ ;\
244 leal CX(0,%edx),%ecx /* timer is 16 bytes */ ;\
245 lea CX(EXT(kernel_timer),%edx),%ecx /* get interrupt timer*/;\
246 movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
247
248/*
249 * update time on interrupt exit.
250 * 11 instructions
251 * Assumes CPU number in %edx, old timer in %ebx.
252 * Uses %eax, %ecx.
 */
254#define TIME_INT_EXIT \
255 movl VA_ETC,%eax /* get timer */ ;\
256 movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
257 movl %eax,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
258 subl %ecx,%eax /* elapsed = new-old */ ;\
259 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
260 addl %eax,LOW_BITS(%ecx) /* add to low bits */ ;\
261 jns 0f /* if overflow, */ ;\
262 call timer_normalize /* normalize timer */ ;\
2630: testb $0x80,LOW_BITS+3(%ebx) /* old timer overflow? */;\
264 jz 0f /* if no overflow, skip */ ;\
265 movl %ebx,%ecx /* get old timer */ ;\
266 call timer_normalize /* normalize timer */ ;\
2670: movl %ebx,CX(EXT(current_timer),%edx) /* set timer */
268
269
270/*
271 * Normalize timer in ecx.
272 * Preserves edx; clobbers eax.
 *
 * Folds the overflowed LOW_BITS of the timer at %ecx into HIGH_BITS and
 * HIGH_BITS_CHECK: divl computes %edx:%eax / timer_high_unit, leaving
 * the quotient (number of whole high units) in %eax and the remainder
 * (new low bits) in %edx.
 */
274 .align ALIGN
275timer_high_unit:
276 .long TIMER_HIGH_UNIT /* div has no immediate opnd */
277
278timer_normalize:
279 pushl %edx /* save registers */
280 pushl %eax
281 xorl %edx,%edx /* clear dividend high (%edx:%eax) */
282 movl LOW_BITS(%ecx),%eax /* get dividend low */
283 divl timer_high_unit,%eax /* quotient in eax */
284 /* remainder in edx */
285 addl %eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */
286 movl %edx,LOW_BITS(%ecx) /* remainder to low_bits */
287 addl %eax,HIGH_BITS(%ecx) /* add high_inc to high bits */
288 popl %eax /* restore register */
289 popl %edx
290 ret
291
292/*
293 * Switch to a new timer.
294 */
/*
 * timer_switch(new_timer): charge elapsed time to this CPU's current
 * timer, then install S_ARG0 as the new current timer.
 * NOTE(review): `subl %ecx,%eax' leaves old-stamp minus new-counter in
 * %eax (%eax -= %ecx), whereas the TIME_* macros above compute new-old;
 * verify the intended sign of "elapsed" here.
 */
295Entry(timer_switch)
296 CPU_NUMBER(%edx) /* get this CPU */
297 movl VA_ETC,%ecx /* get timer */
298 movl CX(EXT(current_tstamp),%edx),%eax /* get old time stamp */
299 movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */
300 subl %ecx,%eax /* elapsed = new - old */
301 movl CX(EXT(current_timer),%edx),%ecx /* get current timer */
302 addl %eax,LOW_BITS(%ecx) /* add to low bits */
303 jns 0f /* if overflow, */
304 call timer_normalize /* normalize timer */
3050:
306 movl S_ARG0,%ecx /* get new timer */
307 movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
308 ret
309
310/*
311 * Initialize the first timer for a CPU.
312 */
/*
 * start_timer(timer): record the current counter value as this CPU's
 * initial time stamp and install S_ARG0 as its current timer.
 */
313Entry(start_timer)
314 CPU_NUMBER(%edx) /* get this CPU */
315 movl VA_ETC,%ecx /* get timer */
316 movl %ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */
317 movl S_ARG0,%ecx /* get timer */
318 movl %ecx,CX(EXT(current_timer),%edx) /* set initial timer */
319 ret
320
321#endif /* accurate timing */
322
323/*
324 * Encapsulate the transfer of exception stack frames between a PCB
325 * and a thread stack. Since the whole point of these is to emulate
326 * a call or exception that changes privilege level, both macros
327 * assume that there is no user esp or ss stored in the source
328 * frame (because there was no change of privilege to generate them).
329 */
330
331/*
332 * Transfer a stack frame from a thread's user stack to its PCB.
333 * We assume the thread and stack addresses have been loaded into
334 * registers (our arguments).
335 *
336 * The macro overwrites edi, esi, ecx and whatever registers hold the
337 * thread and stack addresses (which can't be one of the above three).
338 * The thread address is overwritten with the address of its saved state
339 * (where the frame winds up).
340 *
341 * Must be called on kernel stack.
342 */
/*
 * Copy a trap frame of R_UESP bytes from the thread's stack (stkp) into
 * the PCB's saved-state area, then synthesize the saved user esp and ss
 * that a same-privilege trap did not push.  Clobbers %edi, %esi, %ecx;
 * leaves the saved-state address in `thread'.
 */
343#define FRAME_STACK_TO_PCB(thread, stkp) ;\
344 movl ACT_PCB(thread),thread /* get act`s PCB */ ;\
345 leal PCB_ISS(thread),%edi /* point to PCB`s saved state */;\
346 movl %edi,thread /* save for later */ ;\
347 movl stkp,%esi /* point to start of frame */ ;\
9bccf70c 348 movl $ R_UESP,%ecx ;\
1c79356b
A
349 sarl $2,%ecx /* word count for transfer */ ;\
350 cld /* we`re incrementing */ ;\
351 rep ;\
352 movsl /* transfer the frame */ ;\
9bccf70c 353 addl $ R_UESP,stkp /* derive true "user" esp */ ;\
1c79356b
A
354 movl stkp,R_UESP(thread) /* store in PCB */ ;\
355 movl $0,%ecx ;\
356 mov %ss,%cx /* get current ss */ ;\
357 movl %ecx,R_SS(thread) /* store in PCB */
358
359/*
360 * Transfer a stack frame from a thread's PCB to the stack pointed
361 * to by the PCB. We assume the thread address has been loaded into
362 * a register (our argument).
363 *
364 * The macro overwrites edi, esi, ecx and whatever register holds the
365 * thread address (which can't be one of the above three). The
366 * thread address is overwritten with the address of its saved state
367 * (where the frame winds up).
368 *
369 * Must be called on kernel stack.
370 */
/*
 * Copy a trap frame of R_UESP bytes from the thread's PCB saved state to
 * the stack named by its saved uesp, selecting user or kernel %es for
 * the copy depending on whether the task is kernel-loaded.  Clobbers
 * %edi, %esi, %ecx; leaves the destination frame address in `thread'.
 */
371#define FRAME_PCB_TO_STACK(thread) ;\
372 movl ACT_PCB(thread),%esi /* get act`s PCB */ ;\
373 leal PCB_ISS(%esi),%esi /* point to PCB`s saved state */;\
374 movl R_UESP(%esi),%edi /* point to end of dest frame */;\
375 movl ACT_MAP(thread),%ecx /* get act's map */ ;\
376 movl MAP_PMAP(%ecx),%ecx /* get map's pmap */ ;\
377 cmpl EXT(kernel_pmap), %ecx /* If kernel loaded task */ ;\
378 jz 1f /* use kernel data segment */ ;\
9bccf70c 379 movl $ USER_DS,%cx /* else use user data segment */;\
1c79356b
A
380 mov %cx,%es ;\
3811: ;\
9bccf70c 382 movl $ R_UESP,%ecx ;\
1c79356b
A
383 subl %ecx,%edi /* derive start of frame */ ;\
384 movl %edi,thread /* save for later */ ;\
385 sarl $2,%ecx /* word count for transfer */ ;\
386 cld /* we`re incrementing */ ;\
387 rep ;\
388 movsl /* transfer the frame */ ;\
389 mov %ss,%cx /* restore kernel segments */ ;\
390 mov %cx,%es
391
/*
 * CAH(label): debug-only counted checkpoint.  When PDEBUG is defined it
 * maintains label-count / label-limit words in .data, counts each pass,
 * and spins once the (debugger-settable) limit is reached; otherwise it
 * expands to nothing.  Token pasting uses the traditional empty-comment
 * splice, not ANSI ##.
 */
392#undef PDEBUG
393
394#ifdef PDEBUG
395
396/*
397 * Traditional, not ANSI.
 */
399#define CAH(label) \
400 .data ;\
401 .globl label/**/count ;\
402label/**/count: ;\
403 .long 0 ;\
404 .globl label/**/limit ;\
405label/**/limit: ;\
406 .long 0 ;\
407 .text ;\
408 addl $1,%ss:label/**/count ;\
409 cmpl $0,label/**/limit ;\
410 jz label/**/exit ;\
411 pushl %eax ;\
412label/**/loop: ;\
413 movl %ss:label/**/count,%eax ;\
414 cmpl %eax,%ss:label/**/limit ;\
415 je label/**/loop ;\
416 popl %eax ;\
417label/**/exit:
418
419#else /* PDEBUG */
420
421#define CAH(label)
422
423#endif /* PDEBUG */
424
425#if MACH_KDB
426/*
427 * Last-ditch debug code to handle faults that might result
428 * from entering kernel (from collocated server) on an invalid
429 * stack. On collocated entry, there's no hardware-initiated
430 * stack switch, so a valid stack must be in place when an
431 * exception occurs, or we may double-fault.
432 *
433 * In case of a double-fault, our only recourse is to switch
434 * hardware "tasks", so that we avoid using the current stack.
435 *
436 * The idea here is just to get the processor into the debugger,
437 * post-haste. No attempt is made to fix up whatever error got
438 * us here, so presumably continuing from the debugger will
439 * simply land us here again -- at best.
440 */
441#if 0
442/*
443 * Note that the per-fault entry points are not currently
444 * functional. The only way to make them work would be to
445 * set up separate TSS's for each fault type, which doesn't
446 * currently seem worthwhile. (The offset part of a task
447 * gate is always ignored.) So all faults that task switch
448 * currently resume at db_task_start.
449 */
450/*
451 * Double fault (Murphy's point) - error code (0) on stack
452 */
/*
 * Per-fault task-gate entry stubs (compiled out by the #if 0 above):
 * each pops the hardware error code into %eax, loads the trap number
 * into %ebx, and joins db_task_start.
 */
453Entry(db_task_dbl_fault)
454 popl %eax
455 movl $(T_DOUBLE_FAULT),%ebx
456 jmp db_task_start
457/*
458 * Segment not present - error code on stack
 */
460Entry(db_task_seg_np)
461 popl %eax
462 movl $(T_SEGMENT_NOT_PRESENT),%ebx
463 jmp db_task_start
464/*
465 * Stack fault - error code on (current) stack
 */
467Entry(db_task_stk_fault)
468 popl %eax
469 movl $(T_STACK_FAULT),%ebx
470 jmp db_task_start
471/*
472 * General protection fault - error code on stack
 */
474Entry(db_task_gen_prot)
475 popl %eax
476 movl $(T_GENERAL_PROTECTION),%ebx
477 jmp db_task_start
478#endif /* 0 */
479/*
480 * The entry point where execution resumes after last-ditch debugger task
481 * switch.
482 */
/*
 * db_task_start: resume point after a last-ditch debugger task switch.
 * Builds an i386_saved_state on the current stack from %eax (error code)
 * and %ebx (trap number), reconstructs the interrupted register state
 * from the previous TSS via db_tss_to_frame, and enters the debugger.
 */
483Entry(db_task_start)
484 movl %esp,%edx
485 subl $ISS_SIZE,%edx
486 movl %edx,%esp /* allocate i386_saved_state on stack */
487 movl %eax,R_ERR(%esp)
488 movl %ebx,R_TRAPNO(%esp)
489 pushl %edx
490#if NCPUS > 1
491 CPU_NUMBER(%edx)
492 movl CX(EXT(mp_dbtss),%edx),%edx
493 movl TSS_LINK(%edx),%eax
494#else
495 movl EXT(dbtss)+TSS_LINK,%eax
496#endif
497 pushl %eax /* pass along selector of previous TSS */
498 call EXT(db_tss_to_frame)
499 popl %eax /* get rid of TSS selector */
500 call EXT(db_trap_from_asm)
501 addl $0x4,%esp
502 /*
503 * And now...?
 */
505 iret /* ha, ha, ha... */
506#endif /* MACH_KDB */
507
508/*
509 * Trap/interrupt entry points.
510 *
511 * All traps must create the following save area on the PCB "stack":
512 *
513 * gs
514 * fs
515 * es
516 * ds
517 * edi
518 * esi
519 * ebp
520 * cr2 if page fault - otherwise unused
521 * ebx
522 * edx
523 * ecx
524 * eax
525 * trap number
526 * error code
527 * eip
528 * cs
529 * eflags
530 * user esp - if from user
531 * user ss - if from user
532 * es - if from V86 thread
533 * ds - if from V86 thread
534 * fs - if from V86 thread
535 * gs - if from V86 thread
536 *
537 */
538
539/*
540 * General protection or segment-not-present fault.
541 * Check for a GP/NP fault in the kernel_return
542 * sequence; if there, report it as a GP/NP fault on the user's instruction.
543 *
544 * esp-> 0: trap code (NP or GP)
545 * 4: segment number in error
546 * 8 eip
547 * 12 cs
548 * 16 eflags
549 * 20 old registers (trap is from kernel)
550 */
/*
 * GP/NP fault router: if the fault hit one of the kernel-exit
 * instructions (the iret or the segment-register pops labelled kret_*
 * in return_from_kernel), re-attribute it to the interrupted user
 * instruction; otherwise fall through to the normal trap path.
 */
551Entry(t_gen_prot)
552 pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
553 jmp trap_check_kernel_exit /* check for kernel exit sequence */
554
555Entry(t_segnp)
556 pushl $(T_SEGMENT_NOT_PRESENT)
557 /* indicate fault type */
558
559trap_check_kernel_exit:
560 testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
561 jnz EXT(alltraps) /* isn`t kernel trap if so */
562 testl $3,12(%esp) /* is trap from kernel mode? */
563 jne EXT(alltraps) /* if so: */
564 /* check for the kernel exit sequence */
9bccf70c 565 cmpl $ EXT(kret_iret),8(%esp) /* on IRET? */
1c79356b 566 je fault_iret
9bccf70c 567 cmpl $ EXT(kret_popl_ds),8(%esp) /* popping DS? */
1c79356b 568 je fault_popl_ds
9bccf70c 569 cmpl $ EXT(kret_popl_es),8(%esp) /* popping ES? */
1c79356b 570 je fault_popl_es
9bccf70c 571 cmpl $ EXT(kret_popl_fs),8(%esp) /* popping FS? */
1c79356b 572 je fault_popl_fs
9bccf70c 573 cmpl $ EXT(kret_popl_gs),8(%esp) /* popping GS? */
1c79356b
A
574 je fault_popl_gs
575take_fault: /* if none of the above: */
576 jmp EXT(alltraps) /* treat as normal trap. */
577
578/*
579 * GP/NP fault on IRET: CS or SS is in error.
580 * All registers contain the user's values.
581 *
582 * on SP is
583 * 0 trap number
584 * 4 errcode
585 * 8 eip
586 * 12 cs --> trapno
587 * 16 efl --> errcode
588 * 20 user eip
589 * 24 user cs
590 * 28 user eflags
591 * 32 user esp
592 * 36 user ss
593 */
/*
 * GP/NP fault on the kernel's iret: user CS or SS is bad.  Shift the
 * trap number and error code down over the kernel iret frame (whose eip
 * is no longer needed) so the user's saved state sits above them, then
 * deliver the fault as if taken on the user instruction.
 */
594fault_iret:
595 movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
596 popl %eax /* get trap number */
597 movl %eax,12-4(%esp) /* put in user trap number */
598 popl %eax /* get error code */
599 movl %eax,16-8(%esp) /* put in user errcode */
600 popl %eax /* restore eax */
601 CAH(fltir)
602 jmp EXT(alltraps) /* take fault */
603
604/*
605 * Fault restoring a segment register. The user's registers are still
606 * saved on the stack. The offending segment register has not been
607 * popped.
608 */
/*
 * Fault while popping a segment register on kernel exit.  Each stub
 * pops the trap number into %eax and error code into %edx, discards the
 * 12-byte kernel exception frame (eip/cs/eflags), then re-pushes the
 * segment registers not yet restored and rejoins the trap path.
 */
609fault_popl_ds:
610 popl %eax /* get trap number */
611 popl %edx /* get error code */
612 addl $12,%esp /* pop stack to user regs */
613 jmp push_es /* (DS on top of stack) */
614fault_popl_es:
615 popl %eax /* get trap number */
616 popl %edx /* get error code */
617 addl $12,%esp /* pop stack to user regs */
618 jmp push_fs /* (ES on top of stack) */
619fault_popl_fs:
620 popl %eax /* get trap number */
621 popl %edx /* get error code */
622 addl $12,%esp /* pop stack to user regs */
623 jmp push_gs /* (FS on top of stack) */
624fault_popl_gs:
625 popl %eax /* get trap number */
626 popl %edx /* get error code */
627 addl $12,%esp /* pop stack to user regs */
628 jmp push_segregs /* (GS on top of stack) */
629
630push_es:
631 pushl %es /* restore es, */
632push_fs:
633 pushl %fs /* restore fs, */
634push_gs:
635 pushl %gs /* restore gs. */
636push_segregs:
637 movl %eax,R_TRAPNO(%esp) /* set trap number */
638 movl %edx,R_ERR(%esp) /* set error code */
639 CAH(fltpp)
640 jmp trap_set_segs /* take trap */
641
642/*
643 * Debug trap. Check for single-stepping across system call into
644 * kernel. If this is the case, taking the debug trap has turned
645 * off single-stepping - save the flags register with the trace
646 * bit set.
647 */
/*
 * Debug trap: if we single-stepped into the kernel exactly at
 * syscall_entry, the trap cleared TF; the saved eflags (with TF set) is
 * already where the syscall path expects it, so resume the system call.
 * Otherwise deliver T_DEBUG as a normal trap.
 */
648Entry(t_debug)
649 testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
650 jnz 0f /* isn`t kernel trap if so */
651 testl $3,4(%esp) /* is trap from kernel mode? */
652 jnz 0f /* if so: */
653 cmpl $syscall_entry,(%esp) /* system call entry? */
654 jne 0f /* if not, treat as normal trap */
655 /* flags are sitting where syscall */
656 /* wants them */
657 addl $8,%esp /* remove eip/cs */
658 jmp syscall_entry_2 /* continue system call entry */
659
6600: pushl $0 /* otherwise: */
661 pushl $(T_DEBUG) /* handle as normal */
662 jmp EXT(alltraps) /* debug fault */
663
664/*
665 * Page fault traps save cr2.
666 */
/*
 * Page fault: save %cr2 (the faulting linear address) in the pusha esp
 * slot, which is otherwise unused in the saved frame, then join the
 * common trap path.
 */
667Entry(t_page_fault)
668 pushl $(T_PAGE_FAULT) /* mark a page fault trap */
669 pusha /* save the general registers */
670 movl %cr2,%eax /* get the faulting address */
671 movl %eax,12(%esp) /* save in esp save slot */
672 jmp trap_push_segs /* continue fault */
673
674/*
675 * All 'exceptions' enter here with:
676 * esp-> trap number
677 * error code
678 * old eip
679 * old cs
680 * old eflags
681 * old esp if trapped from user
682 * old ss if trapped from user
683 *
684 * NB: below use of CPU_NUMBER assumes that macro will use correct
685 * segment register for any kernel data accesses.
686 */
/*
 * Common trap entry: save general and segment registers, switch to the
 * kernel data segments, then dispatch on the trap's origin — user mode,
 * a kernel-loaded task on its own stack, or true kernel mode.  User
 * traps switch to the kernel stack and call user_trap(); kernel-loaded
 * traps first move the frame into the PCB via FRAME_STACK_TO_PCB.
 */
687Entry(alltraps)
688 pusha /* save the general registers */
689trap_push_segs:
690 pushl %ds /* save the segment registers */
691 pushl %es
692 pushl %fs
693 pushl %gs
694
695trap_set_segs:
696 movl %ss,%ax
697 movl %ax,%ds
698 movl %ax,%es /* switch to kernel data seg */
699 cld /* clear direction flag */
700 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
701 jnz trap_from_user /* user mode trap if so */
702 testb $3,R_CS(%esp) /* user mode trap? */
703 jnz trap_from_user
704 CPU_NUMBER(%edx)
705 cmpl $0,CX(EXT(active_kloaded),%edx)
706 je trap_from_kernel /* if clear, truly in kernel */
707#ifdef FIXME
708 cmpl ETEXT_ADDR,R_EIP(%esp) /* pc within kernel? */
709 jb trap_from_kernel
710#endif
711trap_from_kloaded:
712 /*
713 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
714 * so transfer the stack frame into the PCB explicitly, then
715 * start running on resulting "PCB stack". We have to set
716 * up a simulated "uesp" manually, since there's none in the
717 * frame.
 */
9bccf70c 719 mov $ CPU_DATA,%dx
1c79356b
A
720 mov %dx,%gs
721 CAH(atstart)
722 CPU_NUMBER(%edx)
723 movl CX(EXT(active_kloaded),%edx),%ebx
724 movl CX(EXT(kernel_stack),%edx),%eax
725 xchgl %esp,%eax
726 FRAME_STACK_TO_PCB(%ebx,%eax)
727 CAH(atend)
728 jmp EXT(take_trap)
729
730trap_from_user:
9bccf70c 731 mov $ CPU_DATA,%ax
1c79356b
A
732 mov %ax,%gs
733
734 CPU_NUMBER(%edx)
735 TIME_TRAP_UENTRY
736
737 movl CX(EXT(kernel_stack),%edx),%ebx
738 xchgl %ebx,%esp /* switch to kernel stack */
739 /* user regs pointer already set */
740LEXT(take_trap)
741 pushl %ebx /* record register save area */
742 pushl %ebx /* pass register save area to trap */
743 call EXT(user_trap) /* call user trap routine */
744 movl 4(%esp),%esp /* switch back to PCB stack */
745
746/*
747 * Return from trap or system call, checking for ASTs.
748 * On PCB stack.
749 */
750
/*
 * return_from_trap: loop taking pending ASTs on the kernel stack until
 * none remain, then fall into return_to_user.
 */
751LEXT(return_from_trap)
752 CPU_NUMBER(%edx)
753 cmpl $0,CX(EXT(need_ast),%edx)
754 je EXT(return_to_user) /* if we need an AST: */
755
756 movl CX(EXT(kernel_stack),%edx),%esp
757 /* switch to kernel stack */
758 pushl $0 /* push preemption flag */
759 call EXT(i386_astintr) /* take the AST */
760 addl $4,%esp /* pop preemption flag */
761 popl %esp /* switch back to PCB stack (w/exc link) */
762 jmp EXT(return_from_trap) /* and check again (rare) */
763 /* ASTs after this point will */
764 /* have to wait */
765
766/*
767 * Arrange the checks needed for kernel-loaded (or kernel-loading)
768 * threads so that branch is taken in kernel-loaded case.
769 */
/*
 * return_to_user: charge user time, then choose the exit path required
 * for kernel-loaded or kernel-loading threads.  Under MACH_RT with
 * MACH_ASSERT, trap (int $3) if the preemption level is nonzero on the
 * way out — preemption must not be disabled when returning to user.
 */
770LEXT(return_to_user)
771 TIME_TRAP_UEXIT
772 CPU_NUMBER(%eax)
773 cmpl $0,CX(EXT(active_kloaded),%eax)
774 jnz EXT(return_xfer_stack)
9bccf70c 775 movl $ CPD_ACTIVE_THREAD,%ebx
1c79356b
A
776 movl %gs:(%ebx),%ebx /* get active thread */
777 movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */
778 cmpl $0,ACT_KLOADING(%ebx) /* check if kernel-loading */
779 jnz EXT(return_kernel_loading)
780
781#if MACH_RT
782#if MACH_ASSERT
9bccf70c 783 movl $ CPD_PREEMPTION_LEVEL,%ebx
1c79356b
A
784 cmpl $0,%gs:(%ebx)
785 je EXT(return_from_kernel)
786 int $3
787#endif /* MACH_ASSERT */
788#endif /* MACH_RT */
789
790/*
791 * Return from kernel mode to interrupted thread.
792 */
793
/*
 * Final register restore and iret.  The kret_* labels are the exact
 * instructions recognized by trap_check_kernel_exit above, so a GP/NP
 * fault on any of them is re-attributed to the user's saved state.
 */
794LEXT(return_from_kernel)
795LEXT(kret_popl_gs)
796 popl %gs /* restore segment registers */
797LEXT(kret_popl_fs)
798 popl %fs
799LEXT(kret_popl_es)
800 popl %es
801LEXT(kret_popl_ds)
802 popl %ds
803 popa /* restore general registers */
804 addl $8,%esp /* discard trap number and error code */
805
806LEXT(kret_iret)
807 iret /* return from interrupt */
808
809
/* Move saved state back to the thread stack for a kernel-loaded task. */
810LEXT(return_xfer_stack)
811 /*
812 * If we're on PCB stack in a kernel-loaded task, we have
813 * to transfer saved state back to thread stack and swap
814 * stack pointers here, because the hardware's not going
815 * to do so for us.
 */
817 CAH(rxsstart)
818 CPU_NUMBER(%eax)
819 movl CX(EXT(kernel_stack),%eax),%esp
820 movl CX(EXT(active_kloaded),%eax),%eax
821 FRAME_PCB_TO_STACK(%eax)
822 movl %eax,%esp
823 CAH(rxsend)
824 jmp EXT(return_from_kernel)
825
826/*
827 * Hate to put this here, but setting up a separate swap_func for
828 * kernel-loaded threads no longer works, since thread executes
829 * "for a while" (i.e., until it reaches glue code) when first
830 * created, even if it's nominally suspended. Hence we can't
831 * transfer the PCB when the thread first resumes, because we
832 * haven't initialized it yet.
833 */
834/*
835 * Have to force transfer to new stack "manually". Use a string
836 * move to transfer all of our saved state to the stack pointed
837 * to by iss.uesp, then install a pointer to it as our current
838 * stack pointer.
839 */
/*
 * First return for a thread becoming kernel-loaded: move its saved
 * state from the PCB to the thread stack, flip ACT_KLOADING off and
 * ACT_KLOADED on, and cache the activation in active_kloaded[cpu].
 */
840LEXT(return_kernel_loading)
841 CPU_NUMBER(%eax)
842 movl CX(EXT(kernel_stack),%eax),%esp
9bccf70c 843 movl $ CPD_ACTIVE_THREAD,%ebx
1c79356b
A
844 movl %gs:(%ebx),%ebx /* get active thread */
845 movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */
846 movl %ebx,%edx /* save for later */
847 movl $0,ACT_KLOADING(%edx) /* clear kernel-loading bit */
848 FRAME_PCB_TO_STACK(%ebx)
849 movl %ebx,%esp /* start running on new stack */
850 movl $1,ACT_KLOADED(%edx) /* set kernel-loaded bit */
851 movl %edx,CX(EXT(active_kloaded),%eax) /* set cached indicator */
852 jmp EXT(return_from_kernel)
853
854/*
855 * Trap from kernel mode. No need to switch stacks or load segment registers.
856 */
/*
 * Trap taken in kernel mode.  With a debugger configured, first decide
 * whether the trap belongs to kgdb/ddb; otherwise call kernel_trap() on
 * the kernel stack, keeping the old stack pointer in callee-saved %ebx.
 * An unhandled trap falls into the debugger and finally panic_trap().
 * Under MACH_RT, an urgent AST with a T_PREEMPT trap on the kernel
 * stack triggers kernel preemption via i386_astintr() before returning.
 */
857trap_from_kernel:
858#if MACH_KDB || MACH_KGDB
9bccf70c 859 mov $ CPU_DATA,%ax
1c79356b
A
860 mov %ax,%gs
861 movl %esp,%ebx /* save current stack */
862
863 cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
864 jb 6f /* OK if so */
865
866#if MACH_KGDB
867 cmpl $0,EXT(kgdb_active) /* Unexpected trap in kgdb */
868 je 0f /* no */
869
870 pushl %esp /* Already on kgdb stack */
871 cli
872 call EXT(kgdb_trap)
873 addl $4,%esp
874 jmp EXT(return_from_kernel)
8750: /* should kgdb handle this exception? */
876 cmpl $(T_NO_FPU),R_TRAPNO(%esp) /* FPU disabled? */
877 je 2f /* yes */
878 cmpl $(T_PAGE_FAULT),R_TRAPNO(%esp) /* page fault? */
879 je 2f /* yes */
8801:
881 cli /* disable interrupts */
882 CPU_NUMBER(%edx) /* get CPU number */
883 movl CX(EXT(kgdb_stacks),%edx),%ebx
884 xchgl %ebx,%esp /* switch to kgdb stack */
885 pushl %ebx /* pass old sp as an arg */
886 call EXT(kgdb_from_kernel)
887 popl %esp /* switch back to kernel stack */
888 jmp EXT(return_from_kernel)
8892:
890#endif /* MACH_KGDB */
891
892#if MACH_KDB
893 cmpl $0,EXT(db_active) /* could trap be from ddb? */
894 je 3f /* no */
895#if NCPUS > 1
896 CPU_NUMBER(%edx) /* see if this CPU is in ddb */
897 cmpl $0,CX(EXT(kdb_active),%edx)
898 je 3f /* no */
899#endif /* NCPUS > 1 */
900 pushl %esp
901 call EXT(db_trap_from_asm)
902 addl $0x4,%esp
903 jmp EXT(return_from_kernel)
904
9053:
906 /*
907 * Dilemma: don't want to switch to kernel_stack if trap
908 * "belongs" to ddb; don't want to switch to db_stack if
909 * trap "belongs" to kernel. So have to duplicate here the
910 * set of trap types that kernel_trap() handles. Note that
911 * "unexpected" page faults will not be handled by kernel_trap().
912 * In this panic-worthy case, we fall into the debugger with
913 * kernel_stack containing the call chain that led to the
914 * bogus fault.
 */
916 movl R_TRAPNO(%esp),%edx
917 cmpl $(T_PAGE_FAULT),%edx
918 je 4f
919 cmpl $(T_NO_FPU),%edx
920 je 4f
921 cmpl $(T_FPU_FAULT),%edx
922 je 4f
923 cmpl $(T_FLOATING_POINT_ERROR),%edx
924 je 4f
925 cmpl $(T_PREEMPT),%edx
926 jne 7f
9274:
928#endif /* MACH_KDB */
929
930 CPU_NUMBER(%edx) /* get CPU number */
931 cmpl CX(EXT(kernel_stack),%edx),%esp
932 /* if not already on kernel stack, */
933 ja 5f /* check some more */
934 cmpl CX(EXT(active_stacks),%edx),%esp
935 ja 6f /* on kernel stack: no switch */
9365:
937 movl CX(EXT(kernel_stack),%edx),%esp
9386:
939 pushl %ebx /* save old stack */
940 pushl %ebx /* pass as parameter */
941 call EXT(kernel_trap) /* to kernel trap routine */
942 addl $4,%esp /* pop parameter */
943 testl %eax,%eax
944 jne 8f
945 /*
946 * If kernel_trap returns false, trap wasn't handled.
 */
9487:
949#if MACH_KDB
950 CPU_NUMBER(%edx)
951 movl CX(EXT(db_stacks),%edx),%esp
952 pushl %ebx /* pass old stack as parameter */
953 call EXT(db_trap_from_asm)
954#endif /* MACH_KDB */
955#if MACH_KGDB
956 cli /* disable interrupts */
957 CPU_NUMBER(%edx) /* get CPU number */
958 movl CX(EXT(kgdb_stacks),%edx),%esp
959 pushl %ebx /* pass old stack as parameter */
960 call EXT(kgdb_from_kernel)
961#endif /* MACH_KGDB */
962 addl $4,%esp /* pop parameter */
963 testl %eax,%eax
964 jne 8f
965 /*
966 * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap
967 * wasn't handled.
 */
969 pushl %ebx /* pass old stack as parameter */
970 call EXT(panic_trap)
971 addl $4,%esp /* pop parameter */
9728:
973 movl %ebx,%esp /* get old stack (from callee-saves reg) */
974#else /* MACH_KDB || MACH_KGDB */
975 pushl %esp /* pass parameter */
976 call EXT(kernel_trap) /* to kernel trap routine */
977 addl $4,%esp /* pop parameter */
978#endif /* MACH_KDB || MACH_KGDB */
979
980#if MACH_RT
981 CPU_NUMBER(%edx)
982
983 movl CX(EXT(need_ast),%edx),%eax /* get pending asts */
9bccf70c 984 testl $ AST_URGENT,%eax /* any urgent preemption? */
1c79356b
A
985 je EXT(return_from_kernel) /* no, nothing to do */
986 cmpl $0,EXT(preemptable) /* kernel-mode, preemption enabled? */
987 je EXT(return_from_kernel) /* no, skip it */
9bccf70c 988 cmpl $ T_PREEMPT,48(%esp) /* preempt request? */
1c79356b
A
989 jne EXT(return_from_kernel) /* no, nothing to do */
990 movl CX(EXT(kernel_stack),%edx),%eax
991 movl %esp,%ecx
992 xorl %eax,%ecx
993 andl $(-KERNEL_STACK_SIZE),%ecx
994 testl %ecx,%ecx /* are we on the kernel stack? */
995 jne EXT(return_from_kernel) /* no, skip it */
996
997#if PREEMPT_DEBUG_LOG
998 pushl 28(%esp) /* stack pointer */
999 pushl 24+4(%esp) /* frame pointer */
1000 pushl 56+8(%esp) /* stack pointer */
1001 pushl $0f
1002 call EXT(log_thread_action)
1003 addl $16, %esp
1004 .data
10050: String "trap preempt eip"
1006 .text
1007#endif /* PREEMPT_DEBUG_LOG */
1008
1009 pushl $1 /* push preemption flag */
1010 call EXT(i386_astintr) /* take the AST */
1011 addl $4,%esp /* pop preemption flag */
1012#endif /* MACH_RT */
1013
1014 jmp EXT(return_from_kernel)
1015
1016/*
1017 * Called as a function, makes the current thread
1018 * return from the kernel as if from an exception.
1019 */
1020
/*
 * Called as a function: recover the PCB stack pointer saved at the base
 * of the current kernel stack (the -3-IKS_SIZE slot below the stack
 * top, found by rounding %esp up) and return to user as if from a trap.
 */
1021 .globl EXT(thread_exception_return)
1022 .globl EXT(thread_bootstrap_return)
1023LEXT(thread_exception_return)
1024LEXT(thread_bootstrap_return)
1025 movl %esp,%ecx /* get kernel stack */
1026 or $(KERNEL_STACK_SIZE-1),%ecx
1027 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1028 jmp EXT(return_from_trap)
1029
/*
 * call_continuation(continuation): reset %esp to the base of the
 * current kernel stack (discarding the whole call chain), clear the
 * frame pointer, and jump to the continuation; it must not return.
 */
1030Entry(call_continuation)
1031 movl S_ARG0,%eax /* get continuation */
1032 movl %esp,%ecx /* get kernel stack */
1033 or $(KERNEL_STACK_SIZE-1),%ecx
1034 addl $(-3-IKS_SIZE),%ecx
1035 movl %ecx,%esp /* pop the stack */
1036 xorl %ebp,%ebp /* zero frame pointer */
1037 jmp *%eax /* goto continuation */
1038
1039#if 0
1040#define LOG_INTERRUPT(info,msg) \
1041 pushal ; \
1042 pushl msg ; \
1043 pushl info ; \
1044 call EXT(log_thread_action) ; \
1045 add $8,%esp ; \
1046 popal
1047#define CHECK_INTERRUPT_TIME(n) \
1048 pushal ; \
1049 pushl $n ; \
1050 call EXT(check_thread_time) ; \
1051 add $4,%esp ; \
1052 popal
1053#else
1054#define LOG_INTERRUPT(info,msg)
1055#define CHECK_INTERRUPT_TIME(n)
1056#endif
1057
1058imsg_start:
1059 String "interrupt start"
1060imsg_end:
1061 String "interrupt end"
1062
1063/*
1064 * All interrupts enter here.
1065 * old %eax on stack; interrupt number in %eax.
1066 */
1067Entry(all_intrs)
1068 pushl %ecx /* save registers */
1069 pushl %edx
1070 cld /* clear direction flag */
1071
1072 cmpl %ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
1073 jb int_from_intstack /* if not: */
1074
1075 pushl %ds /* save segment registers */
1076 pushl %es
1077 mov %ss,%dx /* switch to kernel segments */
1078 mov %dx,%ds
1079 mov %dx,%es
9bccf70c 1080 mov $ CPU_DATA,%dx
1c79356b
A
1081 mov %dx,%gs
1082
1083 CPU_NUMBER(%edx)
1084
1085 movl CX(EXT(int_stack_top),%edx),%ecx
9bccf70c 1086 movl 20(%esp),%edx /* get eip */
1c79356b
A
1087 xchgl %ecx,%esp /* switch to interrupt stack */
1088
1089#if STAT_TIME
1090 pushl %ecx /* save pointer to old stack */
1091#else
1092 pushl %ebx /* save %ebx - out of the way */
1093 /* so stack looks the same */
1094 pushl %ecx /* save pointer to old stack */
1095 TIME_INT_ENTRY /* do timing */
1096#endif
1097
9bccf70c
A
1098 pushl %edx /* pass eip to pe_incoming_interrupt */
1099
1c79356b 1100#if MACH_RT
9bccf70c 1101 movl $ CPD_PREEMPTION_LEVEL,%edx
1c79356b
A
1102 incl %gs:(%edx)
1103#endif /* MACH_RT */
1104
9bccf70c 1105 movl $ CPD_INTERRUPT_LEVEL,%edx
1c79356b
A
1106 incl %gs:(%edx)
1107
1108 pushl %eax /* Push trap number */
1109 call EXT(PE_incoming_interrupt) /* call generic interrupt routine */
9bccf70c 1110 addl $8,%esp /* Pop trap number and eip */
1c79356b
A
1111
1112 .globl EXT(return_to_iret)
1113LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
1114
9bccf70c 1115 movl $ CPD_INTERRUPT_LEVEL,%edx
1c79356b
A
1116 decl %gs:(%edx)
1117
1118#if MACH_RT
9bccf70c 1119 movl $ CPD_PREEMPTION_LEVEL,%edx
1c79356b
A
1120 decl %gs:(%edx)
1121#endif /* MACH_RT */
1122
1123#if STAT_TIME
1124#else
1125 TIME_INT_EXIT /* do timing */
1126 movl 4(%esp),%ebx /* restore the extra reg we saved */
1127#endif
1128
1129 popl %esp /* switch back to old stack */
1130
1131 CPU_NUMBER(%edx)
1132 movl CX(EXT(need_ast),%edx),%eax
1133 testl %eax,%eax /* any pending asts? */
1134 je 1f /* no, nothing to do */
1135 testl $(EFL_VM),I_EFL(%esp) /* if in V86 */
1136 jnz ast_from_interrupt /* take it */
1137 testb $3,I_CS(%esp) /* user mode, */
1138 jnz ast_from_interrupt /* take it */
1139#ifdef FIXME
1140 cmpl ETEXT_ADDR,I_EIP(%esp) /* if within kernel-loaded task, */
1141 jnb ast_from_interrupt /* take it */
1142#endif
1143
1144#if MACH_RT
1145 cmpl $0,EXT(preemptable) /* kernel-mode, preemption enabled? */
1146 je 1f /* no, skip it */
9bccf70c 1147 movl $ CPD_PREEMPTION_LEVEL,%ecx
1c79356b
A
1148 cmpl $0,%gs:(%ecx) /* preemption masked? */
1149 jne 1f /* yes, skip it */
9bccf70c 1150 testl $ AST_URGENT,%eax /* any urgent requests? */
1c79356b 1151 je 1f /* no, skip it */
9bccf70c 1152 cmpl $ EXT(locore_end),I_EIP(%esp) /* are we in locore code? */
1c79356b
A
1153 jb 1f /* yes, skip it */
1154 movl CX(EXT(kernel_stack),%edx),%eax
1155 movl %esp,%ecx
1156 xorl %eax,%ecx
1157 andl $(-KERNEL_STACK_SIZE),%ecx
1158 testl %ecx,%ecx /* are we on the kernel stack? */
1159 jne 1f /* no, skip it */
1160
1161/*
1162 * Take an AST from kernel space. We don't need (and don't want)
1163 * to do as much as the case where the interrupt came from user
1164 * space.
1165 */
1166#if PREEMPT_DEBUG_LOG
1167 pushl $0
1168 pushl $0
1169 pushl I_EIP+8(%esp)
1170 pushl $0f
1171 call EXT(log_thread_action)
1172 addl $16, %esp
1173 .data
11740: String "intr preempt eip"
1175 .text
1176#endif /* PREEMPT_DEBUG_LOG */
1177
1178 sti
1179 pushl $1 /* push preemption flag */
1180 call EXT(i386_astintr) /* take the AST */
1181 addl $4,%esp /* pop preemption flag */
1182#endif /* MACH_RT */
1183
11841:
1185 pop %es /* restore segment regs */
1186 pop %ds
1187 pop %edx
1188 pop %ecx
1189 pop %eax
1190 iret /* return to caller */
1191
1192int_from_intstack:
1193#if MACH_RT
9bccf70c 1194 movl $ CPD_PREEMPTION_LEVEL,%edx
1c79356b
A
1195 incl %gs:(%edx)
1196#endif /* MACH_RT */
1197
9bccf70c 1198 movl $ CPD_INTERRUPT_LEVEL,%edx
1c79356b
A
1199 incl %gs:(%edx)
1200
9bccf70c
A
1201 movl 12(%esp),%edx
1202 pushl %edx /* push eip */
1203
1c79356b
A
1204 pushl %eax /* Push trap number */
1205
1206 call EXT(PE_incoming_interrupt)
9bccf70c 1207 addl $4,%esp /* pop eip */
1c79356b
A
1208
1209LEXT(return_to_iret_i) /* ( label for kdb_kintr) */
1210
1211 addl $4,%esp /* pop trap number */
1212
9bccf70c 1213 movl $ CPD_INTERRUPT_LEVEL,%edx
1c79356b
A
1214 decl %gs:(%edx)
1215
1216#if MACH_RT
9bccf70c 1217 movl $ CPD_PREEMPTION_LEVEL,%edx
1c79356b
A
1218 decl %gs:(%edx)
1219#endif /* MACH_RT */
1220
1221 pop %edx /* must have been on kernel segs */
1222 pop %ecx
1223 pop %eax /* no ASTs */
1224 iret
1225
1226/*
1227 * Take an AST from an interrupt.
1228 * On PCB stack.
1229 * sp-> es -> edx
1230 * ds -> ecx
1231 * edx -> eax
1232 * ecx -> trapno
1233 * eax -> code
1234 * eip
1235 * cs
1236 * efl
1237 * esp
1238 * ss
1239 */
1240ast_from_interrupt:
1241 pop %es /* restore all registers ... */
1242 pop %ds
1243 popl %edx
1244 popl %ecx
1245 popl %eax
1246 sti /* Reenable interrupts */
1247 pushl $0 /* zero code */
1248 pushl $0 /* zero trap number */
1249 pusha /* save general registers */
1250 push %ds /* save segment registers */
1251 push %es
1252 push %fs
1253 push %gs
1254 mov %ss,%dx /* switch to kernel segments */
1255 mov %dx,%ds
1256 mov %dx,%es
9bccf70c 1257 mov $ CPU_DATA,%dx
1c79356b
A
1258 mov %dx,%gs
1259
1260 /*
1261 * See if we interrupted a kernel-loaded thread executing
1262 * in its own task.
1263 */
1264 CPU_NUMBER(%edx)
1265 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
1266 jnz 0f /* user mode trap if so */
1267 testb $3,R_CS(%esp)
1268 jnz 0f /* user mode, back to normal */
1269#ifdef FIXME
1270 cmpl ETEXT_ADDR,R_EIP(%esp)
1271 jb 0f /* not kernel-loaded, back to normal */
1272#endif
1273
1274 /*
1275 * Transfer the current stack frame by hand into the PCB.
1276 */
1277 CAH(afistart)
1278 movl CX(EXT(active_kloaded),%edx),%eax
1279 movl CX(EXT(kernel_stack),%edx),%ebx
1280 xchgl %ebx,%esp
1281 FRAME_STACK_TO_PCB(%eax,%ebx)
1282 CAH(afiend)
1283 TIME_TRAP_UENTRY
1284 jmp 3f
12850:
1286 TIME_TRAP_UENTRY
1287
1288 movl CX(EXT(kernel_stack),%edx),%eax
1289 /* switch to kernel stack */
1290 xchgl %eax,%esp
12913:
1292 pushl %eax
1293 pushl $0 /* push preemption flag */
1294 call EXT(i386_astintr) /* take the AST */
1295 addl $4,%esp /* pop preemption flag */
1296 popl %esp /* back to PCB stack */
1297 jmp EXT(return_from_trap) /* return */
1298
1299#if MACH_KDB || MACH_KGDB
1300/*
1301 * kdb_kintr: enter kdb from keyboard interrupt.
1302 * Chase down the stack frames until we find one whose return
1303 * address is the interrupt handler. At that point, we have:
1304 *
1305 * frame-> saved %ebp
1306 * return address in interrupt handler
1307 * ivect
1308 * saved SPL
1309 * return address == return_to_iret_i
1310 * saved %edx
1311 * saved %ecx
1312 * saved %eax
1313 * saved %eip
1314 * saved %cs
1315 * saved %efl
1316 *
1317 * OR:
1318 * frame-> saved %ebp
1319 * return address in interrupt handler
1320 * ivect
1321 * saved SPL
1322 * return address == return_to_iret
1323 * pointer to save area on old stack
1324 * [ saved %ebx, if accurate timing ]
1325 *
1326 * old stack: saved %es
1327 * saved %ds
1328 * saved %edx
1329 * saved %ecx
1330 * saved %eax
1331 * saved %eip
1332 * saved %cs
1333 * saved %efl
1334 *
1335 * Call kdb, passing it that register save area.
1336 */
1337
1338#if MACH_KGDB
1339Entry(kgdb_kintr)
1340#endif /* MACH_KGDB */
1341#if MACH_KDB
1342Entry(kdb_kintr)
1343#endif /* MACH_KDB */
1344 movl %ebp,%eax /* save caller`s frame pointer */
9bccf70c
A
1345 movl $ EXT(return_to_iret),%ecx /* interrupt return address 1 */
1346 movl $ EXT(return_to_iret_i),%edx /* interrupt return address 2 */
1c79356b
A
1347
13480: cmpl 16(%eax),%ecx /* does this frame return to */
1349 /* interrupt handler (1)? */
1350 je 1f
1351 cmpl $kdb_from_iret,16(%eax)
1352 je 1f
1353 cmpl 16(%eax),%edx /* interrupt handler (2)? */
1354 je 2f /* if not: */
1355 cmpl $kdb_from_iret_i,16(%eax)
1356 je 2f
1357 movl (%eax),%eax /* try next frame */
1358 jmp 0b
1359
13601: movl $kdb_from_iret,16(%eax) /* returns to kernel/user stack */
1361 ret
1362
13632: movl $kdb_from_iret_i,16(%eax)
1364 /* returns to interrupt stack */
1365 ret
1366
1367/*
1368 * On return from keyboard interrupt, we will execute
1369 * kdb_from_iret_i
1370 * if returning to an interrupt on the interrupt stack
1371 * kdb_from_iret
1372 * if returning to an interrupt on the user or kernel stack
1373 */
1374kdb_from_iret:
1375 /* save regs in known locations */
1376#if STAT_TIME
1377 pushl %ebx /* caller`s %ebx is in reg */
1378#else
1379 movl 4(%esp),%eax /* get caller`s %ebx */
1380 pushl %eax /* push on stack */
1381#endif
1382 pushl %ebp
1383 pushl %esi
1384 pushl %edi
1385 push %fs
1386 push %gs
1387#if MACH_KGDB
1388 cli
1389 pushl %esp /* pass regs */
1390 call EXT(kgdb_kentry) /* to kgdb */
1391 addl $4,%esp /* pop parameters */
1392#endif /* MACH_KGDB */
1393#if MACH_KDB
1394 pushl %esp /* pass regs */
1395 call EXT(kdb_kentry) /* to kdb */
1396 addl $4,%esp /* pop parameters */
1397#endif /* MACH_KDB */
1398 pop %gs /* restore registers */
1399 pop %fs
1400 popl %edi
1401 popl %esi
1402 popl %ebp
1403#if STAT_TIME
1404 popl %ebx
1405#else
1406 popl %eax
1407 movl %eax,4(%esp)
1408#endif
1409 jmp EXT(return_to_iret) /* normal interrupt return */
1410
1411kdb_from_iret_i: /* on interrupt stack */
1412 pop %edx /* restore saved registers */
1413 pop %ecx
1414 pop %eax
1415 pushl $0 /* zero error code */
1416 pushl $0 /* zero trap number */
1417 pusha /* save general registers */
1418 push %ds /* save segment registers */
1419 push %es
1420 push %fs
1421 push %gs
1422#if MACH_KGDB
1423 cli /* disable interrupts */
1424 CPU_NUMBER(%edx) /* get CPU number */
1425 movl CX(EXT(kgdb_stacks),%edx),%ebx
1426 xchgl %ebx,%esp /* switch to kgdb stack */
1427 pushl %ebx /* pass old sp as an arg */
1428 call EXT(kgdb_from_kernel)
1429 popl %esp /* switch back to interrupt stack */
1430#endif /* MACH_KGDB */
1431#if MACH_KDB
1432 pushl %esp /* pass regs, */
1433 pushl $0 /* code, */
1434 pushl $-1 /* type to kdb */
1435 call EXT(kdb_trap)
1436 addl $12,%esp
1437#endif /* MACH_KDB */
1438 pop %gs /* restore segment registers */
1439 pop %fs
1440 pop %es
1441 pop %ds
1442 popa /* restore general registers */
1443 addl $8,%esp
1444 iret
1445
1446#endif /* MACH_KDB || MACH_KGDB */
1447
1448
1449/*
1450 * Mach RPC enters through a call gate, like a system call.
1451 */
1452
1453Entry(mach_rpc)
1454 pushf /* save flags as soon as possible */
1455 pushl %eax /* save system call number */
1456 pushl $0 /* clear trap number slot */
1457
1458 pusha /* save the general registers */
1459 pushl %ds /* and the segment registers */
1460 pushl %es
1461 pushl %fs
1462 pushl %gs
1463
1464 mov %ss,%dx /* switch to kernel data segment */
1465 mov %dx,%ds
1466 mov %dx,%es
9bccf70c 1467 mov $ CPU_DATA,%dx
1c79356b
A
1468 mov %dx,%gs
1469
1470/*
1471 * Shuffle eflags,eip,cs into proper places
1472 */
1473
1474 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
1475 movl R_CS(%esp),%ecx /* eip is in CS slot */
1476 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
1477 movl %ecx,R_EIP(%esp) /* fix eip */
1478 movl %edx,R_CS(%esp) /* fix cs */
1479 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
1480
1481 CPU_NUMBER(%edx)
1482 TIME_TRAP_UENTRY
1483
1484 negl %eax /* get system call number */
1485 shll $4,%eax /* manual indexing */
1486
1487/*
1488 * Check here for mach_rpc from kernel-loaded task --
1489 * - Note that kernel-loaded task returns via real return.
1490 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1491 * so transfer the stack frame into the PCB explicitly, then
1492 * start running on resulting "PCB stack". We have to set
1493 * up a simulated "uesp" manually, since there's none in the
1494 * frame.
1495 */
1496 cmpl $0,CX(EXT(active_kloaded),%edx)
1497 jz 2f
1498 CAH(mrstart)
1499 movl CX(EXT(active_kloaded),%edx),%ebx
1500 movl CX(EXT(kernel_stack),%edx),%edx
1501 xchgl %edx,%esp
1502
1503 FRAME_STACK_TO_PCB(%ebx,%edx)
1504 CAH(mrend)
1505
1506 CPU_NUMBER(%edx)
1507 jmp 3f
1508
15092:
1510 CPU_NUMBER(%edx)
1511 movl CX(EXT(kernel_stack),%edx),%ebx
1512 /* get current kernel stack */
1513 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1514 /* user registers. */
1515
15163:
1517
1518/*
1519 * Register use on entry:
1520 * eax contains syscall number
1521 * ebx contains user regs pointer
1522 */
1523#undef RPC_TRAP_REGISTERS
1524#ifdef RPC_TRAP_REGISTERS
1525 pushl R_ESI(%ebx)
1526 pushl R_EDI(%ebx)
1527 pushl R_ECX(%ebx)
1528 pushl R_EDX(%ebx)
1529#else
1530 movl EXT(mach_trap_table)(%eax),%ecx
1531 /* get number of arguments */
1532 jecxz 2f /* skip argument copy if none */
1533 movl R_UESP(%ebx),%esi /* get user stack pointer */
1534 lea 4(%esi,%ecx,4),%esi /* skip user return address, */
1535 /* and point past last argument */
1536 /* edx holds cpu number from above */
1537 movl CX(EXT(active_kloaded),%edx),%edx
1538 /* point to current thread */
1539 orl %edx,%edx /* if ! kernel-loaded, check addr */
1540 jz 4f /* else */
1541 mov %ds,%dx /* kernel data segment access */
1542 jmp 5f
15434:
1544 cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */
1545 ja mach_call_addr /* address error if not */
9bccf70c 1546 movl $ USER_DS,%edx /* user data segment access */
1c79356b
A
15475:
1548 mov %dx,%fs
1549 movl %esp,%edx /* save kernel ESP for error recovery */
15501:
1551 subl $4,%esi
1552 RECOVERY_SECTION
1553 RECOVER(mach_call_addr_push)
1554 pushl %fs:(%esi) /* push argument on stack */
1555 loop 1b /* loop for all arguments */
1556#endif
1557
1558/*
1559 * Register use on entry:
1560 * eax contains syscall number
1561 * ebx contains user regs pointer
1562 */
15632:
1564 CAH(call_call)
1565 call *EXT(mach_trap_table)+4(%eax)
1566 /* call procedure */
1567 movl %esp,%ecx /* get kernel stack */
1568 or $(KERNEL_STACK_SIZE-1),%ecx
1569 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1570 movl %eax,R_EAX(%esp) /* save return value */
1571 jmp EXT(return_from_trap) /* return to user */
1572
1573
1574/*
1575 * Special system call entry for "int 0x80", which has the "eflags"
1576 * register saved at the right place already.
1577 * Fall back to the common syscall path after saving the registers.
1578 *
1579 * esp -> old eip
1580 * old cs
1581 * old eflags
1582 * old esp if trapped from user
1583 * old ss if trapped from user
1584 *
1585 * XXX: for the moment, we don't check for int 0x80 from kernel mode.
1586 */
1587Entry(syscall_int80)
1588 pushl %eax /* save system call number */
1589 pushl $0 /* clear trap number slot */
1590
1591 pusha /* save the general registers */
1592 pushl %ds /* and the segment registers */
1593 pushl %es
1594 pushl %fs
1595 pushl %gs
1596
1597 mov %ss,%dx /* switch to kernel data segment */
1598 mov %dx,%ds
1599 mov %dx,%es
9bccf70c 1600 mov $ CPU_DATA,%dx
1c79356b
A
1601 mov %dx,%gs
1602
1603 jmp syscall_entry_3
1604
1605/*
1606 * System call enters through a call gate. Flags are not saved -
1607 * we must shuffle stack to look like trap save area.
1608 *
1609 * esp-> old eip
1610 * old cs
1611 * old esp
1612 * old ss
1613 *
1614 * eax contains system call number.
1615 *
1616 * NB: below use of CPU_NUMBER assumes that macro will use correct
1617 * correct segment register for any kernel data accesses.
1618 */
1619Entry(syscall)
1620syscall_entry:
1621 pushf /* save flags as soon as possible */
1622syscall_entry_2:
1623 pushl %eax /* save system call number */
1624 pushl $0 /* clear trap number slot */
1625
1626 pusha /* save the general registers */
1627 pushl %ds /* and the segment registers */
1628 pushl %es
1629 pushl %fs
1630 pushl %gs
1631
1632 mov %ss,%dx /* switch to kernel data segment */
1633 mov %dx,%ds
1634 mov %dx,%es
9bccf70c 1635 mov $ CPU_DATA,%dx
1c79356b
A
1636 mov %dx,%gs
1637
1638/*
1639 * Shuffle eflags,eip,cs into proper places
1640 */
1641
1642 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
1643 movl R_CS(%esp),%ecx /* eip is in CS slot */
1644 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
1645 movl %ecx,R_EIP(%esp) /* fix eip */
1646 movl %edx,R_CS(%esp) /* fix cs */
1647 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
1648
1649syscall_entry_3:
1650 CPU_NUMBER(%edx)
1651/*
1652 * Check here for syscall from kernel-loaded task --
1653 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1654 * so transfer the stack frame into the PCB explicitly, then
1655 * start running on resulting "PCB stack". We have to set
1656 * up a simulated "uesp" manually, since there's none in the
1657 * frame.
1658 */
1659 cmpl $0,CX(EXT(active_kloaded),%edx)
1660 jz 0f
1661 CAH(scstart)
1662 movl CX(EXT(active_kloaded),%edx),%ebx
1663 movl CX(EXT(kernel_stack),%edx),%edx
1664 xchgl %edx,%esp
1665 FRAME_STACK_TO_PCB(%ebx,%edx)
1666 CAH(scend)
1667 TIME_TRAP_UENTRY
1668 CPU_NUMBER(%edx)
1669 jmp 1f
1670
16710:
1672 TIME_TRAP_UENTRY
1673
1674 CPU_NUMBER(%edx)
1675 movl CX(EXT(kernel_stack),%edx),%ebx
1676 /* get current kernel stack */
1677 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1678 /* user registers. */
1679 /* user regs pointer already set */
1680
1681/*
1682 * Check for MACH or emulated system call
1683 * Register use (from here till we begin processing call):
1684 * eax contains system call number
1685 * ebx points to user regs
1686 */
16871:
9bccf70c 1688 movl $ CPD_ACTIVE_THREAD,%edx
1c79356b
A
1689 movl %gs:(%edx),%edx /* get active thread */
1690 /* point to current thread */
1691 movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */
1692 movl ACT_TASK(%edx),%edx /* point to task */
1693 movl TASK_EMUL(%edx),%edx /* get emulation vector */
1694 orl %edx,%edx /* if none, */
1695 je syscall_native /* do native system call */
1696 movl %eax,%ecx /* copy system call number */
1697 subl DISP_MIN(%edx),%ecx /* get displacement into syscall */
1698 /* vector table */
1699 jl syscall_native /* too low - native system call */
1700 cmpl DISP_COUNT(%edx),%ecx /* check range */
1701 jnl syscall_native /* too high - native system call */
1702 movl DISP_VECTOR(%edx,%ecx,4),%edx
1703 /* get the emulation vector */
1704 orl %edx,%edx /* emulated system call if not zero */
1705 jnz syscall_emul
1706
1707/*
1708 * Native system call.
1709 * Register use on entry:
1710 * eax contains syscall number
1711 * ebx points to user regs
1712 */
1713syscall_native:
1714 negl %eax /* get system call number */
1715 jl mach_call_range /* out of range if it was positive */
1716
1717 cmpl EXT(mach_trap_count),%eax /* check system call table bounds */
1718 jg mach_call_range /* error if out of range */
1719 shll $4,%eax /* manual indexing */
1720
1721 movl EXT(mach_trap_table)+4(%eax),%edx
1722 /* get procedure */
9bccf70c
A
1723 cmpl $ EXT(kern_invalid),%edx /* if not "kern_invalid" */
1724 jne do_native_call /* go on with Mach syscall */
1c79356b 1725
9bccf70c 1726 movl $ CPD_ACTIVE_THREAD,%edx
1c79356b
A
1727 movl %gs:(%edx),%edx /* get active thread */
1728 /* point to current thread */
1729 movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */
1730 movl ACT_TASK(%edx),%edx /* point to task */
1731 movl TASK_EMUL(%edx),%edx /* get emulation vector */
1732 orl %edx,%edx /* if it exists, */
9bccf70c 1733 jne do_native_call /* do native system call */
1c79356b
A
1734 shrl $4,%eax /* restore syscall number */
1735 jmp mach_call_range /* try it as a "server" syscall */
1736
1c79356b
A
1737/*
1738 * Register use on entry:
1739 * eax contains syscall number
1740 * ebx contains user regs pointer
1741 */
1742do_native_call:
1743 movl EXT(mach_trap_table)(%eax),%ecx
1744 /* get number of arguments */
1745 jecxz mach_call_call /* skip argument copy if none */
1746 movl R_UESP(%ebx),%esi /* get user stack pointer */
1747 lea 4(%esi,%ecx,4),%esi /* skip user return address, */
1748 /* and point past last argument */
1749 CPU_NUMBER(%edx)
1750 movl CX(EXT(active_kloaded),%edx),%edx
1751 /* point to current thread */
1752 orl %edx,%edx /* if kernel-loaded, skip addr check */
1753 jz 0f /* else */
1754 mov %ds,%dx /* kernel data segment access */
1755 jmp 1f
17560:
1757 cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */
1758 ja mach_call_addr /* address error if not */
9bccf70c 1759 movl $ USER_DS,%edx /* user data segment access */
1c79356b
A
17601:
1761 mov %dx,%fs
1762 movl %esp,%edx /* save kernel ESP for error recovery */
17632:
1764 subl $4,%esi
1765 RECOVERY_SECTION
1766 RECOVER(mach_call_addr_push)
1767 pushl %fs:(%esi) /* push argument on stack */
1768 loop 2b /* loop for all arguments */
1769
1770/*
1771 * Register use on entry:
1772 * eax contains syscall number
1773 * ebx contains user regs pointer
1774 */
1775mach_call_call:
1776
1777 CAH(call_call)
1778
1779#if ETAP_EVENT_MONITOR
1780 cmpl $0x200, %eax /* is this mach_msg? */
1781 jz make_syscall /* if yes, don't record event */
1782
1783 pushal /* Otherwise: save registers */
1784 pushl %eax /* push syscall number on stack*/
1785 call EXT(etap_machcall_probe1) /* call event begin probe */
1786 add $4,%esp /* restore stack */
1787 popal /* restore registers */
1788
1789 call *EXT(mach_trap_table)+4(%eax) /* call procedure */
1790 pushal
1791 call EXT(etap_machcall_probe2) /* call event end probe */
1792 popal
1793 jmp skip_syscall /* syscall already made */
1794#endif /* ETAP_EVENT_MONITOR */
1795
1796make_syscall:
1797 call *EXT(mach_trap_table)+4(%eax) /* call procedure */
1798skip_syscall:
1799
1800 movl %esp,%ecx /* get kernel stack */
1801 or $(KERNEL_STACK_SIZE-1),%ecx
1802 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1803 movl %eax,R_EAX(%esp) /* save return value */
1804 jmp EXT(return_from_trap) /* return to user */
1805
1806/*
1807 * Address out of range. Change to page fault.
1808 * %esi holds failing address.
1809 * Register use on entry:
1810 * ebx contains user regs pointer
1811 */
1812mach_call_addr_push:
1813 movl %edx,%esp /* clean parameters from stack */
1814mach_call_addr:
1815 movl %esi,R_CR2(%ebx) /* set fault address */
1816 movl $(T_PAGE_FAULT),R_TRAPNO(%ebx)
1817 /* set page-fault trap */
1818 movl $(T_PF_USER),R_ERR(%ebx)
1819 /* set error code - read user space */
1820 CAH(call_addr)
1821 jmp EXT(take_trap) /* treat as a trap */
1822
1c79356b
A
1823/*
1824 * System call out of range. Treat as invalid-instruction trap.
1825 * (? general protection?)
1826 * Register use on entry:
1827 * eax contains syscall number
1828 */
1829mach_call_range:
9bccf70c 1830 movl $ CPD_ACTIVE_THREAD,%edx
1c79356b
A
1831 movl %gs:(%edx),%edx /* get active thread */
1832
1833 movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */
1834 movl ACT_TASK(%edx),%edx /* point to task */
1835 movl TASK_EMUL(%edx),%edx /* get emulation vector */
1836 orl %edx,%edx /* if emulator, */
1837 jne EXT(syscall_failed) /* handle as illegal instruction */
1838 /* else generate syscall exception: */
1839 push %eax
1840 movl %esp,%edx
1841 push $1 /* code_cnt = 1 */
1842 push %edx /* exception_type_t (see i/f docky) */
9bccf70c 1843 push $ EXC_SYSCALL
1c79356b
A
1844 CAH(call_range)
1845 call EXT(exception)
1846 /* no return */
1847
1848 .globl EXT(syscall_failed)
1849LEXT(syscall_failed)
1850 movl %esp,%ecx /* get kernel stack */
1851 or $(KERNEL_STACK_SIZE-1),%ecx
1852 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1853 CPU_NUMBER(%edx)
1854 movl CX(EXT(kernel_stack),%edx),%ebx
1855 /* get current kernel stack */
1856 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1857 /* user registers. */
1858 /* user regs pointer already set */
1859
1860 movl $(T_INVALID_OPCODE),R_TRAPNO(%ebx)
1861 /* set invalid-operation trap */
1862 movl $0,R_ERR(%ebx) /* clear error code */
1863 CAH(failed)
1864 jmp EXT(take_trap) /* treat as a trap */
1865
1866/*
1867 * User space emulation of system calls.
1868 * edx - user address to handle syscall
1869 *
1870 * User stack will become:
1871 * uesp-> eflags
1872 * eip
1873 * Register use on entry:
1874 * ebx contains user regs pointer
1875 * edx contains emulator vector address
1876 */
1877syscall_emul:
1878 movl R_UESP(%ebx),%edi /* get user stack pointer */
1879 CPU_NUMBER(%eax)
1880 movl CX(EXT(active_kloaded),%eax),%eax
1881 orl %eax,%eax /* if thread not kernel-loaded, */
1882 jz 0f /* do address checks */
1883 subl $8,%edi
1884 mov %ds,%ax /* kernel data segment access */
1885 jmp 1f /* otherwise, skip them */
18860:
1887 cmpl $(VM_MAX_ADDRESS),%edi /* in user space? */
1888 ja syscall_addr /* address error if not */
1889 subl $8,%edi /* push space for new arguments */
1890 cmpl $(VM_MIN_ADDRESS),%edi /* still in user space? */
1891 jb syscall_addr /* error if not */
9bccf70c 1892 movl $ USER_DS,%ax /* user data segment access */
1c79356b
A
18931:
1894 mov %ax,%fs
1895 movl R_EFLAGS(%ebx),%eax /* move flags */
1896 RECOVERY_SECTION
1897 RECOVER(syscall_addr)
1898 movl %eax,%fs:0(%edi) /* to user stack */
1899 movl R_EIP(%ebx),%eax /* move eip */
1900 RECOVERY_SECTION
1901 RECOVER(syscall_addr)
1902 movl %eax,%fs:4(%edi) /* to user stack */
1903 movl %edi,R_UESP(%ebx) /* set new user stack pointer */
1904 movl %edx,R_EIP(%ebx) /* change return address to trap */
1905 movl %ebx,%esp /* back to PCB stack */
1906 CAH(emul)
1907 jmp EXT(return_from_trap) /* return to user */
1908
1909
1910/*
1911 * Address error - address is in %edi.
1912 * Register use on entry:
1913 * ebx contains user regs pointer
1914 */
1915syscall_addr:
1916 movl %edi,R_CR2(%ebx) /* set fault address */
1917 movl $(T_PAGE_FAULT),R_TRAPNO(%ebx)
1918 /* set page-fault trap */
1919 movl $(T_PF_USER),R_ERR(%ebx)
1920 /* set error code - read user space */
1921 CAH(addr)
1922 jmp EXT(take_trap) /* treat as a trap */
1923
1924/*\f*/
1925/*
1926 * Utility routines.
1927 */
1928
1929
1930/*
1931 * Copy from user address space.
1932 * arg0: user address
1933 * arg1: kernel address
1934 * arg2: byte count
1935 */
1936Entry(copyinmsg)
1937ENTRY(copyin)
1938 pushl %esi
1939 pushl %edi /* save registers */
1940
1941 movl 8+S_ARG0,%esi /* get user start address */
1942 movl 8+S_ARG1,%edi /* get kernel destination address */
1943 movl 8+S_ARG2,%edx /* get count */
1944
1945 lea 0(%esi,%edx),%eax /* get user end address + 1 */
1946
9bccf70c 1947 movl $ CPD_ACTIVE_THREAD,%ecx
1c79356b
A
1948 movl %gs:(%ecx),%ecx /* get active thread */
1949 movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */
1950 movl ACT_MAP(%ecx),%ecx /* get act->map */
1951 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
1952 cmpl EXT(kernel_pmap), %ecx
1953 jz 1f
9bccf70c 1954 movl $ USER_DS,%cx /* user data segment access */
1c79356b
A
1955 mov %cx,%ds
19561:
1957 cmpl %esi,%eax
1958 jb copyin_fail /* fail if wrap-around */
1959 cld /* count up */
1960 movl %edx,%ecx /* move by longwords first */
1961 shrl $2,%ecx
1962 RECOVERY_SECTION
1963 RECOVER(copyin_fail)
1964 rep
1965 movsl /* move longwords */
1966 movl %edx,%ecx /* now move remaining bytes */
1967 andl $3,%ecx
1968 RECOVERY_SECTION
1969 RECOVER(copyin_fail)
1970 rep
1971 movsb
1972 xorl %eax,%eax /* return 0 for success */
1973copy_ret:
1974 mov %ss,%di /* restore kernel data segment */
1975 mov %di,%ds
1976
1977 popl %edi /* restore registers */
1978 popl %esi
1979 ret /* and return */
1980
1981copyin_fail:
9bccf70c 1982 movl $ EFAULT,%eax /* return error for failure */
1c79356b
A
1983 jmp copy_ret /* pop frame and return */
1984
1985/*
1986 * Copy string from user address space.
1987 * arg0: user address
1988 * arg1: kernel address
1989 * arg2: max byte count
1990 * arg3: actual byte count (OUT)
1991 */
1992Entry(copyinstr)
1993 pushl %esi
1994 pushl %edi /* save registers */
1995
1996 movl 8+S_ARG0,%esi /* get user start address */
1997 movl 8+S_ARG1,%edi /* get kernel destination address */
1998 movl 8+S_ARG2,%edx /* get count */
1999
2000 lea 0(%esi,%edx),%eax /* get user end address + 1 */
2001
9bccf70c 2002 movl $ CPD_ACTIVE_THREAD,%ecx
1c79356b
A
2003 movl %gs:(%ecx),%ecx /* get active thread */
2004 movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */
2005 movl ACT_MAP(%ecx),%ecx /* get act->map */
2006 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
2007 cmpl EXT(kernel_pmap), %ecx
2008 jne 0f
2009 mov %ds,%cx /* kernel data segment access */
2010 jmp 1f
20110:
9bccf70c 2012 movl $ USER_DS,%cx /* user data segment access */
1c79356b
A
20131:
2014 mov %cx,%fs
2015 xorl %eax,%eax
2016 cmpl $0,%edx
2017 je 4f
20182:
2019 RECOVERY_SECTION
2020 RECOVER(copystr_fail) /* copy bytes... */
2021 movb %fs:(%esi),%eax
2022 incl %esi
2023 testl %edi,%edi /* if kernel address is ... */
2024 jz 3f /* not NULL */
2025 movb %eax,(%edi) /* copy the byte */
2026 incl %edi
20273:
2028 decl %edx
2029 je 5f /* Zero count.. error out */
2030 cmpl $0,%eax
2031 jne 2b /* .. a NUL found? */
2032 jmp 4f
20335:
9bccf70c 2034 movl $ ENAMETOOLONG,%eax /* String is too long.. */
1c79356b
A
20354:
2036 xorl %eax,%eax /* return zero for success */
2037 movl 8+S_ARG3,%edi /* get OUT len ptr */
2038 cmpl $0,%edi
2039 jz copystr_ret /* if null, just return */
2040 subl 8+S_ARG0,%esi
2041 movl %esi,(%edi) /* else set OUT arg to xfer len */
2042copystr_ret:
2043 popl %edi /* restore registers */
2044 popl %esi
2045 ret /* and return */
2046
2047copystr_fail:
9bccf70c 2048 movl $ EFAULT,%eax /* return error for failure */
1c79356b
A
2049 jmp copy_ret /* pop frame and return */
2050
/*
 * Copy to user address space.
 *	arg0:	kernel source address
 *	arg1:	user destination address
 *	arg2:	byte count
 * Returns 0 in %eax on success, EFAULT on fault.
 * The destination is written through %es, which is pointed either at
 * USER_DS or (for kernel-loaded tasks) at the kernel data segment.
 */
Entry(copyoutmsg)
ENTRY(copyout)
	pushl	%esi
	pushl	%edi			/* save registers */
	pushl	%ebx

	movl	12+S_ARG0,%esi		/* get kernel start address */
	movl	12+S_ARG1,%edi		/* get user start address */
	movl	12+S_ARG2,%edx		/* get count */

	leal	0(%edi,%edx),%eax	/* get user end address + 1 */

	movl	$ CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx		/* get active thread */
	movl	TH_TOP_ACT(%ecx),%ecx	/* get thread->top_act */
	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
	cmpl	EXT(kernel_pmap), %ecx	/* kernel-loaded task? */
	jne	0f
	mov	%ds,%cx			/* else kernel data segment access */
	jmp	1f
0:
	movl	$ USER_DS,%cx		/* normal case: user data segment */
1:
	mov	%cx,%es

/*
 * Check whether user address space is writable
 * before writing to it - hardware is broken.
 * (The 386 ignores the WP bit when writing from ring 0,
 * so write protection must be simulated in software there.)
 *
 * Skip check if "user" address is really in
 * kernel space (i.e., if it's in a kernel-loaded
 * task).
 *
 * Register usage:
 *	esi/edi	source/dest pointers for rep/mov
 *	ecx	counter for rep/mov
 *	edx	counts down from 3rd arg
 *	eax	count of bytes for each (partial) page copy
 *	ebx	shadows edi, used to adjust edx
 */
	movl	%edi,%ebx		/* copy edi for syncing up */
copyout_retry:
	/* if restarting after a partial copy, put edx back in sync, */
	addl	%ebx,%edx		/* edx -= (edi - ebx); */
	subl	%edi,%edx		/* ...done in two steps */
	movl	%edi,%ebx		/* ebx = edi; */

	mov	%es,%cx
	cmpl	$ USER_DS,%cx		/* If kernel data segment */
	jnz	0f			/* skip check */

	cmpb	$(CPUID_FAMILY_386), EXT(cpuid_family)
	ja	0f			/* 486+ trap ring-0 writes; no check */

	movl	%cr3,%ecx		/* point to page directory */
#if	NCPUS > 1
	andl	$(~0x7), %ecx		/* remove cpu number */
#endif	/* NCPUS > 1 */
	movl	%edi,%eax		/* get page directory bits */
	shrl	$(PDESHIFT),%eax	/* from user address */
	movl	KERNELBASE(%ecx,%eax,4),%ecx
					/* get page directory pointer */
	testl	$(PTE_V),%ecx		/* present? */
	jz	0f			/* if not, fault is OK */
	andl	$(PTE_PFN),%ecx		/* isolate page frame address */
	movl	%edi,%eax		/* get page table bits */
	shrl	$(PTESHIFT),%eax
	andl	$(PTEMASK),%eax		/* from user address */
	leal	KERNELBASE(%ecx,%eax,4),%ecx
					/* point to page table entry */
	movl	(%ecx),%eax		/* get it */
	testl	$(PTE_V),%eax		/* present? */
	jz	0f			/* if not, fault is OK */
	testl	$(PTE_W),%eax		/* writable? */
	jnz	0f			/* OK if so */
/*
 * Not writable - must fake a fault.  Turn off access to the page.
 */
	andl	$(PTE_INVALID),(%ecx)	/* turn off valid bit */
	movl	%cr3,%eax		/* invalidate TLB */
	movl	%eax,%cr3
0:
/*
 * Copy only what fits on the current destination page.
 * Check for write-fault again on the next page.
 */
	leal	NBPG(%edi),%eax		/* point to */
	andl	$(-NBPG),%eax		/* start of next page */
	subl	%edi,%eax		/* get number of bytes to that point */
	cmpl	%edx,%eax		/* bigger than count? */
	jle	1f			/* if so, */
	movl	%edx,%eax		/* use count */
1:
	cld				/* count up */
	movl	%eax,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	RETRY_SECTION
	RETRY(copyout_retry)
	rep
	movsl
	movl	%eax,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	RETRY_SECTION
	RETRY(copyout_retry)
	rep
	movsb				/* move */
	movl	%edi,%ebx		/* copy edi for syncing up */
	subl	%eax,%edx		/* and decrement count */
	jg	copyout_retry		/* restart on next page if not done */
	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%di			/* restore kernel segment */
	mov	%di,%es

	popl	%ebx
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$ EFAULT,%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */
2184
/*
 * FPU routines.
 */

/*
 * Initialize FPU to its default state.
 */
ENTRY(_fninit)
	fninit
	ret

/*
 * Read control word.
 * Low 16 bits of %eax = FP control word; the high 16 bits are
 * whatever was in %eax on entry (fstcw stores only 16 bits).
 */
ENTRY(_fstcw)
	pushl	%eax			/* get stack space */
	fstcw	(%esp)
	popl	%eax
	ret

/*
 * Set control word from arg0 (low 16 bits).
 */
ENTRY(_fldcw)
	fldcw	4(%esp)
	ret

/*
 * Read status word, zero-extended into %eax.
 */
ENTRY(_fnstsw)
	xor	%eax,%eax		/* clear high 16 bits of eax */
	fnstsw	%ax			/* read FP status */
	ret

/*
 * Clear FPU exceptions (without waiting for pending unmasked ones).
 */
ENTRY(_fnclex)
	fnclex
	ret

/*
 * Clear task-switched flag in CR0.
 */
ENTRY(_clts)
	clts
	ret

/*
 * Save complete FPU state.  Save error for later.
 *	arg0: pointer to save area (fnsave layout)
 */
ENTRY(_fpsave)
	movl	4(%esp),%eax		/* get save area pointer */
	fnsave	(%eax)			/* save complete state, including */
					/* errors */
	ret

/*
 * Restore FPU state saved by _fpsave.
 *	arg0: pointer to save area
 */
ENTRY(_fprestore)
	movl	4(%esp),%eax		/* get save area pointer */
	frstor	(%eax)			/* restore complete state */
	ret
2250
/*
 * Set cr3 (page directory base).
 *	arg0: new cr3 value; on MP the cpu number is or'ed into
 *	the low bits first (per-cpu cr3 encoding convention).
 */
ENTRY(set_cr3)
#if	NCPUS > 1
	CPU_NUMBER(%eax)
	orl	4(%esp), %eax
#else	/* NCPUS > 1 */
	movl	4(%esp),%eax		/* get new cr3 value */
#endif	/* NCPUS > 1 */
	/*
	 * Don't set PDBR to a new value (hence invalidating the
	 * "paging cache") if the new value matches the current one.
	 */
	movl	%cr3,%edx		/* get current cr3 value */
	cmpl	%eax,%edx
	je	0f			/* if two are equal, don't set */
	movl	%eax,%cr3		/* load it (and flush cache) */
0:
	ret

/*
 * Read cr3, masking off the cpu number encoded in the low bits on MP.
 */
ENTRY(get_cr3)
	movl	%cr3,%eax
#if	NCPUS > 1
	andl	$(~0x7), %eax		/* remove cpu number */
#endif	/* NCPUS > 1 */
	ret

/*
 * Flush TLB.
 */
ENTRY(flush_tlb)
	movl	%cr3,%eax		/* flush tlb by reloading CR3 */
	movl	%eax,%cr3		/* with itself */
	ret

/*
 * Read cr2 (faulting linear address).
 */
ENTRY(get_cr2)
	movl	%cr2,%eax
	ret

/*
 * Read cr4.  Hand-encoded because the assembler of the day
 * did not know the %cr4 register.
 */
ENTRY(get_cr4)
	.byte	0x0f,0x20,0xe0		/* movl %cr4, %eax */
	ret

/*
 * Write cr4 from arg0.
 */
ENTRY(set_cr4)
	movl	4(%esp), %eax
	.byte	0x0f,0x22,0xe0		/* movl %eax, %cr4 */
	ret
2311
/*
 * Read ldtr (LDT selector), zero-extended into %eax.
 */
Entry(get_ldt)
	xorl	%eax,%eax
	sldt	%ax
	ret

/*
 * Set ldtr from arg0 (low 16 bits = selector).
 */
Entry(set_ldt)
	lldt	4(%esp)
	ret

/*
 * Read task register (TSS selector), zero-extended into %eax.
 */
ENTRY(get_tr)
	xorl	%eax,%eax
	str	%ax
	ret

/*
 * Set task register.  Also clears busy bit of task descriptor
 * (ltr requires an available, non-busy TSS descriptor).
 *	arg0: TSS segment selector
 */
ENTRY(set_tr)
	movl	S_ARG0,%eax		/* get task segment number */
	subl	$8,%esp			/* push space for SGDT */
	sgdt	2(%esp)			/* store GDT limit and base (linear) */
	movl	4(%esp),%edx		/* address GDT */
	movb	$(K_TSS),5(%edx,%eax)	/* fix access byte in task descriptor */
	ltr	%ax			/* load task register */
	addl	$8,%esp			/* clear stack */
	ret				/* and return */

/*
 * Set task-switched flag in CR0 (next FP use will trap).
 */
ENTRY(_setts)
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
	ret
2356
/*
 * io register must not be used on slaves (no AT bus)
 */
/* currently expands to nothing */
#define	ILL_ON_SLAVE


#if	MACH_ASSERT

/* With assertions, I/O routines build a real frame and take B_ARGn */
#define ARG0		B_ARG0
#define ARG1		B_ARG1
#define ARG2		B_ARG2
#define PUSH_FRAME	FRAME
#define POP_FRAME	EMARF

#else	/* MACH_ASSERT */

/* Frameless variants: arguments addressed directly off %esp */
#define ARG0		S_ARG0
#define ARG1		S_ARG1
#define ARG2		S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif	/* MACH_ASSERT */
2380
2381
#if	MACH_KDB || MACH_ASSERT

/*
 * Following routines are also defined as macros in i386/pio.h
 * Compile them when MACH_KDB is configured so that they
 * can be invoked from the debugger.
 */

/*
 * void outb(unsigned char *io_port,
 *	     unsigned char byte)
 *
 * Output a byte to an IO port.
 */
ENTRY(outb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outb	%al,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Input a byte from an IO port.
 */
ENTRY(inb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inb	%dx,%al			/* get the byte */
	POP_FRAME
	ret

/*
 * void outw(unsigned short *io_port,
 *	     unsigned short word)
 *
 * Output a word to an IO port.
 */
ENTRY(outw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outw	%ax,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Input a word from an IO port.
 */
ENTRY(inw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inw	%dx,%ax			/* get the word */
	POP_FRAME
	ret

/*
 * void outl(unsigned int *io_port,
 *	     unsigned int byte)
 *
 * Output an int to an IO port.
 */
ENTRY(outl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address*/
	movl	ARG1,%eax		/* data to output */
	outl	%eax,%dx		/* send it out */
	POP_FRAME
	ret

/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Input an int from an IO port.
 */
ENTRY(inl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	inl	%dx,%eax		/* get the int */
	POP_FRAME
	ret

#endif	/* MACH_KDB || MACH_ASSERT*/
2477
/*
 * void loutb(unsigned byte *io_port,
 *	      unsigned byte *data,
 *	      unsigned int count)
 *
 * Output an array of bytes to an IO port.
 */
ENTRY(loutb)
ENTRY(outsb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsb				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret


/*
 * void loutw(unsigned short *io_port,
 *	      unsigned short *data,
 *	      unsigned int count)
 *
 * Output an array of shorts to an IO port.
 */
ENTRY(loutw)
ENTRY(outsw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsw				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret

/*
 * void loutl(unsigned short io_port,
 *	      unsigned int *data,
 *	      unsigned int count)
 *
 * Output an array of longs to an IO port.
 */
ENTRY(loutl)
ENTRY(outsl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsl				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret
2544
2545
/*
 * void linb(unsigned char *io_port,
 *	     unsigned char *data,
 *	     unsigned int count)
 *
 * Input an array of bytes from an IO port.
 */
ENTRY(linb)
ENTRY(insb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insb				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret


/*
 * void linw(unsigned short *io_port,
 *	     unsigned short *data,
 *	     unsigned int count)
 *
 * Input an array of shorts from an IO port.
 */
ENTRY(linw)
ENTRY(insw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insw				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret


/*
 * void linl(unsigned short io_port,
 *	     unsigned int *data,
 *	     unsigned int count)
 *
 * Input an array of longs from an IO port.
 */
ENTRY(linl)
ENTRY(insl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insl				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret
2613
2614
/*
 * int inst_fetch(int eip, int cs);
 *
 * Fetch instruction byte through an arbitrary code segment.
 * Return -1 if invalid address (fault recovers to inst_fetch_fault).
 */
	.globl	EXT(inst_fetch)
LEXT(inst_fetch)
	movl	S_ARG1, %eax		/* get segment */
	movw	%ax,%fs			/* into FS */
	movl	S_ARG0, %eax		/* get offset */
	RETRY_SECTION
	RETRY(EXT(inst_fetch))		/* re-load FS on retry */
	RECOVERY_SECTION
	RECOVER(EXT(inst_fetch_fault))
	movzbl	%fs:(%eax),%eax		/* load instruction byte */
	ret

LEXT(inst_fetch_fault)
	movl	$-1,%eax		/* return -1 if error */
	ret
2635
2636
#if	MACH_KDP
/*
 * kdp_copy_kmem(char *src, char *dst, int count)
 *
 * Similar to copyin except that both addresses are kernel addresses.
 * Returns the number of bytes actually copied; returns 0 if the
 * source range wraps around the address space or a fault is taken.
 */

ENTRY(kdp_copy_kmem)
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get kernel start address */
	movl	8+S_ARG1,%edi		/* get kernel destination address */

	movl	8+S_ARG2,%edx		/* get count */

	lea	0(%esi,%edx),%eax	/* get kernel end address + 1 */

	cmpl	%esi,%eax
	jb	kdp_vm_read_fail	/* fail if wrap-around */
	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(kdp_vm_read_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(kdp_vm_read_fail)
	rep
	movsb
kdp_vm_read_done:
	movl	8+S_ARG2,%edx		/* get count */
	subl	%ecx,%edx		/* Return number of bytes transfered */
	movl	%edx,%eax

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

kdp_vm_read_fail:
	xorl	%eax,%eax		/* didn't copy a thing. */

	popl	%edi
	popl	%esi
	ret
#endif
2686
2687
/*
 * Done with recovery and retry tables.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END
	RETRY_SECTION
	RETRY_TABLE_END



/*
 * Read debug status register db6.
 */
ENTRY(dr6)
	movl	%db6, %eax
	ret
2701
/* dr<i>(address, type, len, persistence)
 *
 * Program hardware debug register i: record the address in dr_addr[i],
 * load it into %dbI, then merge the enable/type/len control bits into
 * %db7 via the shared tail at 0:.  On entry to the tail:
 *	%eax = breakpoint linear address
 *	%ecx = 2*i, the shift for this register's L/G enable bits
 * Returns the new %db7 value in %eax.
 */
ENTRY(dr0)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)
	movl	%eax, %db0
	movl	$0, %ecx
	jmp	0f
ENTRY(dr1)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+1*4
	movl	%eax, %db1
	movl	$2, %ecx
	jmp	0f
ENTRY(dr2)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+2*4
	movl	%eax, %db2
	movl	$4, %ecx
	jmp	0f

ENTRY(dr3)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+3*4
	movl	%eax, %db3
	movl	$6, %ecx

0:
	pushl	%ebp
	movl	%esp, %ebp

	movl	%db7, %edx
	movl	%edx,EXT(dr_addr)+4*4	/* log old db7 */
	andl	dr_msk(,%ecx,2),%edx	/* clear out new entry */
	movl	%edx,EXT(dr_addr)+5*4	/* log masked db7 */
	movzbl	B_ARG3, %eax		/* persistence -> L/G enable bits */
	andb	$3, %al
	shll	%cl, %eax		/* enables live at bit 2*i */
	orl	%eax, %edx

	movzbl	B_ARG1, %eax		/* type (R/W) field */
	andb	$3, %al
	addb	$0x10, %cl		/* byte-size add must target %cl, not %ecx */
	shll	%cl, %eax
	orl	%eax, %edx

	movzbl	B_ARG2, %eax		/* len field, two bits above type */
	andb	$3, %al
	addb	$0x2, %cl		/* byte-size add must target %cl, not %ecx */
	shll	%cl, %eax
	orl	%eax, %edx
	/*
	 * NOTE(review): for i > 0 the resulting type/len shift (16 + 2*i)
	 * does not line up with the bits cleared by dr_msk[i] (16 + 4*i)
	 * -- looks like a latent bug for dr1-dr3; confirm against the
	 * DR7 layout before relying on multiple breakpoints.
	 */

	movl	%edx, %db7
	movl	%edx,EXT(dr_addr)+7*4	/* log new db7 */
	movl	%edx, %eax
	leave
	ret
2759
	.data

DATA(preemptable)	/* Not on an MP (makes cpu_number() usage unsafe) */
#if	MACH_RT && (NCPUS == 1)
	.long	0	/* FIXME -- Currently disabled */
#else
	.long	0	/* FIX ME -- Currently disabled */
#endif	/* MACH_RT && (NCPUS == 1) */

/*
 * Per-debug-register DR7 clear masks, indexed by register number:
 * each entry clears that register's enable bits and control field.
 */
dr_msk:
	.long	~0x000f0003
	.long	~0x00f0000c
	.long	~0x0f000030
	.long	~0xf00000c0
/* dr0-3 addresses plus db7 history written by dr<i>() (8 longs) */
ENTRY(dr_addr)
	.long	0,0,0,0
	.long	0,0,0,0
	.text
2778
de355530
A
/*
 * Determine cpu model and set global cpuid_xxx variables
 *
 * Relies on 386 eflags bit 18 (AC) always being zero & 486 preserving it.
 * Relies on 486 eflags bit 21 (ID) always being zero & 586 preserving it.
 * Relies on CPUID instruction for next x86 generations
 * (assumes cpuid-family-homogenous MPs; else convert to per-cpu array)
 */

ENTRY(set_cpu_model)
	FRAME
	pushl	%ebx			/* save ebx */
	andl	$~0x3,%esp		/* Align stack to avoid AC fault */
	pushfl				/* push EFLAGS */
	popl	%eax			/* pop into eax */
	movl	%eax,%ecx		/* Save original EFLAGS */
	xorl	$(EFL_AC+EFL_ID),%eax	/* toggle ID,AC bits */
	pushl	%eax			/* push new value */
	popfl				/* through the EFLAGS register */
	pushfl				/* and back */
	popl	%eax			/* into eax */
	movb	$(CPUID_FAMILY_386),EXT(cpuid_family)
					/* default: 386 until proven newer */
	pushl	%ecx			/* push original EFLAGS */
	popfl				/* restore EFLAGS */
	xorl	%ecx,%eax		/* see what changed */
	testl	$ EFL_AC,%eax		/* test AC bit */
	jz	0f			/* if AC toggled (486 or higher) */

	movb	$(CPUID_FAMILY_486),EXT(cpuid_family)
	testl	$ EFL_ID,%eax		/* test ID bit */
	jz	0f			/* if ID toggled use cpuid instruction */

	xorl	%eax,%eax		/* get vendor identification string */
	.word	0xA20F			/* cpuid instruction (hand-encoded) */
	movl	%eax,EXT(cpuid_value)	/* Store high value */
	movl	%ebx,EXT(cpuid_vid)	/* Store byte 0-3 of Vendor ID */
	movl	%edx,EXT(cpuid_vid)+4	/* Store byte 4-7 of Vendor ID */
	movl	%ecx,EXT(cpuid_vid)+8	/* Store byte 8-B of Vendor ID */
	movl	$1,%eax			/* get processor signature */
	.word	0xA20F			/* cpuid instruction */
	movl	%edx,EXT(cpuid_feature)	/* Store feature flags */
	movl	%eax,%ecx		/* Save original signature */
	andb	$0xF,%al		/* Get Stepping ID */
	movb	%al,EXT(cpuid_stepping)	/* Save Stepping ID */
	movl	%ecx,%eax		/* Get original signature */
	shrl	$4,%eax			/* Shift Stepping ID */
	movl	%eax,%ecx		/* Save original signature */
	andb	$0xF,%al		/* Get Model */
	movb	%al,EXT(cpuid_model)	/* Save Model */
	movl	%ecx,%eax		/* Get original signature */
	shrl	$4,%eax			/* Shift Stepping ID */
	movl	%eax,%ecx		/* Save original signature */
	andb	$0xF,%al		/* Get Family */
	movb	%al,EXT(cpuid_family)	/* Save Family */
	movl	%ecx,%eax		/* Get original signature */
	shrl	$4,%eax			/* Shift Stepping ID */
	andb	$0x3,%al		/* Get Type */
	movb	%al,EXT(cpuid_type)	/* Save Type */

	movl	EXT(cpuid_value),%eax	/* Get high value */
	cmpl	$2,%eax			/* Test if processor configuration */
	jle	0f			/* is present */
	movl	$2,%eax			/* get processor configuration */
	.word	0xA20F			/* cpuid instruction */
	movl	%eax,EXT(cpuid_cache)	/* Store byte 0-3 of configuration */
	movl	%ebx,EXT(cpuid_cache)+4	/* Store byte 4-7 of configuration */
	movl	%ecx,EXT(cpuid_cache)+8	/* Store byte 8-B of configuration */
	movl	%edx,EXT(cpuid_cache)+12 /* Store byte C-F of configuration */
0:
	popl	%ebx			/* restore ebx */
	EMARF
	ret				/* return */
2851
1c79356b
A
/*
 * Read cr0.
 */
ENTRY(get_cr0)
	movl	%cr0, %eax
	ret

/*
 * Write cr0 from arg0.
 */
ENTRY(set_cr0)
	movl	4(%esp), %eax
	movl	%eax, %cr0
	ret
2860
2861#ifndef SYMMETRY
2862
/*
 * ffs(mask)
 *
 * Find first set bit, 1-based; returns 0 when mask is zero.
 */
ENTRY(ffs)
	xorl	%eax, %eax		/* default return: no bit found */
	bsfl	S_ARG0, %ecx		/* ecx = index of lowest set bit */
	jz	0f			/* ZF set => mask was zero */
	leal	1(%ecx), %eax		/* convert 0-based index to 1-based */
0:	ret
2873
/*
 * cpu_shutdown()
 * Force reboot: load an empty IDT, then divide by zero.  With no
 * handler installed the resulting fault escalates to a triple
 * fault, which resets the processor.
 */

null_idtr:
	.word	0			/* limit = 0 */
	.long	0			/* base = 0 */

Entry(cpu_shutdown)
	lidt	null_idtr		/* disable the interrupt handler */
	xor	%ecx,%ecx		/* generate a divide by zero */
	div	%ecx,%eax		/* reboot now */
	ret				/* this will "never" be executed */

#endif	/* SYMMETRY */
2890
2891
/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
	movl	S_ARG1, %edx		/* edx = bit string base */
	movl	S_ARG0, %ecx		/* ecx = bit number */
	btsl	%ecx, (%edx)		/* set the requested bit */
	ret
2900
/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
	movl	S_ARG1, %edx		/* edx = bit string base */
	movl	S_ARG0, %ecx		/* ecx = bit number */
	btrl	%ecx, (%edx)		/* clear the requested bit */
	ret
2909
/*
 * ffsbit(int *s) - find first set bit in bit string
 *
 * Scans 32-bit words forward until one with a set bit is found;
 * caller guarantees some bit is eventually set.
 */
ENTRY(ffsbit)
	movl	S_ARG0, %edx		/* edx = scan pointer */
	xorl	%ecx, %ecx		/* ecx = bit offset of current word */
1:
	bsfl	(%edx), %eax		/* any bit set in this word? */
	jnz	2f			/* yes: eax = index within word */
	addl	$4, %edx		/* advance to next word */
	addl	$32, %ecx		/* account for 32 more bits */
	jmp	1b
2:
	addl	%ecx, %eax		/* overall bit number */
	ret
2925
/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string.
 * Returns -1 if the bit is set, 0 otherwise.
 */

ENTRY(testbit)
	movl	S_ARG1,%edx		/* edx = bit string base */
	movl	S_ARG0,%ecx		/* ecx = bit number to test */
	btl	%ecx,(%edx)		/* CF = bit value */
	sbbl	%eax,%eax		/* eax = CF ? -1 : 0 */
	ret
2938
ENTRY(get_pc)
	movl	4(%ebp),%eax		/* caller's return address -- assumes */
					/* the caller built an %ebp frame */
	ret
2942
#if	ETAP

ENTRY(etap_get_pc)
	movl	4(%ebp), %eax		/* fetch pc of caller */
	ret

/*
 * Combine two time values into a 64-bit etap time in edx:eax:
 * returns arg0 * 10^9 + arg1 (presumably {sec, nsec} -- confirm
 * against callers).
 */
ENTRY(tvals_to_etap)
	movl	S_ARG0, %eax
	movl	$1000000000, %ecx
	mull	%ecx			/* edx:eax = arg0 * 10^9 */
	addl	S_ARG1, %eax
	adc	$0, %edx		/* propagate carry into high word */
	ret

/* etap_time_t
 * etap_time_sub(etap_time_t stop, etap_time_t start)
 *
 * 64bit subtract, returns stop - start
 */
ENTRY(etap_time_sub)
	movl	S_ARG0, %eax		/* stop.low */
	movl	S_ARG1, %edx		/* stop.hi */
	subl	S_ARG2, %eax		/* stop.lo - start.lo */
	sbbl	S_ARG3, %edx		/* stop.hi - start.hi */
	ret

#endif	/* ETAP */
2970
#if	NCPUS > 1

ENTRY(minsecurity)
	pushl	%ebp
	movl	%esp,%ebp
	/* falls through into jail below -- never returns */
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 */
ENTRY(jail)
	jmp	EXT(jail)		/* spin here forever */

#endif	/* NCPUS > 1 */
2984
/*
 * delay(microseconds)
 *
 * Calibrated spin loop: burns delaycount inner iterations per
 * requested microsecond.  Returns immediately for counts <= 0.
 */

ENTRY(delay)
	movl	4(%esp),%eax		/* eax = microseconds requested */
	testl	%eax, %eax
	jle	2f			/* nothing to do for count <= 0 */
	movl	EXT(delaycount), %ecx	/* iterations per microsecond */
0:
	movl	%ecx, %edx		/* reload inner counter */
1:
	decl	%edx			/* spin one calibrated microsecond */
	jne	1b
	decl	%eax			/* one microsecond elapsed */
	jne	0b
2:
	ret
3003
/*
 * unsigned int
 * div_scale(unsigned int dividend,
 *	     unsigned int divisor,
 *	     unsigned int *scale)
 *
 * This function returns (dividend << *scale) / divisor where *scale
 * is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (((dividend >> *scale) >= divisor))
 *		(*scale)++;
 *	*scale = 32 - *scale;
 *	return ((dividend << *scale) / divisor);
 */
ENTRY(div_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* *scale = 0 */
	xorl	%eax, %eax		/* edx:eax = dividend << 32 */
	movl	ARG0, %edx		/* get dividend */
0:
	cmpl	ARG1, %edx		/* if (divisor > dividend) */
	jle	1f			/* goto 1f */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* dividend >> 1 (64-bit shift, */
	shrl	$1, %edx		/* high then low) */
	jmp	0b			/* goto 0b */
1:
	divl	ARG1			/* (dividend << (32 - *scale)) / divisor */
	movl	ARG2, %edx		/* get scale */
	movl	$32, (%edx)		/* *scale = 32 */
	subl	%ecx, (%edx)		/* *scale -= %ecx */
	POP_FRAME
	ret
3041
/*
 * unsigned int
 * mul_scale(unsigned int multiplicand,
 *	     unsigned int multiplier,
 *	     unsigned int *scale)
 *
 * This function returns ((multiplicand * multiplier) >> *scale) where
 * scale is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (overflow((multiplicand * multiplier) >> *scale))
 *		(*scale)++;
 *	return ((multiplicand * multiplier) >> *scale);
 */
ENTRY(mul_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* *scale = 0 */
	movl	ARG0, %eax		/* get multiplicand */
	mull	ARG1			/* edx:eax = multiplicand * multiplier */
0:
	cmpl	$0, %edx		/* if (!overflow()) */
	je	1f			/* goto 1 */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* (multiplicand * multiplier) >> 1 */
	shrl	$1, %edx		/* (multiplicand * multiplier) >> 1 */
	jmp	0b
1:
	movl	ARG2, %edx		/* get scale */
	movl	%ecx, (%edx)		/* set *scale */
	POP_FRAME
	ret
3076
#if	NCPUS > 1
/*
 * C-callable wrapper around the CPU_NUMBER macro.
 */
ENTRY(_cpu_number)
	CPU_NUMBER(%eax)
	ret
#endif	/* NCPUS > 1 */
3082
#ifdef	MACH_BSD
/*
 * BSD System call entry point.
 * Builds a full saved-state frame on the PCB stack, switches to the
 * kernel data segments and kernel stack, then dispatches to the C
 * handler unix_syscall().  The syscall number arrives negated in %eax
 * (hence negl below -- confirm against the gate that raises this trap).
 */

Entry(trap_unix_syscall)
	pushf				/* save flags as soon as possible */
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$ CPU_DATA,%dx
	mov	%dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places
 * (pushf ran before the frame layout expected by R_* offsets).
 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

	CPU_NUMBER(%edx)
	TIME_TRAP_UENTRY		/* start charging kernel time */

	negl	%eax			/* get system call number */
	shll	$4,%eax			/* manual indexing */

	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */

/*
 * Register use on entry:
 *	eax contains syscall number
 *	ebx contains user regs pointer
 */
	CAH(call_call)
	pushl	%ebx			/* Push the regs set onto stack */
	call	EXT(unix_syscall)
	popl	%ebx
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
3142
/*
 * Entry point for machdep system calls.
 * Identical frame setup to trap_unix_syscall above, but dispatches
 * to machdep_syscall().
 */

Entry(trap_machdep_syscall)
	pushf				/* save flags as soon as possible */
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$ CPU_DATA,%dx
	mov	%dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places
 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

	CPU_NUMBER(%edx)
	TIME_TRAP_UENTRY

	negl	%eax			/* get system call number */
	shll	$4,%eax			/* manual indexing */

	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */

/*
 * Register use on entry:
 *	eax contains syscall number
 *	ebx contains user regs pointer
 */
	CAH(call_call)
	pushl	%ebx
	call	EXT(machdep_syscall)
	popl	%ebx
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
3201
/*
 * Entry point for Mach 2.5 compatibility system calls.
 * Identical frame setup to trap_unix_syscall above, but dispatches
 * to mach25_syscall().
 */
Entry(trap_mach25_syscall)
	pushf				/* save flags as soon as possible */
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$ CPU_DATA,%dx
	mov	%dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places
 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

	CPU_NUMBER(%edx)
	TIME_TRAP_UENTRY

	negl	%eax			/* get system call number */
	shll	$4,%eax			/* manual indexing */

	CPU_NUMBER(%edx)
	movl	CX(EXT(kernel_stack),%edx),%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */

/*
 * Register use on entry:
 *	eax contains syscall number
 *	ebx contains user regs pointer
 */
	CAH(call_call)
	pushl	%ebx
	call	EXT(mach25_syscall)
	popl	%ebx
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
3256
3257#endif