]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/locore.s
xnu-792.17.14.tar.gz
[apple/xnu.git] / osfmk / i386 / locore.s
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_rt.h>
58 #include <platforms.h>
59 #include <mach_kdb.h>
60 #include <mach_kgdb.h>
61 #include <mach_kdp.h>
62 #include <stat_time.h>
63 #include <mach_assert.h>
64
65 #include <sys/errno.h>
66 #include <i386/asm.h>
67 #include <i386/cpuid.h>
68 #include <i386/eflags.h>
69 #include <i386/proc_reg.h>
70 #include <i386/trap.h>
71 #include <assym.s>
72 #include <mach/exception_types.h>
73
74 #include <i386/mp.h>
75
76 #define PREEMPT_DEBUG_LOG 0
77
78
79 /*
80 * PTmap is recursive pagemap at top of virtual address space.
81 * Within PTmap, the page directory can be found (third indirection).
82 */
83 .globl _PTmap,_PTD,_PTDpde
84 .set _PTmap,(PTDPTDI << PDESHIFT)
85 .set _PTD,_PTmap + (PTDPTDI * NBPG)
86 .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
87
88 /*
89 * APTmap, APTD is the alternate recursive pagemap.
90 * It's used when modifying another process's page tables.
91 */
92 .globl _APTmap,_APTD,_APTDpde
93 .set _APTmap,(APTDPTDI << PDESHIFT)
94 .set _APTD,_APTmap + (APTDPTDI * NBPG)
/*
 * NOTE(review): _APTDpde is intentionally derived from _PTD (not _APTD):
 * the page-directory entry controlling the alternate map lives in the
 * primary page directory, per the traditional BSD/Mach recursive-map
 * layout. Confirm against pmap.h before "fixing" this.
 */
95 .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
96
97 #if __MACHO__
98 /* Under Mach-O, etext is a variable which contains
99 * the last text address
100 */
101 #define ETEXT_ADDR (EXT(etext))
102 #else
103 /* Under ELF and other non-Mach-O formats, the address of
104 * etext represents the last text address
105 */
106 #define ETEXT_ADDR $ EXT(etext)
107 #endif
108
/*
 * CX(addr,reg): index a table of 4-byte entries at addr by CPU number
 * in reg — used below for per-CPU arrays (mp_dbtss, kgdb_stacks, ...).
 */
109 #define CX(addr,reg) addr(,reg,4)
110
111 .text
112 locore_start:
113
114 /*
115 * Fault recovery.
116 */
117
118 #ifdef __MACHO__
119 #define RECOVERY_SECTION .section __VECTORS, __recover
120 #define RETRY_SECTION .section __VECTORS, __retries
121 #else
/*
 * Both section macros must also be defined for non-Mach-O builds.
 * (Bug fix: the second define below previously repeated RECOVERY_SECTION,
 * leaving RETRY_SECTION undefined and breaking every use of it below,
 * e.g. the retry-table allocation following these macros.)
 */
122 #define RECOVERY_SECTION .text
123 #define RETRY_SECTION .text
124 #endif
125
/*
 * Open the fault-recovery table: emits the recover_table start label in
 * the recovery section, then returns to .text.
 */
126 #define RECOVER_TABLE_START \
127 .align 2 ; \
128 .globl EXT(recover_table) ;\
129 LEXT(recover_table) ;\
130 .text
131
/*
 * RECOVER(addr): register a recovery entry pair (faulting PC, handler).
 * The local label 9: marks the instruction that may fault; a fault there
 * resumes at addr.
 */
132 #define RECOVER(addr) \
133 .align 2; \
134 .long 9f ;\
135 .long addr ;\
136 .text ;\
137 9:
138
/* Close the fault-recovery table with the recover_table_end label. */
139 #define RECOVER_TABLE_END \
140 .align 2 ;\
141 .globl EXT(recover_table_end) ;\
142 LEXT(recover_table_end) ;\
143 .text
144
145 /*
146 * Retry table for certain successful faults.
147 */
148 #define RETRY_TABLE_START \
149 .align 3; \
150 .globl EXT(retry_table) ;\
151 LEXT(retry_table) ;\
152 .text
153
/* RETRY(addr): like RECOVER, but records a retry target for the faulting PC. */
154 #define RETRY(addr) \
155 .align 3 ;\
156 .long 9f ;\
157 .long addr ;\
158 .text ;\
159 9:
160
/* Close the retry table with the retry_table_end label. */
161 #define RETRY_TABLE_END \
162 .align 3; \
163 .globl EXT(retry_table_end) ;\
164 LEXT(retry_table_end) ;\
165 .text
166
167 /*
168 * Allocate recovery and retry tables.
 * Emits the start labels now; the matching *_TABLE_END labels are laid
 * down elsewhere after all RECOVER/RETRY entries have been emitted.
169 */
170 RECOVERY_SECTION
171 RECOVER_TABLE_START
172 RETRY_SECTION
173 RETRY_TABLE_START
174
175 /*
176 * Timing routines.
177 */
/*
 * void timer_update(timer_t tp, uint32_t high, uint32_t low)
 * Store a 64-bit timer value so a concurrent timer_grab() never sees a
 * torn value: write the high word into the check field first, then the
 * low word, then the high word proper.
 */
178 Entry(timer_update)
179 movl 4(%esp),%ecx
180 movl 8(%esp),%eax
181 movl 12(%esp),%edx
182 movl %eax,TIMER_HIGHCHK(%ecx)
183 movl %edx,TIMER_LOW(%ecx)
184 movl %eax,TIMER_HIGH(%ecx)
185 ret
186
/*
 * uint64 timer_grab(timer_t tp)
 * Lock-free 64-bit read: retry until HIGH and HIGHCHK agree, meaning no
 * writer was mid-update. Result returned in %edx:%eax.
 */
187 Entry(timer_grab)
188 movl 4(%esp),%ecx
189 0: movl TIMER_HIGH(%ecx),%edx
190 movl TIMER_LOW(%ecx),%eax
191 cmpl TIMER_HIGHCHK(%ecx),%edx
192 jne 0b
193 ret
194
195 #if STAT_TIME
196
197 #define TIME_TRAP_UENTRY
198 #define TIME_TRAP_UEXIT
199 #define TIME_INT_ENTRY
200 #define TIME_INT_EXIT
201
202 #else
203 /*
204 * Nanosecond timing.
205 */
206
207 /*
208 * Low 32-bits of nanotime returned in %eax.
209 * Computed from tsc using conversion scale/shift from per-cpu data.
210 * Uses %ecx and %edx.
 * (SHRD masks its count mod 32, so a shift of exactly 32 is handled by
 * the andb/cmovne pair below, which substitutes %edx for %eax.)
211 */
212 #define NANOTIME32 \
213 pushl %esi /* save %esi */ ;\
214 movl %gs:CPU_THIS,%esi /* per-cpu data ptr */ ;\
215 addl $(CPU_RTC_NANOTIME),%esi /* esi -> per-cpu nanotime*/ ;\
216 rdtsc /* edx:eax = tsc */ ;\
217 subl RTN_TSC(%esi),%eax /* eax = (tsc - base_tsc) */ ;\
218 mull RTN_SCALE(%esi) /* eax *= scale */ ;\
219 movl RTN_SHIFT(%esi),%ecx /* ecx = shift */ ;\
220 shrdl %cl,%edx,%eax /* edx:eax >> shift */ ;\
221 andb $32,%cl /* shift == 32? */ ;\
222 cmovnel %edx,%eax /* %eax = %edx if so */ ;\
223 addl RTN_NANOS(%esi),%eax /* add base ns */ ;\
224 popl %esi
225
226 /*
227 * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
 * The carry is accumulated into TIMER_HIGHCHK first and only then copied
 * to TIMER_HIGH, preserving the torn-read protocol used by timer_grab.
228 */
229 #define TIMER_UPDATE(treg,dreg) \
230 addl TIMER_LOW(treg),dreg /* add delta low bits */ ;\
231 adcl $0,TIMER_HIGHCHK(treg) /* add carry check bits */ ;\
232 movl dreg,TIMER_LOW(treg) /* store updated low bits */ ;\
233 movl TIMER_HIGHCHK(treg),dreg /* copy high check bits */ ;\
234 movl dreg,TIMER_HIGH(treg) /* to high bits */
235
236 /*
237 * Add time delta to old timer and start new.
 * old/new are timer-field name prefixes (USER, SYSTEM); the constant
 * new##_TIMER - old##_TIMER re-points %ecx from the old timer to the
 * new one. Interrupts are disabled (cli/sti) across the read-modify-
 * write so the current-timer state stays consistent.
238 */
239 #define TIMER_EVENT(old,new) \
240 pushl %eax /* must be invariant */ ;\
241 cli /* block interrupts */ ;\
242 NANOTIME32 /* eax low bits nanosecs */ ;\
243 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
244 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
245 movl %eax,%edx /* save timestamp in %edx */ ;\
246 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
247 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
248 addl $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\
249 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ ;\
250 movl %gs:CPU_PROCESSOR,%edx /* get current processor */ ;\
251 movl %ecx,CURRENT_TIMER(%edx) /* set current timer */ ;\
252 sti /* interrupts on */ ;\
253 popl %eax /* must be invariant */
254
255 /*
256 * Update time on user trap entry.
257 * Uses %ecx,%edx.
258 */
259 #define TIME_TRAP_UENTRY TIMER_EVENT(USER,SYSTEM)
260
261 /*
262 * update time on user trap exit.
263 * Uses %ecx,%edx.
264 */
265 #define TIME_TRAP_UEXIT TIMER_EVENT(SYSTEM,USER)
266
267 /*
268 * update time on interrupt entry.
269 * Uses %eax,%ecx,%edx.
 * Charges elapsed time to the processor's current timer, then starts
 * the active thread's system timer for the interrupt itself. The
 * CURRENT_TIMER pointer is deliberately left alone so TIME_INT_EXIT
 * can resume the interrupted timer.
270 */
271 #define TIME_INT_ENTRY \
272 NANOTIME32 /* eax low bits nanosecs */ ;\
273 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
274 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
275 movl %eax,%edx /* save timestamp in %edx */ ;\
276 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
277 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
278 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
279 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
280 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
281
282 /*
283 * update time on interrupt exit.
284 * Uses %eax, %ecx, %edx.
 * Mirror of TIME_INT_ENTRY: charges the interrupt's duration to the
 * thread's system timer, then restamps the interrupted timer.
285 */
286 #define TIME_INT_EXIT \
287 NANOTIME32 /* eax low bits nanosecs */ ;\
288 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
289 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
290 movl %eax,%edx /* save timestamp in %edx */ ;\
291 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
292 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
293 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
294 movl CURRENT_TIMER(%ecx),%ecx /* interrupted timer */ ;\
295 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
296
297 #endif /* STAT_TIME */
298
299 /*
300 * Encapsulate the transfer of exception stack frames between a PCB
301 * and a thread stack. Since the whole point of these is to emulate
302 * a call or exception that changes privilege level, both macros
303 * assume that there is no user esp or ss stored in the source
304 * frame (because there was no change of privilege to generate them).
305 */
306
307 /*
308 * Transfer a stack frame from a thread's user stack to its PCB.
309 * We assume the thread and stack addresses have been loaded into
310 * registers (our arguments).
311 *
312 * The macro overwrites edi, esi, ecx and whatever registers hold the
313 * thread and stack addresses (which can't be one of the above three).
314 * The thread address is overwritten with the address of its saved state
315 * (where the frame winds up).
316 *
317 * Must be called on kernel stack.
 *
 * NOTE(review): R_UESP bytes are copied (the saved-state fields that
 * precede the uesp slot); uesp and ss are then synthesized by hand,
 * since no privilege change occurred to push them.
318 */
319 #define FRAME_STACK_TO_PCB(thread, stkp) ;\
320 movl ACT_PCB(thread),thread /* get act`s PCB */ ;\
321 leal PCB_ISS(thread),%edi /* point to PCB`s saved state */;\
322 movl %edi,thread /* save for later */ ;\
323 movl stkp,%esi /* point to start of frame */ ;\
324 movl $ R_UESP,%ecx ;\
325 sarl $2,%ecx /* word count for transfer */ ;\
326 cld /* we`re incrementing */ ;\
327 rep ;\
328 movsl /* transfer the frame */ ;\
329 addl $ R_UESP,stkp /* derive true "user" esp */ ;\
330 movl stkp,R_UESP(thread) /* store in PCB */ ;\
331 movl $0,%ecx ;\
332 mov %ss,%cx /* get current ss */ ;\
333 movl %ecx,R_SS(thread) /* store in PCB */
334
335 /*
336 * Transfer a stack frame from a thread's PCB to the stack pointed
337 * to by the PCB. We assume the thread address has been loaded into
338 * a register (our argument).
339 *
340 * The macro overwrites edi, esi, ecx and whatever register holds the
341 * thread address (which can't be one of the above three). The
342 * thread address is overwritten with the address of its saved state
343 * (where the frame winds up).
344 *
345 * Must be called on kernel stack.
 *
 * NOTE(review): %es is temporarily switched to USER_DS when the target
 * task is not kernel-loaded, so the rep/movsl store lands in the user
 * address space; it is restored from %ss afterwards.
346 */
347 #define FRAME_PCB_TO_STACK(thread) ;\
348 movl ACT_PCB(thread),%esi /* get act`s PCB */ ;\
349 leal PCB_ISS(%esi),%esi /* point to PCB`s saved state */;\
350 movl R_UESP(%esi),%edi /* point to end of dest frame */;\
351 movl ACT_MAP(thread),%ecx /* get act's map */ ;\
352 movl MAP_PMAP(%ecx),%ecx /* get map's pmap */ ;\
353 cmpl EXT(kernel_pmap), %ecx /* If kernel loaded task */ ;\
354 jz 1f /* use kernel data segment */ ;\
355 movl $ USER_DS,%ecx /* else use user data segment */;\
356 mov %cx,%es ;\
357 1: ;\
358 movl $ R_UESP,%ecx ;\
359 subl %ecx,%edi /* derive start of frame */ ;\
360 movl %edi,thread /* save for later */ ;\
361 sarl $2,%ecx /* word count for transfer */ ;\
362 cld /* we`re incrementing */ ;\
363 rep ;\
364 movsl /* transfer the frame */ ;\
365 mov %ss,%cx /* restore kernel segments */ ;\
366 mov %cx,%es
367
368 #undef PDEBUG
369
370 #ifdef PDEBUG
371
372 /*
373 * Traditional, not ANSI.
 *
 * CAH(label): debug checkpoint. Emits label##count / label##limit in
 * .data and bumps the counter each time the checkpoint is passed. If a
 * nonzero limit has been set (e.g. from a debugger) and the counter
 * reaches it, spin until the limit is changed, letting the debugger
 * catch the Nth pass.
374 */
375 #define CAH(label) \
376 .data ;\
377 .globl label/**/count ;\
378 label/**/count: ;\
379 .long 0 ;\
380 .globl label/**/limit ;\
381 label/**/limit: ;\
382 .long 0 ;\
383 .text ;\
384 addl $1,%ss:label/**/count ;\
385 cmpl $0,label/**/limit ;\
386 jz label/**/exit ;\
387 pushl %eax ;\
388 label/**/loop: ;\
389 movl %ss:label/**/count,%eax ;\
390 cmpl %eax,%ss:label/**/limit ;\
391 je label/**/loop ;\
392 popl %eax ;\
393 label/**/exit:
394
395 #else /* PDEBUG */
396
/* Non-debug builds: CAH compiles to nothing. */
397 #define CAH(label)
398
399 #endif /* PDEBUG */
400
401 #if MACH_KDB
402 /*
403 * Last-ditch debug code to handle faults that might result
404 * from entering kernel (from collocated server) on an invalid
405 * stack. On collocated entry, there's no hardware-initiated
406 * stack switch, so a valid stack must be in place when an
407 * exception occurs, or we may double-fault.
408 *
409 * In case of a double-fault, our only recourse is to switch
410 * hardware "tasks", so that we avoid using the current stack.
411 *
412 * The idea here is just to get the processor into the debugger,
413 * post-haste. No attempt is made to fix up whatever error got
414 * us here, so presumably continuing from the debugger will
415 * simply land us here again -- at best.
416 */
417 #if 0
418 /*
419 * Note that the per-fault entry points are not currently
420 * functional. The only way to make them work would be to
421 * set up separate TSS's for each fault type, which doesn't
422 * currently seem worthwhile. (The offset part of a task
423 * gate is always ignored.) So all faults that task switch
424 * currently resume at db_task_start.
425 */
426 /*
427 * Double fault (Murphy's point) - error code (0) on stack
428 */
429 Entry(db_task_dbl_fault)
430 popl %eax
431 movl $(T_DOUBLE_FAULT),%ebx
432 jmp db_task_start
433 /*
434 * Segment not present - error code on stack
435 */
436 Entry(db_task_seg_np)
437 popl %eax
438 movl $(T_SEGMENT_NOT_PRESENT),%ebx
439 jmp db_task_start
440 /*
441 * Stack fault - error code on (current) stack
442 */
443 Entry(db_task_stk_fault)
444 popl %eax
445 movl $(T_STACK_FAULT),%ebx
446 jmp db_task_start
447 /*
448 * General protection fault - error code on stack
449 */
450 Entry(db_task_gen_prot)
451 popl %eax
452 movl $(T_GENERAL_PROTECTION),%ebx
453 jmp db_task_start
454 #endif /* 0 */
455 /*
456 * The entry point where execution resumes after last-ditch debugger task
457 * switch.
 *
 * Presumably entered via a task gate with %eax = error code and
 * %ebx = trap number (that is what the disabled per-fault stubs above
 * set up) — TODO confirm for the active configuration. Builds an
 * i386_saved_state on the current stack, reconstructs the rest of it
 * from the previous TSS, and drops into the kernel debugger. The
 * trailing iret will typically just re-fault (see comment).
458 */
459 Entry(db_task_start)
460 movl %esp,%edx
461 subl $ISS_SIZE,%edx
462 movl %edx,%esp /* allocate i386_saved_state on stack */
463 movl %eax,R_ERR(%esp)
464 movl %ebx,R_TRAPNO(%esp)
465 pushl %edx
466 CPU_NUMBER(%edx)
467 movl CX(EXT(mp_dbtss),%edx),%edx
468 movl TSS_LINK(%edx),%eax
469 pushl %eax /* pass along selector of previous TSS */
470 call EXT(db_tss_to_frame)
471 popl %eax /* get rid of TSS selector */
472 call EXT(db_trap_from_asm)
473 addl $0x4,%esp
474 /*
475 * And now...?
476 */
477 iret /* ha, ha, ha... */
478 #endif /* MACH_KDB */
479
480 /*
481 * Trap/interrupt entry points.
482 *
483 * All traps must create the following save area on the PCB "stack":
484 *
485 * gs
486 * fs
487 * es
488 * ds
489 * edi
490 * esi
491 * ebp
492 * cr2 if page fault - otherwise unused
493 * ebx
494 * edx
495 * ecx
496 * eax
497 * trap number
498 * error code
499 * eip
500 * cs
501 * eflags
502 * user esp - if from user
503 * user ss - if from user
504 * es - if from V86 thread
505 * ds - if from V86 thread
506 * fs - if from V86 thread
507 * gs - if from V86 thread
508 *
509 */
510
511 /*
512 * General protection or segment-not-present fault.
513 * Check for a GP/NP fault in the kernel_return
514 * sequence; if there, report it as a GP/NP fault on the user's instruction.
515 *
516 * esp-> 0: trap code (NP or GP)
517 * 4: segment number in error
518 * 8 eip
519 * 12 cs
520 * 16 eflags
521 * 20 old registers (trap is from kernel)
522 */
523 Entry(t_gen_prot)
524 pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
525 jmp trap_check_kernel_exit /* check for kernel exit sequence */
526
527 Entry(t_segnp)
528 pushl $(T_SEGMENT_NOT_PRESENT)
529 /* indicate fault type */
530
/*
 * The faulting EIP at 8(%esp) identifies which kret_* instruction in
 * return_from_kernel faulted; each case below re-attributes the fault
 * to the user's state still on the stack.
 */
531 trap_check_kernel_exit:
532 testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
533 jnz EXT(alltraps) /* isn`t kernel trap if so */
534 testl $3,12(%esp) /* is trap from kernel mode? */
535 jne EXT(alltraps) /* if so: */
536 /* check for the kernel exit sequence */
537 cmpl $ EXT(kret_iret),8(%esp) /* on IRET? */
538 je fault_iret
539 cmpl $ EXT(kret_popl_ds),8(%esp) /* popping DS? */
540 je fault_popl_ds
541 cmpl $ EXT(kret_popl_es),8(%esp) /* popping ES? */
542 je fault_popl_es
543 cmpl $ EXT(kret_popl_fs),8(%esp) /* popping FS? */
544 je fault_popl_fs
545 cmpl $ EXT(kret_popl_gs),8(%esp) /* popping GS? */
546 je fault_popl_gs
547 take_fault: /* if none of the above: */
548 jmp EXT(alltraps) /* treat as normal trap. */
549
550 /*
551 * GP/NP fault on IRET: CS or SS is in error.
552 * All registers contain the user's values.
553 *
554 * on SP is
555 * 0 trap number
556 * 4 errcode
557 * 8 eip
558 * 12 cs --> trapno
559 * 16 efl --> errcode
560 * 20 user eip
561 * 24 user cs
562 * 28 user eflags
563 * 32 user esp
564 * 36 user ss
565 */
/*
 * Slide trapno/errcode down over the kernel eip/cs/efl words so the
 * remaining frame looks like a fault taken directly from user mode,
 * then enter the common trap path.
 */
566 fault_iret:
567 movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
568 popl %eax /* get trap number */
569 movl %eax,12-4(%esp) /* put in user trap number */
570 popl %eax /* get error code */
571 movl %eax,16-8(%esp) /* put in user errcode */
572 popl %eax /* restore eax */
573 CAH(fltir)
574 jmp EXT(alltraps) /* take fault */
575
576 /*
577 * Fault restoring a segment register. The user's registers are still
578 * saved on the stack. The offending segment register has not been
579 * popped.
 *
 * Each fault_popl_* case discards the kernel trap frame, then falls
 * into the push_* chain to re-push the segment registers that had not
 * yet been popped, rebuilding the alltraps frame layout before
 * re-entering at trap_set_segs with the user's trapno/errcode.
580 */
581 fault_popl_ds:
582 popl %eax /* get trap number */
583 popl %edx /* get error code */
584 addl $12,%esp /* pop stack to user regs */
585 jmp push_es /* (DS on top of stack) */
586 fault_popl_es:
587 popl %eax /* get trap number */
588 popl %edx /* get error code */
589 addl $12,%esp /* pop stack to user regs */
590 jmp push_fs /* (ES on top of stack) */
591 fault_popl_fs:
592 popl %eax /* get trap number */
593 popl %edx /* get error code */
594 addl $12,%esp /* pop stack to user regs */
595 jmp push_gs /* (FS on top of stack) */
596 fault_popl_gs:
597 popl %eax /* get trap number */
598 popl %edx /* get error code */
599 addl $12,%esp /* pop stack to user regs */
600 jmp push_segregs /* (GS on top of stack) */
601
602 push_es:
603 pushl %es /* restore es, */
604 push_fs:
605 pushl %fs /* restore fs, */
606 push_gs:
607 pushl %gs /* restore gs. */
608 push_segregs:
609 movl %eax,R_TRAPNO(%esp) /* set trap number */
610 movl %edx,R_ERR(%esp) /* set error code */
611 CAH(fltpp)
612 jmp trap_set_segs /* take trap */
613
614 /*
615 * Debug trap. Check for single-stepping across system call into
616 * kernel. If this is the case, taking the debug trap has turned
617 * off single-stepping - save the flags register with the trace
618 * bit set.
 * If the trap hit exactly at syscall_entry or trap_unix_addr, the
 * pushed eip/cs are dropped and the corresponding entry path is
 * resumed; otherwise it is handled as an ordinary T_DEBUG trap.
619 */
620 Entry(t_debug)
621 testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
622 jnz 0f /* isn`t kernel trap if so */
623 testl $3,4(%esp) /* is trap from kernel mode? */
624 jnz 0f /* if so: */
625 cmpl $syscall_entry,(%esp) /* system call entry? */
626 jne 1f /* if so: */
627 /* flags are sitting where syscall */
628 /* wants them */
629 addl $8,%esp /* remove eip/cs */
630 jmp syscall_entry_2 /* continue system call entry */
631
632 1: cmpl $trap_unix_addr,(%esp)
633 jne 0f
634 addl $8,%esp
635 jmp trap_unix_2
636
637 0: pushl $0 /* otherwise: */
638 pushl $(T_DEBUG) /* handle as normal */
639 jmp EXT(alltraps) /* debug fault */
640
641 /*
642 * Page fault traps save cr2.
 * %cr2 (the faulting linear address) is stashed in the otherwise-unused
 * esp slot of the pusha frame (see the save-area layout comment above).
643 */
644 Entry(t_page_fault)
645 pushl $(T_PAGE_FAULT) /* mark a page fault trap */
646 pusha /* save the general registers */
647 movl %cr2,%eax /* get the faulting address */
648 movl %eax,12(%esp) /* save in esp save slot */
649 jmp trap_push_segs /* continue fault */
650
651 /*
652 * All 'exceptions' enter here with:
653 * esp-> trap number
654 * error code
655 * old eip
656 * old cs
657 * old eflags
658 * old esp if trapped from user
659 * old ss if trapped from user
660 *
661 * NB: below use of CPU_NUMBER assumes that macro will use correct
662 * segment register for any kernel data accesses.
663 */
664 Entry(alltraps)
665 pusha /* save the general registers */
666 trap_push_segs:
667 pushl %ds /* save the segment registers */
668 pushl %es
669 pushl %fs
670 pushl %gs
671
672 trap_set_segs:
673 movl %ss,%eax
674 movl %eax,%ds
675 movl %eax,%es /* switch to kernel data seg */
676 cld /* clear direction flag */
677 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
678 jnz trap_from_user /* user mode trap if so */
679 testb $3,R_CS(%esp) /* user mode trap? */
680 jnz trap_from_user
681 cmpl $0,%gs:CPU_ACTIVE_KLOADED
682 je trap_from_kernel /* if clear, truly in kernel */
683 #ifdef FIXME
684 cmpl ETEXT_ADDR,R_EIP(%esp) /* pc within kernel? */
685 jb trap_from_kernel
686 #endif
687 trap_from_kloaded:
688 /*
689 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
690 * so transfer the stack frame into the PCB explicitly, then
691 * start running on resulting "PCB stack". We have to set
692 * up a simulated "uesp" manually, since there's none in the
693 * frame.
694 */
695 mov $ CPU_DATA_GS,%dx
696 mov %dx,%gs
697 CAH(atstart)
698 movl %gs:CPU_ACTIVE_KLOADED,%ebx
699 movl %gs:CPU_KERNEL_STACK,%eax
700 xchgl %esp,%eax
701 FRAME_STACK_TO_PCB(%ebx,%eax)
702 CAH(atend)
703 jmp EXT(take_trap)
704
/* Trap came from ring 3 (or V86): charge time, switch to kernel stack. */
705 trap_from_user:
706 mov $ CPU_DATA_GS,%ax
707 mov %ax,%gs
708
709 TIME_TRAP_UENTRY
710
711 movl %gs:CPU_KERNEL_STACK,%ebx
712 xchgl %ebx,%esp /* switch to kernel stack */
713 /* user regs pointer already set */
/* Common user-trap dispatch: %ebx = saved-state pointer. */
714 LEXT(take_trap)
715 pushl %ebx /* record register save area */
716 pushl %ebx /* pass register save area to trap */
717 call EXT(user_trap) /* call user trap routine */
718 movl 4(%esp),%esp /* switch back to PCB stack */
719
720 /*
721 * Return from trap or system call, checking for ASTs.
722 * On PCB stack.
723 */
724
725 LEXT(return_from_trap)
726 movl %gs:CPU_PENDING_AST,%edx
727 cmpl $0,%edx
728 je EXT(return_to_user) /* no pending ASTs: leave */
729
/* Otherwise take the AST on the kernel stack, then re-check. */
730 movl %gs:CPU_KERNEL_STACK,%esp
731 /* switch to kernel stack */
732 pushl $0 /* push preemption flag */
733 call EXT(i386_astintr) /* take the AST */
734 addl $4,%esp /* pop preemption flag */
735 popl %esp /* switch back to PCB stack (w/exc link) */
736 jmp EXT(return_from_trap) /* and check again (rare) */
737 /* ASTs after this point will */
738 /* have to wait */
739
740 /*
741 * Arrange the checks needed for kernel-loaded (or kernel-loading)
742 * threads so that branch is taken in kernel-loaded case.
743 */
744 LEXT(return_to_user)
745 TIME_TRAP_UEXIT
746 cmpl $0,%gs:CPU_ACTIVE_KLOADED
747 jnz EXT(return_xfer_stack)
748 movl %gs:CPU_ACTIVE_THREAD, %ebx /* get active thread */
749
750 #if MACH_RT
751 #if MACH_ASSERT
/* Assertion: preemption level must be zero on return to user mode;
 * trip a breakpoint (int $3) if not. */
752 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
753 je EXT(return_from_kernel)
754 int $3
755 #endif /* MACH_ASSERT */
756 #endif /* MACH_RT */
757
758 /*
759 * Return from kernel mode to interrupted thread.
 * The kret_* labels mark each restoring instruction; a GP/NP fault at
 * any of them is recognized by trap_check_kernel_exit above and
 * re-attributed to the user's instruction.
760 */
761
762 LEXT(return_from_kernel)
763 LEXT(kret_popl_gs)
764 popl %gs /* restore segment registers */
765 LEXT(kret_popl_fs)
766 popl %fs
767 LEXT(kret_popl_es)
768 popl %es
769 LEXT(kret_popl_ds)
770 popl %ds
771 popa /* restore general registers */
772 addl $8,%esp /* discard trap number and error code */
773
774 LEXT(kret_iret)
775 iret /* return from interrupt */
776
777
778 LEXT(return_xfer_stack)
779 /*
780 * If we're on PCB stack in a kernel-loaded task, we have
781 * to transfer saved state back to thread stack and swap
782 * stack pointers here, because the hardware's not going
783 * to do so for us.
784 */
785 CAH(rxsstart)
786 movl %gs:CPU_KERNEL_STACK,%esp
787 movl %gs:CPU_ACTIVE_KLOADED,%eax
/* FRAME_PCB_TO_STACK leaves the new frame address in %eax. */
788 FRAME_PCB_TO_STACK(%eax)
789 movl %eax,%esp
790 CAH(rxsend)
791 jmp EXT(return_from_kernel)
792
793 /*
794 * Hate to put this here, but setting up a separate swap_func for
795 * kernel-loaded threads no longer works, since thread executes
796 * "for a while" (i.e., until it reaches glue code) when first
797 * created, even if it's nominally suspended. Hence we can't
798 * transfer the PCB when the thread first resumes, because we
799 * haven't initialized it yet.
800 */
801 /*
802 * Have to force transfer to new stack "manually". Use a string
803 * move to transfer all of our saved state to the stack pointed
804 * to by iss.uesp, then install a pointer to it as our current
805 * stack pointer.
806 */
807 LEXT(return_kernel_loading)
808 movl %gs:CPU_KERNEL_STACK,%esp
809 movl %gs:CPU_ACTIVE_THREAD, %ebx /* get active thread */
810 movl %ebx,%edx /* save for later */
811 FRAME_PCB_TO_STACK(%ebx)
812 movl %ebx,%esp /* start running on new stack */
/* Clear the kernel-loaded indicator so subsequent traps take the
 * normal (non-kloaded) paths. */
813 movl $0,%gs:CPU_ACTIVE_KLOADED /* set cached indicator */
814 jmp EXT(return_from_kernel)
815
816 /*
817 * Trap from kernel mode. No need to switch stacks or load segment registers.
 *
 * With MACH_KDB/MACH_KGDB configured, routing is: stay put if already on
 * an interrupt/debugger stack; give kgdb or ddb first crack when they
 * are active or own the trap type; otherwise switch to the kernel stack
 * and call kernel_trap(). Unhandled traps fall through to the debugger
 * stacks and finally panic_trap(). %ebx holds the original stack
 * pointer (the saved-state frame) throughout.
818 */
819 trap_from_kernel:
820 #if MACH_KDB || MACH_KGDB
821 mov $ CPU_DATA_GS,%ax
822 mov %ax,%gs
823 movl %esp,%ebx /* save current stack */
824
825 cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
826 jb 6f /* OK if so */
827
828 #if MACH_KGDB
829 cmpl $0,EXT(kgdb_active) /* Unexpected trap in kgdb */
830 je 0f /* no */
831
832 pushl %esp /* Already on kgdb stack */
833 cli
834 call EXT(kgdb_trap)
835 addl $4,%esp
836 jmp EXT(return_from_kernel)
837 0: /* should kgdb handle this exception? */
838 cmpl $(T_NO_FPU),R_TRAPNO(%esp) /* FPU disabled? */
839 je 2f /* yes */
840 cmpl $(T_PAGE_FAULT),R_TRAPNO(%esp) /* page fault? */
841 je 2f /* yes */
842 1:
843 cli /* disable interrupts */
844 CPU_NUMBER(%edx) /* get CPU number */
845 movl CX(EXT(kgdb_stacks),%edx),%ebx
846 xchgl %ebx,%esp /* switch to kgdb stack */
847 pushl %ebx /* pass old sp as an arg */
848 call EXT(kgdb_from_kernel)
849 popl %esp /* switch back to kernel stack */
850 jmp EXT(return_from_kernel)
851 2:
852 #endif /* MACH_KGDB */
853
854 #if MACH_KDB
855 cmpl $0,EXT(db_active) /* could trap be from ddb? */
856 je 3f /* no */
857 CPU_NUMBER(%edx) /* see if this CPU is in ddb */
858 cmpl $0,CX(EXT(kdb_active),%edx)
859 je 3f /* no */
860 pushl %esp
861 call EXT(db_trap_from_asm)
862 addl $0x4,%esp
863 jmp EXT(return_from_kernel)
864
865 3:
866 /*
867 * Dilemma: don't want to switch to kernel_stack if trap
868 * "belongs" to ddb; don't want to switch to db_stack if
869 * trap "belongs" to kernel. So have to duplicate here the
870 * set of trap types that kernel_trap() handles. Note that
871 * "unexpected" page faults will not be handled by kernel_trap().
872 * In this panic-worthy case, we fall into the debugger with
873 * kernel_stack containing the call chain that led to the
874 * bogus fault.
875 */
876 movl R_TRAPNO(%esp),%edx
877 cmpl $(T_PAGE_FAULT),%edx
878 je 4f
879 cmpl $(T_NO_FPU),%edx
880 je 4f
881 cmpl $(T_FPU_FAULT),%edx
882 je 4f
883 cmpl $(T_FLOATING_POINT_ERROR),%edx
884 je 4f
885 cmpl $(T_PREEMPT),%edx
886 jne 7f
887 4:
888 #endif /* MACH_KDB */
889
/* Switch to the kernel stack unless already between active-stack base
 * and kernel-stack top. */
890 cmpl %gs:CPU_KERNEL_STACK,%esp
891 /* if not already on kernel stack, */
892 ja 5f /* check some more */
893 cmpl %gs:CPU_ACTIVE_STACK,%esp
894 ja 6f /* on kernel stack: no switch */
895 5:
896 movl %gs:CPU_KERNEL_STACK,%esp
897 6:
898 pushl %ebx /* save old stack */
899 pushl %ebx /* pass as parameter */
900 call EXT(kernel_trap) /* to kernel trap routine */
901 addl $4,%esp /* pop parameter */
902 testl %eax,%eax
903 jne 8f
904 /*
905 * If kernel_trap returns false, trap wasn't handled.
906 */
907 7:
908 #if MACH_KDB
909 CPU_NUMBER(%edx)
910 movl CX(EXT(db_stacks),%edx),%esp
911 pushl %ebx /* pass old stack as parameter */
912 call EXT(db_trap_from_asm)
913 #endif /* MACH_KDB */
914 #if MACH_KGDB
915 cli /* disable interrupts */
916 CPU_NUMBER(%edx) /* get CPU number */
917 movl CX(EXT(kgdb_stacks),%edx),%esp
918 pushl %ebx /* pass old stack as parameter */
919 call EXT(kgdb_from_kernel)
920 #endif /* MACH_KGDB */
921 addl $4,%esp /* pop parameter */
922 testl %eax,%eax
923 jne 8f
924 /*
925 * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap
926 * wasn't handled.
927 */
928 pushl %ebx /* pass old stack as parameter */
929 call EXT(panic_trap)
930 addl $4,%esp /* pop parameter */
931 8:
932 movl %ebx,%esp /* get old stack (from callee-saves reg) */
933 #else /* MACH_KDB || MACH_KGDB */
934 pushl %esp /* pass parameter */
935 call EXT(kernel_trap) /* to kernel trap routine */
936 addl $4,%esp /* pop parameter */
937 #endif /* MACH_KDB || MACH_KGDB */
938
939 #if MACH_RT
/* Kernel preemption: only for an urgent AST on a T_PREEMPT trap taken
 * while running on the kernel stack (xor/and test checks same-stack
 * membership via the power-of-two stack alignment). */
940 movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
941 testl $ AST_URGENT,%eax /* any urgent preemption? */
942 je EXT(return_from_kernel) /* no, nothing to do */
943 cmpl $ T_PREEMPT,48(%esp) /* preempt request? */
944 jne EXT(return_from_kernel) /* no, nothing to do */
945 movl %gs:CPU_KERNEL_STACK,%eax
946 movl %esp,%ecx
947 xorl %eax,%ecx
948 andl $(-KERNEL_STACK_SIZE),%ecx
949 testl %ecx,%ecx /* are we on the kernel stack? */
950 jne EXT(return_from_kernel) /* no, skip it */
951
952 #if PREEMPT_DEBUG_LOG
953 pushl 28(%esp) /* stack pointer */
954 pushl 24+4(%esp) /* frame pointer */
955 pushl 56+8(%esp) /* stack pointer */
956 pushl $0f
957 call EXT(log_thread_action)
958 addl $16, %esp
959 .data
960 0: String "trap preempt eip"
961 .text
962 #endif /* PREEMPT_DEBUG_LOG */
963
964 pushl $1 /* push preemption flag */
965 call EXT(i386_astintr) /* take the AST */
966 addl $4,%esp /* pop preemption flag */
967 #endif /* MACH_RT */
968
969 jmp EXT(return_from_kernel)
970
971 /*
972 * Called as a function, makes the current thread
973 * return from the kernel as if from an exception.
 *
 * The or with (KERNEL_STACK_SIZE-1) rounds %ecx up to the last byte of
 * the (power-of-two aligned) kernel stack; the PCB-stack pointer is
 * then loaded from a fixed slot (-3-IKS_SIZE) near the stack top —
 * presumably within the i386_kernel_state area; confirm against
 * the thread-state layout in assym.s.
974 */
975
976 .globl EXT(thread_exception_return)
977 .globl EXT(thread_bootstrap_return)
978 LEXT(thread_exception_return)
979 LEXT(thread_bootstrap_return)
980 movl %esp,%ecx /* get kernel stack */
981 or $(KERNEL_STACK_SIZE-1),%ecx
982 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
983 jmp EXT(return_from_trap)
984
/*
 * void call_continuation(continuation, param, wait_result)
 * Reset %esp to the top of the current kernel stack (discarding the
 * entire call chain), invoke continuation(param, wait_result) with a
 * zeroed frame pointer, and terminate the thread if the continuation
 * ever returns. Never returns to the caller.
 */
985 Entry(call_continuation)
986 movl S_ARG0,%eax /* get continuation */
987 movl S_ARG1,%edx /* continuation param */
988 movl S_ARG2,%ecx /* wait result */
989 movl %esp,%ebp /* get kernel stack */
990 or $(KERNEL_STACK_SIZE-1),%ebp
991 addl $(-3-IKS_SIZE),%ebp
992 movl %ebp,%esp /* pop the stack */
993 xorl %ebp,%ebp /* zero frame pointer */
994 pushl %ecx
995 pushl %edx
996 call *%eax /* call continuation */
997 addl $8,%esp
/* Continuation returned: terminate the current thread. */
998 movl %gs:CPU_ACTIVE_THREAD,%eax
999 pushl %eax
1000 call EXT(thread_terminate)
1001
1002 #if 0
1003 #define LOG_INTERRUPT(info,msg) \
1004 pushal ; \
1005 pushl msg ; \
1006 pushl info ; \
1007 call EXT(log_thread_action) ; \
1008 add $8,%esp ; \
1009 popal
1010 #define CHECK_INTERRUPT_TIME(n) \
1011 pushal ; \
1012 pushl $n ; \
1013 call EXT(check_thread_time) ; \
1014 add $4,%esp ; \
1015 popal
1016 #else
1017 #define LOG_INTERRUPT(info,msg)
1018 #define CHECK_INTERRUPT_TIME(n)
1019 #endif
1020
1021 .data
1022 imsg_start:
1023 String "interrupt start"
1024 imsg_end:
1025 String "interrupt end"
1026
	.text
/*
 * All interrupts enter here.
 * On entry: old %eax is on the stack (pushed by the interrupt stub);
 * the interrupt/trap number is in %eax.
 *
 * Saves the caller-visible registers and segment registers, switches
 * to kernel segments, moves to the per-cpu interrupt stack (unless
 * already on it), and dispatches to PE_incoming_interrupt().  On the
 * way out, checks for pending ASTs and either handles a kernel
 * preemption in place or diverts to ast_from_interrupt for
 * user-mode/V86 returns.
 */
Entry(all_intrs)
	pushl	%ecx			/* save registers */
	pushl	%edx
	cld				/* clear direction flag */

	pushl	%ds			/* save segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs
	mov	%ss,%dx			/* switch to kernel segments */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$ CPU_DATA_GS,%dx	/* per-cpu data via %gs */
	mov	%dx,%gs

	/*
	 * test whether already on interrupt stack:
	 * i.e. whether INT_STACK_TOP - INTSTACK_SIZE < %esp <= INT_STACK_TOP
	 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	cmpl	%esp,%ecx
	jb	1f			/* above the top: not on it */
	leal	-INTSTACK_SIZE(%ecx),%edx
	cmpl	%esp,%edx
	jb	int_from_intstack	/* nested: stay on interrupt stack */
1:
	movl	%esp,%edx		/* &i386_interrupt_state (saved frame) */
	xchgl	%ecx,%esp		/* switch to interrupt stack */

	pushl	%ecx			/* save pointer to old stack */
	pushl	%edx			/* arg 2: &i386_interrupt_state */
	pushl	%eax			/* arg 1: trap number */

	TIME_INT_ENTRY			/* do timing */

#if	MACH_RT
	incl	%gs:CPU_PREEMPTION_LEVEL
#endif	/* MACH_RT */
	incl	%gs:CPU_INTERRUPT_LEVEL

	call	EXT(PE_incoming_interrupt)	/* call generic interrupt routine */
	addl	$8,%esp			/* pop trap number and state pointer */

	.globl	EXT(return_to_iret)
LEXT(return_to_iret)			/* (label for kdb_kintr and hardclock) */

	decl	%gs:CPU_INTERRUPT_LEVEL

#if	MACH_RT
	decl	%gs:CPU_PREEMPTION_LEVEL
#endif	/* MACH_RT */

	TIME_INT_EXIT			/* do timing */

	popl	%esp			/* switch back to old stack */

	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* any pending asts? */
	je	1f			/* no, nothing to do */
	testl	$(EFL_VM),I_EFL(%esp)	/* if in V86 */
	jnz	ast_from_interrupt	/* take it */
	testb	$3,I_CS(%esp)		/* user mode, */
	jnz	ast_from_interrupt	/* take it */
#ifdef FIXME
	cmpl	ETEXT_ADDR,I_EIP(%esp)	/* if within kernel-loaded task, */
	jnb	ast_from_interrupt	/* take it */
#endif

#if	MACH_RT
	/* Interrupted the kernel: consider a preemption AST in place. */
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption masked? */
	jne	1f				/* yes, skip it */
	testl	$ AST_URGENT,%eax		/* any urgent requests? */
	je	1f				/* no, skip it */
	cmpl	$ EXT(locore_end),I_EIP(%esp)	/* are we in locore code? */
	jb	1f				/* yes, skip it */
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx			/* differing bits vs. kernel stack */
	andl	$(-KERNEL_STACK_SIZE),%ecx	/* keep bits above stack size */
	testl	%ecx,%ecx			/* are we on the kernel stack? */
	jne	1f				/* no, skip it */

	/*
	 * Take an AST from kernel space.  We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
#if	PREEMPT_DEBUG_LOG
	pushl	$0
	pushl	$0
	pushl	I_EIP+8(%esp)
	pushl	$0f
	call	EXT(log_thread_action)
	addl	$16, %esp
	.data
0:	String	"intr preempt eip"
	.text
#endif	/* PREEMPT_DEBUG_LOG */

	sti				/* allow interrupts during the AST */
	pushl	$1			/* push preemption flag */
	call	EXT(i386_astintr)	/* take the AST */
	addl	$4,%esp			/* pop preemption flag */
#endif	/* MACH_RT */

1:
	pop	%gs			/* restore segment regs */
	pop	%fs
	pop	%es
	pop	%ds
	pop	%edx
	pop	%ecx
	pop	%eax
	iret				/* return to caller */
1145
/*
 * Nested interrupt: we were already running on the interrupt stack,
 * so dispatch in place without switching stacks.  Segment registers
 * were saved by all_intrs but need no reload (already kernel segs)
 * and are discarded, not restored, on exit.
 *
 * FIX: the original closed the comment on the "pushl %edx" line with
 * "/" "*" instead of "*" "/".  Because this file is run through the C
 * preprocessor, the unterminated comment swallowed the following
 * "pushl %eax", so PE_incoming_interrupt received misaligned
 * arguments and the pops below were off by four bytes.
 */
int_from_intstack:
#if	MACH_RT
	incl	%gs:CPU_PREEMPTION_LEVEL
#endif	/* MACH_RT */

	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%esp, %edx		/* &i386_interrupt_state */
	pushl	%edx			/* pass &i386_interrupt_state to PE_incoming_interrupt */

	pushl	%eax			/* push trap number */

	call	EXT(PE_incoming_interrupt)
	addl	$20,%esp		/* pop trap no, state ptr, and gs,fs,es */

LEXT(return_to_iret_i)			/* (label for kdb_kintr) */

	addl	$4,%esp			/* pop ds (24 bytes total: 2 args + 4 seg saves) */

	decl	%gs:CPU_INTERRUPT_LEVEL

#if	MACH_RT
	decl	%gs:CPU_PREEMPTION_LEVEL
#endif	/* MACH_RT */

	pop	%edx			/* must have been on kernel segs */
	pop	%ecx
	pop	%eax			/* no ASTs */
	iret
1175
/*
 * Take an AST from an interrupt that is returning to user (or V86)
 * mode.  On PCB stack; stack layout on entry:
 *	sp->	es	-> edx
 *		ds	-> ecx
 *		edx	-> eax
 *		ecx	-> trapno
 *		eax	-> code
 *		eip
 *		cs
 *		efl
 *		esp
 *		ss
 *
 * Pops the light-weight interrupt save area, then rebuilds a full
 * trap-style save area (zero trapno/code + pusha + segments) so the
 * AST can be taken through the normal trap machinery.
 */
ast_from_interrupt:
	pop	%gs
	pop	%fs
	pop	%es			/* restore all registers ... */
	pop	%ds
	popl	%edx
	popl	%ecx
	popl	%eax
	sti				/* Reenable interrupts */
	pushl	$0			/* zero code */
	pushl	$0			/* zero trap number */
	pusha				/* save general registers */
	push	%ds			/* save segment registers */
	push	%es
	push	%fs
	push	%gs
	mov	%ss,%dx			/* switch to kernel segments */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$ CPU_DATA_GS,%dx
	mov	%dx,%gs

	/*
	 * See if we interrupted a kernel-loaded thread executing
	 * in its own task.
	 */
	CPU_NUMBER(%edx)		/* NOTE(review): %edx only used by FIXME'd check below */
	testl	$(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
	jnz	0f			/* user mode trap if so */
	testb	$3,R_CS(%esp)
	jnz	0f			/* user mode, back to normal */
#ifdef	FIXME
	cmpl	ETEXT_ADDR,R_EIP(%esp)
	jb	0f			/* not kernel-loaded, back to normal */
#endif

	/*
	 * Transfer the current stack frame by hand into the PCB.
	 */
	CAH(afistart)
	movl	%gs:CPU_ACTIVE_KLOADED,%eax
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* to kernel stack; %ebx = old frame */
	FRAME_STACK_TO_PCB(%eax,%ebx)	/* presumably leaves PCB sp in %eax -- see macro */
	CAH(afiend)
	TIME_TRAP_UENTRY
	jmp	3f
0:
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%eax
					/* switch to kernel stack */
	xchgl	%eax,%esp		/* %eax = PCB save-area pointer */
3:
	pushl	%eax			/* remember PCB stack to return to */
	pushl	$0			/* push preemption flag */
	call	EXT(i386_astintr)	/* take the AST */
	addl	$4,%esp			/* pop preemption flag */
	popl	%esp			/* back to PCB stack */
	jmp	EXT(return_from_trap)	/* return */
1250
#if	MACH_KDB || MACH_KGDB
/*
 * kdb_kintr: enter kdb from keyboard interrupt.
 * Chase down the stack frames until we find one whose return
 * address is the interrupt handler.  At that point, we have:
 *
 * frame->	saved %ebp
 *		return address in interrupt handler
 *		ivect
 *		saved SPL
 *		return address == return_to_iret_i
 *		saved %edx
 *		saved %ecx
 *		saved %eax
 *		saved %eip
 *		saved %cs
 *		saved %efl
 *
 * OR:
 * frame->	saved %ebp
 *		return address in interrupt handler
 *		ivect
 *		saved SPL
 *		return address == return_to_iret
 *		pointer to save area on old stack
 *		[ saved %ebx, if accurate timing ]
 *
 *    old stack:	saved %es
 *			saved %ds
 *			saved %edx
 *			saved %ecx
 *			saved %eax
 *			saved %eip
 *			saved %cs
 *			saved %efl
 *
 * Patch that return address to kdb_from_iret / kdb_from_iret_i so
 * the debugger is entered when the interrupt unwinds, passing it
 * the register save area.
 */

#if	MACH_KGDB
Entry(kgdb_kintr)
#endif	/* MACH_KGDB */
#if	MACH_KDB
Entry(kdb_kintr)
#endif	/* MACH_KDB */
	movl	%ebp,%eax		/* save caller`s frame pointer */
	movl	$ EXT(return_to_iret),%ecx  /* interrupt return address 1 */
	movl	$ EXT(return_to_iret_i),%edx /* interrupt return address 2 */

0:	cmpl	16(%eax),%ecx		/* does this frame return to */
					/* interrupt handler (1)? */
	je	1f
	cmpl	$kdb_from_iret,16(%eax)	/* already patched? treat as (1) */
	je	1f
	cmpl	16(%eax),%edx		/* interrupt handler (2)? */
	je	2f			/* if not: */
	cmpl	$kdb_from_iret_i,16(%eax) /* already patched? treat as (2) */
	je	2f
	movl	(%eax),%eax		/* try next frame */
	jmp	0b

1:	movl	$kdb_from_iret,16(%eax)	/* returns to kernel/user stack */
	ret

2:	movl	$kdb_from_iret_i,16(%eax)
					/* returns to interrupt stack */
	ret
1318
/*
 * On return from keyboard interrupt, we will execute
 * kdb_from_iret_i
 *	if returning to an interrupt on the interrupt stack
 * kdb_from_iret
 *	if returning to an interrupt on the user or kernel stack
 *
 * kdb_from_iret: the caller's %edx/%ecx/%eax and segment registers
 * are already in the save area below us; save the remaining
 * registers in known locations, hand the register block to the
 * debugger, then restore and resume the normal interrupt return.
 */
kdb_from_iret:
					/* save regs in known locations */
	pushl	%ebx			/* caller`s %ebx is in reg */
	pushl	%ebp
	pushl	%esi
	pushl	%edi
	push	%fs
	push	%gs
#if	MACH_KGDB
	cli				/* no interrupts while in kgdb */
	pushl	%esp			/* pass regs */
	call	EXT(kgdb_kentry)	/* to kgdb */
	addl	$4,%esp			/* pop parameters */
#endif	/* MACH_KGDB */
#if	MACH_KDB
	pushl	%esp			/* pass regs */
	call	EXT(kdb_kentry)		/* to kdb */
	addl	$4,%esp			/* pop parameters */
#endif	/* MACH_KDB */
	pop	%gs			/* restore registers */
	pop	%fs
	popl	%edi
	popl	%esi
	popl	%ebp
	popl	%ebx
	jmp	EXT(return_to_iret)	/* normal interrupt return */
1352
/*
 * kdb_from_iret_i: debugger entry when the patched interrupt was
 * returning on the interrupt stack.  Rebuild a full trap-style save
 * area (zero trapno/code + pusha + segments), enter kgdb and/or kdb,
 * then restore everything and iret.
 */
kdb_from_iret_i:			/* on interrupt stack */
	pop	%edx			/* restore saved registers */
	pop	%ecx
	pop	%eax
	pushl	$0			/* zero error code */
	pushl	$0			/* zero trap number */
	pusha				/* save general registers */
	push	%ds			/* save segment registers */
	push	%es
	push	%fs
	push	%gs
#if	MACH_KGDB
	cli				/* disable interrupts */
	CPU_NUMBER(%edx)		/* get CPU number */
	movl	CX(EXT(kgdb_stacks),%edx),%ebx	/* per-cpu kgdb stack */
	xchgl	%ebx,%esp		/* switch to kgdb stack */
	pushl	%ebx			/* pass old sp as an arg */
	call	EXT(kgdb_from_kernel)
	popl	%esp			/* switch back to interrupt stack */
#endif	/* MACH_KGDB */
#if	MACH_KDB
	pushl	%esp			/* pass regs, */
	pushl	$0			/* code, */
	pushl	$-1			/* type to kdb */
	call	EXT(kdb_trap)
	addl	$12,%esp
#endif	/* MACH_KDB */
	pop	%gs			/* restore segment registers */
	pop	%fs
	pop	%es
	pop	%ds
	popa				/* restore general registers */
	addl	$8,%esp			/* discard trapno/code */
	iret

#endif	/* MACH_KDB || MACH_KGDB */
1389

/*
 * Mach RPC enters through a call gate, like a system call.
 * Builds a trap-style save area (the call gate does not push
 * eflags, so the three fields must be shuffled into place), copies
 * the user arguments onto the kernel stack, and dispatches through
 * mach_call_munger.  Returns to user via return_from_trap.
 */

Entry(mach_rpc)
	pushf				/* save flags as soon as possible */
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$ CPU_DATA_GS,%dx
	mov	%dx,%gs

	/*
	 * Shuffle eflags,eip,cs into proper places
	 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

	TIME_TRAP_UENTRY

	negl	%eax			/* get system call number (traps are negative) */
	shll	$4,%eax			/* manual indexing: 16-byte table entries */

	/*
	 * Check here for mach_rpc from kernel-loaded task --
	 *  - Note that kernel-loaded task returns via real return.
	 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
	 * so transfer the stack frame into the PCB explicitly, then
	 * start running on resulting "PCB stack".  We have to set
	 * up a simulated "uesp" manually, since there's none in the
	 * frame.
	 */
	cmpl	$0,%gs:CPU_ACTIVE_KLOADED
	jz	2f
	CAH(mrstart)
	movl	%gs:CPU_ACTIVE_KLOADED,%ebx
	movl	%gs:CPU_KERNEL_STACK,%edx
	xchgl	%edx,%esp

	FRAME_STACK_TO_PCB(%ebx,%edx)
	CAH(mrend)

	jmp	3f

2:
	movl	%gs:CPU_KERNEL_STACK,%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */

3:

	/*
	 * Register use on entry:
	 *   eax contains syscall number << 4
	 *   ebx contains user regs pointer
	 */
#undef	RPC_TRAP_REGISTERS
#ifdef	RPC_TRAP_REGISTERS
	pushl	R_ESI(%ebx)
	pushl	R_EDI(%ebx)
	pushl	R_ECX(%ebx)
	pushl	R_EDX(%ebx)
#else
	movl	EXT(mach_trap_table)(%eax),%ecx
					/* get number of arguments */
	jecxz	2f			/* skip argument copy if none */
	movl	R_UESP(%ebx),%esi	/* get user stack pointer */
	lea	4(%esi,%ecx,4),%esi	/* skip user return address, */
					/* and point past last argument */
	movl	%gs:CPU_ACTIVE_KLOADED,%edx
					/* point to current thread */
	orl	%edx,%edx		/* if ! kernel-loaded, check addr */
	jz	4f			/* else */
	mov	%ds,%dx			/* kernel data segment access */
	jmp	5f
4:
	cmpl	$(VM_MAX_ADDRESS),%esi	/* in user space? */
	ja	mach_call_addr		/* address error if not */
	movl	$ USER_DS,%edx		/* user data segment access */
5:
	mov	%dx,%fs			/* copy args via %fs override */
	movl	%esp,%edx		/* save kernel ESP for error recovery */
1:
	subl	$4,%esi
	RECOVERY_SECTION
	RECOVER(mach_call_addr_push)	/* fault here -> mach_call_addr_push */
	pushl	%fs:(%esi)		/* push argument on stack */
	loop	1b			/* loop for all arguments */
#endif

	/*
	 * Register use on entry:
	 *   eax contains syscall number << 4
	 *   mach_call_munger is declared regparm(1), so the first arg is %eax
	 */
2:

	call	EXT(mach_call_munger)

	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
1510
1511
/*
 * Special system call entry for "int 0x80", which has the "eflags"
 * register saved at the right place already.
 * Fall back to the common syscall path after saving the registers.
 *
 * esp ->	old eip
 *		old cs
 *		old eflags
 *		old esp		if trapped from user
 *		old ss		if trapped from user
 *
 * XXX: for the moment, we don't check for int 0x80 from kernel mode.
 */
Entry(syscall_int80)
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$ CPU_DATA_GS,%dx
	mov	%dx,%gs

	jmp	syscall_entry_3		/* no eflags shuffle needed */
1542
/*
 * System call enters through a call gate.  Flags are not saved -
 * we must shuffle stack to look like trap save area.
 *
 * esp->	old eip
 *		old cs
 *		old esp
 *		old ss
 *
 * eax contains system call number (negative for Mach traps).
 *
 * NB: below use of CPU_NUMBER assumes that macro will use correct
 * correct segment register for any kernel data accesses.
 */
Entry(syscall)
syscall_entry:
	pushf				/* save flags as soon as possible */
syscall_entry_2:
	pushl	%eax			/* save system call number */
	pushl	$0			/* clear trap number slot */

	pusha				/* save the general registers */
	pushl	%ds			/* and the segment registers */
	pushl	%es
	pushl	%fs
	pushl	%gs

	mov	%ss,%dx			/* switch to kernel data segment */
	mov	%dx,%ds
	mov	%dx,%es
	mov	$ CPU_DATA_GS,%dx
	mov	%dx,%gs

	/*
	 * Shuffle eflags,eip,cs into proper places
	 */

	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
	movl	%ecx,R_EIP(%esp)	/* fix eip */
	movl	%edx,R_CS(%esp)		/* fix cs */
	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */

syscall_entry_3:
	/*
	 * Check here for syscall from kernel-loaded task --
	 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
	 * so transfer the stack frame into the PCB explicitly, then
	 * start running on resulting "PCB stack".  We have to set
	 * up a simulated "uesp" manually, since there's none in the
	 * frame.
	 */
	cmpl	$0,%gs:CPU_ACTIVE_KLOADED
	jz	0f
	CAH(scstart)
	movl	%gs:CPU_ACTIVE_KLOADED,%ebx
	movl	%gs:CPU_KERNEL_STACK,%edx
	xchgl	%edx,%esp
	FRAME_STACK_TO_PCB(%ebx,%edx)
	CAH(scend)
	TIME_TRAP_UENTRY
	jmp	1f

0:
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */
					/* user regs pointer already set */

	/*
	 * Native system call.
	 * Register use on entry:
	 *   eax contains syscall number
	 *   ebx points to user regs
	 */
1:
	negl	%eax			/* get system call number */
	jl	mach_call_range		/* out of range if it was positive */

	cmpl	EXT(mach_trap_count),%eax /* check system call table bounds */
	jg	mach_call_range		/* error if out of range */
	shll	$4,%eax			/* manual indexing: 16-byte entries */

	movl	EXT(mach_trap_table)+4(%eax),%edx
					/* get procedure */
	cmpl	$ EXT(kern_invalid),%edx /* if not "kern_invalid" */
	jne	do_native_call		/* go on with Mach syscall */
	shrl	$4,%eax			/* restore syscall number */
	jmp	mach_call_range		/* try it as a "server" syscall */
1636
/*
 * Copy the user's arguments onto the kernel stack.
 * Register use on entry:
 *   eax contains syscall number << 4
 *   ebx contains user regs pointer
 * Arguments are read through %fs, which is pointed at either the
 * kernel or user data segment depending on whether the caller is a
 * kernel-loaded task.  Faults during the copy unwind through
 * mach_call_addr_push.
 */
do_native_call:
	movl	EXT(mach_trap_table)(%eax),%ecx
					/* get number of arguments */
	jecxz	mach_call_call		/* skip argument copy if none */
	movl	R_UESP(%ebx),%esi	/* get user stack pointer */
	lea	4(%esi,%ecx,4),%esi	/* skip user return address, */
					/* and point past last argument */
	movl	%gs:CPU_ACTIVE_KLOADED,%edx
					/* point to current thread */
	orl	%edx,%edx		/* kernel-loaded? */
	jz	0f			/* no: validate the user address */
	mov	%ds,%dx			/* yes: kernel data segment access */
	jmp	1f
0:
	cmpl	$(VM_MAX_ADDRESS),%esi	/* in user space? */
	ja	mach_call_addr		/* address error if not */
	movl	$ USER_DS,%edx		/* user data segment access */
1:
	mov	%dx,%fs
	movl	%esp,%edx		/* save kernel ESP for error recovery */
2:
	subl	$4,%esi
	RECOVERY_SECTION
	RECOVER(mach_call_addr_push)	/* fault here -> mach_call_addr_push */
	pushl	%fs:(%esi)		/* push argument on stack */
	loop	2b			/* loop for all arguments */
1668
/*
 * Invoke the Mach trap and return to user.
 * Register use on entry:
 *   eax contains syscall number << 4
 *   ebx contains user regs pointer
 * With ETAP event monitoring, mach_msg (0x200 after shifting) skips
 * the probes and everything else is bracketed by begin/end probes.
 */
mach_call_call:

	CAH(call_call)

#if	ETAP_EVENT_MONITOR
	cmpl	$0x200, %eax		/* is this mach_msg? */
	jz	make_syscall		/* if yes, don't record event */

	pushal				/* Otherwise: save registers */
	pushl	%eax			/* push syscall number on stack*/
	call	EXT(etap_machcall_probe1) /* call event begin probe */
	add	$4,%esp			/* restore stack */
	popal				/* restore registers */

	call	*EXT(mach_trap_table)+4(%eax) /* call procedure */
	pushal
	call	EXT(etap_machcall_probe2) /* call event end probe */
	popal
	jmp	skip_syscall		/* syscall already made */
#endif	/* ETAP_EVENT_MONITOR */

make_syscall:

	/*
	 * mach_call_munger is declared regparm(1) so the first arg is %eax
	 */
	call	EXT(mach_call_munger)

skip_syscall:

	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%eax,R_EAX(%esp)	/* save return value */
	jmp	EXT(return_from_trap)	/* return to user */
1709
/*
 * Address out of range.  Change to page fault.
 * %esi holds failing address.
 * Register use on entry:
 *   ebx contains user regs pointer
 * mach_call_addr_push is the RECOVER target for faults taken while
 * pushing arguments: it first discards the partially-pushed args.
 */
mach_call_addr_push:
	movl	%edx,%esp		/* clean parameters from stack */
mach_call_addr:
	movl	%esi,R_CR2(%ebx)	/* set fault address */
	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
					/* set page-fault trap */
	movl	$(T_PF_USER),R_ERR(%ebx)
					/* set error code - read user space */
	CAH(call_addr)
	jmp	EXT(take_trap)		/* treat as a trap */
1726
/*
 * System call out of range.  Treat as invalid-instruction trap.
 * (? general protection?)
 * Register use on entry:
 *   eax contains syscall number
 * Raises EXC_SYSCALL via exception_triage(EXC_SYSCALL, code, 1)
 * with the syscall number as the single code word.  Does not return.
 */
mach_call_range:
	push	%eax			/* code[0] = syscall number */
	movl	%esp,%edx		/* %edx = &code[0] */
	push	$1			/* code_cnt = 1 */
	push	%edx			/* code array pointer (see interface doc) */
	push	$ EXC_SYSCALL		/* exception_type_t */
	CAH(call_range)
	call	EXT(exception_triage)
	/* no return */
1742
/*
 * syscall_failed: abort a system call by converting it into an
 * invalid-opcode trap against the saved user registers.
 * Returns to the PCB stack, then re-fetches the kernel stack so
 * %ebx points at the user register save area, and takes the trap.
 */
	.globl	EXT(syscall_failed)
LEXT(syscall_failed)
	movl	%esp,%ecx		/* get kernel stack */
	or	$(KERNEL_STACK_SIZE-1),%ecx
	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
	movl	%gs:CPU_KERNEL_STACK,%ebx
					/* get current kernel stack */
	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
					/* user registers. */
					/* user regs pointer already set */

	movl	$(T_INVALID_OPCODE),R_TRAPNO(%ebx)
					/* set invalid-operation trap */
	movl	$0,R_ERR(%ebx)		/* clear error code */
	CAH(failed)
	jmp	EXT(take_trap)		/* treat as a trap */
1759
1760 /*\f*/
1761 /*
1762 * Utility routines.
1763 */
1764
1765
/*
 * Copy from user address space.
 *	arg0:	user address
 *	arg1:	kernel address
 *	arg2:	byte count
 * Returns 0 on success, EFAULT on fault or address wrap-around.
 * Reads through %ds, which is pointed at the user data segment
 * unless the current task shares the kernel pmap; %ds is restored
 * from %ss before returning.  Faults unwind via RECOVER to
 * copyin_fail.
 */
Entry(copyinmsg)
ENTRY(copyin)
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get user start address */
	movl	8+S_ARG1,%edi		/* get kernel destination address */
	movl	8+S_ARG2,%edx		/* get count */

	lea	0(%esi,%edx),%eax	/* get user end address + 1 */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get active thread */
	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
	cmpl	EXT(kernel_pmap), %ecx
	jz	1f			/* kernel pmap: leave %ds alone */
	movl	$ USER_DS,%ecx		/* user data segment access */
	mov	%cx,%ds
1:
	cmpl	%esi,%eax
	jb	copyin_fail		/* fail if wrap-around */
	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb
	xorl	%eax,%eax		/* return 0 for success */
copy_ret:
	mov	%ss,%di			/* restore kernel data segment */
	mov	%di,%ds

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyin_fail:
	movl	$ EFAULT,%eax		/* return error for failure */
	jmp	copy_ret		/* pop frame and return */
1818
/*
 * Copy string from user address space.
 *	arg0:	user address
 *	arg1:	kernel address (may be NULL for a length-only scan)
 *	arg2:	max byte count
 *	arg3:	actual byte count (OUT, may be NULL)
 * Copies bytes through %fs until a NUL is stored or the count is
 * exhausted.  Returns 0 on success, ENAMETOOLONG if no NUL was
 * found within the count, EFAULT on fault.
 */
Entry(copyinstr)
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get user start address */
	movl	8+S_ARG1,%edi		/* get kernel destination address */
	movl	8+S_ARG2,%edx		/* get count */

	lea	0(%esi,%edx),%eax	/* get user end address + 1 */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get active thread */
	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
	cmpl	EXT(kernel_pmap), %ecx
	jne	0f
	mov	%ds,%cx			/* kernel data segment access */
	jmp	1f
0:
	movl	$ USER_DS,%ecx		/* user data segment access */
1:
	mov	%cx,%fs			/* read source bytes via %fs */
	xorl	%eax,%eax
	cmpl	$0,%edx			/* zero count: done, return 0 */
	je	4f
2:
	RECOVERY_SECTION
	RECOVER(copystr_fail)		/* copy bytes... */
	movb	%fs:(%esi),%al
	incl	%esi
	testl	%edi,%edi		/* NULL kernel address? */
	jz	3f			/* yes: count only, don't store */
	movb	%al,(%edi)		/* copy the byte */
	incl	%edi
3:
	decl	%edx
	je	5f			/* Zero count.. error out */
	cmpl	$0,%eax
	jne	2b			/* .. a NUL found? */
	jmp	4f			/* return zero (%eax) */
5:
	movl	$ ENAMETOOLONG,%eax	/* String is too long.. */
4:
	movl	8+S_ARG3,%edi		/* get OUT len ptr */
	cmpl	$0,%edi
	jz	copystr_ret		/* if null, just return */
	subl	8+S_ARG0,%esi
	movl	%esi,(%edi)		/* else set OUT arg to xfer len */
copystr_ret:
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copystr_fail:
	movl	$ EFAULT,%eax		/* return error for failure */
	jmp	copy_ret		/* NOTE(review): jumps to copyin's copy_ret,
					 * which also reloads %ds from %ss -- harmless
					 * here (copyinstr never changes %ds), and it
					 * skips storing the OUT length on fault */
1881
/*
 * Copy to user address space.
 *	arg0:	kernel address
 *	arg1:	user address
 *	arg2:	byte count
 * Returns 0 on success, EFAULT on fault.  Writes through %es, which
 * is pointed at the user data segment unless the current task shares
 * the kernel pmap; %es is restored from %ss before returning.
 */
Entry(copyoutmsg)
ENTRY(copyout)
	pushl	%esi
	pushl	%edi			/* save registers */
	pushl	%ebx

	movl	12+S_ARG0,%esi		/* get kernel start address */
	movl	12+S_ARG1,%edi		/* get user start address */
	movl	12+S_ARG2,%edx		/* get count */

	leal	0(%edi,%edx),%eax	/* get user end address + 1 */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get active thread */
	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
	cmpl	EXT(kernel_pmap), %ecx
	jne	0f
	mov	%ds,%cx			/* else kernel data segment access */
	jmp	1f
0:
	movl	$ USER_DS,%ecx
1:
	mov	%cx,%es

	/*
	 * Check whether user address space is writable
	 * before writing to it - hardware is broken.
	 *
	 * Skip check if "user" address is really in
	 * kernel space (i.e., if it's in a kernel-loaded
	 * task).
	 *
	 * Register usage:
	 *	esi/edi	source/dest pointers for rep/mov
	 *	ecx	counter for rep/mov
	 *	edx	counts down from 3rd arg
	 *	eax	count of bytes for each (partial) page copy
	 *	ebx	shadows edi, used to adjust edx
	 */
	movl	%edi,%ebx		/* copy edi for syncing up */
copyout_retry:
	/* if restarting after a partial copy, put edx back in sync, */
	addl	%ebx,%edx		/* edx -= (edi - ebx); */
	subl	%edi,%edx
	movl	%edi,%ebx		/* ebx = edi; */

	/*
	 * Copy only what fits on the current destination page.
	 * Check for write-fault again on the next page.
	 */
	leal	NBPG(%edi),%eax		/* point to */
	andl	$(-NBPG),%eax		/* start of next page */
	subl	%edi,%eax		/* get number of bytes to that point */
	cmpl	%edx,%eax		/* bigger than count? */
	jle	1f			/* if so, */
	movl	%edx,%eax		/* use count */
1:
	cld				/* count up */
	movl	%eax,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	RETRY_SECTION
	RETRY(copyout_retry)
	rep
	movsl
	movl	%eax,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	RETRY_SECTION
	RETRY(copyout_retry)
	rep
	movsb				/* move */
	movl	%edi,%ebx		/* copy edi for syncing up */
	subl	%eax,%edx		/* and decrement count */
	jg	copyout_retry		/* restart on next page if not done */
	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%di			/* restore kernel segment */
	mov	%di,%es

	popl	%ebx
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$ EFAULT,%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */
1978
/*
 * FPU routines.
 * Thin wrappers giving C code access to individual x87 instructions.
 */

/*
 * Initialize FPU (resets control/status words and tag register).
 */
ENTRY(_fninit)
	fninit
	ret

/*
 * Read control word; returned in %ax (high 16 bits of %eax undefined
 * beyond what fstcw leaves in the stack slot).
 */
ENTRY(_fstcw)
	pushl	%eax			/* get stack space */
	fstcw	(%esp)
	popl	%eax
	ret

/*
 * Set control word from the 16-bit argument.
 */
ENTRY(_fldcw)
	fldcw	4(%esp)
	ret

/*
 * Read status word; returns it zero-extended in %eax.
 */
ENTRY(_fnstsw)
	xor	%eax,%eax		/* clear high 16 bits of eax */
	fnstsw	%ax			/* read FP status */
	ret

/*
 * Clear FPU exceptions (without checking for pending ones).
 */
ENTRY(_fnclex)
	fnclex
	ret

/*
 * Clear task-switched flag in %cr0.
 */
ENTRY(_clts)
	clts
	ret

/*
 * Save complete FPU state.  Save error for later.
 *	arg0:	pointer to save area
 */
ENTRY(_fpsave)
	movl	4(%esp),%eax		/* get save area pointer */
	fnsave	(%eax)			/* save complete state, including */
					/* errors */
	ret

/*
 * Restore FPU state.
 *	arg0:	pointer to save area previously written by _fpsave
 */
ENTRY(_fprestore)
	movl	4(%esp),%eax		/* get save area pointer */
	frstor	(%eax)			/* restore complete state */
	ret
2044
/*
 * Set cr3.  The CPU number is OR'd into the low bits of the value
 * (see get_cr3, which masks it back out).
 */
ENTRY(set_cr3)
	CPU_NUMBER(%eax)
	orl	4(%esp), %eax
	/*
	 * Don't set PDBR to a new value (hence invalidating the
	 * "paging cache") if the new value matches the current one.
	 */
	movl	%cr3,%edx		/* get current cr3 value */
	cmpl	%eax,%edx
	je	0f			/* if two are equal, don't set */
	movl	%eax,%cr3		/* load it (and flush cache) */
0:
	ret

/*
 * Read cr3, with the encoded CPU number stripped off.
 */
ENTRY(get_cr3)
	movl	%cr3,%eax
	andl	$(~0x7), %eax		/* remove cpu number */
	ret

/*
 * Flush TLB by reloading cr3 with its current value.
 */
ENTRY(flush_tlb)
	movl	%cr3,%eax		/* flush tlb by reloading CR3 */
	movl	%eax,%cr3		/* with itself */
	ret

/*
 * Read cr2 (page-fault linear address).
 */
ENTRY(get_cr2)
	movl	%cr2,%eax
	ret

/*
 * Read cr4.  Hand-encoded because the assembler used may not
 * know the cr4 register.
 */
ENTRY(get_cr4)
	.byte	0x0f,0x20,0xe0		/* movl %cr4, %eax */
	ret

/*
 * Write cr4 from the first argument (hand-encoded, as above).
 */
ENTRY(set_cr4)
	movl	4(%esp), %eax
	.byte	0x0f,0x22,0xe0		/* movl %eax, %cr4 */
	ret

/*
 * Read ldtr; returns the selector zero-extended in %eax.
 */
Entry(get_ldt)
	xorl	%eax,%eax
	sldt	%ax
	ret

/*
 * Set ldtr from the 16-bit selector argument.
 */
Entry(set_ldt)
	lldt	4(%esp)
	ret

/*
 * Read task register; returns the selector zero-extended in %eax.
 */
ENTRY(get_tr)
	xorl	%eax,%eax
	str	%ax
	ret

/*
 * Set task register.  Also clears busy bit of task descriptor
 * (ltr faults on a busy TSS, so the access byte is rewritten first).
 */
ENTRY(set_tr)
	movl	S_ARG0,%eax		/* get task segment number */
	subl	$8,%esp			/* push space for SGDT */
	sgdt	2(%esp)			/* store GDT limit and base (linear) */
	movl	4(%esp),%edx		/* address GDT */
	movb	$(K_TSS),5(%edx,%eax)	/* fix access byte in task descriptor */
	ltr	%ax			/* load task register */
	addl	$8,%esp			/* clear stack */
	ret				/* and return */

/*
 * Set task-switched flag in %cr0.
 */
ENTRY(_setts)
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
	ret
2144
/*
 * io register must not be used on slaves (no AT bus)
 */
#define	ILL_ON_SLAVE


/*
 * Argument/frame macros for the I/O helpers below: with MACH_ASSERT
 * a full stack frame is built (so the debugger can walk it) and
 * arguments are read relative to %ebp; otherwise no frame is built
 * and arguments are read relative to %esp.
 */
#if	MACH_ASSERT

#define ARG0		B_ARG0
#define ARG1		B_ARG1
#define ARG2		B_ARG2
#define PUSH_FRAME	FRAME
#define POP_FRAME	EMARF

#else	/* MACH_ASSERT */

#define ARG0		S_ARG0
#define ARG1		S_ARG1
#define ARG2		S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif	/* MACH_ASSERT */
2168
2169
#if	MACH_KDB || MACH_ASSERT

/*
 * Following routines are also defined as macros in i386/pio.h
 * Compile them when MACH_KDB is configured so that they
 * can be invoked from the debugger.
 */

/*
 * void outb(unsigned char *io_port,
 *	     unsigned char byte)
 *
 * Output a byte to an IO port.
 */
ENTRY(outb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outb	%al,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Input a byte from an IO port.
 */
ENTRY(inb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inb	%dx,%al			/* get the byte */
	POP_FRAME
	ret

/*
 * void outw(unsigned short *io_port,
 *	     unsigned short word)
 *
 * Output a word to an IO port.
 */
ENTRY(outw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outw	%ax,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Input a word from an IO port.
 */
ENTRY(inw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inw	%dx,%ax			/* get the word */
	POP_FRAME
	ret

/*
 * void outl(unsigned int *io_port,
 *	     unsigned int byte)
 *
 * Output an int to an IO port.
 */
ENTRY(outl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address*/
	movl	ARG1,%eax		/* data to output */
	outl	%eax,%dx		/* send it out */
	POP_FRAME
	ret

/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Input an int from an IO port.
 */
ENTRY(inl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	inl	%dx,%eax		/* get the int */
	POP_FRAME
	ret

#endif	/* MACH_KDB || MACH_ASSERT */
2265
2266 /*
2267 * void loutb(unsigned byte *io_port,
2268 * unsigned byte *data,
2269 * unsigned int count)
2270 *
2271 * Output an array of bytes to an IO port.
2271 * Two entry points (loutb/outsb) share one body.
2271 * %esi is preserved by parking it in %eax, which works because
2271 * neither cld nor rep outsb modifies %eax.
2272 */
2273 ENTRY(loutb)
2274 ENTRY(outsb)
2275 PUSH_FRAME
2276 ILL_ON_SLAVE
2277 movl %esi,%eax /* save register (%esi must survive the call) */
2278 movl ARG0,%edx /* get io port number */
2279 movl ARG1,%esi /* get data address */
2280 movl ARG2,%ecx /* get count */
2281 cld /* count up */
2282 rep
2283 outsb /* output */
2284 movl %eax,%esi /* restore register */
2285 POP_FRAME
2286 ret
2287
2288
2287
2288
2289 /*
2290 * void loutw(unsigned short *io_port,
2291 * unsigned short *data,
2292 * unsigned int count)
2293 *
2294 * Output an array of shorts to an IO port.
2294 * Two entry points (loutw/outsw) share one body; %esi is saved
2294 * in %eax across the string op (rep outsw leaves %eax intact).
2295 */
2296 ENTRY(loutw)
2297 ENTRY(outsw)
2298 PUSH_FRAME
2299 ILL_ON_SLAVE
2300 movl %esi,%eax /* save register */
2301 movl ARG0,%edx /* get io port number */
2302 movl ARG1,%esi /* get data address */
2303 movl ARG2,%ecx /* get count (in words) */
2304 cld /* count up */
2305 rep
2306 outsw /* output */
2307 movl %eax,%esi /* restore register */
2308 POP_FRAME
2309 ret
2310
2310
2311 /*
2312 * void loutl(unsigned short io_port,
2313 * unsigned int *data,
2314 * unsigned int count)
2315 *
2316 * Output an array of longs to an IO port.
2316 * (Header previously said "loutw" — copy-paste typo; this is loutl.)
2316 * Two entry points (loutl/outsl) share one body; %esi is saved
2316 * in %eax across the string op.
2317 */
2318 ENTRY(loutl)
2319 ENTRY(outsl)
2320 PUSH_FRAME
2321 ILL_ON_SLAVE
2322 movl %esi,%eax /* save register */
2323 movl ARG0,%edx /* get io port number */
2324 movl ARG1,%esi /* get data address */
2325 movl ARG2,%ecx /* get count (in longwords) */
2326 cld /* count up */
2327 rep
2328 outsl /* output */
2329 movl %eax,%esi /* restore register */
2330 POP_FRAME
2331 ret
2332
2333
2332
2333
2334 /*
2335 * void linb(unsigned char *io_port,
2336 * unsigned char *data,
2337 * unsigned int count)
2338 *
2339 * Input an array of bytes from an IO port.
2339 * Two entry points (linb/insb) share one body; %edi is saved
2339 * in %eax across the string op (rep insb leaves %eax intact).
2340 */
2341 ENTRY(linb)
2342 ENTRY(insb)
2343 PUSH_FRAME
2344 ILL_ON_SLAVE
2345 movl %edi,%eax /* save register */
2346 movl ARG0,%edx /* get io port number */
2347 movl ARG1,%edi /* get data address */
2348 movl ARG2,%ecx /* get count */
2349 cld /* count up */
2350 rep
2351 insb /* input */
2352 movl %eax,%edi /* restore register */
2353 POP_FRAME
2354 ret
2355
2356
2355
2356
2357 /*
2358 * void linw(unsigned short *io_port,
2359 * unsigned short *data,
2360 * unsigned int count)
2361 *
2362 * Input an array of shorts from an IO port.
2362 * Two entry points (linw/insw) share one body; %edi is saved
2362 * in %eax across the string op.
2363 */
2364 ENTRY(linw)
2365 ENTRY(insw)
2366 PUSH_FRAME
2367 ILL_ON_SLAVE
2368 movl %edi,%eax /* save register */
2369 movl ARG0,%edx /* get io port number */
2370 movl ARG1,%edi /* get data address */
2371 movl ARG2,%ecx /* get count (in words) */
2372 cld /* count up */
2373 rep
2374 insw /* input */
2375 movl %eax,%edi /* restore register */
2376 POP_FRAME
2377 ret
2378
2379
2378
2379
2380 /*
2381 * void linl(unsigned short io_port,
2382 * unsigned int *data,
2383 * unsigned int count)
2384 *
2385 * Input an array of longs from an IO port.
2385 * Two entry points (linl/insl) share one body; %edi is saved
2385 * in %eax across the string op.
2386 */
2387 ENTRY(linl)
2388 ENTRY(insl)
2389 PUSH_FRAME
2390 ILL_ON_SLAVE
2391 movl %edi,%eax /* save register */
2392 movl ARG0,%edx /* get io port number */
2393 movl ARG1,%edi /* get data address */
2394 movl ARG2,%ecx /* get count (in longwords) */
2395 cld /* count up */
2396 rep
2397 insl /* input */
2398 movl %eax,%edi /* restore register */
2399 POP_FRAME
2400 ret
2401
2402
2401
2402
2403 /*
2404 * int inst_fetch(int eip, int cs);
2405 *
2406 * Fetch instruction byte. Return -1 if invalid address.
2406 *
2406 * Loads the caller-supplied selector into %fs and reads one byte
2406 * at %fs:eip.  The RETRY/RECOVER table entries are bound to the
2406 * addresses of the instructions that follow them, so the fault
2406 * handling below depends on this exact instruction layout.
2407 */
2408 .globl EXT(inst_fetch)
2409 LEXT(inst_fetch)
2410 movl S_ARG1, %eax /* get segment */
2411 movw %ax,%fs /* into FS */
2412 movl S_ARG0, %eax /* get offset */
2413 RETRY_SECTION
2414 RETRY(EXT(inst_fetch)) /* re-load FS on retry: restart from the top */
2415 RECOVERY_SECTION
2416 RECOVER(EXT(inst_fetch_fault)) /* a fault on the load below lands at inst_fetch_fault */
2417 movzbl %fs:(%eax),%eax /* load instruction byte, zero-extended */
2418 ret
2419
2420 LEXT(inst_fetch_fault)
2421 movl $-1,%eax /* return -1 if error */
2422 ret
2423
2424
2423
2424
2425 #if MACH_KDP
2426 /*
2427 * kdp_copy_kmem(char *src, char *dst, int count)
2428 *
2429 * Similar to copyin except that both addresses are kernel addresses.
2429 * Returns the number of bytes copied (the full count on success,
2429 * 0 if the source range wraps or a fault occurs mid-copy).
2430 */
2431
2432 ENTRY(kdp_copy_kmem)
2433 pushl %esi
2434 pushl %edi /* save registers */
2435
2436 movl 8+S_ARG0,%esi /* get kernel start address (8+ skips the two pushes above) */
2437 movl 8+S_ARG1,%edi /* get kernel destination address */
2438
2439 movl 8+S_ARG2,%edx /* get count */
2440
2441 lea 0(%esi,%edx),%eax /* get kernel end address + 1 */
2442
2443 cmpl %esi,%eax
2444 jb kdp_vm_read_fail /* fail if wrap-around */
2445 cld /* count up */
2446 movl %edx,%ecx /* move by longwords first */
2447 shrl $2,%ecx
2448 RECOVERY_SECTION
2449 RECOVER(kdp_vm_read_fail) /* faults in the longword loop abort with 0 */
2450 rep
2451 movsl /* move longwords */
2452 movl %edx,%ecx /* now move remaining bytes */
2453 andl $3,%ecx
2454 RECOVERY_SECTION
2455 RECOVER(kdp_vm_read_fail)
2456 rep
2457 movsb
2458 kdp_vm_read_done: /* %ecx == 0 here: both rep loops ran to completion */
2459 movl 8+S_ARG2,%edx /* get count */
2460 subl %ecx,%edx /* Return number of bytes transferred */
2461 movl %edx,%eax
2462
2463 popl %edi /* restore registers */
2464 popl %esi
2465 ret /* and return */
2466
2467 kdp_vm_read_fail:
2468 xorl %eax,%eax /* didn't copy a thing. */
2469
2470 popl %edi
2471 popl %esi
2472 ret
2473 #endif
2474
2474
2475 /*
2476 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
2476 *
2476 * Read MSR `msr`, storing the low half through *lo and the high
2476 * half through *hi.  Returns 0 on success; returns 1 if the rdmsr
2476 * faults (e.g. #GP on a non-existent MSR), via the recovery table.
2477 */
2478 ENTRY(rdmsr_carefully)
2479 movl S_ARG0, %ecx /* %ecx = MSR number (rdmsr input) */
2480 RECOVERY_SECTION
2481 RECOVER(rdmsr_fail) /* a fault on the rdmsr lands at rdmsr_fail */
2482 rdmsr /* result in %edx:%eax */
2483 movl S_ARG1, %ecx
2484 movl %eax, (%ecx) /* *lo = low 32 bits */
2485 movl S_ARG2, %ecx
2486 movl %edx, (%ecx) /* *hi = high 32 bits */
2487 movl $0, %eax /* success */
2488 ret
2489
2490 rdmsr_fail:
2491 movl $1, %eax /* fault: report failure */
2492 ret
2493
2493
2494 /*
2495 * Done with recovery and retry tables.
2495 * These macros emit the end sentinels for the fault-recovery and
2495 * retry tables that the RECOVER()/RETRY() entries above feed into.
2496 */
2497 RECOVERY_SECTION
2498 RECOVER_TABLE_END
2499 RETRY_SECTION
2500 RETRY_TABLE_END
2501
2501
2502
2503
/* Return the current value of debug status register DR6 in %eax. */
2504 ENTRY(dr6)
2505 movl %db6, %eax
2506 ret
2507
2507
/*
 * dr<i>(address, type, len, persistence)
 *
 * Program hardware breakpoint register DR<i> with `address` and set
 * its control fields in DR7.  All four entry points share the tail
 * at 0:.  On entry to the tail: %eax = linear address (already
 * stored into %db<i>), %ecx = 2*i (bit position of the L<i> local-
 * enable bit in DR7).  Returns the new DR7 value in %eax.
 *
 * DR7 layout (Intel SDM): enable bits L<i>/G<i> at bits 2i..2i+1;
 * R/W<i> at bits 16+4i..17+4i; LEN<i> at bits 18+4i..19+4i.
 */
ENTRY(dr0)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)
	movl	%eax, %db0
	movl	$0, %ecx
	jmp	0f
ENTRY(dr1)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+1*4
	movl	%eax, %db1
	movl	$2, %ecx
	jmp	0f
ENTRY(dr2)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+2*4
	movl	%eax, %db2
	movl	$4, %ecx
	jmp	0f

ENTRY(dr3)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+3*4
	movl	%eax, %db3
	movl	$6, %ecx

0:
	pushl	%ebp
	movl	%esp, %ebp

	movl	%db7, %edx			/* current DR7 */
	movl	%edx,EXT(dr_addr)+4*4		/* trace slot: DR7 before */
	andl	dr_msk(,%ecx,2),%edx		/* clear L/G<i> and R/W<i>+LEN<i> fields */
	movl	%edx,EXT(dr_addr)+5*4		/* trace slot: DR7 after masking */
	movzbl	B_ARG3, %eax			/* persistence -> L/G enable bits */
	andb	$3, %al
	shll	%cl, %eax			/* enables live at bits 2i..2i+1 */
	orl	%eax, %edx

	movzbl	B_ARG1, %eax			/* type -> R/W<i> field */
	andb	$3, %al
	addb	%cl, %cl			/* FIX: control fields sit at 16+4i, not   */
	addb	$0x10, %cl			/* 16+2i; %cl = 2i doubled to 4i, plus 16. */
	shll	%cl, %eax			/* (matches the dr_msk masks, which clear   */
	orl	%eax, %edx			/* bits 16+4i..19+4i)                       */

	movzbl	B_ARG2, %eax			/* len -> LEN<i> field at bits 18+4i */
	andb	$3, %al
	addb	$0x2, %cl
	shll	%cl, %eax
	orl	%eax, %edx

	movl	%edx, %db7			/* install new DR7 */
	movl	%edx,EXT(dr_addr)+7*4		/* trace slot: DR7 installed */
	movl	%edx, %eax			/* return new DR7 value */
	leave
	ret
2565
2566 .data
/*
 * dr_msk[i]: AND-mask that clears debug register i's fields in DR7 —
 * its enable bits (2i..2i+1) and its R/W+LEN control nibble (16+4i..19+4i).
 */
2567 dr_msk:
2568 .long ~0x000f0003
2569 .long ~0x00f0000c
2570 .long ~0x0f000030
2571 .long ~0xf00000c0
/*
 * dr_addr: 8 longwords of debug state.  Slots 0-3 hold the addresses
 * programmed into DR0-DR3; slots 4, 5 and 7 are written by the dr<i>
 * code above with DR7 before/after-mask/installed values.  Slot 6
 * appears unused in this view — confirm against the rest of the file.
 */
2572 ENTRY(dr_addr)
2573 .long 0,0,0,0
2574 .long 0,0,0,0
2575 .text
2576
2576
/* Return the current value of control register CR0 in %eax. */
2577 ENTRY(get_cr0)
2578 movl %cr0, %eax
2579 ret
2580
2580
/* Load CR0 with the caller's first (stack) argument. */
2581 ENTRY(set_cr0)
2582 movl 4(%esp), %eax /* 4(%esp): first arg; no frame is built here */
2583 movl %eax, %cr0
2584 ret
2585
2585
2586 #ifndef SYMMETRY
2587
2588 /*
2589 * ffs(mask)
2589 *
2589 * Find-first-set: returns the 1-based index of the lowest set bit
2589 * of `mask`, or 0 if mask is zero (standard ffs() convention).
2590 */
2591 ENTRY(ffs)
2592 bsfl S_ARG0, %eax /* %eax = index of lowest set bit; ZF set if mask == 0 */
2593 jz 0f
2594 incl %eax /* convert 0-based bit index to 1-based result */
2595 ret
2596 0: xorl %eax, %eax /* mask was zero: return 0 */
2597 ret
2598
2598
2599 /*
2600 * cpu_shutdown()
2601 * Force reboot
2601 *
2601 * Loads a null IDT, then executes a divide-by-zero.  With no valid
2601 * IDT the resulting #DE cannot be delivered, which escalates to a
2601 * triple fault and resets the processor.
2602 */
2603
2604 null_idtr: /* zero-limit, zero-base IDT descriptor */
2605 .word 0
2606 .long 0
2607
2608 Entry(cpu_shutdown)
2609 lidt null_idtr /* disable the interrupt handler */
2610 xor %ecx,%ecx /* generate a divide by zero */
2611 div %ecx,%eax /* reboot now */
2612 ret /* this will "never" be executed */
2613
2613
2614 #endif /* SYMMETRY */
2615
2616
/*
 * setbit(int bitno, int *s)
 *
 * Set bit `bitno` within the bit string at `s`.  With a memory
 * operand, btsl scales the bit index across successive longwords,
 * so bitno may exceed 31.
 */
ENTRY(setbit)
	movl	S_ARG1, %eax		/* %eax = base of the bit string */
	movl	S_ARG0, %ecx		/* %ecx = bit index to set */
	btsl	%ecx, (%eax)		/* set the selected bit in memory */
	ret
2625
/*
 * clrbit(int bitno, int *s)
 *
 * Clear bit `bitno` within the bit string at `s`.  With a memory
 * operand, btrl scales the bit index across successive longwords,
 * so bitno may exceed 31.
 */
ENTRY(clrbit)
	movl	S_ARG1, %eax		/* %eax = base of the bit string */
	movl	S_ARG0, %ecx		/* %ecx = bit index to clear */
	btrl	%ecx, (%eax)		/* clear the selected bit in memory */
	ret
2634
2635 /*
2636 * ffsbit(int *s) - find first set bit in bit string
2636 *
2636 * Returns the 0-based index of the first set bit, scanning one
2636 * longword at a time.  CAUTION: there is no length bound — if no
2636 * bit is ever set the loop scans memory forever; the caller must
2636 * guarantee at least one set bit exists.
2637 */
2638 ENTRY(ffsbit)
2639 movl S_ARG0, %ecx /* address */
2640 movl $0, %edx /* base offset */
2641 0:
2642 bsfl (%ecx), %eax /* check argument bits; ZF set if this longword is zero */
2643 jnz 1f /* found bit, return */
2644 addl $4, %ecx /* increment address */
2645 addl $32, %edx /* increment offset */
2646 jmp 0b /* try again */
2647 1:
2648 addl %edx, %eax /* return offset: longword base + bit index */
2649 ret
2650
2650
/*
 * testbit(int nr, volatile void *array)
 *
 * Probe bit `nr` of the bit string at `array`.
 * Returns -1 (all ones) if the bit is set, 0 if it is clear.
 */
ENTRY(testbit)
	movl	S_ARG1,%ecx		/* %ecx = base of the bit array */
	movl	S_ARG0,%eax		/* %eax = bit number to probe */
	btl	%eax,(%ecx)		/* CF = value of the selected bit */
	sbbl	%eax,%eax		/* broadcast CF: -1 if set, 0 if clear */
	ret
2663
/*
 * Return the caller's saved return address (4(%ebp)).
 * NOTE(review): assumes the caller built a standard %ebp frame —
 * confirm callers are compiled with frame pointers.
 */
2664 ENTRY(get_pc)
2665 movl 4(%ebp),%eax
2666 ret
2667
2667
2668 #if ETAP
2669
/* Return the caller's saved return address (assumes an %ebp frame). */
2670 ENTRY(etap_get_pc)
2671 movl 4(%ebp), %eax /* fetch pc of caller */
2672 ret
2673
/*
 * tvals_to_etap(sec, nsec)
 *
 * Convert a {seconds, nanoseconds} pair to a 64-bit nanosecond
 * count, returned in %edx:%eax.
 */
2674 ENTRY(tvals_to_etap)
2675 movl S_ARG0, %eax /* seconds */
2676 movl $1000000000, %ecx
2677 mull %ecx /* %edx:%eax = seconds * 1e9 */
2678 addl S_ARG1, %eax /* add nanoseconds to the low half */
2679 adc $0, %edx /* propagate carry into the high half */
2680 ret
2681
2682 /* etap_time_t
2683 * etap_time_sub(etap_time_t stop, etap_time_t start)
2684 *
2685 * 64bit subtract, returns stop - start in %edx:%eax
2686 */
2687 ENTRY(etap_time_sub)
2688 movl S_ARG0, %eax /* stop.low */
2689 movl S_ARG1, %edx /* stop.hi */
2690 subl S_ARG2, %eax /* stop.lo - start.lo */
2691 sbbl S_ARG3, %edx /* stop.hi - start.hi (with borrow) */
2692 ret
2693
2694 #endif /* ETAP */
2695
2695
/*
 * minsecurity: builds a frame and falls straight through into jail,
 * parking this thread permanently.
 */
2696 ENTRY(minsecurity)
2697 pushl %ebp
2698 movl %esp,%ebp
2699 /*
2700 * jail: set the EIP to "jail" to block a kernel thread.
2701 * Useful to debug synchronization problems on MPs.
2701 * The self-jump spins forever; the thread never returns.
2702 */
2703 ENTRY(jail)
2704 jmp EXT(jail)
2705
2705
2706 /*
2707 * unsigned int
2708 * div_scale(unsigned int dividend,
2709 * unsigned int divisor,
2710 * unsigned int *scale)
2711 *
2712 * This function returns (dividend << *scale) //divisor where *scale
2713 * is the largest possible value before overflow. This is used in
2714 * computation where precision must be achieved in order to avoid
2715 * floating point usage.
2716 *
2717 * Algorithm:
2718 * *scale = 0;
2719 * while (((dividend >> *scale) >= divisor))
2720 * (*scale)++;
2721 * *scale = 32 - *scale;
2722 * return ((dividend << *scale) / divisor);
2722 *
2722 * Implementation note: the dividend is kept in %edx:%eax so that the
2722 * final divl sees (dividend << (32 - shift)) directly; the shrdl
2722 * shifts bits from %edx down into %eax as %edx is shifted right.
2722 * NOTE(review): the loop uses jle (signed compare) on unsigned
2722 * quantities — confirm callers never pass values with bit 31 set.
2723 */
2724 ENTRY(div_scale)
2725 PUSH_FRAME
2726 xorl %ecx, %ecx /* *scale = 0 */
2727 xorl %eax, %eax /* low half of the shifted dividend */
2728 movl ARG0, %edx /* get dividend */
2729 0:
2730 cmpl ARG1, %edx /* if (divisor > dividend) */
2731 jle 1f /* goto 1f */
2732 addl $1, %ecx /* (*scale)++ */
2733 shrdl $1, %edx, %eax /* dividend >> 1 (low bit of %edx enters %eax) */
2734 shrl $1, %edx /* dividend >> 1 */
2735 jmp 0b /* goto 0b */
2736 1:
2737 divl ARG1 /* (dividend << (32 - *scale)) / divisor */
2738 movl ARG2, %edx /* get scale */
2739 movl $32, (%edx) /* *scale = 32 */
2740 subl %ecx, (%edx) /* *scale -= %ecx */
2741 POP_FRAME
2742 ret
2743
2743
2744 /*
2745 * unsigned int
2746 * mul_scale(unsigned int multiplicand,
2747 * unsigned int multiplier,
2748 * unsigned int *scale)
2749 *
2750 * This function returns ((multiplicand * multiplier) >> *scale) where
2751 * scale is the largest possible value before overflow. This is used in
2752 * computation where precision must be achieved in order to avoid
2753 * floating point usage.
2754 *
2755 * Algorithm:
2756 * *scale = 0;
2757 * while (overflow((multiplicand * multiplier) >> *scale))
2758 * (*scale)++;
2759 * return ((multiplicand * multiplier) >> *scale);
2759 *
2759 * Implementation: the full 64-bit product sits in %edx:%eax; it is
2759 * shifted right until the high half (%edx) is zero, counting the
2759 * shifts into *scale.
2760 */
2761 ENTRY(mul_scale)
2762 PUSH_FRAME
2763 xorl %ecx, %ecx /* *scale = 0 */
2764 movl ARG0, %eax /* get multiplicand */
2765 mull ARG1 /* multiplicand * multiplier -> %edx:%eax */
2766 0:
2767 cmpl $0, %edx /* if (!overflow()) — high half empty */
2768 je 1f /* goto 1 */
2769 addl $1, %ecx /* (*scale)++ */
2770 shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */
2771 shrl $1, %edx /* (multiplicand * multiplier) >> 1 */
2772 jmp 0b
2773 1:
2774 movl ARG2, %edx /* get scale */
2775 movl %ecx, (%edx) /* set *scale */
2776 POP_FRAME
2777 ret
2778
2778
2779 #ifdef MACH_BSD
2780 /*
2781 * BSD System call entry point..
2781 *
2781 * NOTE(review): entered with the hardware-pushed cs/eip already on
2781 * the stack (presumably via a call gate, which pushes no eflags);
2781 * the pushf below adds the current eflags on top, leaving the three
2781 * fields rotated relative to the R_EIP/R_CS/R_EFLAGS save slots.
2781 * The shuffle sequence below puts them right — confirm against the
2781 * gate setup elsewhere in this file.
2782 */
2783
2784 Entry(trap_unix_syscall)
2785 trap_unix_addr:
2786 pushf /* save flags as soon as possible */
2787 trap_unix_2:
2788 pushl %eax /* save system call number */
2789 pushl $0 /* clear trap number slot */
2790
2791 pusha /* save the general registers */
2792 pushl %ds /* and the segment registers */
2793 pushl %es
2794 pushl %fs
2795 pushl %gs
2796
2797 mov %ss,%dx /* switch to kernel data segment */
2798 mov %dx,%ds
2799 mov %dx,%es
2800 mov $ CPU_DATA_GS,%dx
2801 mov %dx,%gs
2802
2803 /*
2804 * Shuffle eflags,eip,cs into proper places
2805 */
2806
2807 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2808 movl R_CS(%esp),%ecx /* eip is in CS slot */
2809 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2810 movl %ecx,R_EIP(%esp) /* fix eip */
2811 movl %edx,R_CS(%esp) /* fix cs */
2812 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2813
2814 TIME_TRAP_UENTRY
2815
2816 negl %eax /* get system call number (BSD numbers arrive negated) */
2817 shll $4,%eax /* manual indexing */
2818
2819 movl %gs:CPU_KERNEL_STACK,%ebx
2820 /* get current kernel stack */
2821 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2822 /* user registers. */
2823
2824 /*
2825 * Register use on entry:
2826 * eax contains syscall number
2827 * ebx contains user regs pointer
2828 */
2829 CAH(call_call)
2830 pushl %ebx /* Push the regs set onto stack */
2831 call EXT(unix_syscall)
2832 popl %ebx
2833 movl %esp,%ecx /* get kernel stack */
2834 or $(KERNEL_STACK_SIZE-1),%ecx /* round up to stack top - 1 */
2835 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2836 movl %eax,R_EAX(%esp) /* save return value */
2837 jmp EXT(return_from_trap) /* return to user */
2838
2838
2839 /*
2840 * Entry point for machdep system calls..
2840 *
2840 * Mirrors trap_unix_syscall: save state, fix the rotated
2840 * eflags/eip/cs fields, switch to the kernel stack, and hand the
2840 * saved register set to the C handler.
2841 */
2842
2843 Entry(trap_machdep_syscall)
2844 pushf /* save flags as soon as possible */
2845 pushl %eax /* save system call number */
2846 pushl $0 /* clear trap number slot */
2847
2848 pusha /* save the general registers */
2849 pushl %ds /* and the segment registers */
2850 pushl %es
2851 pushl %fs
2852 pushl %gs
2853
2854 mov %ss,%dx /* switch to kernel data segment */
2855 mov %dx,%ds
2856 mov %dx,%es
2857 mov $ CPU_DATA_GS,%dx
2858 mov %dx,%gs
2859
2860 /*
2861 * Shuffle eflags,eip,cs into proper places
2862 */
2863
2864 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2865 movl R_CS(%esp),%ecx /* eip is in CS slot */
2866 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2867 movl %ecx,R_EIP(%esp) /* fix eip */
2868 movl %edx,R_CS(%esp) /* fix cs */
2869 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2870
2871 TIME_TRAP_UENTRY
2872
2873 negl %eax /* get system call number */
2874 shll $4,%eax /* manual indexing */
2875
2876 movl %gs:CPU_KERNEL_STACK,%ebx
2877 /* get current kernel stack */
2878 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2879 /* user registers. */
2880
2881 /*
2882 * Register use on entry:
2883 * eax contains syscall number
2884 * ebx contains user regs pointer
2885 */
2886 CAH(call_call)
2887 pushl %ebx /* pass saved-register set to the C handler */
2888 call EXT(machdep_syscall)
2889 popl %ebx
2890 movl %esp,%ecx /* get kernel stack */
2891 or $(KERNEL_STACK_SIZE-1),%ecx
2892 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2893 movl %eax,R_EAX(%esp) /* save return value */
2894 jmp EXT(return_from_trap) /* return to user */
2895
2895
/*
 * Entry point for Mach 2.5 compatibility system calls.
 * Identical structure to trap_unix_syscall/trap_machdep_syscall;
 * only the C handler (mach25_syscall) differs.
 */
2896 Entry(trap_mach25_syscall)
2897 pushf /* save flags as soon as possible */
2898 pushl %eax /* save system call number */
2899 pushl $0 /* clear trap number slot */
2900
2901 pusha /* save the general registers */
2902 pushl %ds /* and the segment registers */
2903 pushl %es
2904 pushl %fs
2905 pushl %gs
2906
2907 mov %ss,%dx /* switch to kernel data segment */
2908 mov %dx,%ds
2909 mov %dx,%es
2910 mov $ CPU_DATA_GS,%dx
2911 mov %dx,%gs
2912
2913 /*
2914 * Shuffle eflags,eip,cs into proper places
2915 */
2916
2917 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2918 movl R_CS(%esp),%ecx /* eip is in CS slot */
2919 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2920 movl %ecx,R_EIP(%esp) /* fix eip */
2921 movl %edx,R_CS(%esp) /* fix cs */
2922 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2923
2924 TIME_TRAP_UENTRY
2925
2926 negl %eax /* get system call number */
2927 shll $4,%eax /* manual indexing */
2928
2929 movl %gs:CPU_KERNEL_STACK,%ebx
2930 /* get current kernel stack */
2931 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2932 /* user registers. */
2933
2934 /*
2935 * Register use on entry:
2936 * eax contains syscall number
2937 * ebx contains user regs pointer
2938 */
2939 CAH(call_call)
2940 pushl %ebx /* pass saved-register set to the C handler */
2941 call EXT(mach25_syscall)
2942 popl %ebx
2943 movl %esp,%ecx /* get kernel stack */
2944 or $(KERNEL_STACK_SIZE-1),%ecx
2945 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2946 movl %eax,R_EAX(%esp) /* save return value */
2947 jmp EXT(return_from_trap) /* return to user */
2948
2948
2949 #endif