]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/locore.s
xnu-792.12.6.tar.gz
[apple/xnu.git] / osfmk / i386 / locore.s
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58
59 #include <mach_rt.h>
60 #include <platforms.h>
61 #include <mach_kdb.h>
62 #include <mach_kgdb.h>
63 #include <mach_kdp.h>
64 #include <stat_time.h>
65 #include <mach_assert.h>
66
67 #include <sys/errno.h>
68 #include <i386/asm.h>
69 #include <i386/cpuid.h>
70 #include <i386/eflags.h>
71 #include <i386/proc_reg.h>
72 #include <i386/trap.h>
73 #include <assym.s>
74 #include <mach/exception_types.h>
75
76 #include <i386/mp.h>
77
78 #define PREEMPT_DEBUG_LOG 0
79
80
81 /*
82 * PTmap is recursive pagemap at top of virtual address space.
83 * Within PTmap, the page directory can be found (third indirection).
84 */
85 .globl _PTmap,_PTD,_PTDpde
86 .set _PTmap,(PTDPTDI << PDESHIFT)
87 .set _PTD,_PTmap + (PTDPTDI * NBPG)
88 .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
89
90 /*
91 * APTmap, APTD is the alternate recursive pagemap.
92 * It's used when modifying another process's page tables.
93 */
94 .globl _APTmap,_APTD,_APTDpde
95 .set _APTmap,(APTDPTDI << PDESHIFT)
96 .set _APTD,_APTmap + (APTDPTDI * NBPG)
97 .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
98
99 #if __MACHO__
100 /* Under Mach-O, etext is a variable which contains
101 * the last text address
102 */
103 #define ETEXT_ADDR (EXT(etext))
104 #else
105 /* Under ELF and other non-Mach-O formats, the address of
106 * etext represents the last text address
107 */
108 #define ETEXT_ADDR $ EXT(etext)
109 #endif
110
111 #define CX(addr,reg) addr(,reg,4)
112
113 .text
114 locore_start:
115
116 /*
117 * Fault recovery.
118 */
119
120 #ifdef __MACHO__
121 #define RECOVERY_SECTION .section __VECTORS, __recover
122 #define RETRY_SECTION .section __VECTORS, __retries
123 #else
124 #define RECOVERY_SECTION .text
/* Fixed: this line previously redefined RECOVERY_SECTION a second time,
 * leaving RETRY_SECTION undefined on non-Mach-O builds even though it is
 * used below (RETRY_SECTION / RETRY_TABLE_START). */
125 #define RETRY_SECTION .text
126 #endif
127
/*
 * Fault-recovery tables.  RECOVER(addr) emits a (faulting_eip, recovery_eip)
 * pair into the current recovery section: "9f" is the local label placed
 * immediately after the entry, back in .text, marking the instruction that
 * may fault; addr is where execution resumes if it does.  RETRY(addr) is the
 * same idea for faults that should re-execute after successful handling.
 */
128 #define RECOVER_TABLE_START \
129 .align 2 ; \
130 .globl EXT(recover_table) ;\
131 LEXT(recover_table) ;\
132 .text
133
134 #define RECOVER(addr) \
135 .align 2; \
136 .long 9f ;\
137 .long addr ;\
138 .text ;\
139 9:
140
141 #define RECOVER_TABLE_END \
142 .align 2 ;\
143 .globl EXT(recover_table_end) ;\
144 LEXT(recover_table_end) ;\
145 .text
146
147 /*
148 * Retry table for certain successful faults.
149 */
150 #define RETRY_TABLE_START \
151 .align 3; \
152 .globl EXT(retry_table) ;\
153 LEXT(retry_table) ;\
154 .text
155
156 #define RETRY(addr) \
157 .align 3 ;\
158 .long 9f ;\
159 .long addr ;\
160 .text ;\
161 9:
162
163 #define RETRY_TABLE_END \
164 .align 3; \
165 .globl EXT(retry_table_end) ;\
166 LEXT(retry_table_end) ;\
167 .text
168
169 /*
170 * Allocate recovery and retry tables.
171 */
172 RECOVERY_SECTION
173 RECOVER_TABLE_START
174 RETRY_SECTION
175 RETRY_TABLE_START
176
177 /*
178 * Timing routines.
179 */
/*
 * timer_update(timer, new_high, new_low)
 * Store a 64-bit timer value.  HIGHCHK is written first and HIGH last so
 * that a concurrent timer_grab() can detect a torn read (HIGH != HIGHCHK)
 * and retry.  Write order is load-bearing — do not reorder.
 */
180 Entry(timer_update)
181 movl 4(%esp),%ecx /* ecx = timer pointer */
182 movl 8(%esp),%eax /* eax = new high word */
183 movl 12(%esp),%edx /* edx = new low word */
184 movl %eax,TIMER_HIGHCHK(%ecx) /* publish check word first */
185 movl %edx,TIMER_LOW(%ecx)
186 movl %eax,TIMER_HIGH(%ecx) /* publish high word last */
187 ret
188
/*
 * timer_grab(timer)
 * Read a 64-bit timer value into %edx:%eax, retrying until HIGH and
 * HIGHCHK agree, i.e. no concurrent timer_update tore the read.
 */
189 Entry(timer_grab)
190 movl 4(%esp),%ecx /* ecx = timer pointer */
191 0: movl TIMER_HIGH(%ecx),%edx
192 movl TIMER_LOW(%ecx),%eax
193 cmpl TIMER_HIGHCHK(%ecx),%edx
194 jne 0b /* updater raced us: retry */
195 ret
196
197 #if STAT_TIME
198
/* Statistical timing: all the fine-grained timing hooks compile to nothing. */
199 #define TIME_TRAP_UENTRY
200 #define TIME_TRAP_UEXIT
201 #define TIME_INT_ENTRY
202 #define TIME_INT_EXIT
203
204 #else
205 /*
206 * Nanosecond timing.
207 */
208
209 /*
210 * Low 32-bits of nanotime returned in %eax.
211 * Computed from tsc using conversion scale/shift from per-cpu data.
212 * Uses %ecx and %edx.
213 */
214 #define NANOTIME32 \
215 pushl %esi /* save %esi */ ;\
216 movl %gs:CPU_THIS,%esi /* per-cpu data ptr */ ;\
217 addl $(CPU_RTC_NANOTIME),%esi /* esi -> per-cpu nanotime*/ ;\
218 rdtsc /* edx:eax = tsc */ ;\
219 subl RTN_TSC(%esi),%eax /* eax = (tsc - base_tsc) */ ;\
220 mull RTN_SCALE(%esi) /* eax *= scale */ ;\
221 movl RTN_SHIFT(%esi),%ecx /* ecx = shift */ ;\
222 shrdl %cl,%edx,%eax /* edx:eax >> shift */ ;\
223 andb $32,%cl /* shift == 32? (shrd ignores bit 5) */ ;\
224 cmovnel %edx,%eax /* %eax = %edx if so */ ;\
225 addl RTN_NANOS(%esi),%eax /* add base ns */ ;\
226 popl %esi
227
228 /*
229 * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
230 * Uses the same HIGHCHK-first/HIGH-last ordering as timer_update above so
231 * that timer_grab can detect a torn read.
232 */
231 #define TIMER_UPDATE(treg,dreg) \
232 addl TIMER_LOW(treg),dreg /* add delta low bits */ ;\
233 adcl $0,TIMER_HIGHCHK(treg) /* add carry check bits */ ;\
234 movl dreg,TIMER_LOW(treg) /* store updated low bits */ ;\
235 movl TIMER_HIGHCHK(treg),dreg /* copy high check bits */ ;\
236 movl dreg,TIMER_HIGH(treg) /* to high bits */
237
238 /*
239 * Add time delta to old timer and start new.
240 */
241 #define TIMER_EVENT(old,new) \
242 pushl %eax /* must be invariant */ ;\
243 cli /* block interrupts */ ;\
244 NANOTIME32 /* eax low bits nanosecs */ ;\
245 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
246 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
247 movl %eax,%edx /* save timestamp in %edx */ ;\
248 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
249 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
250 addl $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\
251 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ ;\
252 movl %gs:CPU_PROCESSOR,%edx /* get current processor */ ;\
253 movl %ecx,CURRENT_TIMER(%edx) /* set current timer */ ;\
254 sti /* interrupts on */ ;\
255 popl %eax /* must be invariant */
256
257 /*
258 * Update time on user trap entry.
259 * Uses %ecx,%edx.
260 */
261 #define TIME_TRAP_UENTRY TIMER_EVENT(USER,SYSTEM)
262
263 /*
264 * update time on user trap exit.
265 * Uses %ecx,%edx.
266 */
267 #define TIME_TRAP_UEXIT TIMER_EVENT(SYSTEM,USER)
268
269 /*
270 * update time on interrupt entry.
271 * Uses %eax,%ecx,%edx.
272 */
273 #define TIME_INT_ENTRY \
274 NANOTIME32 /* eax low bits nanosecs */ ;\
275 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
276 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
277 movl %eax,%edx /* save timestamp in %edx */ ;\
278 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
279 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
280 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
281 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
282 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
283
284 /*
285 * update time on interrupt exit.
286 * Uses %eax, %ecx, %edx.
287 */
288 #define TIME_INT_EXIT \
289 NANOTIME32 /* eax low bits nanosecs */ ;\
290 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
291 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
292 movl %eax,%edx /* save timestamp in %edx */ ;\
293 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
294 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
295 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
296 movl CURRENT_TIMER(%ecx),%ecx /* interrupted timer */ ;\
297 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
298
299 #endif /* STAT_TIME */
300
301 /*
302 * Encapsulate the transfer of exception stack frames between a PCB
303 * and a thread stack. Since the whole point of these is to emulate
304 * a call or exception that changes privilege level, both macros
305 * assume that there is no user esp or ss stored in the source
306 * frame (because there was no change of privilege to generate them).
307 */
308
309 /*
310 * Transfer a stack frame from a thread's user stack to its PCB.
311 * We assume the thread and stack addresses have been loaded into
312 * registers (our arguments).
313 *
314 * The macro overwrites edi, esi, ecx and whatever registers hold the
315 * thread and stack addresses (which can't be one of the above three).
316 * The thread address is overwritten with the address of its saved state
317 * (where the frame winds up).
318 *
319 * Must be called on kernel stack.
320 */
321 #define FRAME_STACK_TO_PCB(thread, stkp) ;\
322 movl ACT_PCB(thread),thread /* get act`s PCB */ ;\
323 leal PCB_ISS(thread),%edi /* point to PCB`s saved state */;\
324 movl %edi,thread /* save for later */ ;\
325 movl stkp,%esi /* point to start of frame */ ;\
326 movl $ R_UESP,%ecx ;\
327 sarl $2,%ecx /* word count for transfer */ ;\
328 cld /* we`re incrementing */ ;\
329 rep ;\
330 movsl /* transfer the frame */ ;\
331 addl $ R_UESP,stkp /* derive true "user" esp */ ;\
332 movl stkp,R_UESP(thread) /* store in PCB */ ;\
333 movl $0,%ecx ;\
334 mov %ss,%cx /* get current ss */ ;\
335 movl %ecx,R_SS(thread) /* store in PCB */
336
337 /*
338 * Transfer a stack frame from a thread's PCB to the stack pointed
339 * to by the PCB. We assume the thread address has been loaded into
340 * a register (our argument).
341 *
342 * The macro overwrites edi, esi, ecx and whatever register holds the
343 * thread address (which can't be one of the above three). The
344 * thread address is overwritten with the address of its saved state
345 * (where the frame winds up).
346 *
347 * Must be called on kernel stack.
348 */
349 #define FRAME_PCB_TO_STACK(thread) ;\
350 movl ACT_PCB(thread),%esi /* get act`s PCB */ ;\
351 leal PCB_ISS(%esi),%esi /* point to PCB`s saved state */;\
352 movl R_UESP(%esi),%edi /* point to end of dest frame */;\
353 movl ACT_MAP(thread),%ecx /* get act's map */ ;\
354 movl MAP_PMAP(%ecx),%ecx /* get map's pmap */ ;\
355 cmpl EXT(kernel_pmap), %ecx /* If kernel loaded task */ ;\
356 jz 1f /* use kernel data segment */ ;\
357 movl $ USER_DS,%ecx /* else use user data segment */;\
358 mov %cx,%es ;\
359 1: ;\
360 movl $ R_UESP,%ecx ;\
361 subl %ecx,%edi /* derive start of frame */ ;\
362 movl %edi,thread /* save for later */ ;\
363 sarl $2,%ecx /* word count for transfer */ ;\
364 cld /* we`re incrementing */ ;\
365 rep ;\
366 movsl /* transfer the frame */ ;\
367 mov %ss,%cx /* restore kernel segments */ ;\
368 mov %cx,%es
369
370 #undef PDEBUG
371
372 #ifdef PDEBUG
373
374 /*
375 * Traditional, not ANSI.
376 */
/*
 * CAH(label): counting debug hook.  Allocates label`count / label`limit in
 * .data, bumps the counter on each pass, and — when limit is nonzero —
 * spins while count == limit so a debugger can stop at the Nth hit by
 * writing limit.  Compiles to nothing unless PDEBUG is defined.
 */
377 #define CAH(label) \
378 .data ;\
379 .globl label/**/count ;\
380 label/**/count: ;\
381 .long 0 ;\
382 .globl label/**/limit ;\
383 label/**/limit: ;\
384 .long 0 ;\
385 .text ;\
386 addl $1,%ss:label/**/count ;\
387 cmpl $0,label/**/limit /* limit 0 => hook disabled */ ;\
388 jz label/**/exit ;\
389 pushl %eax ;\
390 label/**/loop: ;\
391 movl %ss:label/**/count,%eax ;\
392 cmpl %eax,%ss:label/**/limit /* spin while count == limit */ ;\
393 je label/**/loop ;\
394 popl %eax ;\
395 label/**/exit:
396
397 #else /* PDEBUG */
398
399 #define CAH(label)
400
401 #endif /* PDEBUG */
402
403 #if MACH_KDB
404 /*
405 * Last-ditch debug code to handle faults that might result
406 * from entering kernel (from collocated server) on an invalid
407 * stack. On collocated entry, there's no hardware-initiated
408 * stack switch, so a valid stack must be in place when an
409 * exception occurs, or we may double-fault.
410 *
411 * In case of a double-fault, our only recourse is to switch
412 * hardware "tasks", so that we avoid using the current stack.
413 *
414 * The idea here is just to get the processor into the debugger,
415 * post-haste. No attempt is made to fix up whatever error got
416 * us here, so presumably continuing from the debugger will
417 * simply land us here again -- at best.
418 */
419 #if 0
420 /*
421 * Note that the per-fault entry points are not currently
422 * functional. The only way to make them work would be to
423 * set up separate TSS's for each fault type, which doesn't
424 * currently seem worthwhile. (The offset part of a task
425 * gate is always ignored.) So all faults that task switch
426 * currently resume at db_task_start.
427 */
428 /*
429 * Double fault (Murphy's point) - error code (0) on stack
430 */
431 Entry(db_task_dbl_fault)
432 popl %eax /* error code -> %eax */
433 movl $(T_DOUBLE_FAULT),%ebx /* trap number -> %ebx */
434 jmp db_task_start
435 /*
436 * Segment not present - error code on stack
437 */
438 Entry(db_task_seg_np)
439 popl %eax /* error code -> %eax */
440 movl $(T_SEGMENT_NOT_PRESENT),%ebx
441 jmp db_task_start
442 /*
443 * Stack fault - error code on (current) stack
444 */
445 Entry(db_task_stk_fault)
446 popl %eax /* error code -> %eax */
447 movl $(T_STACK_FAULT),%ebx
448 jmp db_task_start
449 /*
450 * General protection fault - error code on stack
451 */
452 Entry(db_task_gen_prot)
453 popl %eax /* error code -> %eax */
454 movl $(T_GENERAL_PROTECTION),%ebx
455 jmp db_task_start
456 #endif /* 0 */
457 /*
458 * The entry point where execution resumes after last-ditch debugger task
459 * switch.
460 * On entry (by convention of the stubs above): %eax = error code,
461 * %ebx = trap number.
462 */
461 Entry(db_task_start)
462 movl %esp,%edx
463 subl $ISS_SIZE,%edx /* carve out an i386_saved_state */
464 movl %edx,%esp /* allocate i386_saved_state on stack */
465 movl %eax,R_ERR(%esp) /* stash error code in saved state */
466 movl %ebx,R_TRAPNO(%esp) /* stash trap number in saved state */
467 pushl %edx /* pass &saved_state */
468 CPU_NUMBER(%edx)
469 movl CX(EXT(mp_dbtss),%edx),%edx /* this CPU`s debug TSS */
470 movl TSS_LINK(%edx),%eax
471 pushl %eax /* pass along selector of previous TSS */
472 call EXT(db_tss_to_frame)
473 popl %eax /* get rid of TSS selector */
474 call EXT(db_trap_from_asm)
475 addl $0x4,%esp
476 /*
477 * And now...?
478 */
479 iret /* ha, ha, ha... */
480 #endif /* MACH_KDB */
481
482 /*
483 * Trap/interrupt entry points.
484 *
485 * All traps must create the following save area on the PCB "stack":
486 *
487 * gs
488 * fs
489 * es
490 * ds
491 * edi
492 * esi
493 * ebp
494 * cr2 if page fault - otherwise unused
495 * ebx
496 * edx
497 * ecx
498 * eax
499 * trap number
500 * error code
501 * eip
502 * cs
503 * eflags
504 * user esp - if from user
505 * user ss - if from user
506 * es - if from V86 thread
507 * ds - if from V86 thread
508 * fs - if from V86 thread
509 * gs - if from V86 thread
510 *
511 */
512
513 /*
514 * General protection or segment-not-present fault.
515 * Check for a GP/NP fault in the kernel_return
516 * sequence; if there, report it as a GP/NP fault on the user's instruction.
517 *
518 * esp-> 0: trap code (NP or GP)
519 * 4: segment number in error
520 * 8 eip
521 * 12 cs
522 * 16 eflags
523 * 20 old registers (trap is from kernel)
524 */
525 Entry(t_gen_prot)
526 pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
527 jmp trap_check_kernel_exit /* check for kernel exit sequence */
528
529 Entry(t_segnp)
530 pushl $(T_SEGMENT_NOT_PRESENT)
531 /* indicate fault type */
532
533 trap_check_kernel_exit:
534 testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
535 jnz EXT(alltraps) /* isn`t kernel trap if so */
536 testl $3,12(%esp) /* is trap from kernel mode? */
537 jne EXT(alltraps) /* if so: */
538 /* check for the kernel exit sequence */
539 cmpl $ EXT(kret_iret),8(%esp) /* on IRET? */
540 je fault_iret
541 cmpl $ EXT(kret_popl_ds),8(%esp) /* popping DS? */
542 je fault_popl_ds
543 cmpl $ EXT(kret_popl_es),8(%esp) /* popping ES? */
544 je fault_popl_es
545 cmpl $ EXT(kret_popl_fs),8(%esp) /* popping FS? */
546 je fault_popl_fs
547 cmpl $ EXT(kret_popl_gs),8(%esp) /* popping GS? */
548 je fault_popl_gs
549 take_fault: /* if none of the above: */
550 jmp EXT(alltraps) /* treat as normal trap. */
551
552 /*
553 * GP/NP fault on IRET: CS or SS is in error.
554 * All registers contain the user's values.
555 *
556 * on SP is
557 * 0 trap number
558 * 4 errcode
559 * 8 eip
560 * 12 cs --> trapno
561 * 16 efl --> errcode
562 * 20 user eip
563 * 24 user cs
564 * 28 user eflags
565 * 32 user esp
566 * 36 user ss
567 */
568 fault_iret:
569 movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
570 popl %eax /* get trap number */
571 movl %eax,12-4(%esp) /* put in user trap number */
572 popl %eax /* get error code */
573 movl %eax,16-8(%esp) /* put in user errcode */
574 popl %eax /* restore eax */
575 CAH(fltir)
576 jmp EXT(alltraps) /* take fault */
577
578 /*
579 * Fault restoring a segment register. The user's registers are still
580 * saved on the stack. The offending segment register has not been
581 * popped.
582 */
583 fault_popl_ds:
584 popl %eax /* get trap number */
585 popl %edx /* get error code */
586 addl $12,%esp /* pop stack to user regs */
587 jmp push_es /* (DS on top of stack) */
588 fault_popl_es:
589 popl %eax /* get trap number */
590 popl %edx /* get error code */
591 addl $12,%esp /* pop stack to user regs */
592 jmp push_fs /* (ES on top of stack) */
593 fault_popl_fs:
594 popl %eax /* get trap number */
595 popl %edx /* get error code */
596 addl $12,%esp /* pop stack to user regs */
597 jmp push_gs /* (FS on top of stack) */
598 fault_popl_gs:
599 popl %eax /* get trap number */
600 popl %edx /* get error code */
601 addl $12,%esp /* pop stack to user regs */
602 jmp push_segregs /* (GS on top of stack) */
603
604 push_es:
605 pushl %es /* restore es, */
606 push_fs:
607 pushl %fs /* restore fs, */
608 push_gs:
609 pushl %gs /* restore gs. */
610 push_segregs:
611 movl %eax,R_TRAPNO(%esp) /* set trap number */
612 movl %edx,R_ERR(%esp) /* set error code */
613 CAH(fltpp)
614 jmp trap_set_segs /* take trap */
615
616 /*
617 * Debug trap. Check for single-stepping across system call into
618 * kernel. If this is the case, taking the debug trap has turned
619 * off single-stepping - save the flags register with the trace
620 * bit set.
621 */
622 Entry(t_debug)
623 testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
624 jnz 0f /* isn`t kernel trap if so */
625 testl $3,4(%esp) /* is trap from kernel mode? */
626 jnz 0f /* if so: */
627 cmpl $syscall_entry,(%esp) /* system call entry? */
628 jne 1f /* if so: */
629 /* flags are sitting where syscall */
630 /* wants them */
631 addl $8,%esp /* remove eip/cs */
632 jmp syscall_entry_2 /* continue system call entry */
633
634 1: cmpl $trap_unix_addr,(%esp)
635 jne 0f
636 addl $8,%esp
637 jmp trap_unix_2
638
639 0: pushl $0 /* otherwise: */
640 pushl $(T_DEBUG) /* handle as normal */
641 jmp EXT(alltraps) /* debug fault */
642
643 /*
644 * Page fault traps save cr2.
645 */
646 Entry(t_page_fault)
647 pushl $(T_PAGE_FAULT) /* mark a page fault trap */
648 pusha /* save the general registers */
649 movl %cr2,%eax /* get the faulting address */
650 movl %eax,12(%esp) /* save in esp save slot */
651 jmp trap_push_segs /* continue fault */
652
653 /*
654 * All 'exceptions' enter here with:
655 * esp-> trap number
656 * error code
657 * old eip
658 * old cs
659 * old eflags
660 * old esp if trapped from user
661 * old ss if trapped from user
662 *
663 * NB: below use of CPU_NUMBER assumes that macro will use correct
664 * segment register for any kernel data accesses.
665 */
666 Entry(alltraps)
667 pusha /* save the general registers */
668 trap_push_segs:
669 pushl %ds /* save the segment registers */
670 pushl %es
671 pushl %fs
672 pushl %gs
673
674 trap_set_segs:
675 movl %ss,%eax
676 movl %eax,%ds
677 movl %eax,%es /* switch to kernel data seg */
678 cld /* clear direction flag */
679 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
680 jnz trap_from_user /* user mode trap if so */
681 testb $3,R_CS(%esp) /* user mode trap? */
682 jnz trap_from_user
683 cmpl $0,%gs:CPU_ACTIVE_KLOADED
684 je trap_from_kernel /* if clear, truly in kernel */
685 #ifdef FIXME
686 cmpl ETEXT_ADDR,R_EIP(%esp) /* pc within kernel? */
687 jb trap_from_kernel
688 #endif
689 trap_from_kloaded:
690 /*
691 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
692 * so transfer the stack frame into the PCB explicitly, then
693 * start running on resulting "PCB stack". We have to set
694 * up a simulated "uesp" manually, since there's none in the
695 * frame.
696 */
697 mov $ CPU_DATA_GS,%dx
698 mov %dx,%gs
699 CAH(atstart)
700 movl %gs:CPU_ACTIVE_KLOADED,%ebx
701 movl %gs:CPU_KERNEL_STACK,%eax
702 xchgl %esp,%eax
703 FRAME_STACK_TO_PCB(%ebx,%eax)
704 CAH(atend)
705 jmp EXT(take_trap)
706
707 trap_from_user:
708 mov $ CPU_DATA_GS,%ax
709 mov %ax,%gs
710
711 TIME_TRAP_UENTRY
712
713 movl %gs:CPU_KERNEL_STACK,%ebx
714 xchgl %ebx,%esp /* switch to kernel stack */
715 /* user regs pointer already set */
716 LEXT(take_trap)
717 pushl %ebx /* record register save area */
718 pushl %ebx /* pass register save area to trap */
719 call EXT(user_trap) /* call user trap routine */
720 movl 4(%esp),%esp /* switch back to PCB stack */
721
722 /*
723 * Return from trap or system call, checking for ASTs.
724 * On PCB stack.
725 */
726
727 LEXT(return_from_trap)
728 movl %gs:CPU_PENDING_AST,%edx
729 cmpl $0,%edx
730 je EXT(return_to_user) /* if we need an AST: */
731
732 movl %gs:CPU_KERNEL_STACK,%esp
733 /* switch to kernel stack */
734 pushl $0 /* push preemption flag */
735 call EXT(i386_astintr) /* take the AST */
736 addl $4,%esp /* pop preemption flag */
737 popl %esp /* switch back to PCB stack (w/exc link) */
738 jmp EXT(return_from_trap) /* and check again (rare) */
739 /* ASTs after this point will */
740 /* have to wait */
741
742 /*
743 * Arrange the checks needed for kernel-loaded (or kernel-loading)
744 * threads so that branch is taken in kernel-loaded case.
745 */
746 LEXT(return_to_user)
747 TIME_TRAP_UEXIT
748 cmpl $0,%gs:CPU_ACTIVE_KLOADED
749 jnz EXT(return_xfer_stack)
750 movl %gs:CPU_ACTIVE_THREAD, %ebx /* get active thread */
751
752 #if MACH_RT
753 #if MACH_ASSERT
754 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
755 je EXT(return_from_kernel)
756 int $3
757 #endif /* MACH_ASSERT */
758 #endif /* MACH_RT */
759
760 /*
761 * Return from kernel mode to interrupted thread.
762 */
763
764 LEXT(return_from_kernel)
765 LEXT(kret_popl_gs)
766 popl %gs /* restore segment registers */
767 LEXT(kret_popl_fs)
768 popl %fs
769 LEXT(kret_popl_es)
770 popl %es
771 LEXT(kret_popl_ds)
772 popl %ds
773 popa /* restore general registers */
774 addl $8,%esp /* discard trap number and error code */
775
776 LEXT(kret_iret)
777 iret /* return from interrupt */
778
779
780 LEXT(return_xfer_stack)
781 /*
782 * If we're on PCB stack in a kernel-loaded task, we have
783 * to transfer saved state back to thread stack and swap
784 * stack pointers here, because the hardware's not going
785 * to do so for us.
786 */
787 CAH(rxsstart)
788 movl %gs:CPU_KERNEL_STACK,%esp
789 movl %gs:CPU_ACTIVE_KLOADED,%eax
790 FRAME_PCB_TO_STACK(%eax) /* %eax <- start of copied frame */
791 movl %eax,%esp /* run on the thread stack */
792 CAH(rxsend)
793 jmp EXT(return_from_kernel)
794
795 /*
796 * Hate to put this here, but setting up a separate swap_func for
797 * kernel-loaded threads no longer works, since thread executes
798 * "for a while" (i.e., until it reaches glue code) when first
799 * created, even if it's nominally suspended. Hence we can't
800 * transfer the PCB when the thread first resumes, because we
801 * haven't initialized it yet.
802 */
803 /*
804 * Have to force transfer to new stack "manually". Use a string
805 * move to transfer all of our saved state to the stack pointed
806 * to by iss.uesp, then install a pointer to it as our current
807 * stack pointer.
808 */
809 LEXT(return_kernel_loading)
810 movl %gs:CPU_KERNEL_STACK,%esp
811 movl %gs:CPU_ACTIVE_THREAD, %ebx /* get active thread */
812 movl %ebx,%edx /* save for later */
813 FRAME_PCB_TO_STACK(%ebx)
814 movl %ebx,%esp /* start running on new stack */
815 movl $0,%gs:CPU_ACTIVE_KLOADED /* set cached indicator */
816 jmp EXT(return_from_kernel)
817
818 /*
819 * Trap from kernel mode. No need to switch stacks or load segment registers.
820 */
821 trap_from_kernel:
822 #if MACH_KDB || MACH_KGDB
823 mov $ CPU_DATA_GS,%ax
824 mov %ax,%gs
825 movl %esp,%ebx /* save current stack */
826
827 cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
828 jb 6f /* OK if so */
829
830 #if MACH_KGDB
831 cmpl $0,EXT(kgdb_active) /* Unexpected trap in kgdb */
832 je 0f /* no */
833
834 pushl %esp /* Already on kgdb stack */
835 cli
836 call EXT(kgdb_trap)
837 addl $4,%esp
838 jmp EXT(return_from_kernel)
839 0: /* should kgdb handle this exception? */
840 cmpl $(T_NO_FPU),R_TRAPNO(%esp) /* FPU disabled? */
841 je 2f /* yes */
842 cmpl $(T_PAGE_FAULT),R_TRAPNO(%esp) /* page fault? */
843 je 2f /* yes */
844 1:
845 cli /* disable interrupts */
846 CPU_NUMBER(%edx) /* get CPU number */
847 movl CX(EXT(kgdb_stacks),%edx),%ebx
848 xchgl %ebx,%esp /* switch to kgdb stack */
849 pushl %ebx /* pass old sp as an arg */
850 call EXT(kgdb_from_kernel)
851 popl %esp /* switch back to kernel stack */
852 jmp EXT(return_from_kernel)
853 2:
854 #endif /* MACH_KGDB */
855
856 #if MACH_KDB
857 cmpl $0,EXT(db_active) /* could trap be from ddb? */
858 je 3f /* no */
859 CPU_NUMBER(%edx) /* see if this CPU is in ddb */
860 cmpl $0,CX(EXT(kdb_active),%edx)
861 je 3f /* no */
862 pushl %esp
863 call EXT(db_trap_from_asm)
864 addl $0x4,%esp
865 jmp EXT(return_from_kernel)
866
867 3:
868 /*
869 * Dilemma: don't want to switch to kernel_stack if trap
870 * "belongs" to ddb; don't want to switch to db_stack if
871 * trap "belongs" to kernel. So have to duplicate here the
872 * set of trap types that kernel_trap() handles. Note that
873 * "unexpected" page faults will not be handled by kernel_trap().
874 * In this panic-worthy case, we fall into the debugger with
875 * kernel_stack containing the call chain that led to the
876 * bogus fault.
877 */
878 movl R_TRAPNO(%esp),%edx
879 cmpl $(T_PAGE_FAULT),%edx
880 je 4f
881 cmpl $(T_NO_FPU),%edx
882 je 4f
883 cmpl $(T_FPU_FAULT),%edx
884 je 4f
885 cmpl $(T_FLOATING_POINT_ERROR),%edx
886 je 4f
887 cmpl $(T_PREEMPT),%edx
888 jne 7f
889 4:
890 #endif /* MACH_KDB */
891
892 cmpl %gs:CPU_KERNEL_STACK,%esp
893 /* if not already on kernel stack, */
894 ja 5f /* check some more */
895 cmpl %gs:CPU_ACTIVE_STACK,%esp
896 ja 6f /* on kernel stack: no switch */
897 5:
898 movl %gs:CPU_KERNEL_STACK,%esp
899 6:
900 pushl %ebx /* save old stack */
901 pushl %ebx /* pass as parameter */
902 call EXT(kernel_trap) /* to kernel trap routine */
903 addl $4,%esp /* pop parameter */
904 testl %eax,%eax
905 jne 8f
906 /*
907 * If kernel_trap returns false, trap wasn't handled.
908 */
909 7:
910 #if MACH_KDB
911 CPU_NUMBER(%edx)
912 movl CX(EXT(db_stacks),%edx),%esp
913 pushl %ebx /* pass old stack as parameter */
914 call EXT(db_trap_from_asm)
915 #endif /* MACH_KDB */
916 #if MACH_KGDB
917 cli /* disable interrupts */
918 CPU_NUMBER(%edx) /* get CPU number */
919 movl CX(EXT(kgdb_stacks),%edx),%esp
920 pushl %ebx /* pass old stack as parameter */
921 call EXT(kgdb_from_kernel)
922 #endif /* MACH_KGDB */
923 addl $4,%esp /* pop parameter */
924 testl %eax,%eax
925 jne 8f
926 /*
927 * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap
928 * wasn't handled.
929 */
930 pushl %ebx /* pass old stack as parameter */
931 call EXT(panic_trap)
932 addl $4,%esp /* pop parameter */
933 8:
934 movl %ebx,%esp /* get old stack (from callee-saves reg) */
935 #else /* MACH_KDB || MACH_KGDB */
936 pushl %esp /* pass parameter */
937 call EXT(kernel_trap) /* to kernel trap routine */
938 addl $4,%esp /* pop parameter */
939 #endif /* MACH_KDB || MACH_KGDB */
940
941 #if MACH_RT
942 movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
943 testl $ AST_URGENT,%eax /* any urgent preemption? */
944 je EXT(return_from_kernel) /* no, nothing to do */
945 cmpl $ T_PREEMPT,48(%esp) /* preempt request? */
946 jne EXT(return_from_kernel) /* no, nothing to do */
947 movl %gs:CPU_KERNEL_STACK,%eax
948 movl %esp,%ecx
949 xorl %eax,%ecx
950 andl $(-KERNEL_STACK_SIZE),%ecx
951 testl %ecx,%ecx /* are we on the kernel stack? */
952 jne EXT(return_from_kernel) /* no, skip it */
953
954 #if PREEMPT_DEBUG_LOG
955 pushl 28(%esp) /* stack pointer */
956 pushl 24+4(%esp) /* frame pointer */
957 pushl 56+8(%esp) /* stack pointer */
958 pushl $0f
959 call EXT(log_thread_action)
960 addl $16, %esp
961 .data
962 0: String "trap preempt eip"
963 .text
964 #endif /* PREEMPT_DEBUG_LOG */
965
966 pushl $1 /* push preemption flag */
967 call EXT(i386_astintr) /* take the AST */
968 addl $4,%esp /* pop preemption flag */
969 #endif /* MACH_RT */
970
971 jmp EXT(return_from_kernel)
972
973 /*
974 * Called as a function, makes the current thread
975 * return from the kernel as if from an exception.
976 */
977
978 .globl EXT(thread_exception_return)
979 .globl EXT(thread_bootstrap_return)
980 LEXT(thread_exception_return)
981 LEXT(thread_bootstrap_return)
982 movl %esp,%ecx /* get kernel stack */
983 or $(KERNEL_STACK_SIZE-1),%ecx
984 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
985 jmp EXT(return_from_trap)
986
987 Entry(call_continuation)
988 movl S_ARG0,%eax /* get continuation */
989 movl S_ARG1,%edx /* continuation param */
990 movl S_ARG2,%ecx /* wait result */
991 movl %esp,%ebp /* get kernel stack */
992 or $(KERNEL_STACK_SIZE-1),%ebp
993 addl $(-3-IKS_SIZE),%ebp
994 movl %ebp,%esp /* pop the stack */
995 xorl %ebp,%ebp /* zero frame pointer */
996 pushl %ecx
997 pushl %edx
998 call *%eax /* call continuation */
999 addl $8,%esp
1000 movl %gs:CPU_ACTIVE_THREAD,%eax
1001 pushl %eax
1002 call EXT(thread_terminate)
1003
/* Debug-only interrupt logging hooks — currently compiled out (#if 0). */
1004 #if 0
1005 #define LOG_INTERRUPT(info,msg) \
1006 pushal ; \
1007 pushl msg ; \
1008 pushl info ; \
1009 call EXT(log_thread_action) ; \
1010 add $8,%esp ; \
1011 popal
1012 #define CHECK_INTERRUPT_TIME(n) \
1013 pushal ; \
1014 pushl $n ; \
1015 call EXT(check_thread_time) ; \
1016 add $4,%esp ; \
1017 popal
1018 #else
1019 #define LOG_INTERRUPT(info,msg)
1020 #define CHECK_INTERRUPT_TIME(n)
1021 #endif
1022
1023 .data
1024 imsg_start:
1025 String "interrupt start"
1026 imsg_end:
1027 String "interrupt end"
1028
1032 .text
1033 /*
1034 * All interrupts enter here.
1035 * old %eax on stack; interrupt number in %eax.
1036 */
/*
 * all_intrs:
 * Common hardware-interrupt entry.  Saves scratch and segment
 * registers, switches to kernel data segments, moves onto the
 * per-CPU interrupt stack (unless already on it — see
 * int_from_intstack), and dispatches to PE_incoming_interrupt().
 * On the way out, checks for pending ASTs and either handles a
 * kernel-preemption AST inline or detours to ast_from_interrupt
 * for user-mode/V86 returns.
 */
1037 Entry(all_intrs)
1038 pushl %ecx /* save registers */
1039 pushl %edx
1040 cld /* clear direction flag */
1041
1042 pushl %ds /* save segment registers */
1043 pushl %es
1044 pushl %fs
1045 pushl %gs
1046 mov %ss,%dx /* switch to kernel segments */
1047 mov %dx,%ds
1048 mov %dx,%es
1049 mov $ CPU_DATA_GS,%dx
1050 mov %dx,%gs
1051
1052 /*
1053 * test whether already on interrupt stack
1054 */
1055 movl %gs:CPU_INT_STACK_TOP,%ecx
1056 cmpl %esp,%ecx
1057 jb 1f
1058 leal -INTSTACK_SIZE(%ecx),%edx
1059 cmpl %esp,%edx
1060 jb int_from_intstack
1061 1:
1062 movl %esp,%edx /* & i386_interrupt_state */
1063 xchgl %ecx,%esp /* switch to interrupt stack */
1064
1065 pushl %ecx /* save pointer to old stack */
1066 pushl %edx /* pass &i386_interrupt_state to pe_incoming_interrupt */
1067 pushl %eax /* push trap number */
1068
1069 TIME_INT_ENTRY /* do timing */
1070
1071 #if MACH_RT
1072 incl %gs:CPU_PREEMPTION_LEVEL
1073 #endif /* MACH_RT */
1074 incl %gs:CPU_INTERRUPT_LEVEL
1075
1076 call EXT(PE_incoming_interrupt) /* call generic interrupt routine */
1077 addl $8,%esp /* pop trap number and &i386_interrupt_state */
1078
1079 .globl EXT(return_to_iret)
1080 LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
1081
1082 decl %gs:CPU_INTERRUPT_LEVEL
1083
1084 #if MACH_RT
1085 decl %gs:CPU_PREEMPTION_LEVEL
1086 #endif /* MACH_RT */
1087
1088 TIME_INT_EXIT /* do timing */
1089
/* Old stack pointer was saved on the interrupt stack above. */
1090 popl %esp /* switch back to old stack */
1091
1092 movl %gs:CPU_PENDING_AST,%eax
1093 testl %eax,%eax /* any pending asts? */
1094 je 1f /* no, nothing to do */
1095 testl $(EFL_VM),I_EFL(%esp) /* if in V86 */
1096 jnz ast_from_interrupt /* take it */
1097 testb $3,I_CS(%esp) /* user mode, */
1098 jnz ast_from_interrupt /* take it */
1099 #ifdef FIXME
1100 cmpl ETEXT_ADDR,I_EIP(%esp) /* if within kernel-loaded task, */
1101 jnb ast_from_interrupt /* take it */
1102 #endif
1103
/* Interrupted kernel code: only take an urgent AST here, and only if
 * preemption is enabled, we're outside locore, and on a kernel stack. */
1104 #if MACH_RT
1105 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption masked? */
1106 jne 1f /* yes, skip it */
1107 testl $ AST_URGENT,%eax /* any urgent requests? */
1108 je 1f /* no, skip it */
1109 cmpl $ EXT(locore_end),I_EIP(%esp) /* are we in locore code? */
1110 jb 1f /* yes, skip it */
1111 movl %gs:CPU_KERNEL_STACK,%eax
1112 movl %esp,%ecx
1113 xorl %eax,%ecx
1114 andl $(-KERNEL_STACK_SIZE),%ecx
1115 testl %ecx,%ecx /* are we on the kernel stack? */
1116 jne 1f /* no, skip it */
1117
1118 /*
1119 * Take an AST from kernel space. We don't need (and don't want)
1120 * to do as much as the case where the interrupt came from user
1121 * space.
1122 */
1123 #if PREEMPT_DEBUG_LOG
1124 pushl $0
1125 pushl $0
1126 pushl I_EIP+8(%esp)
1127 pushl $0f
1128 call EXT(log_thread_action)
1129 addl $16, %esp
1130 .data
1131 0: String "intr preempt eip"
1132 .text
1133 #endif /* PREEMPT_DEBUG_LOG */
1134
1135 sti
1136 pushl $1 /* push preemption flag */
1137 call EXT(i386_astintr) /* take the AST */
1138 addl $4,%esp /* pop preemption flag */
1139 #endif /* MACH_RT */
1140
1141 1:
1142 pop %gs
1143 pop %fs
1144 pop %es /* restore segment regs */
1145 pop %ds
1146 pop %edx
1147 pop %ecx
1148 pop %eax
1149 iret /* return to caller */
1147
/*
 * int_from_intstack:
 * Entered from all_intrs when the CPU is already executing on the
 * per-CPU interrupt stack (nested interrupt).  Dispatches to
 * PE_incoming_interrupt() without switching stacks, then unwinds
 * and irets directly; ASTs are deliberately not checked here.
 *
 * FIX: the comment on the "pushl %edx" line previously ended with an
 * opening comment marker instead of a closing one, leaving the
 * C-style comment unterminated — the assembler silently commented
 * out everything up to the next closing marker, including the
 * "pushl %eax" that passes the trap number.  Terminator corrected.
 *
 * NOTE(review): the stack-accounting comments on the two addl
 * instructions below appear shifted by one 4-byte slot relative to
 * the actual push order (trap number is on top after the call) —
 * the net 24 bytes released is consistent either way; confirm
 * against the save layout in all_intrs before relying on them.
 */
1151 int_from_intstack:
1152 #if MACH_RT
1153 incl %gs:CPU_PREEMPTION_LEVEL
1154 #endif /* MACH_RT */
1155
1156 incl %gs:CPU_INTERRUPT_LEVEL
1157
1158 movl %esp, %edx /* i386_interrupt_state */
1159 pushl %edx /* pass &i386_interrupt_state to PE_incoming_interrupt */
1160
1161 pushl %eax /* Push trap number */
1162
1163 call EXT(PE_incoming_interrupt)
1164 addl $20,%esp /* pop i386_interrupt_state, gs,fs,es,ds */
1165
1166 LEXT(return_to_iret_i) /* ( label for kdb_kintr) */
1167
1168 addl $4,%esp /* pop trap number */
1169
1170 decl %gs:CPU_INTERRUPT_LEVEL
1171
1172 #if MACH_RT
1173 decl %gs:CPU_PREEMPTION_LEVEL
1174 #endif /* MACH_RT */
1175
1176 pop %edx /* must have been on kernel segs */
1177 pop %ecx
1178 pop %eax /* no ASTs */
1179 iret
1177
1181 /*
1182 * Take an AST from an interrupt.
1183 * On PCB stack.
1184 * sp-> es -> edx
1185 * ds -> ecx
1186 * edx -> eax
1187 * ecx -> trapno
1188 * eax -> code
1189 * eip
1190 * cs
1191 * efl
1192 * esp
1193 * ss
1194 */
/*
 * ast_from_interrupt:
 * Reached from all_intrs when a pending AST must be delivered to a
 * user-mode (or V86) return path.  Pops the lightweight interrupt
 * save area, then rebuilds a full trap-style save frame (fake
 * trapno/code, pusha, segment registers) so i386_astintr() and
 * return_from_trap see the standard layout.
 */
1195 ast_from_interrupt:
1196 pop %gs
1197 pop %fs
1198 pop %es /* restore all registers ... */
1199 pop %ds
1200 popl %edx
1201 popl %ecx
1202 popl %eax
1203 sti /* Reenable interrupts */
1204 pushl $0 /* zero code */
1205 pushl $0 /* zero trap number */
1206 pusha /* save general registers */
1207 push %ds /* save segment registers */
1208 push %es
1209 push %fs
1210 push %gs
1211 mov %ss,%dx /* switch to kernel segments */
1212 mov %dx,%ds
1213 mov %dx,%es
1214 mov $ CPU_DATA_GS,%dx
1215 mov %dx,%gs
1216
1217 /*
1218 * See if we interrupted a kernel-loaded thread executing
1219 * in its own task.
1220 */
1221 CPU_NUMBER(%edx)
1222 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
1223 jnz 0f /* user mode trap if so */
1224 testb $3,R_CS(%esp)
1225 jnz 0f /* user mode, back to normal */
1226 #ifdef FIXME
1227 cmpl ETEXT_ADDR,R_EIP(%esp)
1228 jb 0f /* not kernel-loaded, back to normal */
1229 #endif
1230
1231 /*
1232 * Transfer the current stack frame by hand into the PCB.
1233 */
1234 CAH(afistart)
1235 movl %gs:CPU_ACTIVE_KLOADED,%eax
1236 movl %gs:CPU_KERNEL_STACK,%ebx
1237 xchgl %ebx,%esp
1238 FRAME_STACK_TO_PCB(%eax,%ebx)
1239 CAH(afiend)
1240 TIME_TRAP_UENTRY
1241 jmp 3f
1242 0:
1243 TIME_TRAP_UENTRY
1244
1245 movl %gs:CPU_KERNEL_STACK,%eax
1246 /* switch to kernel stack */
1247 xchgl %eax,%esp
1248 3:
1249 pushl %eax
1250 pushl $0 /* push preemption flag */
1251 call EXT(i386_astintr) /* take the AST */
1252 addl $4,%esp /* pop preemption flag */
1253 popl %esp /* back to PCB stack */
1254 jmp EXT(return_from_trap) /* return */
1252
1256 #if MACH_KDB || MACH_KGDB
1257 /*
1258 * kdb_kintr: enter kdb from keyboard interrupt.
1259 * Chase down the stack frames until we find one whose return
1260 * address is the interrupt handler. At that point, we have:
1261 *
1262 * frame-> saved %ebp
1263 * return address in interrupt handler
1264 * ivect
1265 * saved SPL
1266 * return address == return_to_iret_i
1267 * saved %edx
1268 * saved %ecx
1269 * saved %eax
1270 * saved %eip
1271 * saved %cs
1272 * saved %efl
1273 *
1274 * OR:
1275 * frame-> saved %ebp
1276 * return address in interrupt handler
1277 * ivect
1278 * saved SPL
1279 * return address == return_to_iret
1280 * pointer to save area on old stack
1281 * [ saved %ebx, if accurate timing ]
1282 *
1283 * old stack: saved %es
1284 * saved %ds
1285 * saved %edx
1286 * saved %ecx
1287 * saved %eax
1288 * saved %eip
1289 * saved %cs
1290 * saved %efl
1291 *
1292 * Call kdb, passing it that register save area.
1293 */
1294
/* Patches the saved return address in the matching frame so that the
 * interrupt unwind detours through kdb_from_iret / kdb_from_iret_i
 * before completing the normal interrupt return. */
1295 #if MACH_KGDB
1296 Entry(kgdb_kintr)
1297 #endif /* MACH_KGDB */
1298 #if MACH_KDB
1299 Entry(kdb_kintr)
1300 #endif /* MACH_KDB */
1301 movl %ebp,%eax /* save caller`s frame pointer */
1302 movl $ EXT(return_to_iret),%ecx /* interrupt return address 1 */
1303 movl $ EXT(return_to_iret_i),%edx /* interrupt return address 2 */
1304
1305 0: cmpl 16(%eax),%ecx /* does this frame return to */
1306 /* interrupt handler (1)? */
1307 je 1f
/* Already patched?  Then stop without re-patching. */
1308 cmpl $kdb_from_iret,16(%eax)
1309 je 1f
1310 cmpl 16(%eax),%edx /* interrupt handler (2)? */
1311 je 2f /* if not: */
1312 cmpl $kdb_from_iret_i,16(%eax)
1313 je 2f
1314 movl (%eax),%eax /* try next frame */
1315 jmp 0b
1316
1317 1: movl $kdb_from_iret,16(%eax) /* returns to kernel/user stack */
1318 ret
1319
1320 2: movl $kdb_from_iret_i,16(%eax)
1321 /* returns to interrupt stack */
1322 ret
1320
1324 /*
1325 * On return from keyboard interrupt, we will execute
1326 * kdb_from_iret_i
1327 * if returning to an interrupt on the interrupt stack
1328 * kdb_from_iret
1329 * if returning to an interrupt on the user or kernel stack
1330 */
/*
 * kdb_from_iret:
 * Debugger detour for an interrupt returning on a user/kernel stack.
 * Saves the remaining callee registers in a known layout, hands the
 * register block to kgdb/kdb, restores, and resumes the normal
 * interrupt return at return_to_iret.
 */
1331 kdb_from_iret:
1332 /* save regs in known locations */
1333 pushl %ebx /* caller`s %ebx is in reg */
1334 pushl %ebp
1335 pushl %esi
1336 pushl %edi
1337 push %fs
1338 push %gs
1339 #if MACH_KGDB
1340 cli
1341 pushl %esp /* pass regs */
1342 call EXT(kgdb_kentry) /* to kgdb */
1343 addl $4,%esp /* pop parameters */
1344 #endif /* MACH_KGDB */
1345 #if MACH_KDB
1346 pushl %esp /* pass regs */
1347 call EXT(kdb_kentry) /* to kdb */
1348 addl $4,%esp /* pop parameters */
1349 #endif /* MACH_KDB */
1350 pop %gs /* restore registers */
1351 pop %fs
1352 popl %edi
1353 popl %esi
1354 popl %ebp
1355 popl %ebx
1356 jmp EXT(return_to_iret) /* normal interrupt return */
1354
/*
 * kdb_from_iret_i:
 * Debugger detour for an interrupt returning on the interrupt stack.
 * Rebuilds a full trap-style save frame (fake trapno/code, pusha,
 * segment registers), enters kgdb and/or kdb_trap, then restores
 * everything and irets directly.
 */
1358 kdb_from_iret_i: /* on interrupt stack */
1359 pop %edx /* restore saved registers */
1360 pop %ecx
1361 pop %eax
1362 pushl $0 /* zero error code */
1363 pushl $0 /* zero trap number */
1364 pusha /* save general registers */
1365 push %ds /* save segment registers */
1366 push %es
1367 push %fs
1368 push %gs
1369 #if MACH_KGDB
1370 cli /* disable interrupts */
1371 CPU_NUMBER(%edx) /* get CPU number */
1372 movl CX(EXT(kgdb_stacks),%edx),%ebx
1373 xchgl %ebx,%esp /* switch to kgdb stack */
1374 pushl %ebx /* pass old sp as an arg */
1375 call EXT(kgdb_from_kernel)
1376 popl %esp /* switch back to interrupt stack */
1377 #endif /* MACH_KGDB */
1378 #if MACH_KDB
1379 pushl %esp /* pass regs, */
1380 pushl $0 /* code, */
1381 pushl $-1 /* type to kdb */
1382 call EXT(kdb_trap)
1383 addl $12,%esp
1384 #endif /* MACH_KDB */
1385 pop %gs /* restore segment registers */
1386 pop %fs
1387 pop %es
1388 pop %ds
1389 popa /* restore general registers */
1390 addl $8,%esp
1391 iret
1392
1393 #endif /* MACH_KDB || MACH_KGDB */
1391
1392
1396 /*
1397 * Mach RPC enters through a call gate, like a system call.
1398 */
/*
 * mach_rpc:
 * Builds a trap-style save frame (call gates push no eflags slot in
 * the right place, hence the shuffle below), copies the user
 * arguments onto the kernel stack, and dispatches through
 * mach_call_munger.  Returns to user via return_from_trap with the
 * result in the saved R_EAX slot.
 */
1400 Entry(mach_rpc)
1401 pushf /* save flags as soon as possible */
1402 pushl %eax /* save system call number */
1403 pushl $0 /* clear trap number slot */
1404
1405 pusha /* save the general registers */
1406 pushl %ds /* and the segment registers */
1407 pushl %es
1408 pushl %fs
1409 pushl %gs
1410
1411 mov %ss,%dx /* switch to kernel data segment */
1412 mov %dx,%ds
1413 mov %dx,%es
1414 mov $ CPU_DATA_GS,%dx
1415 mov %dx,%gs
1416
1417 /*
1418 * Shuffle eflags,eip,cs into proper places
1419 */
1420
1421 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
1422 movl R_CS(%esp),%ecx /* eip is in CS slot */
1423 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
1424 movl %ecx,R_EIP(%esp) /* fix eip */
1425 movl %edx,R_CS(%esp) /* fix cs */
1426 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
1427
1428 TIME_TRAP_UENTRY
1429
/* Call numbers arrive negated; each mach_trap_table entry is 16 bytes. */
1430 negl %eax /* get system call number */
1431 shll $4,%eax /* manual indexing */
1432
1433 /*
1434 * Check here for mach_rpc from kernel-loaded task --
1435 * - Note that kernel-loaded task returns via real return.
1436 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1437 * so transfer the stack frame into the PCB explicitly, then
1438 * start running on resulting "PCB stack". We have to set
1439 * up a simulated "uesp" manually, since there's none in the
1440 * frame.
1441 */
1442 cmpl $0,%gs:CPU_ACTIVE_KLOADED
1443 jz 2f
1444 CAH(mrstart)
1445 movl %gs:CPU_ACTIVE_KLOADED,%ebx
1446 movl %gs:CPU_KERNEL_STACK,%edx
1447 xchgl %edx,%esp
1448
1449 FRAME_STACK_TO_PCB(%ebx,%edx)
1450 CAH(mrend)
1451
1452 jmp 3f
1453
1454 2:
1455 movl %gs:CPU_KERNEL_STACK,%ebx
1456 /* get current kernel stack */
1457 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1458 /* user registers. */
1459
1460 3:
1461
1462 /*
1463 * Register use on entry:
1464 * eax contains syscall number
1465 * ebx contains user regs pointer
1466 */
1467 #undef RPC_TRAP_REGISTERS
1468 #ifdef RPC_TRAP_REGISTERS
1469 pushl R_ESI(%ebx)
1470 pushl R_EDI(%ebx)
1471 pushl R_ECX(%ebx)
1472 pushl R_EDX(%ebx)
1473 #else
1474 movl EXT(mach_trap_table)(%eax),%ecx
1475 /* get number of arguments */
1476 jecxz 2f /* skip argument copy if none */
1477 movl R_UESP(%ebx),%esi /* get user stack pointer */
1478 lea 4(%esi,%ecx,4),%esi /* skip user return address, */
1479 /* and point past last argument */
1480 movl %gs:CPU_ACTIVE_KLOADED,%edx
1481 /* point to current thread */
1482 orl %edx,%edx /* if ! kernel-loaded, check addr */
1483 jz 4f /* else */
1484 mov %ds,%dx /* kernel data segment access */
1485 jmp 5f
1486 4:
1487 cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */
1488 ja mach_call_addr /* address error if not */
1489 movl $ USER_DS,%edx /* user data segment access */
1490 5:
1491 mov %dx,%fs
/* Faults in the %fs: pushes below recover to mach_call_addr_push,
 * which restores this saved %esp. */
1492 movl %esp,%edx /* save kernel ESP for error recovery */
1493 1:
1494 subl $4,%esi
1495 RECOVERY_SECTION
1496 RECOVER(mach_call_addr_push)
1497 pushl %fs:(%esi) /* push argument on stack */
1498 loop 1b /* loop for all arguments */
1499 #endif
1500
1501 /*
1502 * Register use on entry:
1503 * eax contains syscall number << 4
1504 * mach_call_munger is declared regparm(1), so the first arg is %eax
1505 */
1506 2:
1507
1508 call EXT(mach_call_munger)
1509
1510 movl %esp,%ecx /* get kernel stack */
1511 or $(KERNEL_STACK_SIZE-1),%ecx
1512 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1513 movl %eax,R_EAX(%esp) /* save return value */
1514 jmp EXT(return_from_trap) /* return to user */
1512
1513
1517 /*
1518 * Special system call entry for "int 0x80", which has the "eflags"
1519 * register saved at the right place already.
1520 * Fall back to the common syscall path after saving the registers.
1521 *
1522 * esp -> old eip
1523 * old cs
1524 * old eflags
1525 * old esp if trapped from user
1526 * old ss if trapped from user
1527 *
1528 * XXX: for the moment, we don't check for int 0x80 from kernel mode.
1529 */
/* Unlike the call-gate path, an interrupt gate already pushed eflags
 * in the standard trap-frame position, so no eip/cs/eflags shuffle is
 * needed; jump straight to syscall_entry_3. */
1530 Entry(syscall_int80)
1531 pushl %eax /* save system call number */
1532 pushl $0 /* clear trap number slot */
1533
1534 pusha /* save the general registers */
1535 pushl %ds /* and the segment registers */
1536 pushl %es
1537 pushl %fs
1538 pushl %gs
1539
1540 mov %ss,%dx /* switch to kernel data segment */
1541 mov %dx,%ds
1542 mov %dx,%es
1543 mov $ CPU_DATA_GS,%dx
1544 mov %dx,%gs
1545
1546 jmp syscall_entry_3
1544
1548 /*
1549 * System call enters through a call gate. Flags are not saved -
1550 * we must shuffle stack to look like trap save area.
1551 *
1552 * esp-> old eip
1553 * old cs
1554 * old esp
1555 * old ss
1556 *
1557 * eax contains system call number.
1558 *
1559 * NB: below use of CPU_NUMBER assumes that macro will use the
1560 * correct segment register for any kernel data accesses.
1561 */
1562 Entry(syscall)
1563 syscall_entry:
1564 pushf /* save flags as soon as possible */
1565 syscall_entry_2:
1566 pushl %eax /* save system call number */
1567 pushl $0 /* clear trap number slot */
1568
1569 pusha /* save the general registers */
1570 pushl %ds /* and the segment registers */
1571 pushl %es
1572 pushl %fs
1573 pushl %gs
1574
1575 mov %ss,%dx /* switch to kernel data segment */
1576 mov %dx,%ds
1577 mov %dx,%es
1578 mov $ CPU_DATA_GS,%dx
1579 mov %dx,%gs
1580
1581 /*
1582 * Shuffle eflags,eip,cs into proper places
1583 */
1584
1585 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
1586 movl R_CS(%esp),%ecx /* eip is in CS slot */
1587 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
1588 movl %ecx,R_EIP(%esp) /* fix eip */
1589 movl %edx,R_CS(%esp) /* fix cs */
1590 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
1591
1592 syscall_entry_3:
1593 /*
1594 * Check here for syscall from kernel-loaded task --
1595 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1596 * so transfer the stack frame into the PCB explicitly, then
1597 * start running on resulting "PCB stack". We have to set
1598 * up a simulated "uesp" manually, since there's none in the
1599 * frame.
1600 */
1601 cmpl $0,%gs:CPU_ACTIVE_KLOADED
1602 jz 0f
1603 CAH(scstart)
1604 movl %gs:CPU_ACTIVE_KLOADED,%ebx
1605 movl %gs:CPU_KERNEL_STACK,%edx
1606 xchgl %edx,%esp
1607 FRAME_STACK_TO_PCB(%ebx,%edx)
1608 CAH(scend)
1609 TIME_TRAP_UENTRY
1610 jmp 1f
1611
1612 0:
1613 TIME_TRAP_UENTRY
1614
1615 movl %gs:CPU_KERNEL_STACK,%ebx
1616 /* get current kernel stack */
1617 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1618 /* user registers. */
1619 /* user regs pointer already set */
1620
1621 /*
1622 * Native system call.
1623 * Register use on entry:
1624 * eax contains syscall number
1625 * ebx points to user regs
1626 */
/* Mach call numbers arrive negated; each mach_trap_table entry is
 * 16 bytes (hence the shift-by-4 "manual indexing"). */
1627 1:
1628 negl %eax /* get system call number */
1629 jl mach_call_range /* out of range if it was positive */
1630
1631 cmpl EXT(mach_trap_count),%eax /* check system call table bounds */
1632 jg mach_call_range /* error if out of range */
1633 shll $4,%eax /* manual indexing */
1634
1635 movl EXT(mach_trap_table)+4(%eax),%edx
1636 /* get procedure */
1637 cmpl $ EXT(kern_invalid),%edx /* if not "kern_invalid" */
1638 jne do_native_call /* go on with Mach syscall */
1639 shrl $4,%eax /* restore syscall number */
1640 jmp mach_call_range /* try it as a "server" syscall */
1638
1642 /*
1643 * Register use on entry:
1644 * eax contains syscall number
1645 * ebx contains user regs pointer
1646 */
/*
 * do_native_call:
 * Copies the user-stack arguments for a native Mach trap onto the
 * kernel stack (through %fs, which is set to either kernel DS for
 * kernel-loaded tasks or USER_DS), then falls into mach_call_call.
 * Faulting copies recover to mach_call_addr_push.
 */
1647 do_native_call:
1648 movl EXT(mach_trap_table)(%eax),%ecx
1649 /* get number of arguments */
1650 jecxz mach_call_call /* skip argument copy if none */
1651 movl R_UESP(%ebx),%esi /* get user stack pointer */
1652 lea 4(%esi,%ecx,4),%esi /* skip user return address, */
1653 /* and point past last argument */
1654 movl %gs:CPU_ACTIVE_KLOADED,%edx
1655 /* point to current thread */
1656 orl %edx,%edx /* if kernel-loaded, skip addr check */
1657 jz 0f /* else */
1658 mov %ds,%dx /* kernel data segment access */
1659 jmp 1f
1660 0:
1661 cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */
1662 ja mach_call_addr /* address error if not */
1663 movl $ USER_DS,%edx /* user data segment access */
1664 1:
1665 mov %dx,%fs
1666 movl %esp,%edx /* save kernel ESP for error recovery */
1667 2:
1668 subl $4,%esi
1669 RECOVERY_SECTION
1670 RECOVER(mach_call_addr_push)
1671 pushl %fs:(%esi) /* push argument on stack */
1672 loop 2b /* loop for all arguments */
1670
1674 /*
1675 * Register use on entry:
1676 * eax contains syscall number
1677 * ebx contains user regs pointer
1678 */
/*
 * mach_call_call:
 * Dispatches the Mach trap (optionally bracketing it with ETAP event
 * probes), then switches back to the PCB stack, stores the result in
 * the saved R_EAX slot, and returns to user mode.
 */
1679 mach_call_call:
1680
1681 CAH(call_call)
1682
1683 #if ETAP_EVENT_MONITOR
1684 cmpl $0x200, %eax /* is this mach_msg? */
1685 jz make_syscall /* if yes, don't record event */
1686
1687 pushal /* Otherwise: save registers */
1688 pushl %eax /* push syscall number on stack*/
1689 call EXT(etap_machcall_probe1) /* call event begin probe */
1690 add $4,%esp /* restore stack */
1691 popal /* restore registers */
1692
1693 call *EXT(mach_trap_table)+4(%eax) /* call procedure */
1694 pushal
1695 call EXT(etap_machcall_probe2) /* call event end probe */
1696 popal
1697 jmp skip_syscall /* syscall already made */
1698 #endif /* ETAP_EVENT_MONITOR */
1699
1700 make_syscall:
1701
1702 /*
1703 * mach_call_munger is declared regparm(1) so the first arg is %eax
1704 */
1705 call EXT(mach_call_munger)
1706
1707 skip_syscall:
1708
1709 movl %esp,%ecx /* get kernel stack */
1710 or $(KERNEL_STACK_SIZE-1),%ecx
1711 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1712 movl %eax,R_EAX(%esp) /* save return value */
1713 jmp EXT(return_from_trap) /* return to user */
1711
1715 /*
1716 * Address out of range. Change to page fault.
1717 * %esi holds failing address.
1718 * Register use on entry:
1719 * ebx contains user regs pointer
1720 * edx (at mach_call_addr_push) holds the saved kernel ESP.
1721 */
1722 mach_call_addr_push:
1723 movl %edx,%esp /* clean parameters from stack */
1724 mach_call_addr:
1725 movl %esi,R_CR2(%ebx) /* set fault address */
1726 movl $(T_PAGE_FAULT),R_TRAPNO(%ebx)
1727 /* set page-fault trap */
1728 movl $(T_PF_USER),R_ERR(%ebx)
1729 /* set error code - read user space */
1730 CAH(call_addr)
1731 jmp EXT(take_trap) /* treat as a trap */
1732
1733 /*
1734 * System call out of range. Treat as invalid-instruction trap.
1735 * (? general protection?)
1736 * Register use on entry:
1737 * eax contains syscall number
1738 */
/* Raises EXC_SYSCALL through exception_triage with the bad call
 * number as the single exception code. */
1739 mach_call_range:
1740 push %eax
1741 movl %esp,%edx
1742 push $1 /* code_cnt = 1 */
1743 push %edx /* exception_type_t (see interface doc) */
1744 push $ EXC_SYSCALL
1745 CAH(call_range)
1746 call EXT(exception_triage)
/* no return */
1744
/*
 * syscall_failed:
 * Entered (from C) when syscall setup fails; converts the situation
 * into an invalid-opcode trap against the saved user registers and
 * re-enters the normal trap path via take_trap.
 */
1748 .globl EXT(syscall_failed)
1749 LEXT(syscall_failed)
1750 movl %esp,%ecx /* get kernel stack */
1751 or $(KERNEL_STACK_SIZE-1),%ecx
1752 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1753 movl %gs:CPU_KERNEL_STACK,%ebx
1754 /* get current kernel stack */
1755 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1756 /* user registers. */
1757 /* user regs pointer already set */
1758
1759 movl $(T_INVALID_OPCODE),R_TRAPNO(%ebx)
1760 /* set invalid-operation trap */
1761 movl $0,R_ERR(%ebx) /* clear error code */
1762 CAH(failed)
1763 jmp EXT(take_trap) /* treat as a trap */
1761
1762 /*\f*/
1763 /*
1764 * Utility routines.
1765 */
1766
1767
1771 /*
1772 * Copy from user address space.
1773 * arg0: user address
1774 * arg1: kernel address
1775 * arg2: byte count
1776 *
1777 * Returns 0 on success, EFAULT on fault or address wrap-around.
1778 * If the current pmap is not the kernel pmap, %ds is temporarily
1779 * switched to USER_DS so the rep/movs below read user memory;
1780 * copy_ret restores it from %ss.
1781 */
1782 Entry(copyinmsg)
1783 ENTRY(copyin)
1784 pushl %esi
1785 pushl %edi /* save registers */
1786
1787 movl 8+S_ARG0,%esi /* get user start address */
1788 movl 8+S_ARG1,%edi /* get kernel destination address */
1789 movl 8+S_ARG2,%edx /* get count */
1790
1791 lea 0(%esi,%edx),%eax /* get user end address + 1 */
1792
1793 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get active thread */
1794 movl ACT_MAP(%ecx),%ecx /* get act->map */
1795 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
1796 cmpl EXT(kernel_pmap), %ecx
1797 jz 1f
1798 movl $ USER_DS,%ecx /* user data segment access */
1799 mov %cx,%ds
1800 1:
1801 cmpl %esi,%eax
1802 jb copyin_fail /* fail if wrap-around */
1803 cld /* count up */
1804 movl %edx,%ecx /* move by longwords first */
1805 shrl $2,%ecx
1806 RECOVERY_SECTION
1807 RECOVER(copyin_fail)
1808 rep
1809 movsl /* move longwords */
1810 movl %edx,%ecx /* now move remaining bytes */
1811 andl $3,%ecx
1812 RECOVERY_SECTION
1813 RECOVER(copyin_fail)
1814 rep
1815 movsb
1816 xorl %eax,%eax /* return 0 for success */
1817 copy_ret:
1818 mov %ss,%di /* restore kernel data segment */
1819 mov %di,%ds
1820
1821 popl %edi /* restore registers */
1822 popl %esi
1823 ret /* and return */
1824
/* Fault recovery target for the rep/movs above. */
1825 copyin_fail:
1826 movl $ EFAULT,%eax /* return error for failure */
1827 jmp copy_ret /* pop frame and return */
1820
1824 /*
1825 * Copy string from user address space.
1826 * arg0: user address
1827 * arg1: kernel address
1828 * arg2: max byte count
1829 * arg3: actual byte count (OUT)
1830 *
1831 * Copies through %fs byte-by-byte up to and including the NUL.
1832 * Returns 0 on success, ENAMETOOLONG if no NUL was found within the
1833 * max count, EFAULT on fault.  A NULL kernel address discards the
1834 * bytes (length-probe mode).
1835 */
1836 Entry(copyinstr)
1837 pushl %esi
1838 pushl %edi /* save registers */
1839
1840 movl 8+S_ARG0,%esi /* get user start address */
1841 movl 8+S_ARG1,%edi /* get kernel destination address */
1842 movl 8+S_ARG2,%edx /* get count */
1843
1844 lea 0(%esi,%edx),%eax /* get user end address + 1 */
1845
1846 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get active thread */
1847 movl ACT_MAP(%ecx),%ecx /* get act->map */
1848 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
1849 cmpl EXT(kernel_pmap), %ecx
1850 jne 0f
1851 mov %ds,%cx /* kernel data segment access */
1852 jmp 1f
1853 0:
1854 movl $ USER_DS,%ecx /* user data segment access */
1855 1:
1856 mov %cx,%fs
1857 xorl %eax,%eax
1858 cmpl $0,%edx
1859 je 4f
1860 2:
1861 RECOVERY_SECTION
1862 RECOVER(copystr_fail) /* copy bytes... */
1863 movb %fs:(%esi),%al
1864 incl %esi
1865 testl %edi,%edi /* if kernel address is ... */
1866 jz 3f /* not NULL */
1867 movb %al,(%edi) /* copy the byte */
1868 incl %edi
1869 3:
1870 decl %edx
1871 je 5f /* Zero count.. error out */
1872 cmpl $0,%eax
1873 jne 2b /* .. a NUL found? */
1874 jmp 4f /* return zero (%eax) */
1875 5:
1876 movl $ ENAMETOOLONG,%eax /* String is too long.. */
1877 4:
1878 movl 8+S_ARG3,%edi /* get OUT len ptr */
1879 cmpl $0,%edi
1880 jz copystr_ret /* if null, just return */
1881 subl 8+S_ARG0,%esi
1882 movl %esi,(%edi) /* else set OUT arg to xfer len */
1883 copystr_ret:
1884 popl %edi /* restore registers */
1885 popl %esi
1886 ret /* and return */
1887
/* NOTE(review): this jumps into copyin's copy_ret epilogue, not
 * copystr_ret.  The register frame matches (same two pushes), but
 * copy_ret also reloads %ds (which this routine never changed) and
 * the OUT length argument is never written on the fault path —
 * looks intentional, but confirm callers tolerate an unset OUT len. */
1888 copystr_fail:
1889 movl $ EFAULT,%eax /* return error for failure */
1890 jmp copy_ret /* pop frame and return */
1883
1887 /*
1888 * Copy to user address space.
1889 * arg0: kernel address
1890 * arg1: user address
1891 * arg2: byte count
1892 *
1893 * Returns 0 on success, EFAULT on fault.  Writes go through %es.
1894 * The copy proceeds one destination page at a time so that a
1895 * write-fault retry (RETRY to copyout_retry) can resume after the
1896 * pager makes the next page writable.
1897 */
1898 Entry(copyoutmsg)
1899 ENTRY(copyout)
1900 pushl %esi
1901 pushl %edi /* save registers */
1902 pushl %ebx
1903
1904 movl 12+S_ARG0,%esi /* get kernel start address */
1905 movl 12+S_ARG1,%edi /* get user start address */
1906 movl 12+S_ARG2,%edx /* get count */
1907
1908 leal 0(%edi,%edx),%eax /* get user end address + 1 */
1909
1910 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get active thread */
1911 movl ACT_MAP(%ecx),%ecx /* get act->map */
1912 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
1913 cmpl EXT(kernel_pmap), %ecx
1914 jne 0f
1915 mov %ds,%cx /* else kernel data segment access */
1916 jmp 1f
1917 0:
1918 movl $ USER_DS,%ecx
1919 1:
1920 mov %cx,%es
1921
1922 /*
1923 * Check whether user address space is writable
1924 * before writing to it - hardware is broken.
1925 *
1926 * Skip check if "user" address is really in
1927 * kernel space (i.e., if it's in a kernel-loaded
1928 * task).
1929 *
1930 * Register usage:
1931 * esi/edi source/dest pointers for rep/mov
1932 * ecx counter for rep/mov
1933 * edx counts down from 3rd arg
1934 * eax count of bytes for each (partial) page copy
1935 * ebx shadows edi, used to adjust edx
1936 */
1937 movl %edi,%ebx /* copy edi for syncing up */
1938 copyout_retry:
1939 /* if restarting after a partial copy, put edx back in sync, */
1940 addl %ebx,%edx /* edx -= (edi - ebx); */
1941 subl %edi,%edx
1942 movl %edi,%ebx /* ebx = edi; */
1943
1944 /*
1945 * Copy only what fits on the current destination page.
1946 * Check for write-fault again on the next page.
1947 */
1948 leal NBPG(%edi),%eax /* point to */
1949 andl $(-NBPG),%eax /* start of next page */
1950 subl %edi,%eax /* get number of bytes to that point */
1951 cmpl %edx,%eax /* bigger than count? */
1952 jle 1f /* if so, */
1953 movl %edx,%eax /* use count */
1954 1:
1955 cld /* count up */
1956 movl %eax,%ecx /* move by longwords first */
1957 shrl $2,%ecx
1958 RECOVERY_SECTION
1959 RECOVER(copyout_fail)
1960 RETRY_SECTION
1961 RETRY(copyout_retry)
1962 rep
1963 movsl
1964 movl %eax,%ecx /* now move remaining bytes */
1965 andl $3,%ecx
1966 RECOVERY_SECTION
1967 RECOVER(copyout_fail)
1968 RETRY_SECTION
1969 RETRY(copyout_retry)
1970 rep
1971 movsb /* move */
1972 movl %edi,%ebx /* copy edi for syncing up */
1973 subl %eax,%edx /* and decrement count */
1974 jg copyout_retry /* restart on next page if not done */
1975 xorl %eax,%eax /* return 0 for success */
1976 copyout_ret:
1977 mov %ss,%di /* restore kernel segment */
1978 mov %di,%es
1979
1980 popl %ebx
1981 popl %edi /* restore registers */
1982 popl %esi
1983 ret /* and return */
1984
/* Fault recovery target for the rep/movs above. */
1985 copyout_fail:
1986 movl $ EFAULT,%eax /* return error for failure */
1987 jmp copyout_ret /* pop frame and return */
1980
1984 /*
1985 * FPU routines.
1986 * Thin C-callable wrappers around single x87 instructions.
1987 */
1988
1989 /*
1990 * Initialize FPU.
1991 */
1992 ENTRY(_fninit)
1993 fninit
1994 ret
1995
1996 /*
1997 * Read control word
1998 * Returns the 16-bit x87 control word (via a temporary stack slot).
1999 */
2000 ENTRY(_fstcw)
2001 pushl %eax /* get stack space */
2002 fstcw (%esp)
2003 popl %eax
2004 ret
2005
2006 /*
2007 * Set control word
2008 * arg0: new control word value.
2009 */
2010 ENTRY(_fldcw)
2011 fldcw 4(%esp)
2012 ret
2013
2014 /*
2015 * Read status word
2016 */
2017 ENTRY(_fnstsw)
2018 xor %eax,%eax /* clear high 16 bits of eax */
2019 fnstsw %ax /* read FP status */
2020 ret
2021
2022 /*
2023 * Clear FPU exceptions
2024 */
2025 ENTRY(_fnclex)
2026 fnclex
2027 ret
2028
2029 /*
2030 * Clear task-switched flag.
2031 */
2032 ENTRY(_clts)
2033 clts
2034 ret
2035
2036 /*
2037 * Save complete FPU state. Save error for later.
2038 * arg0: pointer to x87 save area.
2039 */
2040 ENTRY(_fpsave)
2041 movl 4(%esp),%eax /* get save area pointer */
2042 fnsave (%eax) /* save complete state, including */
2043 /* errors */
2044 ret
2045
2046 /*
2047 * Restore FPU state.
2048 * arg0: pointer to x87 save area previously written by _fpsave.
2049 */
2050 ENTRY(_fprestore)
2051 movl 4(%esp),%eax /* get save area pointer */
2052 frstor (%eax) /* restore complete state */
2053 ret
2046
2050 /*
2051 * Set cr3
2052 * arg0: new page directory base.  The CPU number is OR'd into the
2053 * low bits (this scheme tags per-CPU cr3 values; see get_cr3,
2054 * which masks them back off).
2055 */
2056 ENTRY(set_cr3)
2057 CPU_NUMBER(%eax)
2058 orl 4(%esp), %eax
2059 /*
2060 * Don't set PDBR to a new value (hence invalidating the
2061 * "paging cache") if the new value matches the current one.
2062 */
2063 movl %cr3,%edx /* get current cr3 value */
2064 cmpl %eax,%edx
2065 je 0f /* if two are equal, don't set */
2066 movl %eax,%cr3 /* load it (and flush cache) */
2067 0:
2068 ret
2069
2070 /*
2071 * Read cr3
2072 */
2073 ENTRY(get_cr3)
2074 movl %cr3,%eax
2075 andl $(~0x7), %eax /* remove cpu number */
2076 ret
2077
2078 /*
2079 * Flush TLB
2080 */
2081 ENTRY(flush_tlb)
2082 movl %cr3,%eax /* flush tlb by reloading CR3 */
2083 movl %eax,%cr3 /* with itself */
2084 ret
2085
2086 /*
2087 * Read cr2
2088 * Returns the faulting linear address of the last page fault.
2089 */
2090 ENTRY(get_cr2)
2091 movl %cr2,%eax
2092 ret
2093
2094 /*
2095 * Read cr4
2096 * Hand-encoded because the assembler of the day lacked cr4 support.
2097 */
2098 ENTRY(get_cr4)
2099 .byte 0x0f,0x20,0xe0 /* movl %cr4, %eax */
2100 ret
2101
2102 /*
2103 * Write cr4
2104 * arg0: new cr4 value (hand-encoded mov, as above).
2105 */
2106 ENTRY(set_cr4)
2107 movl 4(%esp), %eax
2108 .byte 0x0f,0x22,0xe0 /* movl %eax, %cr4 */
2109 ret
2101
2105 /*
2106 * Read ldtr
2107 * Returns the LDT selector, zero-extended to 32 bits.
2108 */
2109 Entry(get_ldt)
2110 xorl %eax,%eax
2111 sldt %ax
2112 ret
2113
2114 /*
2115 * Set ldtr
2116 * arg0: LDT selector.
2117 */
2118 Entry(set_ldt)
2119 lldt 4(%esp)
2120 ret
2121
2122 /*
2123 * Read task register.
2124 */
2125 ENTRY(get_tr)
2126 xorl %eax,%eax
2127 str %ax
2128 ret
2129
2130 /*
2131 * Set task register. Also clears busy bit of task descriptor
2132 * (ltr faults on a busy TSS descriptor, so the access byte is
2133 * rewritten to the available-TSS type first).
2134 * arg0: TSS selector.
2135 */
2136 ENTRY(set_tr)
2137 movl S_ARG0,%eax /* get task segment number */
2138 subl $8,%esp /* push space for SGDT */
2139 sgdt 2(%esp) /* store GDT limit and base (linear) */
2140 movl 4(%esp),%edx /* address GDT */
2141 movb $(K_TSS),5(%edx,%eax) /* fix access byte in task descriptor */
2142 ltr %ax /* load task register */
2143 addl $8,%esp /* clear stack */
2144 ret /* and return */
2145
2146 /*
2147 * Set task-switched flag.
2148 */
2149 ENTRY(_setts)
2150 movl %cr0,%eax /* get cr0 */
2151 orl $(CR0_TS),%eax /* or in TS bit */
2152 movl %eax,%cr0 /* set cr0 */
2153 ret
2146
2150 /*
2151 * io register must not be used on slaves (no AT bus)
2152 */
2153 #define ILL_ON_SLAVE
2154
2155
/*
 * Argument/frame macro selection for the I/O helpers below:
 * with MACH_ASSERT the helpers build a real %ebp frame (FRAME/EMARF)
 * and read args relative to it (B_ARG*); otherwise they are
 * frameless and read args relative to %esp (S_ARG*).
 */
2156 #if MACH_ASSERT
2157
2158 #define ARG0 B_ARG0
2159 #define ARG1 B_ARG1
2160 #define ARG2 B_ARG2
2161 #define PUSH_FRAME FRAME
2162 #define POP_FRAME EMARF
2163
2164 #else /* MACH_ASSERT */
2165
2166 #define ARG0 S_ARG0
2167 #define ARG1 S_ARG1
2168 #define ARG2 S_ARG2
2169 #define PUSH_FRAME
2170 #define POP_FRAME
2171
2172 #endif /* MACH_ASSERT */
2170
2171
2175 #if MACH_KDB || MACH_ASSERT
2176
2177 /*
2178 * Following routines are also defined as macros in i386/pio.h
2179 * Compile them when MACH_KDB is configured so that they
2180 * can be invoked from the debugger.
2181 */
2182
2183 /*
2184 * void outb(unsigned char *io_port,
2185 * unsigned char byte)
2186 *
2187 * Output a byte to an IO port.
2188 */
2189 ENTRY(outb)
2190 PUSH_FRAME
2191 ILL_ON_SLAVE
2192 movl ARG0,%edx /* IO port address */
2193 movl ARG1,%eax /* data to output */
2194 outb %al,%dx /* send it out */
2195 POP_FRAME
2196 ret
2197
2198 /*
2199 * unsigned char inb(unsigned char *io_port)
2200 *
2201 * Input a byte from an IO port.
2202 */
2203 ENTRY(inb)
2204 PUSH_FRAME
2205 ILL_ON_SLAVE
2206 movl ARG0,%edx /* IO port address */
2207 xor %eax,%eax /* clear high bits of register */
2208 inb %dx,%al /* get the byte */
2209 POP_FRAME
2210 ret
2211
2212 /*
2213 * void outw(unsigned short *io_port,
2214 * unsigned short word)
2215 *
2216 * Output a word to an IO port.
2217 */
2218 ENTRY(outw)
2219 PUSH_FRAME
2220 ILL_ON_SLAVE
2221 movl ARG0,%edx /* IO port address */
2222 movl ARG1,%eax /* data to output */
2223 outw %ax,%dx /* send it out */
2224 POP_FRAME
2225 ret
2226
2227 /*
2228 * unsigned short inw(unsigned short *io_port)
2229 *
2230 * Input a word from an IO port.
2231 */
2232 ENTRY(inw)
2233 PUSH_FRAME
2234 ILL_ON_SLAVE
2235 movl ARG0,%edx /* IO port address */
2236 xor %eax,%eax /* clear high bits of register */
2237 inw %dx,%ax /* get the word */
2238 POP_FRAME
2239 ret
2240
2241 /*
2242 * void outl(unsigned int *io_port,
2243 * unsigned int byte)
2244 *
2245 * Output an int to an IO port.
2246 */
2247 ENTRY(outl)
2248 PUSH_FRAME
2249 ILL_ON_SLAVE
2250 movl ARG0,%edx /* IO port address*/
2251 movl ARG1,%eax /* data to output */
2252 outl %eax,%dx /* send it out */
2253 POP_FRAME
2254 ret
2255
2256 /*
2257 * unsigned int inl(unsigned int *io_port)
2258 *
2259 * Input an int from an IO port.
2260 */
2261 ENTRY(inl)
2262 PUSH_FRAME
2263 ILL_ON_SLAVE
2264 movl ARG0,%edx /* IO port address */
2265 inl %dx,%eax /* get the int */
2266 POP_FRAME
2267 ret
2268
2269 #endif /* MACH_KDB || MACH_ASSERT*/
2267
2268 /*
2269 * void loutb(unsigned byte *io_port,
2270 * unsigned byte *data,
2271 * unsigned int count)
2272 *
2273 * Output an array of bytes to an IO port.
2274 */
2275 ENTRY(loutb)
2276 ENTRY(outsb)
/* void loutb(io_port, data, count) — alias outsb
 * Write ARG2 bytes starting at ARG1 to the I/O port in ARG0.
 * %esi is callee-saved in the i386 C ABI, so it is preserved via %eax
 * (safe here because nothing below clobbers %eax before the restore). */
2277 PUSH_FRAME
2278 ILL_ON_SLAVE
2279 movl %esi,%eax /* save register */
2280 movl ARG0,%edx /* get io port number */
2281 movl ARG1,%esi /* get data address */
2282 movl ARG2,%ecx /* get count */
2283 cld /* count up: %esi increments through the buffer */
2284 rep
2285 outsb /* output %ecx bytes from (%esi) to port %dx */
2286 movl %eax,%esi /* restore register */
2287 POP_FRAME
2288 ret
2289
2290
2291 /*
2292 * void loutw(unsigned short *io_port,
2293 * unsigned short *data,
2294 * unsigned int count)
2295 *
2296 * Output an array of shorts to an IO port.
2297 */
2298 ENTRY(loutw)
2299 ENTRY(outsw)
/* void loutw(io_port, data, count) — alias outsw
 * Write ARG2 16-bit words starting at ARG1 to the I/O port in ARG0.
 * Callee-saved %esi is preserved in %eax across the string op. */
2300 PUSH_FRAME
2301 ILL_ON_SLAVE
2302 movl %esi,%eax /* save register */
2303 movl ARG0,%edx /* get io port number */
2304 movl ARG1,%esi /* get data address */
2305 movl ARG2,%ecx /* get count (in words, not bytes) */
2306 cld /* count up: %esi increments through the buffer */
2307 rep
2308 outsw /* output %ecx words from (%esi) to port %dx */
2309 movl %eax,%esi /* restore register */
2310 POP_FRAME
2311 ret
2312
2313 /*
2314 * void loutl(unsigned short io_port,
2315 * unsigned int *data,
2316 * unsigned int count)
2317 *
2318 * Output an array of longs to an IO port.
2319 */
2320 ENTRY(loutl)
2321 ENTRY(outsl)
/* void loutl(io_port, data, count) — alias outsl
 * Write ARG2 32-bit longwords starting at ARG1 to the I/O port in ARG0.
 * Callee-saved %esi is preserved in %eax across the string op. */
2322 PUSH_FRAME
2323 ILL_ON_SLAVE
2324 movl %esi,%eax /* save register */
2325 movl ARG0,%edx /* get io port number */
2326 movl ARG1,%esi /* get data address */
2327 movl ARG2,%ecx /* get count (in longwords, not bytes) */
2328 cld /* count up: %esi increments through the buffer */
2329 rep
2330 outsl /* output %ecx longwords from (%esi) to port %dx */
2331 movl %eax,%esi /* restore register */
2332 POP_FRAME
2333 ret
2334
2335
2336 /*
2337 * void linb(unsigned char *io_port,
2338 * unsigned char *data,
2339 * unsigned int count)
2340 *
2341 * Input an array of bytes from an IO port.
2342 */
2343 ENTRY(linb)
2344 ENTRY(insb)
/* void linb(io_port, data, count) — alias insb
 * Read ARG2 bytes from the I/O port in ARG0 into the buffer at ARG1.
 * Callee-saved %edi is preserved in %eax across the string op. */
2345 PUSH_FRAME
2346 ILL_ON_SLAVE
2347 movl %edi,%eax /* save register */
2348 movl ARG0,%edx /* get io port number */
2349 movl ARG1,%edi /* get data address */
2350 movl ARG2,%ecx /* get count */
2351 cld /* count up: %edi increments through the buffer */
2352 rep
2353 insb /* input %ecx bytes from port %dx to (%edi) */
2354 movl %eax,%edi /* restore register */
2355 POP_FRAME
2356 ret
2357
2358
2359 /*
2360 * void linw(unsigned short *io_port,
2361 * unsigned short *data,
2362 * unsigned int count)
2363 *
2364 * Input an array of shorts from an IO port.
2365 */
2366 ENTRY(linw)
2367 ENTRY(insw)
/* void linw(io_port, data, count) — alias insw
 * Read ARG2 16-bit words from the I/O port in ARG0 into the buffer at ARG1.
 * Callee-saved %edi is preserved in %eax across the string op. */
2368 PUSH_FRAME
2369 ILL_ON_SLAVE
2370 movl %edi,%eax /* save register */
2371 movl ARG0,%edx /* get io port number */
2372 movl ARG1,%edi /* get data address */
2373 movl ARG2,%ecx /* get count (in words, not bytes) */
2374 cld /* count up: %edi increments through the buffer */
2375 rep
2376 insw /* input %ecx words from port %dx to (%edi) */
2377 movl %eax,%edi /* restore register */
2378 POP_FRAME
2379 ret
2380
2381
2382 /*
2383 * void linl(unsigned short io_port,
2384 * unsigned int *data,
2385 * unsigned int count)
2386 *
2387 * Input an array of longs from an IO port.
2388 */
2389 ENTRY(linl)
2390 ENTRY(insl)
/* void linl(io_port, data, count) — alias insl
 * Read ARG2 32-bit longwords from the I/O port in ARG0 into the buffer at ARG1.
 * Callee-saved %edi is preserved in %eax across the string op. */
2391 PUSH_FRAME
2392 ILL_ON_SLAVE
2393 movl %edi,%eax /* save register */
2394 movl ARG0,%edx /* get io port number */
2395 movl ARG1,%edi /* get data address */
2396 movl ARG2,%ecx /* get count (in longwords, not bytes) */
2397 cld /* count up: %edi increments through the buffer */
2398 rep
2399 insl /* input %ecx longwords from port %dx to (%edi) */
2400 movl %eax,%edi /* restore register */
2401 POP_FRAME
2402 ret
2403
2404
2405 /*
2406 * int inst_fetch(int eip, int cs);
2407 *
2408 * Fetch instruction byte. Return -1 if invalid address.
2409 */
2410 .globl EXT(inst_fetch)
2411 LEXT(inst_fetch)
/* int inst_fetch(int eip, int cs)
 * Fetch one instruction byte at cs:eip through %fs.
 * Returns the byte zero-extended, or -1 if the access faults.
 * The load is covered by a recovery entry so a page fault unwinds
 * to inst_fetch_fault instead of panicking; the retry entry restarts
 * at the top so %fs is re-loaded after a retried fault. */
2412 movl S_ARG1, %eax /* get segment */
2413 movw %ax,%fs /* into FS */
2414 movl S_ARG0, %eax /* get offset */
2415 RETRY_SECTION
2416 RETRY(EXT(inst_fetch)) /* re-load FS on retry */
2417 RECOVERY_SECTION
2418 RECOVER(EXT(inst_fetch_fault))
2419 movzbl %fs:(%eax),%eax /* load instruction byte, zero-extended */
2420 ret
2421
2422 LEXT(inst_fetch_fault)
/* Fault recovery target: the %fs:(%eax) load took a fault. */
2423 movl $-1,%eax /* return -1 if error */
2424 ret
2425
2426
2427 #if MACH_KDP
2428 /*
2429 * kdp_copy_kmem(char *src, char *dst, int count)
2430 *
2431 * Similar to copyin except that both addresses are kernel addresses.
2432 */
2433
2434 ENTRY(kdp_copy_kmem)
/* kdp_copy_kmem(char *src, char *dst, int count)
 * Kernel-to-kernel copy for the kernel debugger; like copyin but both
 * addresses are kernel addresses. Copies longwords first, then the
 * remaining 0-3 bytes. Faults during the copy unwind (via the RECOVER
 * entries) to kdp_vm_read_fail.
 * Returns the number of bytes transferred, or 0 on wrap-around/fault.
 * S_ARG offsets are biased by 8 for the two saved registers below. */
2435 pushl %esi
2436 pushl %edi /* save registers */
2437
2438 movl 8+S_ARG0,%esi /* get kernel start address */
2439 movl 8+S_ARG1,%edi /* get kernel destination address */
2440
2441 movl 8+S_ARG2,%edx /* get count */
2442
2443 lea 0(%esi,%edx),%eax /* get kernel end address + 1 */
2444
2445 cmpl %esi,%eax
2446 jb kdp_vm_read_fail /* fail if wrap-around */
2447 cld /* count up */
2448 movl %edx,%ecx /* move by longwords first */
2449 shrl $2,%ecx
2450 RECOVERY_SECTION
2451 RECOVER(kdp_vm_read_fail)
2452 rep
2453 movsl /* move longwords */
2454 movl %edx,%ecx /* now move remaining bytes */
2455 andl $3,%ecx
2456 RECOVERY_SECTION
2457 RECOVER(kdp_vm_read_fail)
2458 rep
2459 movsb
2460 kdp_vm_read_done:
/* On the success path %ecx is 0 here (rep movsb ran to completion),
 * so the subtract returns the full original count. */
2461 movl 8+S_ARG2,%edx /* get count */
2462 subl %ecx,%edx /* Return number of bytes transferred */
2463 movl %edx,%eax
2464
2465 popl %edi /* restore registers */
2466 popl %esi
2467 ret /* and return */
2468
2469 kdp_vm_read_fail:
/* Wrap-around or fault: report no bytes copied (partial progress is
 * deliberately not reported). */
2470 xorl %eax,%eax /* didn't copy a thing. */
2471
2472 popl %edi
2473 popl %esi
2474 ret
2475 #endif
2476
2477 /*
2478 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
2479 */
2480 ENTRY(rdmsr_carefully)
/* int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 * Read MSR `msr` into *lo / *hi. A #GP from rdmsr (e.g. a non-existent
 * MSR) unwinds via the RECOVER entry to rdmsr_fail.
 * Returns 0 on success, 1 if the rdmsr faulted. */
2481 movl S_ARG0, %ecx /* rdmsr takes the MSR number in %ecx */
2482 RECOVERY_SECTION
2483 RECOVER(rdmsr_fail)
2484 rdmsr /* result in %edx:%eax */
2485 movl S_ARG1, %ecx
2486 movl %eax, (%ecx) /* *lo = low 32 bits */
2487 movl S_ARG2, %ecx
2488 movl %edx, (%ecx) /* *hi = high 32 bits */
2489 movl $0, %eax /* success */
2490 ret
2491
2492 rdmsr_fail:
/* Fault recovery target: the rdmsr raised an exception. */
2493 movl $1, %eax /* failure */
2494 ret
2495
2496 /*
2497 * Done with recovery and retry tables.
2498 */
2499 RECOVERY_SECTION
2500 RECOVER_TABLE_END
2501 RETRY_SECTION
2502 RETRY_TABLE_END
2503
2504
2505
2506 ENTRY(dr6)
/* dr6(): return the debug status register DR6. */
2507 movl %db6, %eax
2508 ret
2509
2510 /* dr<i>(address, type, len, persistence)
2511 */
/* dr<i>(address, type, len, persistence)
 * Program hardware breakpoint register DB<i> with `address`, then merge
 * this breakpoint's control bits into DR7 at the shared tail (label 0:).
 * Each entry loads %ecx with 2*i, which is both:
 *   - the shift count for DB<i>'s enable bits in DR7 (bits 2i, 2i+1), and
 *   - half the byte index into the dr_msk table (scaled by 2 below).
 * The old address/DR7 values are also journalled into dr_addr[] slots
 * (i, 4, 5, 7) — NOTE(review): apparently for debugger inspection; confirm. */
2512 ENTRY(dr0)
2513 movl S_ARG0, %eax
2514 movl %eax,EXT(dr_addr)
2515 movl %eax, %db0
2516 movl $0, %ecx
2517 jmp 0f
2518 ENTRY(dr1)
2519 movl S_ARG0, %eax
2520 movl %eax,EXT(dr_addr)+1*4
2521 movl %eax, %db1
2522 movl $2, %ecx
2523 jmp 0f
2524 ENTRY(dr2)
2525 movl S_ARG0, %eax
2526 movl %eax,EXT(dr_addr)+2*4
2527 movl %eax, %db2
2528 movl $4, %ecx
2529 jmp 0f
2530
2531 ENTRY(dr3)
2532 movl S_ARG0, %eax
2533 movl %eax,EXT(dr_addr)+3*4
2534 movl %eax, %db3
2535 movl $6, %ecx
2536
/* Shared tail: %ecx = 2*i. Rebuild DB<i>'s fields in DR7:
 *   enable bits at position 2i (from persistence, B_ARG3),
 *   R/W type bits at position 16+4i (from type, B_ARG1),
 *   length bits at position 18+4i (from len, B_ARG2). */
2537 0:
2538 pushl %ebp
2539 movl %esp, %ebp
2540
2541 movl %db7, %edx
2542 movl %edx,EXT(dr_addr)+4*4 /* journal old DR7 */
2543 andl dr_msk(,%ecx,2),%edx /* clear out new entry (dr_msk[i]: %ecx*2 = 4*i bytes) */
2544 movl %edx,EXT(dr_addr)+5*4 /* journal masked DR7 */
2545 movzbl B_ARG3, %eax /* persistence -> enable bits */
2546 andb $3, %al
2547 shll %cl, %eax /* << 2i */
2548 orl %eax, %edx
2549
2550 movzbl B_ARG1, %eax /* type -> R/W bits */
2551 andb $3, %al
2552 addb $0x10, %cl /* %cl = 2i + 16 */
2553 shll %cl, %eax
2554 orl %eax, %edx
2555
2556 movzbl B_ARG2, %eax /* len -> length bits */
2557 andb $3, %al
2558 addb $0x2, %cl /* %cl = 2i + 18 */
2559 shll %cl, %eax
2560 orl %eax, %edx
2561
2562 movl %edx, %db7 /* install new DR7 */
2563 movl %edx,EXT(dr_addr)+7*4 /* journal new DR7 */
2564 movl %edx, %eax /* return new DR7 value */
2565 leave
2566 ret
2567
2568 .data
/* dr_msk[i]: AND-mask that clears DB<i>'s fields in DR7 —
 * the two enable bits at 2i and the 4-bit len/type nibble at 16+4i. */
2569 dr_msk:
2570 .long ~0x000f0003 /* DB0: bits 0-1, 16-19 */
2571 .long ~0x00f0000c /* DB1: bits 2-3, 20-23 */
2572 .long ~0x0f000030 /* DB2: bits 4-5, 24-27 */
2573 .long ~0xf00000c0 /* DB3: bits 6-7, 28-31 */
/* dr_addr[8]: journal written by dr0..dr3 — slots 0-3 hold the last
 * address set per register; 4, 5, 7 hold old/masked/new DR7. */
2574 ENTRY(dr_addr)
2575 .long 0,0,0,0
2576 .long 0,0,0,0
2577 .text
2578
2579 ENTRY(get_cr0)
/* get_cr0(): return control register CR0. */
2580 movl %cr0, %eax
2581 ret
2582
2583 ENTRY(set_cr0)
/* set_cr0(value): load control register CR0 from the first stack arg. */
2584 movl 4(%esp), %eax
2585 movl %eax, %cr0
2586 ret
2587
2588 #ifndef SYMMETRY
2589
2590 /*
2591 * ffs(mask)
2592 */
2593 ENTRY(ffs)
/* ffs(mask): find first set bit.
 * Returns the 1-based index of the lowest set bit, 0 if mask == 0
 * (bsfl sets ZF when the source is zero and leaves no valid index). */
2594 bsfl S_ARG0, %eax
2595 jz 0f
2596 incl %eax /* convert 0-based bsf index to 1-based */
2597 ret
2598 0: xorl %eax, %eax /* mask was 0 */
2599 ret
2600
2601 /*
2602 * cpu_shutdown()
2603 * Force reboot
2604 */
2605
/* Empty IDT descriptor (limit 0, base 0) for forcing a triple fault. */
2606 null_idtr:
2607 .word 0
2608 .long 0
2609
2610 Entry(cpu_shutdown)
/* cpu_shutdown(): force a reboot by loading an empty IDT and then
 * faulting — the divide-by-zero cannot be dispatched, escalating to a
 * triple fault, which resets the processor. */
2611 lidt null_idtr /* disable the interrupt handler */
2612 xor %ecx,%ecx /* generate a divide by zero */
2613 div %ecx,%eax /* reboot now */
2614 ret /* this will "never" be executed */
2615
2616 #endif /* SYMMETRY */
2617
2618
2619 /*
2620 * setbit(int bitno, int *s) - set bit in bit string
2621 */
2622 ENTRY(setbit)
/* setbit(int bitno, int *s): set bit `bitno` in the bit string at s.
 * btsl with a register operand addresses past the first word for
 * bitno >= 32, so the string may span multiple ints. */
2623 movl S_ARG0, %ecx /* bit number */
2624 movl S_ARG1, %eax /* address */
2625 btsl %ecx, (%eax) /* set bit */
2626 ret
2627
2628 /*
2629 * clrbit(int bitno, int *s) - clear bit in bit string
2630 */
2631 ENTRY(clrbit)
/* clrbit(int bitno, int *s): clear bit `bitno` in the bit string at s.
 * Like setbit, btrl reaches past the first word for bitno >= 32. */
2632 movl S_ARG0, %ecx /* bit number */
2633 movl S_ARG1, %eax /* address */
2634 btrl %ecx, (%eax) /* clear bit */
2635 ret
2636
2637 /*
2638 * ffsbit(int *s) - find first set bit in bit string
2639 */
2640 ENTRY(ffsbit)
/* ffsbit(int *s): find first set bit in a bit string.
 * Returns the 0-based bit offset of the first set bit, scanning one
 * 32-bit word at a time. NOTE(review): there is no bound — if no bit is
 * ever set the loop walks off the end of the string; callers must
 * guarantee at least one set bit. */
2641 movl S_ARG0, %ecx /* address */
2642 movl $0, %edx /* base offset */
2643 0:
2644 bsfl (%ecx), %eax /* check argument bits */
2645 jnz 1f /* found bit, return */
2646 addl $4, %ecx /* increment address */
2647 addl $32, %edx /* increment offset */
2648 jmp 0b /* try again */
2649 1:
2650 addl %edx, %eax /* return offset = word base + bit index */
2651 ret
2652
2653 /*
2654 * testbit(int nr, volatile void *array)
2655 *
2656 * Test to see if the bit is set within the bit string
2657 */
2658
2659 ENTRY(testbit)
/* testbit(int nr, volatile void *array)
 * Test bit `nr` in the bit string. btl copies the bit into CF and
 * sbbl %eax,%eax materializes CF across the register:
 * returns -1 (all ones) if the bit is set, 0 if clear. */
2660 movl S_ARG0,%eax /* Get the bit to test */
2661 movl S_ARG1,%ecx /* get the array string */
2662 btl %eax,(%ecx) /* CF = bit value */
2663 sbbl %eax,%eax /* %eax = CF ? -1 : 0 */
2664 ret
2665
2666 ENTRY(get_pc)
/* get_pc(): return the saved return address from the caller's frame.
 * Reads 4(%ebp) without building a frame, so this yields the caller's
 * own return address — assumes the caller maintains %ebp as a frame
 * pointer (TODO confirm: breaks under frame-pointer omission). */
2667 movl 4(%ebp),%eax
2668 ret
2669
2670 #if ETAP
2671
2672 ENTRY(etap_get_pc)
/* etap_get_pc(): same technique as get_pc — reads the caller's return
 * address through the caller's %ebp frame (requires frame pointers). */
2673 movl 4(%ebp), %eax /* fetch pc of caller */
2674 ret
2675
2676 ENTRY(tvals_to_etap)
/* tvals_to_etap(sec, nsec): convert a seconds/nanoseconds pair to a
 * 64-bit nanosecond count, returned in %edx:%eax.
 * result = sec * 1e9 + nsec, with the carry propagated into the
 * high half. */
2677 movl S_ARG0, %eax /* seconds */
2678 movl $1000000000, %ecx /* ns per second */
2679 mull %ecx /* %edx:%eax = sec * 1e9 */
2680 addl S_ARG1, %eax /* add nanoseconds to low half */
2681 adc $0, %edx /* propagate carry into high half */
2682 ret
2683
2684 /* etap_time_t
2685 * etap_time_sub(etap_time_t stop, etap_time_t start)
2686 *
2687 * 64bit subtract, returns stop - start
2688 */
2689 ENTRY(etap_time_sub)
/* etap_time_t etap_time_sub(etap_time_t stop, etap_time_t start)
 * 64-bit subtract: returns stop - start in %edx:%eax, with the borrow
 * carried from the low halves into the high halves via sbbl. */
2690 movl S_ARG0, %eax /* stop.low */
2691 movl S_ARG1, %edx /* stop.hi */
2692 subl S_ARG2, %eax /* stop.lo - start.lo */
2693 sbbl S_ARG3, %edx /* stop.hi - start.hi - borrow */
2694 ret
2695
2696 #endif /* ETAP */
2697
2698 ENTRY(minsecurity)
/* minsecurity: builds a frame and deliberately falls through into
 * jail below, parking the thread. */
2699 pushl %ebp
2700 movl %esp,%ebp
2701 /*
2702 * jail: set the EIP to "jail" to block a kernel thread.
2703 * Useful to debug synchronization problems on MPs.
2704 */
2705 ENTRY(jail)
2706 jmp EXT(jail) /* spin forever: jump-to-self */
2707
2708 /*
2709 * unsigned int
2710 * div_scale(unsigned int dividend,
2711 * unsigned int divisor,
2712 * unsigned int *scale)
2713 *
2714 * This function returns (dividend << *scale) //divisor where *scale
2715 * is the largest possible value before overflow. This is used in
2716 * computation where precision must be achieved in order to avoid
2717 * floating point usage.
2718 *
2719 * Algorithm:
2720 * *scale = 0;
2721 * while (((dividend >> *scale) >= divisor))
2722 * (*scale)++;
2723 * *scale = 32 - *scale;
2724 * return ((dividend << *scale) / divisor);
2725 */
2726 ENTRY(div_scale)
/* unsigned int div_scale(dividend, divisor, *scale)
 * Returns (dividend << *scale) / divisor with *scale set to the largest
 * shift that avoids overflow; used for fixed-point precision without FP.
 * The loop shifts the 64-bit pair %edx:%eax right until the high half
 * (%edx, holding the dividend) drops below the divisor, counting the
 * shifts in %ecx; the divl then consumes %edx:%eax as one 64-bit value,
 * which equals dividend << (32 - %ecx). */
2727 PUSH_FRAME
2728 xorl %ecx, %ecx /* *scale = 0 */
2729 xorl %eax, %eax /* low half starts empty */
2730 movl ARG0, %edx /* get dividend (in the HIGH half) */
2731 0:
2732 cmpl ARG1, %edx /* if (divisor > dividend) */
2733 jle 1f /* goto 1f */
2734 addl $1, %ecx /* (*scale)++ */
2735 shrdl $1, %edx, %eax /* shift %edx's low bit into %eax */
2736 shrl $1, %edx /* dividend >> 1 */
2737 jmp 0b /* goto 0b */
2738 1:
2739 divl ARG1 /* (dividend << (32 - *scale)) / divisor */
2740 movl ARG2, %edx /* get scale pointer */
2741 movl $32, (%edx) /* *scale = 32 */
2742 subl %ecx, (%edx) /* *scale -= shift count */
2743 POP_FRAME
2744 ret
2745
2746 /*
2747 * unsigned int
2748 * mul_scale(unsigned int multiplicand,
2749 * unsigned int multiplier,
2750 * unsigned int *scale)
2751 *
2752 * This function returns ((multiplicand * multiplier) >> *scale) where
2753 * scale is the largest possible value before overflow. This is used in
2754 * computation where precision must be achieved in order to avoid
2755 * floating point usage.
2756 *
2757 * Algorithm:
2758 * *scale = 0;
2759 * while (overflow((multiplicand * multiplier) >> *scale))
2760 * (*scale)++;
2761 * return ((multiplicand * multiplier) >> *scale);
2762 */
2763 ENTRY(mul_scale)
/* unsigned int mul_scale(multiplicand, multiplier, *scale)
 * Returns (multiplicand * multiplier) >> *scale, where *scale is the
 * smallest shift that makes the product fit in 32 bits; used for
 * fixed-point precision without FP.
 * mull leaves the 64-bit product in %edx:%eax; the loop shifts it right
 * until the high half is zero, counting shifts in %ecx. */
2764 PUSH_FRAME
2765 xorl %ecx, %ecx /* *scale = 0 */
2766 movl ARG0, %eax /* get multiplicand */
2767 mull ARG1 /* %edx:%eax = multiplicand * multiplier */
2768 0:
2769 cmpl $0, %edx /* if (!overflow()) */
2770 je 1f /* goto 1 */
2771 addl $1, %ecx /* (*scale)++ */
2772 shrdl $1, %edx, %eax /* 64-bit product >> 1 (low half) */
2773 shrl $1, %edx /* 64-bit product >> 1 (high half) */
2774 jmp 0b
2775 1:
2776 movl ARG2, %edx /* get scale pointer */
2777 movl %ecx, (%edx) /* set *scale */
2778 POP_FRAME
2779 ret
2780
2781 #ifdef MACH_BSD
2782 /*
2783 * BSD System call entry point..
2784 */
2785
2786 Entry(trap_unix_syscall)
/* BSD system call entry point.
 * Builds a user register save area on the PCB stack (trap frame layout),
 * switches to the kernel stack, and calls unix_syscall() with a pointer
 * to the saved registers. On return, the C return value is stored into
 * the saved %eax and control resumes the user thread via
 * return_from_trap.
 * NOTE(review): entered via a gate that pushed cs/eip but not eflags in
 * the usual order — the shuffle below rotates the three slots into the
 * layout the generic trap code expects; confirm against the gate setup. */
2787 trap_unix_addr:
2788 pushf /* save flags as soon as possible */
2789 trap_unix_2:
2790 pushl %eax /* save system call number */
2791 pushl $0 /* clear trap number slot */
2792
2793 pusha /* save the general registers */
2794 pushl %ds /* and the segment registers */
2795 pushl %es
2796 pushl %fs
2797 pushl %gs
2798
2799 mov %ss,%dx /* switch to kernel data segment */
2800 mov %dx,%ds
2801 mov %dx,%es
2802 mov $ CPU_DATA_GS,%dx /* per-cpu data segment into %gs */
2803 mov %dx,%gs
2804
2805 /*
2806 * Shuffle eflags,eip,cs into proper places
2807 */
2808
2809 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2810 movl R_CS(%esp),%ecx /* eip is in CS slot */
2811 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2812 movl %ecx,R_EIP(%esp) /* fix eip */
2813 movl %edx,R_CS(%esp) /* fix cs */
2814 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2815
2816 TIME_TRAP_UENTRY
2817
2818 negl %eax /* get system call number (arrives negated) */
2819 shll $4,%eax /* manual indexing */
2820
2821 movl %gs:CPU_KERNEL_STACK,%ebx
2822 /* get current kernel stack */
2823 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2824 /* user registers. */
2825
2826 /*
2827 * Register use on entry:
2828 * eax contains syscall number
2829 * ebx contains user regs pointer
2830 */
2831 CAH(call_call)
2832 pushl %ebx /* Push the regs set onto stack */
2833 call EXT(unix_syscall)
2834 popl %ebx
2835 movl %esp,%ecx /* get kernel stack */
2836 or $(KERNEL_STACK_SIZE-1),%ecx /* round up to stack top */
2837 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2838 movl %eax,R_EAX(%esp) /* save return value */
2839 jmp EXT(return_from_trap) /* return to user */
2840
2841 /*
2842 * Entry point for machdep system calls..
2843 */
2844
2845 Entry(trap_machdep_syscall)
/* Entry point for machdep system calls.
 * Identical frame construction to trap_unix_syscall: save user state on
 * the PCB stack, fix up the eflags/eip/cs slot rotation, switch to the
 * kernel stack, and dispatch to machdep_syscall() with a pointer to the
 * saved registers; the C return value is written back into saved %eax. */
2846 pushf /* save flags as soon as possible */
2847 pushl %eax /* save system call number */
2848 pushl $0 /* clear trap number slot */
2849
2850 pusha /* save the general registers */
2851 pushl %ds /* and the segment registers */
2852 pushl %es
2853 pushl %fs
2854 pushl %gs
2855
2856 mov %ss,%dx /* switch to kernel data segment */
2857 mov %dx,%ds
2858 mov %dx,%es
2859 mov $ CPU_DATA_GS,%dx /* per-cpu data segment into %gs */
2860 mov %dx,%gs
2861
2862 /*
2863 * Shuffle eflags,eip,cs into proper places
2864 */
2865
2866 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2867 movl R_CS(%esp),%ecx /* eip is in CS slot */
2868 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2869 movl %ecx,R_EIP(%esp) /* fix eip */
2870 movl %edx,R_CS(%esp) /* fix cs */
2871 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2872
2873 TIME_TRAP_UENTRY
2874
2875 negl %eax /* get system call number (arrives negated) */
2876 shll $4,%eax /* manual indexing */
2877
2878 movl %gs:CPU_KERNEL_STACK,%ebx
2879 /* get current kernel stack */
2880 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2881 /* user registers. */
2882
2883 /*
2884 * Register use on entry:
2885 * eax contains syscall number
2886 * ebx contains user regs pointer
2887 */
2888 CAH(call_call)
2889 pushl %ebx /* pass pointer to saved user registers */
2890 call EXT(machdep_syscall)
2891 popl %ebx
2892 movl %esp,%ecx /* get kernel stack */
2893 or $(KERNEL_STACK_SIZE-1),%ecx /* round up to stack top */
2894 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2895 movl %eax,R_EAX(%esp) /* save return value */
2896 jmp EXT(return_from_trap) /* return to user */
2897
2898 Entry(trap_mach25_syscall)
/* Entry point for Mach 2.5-compatibility system calls.
 * Same frame construction as trap_unix_syscall / trap_machdep_syscall;
 * dispatches to mach25_syscall() with a pointer to the saved user
 * registers, then stores the return value into saved %eax and resumes
 * the user thread. */
2899 pushf /* save flags as soon as possible */
2900 pushl %eax /* save system call number */
2901 pushl $0 /* clear trap number slot */
2902
2903 pusha /* save the general registers */
2904 pushl %ds /* and the segment registers */
2905 pushl %es
2906 pushl %fs
2907 pushl %gs
2908
2909 mov %ss,%dx /* switch to kernel data segment */
2910 mov %dx,%ds
2911 mov %dx,%es
2912 mov $ CPU_DATA_GS,%dx /* per-cpu data segment into %gs */
2913 mov %dx,%gs
2914
2915 /*
2916 * Shuffle eflags,eip,cs into proper places
2917 */
2918
2919 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2920 movl R_CS(%esp),%ecx /* eip is in CS slot */
2921 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2922 movl %ecx,R_EIP(%esp) /* fix eip */
2923 movl %edx,R_CS(%esp) /* fix cs */
2924 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2925
2926 TIME_TRAP_UENTRY
2927
2928 negl %eax /* get system call number (arrives negated) */
2929 shll $4,%eax /* manual indexing */
2930
2931 movl %gs:CPU_KERNEL_STACK,%ebx
2932 /* get current kernel stack */
2933 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2934 /* user registers. */
2935
2936 /*
2937 * Register use on entry:
2938 * eax contains syscall number
2939 * ebx contains user regs pointer
2940 */
2941 CAH(call_call)
2942 pushl %ebx /* pass pointer to saved user registers */
2943 call EXT(mach25_syscall)
2944 popl %ebx
2945 movl %esp,%ecx /* get kernel stack */
2946 or $(KERNEL_STACK_SIZE-1),%ecx /* round up to stack top */
2947 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2948 movl %eax,R_EAX(%esp) /* save return value */
2949 jmp EXT(return_from_trap) /* return to user */
2950
2951 #endif