/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <stat_time.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>

#include <i386/mp.h>

#define	PREEMPT_DEBUG_LOG 0

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDESHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,(APTDPTDI << PDESHIFT)
	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define	ETEXT_ADDR	(EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define	ETEXT_ADDR	$ EXT(etext)
#endif

#define	CX(addr,reg)	addr(,reg,4)

	.text
locore_start:

/*
 * Fault recovery.
 */

#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#define	RETRY_SECTION		.section	__VECTORS, __retries
#else
#define	RECOVERY_SECTION	.text
#define	RETRY_SECTION		.text
#endif

#define	RECOVER_TABLE_START	\
	.align	2		; \
	.globl	EXT(recover_table) ;\
LEXT(recover_table)		;\
	.text

#define	RECOVER(addr)		\
	.align	2;		\
	.long	9f		;\
	.long	addr		;\
	.text			;\
9:

#define	RECOVER_TABLE_END	\
	.align	2		;\
	.globl	EXT(recover_table_end) ;\
LEXT(recover_table_end)		;\
	.text

/*
 * Retry table for certain successful faults.
 */
#define	RETRY_TABLE_START	\
	.align	3;		\
	.globl	EXT(retry_table) ;\
LEXT(retry_table)		;\
	.text

#define	RETRY(addr)		\
	.align	3		;\
	.long	9f		;\
	.long	addr		;\
	.text			;\
9:

#define	RETRY_TABLE_END		\
	.align	3;		\
	.globl	EXT(retry_table_end) ;\
LEXT(retry_table_end)		;\
	.text

/*
 * Allocate recovery and retry tables.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START
	RETRY_SECTION
	RETRY_TABLE_START

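/*
 * Each RECOVER(addr) use records the address of the instruction that
 * follows it (its local label 9:) together with the recovery address
 * addr, and RETRY(addr) does the same with a restart address, so a
 * fault handler can look up a faulting EIP in these tables and resume
 * or retry the copy routines below rather than treating the fault as
 * fatal.
 */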
/*
 * Timing routines.
 */
Entry(timer_update)
	movl	4(%esp),%ecx
	movl	8(%esp),%eax
	movl	12(%esp),%edx
	movl	%eax,TIMER_HIGHCHK(%ecx)
	movl	%edx,TIMER_LOW(%ecx)
	movl	%eax,TIMER_HIGH(%ecx)
	ret

Entry(timer_grab)
	movl	4(%esp),%ecx
0:	movl	TIMER_HIGH(%ecx),%edx
	movl	TIMER_LOW(%ecx),%eax
	cmpl	TIMER_HIGHCHK(%ecx),%edx
	jne	0b
	ret

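/*
 * timer_update() stores the new high word into TIMER_HIGHCHK before
 * rewriting TIMER_LOW and TIMER_HIGH; timer_grab() reads TIMER_HIGH,
 * then TIMER_LOW, then re-checks TIMER_HIGHCHK and loops on a mismatch.
 * That ordering lets a reader obtain a consistent 64-bit timer value
 * without taking a lock, even if an update races with the read.
 */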
#if	STAT_TIME

#define	TIME_TRAP_UENTRY
#define	TIME_TRAP_UEXIT
#define	TIME_INT_ENTRY
#define	TIME_INT_EXIT

#else
/*
 * Nanosecond timing.
 */

/*
 * Low 32-bits of nanotime returned in %eax.
 * Computed from tsc using conversion scale/shift from per-cpu data.
 * Uses %ecx and %edx.
 */
#define	NANOTIME32							\
	pushl	%esi			/* save %esi		     */	;\
	movl	%gs:CPU_THIS,%esi	/* per-cpu data ptr	     */	;\
	addl	$(CPU_RTC_NANOTIME),%esi /* esi -> per-cpu nanotime */	;\
	rdtsc				/* edx:eax = tsc	     */	;\
	subl	RTN_TSC(%esi),%eax	/* eax = (tsc - base_tsc)    */	;\
	mull	RTN_SCALE(%esi)		/* eax *= scale		     */	;\
	movl	RTN_SHIFT(%esi),%ecx	/* ecx = shift		     */	;\
	shrdl	%cl,%edx,%eax		/* edx:eax >> shift	     */	;\
	andb	$32,%cl			/* shift == 32?		     */	;\
	cmovnel	%edx,%eax		/* %eax = %edx if so	     */	;\
	addl	RTN_NANOS(%esi),%eax	/* add base ns		     */	;\
	popl	%esi
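/*
 * In C terms, NANOTIME32 computes roughly:
 *
 *	nanos32 = (uint32_t)(((tsc - RTN_TSC) * RTN_SCALE) >> RTN_SHIFT)
 *		  + RTN_NANOS;
 *
 * The andb/cmovnel pair handles RTN_SHIFT == 32, where shrdl's masked
 * shift count would otherwise leave the unshifted low word in %eax.
 */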

/*
 * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
 */
#define	TIMER_UPDATE(treg,dreg)						\
	addl	TIMER_LOW(treg),dreg	 /* add delta low bits	    */	;\
	adcl	$0,TIMER_HIGHCHK(treg)	 /* add carry check bits    */	;\
	movl	dreg,TIMER_LOW(treg)	 /* store updated low bits  */	;\
	movl	TIMER_HIGHCHK(treg),dreg /* copy high check bits    */	;\
	movl	dreg,TIMER_HIGH(treg)	 /*   to high bits	    */
1c79356b
A
229
230/*
91447636 231 * Add time delta to old timer and start new.
1c79356b 232 */
91447636
A
233#define TIMER_EVENT(old,new) \
234 pushl %eax /* must be invariant */ ;\
235 cli /* block interrupts */ ;\
236 NANOTIME32 /* eax low bits nanosecs */ ;\
237 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
238 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
239 movl %eax,%edx /* save timestamp in %edx */ ;\
240 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
241 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
242 addl $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\
243 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ ;\
244 movl %gs:CPU_PROCESSOR,%edx /* get current processor */ ;\
245 movl %ecx,CURRENT_TIMER(%edx) /* set current timer */ ;\
246 sti /* interrupts on */ ;\
247 popl %eax /* must be invariant */
1c79356b
A
248
249/*
250 * Update time on user trap entry.
91447636
A
251 * Uses %ecx,%edx.
252 */
253#define TIME_TRAP_UENTRY TIMER_EVENT(USER,SYSTEM)
1c79356b
A
254
255/*
256 * update time on user trap exit.
91447636
A
257 * Uses %ecx,%edx.
258 */
259#define TIME_TRAP_UEXIT TIMER_EVENT(SYSTEM,USER)
1c79356b
A
260
261/*
262 * update time on interrupt entry.
91447636 263 * Uses %eax,%ecx,%edx.
1c79356b
A
264 */
265#define TIME_INT_ENTRY \
91447636
A
266 NANOTIME32 /* eax low bits nanosecs */ ;\
267 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
268 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
269 movl %eax,%edx /* save timestamp in %edx */ ;\
270 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
271 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
272 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
273 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
274 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
1c79356b
A
275
276/*
277 * update time on interrupt exit.
91447636 278 * Uses %eax, %ecx, %edx.
1c79356b
A
279 */
280#define TIME_INT_EXIT \
91447636
A
281 NANOTIME32 /* eax low bits nanosecs */ ;\
282 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
283 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
284 movl %eax,%edx /* save timestamp in %edx */ ;\
285 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
286 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
287 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
288 movl CURRENT_TIMER(%ecx),%ecx /* interrupted timer */ ;\
289 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
1c79356b 290
91447636 291#endif /* STAT_TIME */
1c79356b
A
292
293/*
294 * Encapsulate the transfer of exception stack frames between a PCB
295 * and a thread stack. Since the whole point of these is to emulate
296 * a call or exception that changes privilege level, both macros
297 * assume that there is no user esp or ss stored in the source
298 * frame (because there was no change of privilege to generate them).
299 */
300
301/*
302 * Transfer a stack frame from a thread's user stack to its PCB.
303 * We assume the thread and stack addresses have been loaded into
304 * registers (our arguments).
305 *
306 * The macro overwrites edi, esi, ecx and whatever registers hold the
307 * thread and stack addresses (which can't be one of the above three).
308 * The thread address is overwritten with the address of its saved state
309 * (where the frame winds up).
310 *
311 * Must be called on kernel stack.
312 */
313#define FRAME_STACK_TO_PCB(thread, stkp) ;\
314 movl ACT_PCB(thread),thread /* get act`s PCB */ ;\
315 leal PCB_ISS(thread),%edi /* point to PCB`s saved state */;\
316 movl %edi,thread /* save for later */ ;\
317 movl stkp,%esi /* point to start of frame */ ;\
9bccf70c 318 movl $ R_UESP,%ecx ;\
1c79356b
A
319 sarl $2,%ecx /* word count for transfer */ ;\
320 cld /* we`re incrementing */ ;\
321 rep ;\
322 movsl /* transfer the frame */ ;\
9bccf70c 323 addl $ R_UESP,stkp /* derive true "user" esp */ ;\
1c79356b
A
324 movl stkp,R_UESP(thread) /* store in PCB */ ;\
325 movl $0,%ecx ;\
326 mov %ss,%cx /* get current ss */ ;\
327 movl %ecx,R_SS(thread) /* store in PCB */
328
329/*
330 * Transfer a stack frame from a thread's PCB to the stack pointed
331 * to by the PCB. We assume the thread address has been loaded into
332 * a register (our argument).
333 *
334 * The macro overwrites edi, esi, ecx and whatever register holds the
335 * thread address (which can't be one of the above three). The
336 * thread address is overwritten with the address of its saved state
337 * (where the frame winds up).
338 *
339 * Must be called on kernel stack.
340 */
341#define FRAME_PCB_TO_STACK(thread) ;\
342 movl ACT_PCB(thread),%esi /* get act`s PCB */ ;\
343 leal PCB_ISS(%esi),%esi /* point to PCB`s saved state */;\
344 movl R_UESP(%esi),%edi /* point to end of dest frame */;\
345 movl ACT_MAP(thread),%ecx /* get act's map */ ;\
346 movl MAP_PMAP(%ecx),%ecx /* get map's pmap */ ;\
347 cmpl EXT(kernel_pmap), %ecx /* If kernel loaded task */ ;\
348 jz 1f /* use kernel data segment */ ;\
9bccf70c 349 movl $ USER_DS,%cx /* else use user data segment */;\
1c79356b
A
350 mov %cx,%es ;\
3511: ;\
9bccf70c 352 movl $ R_UESP,%ecx ;\
1c79356b
A
353 subl %ecx,%edi /* derive start of frame */ ;\
354 movl %edi,thread /* save for later */ ;\
355 sarl $2,%ecx /* word count for transfer */ ;\
356 cld /* we`re incrementing */ ;\
357 rep ;\
358 movsl /* transfer the frame */ ;\
359 mov %ss,%cx /* restore kernel segments */ ;\
360 mov %cx,%es
361
362#undef PDEBUG
363
364#ifdef PDEBUG
365
366/*
367 * Traditional, not ANSI.
368 */
369#define CAH(label) \
370 .data ;\
371 .globl label/**/count ;\
372label/**/count: ;\
373 .long 0 ;\
374 .globl label/**/limit ;\
375label/**/limit: ;\
376 .long 0 ;\
377 .text ;\
378 addl $1,%ss:label/**/count ;\
379 cmpl $0,label/**/limit ;\
380 jz label/**/exit ;\
381 pushl %eax ;\
382label/**/loop: ;\
383 movl %ss:label/**/count,%eax ;\
384 cmpl %eax,%ss:label/**/limit ;\
385 je label/**/loop ;\
386 popl %eax ;\
387label/**/exit:
388
389#else /* PDEBUG */
390
391#define CAH(label)
392
393#endif /* PDEBUG */
394
395#if MACH_KDB
396/*
397 * Last-ditch debug code to handle faults that might result
398 * from entering kernel (from collocated server) on an invalid
399 * stack. On collocated entry, there's no hardware-initiated
400 * stack switch, so a valid stack must be in place when an
401 * exception occurs, or we may double-fault.
402 *
403 * In case of a double-fault, our only recourse is to switch
404 * hardware "tasks", so that we avoid using the current stack.
405 *
406 * The idea here is just to get the processor into the debugger,
407 * post-haste. No attempt is made to fix up whatever error got
408 * us here, so presumably continuing from the debugger will
409 * simply land us here again -- at best.
410 */
411#if 0
412/*
413 * Note that the per-fault entry points are not currently
414 * functional. The only way to make them work would be to
415 * set up separate TSS's for each fault type, which doesn't
416 * currently seem worthwhile. (The offset part of a task
417 * gate is always ignored.) So all faults that task switch
418 * currently resume at db_task_start.
419 */
420/*
421 * Double fault (Murphy's point) - error code (0) on stack
422 */
423Entry(db_task_dbl_fault)
424 popl %eax
425 movl $(T_DOUBLE_FAULT),%ebx
426 jmp db_task_start
427/*
428 * Segment not present - error code on stack
429 */
430Entry(db_task_seg_np)
431 popl %eax
432 movl $(T_SEGMENT_NOT_PRESENT),%ebx
433 jmp db_task_start
434/*
435 * Stack fault - error code on (current) stack
436 */
437Entry(db_task_stk_fault)
438 popl %eax
439 movl $(T_STACK_FAULT),%ebx
440 jmp db_task_start
441/*
442 * General protection fault - error code on stack
443 */
444Entry(db_task_gen_prot)
445 popl %eax
446 movl $(T_GENERAL_PROTECTION),%ebx
447 jmp db_task_start
448#endif /* 0 */
449/*
450 * The entry point where execution resumes after last-ditch debugger task
451 * switch.
452 */
453Entry(db_task_start)
454 movl %esp,%edx
455 subl $ISS_SIZE,%edx
456 movl %edx,%esp /* allocate i386_saved_state on stack */
457 movl %eax,R_ERR(%esp)
458 movl %ebx,R_TRAPNO(%esp)
459 pushl %edx
1c79356b
A
460 CPU_NUMBER(%edx)
461 movl CX(EXT(mp_dbtss),%edx),%edx
462 movl TSS_LINK(%edx),%eax
1c79356b
A
463 pushl %eax /* pass along selector of previous TSS */
464 call EXT(db_tss_to_frame)
465 popl %eax /* get rid of TSS selector */
466 call EXT(db_trap_from_asm)
467 addl $0x4,%esp
468 /*
469 * And now...?
470 */
471 iret /* ha, ha, ha... */
472#endif /* MACH_KDB */
473
474/*
475 * Trap/interrupt entry points.
476 *
477 * All traps must create the following save area on the PCB "stack":
478 *
479 * gs
480 * fs
481 * es
482 * ds
483 * edi
484 * esi
485 * ebp
486 * cr2 if page fault - otherwise unused
487 * ebx
488 * edx
489 * ecx
490 * eax
491 * trap number
492 * error code
493 * eip
494 * cs
495 * eflags
496 * user esp - if from user
497 * user ss - if from user
498 * es - if from V86 thread
499 * ds - if from V86 thread
500 * fs - if from V86 thread
501 * gs - if from V86 thread
502 *
503 */
504
505/*
506 * General protection or segment-not-present fault.
507 * Check for a GP/NP fault in the kernel_return
508 * sequence; if there, report it as a GP/NP fault on the user's instruction.
509 *
510 * esp-> 0: trap code (NP or GP)
511 * 4: segment number in error
512 * 8 eip
513 * 12 cs
514 * 16 eflags
515 * 20 old registers (trap is from kernel)
516 */
517Entry(t_gen_prot)
518 pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
519 jmp trap_check_kernel_exit /* check for kernel exit sequence */
520
521Entry(t_segnp)
522 pushl $(T_SEGMENT_NOT_PRESENT)
523 /* indicate fault type */
524
525trap_check_kernel_exit:
526 testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
527 jnz EXT(alltraps) /* isn`t kernel trap if so */
528 testl $3,12(%esp) /* is trap from kernel mode? */
529 jne EXT(alltraps) /* if so: */
530 /* check for the kernel exit sequence */
9bccf70c 531 cmpl $ EXT(kret_iret),8(%esp) /* on IRET? */
1c79356b 532 je fault_iret
9bccf70c 533 cmpl $ EXT(kret_popl_ds),8(%esp) /* popping DS? */
1c79356b 534 je fault_popl_ds
9bccf70c 535 cmpl $ EXT(kret_popl_es),8(%esp) /* popping ES? */
1c79356b 536 je fault_popl_es
9bccf70c 537 cmpl $ EXT(kret_popl_fs),8(%esp) /* popping FS? */
1c79356b 538 je fault_popl_fs
9bccf70c 539 cmpl $ EXT(kret_popl_gs),8(%esp) /* popping GS? */
1c79356b
A
540 je fault_popl_gs
541take_fault: /* if none of the above: */
542 jmp EXT(alltraps) /* treat as normal trap. */
543
544/*
545 * GP/NP fault on IRET: CS or SS is in error.
546 * All registers contain the user's values.
547 *
548 * on SP is
549 * 0 trap number
550 * 4 errcode
551 * 8 eip
552 * 12 cs --> trapno
553 * 16 efl --> errcode
554 * 20 user eip
555 * 24 user cs
556 * 28 user eflags
557 * 32 user esp
558 * 36 user ss
559 */
560fault_iret:
561 movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
562 popl %eax /* get trap number */
563 movl %eax,12-4(%esp) /* put in user trap number */
564 popl %eax /* get error code */
565 movl %eax,16-8(%esp) /* put in user errcode */
566 popl %eax /* restore eax */
567 CAH(fltir)
568 jmp EXT(alltraps) /* take fault */
569
570/*
571 * Fault restoring a segment register. The user's registers are still
572 * saved on the stack. The offending segment register has not been
573 * popped.
574 */
575fault_popl_ds:
576 popl %eax /* get trap number */
577 popl %edx /* get error code */
578 addl $12,%esp /* pop stack to user regs */
579 jmp push_es /* (DS on top of stack) */
580fault_popl_es:
581 popl %eax /* get trap number */
582 popl %edx /* get error code */
583 addl $12,%esp /* pop stack to user regs */
584 jmp push_fs /* (ES on top of stack) */
585fault_popl_fs:
586 popl %eax /* get trap number */
587 popl %edx /* get error code */
588 addl $12,%esp /* pop stack to user regs */
589 jmp push_gs /* (FS on top of stack) */
590fault_popl_gs:
591 popl %eax /* get trap number */
592 popl %edx /* get error code */
593 addl $12,%esp /* pop stack to user regs */
594 jmp push_segregs /* (GS on top of stack) */
595
596push_es:
597 pushl %es /* restore es, */
598push_fs:
599 pushl %fs /* restore fs, */
600push_gs:
601 pushl %gs /* restore gs. */
602push_segregs:
603 movl %eax,R_TRAPNO(%esp) /* set trap number */
604 movl %edx,R_ERR(%esp) /* set error code */
605 CAH(fltpp)
606 jmp trap_set_segs /* take trap */
607
608/*
609 * Debug trap. Check for single-stepping across system call into
610 * kernel. If this is the case, taking the debug trap has turned
611 * off single-stepping - save the flags register with the trace
612 * bit set.
613 */
614Entry(t_debug)
615 testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
616 jnz 0f /* isn`t kernel trap if so */
617 testl $3,4(%esp) /* is trap from kernel mode? */
618 jnz 0f /* if so: */
619 cmpl $syscall_entry,(%esp) /* system call entry? */
55e303ae 620 jne 1f /* if so: */
1c79356b
A
621 /* flags are sitting where syscall */
622 /* wants them */
623 addl $8,%esp /* remove eip/cs */
624 jmp syscall_entry_2 /* continue system call entry */
625
55e303ae
A
6261: cmpl $trap_unix_addr,(%esp)
627 jne 0f
628 addl $8,%esp
629 jmp trap_unix_2
630
1c79356b
A
6310: pushl $0 /* otherwise: */
632 pushl $(T_DEBUG) /* handle as normal */
633 jmp EXT(alltraps) /* debug fault */
634
635/*
636 * Page fault traps save cr2.
637 */
638Entry(t_page_fault)
639 pushl $(T_PAGE_FAULT) /* mark a page fault trap */
640 pusha /* save the general registers */
641 movl %cr2,%eax /* get the faulting address */
642 movl %eax,12(%esp) /* save in esp save slot */
643 jmp trap_push_segs /* continue fault */
644
645/*
646 * All 'exceptions' enter here with:
647 * esp-> trap number
648 * error code
649 * old eip
650 * old cs
651 * old eflags
652 * old esp if trapped from user
653 * old ss if trapped from user
654 *
655 * NB: below use of CPU_NUMBER assumes that macro will use correct
656 * segment register for any kernel data accesses.
657 */
658Entry(alltraps)
659 pusha /* save the general registers */
660trap_push_segs:
661 pushl %ds /* save the segment registers */
662 pushl %es
663 pushl %fs
664 pushl %gs
665
666trap_set_segs:
667 movl %ss,%ax
668 movl %ax,%ds
669 movl %ax,%es /* switch to kernel data seg */
670 cld /* clear direction flag */
671 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
672 jnz trap_from_user /* user mode trap if so */
673 testb $3,R_CS(%esp) /* user mode trap? */
674 jnz trap_from_user
91447636 675 cmpl $0,%gs:CPU_ACTIVE_KLOADED
1c79356b
A
676 je trap_from_kernel /* if clear, truly in kernel */
677#ifdef FIXME
678 cmpl ETEXT_ADDR,R_EIP(%esp) /* pc within kernel? */
679 jb trap_from_kernel
680#endif
681trap_from_kloaded:
682 /*
683 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
684 * so transfer the stack frame into the PCB explicitly, then
685 * start running on resulting "PCB stack". We have to set
686 * up a simulated "uesp" manually, since there's none in the
687 * frame.
688 */
91447636 689 mov $ CPU_DATA_GS,%dx
1c79356b
A
690 mov %dx,%gs
691 CAH(atstart)
91447636
A
692 movl %gs:CPU_ACTIVE_KLOADED,%ebx
693 movl %gs:CPU_KERNEL_STACK,%eax
1c79356b
A
694 xchgl %esp,%eax
695 FRAME_STACK_TO_PCB(%ebx,%eax)
696 CAH(atend)
697 jmp EXT(take_trap)
698
699trap_from_user:
91447636 700 mov $ CPU_DATA_GS,%ax
1c79356b
A
701 mov %ax,%gs
702
1c79356b
A
703 TIME_TRAP_UENTRY
704
91447636 705 movl %gs:CPU_KERNEL_STACK,%ebx
1c79356b
A
706 xchgl %ebx,%esp /* switch to kernel stack */
707 /* user regs pointer already set */
708LEXT(take_trap)
709 pushl %ebx /* record register save area */
710 pushl %ebx /* pass register save area to trap */
711 call EXT(user_trap) /* call user trap routine */
712 movl 4(%esp),%esp /* switch back to PCB stack */
713
714/*
715 * Return from trap or system call, checking for ASTs.
716 * On PCB stack.
717 */
718
719LEXT(return_from_trap)
91447636
A
720 movl %gs:CPU_PENDING_AST,%edx
721 cmpl $0,%edx
1c79356b
A
722 je EXT(return_to_user) /* if we need an AST: */
723
91447636 724 movl %gs:CPU_KERNEL_STACK,%esp
1c79356b
A
725 /* switch to kernel stack */
726 pushl $0 /* push preemption flag */
727 call EXT(i386_astintr) /* take the AST */
728 addl $4,%esp /* pop preemption flag */
729 popl %esp /* switch back to PCB stack (w/exc link) */
730 jmp EXT(return_from_trap) /* and check again (rare) */
731 /* ASTs after this point will */
732 /* have to wait */
733
734/*
735 * Arrange the checks needed for kernel-loaded (or kernel-loading)
736 * threads so that branch is taken in kernel-loaded case.
737 */
738LEXT(return_to_user)
739 TIME_TRAP_UEXIT
91447636 740 cmpl $0,%gs:CPU_ACTIVE_KLOADED
1c79356b 741 jnz EXT(return_xfer_stack)
91447636 742 movl %gs:CPU_ACTIVE_THREAD, %ebx /* get active thread */
1c79356b
A
743
744#if MACH_RT
745#if MACH_ASSERT
91447636 746 cmpl $0,%gs:CPU_PREEMPTION_LEVEL
1c79356b
A
747 je EXT(return_from_kernel)
748 int $3
749#endif /* MACH_ASSERT */
750#endif /* MACH_RT */
751
752/*
753 * Return from kernel mode to interrupted thread.
754 */
755
756LEXT(return_from_kernel)
757LEXT(kret_popl_gs)
758 popl %gs /* restore segment registers */
759LEXT(kret_popl_fs)
760 popl %fs
761LEXT(kret_popl_es)
762 popl %es
763LEXT(kret_popl_ds)
764 popl %ds
765 popa /* restore general registers */
766 addl $8,%esp /* discard trap number and error code */
767
768LEXT(kret_iret)
769 iret /* return from interrupt */
770
771
772LEXT(return_xfer_stack)
773 /*
774 * If we're on PCB stack in a kernel-loaded task, we have
775 * to transfer saved state back to thread stack and swap
776 * stack pointers here, because the hardware's not going
777 * to do so for us.
778 */
779 CAH(rxsstart)
91447636
A
780 movl %gs:CPU_KERNEL_STACK,%esp
781 movl %gs:CPU_ACTIVE_KLOADED,%eax
1c79356b
A
782 FRAME_PCB_TO_STACK(%eax)
783 movl %eax,%esp
784 CAH(rxsend)
785 jmp EXT(return_from_kernel)
786
787/*
788 * Hate to put this here, but setting up a separate swap_func for
789 * kernel-loaded threads no longer works, since thread executes
790 * "for a while" (i.e., until it reaches glue code) when first
791 * created, even if it's nominally suspended. Hence we can't
792 * transfer the PCB when the thread first resumes, because we
793 * haven't initialized it yet.
794 */
795/*
796 * Have to force transfer to new stack "manually". Use a string
797 * move to transfer all of our saved state to the stack pointed
798 * to by iss.uesp, then install a pointer to it as our current
799 * stack pointer.
800 */
801LEXT(return_kernel_loading)
91447636
A
802 movl %gs:CPU_KERNEL_STACK,%esp
803 movl %gs:CPU_ACTIVE_THREAD, %ebx /* get active thread */
1c79356b 804 movl %ebx,%edx /* save for later */
1c79356b
A
805 FRAME_PCB_TO_STACK(%ebx)
806 movl %ebx,%esp /* start running on new stack */
91447636 807 movl $0,%gs:CPU_ACTIVE_KLOADED /* set cached indicator */
1c79356b
A
808 jmp EXT(return_from_kernel)
809
810/*
811 * Trap from kernel mode. No need to switch stacks or load segment registers.
812 */
813trap_from_kernel:
814#if MACH_KDB || MACH_KGDB
91447636 815 mov $ CPU_DATA_GS,%ax
1c79356b
A
816 mov %ax,%gs
817 movl %esp,%ebx /* save current stack */
818
819 cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
820 jb 6f /* OK if so */
821
822#if MACH_KGDB
823 cmpl $0,EXT(kgdb_active) /* Unexpected trap in kgdb */
824 je 0f /* no */
825
826 pushl %esp /* Already on kgdb stack */
827 cli
828 call EXT(kgdb_trap)
829 addl $4,%esp
830 jmp EXT(return_from_kernel)
8310: /* should kgdb handle this exception? */
832 cmpl $(T_NO_FPU),R_TRAPNO(%esp) /* FPU disabled? */
833 je 2f /* yes */
834 cmpl $(T_PAGE_FAULT),R_TRAPNO(%esp) /* page fault? */
835 je 2f /* yes */
8361:
837 cli /* disable interrupts */
838 CPU_NUMBER(%edx) /* get CPU number */
839 movl CX(EXT(kgdb_stacks),%edx),%ebx
840 xchgl %ebx,%esp /* switch to kgdb stack */
841 pushl %ebx /* pass old sp as an arg */
842 call EXT(kgdb_from_kernel)
843 popl %esp /* switch back to kernel stack */
844 jmp EXT(return_from_kernel)
8452:
846#endif /* MACH_KGDB */
847
848#if MACH_KDB
849 cmpl $0,EXT(db_active) /* could trap be from ddb? */
850 je 3f /* no */
1c79356b
A
851 CPU_NUMBER(%edx) /* see if this CPU is in ddb */
852 cmpl $0,CX(EXT(kdb_active),%edx)
853 je 3f /* no */
1c79356b
A
854 pushl %esp
855 call EXT(db_trap_from_asm)
856 addl $0x4,%esp
857 jmp EXT(return_from_kernel)
858
8593:
860 /*
861 * Dilemma: don't want to switch to kernel_stack if trap
862 * "belongs" to ddb; don't want to switch to db_stack if
863 * trap "belongs" to kernel. So have to duplicate here the
864 * set of trap types that kernel_trap() handles. Note that
865 * "unexpected" page faults will not be handled by kernel_trap().
866 * In this panic-worthy case, we fall into the debugger with
867 * kernel_stack containing the call chain that led to the
868 * bogus fault.
869 */
870 movl R_TRAPNO(%esp),%edx
871 cmpl $(T_PAGE_FAULT),%edx
872 je 4f
873 cmpl $(T_NO_FPU),%edx
874 je 4f
875 cmpl $(T_FPU_FAULT),%edx
876 je 4f
877 cmpl $(T_FLOATING_POINT_ERROR),%edx
878 je 4f
879 cmpl $(T_PREEMPT),%edx
880 jne 7f
8814:
882#endif /* MACH_KDB */
883
91447636 884 cmpl %gs:CPU_KERNEL_STACK,%esp
1c79356b
A
885 /* if not already on kernel stack, */
886 ja 5f /* check some more */
91447636 887 cmpl %gs:CPU_ACTIVE_STACK,%esp
1c79356b
A
888 ja 6f /* on kernel stack: no switch */
8895:
91447636 890 movl %gs:CPU_KERNEL_STACK,%esp
1c79356b
A
8916:
892 pushl %ebx /* save old stack */
893 pushl %ebx /* pass as parameter */
894 call EXT(kernel_trap) /* to kernel trap routine */
895 addl $4,%esp /* pop parameter */
896 testl %eax,%eax
897 jne 8f
898 /*
899 * If kernel_trap returns false, trap wasn't handled.
900 */
9017:
902#if MACH_KDB
903 CPU_NUMBER(%edx)
904 movl CX(EXT(db_stacks),%edx),%esp
905 pushl %ebx /* pass old stack as parameter */
906 call EXT(db_trap_from_asm)
907#endif /* MACH_KDB */
908#if MACH_KGDB
909 cli /* disable interrupts */
910 CPU_NUMBER(%edx) /* get CPU number */
911 movl CX(EXT(kgdb_stacks),%edx),%esp
912 pushl %ebx /* pass old stack as parameter */
913 call EXT(kgdb_from_kernel)
914#endif /* MACH_KGDB */
915 addl $4,%esp /* pop parameter */
916 testl %eax,%eax
917 jne 8f
918 /*
919 * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap
920 * wasn't handled.
921 */
922 pushl %ebx /* pass old stack as parameter */
923 call EXT(panic_trap)
924 addl $4,%esp /* pop parameter */
9258:
926 movl %ebx,%esp /* get old stack (from callee-saves reg) */
927#else /* MACH_KDB || MACH_KGDB */
928 pushl %esp /* pass parameter */
929 call EXT(kernel_trap) /* to kernel trap routine */
930 addl $4,%esp /* pop parameter */
931#endif /* MACH_KDB || MACH_KGDB */
932
933#if MACH_RT
91447636 934 movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
9bccf70c 935 testl $ AST_URGENT,%eax /* any urgent preemption? */
1c79356b 936 je EXT(return_from_kernel) /* no, nothing to do */
9bccf70c 937 cmpl $ T_PREEMPT,48(%esp) /* preempt request? */
1c79356b 938 jne EXT(return_from_kernel) /* no, nothing to do */
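	/*
	 * The check below verifies that %esp lies within the current
	 * kernel stack: xor'ing %esp with CPU_KERNEL_STACK and masking
	 * with -KERNEL_STACK_SIZE is zero only when both addresses fall
	 * in the same KERNEL_STACK_SIZE-aligned block.
	 */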
91447636 939 movl %gs:CPU_KERNEL_STACK,%eax
1c79356b
A
940 movl %esp,%ecx
941 xorl %eax,%ecx
942 andl $(-KERNEL_STACK_SIZE),%ecx
943 testl %ecx,%ecx /* are we on the kernel stack? */
944 jne EXT(return_from_kernel) /* no, skip it */
945
946#if PREEMPT_DEBUG_LOG
947 pushl 28(%esp) /* stack pointer */
948 pushl 24+4(%esp) /* frame pointer */
949 pushl 56+8(%esp) /* stack pointer */
950 pushl $0f
951 call EXT(log_thread_action)
952 addl $16, %esp
953 .data
9540: String "trap preempt eip"
955 .text
956#endif /* PREEMPT_DEBUG_LOG */
957
958 pushl $1 /* push preemption flag */
959 call EXT(i386_astintr) /* take the AST */
960 addl $4,%esp /* pop preemption flag */
961#endif /* MACH_RT */
962
963 jmp EXT(return_from_kernel)
964
965/*
966 * Called as a function, makes the current thread
967 * return from the kernel as if from an exception.
968 */
969
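/*
 * The "or" below rounds %esp up to the last byte of the current
 * (KERNEL_STACK_SIZE-aligned) kernel stack; the word at -3-IKS_SIZE
 * from that byte, kept just below the i386_kernel_state area at the
 * stack top, is the saved pointer used to switch back to the PCB stack.
 */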
970 .globl EXT(thread_exception_return)
971 .globl EXT(thread_bootstrap_return)
972LEXT(thread_exception_return)
973LEXT(thread_bootstrap_return)
974 movl %esp,%ecx /* get kernel stack */
975 or $(KERNEL_STACK_SIZE-1),%ecx
976 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
977 jmp EXT(return_from_trap)
978
979Entry(call_continuation)
980 movl S_ARG0,%eax /* get continuation */
91447636
A
981 movl S_ARG1,%edx /* continuation param */
982 movl S_ARG2,%ecx /* wait result */
983 movl %esp,%ebp /* get kernel stack */
984 or $(KERNEL_STACK_SIZE-1),%ebp
985 addl $(-3-IKS_SIZE),%ebp
986 movl %ebp,%esp /* pop the stack */
1c79356b 987 xorl %ebp,%ebp /* zero frame pointer */
91447636
A
988 pushl %ecx
989 pushl %edx
990 call *%eax /* call continuation */
991 addl $8,%esp
992 movl %gs:CPU_ACTIVE_THREAD,%eax
993 pushl %eax
994 call EXT(thread_terminate)
1c79356b
A
995
996#if 0
997#define LOG_INTERRUPT(info,msg) \
998 pushal ; \
999 pushl msg ; \
1000 pushl info ; \
1001 call EXT(log_thread_action) ; \
1002 add $8,%esp ; \
1003 popal
1004#define CHECK_INTERRUPT_TIME(n) \
1005 pushal ; \
1006 pushl $n ; \
1007 call EXT(check_thread_time) ; \
1008 add $4,%esp ; \
1009 popal
1010#else
1011#define LOG_INTERRUPT(info,msg)
1012#define CHECK_INTERRUPT_TIME(n)
1013#endif
1014
55e303ae 1015.data
1c79356b
A
1016imsg_start:
1017 String "interrupt start"
1018imsg_end:
1019 String "interrupt end"
1020
55e303ae 1021.text
1c79356b
A
1022/*
1023 * All interrupts enter here.
1024 * old %eax on stack; interrupt number in %eax.
1025 */
1026Entry(all_intrs)
1027 pushl %ecx /* save registers */
1028 pushl %edx
1029 cld /* clear direction flag */
1030
1c79356b
A
1031 pushl %ds /* save segment registers */
1032 pushl %es
55e303ae
A
1033 pushl %fs
1034 pushl %gs
1c79356b
A
1035 mov %ss,%dx /* switch to kernel segments */
1036 mov %dx,%ds
1037 mov %dx,%es
91447636 1038 mov $ CPU_DATA_GS,%dx
1c79356b
A
1039 mov %dx,%gs
1040
91447636
A
1041 /*
1042 * test whether already on interrupt stack
1043 */
1044 movl %gs:CPU_INT_STACK_TOP,%ecx
1045 cmpl %esp,%ecx
1046 jb 1f
1047 leal -INTSTACK_SIZE(%ecx),%edx
1048 cmpl %esp,%edx
1049 jb int_from_intstack
10501:
55e303ae 1051 movl %esp,%edx /* & i386_interrupt_state */
1c79356b
A
1052 xchgl %ecx,%esp /* switch to interrupt stack */
1053
1c79356b 1054 pushl %ecx /* save pointer to old stack */
55e303ae 1055 pushl %edx /* pass &i386_interrupt_state to pe_incoming_interrupt */
91447636 1056 pushl %eax /* push trap number */
9bccf70c 1057
91447636
A
1058 TIME_INT_ENTRY /* do timing */
1059
1c79356b 1060#if MACH_RT
91447636 1061 incl %gs:CPU_PREEMPTION_LEVEL
1c79356b 1062#endif /* MACH_RT */
91447636 1063 incl %gs:CPU_INTERRUPT_LEVEL
1c79356b 1064
1c79356b 1065 call EXT(PE_incoming_interrupt) /* call generic interrupt routine */
9bccf70c 1066 addl $8,%esp /* Pop trap number and eip */
1c79356b
A
1067
1068 .globl EXT(return_to_iret)
1069LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
1070
91447636 1071 decl %gs:CPU_INTERRUPT_LEVEL
1c79356b
A
1072
1073#if MACH_RT
91447636 1074 decl %gs:CPU_PREEMPTION_LEVEL
1c79356b
A
1075#endif /* MACH_RT */
1076
1c79356b 1077 TIME_INT_EXIT /* do timing */
1c79356b
A
1078
1079 popl %esp /* switch back to old stack */
1080
91447636 1081 movl %gs:CPU_PENDING_AST,%eax
1c79356b
A
1082 testl %eax,%eax /* any pending asts? */
1083 je 1f /* no, nothing to do */
1084 testl $(EFL_VM),I_EFL(%esp) /* if in V86 */
1085 jnz ast_from_interrupt /* take it */
1086 testb $3,I_CS(%esp) /* user mode, */
1087 jnz ast_from_interrupt /* take it */
1088#ifdef FIXME
1089 cmpl ETEXT_ADDR,I_EIP(%esp) /* if within kernel-loaded task, */
1090 jnb ast_from_interrupt /* take it */
1091#endif
1092
1093#if MACH_RT
91447636 1094 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption masked? */
1c79356b 1095 jne 1f /* yes, skip it */
9bccf70c 1096 testl $ AST_URGENT,%eax /* any urgent requests? */
1c79356b 1097 je 1f /* no, skip it */
9bccf70c 1098 cmpl $ EXT(locore_end),I_EIP(%esp) /* are we in locore code? */
1c79356b 1099 jb 1f /* yes, skip it */
91447636 1100 movl %gs:CPU_KERNEL_STACK,%eax
1c79356b
A
1101 movl %esp,%ecx
1102 xorl %eax,%ecx
1103 andl $(-KERNEL_STACK_SIZE),%ecx
1104 testl %ecx,%ecx /* are we on the kernel stack? */
1105 jne 1f /* no, skip it */
1106
1107/*
1108 * Take an AST from kernel space. We don't need (and don't want)
1109 * to do as much as the case where the interrupt came from user
1110 * space.
1111 */
1112#if PREEMPT_DEBUG_LOG
1113 pushl $0
1114 pushl $0
1115 pushl I_EIP+8(%esp)
1116 pushl $0f
1117 call EXT(log_thread_action)
1118 addl $16, %esp
1119 .data
11200: String "intr preempt eip"
1121 .text
1122#endif /* PREEMPT_DEBUG_LOG */
1123
1124 sti
1125 pushl $1 /* push preemption flag */
1126 call EXT(i386_astintr) /* take the AST */
1127 addl $4,%esp /* pop preemption flag */
1128#endif /* MACH_RT */
1129
11301:
55e303ae
A
1131 pop %gs
1132 pop %fs
1c79356b
A
1133 pop %es /* restore segment regs */
1134 pop %ds
1135 pop %edx
1136 pop %ecx
1137 pop %eax
1138 iret /* return to caller */
1139
1140int_from_intstack:
1141#if MACH_RT
91447636 1142 incl %gs:CPU_PREEMPTION_LEVEL
1c79356b
A
1143#endif /* MACH_RT */
1144
91447636 1145 incl %gs:CPU_INTERRUPT_LEVEL
1c79356b 1146
91447636 1147 movl %esp, %edx /* i386_interrupt_state */
	pushl	%edx			/* pass &i386_interrupt_state to PE_incoming_interrupt */
9bccf70c 1149
1c79356b
A
1150 pushl %eax /* Push trap number */
1151
1152 call EXT(PE_incoming_interrupt)
91447636 1153 addl $20,%esp /* pop i386_interrupt_state, gs,fs,es,ds */
1c79356b
A
1154
1155LEXT(return_to_iret_i) /* ( label for kdb_kintr) */
1156
1157 addl $4,%esp /* pop trap number */
1158
91447636 1159 decl %gs:CPU_INTERRUPT_LEVEL
1c79356b
A
1160
1161#if MACH_RT
91447636 1162 decl %gs:CPU_PREEMPTION_LEVEL
1c79356b
A
1163#endif /* MACH_RT */
1164
1165 pop %edx /* must have been on kernel segs */
1166 pop %ecx
1167 pop %eax /* no ASTs */
1168 iret
1169
1170/*
1171 * Take an AST from an interrupt.
1172 * On PCB stack.
1173 * sp-> es -> edx
1174 * ds -> ecx
1175 * edx -> eax
1176 * ecx -> trapno
1177 * eax -> code
1178 * eip
1179 * cs
1180 * efl
1181 * esp
1182 * ss
1183 */
1184ast_from_interrupt:
55e303ae
A
1185 pop %gs
1186 pop %fs
1c79356b
A
1187 pop %es /* restore all registers ... */
1188 pop %ds
1189 popl %edx
1190 popl %ecx
1191 popl %eax
1192 sti /* Reenable interrupts */
1193 pushl $0 /* zero code */
1194 pushl $0 /* zero trap number */
1195 pusha /* save general registers */
1196 push %ds /* save segment registers */
1197 push %es
1198 push %fs
1199 push %gs
1200 mov %ss,%dx /* switch to kernel segments */
1201 mov %dx,%ds
1202 mov %dx,%es
91447636 1203 mov $ CPU_DATA_GS,%dx
1c79356b
A
1204 mov %dx,%gs
1205
1206 /*
1207 * See if we interrupted a kernel-loaded thread executing
1208 * in its own task.
1209 */
1210 CPU_NUMBER(%edx)
1211 testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
1212 jnz 0f /* user mode trap if so */
1213 testb $3,R_CS(%esp)
1214 jnz 0f /* user mode, back to normal */
1215#ifdef FIXME
1216 cmpl ETEXT_ADDR,R_EIP(%esp)
1217 jb 0f /* not kernel-loaded, back to normal */
1218#endif
1219
1220 /*
1221 * Transfer the current stack frame by hand into the PCB.
1222 */
1223 CAH(afistart)
91447636
A
1224 movl %gs:CPU_ACTIVE_KLOADED,%eax
1225 movl %gs:CPU_KERNEL_STACK,%ebx
1c79356b
A
1226 xchgl %ebx,%esp
1227 FRAME_STACK_TO_PCB(%eax,%ebx)
1228 CAH(afiend)
1229 TIME_TRAP_UENTRY
1230 jmp 3f
12310:
1232 TIME_TRAP_UENTRY
1233
91447636 1234 movl %gs:CPU_KERNEL_STACK,%eax
1c79356b
A
1235 /* switch to kernel stack */
1236 xchgl %eax,%esp
12373:
1238 pushl %eax
1239 pushl $0 /* push preemption flag */
1240 call EXT(i386_astintr) /* take the AST */
1241 addl $4,%esp /* pop preemption flag */
1242 popl %esp /* back to PCB stack */
1243 jmp EXT(return_from_trap) /* return */
1244
1245#if MACH_KDB || MACH_KGDB
1246/*
1247 * kdb_kintr: enter kdb from keyboard interrupt.
1248 * Chase down the stack frames until we find one whose return
1249 * address is the interrupt handler. At that point, we have:
1250 *
1251 * frame-> saved %ebp
1252 * return address in interrupt handler
1253 * ivect
1254 * saved SPL
1255 * return address == return_to_iret_i
1256 * saved %edx
1257 * saved %ecx
1258 * saved %eax
1259 * saved %eip
1260 * saved %cs
1261 * saved %efl
1262 *
1263 * OR:
1264 * frame-> saved %ebp
1265 * return address in interrupt handler
1266 * ivect
1267 * saved SPL
1268 * return address == return_to_iret
1269 * pointer to save area on old stack
1270 * [ saved %ebx, if accurate timing ]
1271 *
1272 * old stack: saved %es
1273 * saved %ds
1274 * saved %edx
1275 * saved %ecx
1276 * saved %eax
1277 * saved %eip
1278 * saved %cs
1279 * saved %efl
1280 *
1281 * Call kdb, passing it that register save area.
1282 */
1283
1284#if MACH_KGDB
1285Entry(kgdb_kintr)
1286#endif /* MACH_KGDB */
1287#if MACH_KDB
1288Entry(kdb_kintr)
1289#endif /* MACH_KDB */
1290 movl %ebp,%eax /* save caller`s frame pointer */
9bccf70c
A
1291 movl $ EXT(return_to_iret),%ecx /* interrupt return address 1 */
1292 movl $ EXT(return_to_iret_i),%edx /* interrupt return address 2 */
1c79356b
A
1293
12940: cmpl 16(%eax),%ecx /* does this frame return to */
1295 /* interrupt handler (1)? */
1296 je 1f
1297 cmpl $kdb_from_iret,16(%eax)
1298 je 1f
1299 cmpl 16(%eax),%edx /* interrupt handler (2)? */
1300 je 2f /* if not: */
1301 cmpl $kdb_from_iret_i,16(%eax)
1302 je 2f
1303 movl (%eax),%eax /* try next frame */
1304 jmp 0b
1305
13061: movl $kdb_from_iret,16(%eax) /* returns to kernel/user stack */
1307 ret
1308
13092: movl $kdb_from_iret_i,16(%eax)
1310 /* returns to interrupt stack */
1311 ret
1312
1313/*
1314 * On return from keyboard interrupt, we will execute
1315 * kdb_from_iret_i
1316 * if returning to an interrupt on the interrupt stack
1317 * kdb_from_iret
1318 * if returning to an interrupt on the user or kernel stack
1319 */
1320kdb_from_iret:
1321 /* save regs in known locations */
1c79356b 1322 pushl %ebx /* caller`s %ebx is in reg */
1c79356b
A
1323 pushl %ebp
1324 pushl %esi
1325 pushl %edi
1326 push %fs
1327 push %gs
1328#if MACH_KGDB
1329 cli
1330 pushl %esp /* pass regs */
1331 call EXT(kgdb_kentry) /* to kgdb */
1332 addl $4,%esp /* pop parameters */
1333#endif /* MACH_KGDB */
1334#if MACH_KDB
1335 pushl %esp /* pass regs */
1336 call EXT(kdb_kentry) /* to kdb */
1337 addl $4,%esp /* pop parameters */
1338#endif /* MACH_KDB */
1339 pop %gs /* restore registers */
1340 pop %fs
1341 popl %edi
1342 popl %esi
1343 popl %ebp
1c79356b 1344 popl %ebx
1c79356b
A
1345 jmp EXT(return_to_iret) /* normal interrupt return */
1346
1347kdb_from_iret_i: /* on interrupt stack */
1348 pop %edx /* restore saved registers */
1349 pop %ecx
1350 pop %eax
1351 pushl $0 /* zero error code */
1352 pushl $0 /* zero trap number */
1353 pusha /* save general registers */
1354 push %ds /* save segment registers */
1355 push %es
1356 push %fs
1357 push %gs
1358#if MACH_KGDB
1359 cli /* disable interrupts */
1360 CPU_NUMBER(%edx) /* get CPU number */
1361 movl CX(EXT(kgdb_stacks),%edx),%ebx
1362 xchgl %ebx,%esp /* switch to kgdb stack */
1363 pushl %ebx /* pass old sp as an arg */
1364 call EXT(kgdb_from_kernel)
1365 popl %esp /* switch back to interrupt stack */
1366#endif /* MACH_KGDB */
1367#if MACH_KDB
1368 pushl %esp /* pass regs, */
1369 pushl $0 /* code, */
1370 pushl $-1 /* type to kdb */
1371 call EXT(kdb_trap)
1372 addl $12,%esp
1373#endif /* MACH_KDB */
1374 pop %gs /* restore segment registers */
1375 pop %fs
1376 pop %es
1377 pop %ds
1378 popa /* restore general registers */
1379 addl $8,%esp
1380 iret
1381
1382#endif /* MACH_KDB || MACH_KGDB */
1383
1384
1385/*
1386 * Mach RPC enters through a call gate, like a system call.
1387 */
1388
1389Entry(mach_rpc)
1390 pushf /* save flags as soon as possible */
1391 pushl %eax /* save system call number */
1392 pushl $0 /* clear trap number slot */
1393
1394 pusha /* save the general registers */
1395 pushl %ds /* and the segment registers */
1396 pushl %es
1397 pushl %fs
1398 pushl %gs
1399
1400 mov %ss,%dx /* switch to kernel data segment */
1401 mov %dx,%ds
1402 mov %dx,%es
91447636 1403 mov $ CPU_DATA_GS,%dx
1c79356b
A
1404 mov %dx,%gs
1405
1406/*
1407 * Shuffle eflags,eip,cs into proper places
1408 */
1409
1410 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
1411 movl R_CS(%esp),%ecx /* eip is in CS slot */
1412 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
1413 movl %ecx,R_EIP(%esp) /* fix eip */
1414 movl %edx,R_CS(%esp) /* fix cs */
1415 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
1416
1c79356b
A
1417 TIME_TRAP_UENTRY
1418
1419 negl %eax /* get system call number */
1420 shll $4,%eax /* manual indexing */
1421
1422/*
1423 * Check here for mach_rpc from kernel-loaded task --
1424 * - Note that kernel-loaded task returns via real return.
1425 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1426 * so transfer the stack frame into the PCB explicitly, then
1427 * start running on resulting "PCB stack". We have to set
1428 * up a simulated "uesp" manually, since there's none in the
1429 * frame.
1430 */
91447636 1431 cmpl $0,%gs:CPU_ACTIVE_KLOADED
1c79356b
A
1432 jz 2f
1433 CAH(mrstart)
91447636
A
1434 movl %gs:CPU_ACTIVE_KLOADED,%ebx
1435 movl %gs:CPU_KERNEL_STACK,%edx
1c79356b
A
1436 xchgl %edx,%esp
1437
1438 FRAME_STACK_TO_PCB(%ebx,%edx)
1439 CAH(mrend)
1440
1c79356b
A
1441 jmp 3f
1442
14432:
91447636 1444 movl %gs:CPU_KERNEL_STACK,%ebx
1c79356b
A
1445 /* get current kernel stack */
1446 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1447 /* user registers. */
1448
14493:
1450
1451/*
1452 * Register use on entry:
1453 * eax contains syscall number
1454 * ebx contains user regs pointer
1455 */
1456#undef RPC_TRAP_REGISTERS
1457#ifdef RPC_TRAP_REGISTERS
1458 pushl R_ESI(%ebx)
1459 pushl R_EDI(%ebx)
1460 pushl R_ECX(%ebx)
1461 pushl R_EDX(%ebx)
1462#else
1463 movl EXT(mach_trap_table)(%eax),%ecx
1464 /* get number of arguments */
1465 jecxz 2f /* skip argument copy if none */
1466 movl R_UESP(%ebx),%esi /* get user stack pointer */
1467 lea 4(%esi,%ecx,4),%esi /* skip user return address, */
1468 /* and point past last argument */
91447636 1469 movl %gs:CPU_ACTIVE_KLOADED,%edx
1c79356b
A
1470 /* point to current thread */
1471 orl %edx,%edx /* if ! kernel-loaded, check addr */
1472 jz 4f /* else */
1473 mov %ds,%dx /* kernel data segment access */
1474 jmp 5f
14754:
1476 cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */
1477 ja mach_call_addr /* address error if not */
9bccf70c 1478 movl $ USER_DS,%edx /* user data segment access */
1c79356b
A
14795:
1480 mov %dx,%fs
1481 movl %esp,%edx /* save kernel ESP for error recovery */
14821:
1483 subl $4,%esi
1484 RECOVERY_SECTION
1485 RECOVER(mach_call_addr_push)
1486 pushl %fs:(%esi) /* push argument on stack */
1487 loop 1b /* loop for all arguments */
1488#endif
1489
1490/*
1491 * Register use on entry:
91447636
A
1492 * eax contains syscall number << 4
1493 * mach_call_munger is declared regparm(1), so the first arg is %eax
1c79356b
A
1494 */
14952:
55e303ae 1496
91447636 1497 call EXT(mach_call_munger)
55e303ae 1498
1c79356b
A
1499 movl %esp,%ecx /* get kernel stack */
1500 or $(KERNEL_STACK_SIZE-1),%ecx
1501 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1502 movl %eax,R_EAX(%esp) /* save return value */
1503 jmp EXT(return_from_trap) /* return to user */
1504
1505
1506/*
1507 * Special system call entry for "int 0x80", which has the "eflags"
1508 * register saved at the right place already.
1509 * Fall back to the common syscall path after saving the registers.
1510 *
1511 * esp -> old eip
1512 * old cs
1513 * old eflags
1514 * old esp if trapped from user
1515 * old ss if trapped from user
1516 *
1517 * XXX: for the moment, we don't check for int 0x80 from kernel mode.
1518 */
1519Entry(syscall_int80)
1520 pushl %eax /* save system call number */
1521 pushl $0 /* clear trap number slot */
1522
1523 pusha /* save the general registers */
1524 pushl %ds /* and the segment registers */
1525 pushl %es
1526 pushl %fs
1527 pushl %gs
1528
1529 mov %ss,%dx /* switch to kernel data segment */
1530 mov %dx,%ds
1531 mov %dx,%es
91447636 1532 mov $ CPU_DATA_GS,%dx
1c79356b
A
1533 mov %dx,%gs
1534
1535 jmp syscall_entry_3
1536
1537/*
1538 * System call enters through a call gate. Flags are not saved -
1539 * we must shuffle stack to look like trap save area.
1540 *
1541 * esp-> old eip
1542 * old cs
1543 * old esp
1544 * old ss
1545 *
1546 * eax contains system call number.
1547 *
 * NB: below use of CPU_NUMBER assumes that macro will use correct
 * segment register for any kernel data accesses.
1550 */
1551Entry(syscall)
1552syscall_entry:
1553 pushf /* save flags as soon as possible */
1554syscall_entry_2:
1555 pushl %eax /* save system call number */
1556 pushl $0 /* clear trap number slot */
1557
1558 pusha /* save the general registers */
1559 pushl %ds /* and the segment registers */
1560 pushl %es
1561 pushl %fs
1562 pushl %gs
1563
1564 mov %ss,%dx /* switch to kernel data segment */
1565 mov %dx,%ds
1566 mov %dx,%es
91447636 1567 mov $ CPU_DATA_GS,%dx
1c79356b
A
1568 mov %dx,%gs
1569
1570/*
1571 * Shuffle eflags,eip,cs into proper places
1572 */
1573
1574 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
1575 movl R_CS(%esp),%ecx /* eip is in CS slot */
1576 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
1577 movl %ecx,R_EIP(%esp) /* fix eip */
1578 movl %edx,R_CS(%esp) /* fix cs */
1579 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
1580
1581syscall_entry_3:
1c79356b
A
1582/*
1583 * Check here for syscall from kernel-loaded task --
1584 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
1585 * so transfer the stack frame into the PCB explicitly, then
1586 * start running on resulting "PCB stack". We have to set
1587 * up a simulated "uesp" manually, since there's none in the
1588 * frame.
1589 */
91447636 1590 cmpl $0,%gs:CPU_ACTIVE_KLOADED
1c79356b
A
1591 jz 0f
1592 CAH(scstart)
91447636
A
1593 movl %gs:CPU_ACTIVE_KLOADED,%ebx
1594 movl %gs:CPU_KERNEL_STACK,%edx
1c79356b
A
1595 xchgl %edx,%esp
1596 FRAME_STACK_TO_PCB(%ebx,%edx)
1597 CAH(scend)
1598 TIME_TRAP_UENTRY
1c79356b
A
1599 jmp 1f
1600
16010:
1602 TIME_TRAP_UENTRY
1603
91447636 1604 movl %gs:CPU_KERNEL_STACK,%ebx
1c79356b
A
1605 /* get current kernel stack */
1606 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1607 /* user registers. */
1608 /* user regs pointer already set */
1609
1c79356b
A
1610/*
1611 * Native system call.
1612 * Register use on entry:
1613 * eax contains syscall number
1614 * ebx points to user regs
1615 */
91447636 16161:
1c79356b
A
1617 negl %eax /* get system call number */
1618 jl mach_call_range /* out of range if it was positive */
1619
1620 cmpl EXT(mach_trap_count),%eax /* check system call table bounds */
1621 jg mach_call_range /* error if out of range */
1622 shll $4,%eax /* manual indexing */
1623
1624 movl EXT(mach_trap_table)+4(%eax),%edx
1625 /* get procedure */
9bccf70c
A
1626 cmpl $ EXT(kern_invalid),%edx /* if not "kern_invalid" */
1627 jne do_native_call /* go on with Mach syscall */
1c79356b
A
1628 shrl $4,%eax /* restore syscall number */
1629 jmp mach_call_range /* try it as a "server" syscall */
1630
1c79356b
A
1631/*
1632 * Register use on entry:
1633 * eax contains syscall number
1634 * ebx contains user regs pointer
1635 */
1636do_native_call:
1637 movl EXT(mach_trap_table)(%eax),%ecx
1638 /* get number of arguments */
1639 jecxz mach_call_call /* skip argument copy if none */
1640 movl R_UESP(%ebx),%esi /* get user stack pointer */
1641 lea 4(%esi,%ecx,4),%esi /* skip user return address, */
1642 /* and point past last argument */
91447636 1643 movl %gs:CPU_ACTIVE_KLOADED,%edx
1c79356b
A
1644 /* point to current thread */
1645 orl %edx,%edx /* if kernel-loaded, skip addr check */
1646 jz 0f /* else */
1647 mov %ds,%dx /* kernel data segment access */
1648 jmp 1f
16490:
1650 cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */
1651 ja mach_call_addr /* address error if not */
9bccf70c 1652 movl $ USER_DS,%edx /* user data segment access */
1c79356b
A
16531:
1654 mov %dx,%fs
1655 movl %esp,%edx /* save kernel ESP for error recovery */
16562:
1657 subl $4,%esi
1658 RECOVERY_SECTION
1659 RECOVER(mach_call_addr_push)
1660 pushl %fs:(%esi) /* push argument on stack */
1661 loop 2b /* loop for all arguments */
1662
1663/*
1664 * Register use on entry:
1665 * eax contains syscall number
1666 * ebx contains user regs pointer
1667 */
1668mach_call_call:
1669
1670 CAH(call_call)
1671
1672#if ETAP_EVENT_MONITOR
1673 cmpl $0x200, %eax /* is this mach_msg? */
1674 jz make_syscall /* if yes, don't record event */
1675
1676 pushal /* Otherwise: save registers */
1677 pushl %eax /* push syscall number on stack*/
1678 call EXT(etap_machcall_probe1) /* call event begin probe */
1679 add $4,%esp /* restore stack */
1680 popal /* restore registers */
1681
1682 call *EXT(mach_trap_table)+4(%eax) /* call procedure */
1683 pushal
1684 call EXT(etap_machcall_probe2) /* call event end probe */
1685 popal
1686 jmp skip_syscall /* syscall already made */
1687#endif /* ETAP_EVENT_MONITOR */
1688
1689make_syscall:
55e303ae 1690
91447636
A
1691/*
1692 * mach_call_munger is declared regparm(1) so the first arg is %eax
1693 */
1694 call EXT(mach_call_munger)
55e303ae 1695
1c79356b
A
1696skip_syscall:
1697
1698 movl %esp,%ecx /* get kernel stack */
1699 or $(KERNEL_STACK_SIZE-1),%ecx
1700 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
1701 movl %eax,R_EAX(%esp) /* save return value */
1702 jmp EXT(return_from_trap) /* return to user */
1703
1704/*
1705 * Address out of range. Change to page fault.
1706 * %esi holds failing address.
1707 * Register use on entry:
1708 * ebx contains user regs pointer
1709 */
1710mach_call_addr_push:
1711 movl %edx,%esp /* clean parameters from stack */
1712mach_call_addr:
1713 movl %esi,R_CR2(%ebx) /* set fault address */
1714 movl $(T_PAGE_FAULT),R_TRAPNO(%ebx)
1715 /* set page-fault trap */
1716 movl $(T_PF_USER),R_ERR(%ebx)
1717 /* set error code - read user space */
1718 CAH(call_addr)
1719 jmp EXT(take_trap) /* treat as a trap */
1720
1c79356b
A
1721/*
1722 * System call out of range. Treat as invalid-instruction trap.
1723 * (? general protection?)
1724 * Register use on entry:
1725 * eax contains syscall number
1726 */
1727mach_call_range:
1c79356b
A
1728 push %eax
1729 movl %esp,%edx
1730 push $1 /* code_cnt = 1 */
1731 push %edx /* exception_type_t (see i/f docky) */
9bccf70c 1732 push $ EXC_SYSCALL
1c79356b 1733 CAH(call_range)
91447636 1734 call EXT(exception_triage)
1c79356b
A
1735 /* no return */
1736
1737 .globl EXT(syscall_failed)
1738LEXT(syscall_failed)
1739 movl %esp,%ecx /* get kernel stack */
1740 or $(KERNEL_STACK_SIZE-1),%ecx
1741 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
91447636 1742 movl %gs:CPU_KERNEL_STACK,%ebx
1c79356b
A
1743 /* get current kernel stack */
1744 xchgl %ebx,%esp /* switch stacks - %ebx points to */
1745 /* user registers. */
1746 /* user regs pointer already set */
1747
1748 movl $(T_INVALID_OPCODE),R_TRAPNO(%ebx)
1749 /* set invalid-operation trap */
1750 movl $0,R_ERR(%ebx) /* clear error code */
1751 CAH(failed)
1752 jmp EXT(take_trap) /* treat as a trap */
1753
1c79356b
A
1754/*\f*/
1755/*
1756 * Utility routines.
1757 */
1758
1759
1760/*
1761 * Copy from user address space.
1762 * arg0: user address
1763 * arg1: kernel address
1764 * arg2: byte count
1765 */
1766Entry(copyinmsg)
1767ENTRY(copyin)
1768 pushl %esi
1769 pushl %edi /* save registers */
1770
1771 movl 8+S_ARG0,%esi /* get user start address */
1772 movl 8+S_ARG1,%edi /* get kernel destination address */
1773 movl 8+S_ARG2,%edx /* get count */
1774
1775 lea 0(%esi,%edx),%eax /* get user end address + 1 */
1776
91447636 1777 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get active thread */
1c79356b
A
1778 movl ACT_MAP(%ecx),%ecx /* get act->map */
1779 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
1780 cmpl EXT(kernel_pmap), %ecx
1781 jz 1f
9bccf70c 1782 movl $ USER_DS,%cx /* user data segment access */
1c79356b
A
1783 mov %cx,%ds
17841:
1785 cmpl %esi,%eax
1786 jb copyin_fail /* fail if wrap-around */
1787 cld /* count up */
1788 movl %edx,%ecx /* move by longwords first */
1789 shrl $2,%ecx
1790 RECOVERY_SECTION
1791 RECOVER(copyin_fail)
1792 rep
1793 movsl /* move longwords */
1794 movl %edx,%ecx /* now move remaining bytes */
1795 andl $3,%ecx
1796 RECOVERY_SECTION
1797 RECOVER(copyin_fail)
1798 rep
1799 movsb
1800 xorl %eax,%eax /* return 0 for success */
1801copy_ret:
1802 mov %ss,%di /* restore kernel data segment */
1803 mov %di,%ds
1804
1805 popl %edi /* restore registers */
1806 popl %esi
1807 ret /* and return */
1808
1809copyin_fail:
9bccf70c 1810 movl $ EFAULT,%eax /* return error for failure */
1c79356b
A
1811 jmp copy_ret /* pop frame and return */
1812
1813/*
1814 * Copy string from user address space.
1815 * arg0: user address
1816 * arg1: kernel address
1817 * arg2: max byte count
1818 * arg3: actual byte count (OUT)
1819 */
1820Entry(copyinstr)
1821 pushl %esi
1822 pushl %edi /* save registers */
1823
1824 movl 8+S_ARG0,%esi /* get user start address */
1825 movl 8+S_ARG1,%edi /* get kernel destination address */
1826 movl 8+S_ARG2,%edx /* get count */
1827
1828 lea 0(%esi,%edx),%eax /* get user end address + 1 */
1829
91447636 1830 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get active thread */
1c79356b
A
1831 movl ACT_MAP(%ecx),%ecx /* get act->map */
1832 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
1833 cmpl EXT(kernel_pmap), %ecx
1834 jne 0f
1835 mov %ds,%cx /* kernel data segment access */
1836 jmp 1f
18370:
9bccf70c 1838 movl $ USER_DS,%cx /* user data segment access */
1c79356b
A
18391:
1840 mov %cx,%fs
1841 xorl %eax,%eax
1842 cmpl $0,%edx
1843 je 4f
18442:
1845 RECOVERY_SECTION
1846 RECOVER(copystr_fail) /* copy bytes... */
1847 movb %fs:(%esi),%eax
1848 incl %esi
1849 testl %edi,%edi /* if kernel address is ... */
1850 jz 3f /* not NULL */
1851 movb %eax,(%edi) /* copy the byte */
1852 incl %edi
18533:
1854 decl %edx
1855 je 5f /* Zero count.. error out */
1856 cmpl $0,%eax
1857 jne 2b /* .. a NUL found? */
55e303ae 1858 jmp 4f /* return zero (%eax) */
1c79356b 18595:
9bccf70c 1860 movl $ ENAMETOOLONG,%eax /* String is too long.. */
1c79356b 18614:
1c79356b
A
1862 movl 8+S_ARG3,%edi /* get OUT len ptr */
1863 cmpl $0,%edi
1864 jz copystr_ret /* if null, just return */
1865 subl 8+S_ARG0,%esi
1866 movl %esi,(%edi) /* else set OUT arg to xfer len */
1867copystr_ret:
1868 popl %edi /* restore registers */
1869 popl %esi
1870 ret /* and return */
1871
1872copystr_fail:
1873 movl $ EFAULT,%eax /* return error for failure */
1874 jmp copy_ret /* pop frame and return */
1875
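/*
 * Illustrative sketch (not from the original source; names are hypothetical):
 * calling copyinstr() as implemented above. It returns 0, EFAULT, or
 * ENAMETOOLONG, and the OUT count, as computed above from the advanced
 * source pointer, includes the terminating NUL byte.
 *
 *	extern int copyinstr(const char *user, char *kernel,
 *			     unsigned int maxlen, unsigned int *actual);
 *
 *	int
 *	fetch_user_path(const char *upath, char *kpath, unsigned int kpathsz)
 *	{
 *		unsigned int copied;
 *		int err = copyinstr(upath, kpath, kpathsz, &copied);
 *		// On success, kpath[copied - 1] is the NUL terminator.
 *		return err;
 *	}
 */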
1876/*
1877 * Copy to user address space.
1878 * arg0: kernel address
1879 * arg1: user address
1880 * arg2: byte count
1881 */
1882Entry(copyoutmsg)
1883ENTRY(copyout)
1884 pushl %esi
1885 pushl %edi /* save registers */
1886 pushl %ebx
1887
1888 movl 12+S_ARG0,%esi /* get kernel start address */
1889 movl 12+S_ARG1,%edi /* get user start address */
1890 movl 12+S_ARG2,%edx /* get count */
1891
1892 leal 0(%edi,%edx),%eax /* get user end address + 1 */
1893
1894 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get active thread */
1895 movl ACT_MAP(%ecx),%ecx /* get act->map */
1896 movl MAP_PMAP(%ecx),%ecx /* get map->pmap */
1897 cmpl EXT(kernel_pmap), %ecx
1898 jne 0f
1899 mov %ds,%cx /* else kernel data segment access */
1900 jmp 1f
19010:
1902 movl $ USER_DS,%cx
19031:
1904 mov %cx,%es
1905
1906/*
1907 * Check whether user address space is writable
1908 * before writing to it - the hardware (i386) does not fault supervisor-mode writes to read-only user pages.
1909 *
1910 * Skip check if "user" address is really in
1911 * kernel space (i.e., if it's in a kernel-loaded
1912 * task).
1913 *
1914 * Register usage:
1915 * esi/edi source/dest pointers for rep/mov
1916 * ecx counter for rep/mov
1917 * edx counts down from 3rd arg
1918 * eax count of bytes for each (partial) page copy
1919 * ebx shadows edi, used to adjust edx
1920 */
1921 movl %edi,%ebx /* copy edi for syncing up */
1922copyout_retry:
1923 /* if restarting after a partial copy, put edx back in sync, */
1924 addl %ebx,%edx /* edx -= (edi - ebx); */
1925 subl %edi,%edx /* i.e. edx += (ebx - edi) */
1926 movl %edi,%ebx /* ebx = edi; */
1927
1928/*
1929 * Copy only what fits on the current destination page.
1930 * Check for write-fault again on the next page.
1931 */
1932 leal NBPG(%edi),%eax /* point to */
1933 andl $(-NBPG),%eax /* start of next page */
1934 subl %edi,%eax /* get number of bytes to that point */
1935 cmpl %edx,%eax /* bigger than count? */
1936 jle 1f /* if so, */
1937 movl %edx,%eax /* use count */
19381:
1939 cld /* count up */
1940 movl %eax,%ecx /* move by longwords first */
1941 shrl $2,%ecx
1942 RECOVERY_SECTION
1943 RECOVER(copyout_fail)
1944 RETRY_SECTION
1945 RETRY(copyout_retry)
1946 rep
1947 movsl
1948 movl %eax,%ecx /* now move remaining bytes */
1949 andl $3,%ecx
1950 RECOVERY_SECTION
1951 RECOVER(copyout_fail)
1952 RETRY_SECTION
1953 RETRY(copyout_retry)
1954 rep
1955 movsb /* move */
1956 movl %edi,%ebx /* copy edi for syncing up */
1957 subl %eax,%edx /* and decrement count */
1958 jg copyout_retry /* restart on next page if not done */
1959 xorl %eax,%eax /* return 0 for success */
1960copyout_ret:
1961 mov %ss,%di /* restore kernel segment */
1962 mov %di,%es
1963
1964 popl %ebx
1965 popl %edi /* restore registers */
1966 popl %esi
1967 ret /* and return */
1968
1969copyout_fail:
1970 movl $ EFAULT,%eax /* return error for failure */
1971 jmp copyout_ret /* pop frame and return */
1972
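/*
 * Illustrative sketch (not from the original source): the page-at-a-time
 * chunking that copyout uses above, restated in C. Each pass copies at most
 * up to the next destination page boundary, so a write fault can be handled
 * and the copy resumed page by page. copy_one_chunk() is a hypothetical
 * stand-in for the rep/movs sequence.
 *
 *	#define NBPG 4096
 *
 *	extern void copy_one_chunk(const char *src, char *dst, unsigned int len);
 *
 *	void
 *	copyout_chunked(const char *src, char *dst, unsigned int count)
 *	{
 *		while (count > 0) {
 *			// bytes from dst up to the next page boundary
 *			unsigned int chunk = NBPG - ((unsigned long)dst & (NBPG - 1));
 *			if (chunk > count)
 *				chunk = count;
 *			copy_one_chunk(src, dst, chunk);	// may fault and restart
 *			src += chunk; dst += chunk; count -= chunk;
 *		}
 *	}
 */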
1973/*
1974 * FPU routines.
1975 */
1976
1977/*
1978 * Initialize FPU.
1979 */
1980ENTRY(_fninit)
1981 fninit
1982 ret
1983
1984/*
1985 * Read control word
1986 */
1987ENTRY(_fstcw)
1988 pushl %eax /* get stack space */
1989 fstcw (%esp)
1990 popl %eax
1991 ret
1992
1993/*
1994 * Set control word
1995 */
1996ENTRY(_fldcw)
1997 fldcw 4(%esp)
1998 ret
1999
2000/*
2001 * Read status word
2002 */
2003ENTRY(_fnstsw)
2004 xor %eax,%eax /* clear high 16 bits of eax */
2005 fnstsw %ax /* read FP status */
2006 ret
2007
2008/*
2009 * Clear FPU exceptions
2010 */
2011ENTRY(_fnclex)
2012 fnclex
2013 ret
2014
2015/*
2016 * Clear task-switched flag.
2017 */
2018ENTRY(_clts)
2019 clts
2020 ret
2021
2022/*
2023 * Save complete FPU state. Save error for later.
2024 */
2025ENTRY(_fpsave)
2026 movl 4(%esp),%eax /* get save area pointer */
2027 fnsave (%eax) /* save complete state, including */
2028 /* errors */
2029 ret
2030
2031/*
2032 * Restore FPU state.
2033 */
2034ENTRY(_fprestore)
2035 movl 4(%esp),%eax /* get save area pointer */
2036 frstor (%eax) /* restore complete state */
2037 ret
2038
2039/*
2040 * Set cr3
2041 */
2042ENTRY(set_cr3)
2043 CPU_NUMBER(%eax)
2044 orl 4(%esp), %eax
2045 /*
2046 * Don't set PDBR to a new value (hence invalidating the
2047 * "paging cache") if the new value matches the current one.
2048 */
2049 movl %cr3,%edx /* get current cr3 value */
2050 cmpl %eax,%edx
2051 je 0f /* if two are equal, don't set */
2052 movl %eax,%cr3 /* load it (and flush cache) */
20530:
2054 ret
2055
2056/*
2057 * Read cr3
2058 */
2059ENTRY(get_cr3)
2060 movl %cr3,%eax
2061 andl $(~0x7), %eax /* remove cpu number */
2062 ret
2063
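/*
 * Illustrative sketch (not from the original source): set_cr3/get_cr3 above
 * tag the low bits of the PDBR value with the CPU number and mask them off
 * again on read, and skip the reload when the value is unchanged so the TLB
 * ("paging cache") is not flushed needlessly. In C terms, assuming the CPU
 * number fits in the three bits that get_cr3 masks:
 *
 *	unsigned int
 *	cr3_encode(unsigned int pdbr, unsigned int cpu)	// what set_cr3 loads
 *	{
 *		return pdbr | cpu;
 *	}
 *
 *	unsigned int
 *	cr3_decode(unsigned int cr3)			// what get_cr3 returns
 *	{
 *		return cr3 & ~0x7u;
 *	}
 */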
2064/*
2065 * Flush TLB
2066 */
2067ENTRY(flush_tlb)
2068 movl %cr3,%eax /* flush tlb by reloading CR3 */
2069 movl %eax,%cr3 /* with itself */
2070 ret
2071
2072/*
2073 * Read cr2
2074 */
2075ENTRY(get_cr2)
2076 movl %cr2,%eax
2077 ret
2078
2079/*
2080 * Read cr4
2081 */
2082ENTRY(get_cr4)
2083 .byte 0x0f,0x20,0xe0 /* movl %cr4, %eax */
2084 ret
2085
2086/*
2087 * Write cr4
2088 */
2089ENTRY(set_cr4)
2090 movl 4(%esp), %eax
2091 .byte 0x0f,0x22,0xe0 /* movl %eax, %cr4 */
2092 ret
2093
2094/*
2095 * Read ldtr
2096 */
2097Entry(get_ldt)
2098 xorl %eax,%eax
2099 sldt %ax
2100 ret
2101
2102/*
2103 * Set ldtr
2104 */
2105Entry(set_ldt)
2106 lldt 4(%esp)
2107 ret
2108
2109/*
2110 * Read task register.
2111 */
2112ENTRY(get_tr)
2113 xorl %eax,%eax
2114 str %ax
2115 ret
2116
2117/*
2118 * Set task register. Also clears busy bit of task descriptor.
2119 */
2120ENTRY(set_tr)
2121 movl S_ARG0,%eax /* get task segment number */
2122 subl $8,%esp /* push space for SGDT */
2123 sgdt 2(%esp) /* store GDT limit and base (linear) */
2124 movl 4(%esp),%edx /* address GDT */
2125 movb $(K_TSS),5(%edx,%eax) /* fix access byte in task descriptor */
2126 ltr %ax /* load task register */
2127 addl $8,%esp /* clear stack */
2128 ret /* and return */
2129
2130/*
2131 * Set task-switched flag.
2132 */
2133ENTRY(_setts)
2134 movl %cr0,%eax /* get cr0 */
2135 orl $(CR0_TS),%eax /* or in TS bit */
2136 movl %eax,%cr0 /* set cr0 */
2137 ret
2138
2139/*
2140 * io register must not be used on slaves (no AT bus)
2141 */
2142#define ILL_ON_SLAVE
2143
2144
2145#if MACH_ASSERT
2146
2147#define ARG0 B_ARG0
2148#define ARG1 B_ARG1
2149#define ARG2 B_ARG2
2150#define PUSH_FRAME FRAME
2151#define POP_FRAME EMARF
2152
2153#else /* MACH_ASSERT */
2154
2155#define ARG0 S_ARG0
2156#define ARG1 S_ARG1
2157#define ARG2 S_ARG2
2158#define PUSH_FRAME
2159#define POP_FRAME
2160
2161#endif /* MACH_ASSERT */
2162
2163
2164#if MACH_KDB || MACH_ASSERT
2165
2166/*
2167 * The following routines are also defined as macros in i386/pio.h.
2168 * Compile them when MACH_KDB is configured so that they
2169 * can be invoked from the debugger.
2170 */
2171
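/*
 * Illustrative sketch (not the actual i386/pio.h text): the macro versions of
 * outb()/inb() mentioned above amount to inline assembly along these lines.
 *
 *	static inline void
 *	pio_outb(unsigned short port, unsigned char data)
 *	{
 *		__asm__ volatile ("outb %0, %1" : : "a" (data), "d" (port));
 *	}
 *
 *	static inline unsigned char
 *	pio_inb(unsigned short port)
 *	{
 *		unsigned char data;
 *		__asm__ volatile ("inb %1, %0" : "=a" (data) : "d" (port));
 *		return data;
 *	}
 */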
2172/*
2173 * void outb(unsigned char *io_port,
2174 * unsigned char byte)
2175 *
2176 * Output a byte to an IO port.
2177 */
2178ENTRY(outb)
2179 PUSH_FRAME
2180 ILL_ON_SLAVE
2181 movl ARG0,%edx /* IO port address */
2182 movl ARG1,%eax /* data to output */
2183 outb %al,%dx /* send it out */
2184 POP_FRAME
2185 ret
2186
2187/*
2188 * unsigned char inb(unsigned char *io_port)
2189 *
2190 * Input a byte from an IO port.
2191 */
2192ENTRY(inb)
2193 PUSH_FRAME
2194 ILL_ON_SLAVE
2195 movl ARG0,%edx /* IO port address */
2196 xor %eax,%eax /* clear high bits of register */
2197 inb %dx,%al /* get the byte */
2198 POP_FRAME
2199 ret
2200
2201/*
2202 * void outw(unsigned short *io_port,
2203 * unsigned short word)
2204 *
2205 * Output a word to an IO port.
2206 */
2207ENTRY(outw)
2208 PUSH_FRAME
2209 ILL_ON_SLAVE
2210 movl ARG0,%edx /* IO port address */
2211 movl ARG1,%eax /* data to output */
2212 outw %ax,%dx /* send it out */
2213 POP_FRAME
2214 ret
2215
2216/*
2217 * unsigned short inw(unsigned short *io_port)
2218 *
2219 * Input a word from an IO port.
2220 */
2221ENTRY(inw)
2222 PUSH_FRAME
2223 ILL_ON_SLAVE
2224 movl ARG0,%edx /* IO port address */
2225 xor %eax,%eax /* clear high bits of register */
2226 inw %dx,%ax /* get the word */
2227 POP_FRAME
2228 ret
2229
2230/*
2231 * void outl(unsigned int *io_port,
2232 * unsigned int data)
2233 *
2234 * Output an int to an IO port.
2235 */
2236ENTRY(outl)
2237 PUSH_FRAME
2238 ILL_ON_SLAVE
2239 movl ARG0,%edx /* IO port address*/
2240 movl ARG1,%eax /* data to output */
2241 outl %eax,%dx /* send it out */
2242 POP_FRAME
2243 ret
2244
2245/*
2246 * unsigned int inl(unsigned int *io_port)
2247 *
2248 * Input an int from an IO port.
2249 */
2250ENTRY(inl)
2251 PUSH_FRAME
2252 ILL_ON_SLAVE
2253 movl ARG0,%edx /* IO port address */
2254 inl %dx,%eax /* get the int */
2255 POP_FRAME
2256 ret
2257
2258#endif /* MACH_KDB || MACH_ASSERT*/
2259
2260/*
2261 * void loutb(unsigned byte *io_port,
2262 * unsigned byte *data,
2263 * unsigned int count)
2264 *
2265 * Output an array of bytes to an IO port.
2266 */
2267ENTRY(loutb)
2268ENTRY(outsb)
2269 PUSH_FRAME
2270 ILL_ON_SLAVE
2271 movl %esi,%eax /* save register */
2272 movl ARG0,%edx /* get io port number */
2273 movl ARG1,%esi /* get data address */
2274 movl ARG2,%ecx /* get count */
2275 cld /* count up */
2276 rep
2277 outsb /* output */
2278 movl %eax,%esi /* restore register */
2279 POP_FRAME
2280 ret
2281
2282
2283/*
2284 * void loutw(unsigned short *io_port,
2285 * unsigned short *data,
2286 * unsigned int count)
2287 *
2288 * Output an array of shorts to an IO port.
2289 */
2290ENTRY(loutw)
2291ENTRY(outsw)
2292 PUSH_FRAME
2293 ILL_ON_SLAVE
2294 movl %esi,%eax /* save register */
2295 movl ARG0,%edx /* get io port number */
2296 movl ARG1,%esi /* get data address */
2297 movl ARG2,%ecx /* get count */
2298 cld /* count up */
2299 rep
2300 outsw /* output */
2301 movl %eax,%esi /* restore register */
2302 POP_FRAME
2303 ret
2304
2305/*
2306 * void loutl(unsigned short io_port,
2307 * unsigned int *data,
2308 * unsigned int count)
2309 *
2310 * Output an array of longs to an IO port.
2311 */
2312ENTRY(loutl)
2313ENTRY(outsl)
2314 PUSH_FRAME
2315 ILL_ON_SLAVE
2316 movl %esi,%eax /* save register */
2317 movl ARG0,%edx /* get io port number */
2318 movl ARG1,%esi /* get data address */
2319 movl ARG2,%ecx /* get count */
2320 cld /* count up */
2321 rep
2322 outsl /* output */
2323 movl %eax,%esi /* restore register */
2324 POP_FRAME
2325 ret
2326
2327
2328/*
2329 * void linb(unsigned char *io_port,
2330 * unsigned char *data,
2331 * unsigned int count)
2332 *
2333 * Input an array of bytes from an IO port.
2334 */
2335ENTRY(linb)
2336ENTRY(insb)
2337 PUSH_FRAME
2338 ILL_ON_SLAVE
2339 movl %edi,%eax /* save register */
2340 movl ARG0,%edx /* get io port number */
2341 movl ARG1,%edi /* get data address */
2342 movl ARG2,%ecx /* get count */
2343 cld /* count up */
2344 rep
2345 insb /* input */
2346 movl %eax,%edi /* restore register */
2347 POP_FRAME
2348 ret
2349
2350
2351/*
2352 * void linw(unsigned short *io_port,
2353 * unsigned short *data,
2354 * unsigned int count)
2355 *
2356 * Input an array of shorts from an IO port.
2357 */
2358ENTRY(linw)
2359ENTRY(insw)
2360 PUSH_FRAME
2361 ILL_ON_SLAVE
2362 movl %edi,%eax /* save register */
2363 movl ARG0,%edx /* get io port number */
2364 movl ARG1,%edi /* get data address */
2365 movl ARG2,%ecx /* get count */
2366 cld /* count up */
2367 rep
2368 insw /* input */
2369 movl %eax,%edi /* restore register */
2370 POP_FRAME
2371 ret
2372
2373
2374/*
2375 * void linl(unsigned short io_port,
2376 * unsigned int *data,
2377 * unsigned int count)
2378 *
2379 * Input an array of longs from an IO port.
2380 */
2381ENTRY(linl)
2382ENTRY(insl)
2383 PUSH_FRAME
2384 ILL_ON_SLAVE
2385 movl %edi,%eax /* save register */
2386 movl ARG0,%edx /* get io port number */
2387 movl ARG1,%edi /* get data address */
2388 movl ARG2,%ecx /* get count */
2389 cld /* count up */
2390 rep
2391 insl /* input */
2392 movl %eax,%edi /* restore register */
2393 POP_FRAME
2394 ret
2395
2396
2397/*
2398 * int inst_fetch(int eip, int cs);
2399 *
2400 * Fetch instruction byte. Return -1 if invalid address.
2401 */
2402 .globl EXT(inst_fetch)
2403LEXT(inst_fetch)
2404 movl S_ARG1, %eax /* get segment */
2405 movw %ax,%fs /* into FS */
2406 movl S_ARG0, %eax /* get offset */
2407 RETRY_SECTION
2408 RETRY(EXT(inst_fetch)) /* re-load FS on retry */
2409 RECOVERY_SECTION
2410 RECOVER(EXT(inst_fetch_fault))
2411 movzbl %fs:(%eax),%eax /* load instruction byte */
2412 ret
2413
2414LEXT(inst_fetch_fault)
2415 movl $-1,%eax /* return -1 if error */
2416 ret
2417
2418
2419#if MACH_KDP
2420/*
2421 * kdp_copy_kmem(char *src, char *dst, int count)
2422 *
2423 * Similar to copyin except that both addresses are kernel addresses.
2424 */
2425
2426ENTRY(kdp_copy_kmem)
2427 pushl %esi
2428 pushl %edi /* save registers */
2429
2430 movl 8+S_ARG0,%esi /* get kernel start address */
2431 movl 8+S_ARG1,%edi /* get kernel destination address */
2432
2433 movl 8+S_ARG2,%edx /* get count */
2434
2435 lea 0(%esi,%edx),%eax /* get kernel end address + 1 */
2436
2437 cmpl %esi,%eax
2438 jb kdp_vm_read_fail /* fail if wrap-around */
2439 cld /* count up */
2440 movl %edx,%ecx /* move by longwords first */
2441 shrl $2,%ecx
2442 RECOVERY_SECTION
2443 RECOVER(kdp_vm_read_fail)
2444 rep
2445 movsl /* move longwords */
2446 movl %edx,%ecx /* now move remaining bytes */
2447 andl $3,%ecx
2448 RECOVERY_SECTION
2449 RECOVER(kdp_vm_read_fail)
2450 rep
2451 movsb
2452kdp_vm_read_done:
2453 movl 8+S_ARG2,%edx /* get count */
2454 subl %ecx,%edx /* Return number of bytes transferred */
2455 movl %edx,%eax
2456
2457 popl %edi /* restore registers */
2458 popl %esi
2459 ret /* and return */
2460
2461kdp_vm_read_fail:
2462 xorl %eax,%eax /* didn't copy a thing. */
2463
2464 popl %edi
2465 popl %esi
2466 ret
2467#endif
2468
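/*
 * Illustrative sketch (not from the original source): unlike copyin,
 * kdp_copy_kmem() above returns the number of bytes copied (zero if the
 * copy faulted) rather than an errno, so a debugger-side caller checks
 * the count.
 *
 *	extern int kdp_copy_kmem(char *src, char *dst, int count);
 *
 *	static int
 *	debugger_read(char *src, char *dst, int count)
 *	{
 *		return (kdp_copy_kmem(src, dst, count) == count);	// 1 == full read
 *	}
 */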
2469/*
2470 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
2471 */
2472ENTRY(rdmsr_carefully)
2473 movl S_ARG0, %ecx
2474 RECOVERY_SECTION
2475 RECOVER(rdmsr_fail)
2476 rdmsr
2477 movl S_ARG1, %ecx
2478 movl %eax, (%ecx)
2479 movl S_ARG2, %ecx
2480 movl %edx, (%ecx)
2481 movl $0, %eax
2482 ret
2483
2484rdmsr_fail:
2485 movl $1, %eax
2486 ret
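/*
 * Illustrative sketch (not from the original source): rdmsr_carefully()
 * returns 0 and fills in the two output words when the MSR can be read, or
 * 1 if the rdmsr instruction faulted, so callers can probe for an MSR
 * without taking an unrecoverable trap.
 *
 *	#include <stdint.h>
 *
 *	extern int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi);
 *
 *	static int
 *	msr_present(uint32_t msr)
 *	{
 *		uint32_t lo, hi;
 *		return (rdmsr_carefully(msr, &lo, &hi) == 0);
 *	}
 */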
2487
2488/*
2489 * Done with recovery and retry tables.
2490 */
2491 RECOVERY_SECTION
2492 RECOVER_TABLE_END
2493 RETRY_SECTION
2494 RETRY_TABLE_END
2495
2496
2497
2498ENTRY(dr6)
2499 movl %db6, %eax
2500 ret
2501
2502/* dr<i>(address, type, len, persistence)
2503 */
2504ENTRY(dr0)
2505 movl S_ARG0, %eax
2506 movl %eax,EXT(dr_addr)
2507 movl %eax, %db0
2508 movl $0, %ecx
2509 jmp 0f
2510ENTRY(dr1)
2511 movl S_ARG0, %eax
2512 movl %eax,EXT(dr_addr)+1*4
2513 movl %eax, %db1
2514 movl $2, %ecx
2515 jmp 0f
2516ENTRY(dr2)
2517 movl S_ARG0, %eax
2518 movl %eax,EXT(dr_addr)+2*4
2519 movl %eax, %db2
2520 movl $4, %ecx
2521 jmp 0f
2522
2523ENTRY(dr3)
2524 movl S_ARG0, %eax
2525 movl %eax,EXT(dr_addr)+3*4
2526 movl %eax, %db3
2527 movl $6, %ecx
2528
25290:
2530 pushl %ebp
2531 movl %esp, %ebp
2532
2533 movl %db7, %edx
2534 movl %edx,EXT(dr_addr)+4*4
2535 andl dr_msk(,%ecx,2),%edx /* clear out new entry */
2536 movl %edx,EXT(dr_addr)+5*4
2537 movzbl B_ARG3, %eax
2538 andb $3, %al
2539 shll %cl, %eax
2540 orl %eax, %edx
2541
2542 movzbl B_ARG1, %eax
2543 andb $3, %al
2544 addb $0x10, %ecx
2545 shll %cl, %eax
2546 orl %eax, %edx
2547
2548 movzbl B_ARG2, %eax
2549 andb $3, %al
2550 addb $0x2, %ecx
2551 shll %cl, %eax
2552 orl %eax, %edx
2553
2554 movl %edx, %db7
2555 movl %edx,EXT(dr_addr)+7*4
2556 movl %edx, %eax
2557 leave
2558 ret
2559
2560 .data
2561dr_msk:
2562 .long ~0x000f0003
2563 .long ~0x00f0000c
2564 .long ~0x0f000030
2565 .long ~0xf00000c0
2566ENTRY(dr_addr)
2567 .long 0,0,0,0
2568 .long 0,0,0,0
2569 .text
2570
2571ENTRY(get_cr0)
2572 movl %cr0, %eax
2573 ret
2574
2575ENTRY(set_cr0)
2576 movl 4(%esp), %eax
2577 movl %eax, %cr0
2578 ret
2579
2580#ifndef SYMMETRY
2581
2582/*
2583 * ffs(mask)
2584 */
2585ENTRY(ffs)
2586 bsfl S_ARG0, %eax
2587 jz 0f
2588 incl %eax
2589 ret
25900: xorl %eax, %eax
2591 ret
2592
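/*
 * Illustrative sketch (not from the original source): ffs() above follows the
 * usual BSD convention; the result is 1-based and ffs(0) is 0.
 *
 *	#include <assert.h>
 *
 *	extern int ffs(int mask);
 *
 *	static void
 *	ffs_examples(void)
 *	{
 *		assert(ffs(0) == 0);			// no bit set
 *		assert(ffs(0x1) == 1);			// result is 1-based
 *		assert(ffs(0x8) == 4);
 *		assert(ffs((int)0x80000000) == 32);	// highest bit
 *	}
 */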
2593/*
2594 * cpu_shutdown()
2595 * Force reboot
2596 */
2597
2598null_idtr:
2599 .word 0
2600 .long 0
2601
2602Entry(cpu_shutdown)
2603 lidt null_idtr /* disable the interrupt handler */
2604 xor %ecx,%ecx /* generate a divide by zero */
2605 div %ecx,%eax /* reboot now */
2606 ret /* this will "never" be executed */
2607
2608#endif /* SYMMETRY */
2609
2610
2611/*
2612 * setbit(int bitno, int *s) - set bit in bit string
2613 */
2614ENTRY(setbit)
2615 movl S_ARG0, %ecx /* bit number */
2616 movl S_ARG1, %eax /* address */
2617 btsl %ecx, (%eax) /* set bit */
2618 ret
2619
2620/*
2621 * clrbit(int bitno, int *s) - clear bit in bit string
2622 */
2623ENTRY(clrbit)
2624 movl S_ARG0, %ecx /* bit number */
2625 movl S_ARG1, %eax /* address */
2626 btrl %ecx, (%eax) /* clear bit */
2627 ret
2628
2629/*
2630 * ffsbit(int *s) - find first set bit in bit string
2631 */
2632ENTRY(ffsbit)
2633 movl S_ARG0, %ecx /* address */
2634 movl $0, %edx /* base offset */
26350:
2636 bsfl (%ecx), %eax /* check argument bits */
2637 jnz 1f /* found bit, return */
2638 addl $4, %ecx /* increment address */
2639 addl $32, %edx /* increment offset */
2640 jmp 0b /* try again */
26411:
2642 addl %edx, %eax /* return offset */
2643 ret
2644
2645/*
2646 * testbit(int nr, volatile void *array)
2647 *
2648 * Test to see if the bit is set within the bit string
2649 */
2650
2651ENTRY(testbit)
2652 movl S_ARG0,%eax /* Get the bit to test */
2653 movl S_ARG1,%ecx /* get the array string */
2654 btl %eax,(%ecx)
2655 sbbl %eax,%eax
2656 ret
2657
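/*
 * Illustrative sketch (not from the original source): C equivalents of the
 * bit-string operations above. Bits are numbered from bit 0 of s[0];
 * testbit() returns -1 (via the btl/sbbl idiom) when the bit is set and 0
 * otherwise; ffsbit() assumes at least one bit is set somewhere in the
 * string, just as the assembly loop does.
 *
 *	void setbit(int bitno, int *s)	{ s[bitno >> 5] |=  (1 << (bitno & 31)); }
 *	void clrbit(int bitno, int *s)	{ s[bitno >> 5] &= ~(1 << (bitno & 31)); }
 *	int  testbit(int bitno, int *s)	{ return ((s[bitno >> 5] >> (bitno & 31)) & 1) ? -1 : 0; }
 *
 *	int
 *	ffsbit(int *s)				// 0-based index of first set bit
 *	{
 *		int base = 0;
 *		while (*s == 0) { s++; base += 32; }
 *		return base + (ffs(*s) - 1);	// ffs() as defined above
 *	}
 */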
2658ENTRY(get_pc)
2659 movl 4(%ebp),%eax
2660 ret
2661
2662#if ETAP
2663
2664ENTRY(etap_get_pc)
2665 movl 4(%ebp), %eax /* fetch pc of caller */
2666 ret
2667
2668ENTRY(tvals_to_etap)
2669 movl S_ARG0, %eax
2670 movl $1000000000, %ecx
2671 mull %ecx
2672 addl S_ARG1, %eax
2673 adc $0, %edx
2674 ret
2675
2676/* etap_time_t
2677 * etap_time_sub(etap_time_t stop, etap_time_t start)
2678 *
2679 * 64bit subtract, returns stop - start
2680 */
2681ENTRY(etap_time_sub)
2682 movl S_ARG0, %eax /* stop.low */
2683 movl S_ARG1, %edx /* stop.hi */
2684 subl S_ARG2, %eax /* stop.lo - start.lo */
2685 sbbl S_ARG3, %edx /* stop.hi - start.hi */
2686 ret
2687
2688#endif /* ETAP */
2689
2690ENTRY(minsecurity)
2691 pushl %ebp
2692 movl %esp,%ebp
2693/*
2694 * jail: set the EIP to "jail" to block a kernel thread.
2695 * Useful to debug synchronization problems on MPs.
2696 */
2697ENTRY(jail)
2698 jmp EXT(jail)
2699
2700/*
2701 * unsigned int
2702 * div_scale(unsigned int dividend,
2703 * unsigned int divisor,
2704 * unsigned int *scale)
2705 *
2706 * This function returns (dividend << *scale) / divisor where *scale
2707 * is the largest possible value before overflow. This is used in
2708 * computation where precision must be achieved in order to avoid
2709 * floating point usage.
2710 *
2711 * Algorithm:
2712 * *scale = 0;
2713 * while (((dividend >> *scale) >= divisor))
2714 * (*scale)++;
2715 * *scale = 32 - *scale;
2716 * return ((dividend << *scale) / divisor);
2717 */
2718ENTRY(div_scale)
2719 PUSH_FRAME
2720 xorl %ecx, %ecx /* *scale = 0 */
2721 xorl %eax, %eax
2722 movl ARG0, %edx /* get dividend */
27230:
2724 cmpl ARG1, %edx /* if (divisor > dividend) */
2725 jle 1f /* goto 1f */
2726 addl $1, %ecx /* (*scale)++ */
2727 shrdl $1, %edx, %eax /* dividend >> 1 */
2728 shrl $1, %edx /* dividend >> 1 */
2729 jmp 0b /* goto 0b */
27301:
2731 divl ARG1 /* (dividend << (32 - *scale)) / divisor */
2732 movl ARG2, %edx /* get scale */
2733 movl $32, (%edx) /* *scale = 32 */
2734 subl %ecx, (%edx) /* *scale -= %ecx */
2735 POP_FRAME
2736 ret
2737
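/*
 * Illustrative sketch (not from the original source): the div_scale()
 * algorithm from the comment above as plain C, using a 64-bit intermediate
 * in place of the edx:eax register pair (with an explicit bound on the shift,
 * since shifting a 32-bit value by 32 is undefined in C).
 *
 *	#include <stdint.h>
 *
 *	unsigned int
 *	div_scale_c(unsigned int dividend, unsigned int divisor, unsigned int *scale)
 *	{
 *		unsigned int shift = 0;
 *		while (shift < 32 && (dividend >> shift) >= divisor)
 *			shift++;
 *		*scale = 32 - shift;
 *		return (unsigned int)(((uint64_t)dividend << *scale) / divisor);
 *	}
 */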
2738/*
2739 * unsigned int
2740 * mul_scale(unsigned int multiplicand,
2741 * unsigned int multiplier,
2742 * unsigned int *scale)
2743 *
2744 * This function returns ((multiplicand * multiplier) >> *scale) where
2745 * scale is the largest possible value before overflow. This is used in
2746 * computation where precision must be achieved in order to avoid
2747 * floating point usage.
2748 *
2749 * Algorithm:
2750 * *scale = 0;
2751 * while (overflow((multiplicand * multiplier) >> *scale))
2752 * (*scale)++;
2753 * return ((multiplicand * multiplier) >> *scale);
2754 */
2755ENTRY(mul_scale)
2756 PUSH_FRAME
2757 xorl %ecx, %ecx /* *scale = 0 */
2758 movl ARG0, %eax /* get multiplicand */
2759 mull ARG1 /* multiplicand * multiplier */
27600:
2761 cmpl $0, %edx /* if (!overflow()) */
2762 je 1f /* goto 1 */
2763 addl $1, %ecx /* (*scale)++ */
2764 shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */
2765 shrl $1, %edx /* (multiplicand * multiplier) >> 1 */
2766 jmp 0b
27671:
2768 movl ARG2, %edx /* get scale */
2769 movl %ecx, (%edx) /* set *scale */
2770 POP_FRAME
2771 ret
2772
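/*
 * Illustrative sketch (not from the original source): mul_scale() from the
 * comment above as plain C; shift the 64-bit product right until it fits in
 * 32 bits and report how far it was shifted.
 *
 *	#include <stdint.h>
 *
 *	unsigned int
 *	mul_scale_c(unsigned int multiplicand, unsigned int multiplier, unsigned int *scale)
 *	{
 *		uint64_t product = (uint64_t)multiplicand * multiplier;
 *		unsigned int shift = 0;
 *		while ((product >> shift) > 0xFFFFFFFFu)	// high word still non-zero
 *			shift++;
 *		*scale = shift;
 *		return (unsigned int)(product >> shift);
 *	}
 */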
1c79356b
A
2773#ifdef MACH_BSD
2774/*
2775 * BSD System call entry point..
2776 */
2777
2778Entry(trap_unix_syscall)
2779trap_unix_addr:
2780 pushf /* save flags as soon as possible */
2781trap_unix_2:
2782 pushl %eax /* save system call number */
2783 pushl $0 /* clear trap number slot */
2784
2785 pusha /* save the general registers */
2786 pushl %ds /* and the segment registers */
2787 pushl %es
2788 pushl %fs
2789 pushl %gs
2790
2791 mov %ss,%dx /* switch to kernel data segment */
2792 mov %dx,%ds
2793 mov %dx,%es
2794 mov $ CPU_DATA_GS,%dx
2795 mov %dx,%gs
2796
2797/*
2798 * Shuffle eflags,eip,cs into proper places
2799 */
2800
2801 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2802 movl R_CS(%esp),%ecx /* eip is in CS slot */
2803 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2804 movl %ecx,R_EIP(%esp) /* fix eip */
2805 movl %edx,R_CS(%esp) /* fix cs */
2806 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2807
2808 TIME_TRAP_UENTRY
2809
2810 negl %eax /* get system call number */
2811 shll $4,%eax /* manual indexing */
2812
2813 movl %gs:CPU_KERNEL_STACK,%ebx
2814 /* get current kernel stack */
2815 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2816 /* user registers. */
2817
2818/*
2819 * Register use on entry:
2820 * eax contains syscall number
2821 * ebx contains user regs pointer
2822 */
2823 CAH(call_call)
2824 pushl %ebx /* Push the regs set onto stack */
2825 call EXT(unix_syscall)
2826 popl %ebx
2827 movl %esp,%ecx /* get kernel stack */
2828 or $(KERNEL_STACK_SIZE-1),%ecx
2829 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2830 movl %eax,R_EAX(%esp) /* save return value */
2831 jmp EXT(return_from_trap) /* return to user */
2832
2833/*
2834 * Entry point for machdep system calls..
2835 */
2836
2837Entry(trap_machdep_syscall)
2838 pushf /* save flags as soon as possible */
2839 pushl %eax /* save system call number */
2840 pushl $0 /* clear trap number slot */
2841
2842 pusha /* save the general registers */
2843 pushl %ds /* and the segment registers */
2844 pushl %es
2845 pushl %fs
2846 pushl %gs
2847
2848 mov %ss,%dx /* switch to kernel data segment */
2849 mov %dx,%ds
2850 mov %dx,%es
2851 mov $ CPU_DATA_GS,%dx
2852 mov %dx,%gs
2853
2854/*
2855 * Shuffle eflags,eip,cs into proper places
2856 */
2857
2858 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2859 movl R_CS(%esp),%ecx /* eip is in CS slot */
2860 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2861 movl %ecx,R_EIP(%esp) /* fix eip */
2862 movl %edx,R_CS(%esp) /* fix cs */
2863 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2864
2865 TIME_TRAP_UENTRY
2866
2867 negl %eax /* get system call number */
2868 shll $4,%eax /* manual indexing */
2869
2870 movl %gs:CPU_KERNEL_STACK,%ebx
2871 /* get current kernel stack */
2872 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2873 /* user registers. */
2874
2875/*
2876 * Register use on entry:
2877 * eax contains syscall number
2878 * ebx contains user regs pointer
2879 */
2880 CAH(call_call)
2881 pushl %ebx
2882 call EXT(machdep_syscall)
2883 popl %ebx
2884 movl %esp,%ecx /* get kernel stack */
2885 or $(KERNEL_STACK_SIZE-1),%ecx
2886 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2887 movl %eax,R_EAX(%esp) /* save return value */
2888 jmp EXT(return_from_trap) /* return to user */
2889
2890Entry(trap_mach25_syscall)
2891 pushf /* save flags as soon as possible */
2892 pushl %eax /* save system call number */
2893 pushl $0 /* clear trap number slot */
2894
2895 pusha /* save the general registers */
2896 pushl %ds /* and the segment registers */
2897 pushl %es
2898 pushl %fs
2899 pushl %gs
2900
2901 mov %ss,%dx /* switch to kernel data segment */
2902 mov %dx,%ds
2903 mov %dx,%es
2904 mov $ CPU_DATA_GS,%dx
2905 mov %dx,%gs
2906
2907/*
2908 * Shuffle eflags,eip,cs into proper places
2909 */
2910
2911 movl R_EIP(%esp),%ebx /* eflags are in EIP slot */
2912 movl R_CS(%esp),%ecx /* eip is in CS slot */
2913 movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */
2914 movl %ecx,R_EIP(%esp) /* fix eip */
2915 movl %edx,R_CS(%esp) /* fix cs */
2916 movl %ebx,R_EFLAGS(%esp) /* fix eflags */
2917
2918 TIME_TRAP_UENTRY
2919
2920 negl %eax /* get system call number */
2921 shll $4,%eax /* manual indexing */
2922
2923 movl %gs:CPU_KERNEL_STACK,%ebx
2924 /* get current kernel stack */
2925 xchgl %ebx,%esp /* switch stacks - %ebx points to */
2926 /* user registers. */
2927
2928/*
2929 * Register use on entry:
2930 * eax contains syscall number
2931 * ebx contains user regs pointer
2932 */
2933 CAH(call_call)
2934 pushl %ebx
2935 call EXT(mach25_syscall)
2936 popl %ebx
2937 movl %esp,%ecx /* get kernel stack */
2938 or $(KERNEL_STACK_SIZE-1),%ecx
2939 movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
2940 movl %eax,R_EAX(%esp) /* save return value */
2941 jmp EXT(return_from_trap) /* return to user */
2942
2943#endif