/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <stat_time.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>

#define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

#include <i386/mp.h>

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl _PTmap,_PTD,_PTDpde
	.set _PTmap,(PTDPTDI << PDESHIFT)
	.set _PTD,_PTmap + (PTDPTDI * NBPG)
	.set _PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl _APTmap,_APTD,_APTDpde
	.set _APTmap,(APTDPTDI << PDESHIFT)
	.set _APTD,_APTmap + (APTDPTDI * NBPG)
	.set _APTDpde,_PTD + (APTDPTDI * PDESIZE)

#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define ETEXT_ADDR (EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define ETEXT_ADDR $ EXT(etext)
#endif

#define CX(addr,reg) addr(,reg,4)

/*
 * The following macros make calls into C code.
 * They dynamically align the stack to 16 bytes.
 * Arguments are moved (not pushed) onto the correctly aligned stack.
 * NOTE: EDI is destroyed in the process, and hence cannot
 * be directly used as a parameter. Users of this macro must
 * independently preserve EDI (a non-volatile) if the routine is
 * intended to be called from C, for instance.
 */

#define CCALL(fn) \
	movl %esp, %edi ;\
	andl $0xFFFFFFF0, %esp ;\
	call EXT(fn) ;\
	movl %edi, %esp

#define CCALL1(fn, arg1) \
	movl %esp, %edi ;\
	subl $4, %esp ;\
	andl $0xFFFFFFF0, %esp ;\
	movl arg1, 0(%esp) ;\
	call EXT(fn) ;\
	movl %edi, %esp

#define CCALL2(fn, arg1, arg2) \
	movl %esp, %edi ;\
	subl $8, %esp ;\
	andl $0xFFFFFFF0, %esp ;\
	movl arg2, 4(%esp) ;\
	movl arg1, 0(%esp) ;\
	call EXT(fn) ;\
	movl %edi, %esp

#define CCALL3(fn, arg1, arg2, arg3) \
	movl %esp, %edi ;\
	subl $12, %esp ;\
	andl $0xFFFFFFF0, %esp ;\
	movl arg3, 8(%esp) ;\
	movl arg2, 4(%esp) ;\
	movl arg1, 0(%esp) ;\
	call EXT(fn) ;\
	movl %edi, %esp
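
/*
 * Illustrative expansion (not part of the build): CCALL1(foo, %ebx),
 * with "foo" standing in for any C routine, emits:
 *
 *    movl %esp, %edi            save the unaligned stack pointer
 *    subl $4, %esp              make room for one argument
 *    andl $0xFFFFFFF0, %esp     force 16-byte alignment
 *    movl %ebx, 0(%esp)         store the argument where C expects it
 *    call EXT(foo)
 *    movl %edi, %esp            restore the original stack pointer
 */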

	.text
locore_start:

/*
 * Fault recovery.
 */

#ifdef __MACHO__
#define RECOVERY_SECTION .section __VECTORS, __recover
#else
#define RECOVERY_SECTION .text
#endif

#define RECOVER_TABLE_START \
	.align 2 ; \
	.globl EXT(recover_table) ;\
LEXT(recover_table) ;\
	.text

#define RECOVER(addr) \
	.align 2; \
	.long 9f ;\
	.long addr ;\
	.text ;\
9:

#define RECOVER_TABLE_END \
	.align 2 ;\
	.globl EXT(recover_table_end) ;\
LEXT(recover_table_end) ;\
	.text

/*
 * Allocate the recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START

/*
 * Timing routines.
 */
Entry(timer_update)
	movl 4(%esp),%ecx
	movl 8(%esp),%eax
	movl 12(%esp),%edx
	movl %eax,TIMER_HIGHCHK(%ecx)
	movl %edx,TIMER_LOW(%ecx)
	movl %eax,TIMER_HIGH(%ecx)
	ret

Entry(timer_grab)
	movl 4(%esp),%ecx
0:	movl TIMER_HIGH(%ecx),%edx
	movl TIMER_LOW(%ecx),%eax
	cmpl TIMER_HIGHCHK(%ecx),%edx
	jne 0b
	ret
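
/*
 * A C sketch of the lock-free protocol implemented by timer_grab and
 * timer_update (illustrative only; the field names mirror the TIMER_*
 * offsets used above):
 *
 *    do {
 *        high = timer->high;               // TIMER_HIGH
 *        low  = timer->low;                // TIMER_LOW
 *    } while (high != timer->highchk);     // torn read: retry
 *
 * timer_update stores highchk first and high last, so a reader that
 * sees a matching high/highchk pair also saw a consistent low word.
 */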

#if STAT_TIME

#define TIME_TRAP_UENTRY
#define TIME_TRAP_UEXIT
#define TIME_INT_ENTRY
#define TIME_INT_EXIT

#else
/*
 * Nanosecond timing.
 */

/*
 * Low 32-bits of nanotime returned in %eax.
 * Computed from tsc based on the scale factor
 * and an implicit 32 bit shift.
 *
 * Uses %esi, %edi, %ebx, %ecx and %edx.
 */
#define RNT_INFO _rtc_nanotime_info
#define NANOTIME32 \
0:	movl RNT_INFO+RNT_TSC_BASE,%esi ;\
	movl RNT_INFO+RNT_TSC_BASE+4,%edi ;\
	rdtsc ;\
	subl %esi,%eax /* tsc - tsc_base */ ;\
	sbbl %edi,%edx ;\
	movl RNT_INFO+RNT_SCALE,%ecx ;\
	movl %edx,%ebx /* delta * scale */ ;\
	mull %ecx ;\
	movl %ebx,%eax ;\
	movl %edx,%ebx ;\
	mull %ecx ;\
	addl %ebx,%eax ;\
	addl RNT_INFO+RNT_NS_BASE,%eax /* add ns_base */ ;\
	cmpl RNT_INFO+RNT_TSC_BASE,%esi ;\
	jne 0b ;\
	cmpl RNT_INFO+RNT_TSC_BASE+4,%edi ;\
	jne 0b
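
/*
 * What NANOTIME32 computes, as a C sketch (illustrative only; the
 * field names mirror the RNT_* offsets above, and rdtsc64() is a
 * hypothetical 64-bit TSC read):
 *
 *    do {
 *        tsc_base = rtc_nanotime_info.tsc_base;      // 64-bit snapshot
 *        delta    = rdtsc64() - tsc_base;
 *        ns       = (uint32_t)((delta * rtc_nanotime_info.scale) >> 32)
 *                   + rtc_nanotime_info.ns_base;     // low 32 bits only
 *    } while (tsc_base != rtc_nanotime_info.tsc_base); // retry if updated
 */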

/*
 * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
 */
#define TIMER_UPDATE(treg,dreg) \
	addl TIMER_LOW(treg),dreg /* add delta low bits */ ;\
	adcl $0,TIMER_HIGHCHK(treg) /* add carry into check bits */ ;\
	movl dreg,TIMER_LOW(treg) /* store updated low bits */ ;\
	movl TIMER_HIGHCHK(treg),dreg /* copy high check bits */ ;\
	movl dreg,TIMER_HIGH(treg) /* to high bits */
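
/*
 * Write-side counterpart of the timer_grab read protocol, as a C
 * sketch (illustrative only):
 *
 *    new_low = timer->low + delta;
 *    timer->highchk += carry_out;          // bump the check word first
 *    timer->low      = new_low;
 *    timer->high     = timer->highchk;     // publish the high word last
 *
 * The highchk store precedes the low-word store and the high store
 * follows it, bracketing the update so a concurrent timer_grab can
 * detect and retry a torn read.
 */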

/*
 * Add time delta to old timer and start new.
 */
#define TIMER_EVENT(old,new) \
	NANOTIME32 /* eax low bits nanosecs */ ;\
	movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
	movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
	movl %eax,%edx /* save timestamp in %edx */ ;\
	subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
	TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
	addl $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\
	movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ ;\
	movl %gs:CPU_PROCESSOR,%edx /* get current processor */ ;\
	movl %ecx,CURRENT_TIMER(%edx) /* set current timer */


/*
 * Update time on user trap entry.
 * Uses %eax,%ecx,%edx,%esi.
 */
#define TIME_TRAP_UENTRY TIMER_EVENT(USER,SYSTEM)

/*
 * Update time on user trap exit.
 * Uses %eax,%ecx,%edx,%esi.
 */
#define TIME_TRAP_UEXIT TIMER_EVENT(SYSTEM,USER)

/*
 * Update time on interrupt entry.
 * Uses %eax,%ecx,%edx,%esi.
 */
#define TIME_INT_ENTRY \
	NANOTIME32 /* eax low bits nanosecs */ ;\
	movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
	movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
	movl %eax,%edx /* save timestamp in %edx */ ;\
	subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
	TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
	movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
	addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
	movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */

/*
 * Update time on interrupt exit.
 * Uses %eax, %ecx, %edx, %esi.
 */
#define TIME_INT_EXIT \
	NANOTIME32 /* eax low bits nanosecs */ ;\
	movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
	addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
	movl %eax,%edx /* save timestamp in %edx */ ;\
	subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
	TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
	movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
	movl CURRENT_TIMER(%ecx),%ecx /* interrupted timer */ ;\
	movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */

#endif /* STAT_TIME */

#undef PDEBUG

#ifdef PDEBUG

/*
 * Traditional, not ANSI.
 */
#define CAH(label) \
	.data ;\
	.globl label/**/count ;\
label/**/count: ;\
	.long 0 ;\
	.globl label/**/limit ;\
label/**/limit: ;\
	.long 0 ;\
	.text ;\
	addl $1,%ss:label/**/count ;\
	cmpl $0,label/**/limit ;\
	jz label/**/exit ;\
	pushl %eax ;\
label/**/loop: ;\
	movl %ss:label/**/count,%eax ;\
	cmpl %eax,%ss:label/**/limit ;\
	je label/**/loop ;\
	popl %eax ;\
label/**/exit:

#else /* PDEBUG */

#define CAH(label)

#endif /* PDEBUG */

#if MACH_KDB
/*
 * Last-ditch debug code to handle faults that might result
 * from entering kernel (from collocated server) on an invalid
 * stack. On collocated entry, there's no hardware-initiated
 * stack switch, so a valid stack must be in place when an
 * exception occurs, or we may double-fault.
 *
 * In case of a double-fault, our only recourse is to switch
 * hardware "tasks", so that we avoid using the current stack.
 *
 * The idea here is just to get the processor into the debugger,
 * post-haste. No attempt is made to fix up whatever error got
 * us here, so presumably continuing from the debugger will
 * simply land us here again -- at best.
 */
#if 0
/*
 * Note that the per-fault entry points are not currently
 * functional. The only way to make them work would be to
 * set up separate TSS's for each fault type, which doesn't
 * currently seem worthwhile. (The offset part of a task
 * gate is always ignored.) So all faults that task switch
 * currently resume at db_task_start.
 */
/*
 * Double fault (Murphy's point) - error code (0) on stack
 */
Entry(db_task_dbl_fault)
	popl %eax
	movl $(T_DOUBLE_FAULT),%ebx
	jmp db_task_start
/*
 * Segment not present - error code on stack
 */
Entry(db_task_seg_np)
	popl %eax
	movl $(T_SEGMENT_NOT_PRESENT),%ebx
	jmp db_task_start
/*
 * Stack fault - error code on (current) stack
 */
Entry(db_task_stk_fault)
	popl %eax
	movl $(T_STACK_FAULT),%ebx
	jmp db_task_start
/*
 * General protection fault - error code on stack
 */
Entry(db_task_gen_prot)
	popl %eax
	movl $(T_GENERAL_PROTECTION),%ebx
	jmp db_task_start
#endif /* 0 */
/*
 * The entry point where execution resumes after last-ditch debugger task
 * switch.
 */
Entry(db_task_start)
	movl %esp,%edx
	subl $(ISS32_SIZE),%edx
	movl %edx,%esp /* allocate i386_saved_state on stack */
	movl %eax,R_ERR(%esp)
	movl %ebx,R_TRAPNO(%esp)
	pushl %edx
	CPU_NUMBER(%edx)
	movl CX(EXT(master_dbtss),%edx),%edx
	movl TSS_LINK(%edx),%eax
	pushl %eax /* pass along selector of previous TSS */
	call EXT(db_tss_to_frame)
	popl %eax /* get rid of TSS selector */
	call EXT(db_trap_from_asm)
	addl $0x4,%esp
/*
 * And now...?
 */
	iret /* ha, ha, ha... */
#endif /* MACH_KDB */

/*
 * Called as a function, makes the current thread
 * return from the kernel as if from an exception.
 */

	.globl EXT(thread_exception_return)
	.globl EXT(thread_bootstrap_return)
LEXT(thread_exception_return)
LEXT(thread_bootstrap_return)
	cli
	movl %gs:CPU_KERNEL_STACK,%ecx
	movl (%ecx),%esp /* switch back to PCB stack */
	jmp EXT(return_from_trap)

Entry(call_continuation)
	movl S_ARG0,%eax /* get continuation */
	movl S_ARG1,%edx /* continuation param */
	movl S_ARG2,%ecx /* wait result */
	movl %gs:CPU_KERNEL_STACK,%esp /* pop the stack */
	xorl %ebp,%ebp /* zero frame pointer */
	subl $8,%esp /* align the stack */
	pushl %ecx
	pushl %edx
	call *%eax /* call continuation */
	addl $16,%esp
	movl %gs:CPU_ACTIVE_THREAD,%eax
	pushl %eax
	call EXT(thread_terminate)


/*******************************************************************************************************
 *
 * All task 'exceptions' enter lo_alltraps:
 * esp -> x86_saved_state_t
 *
 * The rest of the state is set up as:
 * cr3 -> kernel directory
 * esp -> low based stack
 * gs -> CPU_DATA_GS
 * cs -> KERNEL_CS
 * ss/ds/es -> KERNEL_DS
 *
 * interrupts disabled
 * direction flag cleared
 */
Entry(lo_alltraps)
	movl R_CS(%esp),%eax /* assume 32-bit state */
	cmpl $(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne 1f
	movl R64_CS(%esp),%eax /* 64-bit user mode */
1:
	testb $3,%eax
	jz trap_from_kernel
	/* user mode trap */
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx
	xchgl %ebx,%esp /* switch to kernel stack */
	sti

	CCALL1(user_trap, %ebx) /* call user trap routine */
	cli /* hold off intrs - critical section */
	popl %esp /* switch back to PCB stack */

/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled
 */
LEXT(return_from_trap)
	movl %gs:CPU_PENDING_AST,%eax
	testl %eax,%eax
	je EXT(return_to_user) /* branch if no AST */

	movl %gs:CPU_KERNEL_STACK,%ebx
	xchgl %ebx,%esp /* switch to kernel stack */
	sti /* interrupts always enabled on return to user mode */

	pushl %ebx /* save PCB stack */
	CCALL1(i386_astintr, $0) /* take the AST */
	cli
	popl %esp /* switch back to PCB stack (w/exc link) */
	jmp EXT(return_from_trap) /* and check again (rare) */

LEXT(return_to_user)
	TIME_TRAP_UEXIT

LEXT(ret_to_user)
	cmpl $0, %gs:CPU_IS64BIT
	je EXT(lo_ret_to_user)
	jmp EXT(lo64_ret_to_user)


/*
 * Trap from kernel mode. No need to switch stacks.
 * Interrupts must be off here; we restore them to their state at the
 * time of the trap as soon as it is safe to do so without recursing
 * on preemption.
 */
trap_from_kernel:
	movl %esp, %eax /* saved state addr */
	CCALL1(kernel_trap, %eax) /* to kernel trap routine */
	cli

	movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
	testl $ AST_URGENT,%eax /* any urgent preemption? */
	je ret_to_kernel /* no, nothing to do */
	cmpl $ T_PREEMPT,R_TRAPNO(%esp)
	je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */
	testl $ EFL_IF,R_EFLAGS(%esp) /* interrupts disabled? */
	je ret_to_kernel
	cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
	jne ret_to_kernel
	movl %gs:CPU_KERNEL_STACK,%eax
	movl %esp,%ecx
	xorl %eax,%ecx
	andl $(-KERNEL_STACK_SIZE),%ecx
	testl %ecx,%ecx /* are we on the kernel stack? */
	jne ret_to_kernel /* no, skip it */

	CCALL1(i386_astintr, $1) /* take the AST */

ret_to_kernel:
	cmpl $0, %gs:CPU_IS64BIT
	je EXT(lo_ret_to_kernel)
	jmp EXT(lo64_ret_to_kernel)
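
/*
 * The xor/and sequence above is a stack-membership test; as a C sketch
 * (illustrative only, given KERNEL_STACK_SIZE-aligned stacks):
 *
 *    on_kernel_stack =
 *        (((uintptr_t)esp ^ kernel_stack) & -KERNEL_STACK_SIZE) == 0;
 *
 * Two addresses lie on the same stack iff they agree in every bit
 * above the stack-size alignment, so the masked xor is zero exactly
 * when %esp is within the current kernel stack.
 */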


/*******************************************************************************************************
 *
 * All interrupts on all tasks enter here with:
 * esp -> x86_saved_state_t
 *
 * cr3 -> kernel directory
 * esp -> low based stack
 * gs -> CPU_DATA_GS
 * cs -> KERNEL_CS
 * ss/ds/es -> KERNEL_DS
 *
 * interrupts disabled
 * direction flag cleared
 */
Entry(lo_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movl %gs:CPU_INT_STACK_TOP,%ecx
	cmpl %esp,%ecx
	jb 1f
	leal -INTSTACK_SIZE(%ecx),%edx
	cmpl %esp,%edx
	jb int_from_intstack
1:
	xchgl %ecx,%esp /* switch to interrupt stack */

	movl %cr0,%eax /* get cr0 */
	orl $(CR0_TS),%eax /* or in TS bit */
	movl %eax,%cr0 /* set cr0 */

	subl $8, %esp /* for 16-byte stack alignment */
	pushl %ecx /* save pointer to old stack */
	movl %ecx,%gs:CPU_INT_STATE /* save intr state */

	TIME_INT_ENTRY /* do timing */

	incl %gs:CPU_PREEMPTION_LEVEL
	incl %gs:CPU_INTERRUPT_LEVEL

	movl %gs:CPU_INT_STATE, %eax
	CCALL1(PE_incoming_interrupt, %eax) /* call generic interrupt routine */

	cli /* just in case we returned with intrs enabled */
	xorl %eax,%eax
	movl %eax,%gs:CPU_INT_STATE /* clear intr state pointer */

	.globl EXT(return_to_iret)
LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */

	decl %gs:CPU_INTERRUPT_LEVEL
	decl %gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT /* do timing */

	movl %gs:CPU_ACTIVE_THREAD,%eax
	movl ACT_PCB(%eax),%eax /* get act's PCB */
	movl PCB_FPS(%eax),%eax /* get pcb's ims.ifps */
	cmpl $0,%eax /* is there a context? */
	je 1f /* branch if not */
	movl FP_VALID(%eax),%eax /* load fp_valid */
	cmpl $0,%eax /* check if valid */
	jne 1f /* branch if valid */
	clts /* clear TS */
	jmp 2f
1:
	movl %cr0,%eax /* get cr0 */
	orl $(CR0_TS),%eax /* or in TS bit */
	movl %eax,%cr0 /* set cr0 */
2:
	popl %esp /* switch back to old stack */

	/* Load interrupted code segment into %eax */
	movl R_CS(%esp),%eax /* assume 32-bit state */
	cmpl $(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne 3f
	movl R64_CS(%esp),%eax /* 64-bit user mode */
3:
	testb $3,%eax /* user mode? */
	jnz ast_from_interrupt_user /* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl %gs:CPU_PENDING_AST,%eax
	testl $ AST_URGENT,%eax /* any urgent requests? */
	je ret_to_kernel /* no, nothing to do */

	cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
	jne ret_to_kernel /* yes, skip it */

	movl %gs:CPU_KERNEL_STACK,%eax
	movl %esp,%ecx
	xorl %eax,%ecx
	andl $(-KERNEL_STACK_SIZE),%ecx
	testl %ecx,%ecx /* are we on the kernel stack? */
	jne ret_to_kernel /* no, skip it */

	/*
	 * Take an AST from kernel space. We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl %gs:CPU_PREEMPTION_LEVEL
	incl %gs:CPU_INTERRUPT_LEVEL

	movl %esp, %edx /* i386_saved_state */
	CCALL1(PE_incoming_interrupt, %edx)

	decl %gs:CPU_INTERRUPT_LEVEL
	decl %gs:CPU_PREEMPTION_LEVEL

	jmp ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl %gs:CPU_PENDING_AST,%eax
	testl %eax,%eax /* pending ASTs? */
	je EXT(ret_to_user) /* no, nothing to do */

	TIME_TRAP_UENTRY

	jmp EXT(return_from_trap) /* return */

/*******************************************************************************************************
 *
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 * esp -> i386_saved_state_t
 * cr3 -> kernel directory
 * esp -> low based stack
 * gs -> CPU_DATA_GS
 * cs -> KERNEL_CS
 * ss/ds/es -> KERNEL_DS
 *
 * interrupts disabled
 * direction flag cleared
 */

Entry(lo_sysenter)
	/*
	 * We can be here either for a mach syscall or a unix syscall,
	 * as indicated by the sign of the code:
	 */
	movl R_EAX(%esp),%eax
	testl %eax,%eax
	js EXT(lo_mach_scall) /* < 0 => mach */
	/* > 0 => unix */

Entry(lo_unix_scall)
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx
	xchgl %ebx,%esp /* switch to kernel stack */

	sti
	movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
	movl ACT_TASK(%ecx),%ecx /* point to current task */
	addl $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */

	CCALL1(unix_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mach_scall)
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx
	xchgl %ebx,%esp /* switch to kernel stack */

	sti
	movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
	movl ACT_TASK(%ecx),%ecx /* point to current task */
	addl $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */

	CCALL1(mach_call_munger, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mdep_scall)
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx
	xchgl %ebx,%esp /* switch to kernel stack */

	sti

	CCALL1(machdep_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_diag_scall)
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
	xchgl %ebx,%esp // Switch to it, saving the previous

	CCALL1(diagCall, %ebx) // Call diagnostics
	cli // Disable interruptions just in case they were enabled
	popl %esp // Get back the original stack

	cmpl $0,%eax // What kind of return is this?
	jne EXT(return_to_user) // Normal return, do not check asts...

	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
	// pass what would be the diag syscall
	// error return - cause an exception
	/* no return */



/*******************************************************************************************************
 *
 * 64bit Tasks
 * System call entries via syscall only:
 *
 * esp -> x86_saved_state64_t
 * cr3 -> kernel directory
 * esp -> low based stack
 * gs -> CPU_DATA_GS
 * cs -> KERNEL_CS
 * ss/ds/es -> KERNEL_DS
 *
 * interrupts disabled
 * direction flag cleared
 */

Entry(lo_syscall)
	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl R64_RAX(%esp), %eax /* syscall number/class */
	movl %eax, %ebx
	andl $(SYSCALL_CLASS_MASK), %ebx /* syscall class */
	cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
	je EXT(lo64_mach_scall)
	cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
	je EXT(lo64_unix_scall)
	cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
	je EXT(lo64_mdep_scall)
	cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
	je EXT(lo64_diag_scall)

	/* Syscall class unknown */
	CCALL3(i386_exception, $(EXC_SYSCALL), %eax, $1)
	/* no return */
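
/*
 * The dispatch above, sketched in C (illustrative only; the class
 * macros come from mach/i386/syscall_sw.h):
 *
 *    switch (rax & SYSCALL_CLASS_MASK) {
 *    case SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT: ...
 *    case SYSCALL_CLASS_UNIX << SYSCALL_CLASS_SHIFT: ...
 *    case SYSCALL_CLASS_MDEP << SYSCALL_CLASS_SHIFT: ...
 *    case SYSCALL_CLASS_DIAG << SYSCALL_CLASS_SHIFT: ...
 *    default: i386_exception(EXC_SYSCALL, rax, 1);   // unknown class
 *    }
 */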

Entry(lo64_unix_scall)
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx
	xchgl %ebx,%esp /* switch to kernel stack */

	sti
	movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
	movl ACT_TASK(%ecx),%ecx /* point to current task */
	addl $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */

	CCALL1(unix_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_mach_scall)
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx
	xchgl %ebx,%esp /* switch to kernel stack */

	sti
	movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
	movl ACT_TASK(%ecx),%ecx /* point to current task */
	addl $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */

	CCALL1(mach_call_munger64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_mdep_scall)
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx
	xchgl %ebx,%esp /* switch to kernel stack */

	sti

	CCALL1(machdep_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_diag_scall)
	TIME_TRAP_UENTRY

	movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
	xchgl %ebx,%esp // Switch to it, saving the previous

	pushl %ebx // Push the previous stack
	CCALL1(diagCall64, %ebx) // Call diagnostics
	cli // Disable interruptions just in case they were enabled
	popl %esp // Get back the original stack

	cmpl $0,%eax // What kind of return is this?
	jne EXT(return_to_user) // Normal return, do not check asts...

	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
	/* no return */


/*******************************************************************************************************
 *
 * Utility routines.
 */


/*
 * Copy from user/kernel address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: byte count
 */
ENTRY(copyinphys_user)
	movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
	mov %cx,%ds

ENTRY(copyinphys_kern)
	movl $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
	mov %cx,%es
	jmp copyin_common

ENTRY(copyin_user)
	movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
	mov %cx,%ds

ENTRY(copyin_kern)

copyin_common:
	pushl %esi
	pushl %edi /* save registers */

	movl 8+S_ARG0,%esi /* get source - window offset or kernel address */
	movl 8+S_ARG1,%edi /* get destination - kernel address */
	movl 8+S_ARG2,%edx /* get count */

	cld /* count up */
	movl %edx,%ecx /* move by longwords first */
	shrl $2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsl /* move longwords */
	movl %edx,%ecx /* now move remaining bytes */
	andl $3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb
	xorl %eax,%eax /* return 0 for success */
copyin_ret:
	mov %ss,%cx /* restore kernel data and extended segments */
	mov %cx,%ds
	mov %cx,%es

	popl %edi /* restore registers */
	popl %esi
	ret /* and return */

copyin_fail:
	movl $(EFAULT),%eax /* return error for failure */
	jmp copyin_ret /* pop frame and return */
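
/*
 * Calling contract of the copyin entry points above, sketched as C
 * (illustrative only; the real declarations live elsewhere):
 *
 *    // src: window offset or kernel address, dst: kernel address;
 *    // returns 0 on success, EFAULT if the copy faulted
 *    int copyin_user(const void *src, void *dst, size_t nbytes);
 *
 * A fault inside either rep/movs string move is redirected to
 * copyin_fail through the RECOVER() table entries, so a bad source
 * address produces an EFAULT return instead of a panic.
 */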


/*
 * Copy string from user/kern address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: max byte count
 * arg3: actual byte count (OUT)
 */
Entry(copyinstr_kern)
	mov %ds,%cx
	jmp copyinstr_common

Entry(copyinstr_user)
	movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */

copyinstr_common:
	mov %cx,%fs

	pushl %esi
	pushl %edi /* save registers */

	movl 8+S_ARG0,%esi /* get source - window offset or kernel address */
	movl 8+S_ARG1,%edi /* get destination - kernel address */
	movl 8+S_ARG2,%edx /* get count */

	xorl %eax,%eax /* set to 0 here so that the high 24 bits */
	/* are 0 for the test against 0 */
2:
	RECOVERY_SECTION
	RECOVER(copystr_fail) /* copy bytes... */
	movb %fs:(%esi),%al
	incl %esi
	testl %edi,%edi /* is the kernel address NULL? */
	jz 3f /* yes, skip the store */
	movb %al,(%edi) /* copy the byte */
	incl %edi
3:
	testl %eax,%eax /* did we just stuff the 0-byte? */
	jz 4f /* yes, return 0 status already in %eax */
	decl %edx /* decrement #bytes left in buffer */
	jnz 2b /* buffer not full so copy in another byte */
	movl $(ENAMETOOLONG),%eax /* buffer full but no 0-byte: ENAMETOOLONG */
4:
	movl 8+S_ARG3,%edi /* get OUT len ptr */
	cmpl $0,%edi
	jz copystr_ret /* if null, just return */
	subl 8+S_ARG0,%esi
	movl %esi,(%edi) /* else set OUT arg to xfer len */
copystr_ret:
	popl %edi /* restore registers */
	popl %esi
	ret /* and return */

copystr_fail:
	movl $(EFAULT),%eax /* return error for failure */
	jmp copystr_ret /* pop frame and return */
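
/*
 * Calling contract, sketched as C (illustrative only):
 *
 *    // Copies at most maxlen bytes including the terminating NUL.
 *    // dst may be NULL, in which case bytes are scanned but not
 *    // stored. *lencopied (if non-NULL) receives the number of
 *    // source bytes consumed. Returns 0 on success, ENAMETOOLONG
 *    // if no NUL fit within maxlen bytes, or EFAULT on a fault.
 *    int copyinstr_user(const char *src, char *dst,
 *                       size_t maxlen, size_t *lencopied);
 */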


/*
 * Copy to user/kern address space.
 * arg0: kernel address
 * arg1: window offset or kernel address
 * arg2: byte count
 */
ENTRY(copyoutphys_user)
	movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
	mov %cx,%es

ENTRY(copyoutphys_kern)
	movl $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
	mov %cx,%ds
	jmp copyout_common

ENTRY(copyout_user)
	movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
	mov %cx,%es

ENTRY(copyout_kern)

copyout_common:
	pushl %esi
	pushl %edi /* save registers */

	movl 8+S_ARG0,%esi /* get source - kernel address */
	movl 8+S_ARG1,%edi /* get destination - window offset or kernel address */
	movl 8+S_ARG2,%edx /* get count */

	cld /* count up */
	movl %edx,%ecx /* move by longwords first */
	shrl $2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsl
	movl %edx,%ecx /* now move remaining bytes */
	andl $3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsb /* move */
	xorl %eax,%eax /* return 0 for success */
copyout_ret:
	mov %ss,%cx /* restore kernel segment */
	mov %cx,%es
	mov %cx,%ds

	popl %edi /* restore registers */
	popl %esi
	ret /* and return */

copyout_fail:
	movl $(EFAULT),%eax /* return error for failure */
	jmp copyout_ret /* pop frame and return */


/*
 * io register must not be used on slaves (no AT bus)
 */
#define ILL_ON_SLAVE


#if MACH_ASSERT

#define ARG0 B_ARG0
#define ARG1 B_ARG1
#define ARG2 B_ARG2
#define PUSH_FRAME FRAME
#define POP_FRAME EMARF

#else /* MACH_ASSERT */

#define ARG0 S_ARG0
#define ARG1 S_ARG1
#define ARG2 S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif /* MACH_ASSERT */


#if MACH_KDB || MACH_ASSERT

/*
 * The following routines are also defined as macros in i386/pio.h.
 * Compile them when MACH_KDB is configured so that they
 * can be invoked from the debugger.
 */

/*
 * void outb(unsigned char *io_port,
 * unsigned char byte)
 *
 * Output a byte to an IO port.
 */
ENTRY(outb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl ARG0,%edx /* IO port address */
	movl ARG1,%eax /* data to output */
	outb %al,%dx /* send it out */
	POP_FRAME
	ret

/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Input a byte from an IO port.
 */
ENTRY(inb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl ARG0,%edx /* IO port address */
	xor %eax,%eax /* clear high bits of register */
	inb %dx,%al /* get the byte */
	POP_FRAME
	ret

/*
 * void outw(unsigned short *io_port,
 * unsigned short word)
 *
 * Output a word to an IO port.
 */
ENTRY(outw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl ARG0,%edx /* IO port address */
	movl ARG1,%eax /* data to output */
	outw %ax,%dx /* send it out */
	POP_FRAME
	ret

/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Input a word from an IO port.
 */
ENTRY(inw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl ARG0,%edx /* IO port address */
	xor %eax,%eax /* clear high bits of register */
	inw %dx,%ax /* get the word */
	POP_FRAME
	ret

/*
 * void outl(unsigned int *io_port,
 * unsigned int data)
 *
 * Output an int to an IO port.
 */
ENTRY(outl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl ARG0,%edx /* IO port address */
	movl ARG1,%eax /* data to output */
	outl %eax,%dx /* send it out */
	POP_FRAME
	ret

/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Input an int from an IO port.
 */
ENTRY(inl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl ARG0,%edx /* IO port address */
	inl %dx,%eax /* get the int */
	POP_FRAME
	ret

#endif /* MACH_KDB || MACH_ASSERT */

/*
 * void loutb(unsigned char *io_port,
 * unsigned char *data,
 * unsigned int count)
 *
 * Output an array of bytes to an IO port.
 */
ENTRY(loutb)
ENTRY(outsb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl %esi,%eax /* save register */
	movl ARG0,%edx /* get io port number */
	movl ARG1,%esi /* get data address */
	movl ARG2,%ecx /* get count */
	cld /* count up */
	rep
	outsb /* output */
	movl %eax,%esi /* restore register */
	POP_FRAME
	ret


/*
 * void loutw(unsigned short *io_port,
 * unsigned short *data,
 * unsigned int count)
 *
 * Output an array of shorts to an IO port.
 */
ENTRY(loutw)
ENTRY(outsw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl %esi,%eax /* save register */
	movl ARG0,%edx /* get io port number */
	movl ARG1,%esi /* get data address */
	movl ARG2,%ecx /* get count */
	cld /* count up */
	rep
	outsw /* output */
	movl %eax,%esi /* restore register */
	POP_FRAME
	ret

/*
 * void loutl(unsigned short io_port,
 * unsigned int *data,
 * unsigned int count)
 *
 * Output an array of longs to an IO port.
 */
ENTRY(loutl)
ENTRY(outsl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl %esi,%eax /* save register */
	movl ARG0,%edx /* get io port number */
	movl ARG1,%esi /* get data address */
	movl ARG2,%ecx /* get count */
	cld /* count up */
	rep
	outsl /* output */
	movl %eax,%esi /* restore register */
	POP_FRAME
	ret


/*
 * void linb(unsigned char *io_port,
 * unsigned char *data,
 * unsigned int count)
 *
 * Input an array of bytes from an IO port.
 */
ENTRY(linb)
ENTRY(insb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl %edi,%eax /* save register */
	movl ARG0,%edx /* get io port number */
	movl ARG1,%edi /* get data address */
	movl ARG2,%ecx /* get count */
	cld /* count up */
	rep
	insb /* input */
	movl %eax,%edi /* restore register */
	POP_FRAME
	ret


/*
 * void linw(unsigned short *io_port,
 * unsigned short *data,
 * unsigned int count)
 *
 * Input an array of shorts from an IO port.
 */
ENTRY(linw)
ENTRY(insw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl %edi,%eax /* save register */
	movl ARG0,%edx /* get io port number */
	movl ARG1,%edi /* get data address */
	movl ARG2,%ecx /* get count */
	cld /* count up */
	rep
	insw /* input */
	movl %eax,%edi /* restore register */
	POP_FRAME
	ret


/*
 * void linl(unsigned short io_port,
 * unsigned int *data,
 * unsigned int count)
 *
 * Input an array of longs from an IO port.
 */
ENTRY(linl)
ENTRY(insl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl %edi,%eax /* save register */
	movl ARG0,%edx /* get io port number */
	movl ARG1,%edi /* get data address */
	movl ARG2,%ecx /* get count */
	cld /* count up */
	rep
	insl /* input */
	movl %eax,%edi /* restore register */
	POP_FRAME
	ret

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl S_ARG0, %ecx
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr
	movl S_ARG1, %ecx
	movl %eax, (%ecx)
	movl S_ARG2, %ecx
	movl %edx, (%ecx)
	movl $0, %eax
	ret

rdmsr_fail:
	movl $1, %eax
	ret
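
/*
 * Usage sketch (illustrative only): probe an MSR that may not exist
 * without taking an unrecoverable #GP, since the RECOVER() entry
 * converts the fault into a return value of 1:
 *
 *    uint32_t lo, hi;
 *    if (rdmsr_carefully(msr, &lo, &hi) == 0)
 *        value = ((uint64_t)hi << 32) | lo;    // MSR present
 *    else
 *        ...                                   // rdmsr faulted
 */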

/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END



ENTRY(dr6)
	movl %db6, %eax
	ret

/* dr<i>(address, type, len, persistence)
 */
ENTRY(dr0)
	movl S_ARG0, %eax
	movl %eax,EXT(dr_addr)
	movl %eax, %db0
	movl $0, %ecx
	jmp 0f
ENTRY(dr1)
	movl S_ARG0, %eax
	movl %eax,EXT(dr_addr)+1*4
	movl %eax, %db1
	movl $2, %ecx
	jmp 0f
ENTRY(dr2)
	movl S_ARG0, %eax
	movl %eax,EXT(dr_addr)+2*4
	movl %eax, %db2
	movl $4, %ecx
	jmp 0f

ENTRY(dr3)
	movl S_ARG0, %eax
	movl %eax,EXT(dr_addr)+3*4
	movl %eax, %db3
	movl $6, %ecx

0:
	pushl %ebp
	movl %esp, %ebp

	movl %db7, %edx
	movl %edx,EXT(dr_addr)+4*4
	andl dr_msk(,%ecx,2),%edx /* clear out new entry */
	movl %edx,EXT(dr_addr)+5*4
	movzbl B_ARG3, %eax
	andb $3, %al
	shll %cl, %eax
	orl %eax, %edx

	movzbl B_ARG1, %eax
	andb $3, %al
	addb $0x10, %cl
	shll %cl, %eax
	orl %eax, %edx

	movzbl B_ARG2, %eax
	andb $3, %al
	addb $0x2, %cl
	shll %cl, %eax
	orl %eax, %edx

	movl %edx, %db7
	movl %edx,EXT(dr_addr)+7*4
	movl %edx, %eax
	leave
	ret
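
/*
 * For reference, the Intel-defined DR7 layout being packed above:
 * for breakpoint register i, the local/global enable bits occupy
 * bits [2i+1:2i], the R/W (type) field bits [17+4i:16+4i], and the
 * LEN field bits [19+4i:18+4i]. dr_msk below holds the complement
 * masks used to clear one register's fields before the new values
 * are or'ed in.
 */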

	.data
dr_msk:
	.long ~0x000f0003
	.long ~0x00f0000c
	.long ~0x0f000030
	.long ~0xf00000c0
ENTRY(dr_addr)
	.long 0,0,0,0
	.long 0,0,0,0

	.text

ENTRY(get_cr0)
	movl %cr0, %eax
	ret

ENTRY(set_cr0)
	movl 4(%esp), %eax
	movl %eax, %cr0
	ret

#ifndef SYMMETRY

/*
 * ffs(mask)
 */
ENTRY(ffs)
	bsfl S_ARG0, %eax
	jz 0f
	incl %eax
	ret
0:	xorl %eax, %eax
	ret
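
/*
 * Semantics sketch (illustrative), matching the traditional C ffs():
 *
 *    ffs(0)    == 0
 *    ffs(0x18) == 4    // lowest set bit is bit 3, reported 1-based
 *
 * bsfl sets ZF (and leaves %eax undefined) when the operand is zero,
 * hence the jz path that returns 0.
 */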

/*
 * cpu_shutdown()
 * Force reboot
 */

null_idtr:
	.word 0
	.long 0

Entry(cpu_shutdown)
	lidt null_idtr /* disable the interrupt handler */
	xor %ecx,%ecx /* generate a divide by zero */
	div %ecx,%eax /* reboot now */
	ret /* this will "never" be executed */

#endif /* SYMMETRY */


/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
	movl S_ARG0, %ecx /* bit number */
	movl S_ARG1, %eax /* address */
	btsl %ecx, (%eax) /* set bit */
	ret

/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
	movl S_ARG0, %ecx /* bit number */
	movl S_ARG1, %eax /* address */
	btrl %ecx, (%eax) /* clear bit */
	ret

/*
 * ffsbit(int *s) - find first set bit in bit string
 */
ENTRY(ffsbit)
	movl S_ARG0, %ecx /* address */
	movl $0, %edx /* base offset */
0:
	bsfl (%ecx), %eax /* check argument bits */
	jnz 1f /* found bit, return */
	addl $4, %ecx /* increment address */
	addl $32, %edx /* increment offset */
	jmp 0b /* try again */
1:
	addl %edx, %eax /* return offset */
	ret

/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string
 */

ENTRY(testbit)
	movl S_ARG0,%eax /* get the bit to test */
	movl S_ARG1,%ecx /* get the array string */
	btl %eax,(%ecx)
	sbbl %eax,%eax
	ret
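
/*
 * testbit produces a C boolean via the btl/sbbl idiom: btl copies the
 * tested bit into CF, and sbbl %eax,%eax computes %eax - %eax - CF,
 * i.e. -1 if the bit was set and 0 if it was clear. Usage sketch
 * (illustrative only):
 *
 *    if (testbit(n, array))    // any nonzero result means "set"
 *        ...
 */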

ENTRY(get_pc)
	movl 4(%ebp),%eax
	ret

ENTRY(minsecurity)
	pushl %ebp
	movl %esp,%ebp
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 */
ENTRY(jail)
	jmp EXT(jail)

/*
 * unsigned int
 * div_scale(unsigned int dividend,
 * unsigned int divisor,
 * unsigned int *scale)
 *
 * This function returns (dividend << *scale) / divisor where *scale
 * is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 * *scale = 0;
 * while (((dividend >> *scale) >= divisor))
 * (*scale)++;
 * *scale = 32 - *scale;
 * return ((dividend << *scale) / divisor);
 */
ENTRY(div_scale)
	PUSH_FRAME
	xorl %ecx, %ecx /* *scale = 0 */
	xorl %eax, %eax
	movl ARG0, %edx /* get dividend */
0:
	cmpl ARG1, %edx /* if (divisor > dividend) */
	jle 1f /* goto 1f */
	addl $1, %ecx /* (*scale)++ */
	shrdl $1, %edx, %eax /* dividend >> 1 */
	shrl $1, %edx /* dividend >> 1 */
	jmp 0b /* goto 0b */
1:
	divl ARG1 /* (dividend << (32 - *scale)) / divisor */
	movl ARG2, %edx /* get scale */
	movl $32, (%edx) /* *scale = 32 */
	subl %ecx, (%edx) /* *scale -= %ecx */
	POP_FRAME
	ret
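
/*
 * Worked example (illustrative): div_scale(10, 3, &s) shifts until
 * the remaining dividend is no larger than the divisor, leaving
 * s = 30 and returning (10 << 30) / 3 = 3579139413, i.e. 10/3 as a
 * 2.30 fixed-point quotient (3579139413 / 2^30 ~= 3.333).
 */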

/*
 * unsigned int
 * mul_scale(unsigned int multiplicand,
 * unsigned int multiplier,
 * unsigned int *scale)
 *
 * This function returns ((multiplicand * multiplier) >> *scale) where
 * *scale is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 * *scale = 0;
 * while (overflow((multiplicand * multiplier) >> *scale))
 * (*scale)++;
 * return ((multiplicand * multiplier) >> *scale);
 */
ENTRY(mul_scale)
	PUSH_FRAME
	xorl %ecx, %ecx /* *scale = 0 */
	movl ARG0, %eax /* get multiplicand */
	mull ARG1 /* multiplicand * multiplier */
0:
	cmpl $0, %edx /* if (!overflow()) */
	je 1f /* goto 1 */
	addl $1, %ecx /* (*scale)++ */
	shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */
	shrl $1, %edx /* (multiplicand * multiplier) >> 1 */
	jmp 0b
1:
	movl ARG2, %edx /* get scale */
	movl %ecx, (%edx) /* set *scale */
	POP_FRAME
	ret



/*
 * Double-fault exception handler task. The last gasp...
 */
Entry(df_task_start)
	CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))
	hlt


/*
 * Machine-check handler task. The last gasp...
 */
Entry(mc_task_start)
	CCALL1(panic_machine_check, $(T_MACHINE_CHECK))
	hlt

/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl %esp, %eax
	CCALL1(panic_double_fault64, %eax)
	hlt

Entry(lo_mc64)
	movl %esp, %eax
	CCALL1(panic_machine_check64, %eax)
	hlt