1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50
51 #include <mach_rt.h>
52 #include <platforms.h>
53 #include <mach_kdb.h>
54 #include <mach_kgdb.h>
55 #include <mach_kdp.h>
56 #include <stat_time.h>
57 #include <mach_assert.h>
58
59 #include <sys/errno.h>
60 #include <i386/asm.h>
61 #include <i386/cpuid.h>
62 #include <i386/eflags.h>
63 #include <i386/proc_reg.h>
64 #include <i386/trap.h>
65 #include <assym.s>
66 #include <mach/exception_types.h>
67
68 #define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
69 #include <mach/i386/syscall_sw.h>
70
71 #include <i386/mp.h>
72
73 /*
74 * PTmap is recursive pagemap at top of virtual address space.
75 * Within PTmap, the page directory can be found (third indirection).
76 */
77 .globl _PTmap,_PTD,_PTDpde
78 .set _PTmap,(PTDPTDI << PDESHIFT)
79 .set _PTD,_PTmap + (PTDPTDI * NBPG)
80 .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
81
82 /*
83 * APTmap, APTD is the alternate recursive pagemap.
84 * It's used when modifying another process's page tables.
85 */
86 .globl _APTmap,_APTD,_APTDpde
87 .set _APTmap,(APTDPTDI << PDESHIFT)
88 .set _APTD,_APTmap + (APTDPTDI * NBPG)
89 .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
90
91 #if __MACHO__
92 /* Under Mach-O, etext is a variable which contains
93 * the last text address
94 */
95 #define ETEXT_ADDR (EXT(etext))
96 #else
97 /* Under ELF and other non-Mach-O formats, the address of
98 * etext represents the last text address
99 */
100 #define ETEXT_ADDR $ EXT(etext)
101 #endif
102
103 #define CX(addr,reg) addr(,reg,4)
104
105 /*
106 * The following macros make calls into C code.
107 * They dynamically align the stack to 16 bytes.
108 * Arguments are moved (not pushed) onto the correctly aligned stack.
109 * NOTE: EDI is destroyed in the process, and hence cannot
110 * be directly used as a parameter. Users of this macro must
111 * independently preserve EDI (a non-volatile) if the routine is
112 * intended to be called from C, for instance.
113 */
114
115 #define CCALL(fn) \
116 movl %esp, %edi ;\
117 andl $0xFFFFFFF0, %esp ;\
118 call EXT(fn) ;\
119 movl %edi, %esp
120
121 #define CCALL1(fn, arg1) \
122 movl %esp, %edi ;\
123 subl $4, %esp ;\
124 andl $0xFFFFFFF0, %esp ;\
125 movl arg1, 0(%esp) ;\
126 call EXT(fn) ;\
127 movl %edi, %esp
128
129 #define CCALL2(fn, arg1, arg2) \
130 movl %esp, %edi ;\
131 subl $8, %esp ;\
132 andl $0xFFFFFFF0, %esp ;\
133 movl arg2, 4(%esp) ;\
134 movl arg1, 0(%esp) ;\
135 call EXT(fn) ;\
136 movl %edi, %esp
137
138 #define CCALL3(fn, arg1, arg2, arg3) \
139 movl %esp, %edi ;\
140 subl $12, %esp ;\
141 andl $0xFFFFFFF0, %esp ;\
142 movl arg3, 8(%esp) ;\
143 movl arg2, 4(%esp) ;\
144 movl arg1, 0(%esp) ;\
145 call EXT(fn) ;\
146 movl %edi, %esp
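
/*
 * For illustration only (a sketch, not assembled): the effect of
 * CCALL2(fn, a1, a2) in C-like pseudocode, where saved_esp/a1/a2 are
 * our names. The dynamic 16-byte alignment is the point:
 *
 *	saved_esp = esp;		// stashed in %edi (callee-saved)
 *	esp = (esp - 8) & ~0xF;		// room for 2 args, 16-byte aligned
 *	((uint32_t *)esp)[1] = a2;
 *	((uint32_t *)esp)[0] = a1;
 *	fn();				// fn reads args at 0(%esp)/4(%esp)
 *	esp = saved_esp;		// restored from %edi
 */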
147
148 .text
149 locore_start:
150
151 /*
152 * Fault recovery.
153 */
154
155 #ifdef __MACHO__
156 #define RECOVERY_SECTION .section __VECTORS, __recover
157 #else
158 #define RECOVERY_SECTION .text
160 #endif
161
162 #define RECOVER_TABLE_START \
163 .align 2 ; \
164 .globl EXT(recover_table) ;\
165 LEXT(recover_table) ;\
166 .text
167
168 #define RECOVER(addr) \
169 .align 2; \
170 .long 9f ;\
171 .long addr ;\
172 .text ;\
173 9:
174
175 #define RECOVER_TABLE_END \
176 .align 2 ;\
177 .globl EXT(recover_table_end) ;\
178 LEXT(recover_table_end) ;\
179 .text
180
181 /*
182 * Allocate recovery and table.
183 */
184 RECOVERY_SECTION
185 RECOVER_TABLE_START
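
/*
 * How the table is consumed (a sketch; the scan itself lives in the C
 * trap handler and the struct name here is illustrative): RECOVER(addr)
 * emits a { fault_location, recover_address } pair, and on a kernel
 * fault the handler looks for a matching EIP:
 *
 *	struct recovery { uint32_t fault_addr, recover_addr; };
 *	extern struct recovery recover_table[], recover_table_end[];
 *
 *	for (struct recovery *rp = recover_table;
 *	     rp < recover_table_end; rp++) {
 *		if (regs->eip == rp->fault_addr) {
 *			regs->eip = rp->recover_addr;	// resume there
 *			return;
 *		}
 *	}
 */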
186
187 /*
188 * Timing routines.
189 */
190 Entry(timer_update)
191 movl 4(%esp),%ecx
192 movl 8(%esp),%eax
193 movl 12(%esp),%edx
194 movl %eax,TIMER_HIGHCHK(%ecx)
195 movl %edx,TIMER_LOW(%ecx)
196 movl %eax,TIMER_HIGH(%ecx)
197 ret
198
199 Entry(timer_grab)
200 movl 4(%esp),%ecx
201 0: movl TIMER_HIGH(%ecx),%edx
202 movl TIMER_LOW(%ecx),%eax
203 cmpl TIMER_HIGHCHK(%ecx),%edx
204 jne 0b
205 ret
206
207 #if STAT_TIME
208
209 #define TIME_TRAP_UENTRY
210 #define TIME_TRAP_UEXIT
211 #define TIME_INT_ENTRY
212 #define TIME_INT_EXIT
213
214 #else
215 /*
216 * Nanosecond timing.
217 */
218
219 /*
220 * Low 32-bits of nanotime returned in %eax.
221 * Computed from tsc based on the scale factor
222 * and an implicit 32 bit shift.
223 *
224 * Uses %esi, %edi, %ebx, %ecx and %edx.
225 */
226 #define RNT_INFO _rtc_nanotime_info
227 #define NANOTIME32 \
228 0: movl RNT_INFO+RNT_TSC_BASE,%esi ;\
229 movl RNT_INFO+RNT_TSC_BASE+4,%edi ;\
230 rdtsc ;\
231 subl %esi,%eax /* tsc - tsc_base */ ;\
232 sbbl %edi,%edx ;\
233 movl RNT_INFO+RNT_SCALE,%ecx ;\
234 movl %edx,%ebx /* delta * scale */ ;\
235 mull %ecx ;\
236 movl %ebx,%eax ;\
237 movl %edx,%ebx ;\
238 mull %ecx ;\
239 addl %ebx,%eax ;\
240 addl RNT_INFO+RNT_NS_BASE,%eax /* add ns_base */ ;\
241 cmpl RNT_INFO+RNT_TSC_BASE,%esi ;\
242 jne 0b ;\
243 cmpl RNT_INFO+RNT_TSC_BASE+4,%edi ;\
244 jne 0b
245
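/*
 * What NANOTIME32 computes, as a C sketch (struct/field names are
 * inferred from the RNT_* offsets; rnt.scale is a 32.32 fixed-point
 * multiplier):
 *
 *	uint32_t nanotime32(void) {
 *		uint64_t delta = rdtsc() - rnt.tsc_base;
 *		// retried from the top if tsc_base changes underneath us
 *		return (uint32_t)((delta * rnt.scale) >> 32) + rnt.ns_base;
 *	}
 */
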
246 /*
247 * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
248 */
249 #define TIMER_UPDATE(treg,dreg) \
250 addl TIMER_LOW(treg),dreg /* add delta low bits */ ;\
251 	adcl	$0,TIMER_HIGHCHK(treg)		/* add carry into high check bits */ ;\
252 	movl	dreg,TIMER_LOW(treg)		/* store updated low bits */	;\
253 	movl	TIMER_HIGHCHK(treg),dreg	/* copy high check bits */	;\
254 	movl	dreg,TIMER_HIGH(treg)		/* to high bits */
255
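/*
 * TIMER_UPDATE and Entry(timer_grab) above form a small lock-free
 * protocol. In C (a sketch; field names follow the TIMER_* offsets):
 * the writer bumps the check word before the low word and publishes
 * high last, so a reader seeing high == highchk knows low wasn't torn.
 *
 *	void timer_update(struct timer *t, uint32_t delta) {
 *		uint32_t low = t->low + delta;
 *		t->highchk += (low < t->low);	// carry into check word first
 *		t->low  = low;
 *		t->high = t->highchk;		// publish
 *	}
 *
 *	uint64_t timer_grab(struct timer *t) {
 *		uint32_t high, low;
 *		do {
 *			high = t->high;
 *			low  = t->low;
 *		} while (high != t->highchk);	// retry on racing update
 *		return ((uint64_t)high << 32) | low;
 *	}
 */
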
256 /*
257 * Add time delta to old timer and start new.
258 */
259 #define TIMER_EVENT(old,new) \
260 NANOTIME32 /* eax low bits nanosecs */ ;\
261 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
262 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
263 movl %eax,%edx /* save timestamp in %edx */ ;\
264 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
265 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
266 addl $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\
267 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ ;\
268 movl %gs:CPU_PROCESSOR,%edx /* get current processor */ ;\
269 movl %ecx,CURRENT_TIMER(%edx) /* set current timer */
270
271
272 /*
273 * Update time on user trap entry.
274 * Uses %eax,%ecx,%edx,%esi.
275 */
276 #define TIME_TRAP_UENTRY TIMER_EVENT(USER,SYSTEM)
277
278 /*
279 * update time on user trap exit.
280 * Uses %eax,%ecx,%edx,%esi.
281 */
282 #define TIME_TRAP_UEXIT TIMER_EVENT(SYSTEM,USER)
283
284 /*
285 * update time on interrupt entry.
286 * Uses %eax,%ecx,%edx,%esi.
287 */
288 #define TIME_INT_ENTRY \
289 NANOTIME32 /* eax low bits nanosecs */ ;\
290 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
291 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
292 movl %eax,%edx /* save timestamp in %edx */ ;\
293 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
294 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
295 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
296 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
297 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
298
299 /*
300 * update time on interrupt exit.
301 * Uses %eax, %ecx, %edx, %esi.
302 */
303 #define TIME_INT_EXIT \
304 NANOTIME32 /* eax low bits nanosecs */ ;\
305 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
306 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
307 movl %eax,%edx /* save timestamp in %edx */ ;\
308 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
309 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
310 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
311 movl CURRENT_TIMER(%ecx),%ecx /* interrupted timer */ ;\
312 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
313
314 #endif /* STAT_TIME */
315
316 #undef PDEBUG
317
318 #ifdef PDEBUG
319
320 /*
321 * Traditional, not ANSI.
322 */
323 #define CAH(label) \
324 .data ;\
325 .globl label/**/count ;\
326 label/**/count: ;\
327 .long 0 ;\
328 .globl label/**/limit ;\
329 label/**/limit: ;\
330 .long 0 ;\
331 .text ;\
332 addl $1,%ss:label/**/count ;\
333 cmpl $0,label/**/limit ;\
334 jz label/**/exit ;\
335 pushl %eax ;\
336 label/**/loop: ;\
337 movl %ss:label/**/count,%eax ;\
338 cmpl %eax,%ss:label/**/limit ;\
339 je label/**/loop ;\
340 popl %eax ;\
341 label/**/exit:
342
343 #else /* PDEBUG */
344
345 #define CAH(label)
346
347 #endif /* PDEBUG */
348
349 #if MACH_KDB
350 /*
351 * Last-ditch debug code to handle faults that might result
352 * from entering kernel (from collocated server) on an invalid
353 * stack. On collocated entry, there's no hardware-initiated
354 * stack switch, so a valid stack must be in place when an
355 * exception occurs, or we may double-fault.
356 *
357 * In case of a double-fault, our only recourse is to switch
358 * hardware "tasks", so that we avoid using the current stack.
359 *
360 * The idea here is just to get the processor into the debugger,
361 * post-haste. No attempt is made to fix up whatever error got
362 * us here, so presumably continuing from the debugger will
363 * simply land us here again -- at best.
364 */
365 #if 0
366 /*
367 * Note that the per-fault entry points are not currently
368 * functional. The only way to make them work would be to
369 * set up separate TSS's for each fault type, which doesn't
370 * currently seem worthwhile. (The offset part of a task
371 * gate is always ignored.) So all faults that task switch
372 * currently resume at db_task_start.
373 */
374 /*
375 * Double fault (Murphy's point) - error code (0) on stack
376 */
377 Entry(db_task_dbl_fault)
378 popl %eax
379 movl $(T_DOUBLE_FAULT),%ebx
380 jmp db_task_start
381 /*
382 * Segment not present - error code on stack
383 */
384 Entry(db_task_seg_np)
385 popl %eax
386 movl $(T_SEGMENT_NOT_PRESENT),%ebx
387 jmp db_task_start
388 /*
389 * Stack fault - error code on (current) stack
390 */
391 Entry(db_task_stk_fault)
392 popl %eax
393 movl $(T_STACK_FAULT),%ebx
394 jmp db_task_start
395 /*
396 * General protection fault - error code on stack
397 */
398 Entry(db_task_gen_prot)
399 popl %eax
400 movl $(T_GENERAL_PROTECTION),%ebx
401 jmp db_task_start
402 #endif /* 0 */
403 /*
404 * The entry point where execution resumes after last-ditch debugger task
405 * switch.
406 */
407 Entry(db_task_start)
408 movl %esp,%edx
409 subl $(ISS32_SIZE),%edx
410 movl %edx,%esp /* allocate i386_saved_state on stack */
411 movl %eax,R_ERR(%esp)
412 movl %ebx,R_TRAPNO(%esp)
413 pushl %edx
414 CPU_NUMBER(%edx)
415 movl CX(EXT(master_dbtss),%edx),%edx
416 movl TSS_LINK(%edx),%eax
417 pushl %eax /* pass along selector of previous TSS */
418 call EXT(db_tss_to_frame)
419 popl %eax /* get rid of TSS selector */
420 call EXT(db_trap_from_asm)
421 addl $0x4,%esp
422 /*
423 * And now...?
424 */
425 iret /* ha, ha, ha... */
426 #endif /* MACH_KDB */
427
428 /*
429 * Called as a function, makes the current thread
430 * return from the kernel as if from an exception.
431 */
432
433 .globl EXT(thread_exception_return)
434 .globl EXT(thread_bootstrap_return)
435 LEXT(thread_exception_return)
436 LEXT(thread_bootstrap_return)
437 cli
438 movl %gs:CPU_KERNEL_STACK,%ecx
439 movl (%ecx),%esp /* switch back to PCB stack */
440 jmp EXT(return_from_trap)
441
442 Entry(call_continuation)
443 movl S_ARG0,%eax /* get continuation */
444 movl S_ARG1,%edx /* continuation param */
445 movl S_ARG2,%ecx /* wait result */
446 movl %gs:CPU_KERNEL_STACK,%esp /* pop the stack */
447 xorl %ebp,%ebp /* zero frame pointer */
448 subl $8,%esp /* align the stack */
449 pushl %ecx
450 pushl %edx
451 call *%eax /* call continuation */
452 addl $16,%esp
453 movl %gs:CPU_ACTIVE_THREAD,%eax
454 pushl %eax
455 call EXT(thread_terminate)
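
/*
 * C-level contract of call_continuation (a sketch; the routine first
 * resets %esp to the top of the kernel stack and zeroes the frame
 * pointer, which C cannot express):
 *
 *	void call_continuation(void (*cont)(void *, wait_result_t),
 *			       void *param, wait_result_t wresult) {
 *		cont(param, wresult);	// continuations normally never return
 *		thread_terminate(current_thread());
 *	}
 */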
456
457
458
459 /*******************************************************************************************************
460 *
461 * All task 'exceptions', 32-bit and 64-bit, enter lo_alltraps:
462 * esp -> x86_saved_state_t
463 *
464 * The rest of the state is set up as:
465 * cr3 -> kernel directory
466 * esp -> low based stack
467 * gs -> CPU_DATA_GS
468 * cs -> KERNEL_CS
469 * ss/ds/es -> KERNEL_DS
470 *
471 * interrupts disabled
472 * direction flag cleared
473 */
474 Entry(lo_alltraps)
475 movl R_CS(%esp),%eax /* assume 32-bit state */
476 	cmpl	$(SS_64),SS_FLAVOR(%esp)	/* 64-bit? */
477 jne 1f
478 movl R64_CS(%esp),%eax /* 64-bit user mode */
479 1:
480 testb $3,%eax
481 jz trap_from_kernel
482 /* user mode trap */
483 TIME_TRAP_UENTRY
484
485 movl %gs:CPU_KERNEL_STACK,%ebx
486 xchgl %ebx,%esp /* switch to kernel stack */
487 sti
488
489 CCALL1(user_trap, %ebx) /* call user trap routine */
490 cli /* hold off intrs - critical section */
491 popl %esp /* switch back to PCB stack */
492
493 /*
494 * Return from trap or system call, checking for ASTs.
495 * On lowbase PCB stack with intrs disabled
496 */
497 LEXT(return_from_trap)
498 movl %gs:CPU_PENDING_AST,%eax
499 testl %eax,%eax
500 je EXT(return_to_user) /* branch if no AST */
501
502 movl %gs:CPU_KERNEL_STACK,%ebx
503 xchgl %ebx,%esp /* switch to kernel stack */
504 sti /* interrupts always enabled on return to user mode */
505
506 pushl %ebx /* save PCB stack */
507 CCALL1(i386_astintr, $0) /* take the AST */
508 cli
509 popl %esp /* switch back to PCB stack (w/exc link) */
510 jmp EXT(return_from_trap) /* and check again (rare) */
511
512 LEXT(return_to_user)
513 TIME_TRAP_UEXIT
514
515 LEXT(ret_to_user)
516 cmpl $0, %gs:CPU_IS64BIT
517 je EXT(lo_ret_to_user)
518 jmp EXT(lo64_ret_to_user)
519
520
521
522 /*
523 * Trap from kernel mode. No need to switch stacks.
524 * Interrupts must be off here - we will set them to state at time of trap
525 * as soon as it's safe for us to do so and not recurse doing preemption
526 */
527 trap_from_kernel:
528 movl %esp, %eax /* saved state addr */
529 CCALL1(kernel_trap, %eax) /* to kernel trap routine */
530 cli
531
532 movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
533 testl $ AST_URGENT,%eax /* any urgent preemption? */
534 je ret_to_kernel /* no, nothing to do */
535 cmpl $ T_PREEMPT,R_TRAPNO(%esp)
536 je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */
537 testl $ EFL_IF,R_EFLAGS(%esp) /* interrupts disabled? */
538 je ret_to_kernel
539 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
540 jne ret_to_kernel
541 movl %gs:CPU_KERNEL_STACK,%eax
542 movl %esp,%ecx
543 xorl %eax,%ecx
544 andl $(-KERNEL_STACK_SIZE),%ecx
545 testl %ecx,%ecx /* are we on the kernel stack? */
546 jne ret_to_kernel /* no, skip it */
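
/*
 * The xor/and test above exploits KERNEL_STACK_SIZE alignment of
 * kernel stacks: two pointers lie on the same stack iff they agree in
 * every bit at or above the size boundary. In C (a sketch):
 *
 *	same_stack = (((esp ^ kstack) & -KERNEL_STACK_SIZE) == 0);
 */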
547
548 CCALL1(i386_astintr, $1) /* take the AST */
549
550 ret_to_kernel:
551 cmpl $0, %gs:CPU_IS64BIT
552 je EXT(lo_ret_to_kernel)
553 jmp EXT(lo64_ret_to_kernel)
554
555
556
557 /*******************************************************************************************************
558 *
559 * All interrupts on all tasks enter here with:
560 *	esp -> x86_saved_state_t
561 *
562 * cr3 -> kernel directory
563 * esp -> low based stack
564 * gs -> CPU_DATA_GS
565 * cs -> KERNEL_CS
566 * ss/ds/es -> KERNEL_DS
567 *
568 * interrupts disabled
569 * direction flag cleared
570 */
571 Entry(lo_allintrs)
572 /*
573 * test whether already on interrupt stack
574 */
575 movl %gs:CPU_INT_STACK_TOP,%ecx
576 cmpl %esp,%ecx
577 jb 1f
578 leal -INTSTACK_SIZE(%ecx),%edx
579 cmpl %esp,%edx
580 jb int_from_intstack
581 1:
582 xchgl %ecx,%esp /* switch to interrupt stack */
583
584 movl %cr0,%eax /* get cr0 */
585 orl $(CR0_TS),%eax /* or in TS bit */
586 movl %eax,%cr0 /* set cr0 */
587
588 subl $8, %esp /* for 16-byte stack alignment */
589 pushl %ecx /* save pointer to old stack */
590 movl %ecx,%gs:CPU_INT_STATE /* save intr state */
591
592 TIME_INT_ENTRY /* do timing */
593
594 incl %gs:CPU_PREEMPTION_LEVEL
595 incl %gs:CPU_INTERRUPT_LEVEL
596
597 movl %gs:CPU_INT_STATE, %eax
598 CCALL1(PE_incoming_interrupt, %eax) /* call generic interrupt routine */
599
600 cli /* just in case we returned with intrs enabled */
601 xorl %eax,%eax
602 movl %eax,%gs:CPU_INT_STATE /* clear intr state pointer */
603
604 .globl EXT(return_to_iret)
605 LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
606
607 decl %gs:CPU_INTERRUPT_LEVEL
608 decl %gs:CPU_PREEMPTION_LEVEL
609
610 TIME_INT_EXIT /* do timing */
611
612 movl %gs:CPU_ACTIVE_THREAD,%eax
613 	movl	ACT_PCB(%eax),%eax	/* get act's PCB */
614 movl PCB_FPS(%eax),%eax /* get pcb's ims.ifps */
615 cmpl $0,%eax /* Is there a context */
616 je 1f /* Branch if not */
617 movl FP_VALID(%eax),%eax /* Load fp_valid */
618 cmpl $0,%eax /* Check if valid */
619 jne 1f /* Branch if valid */
620 clts /* Clear TS */
621 jmp 2f
622 1:
623 movl %cr0,%eax /* get cr0 */
624 orl $(CR0_TS),%eax /* or in TS bit */
625 movl %eax,%cr0 /* set cr0 */
626 2:
627 popl %esp /* switch back to old stack */
628
629 /* Load interrupted code segment into %eax */
630 movl R_CS(%esp),%eax /* assume 32-bit state */
631 	cmpl	$(SS_64),SS_FLAVOR(%esp)	/* 64-bit? */
632 jne 3f
633 movl R64_CS(%esp),%eax /* 64-bit user mode */
634 3:
635 testb $3,%eax /* user mode, */
636 jnz ast_from_interrupt_user /* go handle potential ASTs */
637 /*
638 * we only want to handle preemption requests if
639 * the interrupt fell in the kernel context
640 * and preemption isn't disabled
641 */
642 movl %gs:CPU_PENDING_AST,%eax
643 testl $ AST_URGENT,%eax /* any urgent requests? */
644 je ret_to_kernel /* no, nothing to do */
645
646 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
647 jne ret_to_kernel /* yes, skip it */
648
649 movl %gs:CPU_KERNEL_STACK,%eax
650 movl %esp,%ecx
651 xorl %eax,%ecx
652 andl $(-KERNEL_STACK_SIZE),%ecx
653 testl %ecx,%ecx /* are we on the kernel stack? */
654 jne ret_to_kernel /* no, skip it */
655
656 /*
657 * Take an AST from kernel space. We don't need (and don't want)
658 * to do as much as the case where the interrupt came from user
659 * space.
660 */
661 CCALL1(i386_astintr, $1)
662
663 jmp ret_to_kernel
664
665
666 /*
667 * nested int - simple path, can't preempt etc on way out
668 */
669 int_from_intstack:
670 incl %gs:CPU_PREEMPTION_LEVEL
671 incl %gs:CPU_INTERRUPT_LEVEL
672
673 movl %esp, %edx /* i386_saved_state */
674 CCALL1(PE_incoming_interrupt, %edx)
675
676 decl %gs:CPU_INTERRUPT_LEVEL
677 decl %gs:CPU_PREEMPTION_LEVEL
678
679 jmp ret_to_kernel
680
681 /*
682 * Take an AST from an interrupted user
683 */
684 ast_from_interrupt_user:
685 movl %gs:CPU_PENDING_AST,%eax
686 testl %eax,%eax /* pending ASTs? */
687 je EXT(ret_to_user) /* no, nothing to do */
688
689 TIME_TRAP_UENTRY
690
691 jmp EXT(return_from_trap) /* return */
692
693
694 /*******************************************************************************************************
695 *
696 * 32bit Tasks
697 * System call entries via INTR_GATE or sysenter:
698 *
699 * esp -> i386_saved_state_t
700 * cr3 -> kernel directory
701 * esp -> low based stack
702 * gs -> CPU_DATA_GS
703 * cs -> KERNEL_CS
704 * ss/ds/es -> KERNEL_DS
705 *
706 * interrupts disabled
707 * direction flag cleared
708 */
709
710 Entry(lo_sysenter)
711 /*
712 * We can be here either for a mach syscall or a unix syscall,
713 * as indicated by the sign of the code:
714 */
715 movl R_EAX(%esp),%eax
716 testl %eax,%eax
717 js EXT(lo_mach_scall) /* < 0 => mach */
718 	/* >= 0 => unix */
719
720 Entry(lo_unix_scall)
721 TIME_TRAP_UENTRY
722
723 movl %gs:CPU_KERNEL_STACK,%ebx
724 xchgl %ebx,%esp /* switch to kernel stack */
725
726 sti
727 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
728 movl ACT_TASK(%ecx),%ecx /* point to current task */
729 addl $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */
730
731 CCALL1(unix_syscall, %ebx)
732 /*
733 * always returns through thread_exception_return
734 */
735
736
737 Entry(lo_mach_scall)
738 TIME_TRAP_UENTRY
739
740 movl %gs:CPU_KERNEL_STACK,%ebx
741 xchgl %ebx,%esp /* switch to kernel stack */
742
743 sti
744 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
745 movl ACT_TASK(%ecx),%ecx /* point to current task */
746 addl $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */
747
748 CCALL1(mach_call_munger, %ebx)
749 /*
750 * always returns through thread_exception_return
751 */
752
753
754 Entry(lo_mdep_scall)
755 TIME_TRAP_UENTRY
756
757 movl %gs:CPU_KERNEL_STACK,%ebx
758 xchgl %ebx,%esp /* switch to kernel stack */
759
760 sti
761
762 CCALL1(machdep_syscall, %ebx)
763 /*
764 * always returns through thread_exception_return
765 */
766
767
768 Entry(lo_diag_scall)
769 TIME_TRAP_UENTRY
770
771 movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
772 xchgl %ebx,%esp // Switch to it, saving the previous
773
774 CCALL1(diagCall, %ebx) // Call diagnostics
775 cli // Disable interruptions just in case they were enabled
776 popl %esp // Get back the original stack
777
778 cmpl $0,%eax // What kind of return is this?
779 jne EXT(return_to_user) // Normal return, do not check asts...
780
781 CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
782 // pass what would be the diag syscall
783 // error return - cause an exception
784 /* no return */
785
786
787
788 /*******************************************************************************************************
789 *
790 * 64bit Tasks
791 * System call entries via syscall only:
792 *
793 * esp -> x86_saved_state64_t
794 * cr3 -> kernel directory
795 * esp -> low based stack
796 * gs -> CPU_DATA_GS
797 * cs -> KERNEL_CS
798 * ss/ds/es -> KERNEL_DS
799 *
800 * interrupts disabled
801 * direction flag cleared
802 */
803
804 Entry(lo_syscall)
805 /*
806 * We can be here either for a mach, unix machdep or diag syscall,
807 * as indicated by the syscall class:
808 */
809 movl R64_RAX(%esp), %eax /* syscall number/class */
810 movl %eax, %ebx
811 andl $(SYSCALL_CLASS_MASK), %ebx /* syscall class */
812 cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
813 je EXT(lo64_mach_scall)
814 cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
815 je EXT(lo64_unix_scall)
816 cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
817 je EXT(lo64_mdep_scall)
818 cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
819 je EXT(lo64_diag_scall)
820
821 /* Syscall class unknown */
822 CCALL3(i386_exception, $(EXC_SYSCALL), %eax, $1)
823 /* no return */
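
/*
 * The same dispatch restated in C (a sketch; the class macros come
 * from mach/i386/syscall_sw.h, and only the low 32 bits of %rax are
 * examined):
 *
 *	switch (regs->rax & SYSCALL_CLASS_MASK) {
 *	case SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT:
 *		lo64_mach_scall();	// and so on for UNIX/MDEP/DIAG
 *		break;
 *	default:
 *		i386_exception(EXC_SYSCALL, regs->rax, 1);
 *	}
 */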
824
825 Entry(lo64_unix_scall)
826 TIME_TRAP_UENTRY
827
828 movl %gs:CPU_KERNEL_STACK,%ebx
829 xchgl %ebx,%esp /* switch to kernel stack */
830
831 sti
832 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
833 movl ACT_TASK(%ecx),%ecx /* point to current task */
834 addl $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */
835
836 CCALL1(unix_syscall64, %ebx)
837 /*
838 * always returns through thread_exception_return
839 */
840
841
842 Entry(lo64_mach_scall)
843 TIME_TRAP_UENTRY
844
845 movl %gs:CPU_KERNEL_STACK,%ebx
846 xchgl %ebx,%esp /* switch to kernel stack */
847
848 sti
849 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
850 movl ACT_TASK(%ecx),%ecx /* point to current task */
851 addl $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */
852
853 CCALL1(mach_call_munger64, %ebx)
854 /*
855 * always returns through thread_exception_return
856 */
857
858
859 Entry(lo64_mdep_scall)
860 TIME_TRAP_UENTRY
861
862 movl %gs:CPU_KERNEL_STACK,%ebx
863 xchgl %ebx,%esp /* switch to kernel stack */
864
865 sti
866
867 CCALL1(machdep_syscall64, %ebx)
868 /*
869 * always returns through thread_exception_return
870 */
871
872
873 Entry(lo64_diag_scall)
874 TIME_TRAP_UENTRY
875
876 movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
877 xchgl %ebx,%esp // Switch to it, saving the previous
878
879 pushl %ebx // Push the previous stack
880 CCALL1(diagCall64, %ebx) // Call diagnostics
881 cli // Disable interruptions just in case they were enabled
882 popl %esp // Get back the original stack
883
884 cmpl $0,%eax // What kind of return is this?
885 jne EXT(return_to_user) // Normal return, do not check asts...
886
887 CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
888 /* no return */
889
890
891
892 /******************************************************************************************************/
893
894 /*\f*/
895 /*
896 * Utility routines.
897 */
898
899
900 /*
901 * Copy from user/kernel address space.
902 * arg0: window offset or kernel address
903 * arg1: kernel address
904 * arg2: byte count
905 */
906 ENTRY(copyinphys_user)
907 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
908 mov %cx,%ds
909
910 ENTRY(copyinphys_kern)
911 movl $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
912 mov %cx,%es
913 jmp copyin_common
914
915 ENTRY(copyin_user)
916 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
917 mov %cx,%ds
918
919 ENTRY(copyin_kern)
920
921 copyin_common:
922 pushl %esi
923 pushl %edi /* save registers */
924
925 movl 8+S_ARG0,%esi /* get source - window offset or kernel address */
926 movl 8+S_ARG1,%edi /* get destination - kernel address */
927 movl 8+S_ARG2,%edx /* get count */
928
929 cld /* count up */
930 movl %edx,%ecx /* move by longwords first */
931 shrl $2,%ecx
932 RECOVERY_SECTION
933 RECOVER(copyin_fail)
934 rep
935 movsl /* move longwords */
936 movl %edx,%ecx /* now move remaining bytes */
937 andl $3,%ecx
938 RECOVERY_SECTION
939 RECOVER(copyin_fail)
940 rep
941 movsb
942 xorl %eax,%eax /* return 0 for success */
943 copyin_ret:
944 mov %ss,%cx /* restore kernel data and extended segments */
945 mov %cx,%ds
946 mov %cx,%es
947
948 popl %edi /* restore registers */
949 popl %esi
950 ret /* and return */
951
952 copyin_fail:
953 movl $(EFAULT),%eax /* return error for failure */
954 jmp copyin_ret /* pop frame and return */
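
/*
 * C-level contract (a sketch): the entry points above implement
 *
 *	int copyin(const user_addr_t uaddr, void *kaddr, size_t len);
 *
 * returning 0 on success or EFAULT if a fault is taken mid-copy, in
 * which case the RECOVER() entries redirect execution to copyin_fail.
 */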
955
956
957
958 /*
959 * Copy string from user/kern address space.
960 * arg0: window offset or kernel address
961 * arg1: kernel address
962 * arg2: max byte count
963 * arg3: actual byte count (OUT)
964 */
965 Entry(copyinstr_kern)
966 mov %ds,%cx
967 jmp copyinstr_common
968
969 Entry(copyinstr_user)
970 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
971
972 copyinstr_common:
973 mov %cx,%fs
974
975 pushl %esi
976 pushl %edi /* save registers */
977
978 movl 8+S_ARG0,%esi /* get source - window offset or kernel address */
979 movl 8+S_ARG1,%edi /* get destination - kernel address */
980 movl 8+S_ARG2,%edx /* get count */
981
982 xorl %eax,%eax /* set to 0 here so that the high 24 bits */
983 /* are 0 for the cmpl against 0 */
984 2:
985 RECOVERY_SECTION
986 RECOVER(copystr_fail) /* copy bytes... */
987 movb %fs:(%esi),%al
988 incl %esi
989 	testl	%edi,%edi		/* if kernel address is NULL... */
990 	jz	3f			/* ...skip storing the byte */
991 movb %al,(%edi) /* copy the byte */
992 incl %edi
993 3:
994 testl %eax,%eax /* did we just stuff the 0-byte? */
995 jz 4f /* yes, return 0 status already in %eax */
996 decl %edx /* decrement #bytes left in buffer */
997 jnz 2b /* buffer not full so copy in another byte */
998 movl $(ENAMETOOLONG),%eax /* buffer full but no 0-byte: ENAMETOOLONG */
999 4:
1000 movl 8+S_ARG3,%edi /* get OUT len ptr */
1001 cmpl $0,%edi
1002 jz copystr_ret /* if null, just return */
1003 subl 8+S_ARG0,%esi
1004 movl %esi,(%edi) /* else set OUT arg to xfer len */
1005 copystr_ret:
1006 popl %edi /* restore registers */
1007 popl %esi
1008 ret /* and return */
1009
1010 copystr_fail:
1011 movl $(EFAULT),%eax /* return error for failure */
1012 jmp copystr_ret /* pop frame and return */
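
/*
 * Note on the OUT count: on success %esi has advanced one past the
 * 0-byte, so the length stored through arg3 includes the terminator;
 * copying "ab", for example, reports 3.
 */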
1013
1014
1015 /*
1016 * Copy to user/kern address space.
1017 * arg0: kernel address
1018 * arg1: window offset or kernel address
1019 * arg2: byte count
1020 */
1021 ENTRY(copyoutphys_user)
1022 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
1023 mov %cx,%es
1024
1025 ENTRY(copyoutphys_kern)
1026 movl $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
1027 mov %cx,%ds
1028 jmp copyout_common
1029
1030 ENTRY(copyout_user)
1031 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
1032 mov %cx,%es
1033
1034 ENTRY(copyout_kern)
1035
1036 copyout_common:
1037 pushl %esi
1038 pushl %edi /* save registers */
1039
1040 movl 8+S_ARG0,%esi /* get source - kernel address */
1041 movl 8+S_ARG1,%edi /* get destination - window offset or kernel address */
1042 movl 8+S_ARG2,%edx /* get count */
1043
1044 cld /* count up */
1045 movl %edx,%ecx /* move by longwords first */
1046 shrl $2,%ecx
1047 RECOVERY_SECTION
1048 RECOVER(copyout_fail)
1049 rep
1050 movsl
1051 movl %edx,%ecx /* now move remaining bytes */
1052 andl $3,%ecx
1053 RECOVERY_SECTION
1054 RECOVER(copyout_fail)
1055 rep
1056 movsb /* move */
1057 xorl %eax,%eax /* return 0 for success */
1058 copyout_ret:
1059 mov %ss,%cx /* restore kernel segment */
1060 mov %cx,%es
1061 mov %cx,%ds
1062
1063 popl %edi /* restore registers */
1064 popl %esi
1065 ret /* and return */
1066
1067 copyout_fail:
1068 movl $(EFAULT),%eax /* return error for failure */
1069 jmp copyout_ret /* pop frame and return */
1070
1071
1072 /*
1073 * io register must not be used on slaves (no AT bus)
1074 */
1075 #define ILL_ON_SLAVE
1076
1077
1078 #if MACH_ASSERT
1079
1080 #define ARG0 B_ARG0
1081 #define ARG1 B_ARG1
1082 #define ARG2 B_ARG2
1083 #define PUSH_FRAME FRAME
1084 #define POP_FRAME EMARF
1085
1086 #else /* MACH_ASSERT */
1087
1088 #define ARG0 S_ARG0
1089 #define ARG1 S_ARG1
1090 #define ARG2 S_ARG2
1091 #define PUSH_FRAME
1092 #define POP_FRAME
1093
1094 #endif /* MACH_ASSERT */
1095
1096
1097 #if MACH_KDB || MACH_ASSERT
1098
1099 /*
1100 * The following routines are also defined as macros in i386/pio.h.
1101 * Compile them when MACH_KDB is configured so that they
1102 * can be invoked from the debugger.
1103 */
1104
1105 /*
1106 * void outb(unsigned char *io_port,
1107 * unsigned char byte)
1108 *
1109 * Output a byte to an IO port.
1110 */
1111 ENTRY(outb)
1112 PUSH_FRAME
1113 ILL_ON_SLAVE
1114 movl ARG0,%edx /* IO port address */
1115 movl ARG1,%eax /* data to output */
1116 outb %al,%dx /* send it out */
1117 POP_FRAME
1118 ret
1119
1120 /*
1121 * unsigned char inb(unsigned char *io_port)
1122 *
1123 * Input a byte from an IO port.
1124 */
1125 ENTRY(inb)
1126 PUSH_FRAME
1127 ILL_ON_SLAVE
1128 movl ARG0,%edx /* IO port address */
1129 xor %eax,%eax /* clear high bits of register */
1130 inb %dx,%al /* get the byte */
1131 POP_FRAME
1132 ret
1133
1134 /*
1135 * void outw(unsigned short *io_port,
1136 * unsigned short word)
1137 *
1138 * Output a word to an IO port.
1139 */
1140 ENTRY(outw)
1141 PUSH_FRAME
1142 ILL_ON_SLAVE
1143 movl ARG0,%edx /* IO port address */
1144 movl ARG1,%eax /* data to output */
1145 outw %ax,%dx /* send it out */
1146 POP_FRAME
1147 ret
1148
1149 /*
1150 * unsigned short inw(unsigned short *io_port)
1151 *
1152 * Input a word from an IO port.
1153 */
1154 ENTRY(inw)
1155 PUSH_FRAME
1156 ILL_ON_SLAVE
1157 movl ARG0,%edx /* IO port address */
1158 xor %eax,%eax /* clear high bits of register */
1159 inw %dx,%ax /* get the word */
1160 POP_FRAME
1161 ret
1162
1163 /*
1164 * void outl(unsigned int *io_port,
1165 *	unsigned int value)
1166 *
1167 * Output an int to an IO port.
1168 */
1169 ENTRY(outl)
1170 PUSH_FRAME
1171 ILL_ON_SLAVE
1172 movl ARG0,%edx /* IO port address*/
1173 movl ARG1,%eax /* data to output */
1174 outl %eax,%dx /* send it out */
1175 POP_FRAME
1176 ret
1177
1178 /*
1179 * unsigned int inl(unsigned int *io_port)
1180 *
1181 * Input an int from an IO port.
1182 */
1183 ENTRY(inl)
1184 PUSH_FRAME
1185 ILL_ON_SLAVE
1186 movl ARG0,%edx /* IO port address */
1187 inl %dx,%eax /* get the int */
1188 POP_FRAME
1189 ret
1190
1191 #endif /* MACH_KDB || MACH_ASSERT*/
1192
1193 /*
1194 * void loutb(unsigned byte *io_port,
1195 * unsigned byte *data,
1196 * unsigned int count)
1197 *
1198 * Output an array of bytes to an IO port.
1199 */
1200 ENTRY(loutb)
1201 ENTRY(outsb)
1202 PUSH_FRAME
1203 ILL_ON_SLAVE
1204 movl %esi,%eax /* save register */
1205 movl ARG0,%edx /* get io port number */
1206 movl ARG1,%esi /* get data address */
1207 movl ARG2,%ecx /* get count */
1208 cld /* count up */
1209 rep
1210 outsb /* output */
1211 movl %eax,%esi /* restore register */
1212 POP_FRAME
1213 ret
1214
1215
1216 /*
1217 * void loutw(unsigned short *io_port,
1218 * unsigned short *data,
1219 * unsigned int count)
1220 *
1221 * Output an array of shorts to an IO port.
1222 */
1223 ENTRY(loutw)
1224 ENTRY(outsw)
1225 PUSH_FRAME
1226 ILL_ON_SLAVE
1227 movl %esi,%eax /* save register */
1228 movl ARG0,%edx /* get io port number */
1229 movl ARG1,%esi /* get data address */
1230 movl ARG2,%ecx /* get count */
1231 cld /* count up */
1232 rep
1233 outsw /* output */
1234 movl %eax,%esi /* restore register */
1235 POP_FRAME
1236 ret
1237
1238 /*
1239 * void loutl(unsigned short io_port,
1240 * unsigned int *data,
1241 * unsigned int count)
1242 *
1243 * Output an array of longs to an IO port.
1244 */
1245 ENTRY(loutl)
1246 ENTRY(outsl)
1247 PUSH_FRAME
1248 ILL_ON_SLAVE
1249 movl %esi,%eax /* save register */
1250 movl ARG0,%edx /* get io port number */
1251 movl ARG1,%esi /* get data address */
1252 movl ARG2,%ecx /* get count */
1253 cld /* count up */
1254 rep
1255 outsl /* output */
1256 movl %eax,%esi /* restore register */
1257 POP_FRAME
1258 ret
1259
1260
1261 /*
1262 * void linb(unsigned char *io_port,
1263 * unsigned char *data,
1264 * unsigned int count)
1265 *
1266 * Input an array of bytes from an IO port.
1267 */
1268 ENTRY(linb)
1269 ENTRY(insb)
1270 PUSH_FRAME
1271 ILL_ON_SLAVE
1272 movl %edi,%eax /* save register */
1273 movl ARG0,%edx /* get io port number */
1274 movl ARG1,%edi /* get data address */
1275 movl ARG2,%ecx /* get count */
1276 cld /* count up */
1277 rep
1278 insb /* input */
1279 movl %eax,%edi /* restore register */
1280 POP_FRAME
1281 ret
1282
1283
1284 /*
1285 * void linw(unsigned short *io_port,
1286 * unsigned short *data,
1287 * unsigned int count)
1288 *
1289 * Input an array of shorts from an IO port.
1290 */
1291 ENTRY(linw)
1292 ENTRY(insw)
1293 PUSH_FRAME
1294 ILL_ON_SLAVE
1295 movl %edi,%eax /* save register */
1296 movl ARG0,%edx /* get io port number */
1297 movl ARG1,%edi /* get data address */
1298 movl ARG2,%ecx /* get count */
1299 cld /* count up */
1300 rep
1301 insw /* input */
1302 movl %eax,%edi /* restore register */
1303 POP_FRAME
1304 ret
1305
1306
1307 /*
1308 * void linl(unsigned short io_port,
1309 * unsigned int *data,
1310 * unsigned int count)
1311 *
1312 * Input an array of longs from an IO port.
1313 */
1314 ENTRY(linl)
1315 ENTRY(insl)
1316 PUSH_FRAME
1317 ILL_ON_SLAVE
1318 movl %edi,%eax /* save register */
1319 movl ARG0,%edx /* get io port number */
1320 movl ARG1,%edi /* get data address */
1321 movl ARG2,%ecx /* get count */
1322 cld /* count up */
1323 rep
1324 insl /* input */
1325 movl %eax,%edi /* restore register */
1326 POP_FRAME
1327 ret
1328
1329 /*
1330 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
1331 */
1332 ENTRY(rdmsr_carefully)
1333 movl S_ARG0, %ecx
1334 RECOVERY_SECTION
1335 RECOVER(rdmsr_fail)
1336 rdmsr
1337 movl S_ARG1, %ecx
1338 movl %eax, (%ecx)
1339 movl S_ARG2, %ecx
1340 movl %edx, (%ecx)
1341 movl $0, %eax
1342 ret
1343
1344 rdmsr_fail:
1345 movl $1, %eax
1346 ret
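
/*
 * Typical use (a sketch; the MSR constant is hypothetical): probe an
 * MSR that may not exist without taking an unhandled GP fault, since
 * the RECOVER() entry reroutes the fault to rdmsr_fail:
 *
 *	uint32_t lo, hi;
 *	uint64_t value = 0;
 *	if (rdmsr_carefully(MSR_HYPOTHETICAL, &lo, &hi) == 0)
 *		value = ((uint64_t)hi << 32) | lo;
 *	// else: MSR not implemented on this CPU
 */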
1347
1348 /*
1349 * Done with recovery table.
1350 */
1351 RECOVERY_SECTION
1352 RECOVER_TABLE_END
1353
1354
1355
1356 ENTRY(dr6)
1357 movl %db6, %eax
1358 ret
1359
1360 /* dr<i>(address, type, len, persistence)
1361 */
1362 ENTRY(dr0)
1363 movl S_ARG0, %eax
1364 movl %eax,EXT(dr_addr)
1365 movl %eax, %db0
1366 movl $0, %ecx
1367 jmp 0f
1368 ENTRY(dr1)
1369 movl S_ARG0, %eax
1370 movl %eax,EXT(dr_addr)+1*4
1371 movl %eax, %db1
1372 movl $2, %ecx
1373 jmp 0f
1374 ENTRY(dr2)
1375 movl S_ARG0, %eax
1376 movl %eax,EXT(dr_addr)+2*4
1377 movl %eax, %db2
1378 movl $4, %ecx
1379 jmp 0f
1380
1381 ENTRY(dr3)
1382 movl S_ARG0, %eax
1383 movl %eax,EXT(dr_addr)+3*4
1384 movl %eax, %db3
1385 movl $6, %ecx
1386
1387 0:
1388 pushl %ebp
1389 movl %esp, %ebp
1390
1391 movl %db7, %edx
1392 movl %edx,EXT(dr_addr)+4*4
1393 andl dr_msk(,%ecx,2),%edx /* clear out new entry */
1394 movl %edx,EXT(dr_addr)+5*4
1395 movzbl B_ARG3, %eax
1396 andb $3, %al
1397 shll %cl, %eax
1398 orl %eax, %edx
1399
1400 movzbl B_ARG1, %eax
1401 andb $3, %al
1402 	leal	0x10(%ecx,%ecx),%ecx	/* %cl = 16 + 4*i: type/len fields are 4 bits apart */
1403 shll %cl, %eax
1404 orl %eax, %edx
1405
1406 movzbl B_ARG2, %eax
1407 andb $3, %al
1408 addb $0x2, %cl
1409 shll %cl, %eax
1410 orl %eax, %edx
1411
1412 movl %edx, %db7
1413 movl %edx,EXT(dr_addr)+7*4
1414 movl %edx, %eax
1415 leave
1416 ret
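
/*
 * DR7 layout being assembled above (per the IA-32 SDM): the enable
 * bits for breakpoint i sit at bit 2*i and its 4-bit type/len field
 * at bit 16 + 4*i; dr_msk[i] below clears exactly those positions
 * first. In C (a sketch):
 *
 *	dr7 &= dr_msk[i];
 *	dr7 |= (persistence & 3) << (2 * i);		// L/G enables
 *	dr7 |= (type & 3) << (16 + 4 * i);		// R/W field
 *	dr7 |= (len & 3) << (18 + 4 * i);		// LEN field
 */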
1417
1418 .data
1419 dr_msk:
1420 .long ~0x000f0003
1421 .long ~0x00f0000c
1422 .long ~0x0f000030
1423 .long ~0xf00000c0
1424 ENTRY(dr_addr)
1425 .long 0,0,0,0
1426 .long 0,0,0,0
1427
1428 .text
1429
1430 ENTRY(get_cr0)
1431 movl %cr0, %eax
1432 ret
1433
1434 ENTRY(set_cr0)
1435 movl 4(%esp), %eax
1436 movl %eax, %cr0
1437 ret
1438
1439 #ifndef SYMMETRY
1440
1441 /*
1442 * ffs(mask)
1443 */
1444 ENTRY(ffs)
1445 bsfl S_ARG0, %eax
1446 jz 0f
1447 incl %eax
1448 ret
1449 0: xorl %eax, %eax
1450 ret
1451
1452 /*
1453 * cpu_shutdown()
1454 * Force reboot
1455 */
1456
1457 null_idtr:
1458 .word 0
1459 .long 0
1460
1461 Entry(cpu_shutdown)
1462 lidt null_idtr /* disable the interrupt handler */
1463 xor %ecx,%ecx /* generate a divide by zero */
1464 div %ecx,%eax /* reboot now */
1465 ret /* this will "never" be executed */
1466
1467 #endif /* SYMMETRY */
1468
1469
1470 /*
1471 * setbit(int bitno, int *s) - set bit in bit string
1472 */
1473 ENTRY(setbit)
1474 movl S_ARG0, %ecx /* bit number */
1475 movl S_ARG1, %eax /* address */
1476 btsl %ecx, (%eax) /* set bit */
1477 ret
1478
1479 /*
1480 * clrbit(int bitno, int *s) - clear bit in bit string
1481 */
1482 ENTRY(clrbit)
1483 movl S_ARG0, %ecx /* bit number */
1484 movl S_ARG1, %eax /* address */
1485 btrl %ecx, (%eax) /* clear bit */
1486 ret
1487
1488 /*
1489 * ffsbit(int *s) - find first set bit in bit string
1490 */
1491 ENTRY(ffsbit)
1492 movl S_ARG0, %ecx /* address */
1493 movl $0, %edx /* base offset */
1494 0:
1495 bsfl (%ecx), %eax /* check argument bits */
1496 jnz 1f /* found bit, return */
1497 addl $4, %ecx /* increment address */
1498 addl $32, %edx /* increment offset */
1499 jmp 0b /* try again */
1500 1:
1501 addl %edx, %eax /* return offset */
1502 ret
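
/*
 * Caveat: ffsbit has no length bound; if no bit in the string is set
 * it walks off the end of the array. Callers must guarantee at least
 * one set bit.
 */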
1503
1504 /*
1505 * testbit(int nr, volatile void *array)
1506 *
1507 * Test to see if the bit is set within the bit string
1508 */
1509
1510 ENTRY(testbit)
1511 movl S_ARG0,%eax /* Get the bit to test */
1512 movl S_ARG1,%ecx /* get the array string */
1513 btl %eax,(%ecx)
1514 sbbl %eax,%eax
1515 ret
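
/*
 * The btl/sbbl idiom copies the tested bit into CF and then broadcasts
 * it: %eax becomes -1 if the bit was set, 0 if clear. Equivalent C
 * (a sketch):
 *
 *	int testbit(int nr, volatile void *array) {
 *		uint32_t w = ((volatile uint32_t *)array)[nr >> 5];
 *		return ((w >> (nr & 31)) & 1) ? -1 : 0;
 *	}
 */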
1516
1517 ENTRY(get_pc)
1518 movl 4(%ebp),%eax
1519 ret
1520
1521 ENTRY(minsecurity)
1522 pushl %ebp
1523 movl %esp,%ebp
1524 /*
1525 * jail: set the EIP to "jail" to block a kernel thread.
1526 * Useful to debug synchronization problems on MPs.
1527 */
1528 ENTRY(jail)
1529 jmp EXT(jail)
1530
1531 /*
1532 * unsigned int
1533 * div_scale(unsigned int dividend,
1534 * unsigned int divisor,
1535 * unsigned int *scale)
1536 *
1537 * This function returns (dividend << *scale) / divisor, where *scale
1538 * is the largest possible value before overflow. This is used in
1539 * computation where precision must be achieved in order to avoid
1540 * floating point usage.
1541 *
1542 * Algorithm:
1543 * *scale = 0;
1544 * while (((dividend >> *scale) >= divisor))
1545 * (*scale)++;
1546 * *scale = 32 - *scale;
1547 * return ((dividend << *scale) / divisor);
1548 */
1549 ENTRY(div_scale)
1550 PUSH_FRAME
1551 xorl %ecx, %ecx /* *scale = 0 */
1552 xorl %eax, %eax
1553 movl ARG0, %edx /* get dividend */
1554 0:
1555 cmpl ARG1, %edx /* if (divisor > dividend) */
1556 jle 1f /* goto 1f */
1557 addl $1, %ecx /* (*scale)++ */
1558 shrdl $1, %edx, %eax /* dividend >> 1 */
1559 shrl $1, %edx /* dividend >> 1 */
1560 jmp 0b /* goto 0b */
1561 1:
1562 divl ARG1 /* (dividend << (32 - *scale)) / divisor */
1563 movl ARG2, %edx /* get scale */
1564 movl $32, (%edx) /* *scale = 32 */
1565 subl %ecx, (%edx) /* *scale -= %ecx */
1566 POP_FRAME
1567 ret
1568
1569 /*
1570 * unsigned int
1571 * mul_scale(unsigned int multiplicand,
1572 * unsigned int multiplier,
1573 * unsigned int *scale)
1574 *
1575 * This function returns ((multiplicand * multiplier) >> *scale) where
1576 * scale is the largest possible value before overflow. This is used in
1577 * computation where precision must be achieved in order to avoid
1578 * floating point usage.
1579 *
1580 * Algorithm:
1581 * *scale = 0;
1582 * while (overflow((multiplicand * multiplier) >> *scale))
1583 * (*scale)++;
1584 * return ((multiplicand * multiplier) >> *scale);
1585 */
1586 ENTRY(mul_scale)
1587 PUSH_FRAME
1588 xorl %ecx, %ecx /* *scale = 0 */
1589 movl ARG0, %eax /* get multiplicand */
1590 mull ARG1 /* multiplicand * multiplier */
1591 0:
1592 cmpl $0, %edx /* if (!overflow()) */
1593 je 1f /* goto 1 */
1594 addl $1, %ecx /* (*scale)++ */
1595 shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */
1596 shrl $1, %edx /* (multiplicand * multiplier) >> 1 */
1597 jmp 0b
1598 1:
1599 movl ARG2, %edx /* get scale */
1600 movl %ecx, (%edx) /* set *scale */
1601 POP_FRAME
1602 ret
1603
1604
1605
1606 /*
1607 * Double-fault exception handler task. The last gasp...
1608 */
1609 Entry(df_task_start)
1610 CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))
1611 hlt
1612
1613
1614 /*
1615 * machine-check handler task. The last gasp...
1616 */
1617 Entry(mc_task_start)
1618 CCALL1(panic_machine_check, $(T_MACHINE_CHECK))
1619 hlt
1620
1621 /*
1622 * Compatibility mode's last gasp...
1623 */
1624 Entry(lo_df64)
1625 movl %esp, %eax
1626 CCALL1(panic_double_fault64, %eax)
1627 hlt
1628
1629 Entry(lo_mc64)
1630 movl %esp, %eax
1631 CCALL1(panic_machine_check64, %eax)
1632 hlt
1633