]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/locore.s
xnu-792.18.15.tar.gz
[apple/xnu.git] / osfmk / i386 / locore.s
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
1c79356b
A
57#include <mach_rt.h>
58#include <platforms.h>
59#include <mach_kdb.h>
60#include <mach_kgdb.h>
61#include <mach_kdp.h>
62#include <stat_time.h>
63#include <mach_assert.h>
64
65#include <sys/errno.h>
66#include <i386/asm.h>
67#include <i386/cpuid.h>
68#include <i386/eflags.h>
69#include <i386/proc_reg.h>
70#include <i386/trap.h>
71#include <assym.s>
72#include <mach/exception_types.h>
73
89b3af67
A
74#define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
75#include <mach/i386/syscall_sw.h>
8f6c56a5 76
89b3af67 77#include <i386/mp.h>
8f6c56a5 78
91447636
A
79/*
80 * PTmap is recursive pagemap at top of virtual address space.
81 * Within PTmap, the page directory can be found (third indirection).
82*/
83 .globl _PTmap,_PTD,_PTDpde
84 .set _PTmap,(PTDPTDI << PDESHIFT)
85 .set _PTD,_PTmap + (PTDPTDI * NBPG)
86 .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
87
88/*
89 * APTmap, APTD is the alternate recursive pagemap.
90 * It's used when modifying another process's page tables.
91 */
92 .globl _APTmap,_APTD,_APTDpde
93 .set _APTmap,(APTDPTDI << PDESHIFT)
94 .set _APTD,_APTmap + (APTDPTDI * NBPG)
95 .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
96
1c79356b
A
97#if __MACHO__
98/* Under Mach-O, etext is a variable which contains
99 * the last text address
100 */
101#define ETEXT_ADDR (EXT(etext))
102#else
103/* Under ELF and other non-Mach-O formats, the address of
104 * etext represents the last text address
105 */
9bccf70c 106#define ETEXT_ADDR $ EXT(etext)
1c79356b
A
107#endif
108
1c79356b
A
109#define CX(addr,reg) addr(,reg,4)
110
89b3af67
A
111/*
112 * The following macros make calls into C code.
113 * They dynamically align the stack to 16 bytes.
114 * Arguments are moved (not pushed) onto the correctly aligned stack.
115 * NOTE: EDI is destroyed in the process, and hence cannot
116 * be directly used as a parameter. Users of this macro must
117 * independently preserve EDI (a non-volatile) if the routine is
118 * intended to be called from C, for instance.
119 */
120
121#define CCALL(fn) \
122 movl %esp, %edi ;\
123 andl $0xFFFFFFF0, %esp ;\
124 call EXT(fn) ;\
125 movl %edi, %esp
126
127#define CCALL1(fn, arg1) \
128 movl %esp, %edi ;\
129 subl $4, %esp ;\
130 andl $0xFFFFFFF0, %esp ;\
131 movl arg1, 0(%esp) ;\
132 call EXT(fn) ;\
133 movl %edi, %esp
134
135#define CCALL2(fn, arg1, arg2) \
136 movl %esp, %edi ;\
137 subl $8, %esp ;\
138 andl $0xFFFFFFF0, %esp ;\
139 movl arg2, 4(%esp) ;\
140 movl arg1, 0(%esp) ;\
141 call EXT(fn) ;\
142 movl %edi, %esp
143
144#define CCALL3(fn, arg1, arg2, arg3) \
145 movl %esp, %edi ;\
146 subl $12, %esp ;\
147 andl $0xFFFFFFF0, %esp ;\
148 movl arg3, 8(%esp) ;\
149 movl arg2, 4(%esp) ;\
150 movl arg1, 0(%esp) ;\
151 call EXT(fn) ;\
152 movl %edi, %esp
153
1c79356b
A
154 .text
155locore_start:
156
157/*
158 * Fault recovery.
159 */
160
161#ifdef __MACHO__
162#define RECOVERY_SECTION .section __VECTORS, __recover
1c79356b
A
163#else
164#define RECOVERY_SECTION .text
165#define RECOVERY_SECTION .text
166#endif
167
168#define RECOVER_TABLE_START \
169 .align 2 ; \
170 .globl EXT(recover_table) ;\
171LEXT(recover_table) ;\
172 .text
173
174#define RECOVER(addr) \
175 .align 2; \
176 .long 9f ;\
177 .long addr ;\
178 .text ;\
1799:
180
181#define RECOVER_TABLE_END \
182 .align 2 ;\
183 .globl EXT(recover_table_end) ;\
184LEXT(recover_table_end) ;\
185 .text
186
187/*
89b3af67 188 * Allocate recovery and table.
1c79356b
A
189 */
190 RECOVERY_SECTION
191 RECOVER_TABLE_START
1c79356b
A
192
193/*
194 * Timing routines.
195 */
91447636
A
196Entry(timer_update)
197 movl 4(%esp),%ecx
198 movl 8(%esp),%eax
199 movl 12(%esp),%edx
200 movl %eax,TIMER_HIGHCHK(%ecx)
201 movl %edx,TIMER_LOW(%ecx)
202 movl %eax,TIMER_HIGH(%ecx)
203 ret
204
205Entry(timer_grab)
206 movl 4(%esp),%ecx
2070: movl TIMER_HIGH(%ecx),%edx
208 movl TIMER_LOW(%ecx),%eax
209 cmpl TIMER_HIGHCHK(%ecx),%edx
210 jne 0b
211 ret
212
1c79356b
A
213#if STAT_TIME
214
215#define TIME_TRAP_UENTRY
216#define TIME_TRAP_UEXIT
217#define TIME_INT_ENTRY
218#define TIME_INT_EXIT
219
91447636
A
220#else
221/*
222 * Nanosecond timing.
223 */
224
225/*
226 * Low 32-bits of nanotime returned in %eax.
89b3af67
A
227 * Computed from tsc based on the scale factor
228 * and an implicit 32 bit shift.
229 *
230 * Uses %esi, %edi, %ebx, %ecx and %edx.
231 */
232#define RNT_INFO _rtc_nanotime_info
233#define NANOTIME32 \
2340: movl RNT_INFO+RNT_TSC_BASE,%esi ;\
235 movl RNT_INFO+RNT_TSC_BASE+4,%edi ;\
236 rdtsc ;\
237 subl %esi,%eax /* tsc - tsc_base */ ;\
238 sbbl %edi,%edx ;\
239 movl RNT_INFO+RNT_SCALE,%ecx ;\
240 movl %edx,%ebx /* delta * scale */ ;\
241 mull %ecx ;\
242 movl %ebx,%eax ;\
243 movl %edx,%ebx ;\
244 mull %ecx ;\
245 addl %ebx,%eax ;\
246 addl RNT_INFO+RNT_NS_BASE,%eax /* add ns_base */ ;\
247 cmpl RNT_INFO+RNT_TSC_BASE,%esi ;\
248 jne 0b ;\
249 cmpl RNT_INFO+RNT_TSC_BASE+4,%edi ;\
250 jne 0b
1c79356b
A
251
252/*
91447636 253 * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
1c79356b 254 */
91447636
A
255#define TIMER_UPDATE(treg,dreg) \
256 addl TIMER_LOW(treg),dreg /* add delta low bits */ ;\
257 adcl $0,TIMER_HIGHCHK(treg) /* add carry check bits */ ;\
258 movl dreg,TIMER_LOW(treg) /* store updated low bit */ ;\
259 movl TIMER_HIGHCHK(treg),dreg /* copy high check bits */ ;\
260 movl dreg,TIMER_HIGH(treg) /* to high bita */
1c79356b
A
261
262/*
91447636 263 * Add time delta to old timer and start new.
1c79356b 264 */
91447636 265#define TIMER_EVENT(old,new) \
91447636
A
266 NANOTIME32 /* eax low bits nanosecs */ ;\
267 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
268 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
269 movl %eax,%edx /* save timestamp in %edx */ ;\
270 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
271 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
272 addl $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\
273 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ ;\
274 movl %gs:CPU_PROCESSOR,%edx /* get current processor */ ;\
89b3af67
A
275 movl %ecx,CURRENT_TIMER(%edx) /* set current timer */
276
1c79356b
A
277
278/*
279 * Update time on user trap entry.
89b3af67 280 * Uses %eax,%ecx,%edx,%esi.
91447636
A
281 */
282#define TIME_TRAP_UENTRY TIMER_EVENT(USER,SYSTEM)
1c79356b
A
283
284/*
285 * update time on user trap exit.
89b3af67 286 * Uses %eax,%ecx,%edx,%esi.
91447636
A
287 */
288#define TIME_TRAP_UEXIT TIMER_EVENT(SYSTEM,USER)
1c79356b
A
289
290/*
291 * update time on interrupt entry.
89b3af67 292 * Uses %eax,%ecx,%edx,%esi.
1c79356b
A
293 */
294#define TIME_INT_ENTRY \
91447636
A
295 NANOTIME32 /* eax low bits nanosecs */ ;\
296 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
297 movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\
298 movl %eax,%edx /* save timestamp in %edx */ ;\
299 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
300 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
301 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
302 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
303 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
1c79356b
A
304
305/*
306 * update time on interrupt exit.
89b3af67 307 * Uses %eax, %ecx, %edx, %esi.
1c79356b
A
308 */
309#define TIME_INT_EXIT \
91447636
A
310 NANOTIME32 /* eax low bits nanosecs */ ;\
311 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\
312 addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\
313 movl %eax,%edx /* save timestamp in %edx */ ;\
314 subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\
315 TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\
316 movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\
317 movl CURRENT_TIMER(%ecx),%ecx /* interrupted timer */ ;\
318 movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */
1c79356b 319
91447636 320#endif /* STAT_TIME */
1c79356b 321
1c79356b
A
322#undef PDEBUG
323
324#ifdef PDEBUG
325
326/*
327 * Traditional, not ANSI.
328 */
329#define CAH(label) \
330 .data ;\
331 .globl label/**/count ;\
332label/**/count: ;\
333 .long 0 ;\
334 .globl label/**/limit ;\
335label/**/limit: ;\
336 .long 0 ;\
337 .text ;\
338 addl $1,%ss:label/**/count ;\
339 cmpl $0,label/**/limit ;\
340 jz label/**/exit ;\
341 pushl %eax ;\
342label/**/loop: ;\
343 movl %ss:label/**/count,%eax ;\
344 cmpl %eax,%ss:label/**/limit ;\
345 je label/**/loop ;\
346 popl %eax ;\
347label/**/exit:
348
349#else /* PDEBUG */
350
351#define CAH(label)
352
353#endif /* PDEBUG */
89b3af67 354
1c79356b
A
355#if MACH_KDB
356/*
357 * Last-ditch debug code to handle faults that might result
358 * from entering kernel (from collocated server) on an invalid
359 * stack. On collocated entry, there's no hardware-initiated
360 * stack switch, so a valid stack must be in place when an
361 * exception occurs, or we may double-fault.
362 *
363 * In case of a double-fault, our only recourse is to switch
364 * hardware "tasks", so that we avoid using the current stack.
365 *
366 * The idea here is just to get the processor into the debugger,
367 * post-haste. No attempt is made to fix up whatever error got
368 * us here, so presumably continuing from the debugger will
369 * simply land us here again -- at best.
370 */
371#if 0
372/*
373 * Note that the per-fault entry points are not currently
374 * functional. The only way to make them work would be to
375 * set up separate TSS's for each fault type, which doesn't
376 * currently seem worthwhile. (The offset part of a task
377 * gate is always ignored.) So all faults that task switch
378 * currently resume at db_task_start.
379 */
380/*
381 * Double fault (Murphy's point) - error code (0) on stack
382 */
383Entry(db_task_dbl_fault)
384 popl %eax
385 movl $(T_DOUBLE_FAULT),%ebx
386 jmp db_task_start
387/*
388 * Segment not present - error code on stack
389 */
390Entry(db_task_seg_np)
391 popl %eax
392 movl $(T_SEGMENT_NOT_PRESENT),%ebx
393 jmp db_task_start
394/*
395 * Stack fault - error code on (current) stack
396 */
397Entry(db_task_stk_fault)
398 popl %eax
399 movl $(T_STACK_FAULT),%ebx
400 jmp db_task_start
401/*
402 * General protection fault - error code on stack
403 */
404Entry(db_task_gen_prot)
405 popl %eax
406 movl $(T_GENERAL_PROTECTION),%ebx
407 jmp db_task_start
408#endif /* 0 */
409/*
410 * The entry point where execution resumes after last-ditch debugger task
411 * switch.
412 */
413Entry(db_task_start)
414 movl %esp,%edx
89b3af67 415 subl $(ISS32_SIZE),%edx
1c79356b
A
416 movl %edx,%esp /* allocate i386_saved_state on stack */
417 movl %eax,R_ERR(%esp)
418 movl %ebx,R_TRAPNO(%esp)
419 pushl %edx
1c79356b 420 CPU_NUMBER(%edx)
89b3af67 421 movl CX(EXT(master_dbtss),%edx),%edx
1c79356b 422 movl TSS_LINK(%edx),%eax
1c79356b
A
423 pushl %eax /* pass along selector of previous TSS */
424 call EXT(db_tss_to_frame)
425 popl %eax /* get rid of TSS selector */
426 call EXT(db_trap_from_asm)
427 addl $0x4,%esp
428 /*
429 * And now...?
430 */
431 iret /* ha, ha, ha... */
432#endif /* MACH_KDB */
433
434/*
89b3af67
A
435 * Called as a function, makes the current thread
436 * return from the kernel as if from an exception.
1c79356b
A
437 */
438
89b3af67
A
439 .globl EXT(thread_exception_return)
440 .globl EXT(thread_bootstrap_return)
441LEXT(thread_exception_return)
442LEXT(thread_bootstrap_return)
443 cli
444 movl %gs:CPU_KERNEL_STACK,%ecx
445 movl (%ecx),%esp /* switch back to PCB stack */
446 jmp EXT(return_from_trap)
c0fea474 447
89b3af67
A
448Entry(call_continuation)
449 movl S_ARG0,%eax /* get continuation */
450 movl S_ARG1,%edx /* continuation param */
451 movl S_ARG2,%ecx /* wait result */
452 movl %gs:CPU_KERNEL_STACK,%esp /* pop the stack */
453 xorl %ebp,%ebp /* zero frame pointer */
454 subl $8,%esp /* align the stack */
455 pushl %ecx
456 pushl %edx
457 call *%eax /* call continuation */
458 addl $16,%esp
459 movl %gs:CPU_ACTIVE_THREAD,%eax
460 pushl %eax
461 call EXT(thread_terminate)
462
8f6c56a5 463
89b3af67
A
464
465/*******************************************************************************************************
466 *
467 * All 64 bit task 'exceptions' enter lo_alltraps:
468 * esp -> x86_saved_state_t
469 *
470 * The rest of the state is set up as:
471 * cr3 -> kernel directory
472 * esp -> low based stack
473 * gs -> CPU_DATA_GS
474 * cs -> KERNEL_CS
475 * ss/ds/es -> KERNEL_DS
8f6c56a5 476 *
89b3af67
A
477 * interrupts disabled
478 * direction flag cleared
479 */
480Entry(lo_alltraps)
481 movl R_CS(%esp),%eax /* assume 32-bit state */
482 cmpl $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
483 jne 1f
484 movl R64_CS(%esp),%eax /* 64-bit user mode */
4851:
486 testb $3,%eax
487 jz trap_from_kernel
488 /* user mode trap */
8f6c56a5 489 TIME_TRAP_UENTRY
5d5c5d0d
A
490
491 movl %gs:CPU_KERNEL_STACK,%ebx
89b3af67
A
492 xchgl %ebx,%esp /* switch to kernel stack */
493 sti
494
495 CCALL1(user_trap, %ebx) /* call user trap routine */
496 cli /* hold off intrs - critical section */
497 popl %esp /* switch back to PCB stack */
5d5c5d0d 498
8f6c56a5
A
499/*
500 * Return from trap or system call, checking for ASTs.
89b3af67
A
501 * On lowbase PCB stack with intrs disabled
502 */
8f6c56a5 503LEXT(return_from_trap)
89b3af67
A
504 movl %gs:CPU_PENDING_AST,%eax
505 testl %eax,%eax
506 je EXT(return_to_user) /* branch if no AST */
507
508 movl %gs:CPU_KERNEL_STACK,%ebx
509 xchgl %ebx,%esp /* switch to kernel stack */
510 sti /* interrupts always enabled on return to user mode */
511
512 pushl %ebx /* save PCB stack */
513 CCALL1(i386_astintr, $0) /* take the AST */
514 cli
1c79356b
A
515 popl %esp /* switch back to PCB stack (w/exc link) */
516 jmp EXT(return_from_trap) /* and check again (rare) */
1c79356b 517
1c79356b
A
518LEXT(return_to_user)
519 TIME_TRAP_UEXIT
8f6c56a5 520
89b3af67
A
521LEXT(ret_to_user)
522 cmpl $0, %gs:CPU_IS64BIT
523 je EXT(lo_ret_to_user)
524 jmp EXT(lo64_ret_to_user)
8f6c56a5 525
8f6c56a5 526
89b3af67 527
8f6c56a5 528/*
89b3af67
A
529 * Trap from kernel mode. No need to switch stacks.
530 * Interrupts must be off here - we will set them to state at time of trap
531 * as soon as it's safe for us to do so and not recurse doing preemption
1c79356b
A
532 */
533trap_from_kernel:
89b3af67
A
534 movl %esp, %eax /* saved state addr */
535 CCALL1(kernel_trap, %eax) /* to kernel trap routine */
536 cli
8f6c56a5 537
91447636 538 movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
9bccf70c 539 testl $ AST_URGENT,%eax /* any urgent preemption? */
89b3af67
A
540 je ret_to_kernel /* no, nothing to do */
541 cmpl $ T_PREEMPT,R_TRAPNO(%esp)
542 je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */
543 testl $ EFL_IF,R_EFLAGS(%esp) /* interrupts disabled? */
544 je ret_to_kernel
545 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
546 jne ret_to_kernel
91447636 547 movl %gs:CPU_KERNEL_STACK,%eax
1c79356b
A
548 movl %esp,%ecx
549 xorl %eax,%ecx
550 andl $(-KERNEL_STACK_SIZE),%ecx
551 testl %ecx,%ecx /* are we on the kernel stack? */
89b3af67 552 jne ret_to_kernel /* no, skip it */
1c79356b 553
89b3af67 554 CCALL1(i386_astintr, $1) /* take the AST */
1c79356b 555
89b3af67
A
556ret_to_kernel:
557 cmpl $0, %gs:CPU_IS64BIT
558 je EXT(lo_ret_to_kernel)
559 jmp EXT(lo64_ret_to_kernel)
1c79356b 560
8ad349bb 561
8f6c56a5 562
89b3af67
A
563/*******************************************************************************************************
564 *
565 * All interrupts on all tasks enter here with:
566 * esp-> -> x86_saved_state_t
567 *
568 * cr3 -> kernel directory
569 * esp -> low based stack
570 * gs -> CPU_DATA_GS
571 * cs -> KERNEL_CS
572 * ss/ds/es -> KERNEL_DS
573 *
574 * interrupts disabled
575 * direction flag cleared
576 */
577Entry(lo_allintrs)
91447636
A
578 /*
579 * test whether already on interrupt stack
580 */
581 movl %gs:CPU_INT_STACK_TOP,%ecx
582 cmpl %esp,%ecx
583 jb 1f
584 leal -INTSTACK_SIZE(%ecx),%edx
585 cmpl %esp,%edx
586 jb int_from_intstack
89b3af67 5871:
1c79356b
A
588 xchgl %ecx,%esp /* switch to interrupt stack */
589
89b3af67
A
590 movl %cr0,%eax /* get cr0 */
591 orl $(CR0_TS),%eax /* or in TS bit */
592 movl %eax,%cr0 /* set cr0 */
593
594 subl $8, %esp /* for 16-byte stack alignment */
1c79356b 595 pushl %ecx /* save pointer to old stack */
89b3af67 596 movl %ecx,%gs:CPU_INT_STATE /* save intr state */
9bccf70c 597
91447636
A
598 TIME_INT_ENTRY /* do timing */
599
91447636 600 incl %gs:CPU_PREEMPTION_LEVEL
91447636 601 incl %gs:CPU_INTERRUPT_LEVEL
1c79356b 602
89b3af67
A
603 movl %gs:CPU_INT_STATE, %eax
604 CCALL1(PE_incoming_interrupt, %eax) /* call generic interrupt routine */
605
606 cli /* just in case we returned with intrs enabled */
607 xorl %eax,%eax
608 movl %eax,%gs:CPU_INT_STATE /* clear intr state pointer */
1c79356b
A
609
610 .globl EXT(return_to_iret)
611LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
612
91447636 613 decl %gs:CPU_INTERRUPT_LEVEL
91447636 614 decl %gs:CPU_PREEMPTION_LEVEL
1c79356b 615
1c79356b 616 TIME_INT_EXIT /* do timing */
1c79356b 617
89b3af67
A
618 movl %gs:CPU_ACTIVE_THREAD,%eax
619 movl ACT_PCB(%eax),%eax /* get act`s PCB */
620 movl PCB_FPS(%eax),%eax /* get pcb's ims.ifps */
621 cmpl $0,%eax /* Is there a context */
622 je 1f /* Branch if not */
623 movl FP_VALID(%eax),%eax /* Load fp_valid */
624 cmpl $0,%eax /* Check if valid */
625 jne 1f /* Branch if valid */
626 clts /* Clear TS */
627 jmp 2f
6281:
629 movl %cr0,%eax /* get cr0 */
630 orl $(CR0_TS),%eax /* or in TS bit */
631 movl %eax,%cr0 /* set cr0 */
6322:
1c79356b
A
633 popl %esp /* switch back to old stack */
634
89b3af67
A
635 /* Load interrupted code segment into %eax */
636 movl R_CS(%esp),%eax /* assume 32-bit state */
637 cmpl $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
638 jne 3f
639 movl R64_CS(%esp),%eax /* 64-bit user mode */
6403:
641 testb $3,%eax /* user mode, */
642 jnz ast_from_interrupt_user /* go handle potential ASTs */
643 /*
644 * we only want to handle preemption requests if
645 * the interrupt fell in the kernel context
646 * and preemption isn't disabled
647 */
648 movl %gs:CPU_PENDING_AST,%eax
649 testl $ AST_URGENT,%eax /* any urgent requests? */
650 je ret_to_kernel /* no, nothing to do */
651
652 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
653 jne ret_to_kernel /* yes, skip it */
1c79356b 654
91447636 655 movl %gs:CPU_KERNEL_STACK,%eax
1c79356b
A
656 movl %esp,%ecx
657 xorl %eax,%ecx
658 andl $(-KERNEL_STACK_SIZE),%ecx
89b3af67
A
659 testl %ecx,%ecx /* are we on the kernel stack? */
660 jne ret_to_kernel /* no, skip it */
1c79356b 661
89b3af67
A
662 /*
663 * Take an AST from kernel space. We don't need (and don't want)
664 * to do as much as the case where the interrupt came from user
665 * space.
666 */
667 CCALL1(i386_astintr, $1)
1c79356b 668
89b3af67 669 jmp ret_to_kernel
1c79356b 670
1c79356b 671
89b3af67
A
672/*
673 * nested int - simple path, can't preempt etc on way out
674 */
1c79356b 675int_from_intstack:
91447636 676 incl %gs:CPU_PREEMPTION_LEVEL
91447636 677 incl %gs:CPU_INTERRUPT_LEVEL
1c79356b 678
89b3af67
A
679 movl %esp, %edx /* i386_saved_state */
680 CCALL1(PE_incoming_interrupt, %edx)
1c79356b 681
91447636 682 decl %gs:CPU_INTERRUPT_LEVEL
91447636 683 decl %gs:CPU_PREEMPTION_LEVEL
1c79356b 684
89b3af67 685 jmp ret_to_kernel
1c79356b
A
686
687/*
89b3af67
A
688 * Take an AST from an interrupted user
689 */
690ast_from_interrupt_user:
691 movl %gs:CPU_PENDING_AST,%eax
692 testl %eax,%eax /* pending ASTs? */
693 je EXT(ret_to_user) /* no, nothing to do */
8f6c56a5 694
1c79356b
A
695 TIME_TRAP_UENTRY
696
1c79356b
A
697 jmp EXT(return_from_trap) /* return */
698
89b3af67
A
699
700/*******************************************************************************************************
1c79356b 701 *
89b3af67
A
702 * 32bit Tasks
703 * System call entries via INTR_GATE or sysenter:
8ad349bb 704 *
89b3af67
A
705 * esp -> i386_saved_state_t
706 * cr3 -> kernel directory
707 * esp -> low based stack
708 * gs -> CPU_DATA_GS
709 * cs -> KERNEL_CS
710 * ss/ds/es -> KERNEL_DS
8f6c56a5 711 *
89b3af67
A
712 * interrupts disabled
713 * direction flag cleared
8f6c56a5
A
714 */
715
89b3af67
A
716Entry(lo_sysenter)
717 /*
718 * We can be here either for a mach syscall or a unix syscall,
719 * as indicated by the sign of the code:
720 */
721 movl R_EAX(%esp),%eax
722 testl %eax,%eax
723 js EXT(lo_mach_scall) /* < 0 => mach */
724 /* > 0 => unix */
725
726Entry(lo_unix_scall)
5d5c5d0d 727 TIME_TRAP_UENTRY
8ad349bb 728
5d5c5d0d 729 movl %gs:CPU_KERNEL_STACK,%ebx
89b3af67 730 xchgl %ebx,%esp /* switch to kernel stack */
8f6c56a5 731
89b3af67
A
732 sti
733 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
734 movl ACT_TASK(%ecx),%ecx /* point to current task */
735 addl $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */
8f6c56a5 736
89b3af67
A
737 CCALL1(unix_syscall, %ebx)
738 /*
739 * always returns through thread_exception_return
740 */
5d5c5d0d 741
8ad349bb 742
89b3af67
A
743Entry(lo_mach_scall)
744 TIME_TRAP_UENTRY
8ad349bb 745
89b3af67
A
746 movl %gs:CPU_KERNEL_STACK,%ebx
747 xchgl %ebx,%esp /* switch to kernel stack */
8ad349bb 748
89b3af67
A
749 sti
750 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
751 movl ACT_TASK(%ecx),%ecx /* point to current task */
752 addl $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */
8ad349bb 753
89b3af67
A
754 CCALL1(mach_call_munger, %ebx)
755 /*
756 * always returns through thread_exception_return
757 */
8f6c56a5 758
89b3af67
A
759
760Entry(lo_mdep_scall)
761 TIME_TRAP_UENTRY
8f6c56a5 762
89b3af67
A
763 movl %gs:CPU_KERNEL_STACK,%ebx
764 xchgl %ebx,%esp /* switch to kernel stack */
8f6c56a5 765
89b3af67 766 sti
8f6c56a5 767
89b3af67
A
768 CCALL1(machdep_syscall, %ebx)
769 /*
770 * always returns through thread_exception_return
771 */
772
8f6c56a5 773
89b3af67 774Entry(lo_diag_scall)
8f6c56a5
A
775 TIME_TRAP_UENTRY
776
89b3af67
A
777 movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
778 xchgl %ebx,%esp // Switch to it, saving the previous
8f6c56a5 779
89b3af67
A
780 CCALL1(diagCall, %ebx) // Call diagnostics
781 cli // Disable interruptions just in case they were enabled
782 popl %esp // Get back the original stack
783
784 cmpl $0,%eax // What kind of return is this?
785 jne EXT(return_to_user) // Normal return, do not check asts...
786
787 CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
788 // pass what would be the diag syscall
789 // error return - cause an exception
790 /* no return */
791
8f6c56a5 792
8f6c56a5 793
89b3af67
A
794/*******************************************************************************************************
795 *
796 * 64bit Tasks
797 * System call entries via syscall only:
798 *
799 * esp -> x86_saved_state64_t
800 * cr3 -> kernel directory
801 * esp -> low based stack
802 * gs -> CPU_DATA_GS
803 * cs -> KERNEL_CS
804 * ss/ds/es -> KERNEL_DS
805 *
806 * interrupts disabled
807 * direction flag cleared
1c79356b 808 */
1c79356b 809
89b3af67
A
810Entry(lo_syscall)
811 /*
812 * We can be here either for a mach, unix machdep or diag syscall,
813 * as indicated by the syscall class:
814 */
815 movl R64_RAX(%esp), %eax /* syscall number/class */
816 movl %eax, %ebx
817 andl $(SYSCALL_CLASS_MASK), %ebx /* syscall class */
818 cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
819 je EXT(lo64_mach_scall)
820 cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
821 je EXT(lo64_unix_scall)
822 cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
823 je EXT(lo64_mdep_scall)
824 cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
825 je EXT(lo64_diag_scall)
826
827 /* Syscall class unknown */
828 CCALL3(i386_exception, $(EXC_SYSCALL), %eax, $1)
829 /* no return */
1c79356b 830
89b3af67
A
831Entry(lo64_unix_scall)
832 TIME_TRAP_UENTRY
1c79356b 833
89b3af67
A
834 movl %gs:CPU_KERNEL_STACK,%ebx
835 xchgl %ebx,%esp /* switch to kernel stack */
1c79356b 836
89b3af67
A
837 sti
838 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
839 movl ACT_TASK(%ecx),%ecx /* point to current task */
840 addl $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */
1c79356b 841
89b3af67
A
842 CCALL1(unix_syscall64, %ebx)
843 /*
844 * always returns through thread_exception_return
845 */
846
55e303ae 847
89b3af67
A
848Entry(lo64_mach_scall)
849 TIME_TRAP_UENTRY
55e303ae 850
89b3af67
A
851 movl %gs:CPU_KERNEL_STACK,%ebx
852 xchgl %ebx,%esp /* switch to kernel stack */
1c79356b 853
89b3af67
A
854 sti
855 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
856 movl ACT_TASK(%ecx),%ecx /* point to current task */
857 addl $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */
1c79356b 858
89b3af67
A
859 CCALL1(mach_call_munger64, %ebx)
860 /*
861 * always returns through thread_exception_return
862 */
1c79356b 863
89b3af67
A
864
865Entry(lo64_mdep_scall)
866 TIME_TRAP_UENTRY
1c79356b 867
91447636 868 movl %gs:CPU_KERNEL_STACK,%ebx
89b3af67
A
869 xchgl %ebx,%esp /* switch to kernel stack */
870
871 sti
872
873 CCALL1(machdep_syscall64, %ebx)
874 /*
875 * always returns through thread_exception_return
876 */
877
878
879Entry(lo64_diag_scall)
880 TIME_TRAP_UENTRY
1c79356b 881
89b3af67
A
882 movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
883 xchgl %ebx,%esp // Switch to it, saving the previous
884
885 pushl %ebx // Push the previous stack
886 CCALL1(diagCall64, %ebx) // Call diagnostics
887 cli // Disable interruptions just in case they were enabled
888 popl %esp // Get back the original stack
889
890 cmpl $0,%eax // What kind of return is this?
891 jne EXT(return_to_user) // Normal return, do not check asts...
892
893 CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
894 /* no return */
895
1c79356b 896
89b3af67
A
897
898/******************************************************************************************************
899
1c79356b
A
900/*\f*/
901/*
902 * Utility routines.
903 */
904
905
906/*
89b3af67
A
907 * Copy from user/kernel address space.
908 * arg0: window offset or kernel address
1c79356b
A
909 * arg1: kernel address
910 * arg2: byte count
911 */
89b3af67
A
912ENTRY(copyinphys_user)
913 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
914 mov %cx,%ds
915
916ENTRY(copyinphys_kern)
917 movl $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
918 mov %cx,%es
919 jmp copyin_common
920
921ENTRY(copyin_user)
922 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
923 mov %cx,%ds
924
925ENTRY(copyin_kern)
926
927copyin_common:
1c79356b
A
928 pushl %esi
929 pushl %edi /* save registers */
930
89b3af67
A
931 movl 8+S_ARG0,%esi /* get source - window offset or kernel address */
932 movl 8+S_ARG1,%edi /* get destination - kernel address */
1c79356b
A
933 movl 8+S_ARG2,%edx /* get count */
934
1c79356b
A
935 cld /* count up */
936 movl %edx,%ecx /* move by longwords first */
937 shrl $2,%ecx
938 RECOVERY_SECTION
939 RECOVER(copyin_fail)
940 rep
941 movsl /* move longwords */
942 movl %edx,%ecx /* now move remaining bytes */
943 andl $3,%ecx
944 RECOVERY_SECTION
945 RECOVER(copyin_fail)
946 rep
947 movsb
948 xorl %eax,%eax /* return 0 for success */
89b3af67
A
949copyin_ret:
950 mov %ss,%cx /* restore kernel data and extended segments */
951 mov %cx,%ds
952 mov %cx,%es
1c79356b
A
953
954 popl %edi /* restore registers */
955 popl %esi
956 ret /* and return */
957
958copyin_fail:
89b3af67
A
959 movl $(EFAULT),%eax /* return error for failure */
960 jmp copyin_ret /* pop frame and return */
961
1c79356b 962
89b3af67 963
1c79356b 964/*
89b3af67
A
965 * Copy string from user/kern address space.
966 * arg0: window offset or kernel address
1c79356b
A
967 * arg1: kernel address
968 * arg2: max byte count
969 * arg3: actual byte count (OUT)
970 */
89b3af67
A
971Entry(copyinstr_kern)
972 mov %ds,%cx
973 jmp copyinstr_common
974
975Entry(copyinstr_user)
976 movl $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
977
978copyinstr_common:
979 mov %cx,%fs
980
1c79356b
A
981 pushl %esi
982 pushl %edi /* save registers */
983
89b3af67
A
984 movl 8+S_ARG0,%esi /* get source - window offset or kernel address */
985 movl 8+S_ARG1,%edi /* get destination - kernel address */
1c79356b
A
986 movl 8+S_ARG2,%edx /* get count */
987
89b3af67
A
988 xorl %eax,%eax /* set to 0 here so that the high 24 bits */
989 /* are 0 for the cmpl against 0 */
1c79356b
A
9902:
991 RECOVERY_SECTION
992 RECOVER(copystr_fail) /* copy bytes... */
c0fea474 993 movb %fs:(%esi),%al
1c79356b
A
994 incl %esi
995 testl %edi,%edi /* if kernel address is ... */
996 jz 3f /* not NULL */
c0fea474 997 movb %al,(%edi) /* copy the byte */
1c79356b
A
998 incl %edi
9993:
89b3af67
A
1000 testl %eax,%eax /* did we just stuff the 0-byte? */
1001 jz 4f /* yes, return 0 status already in %eax */
1002 decl %edx /* decrement #bytes left in buffer */
1003 jnz 2b /* buffer not full so copy in another byte */
1004 movl $(ENAMETOOLONG),%eax /* buffer full but no 0-byte: ENAMETOOLONG */
1c79356b 10054:
1c79356b
A
1006 movl 8+S_ARG3,%edi /* get OUT len ptr */
1007 cmpl $0,%edi
1008 jz copystr_ret /* if null, just return */
1009 subl 8+S_ARG0,%esi
1010 movl %esi,(%edi) /* else set OUT arg to xfer len */
1011copystr_ret:
1012 popl %edi /* restore registers */
1013 popl %esi
1014 ret /* and return */
1015
1016copystr_fail:
89b3af67
A
1017 movl $(EFAULT),%eax /* return error for failure */
1018 jmp copystr_ret /* pop frame and return */
1019
1c79356b
A
1020
/*
 * Copy to user/kern address space.
 * arg0: kernel address (source)
 * arg1: window offset or kernel address (destination)
 * arg2: byte count
 *
 * Returns 0 on success, EFAULT on a faulting access.  The destination
 * is reached through %es; the copyoutphys_* entries additionally route
 * the source through the physical window in %ds.  Both segments are
 * restored from %ss before returning.
 */
ENTRY(copyoutphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyoutphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%ds
	jmp	copyout_common

ENTRY(copyout_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyout_kern)

copyout_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* source - kernel address */
	movl	8+S_ARG1,%edi		/* destination - window offset or kernel address */
	movl	8+S_ARG2,%edx		/* byte count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsl
	movl	%edx,%ecx		/* then the remaining 0..3 bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsb				/* move */
	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%cx			/* restore kernel data segments */
	mov	%cx,%es
	mov	%cx,%ds

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */
1076
1c79356b
A
1077
/*
 * io register must not be used on slaves (no AT bus)
 */
#define	ILL_ON_SLAVE

/*
 * With MACH_ASSERT the I/O routines build a real frame (FRAME/EMARF)
 * and fetch their arguments with the frame-based B_ARG* accessors;
 * otherwise they are frameless and use the stack-based S_ARG* ones.
 */
#if	MACH_ASSERT

#define ARG0		B_ARG0
#define ARG1		B_ARG1
#define ARG2		B_ARG2
#define PUSH_FRAME	FRAME
#define POP_FRAME	EMARF

#else	/* MACH_ASSERT */

#define ARG0		S_ARG0
#define ARG1		S_ARG1
#define ARG2		S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif	/* MACH_ASSERT */
1101
1102
#if	MACH_KDB || MACH_ASSERT

/*
 * The following routines are also defined as macros in i386/pio.h.
 * They are compiled here when MACH_KDB is configured so that they
 * can be invoked from the debugger.
 */

/*
 * void outb(unsigned char *io_port,
 *	     unsigned char byte)
 *
 * Output a byte to an IO port.
 */
ENTRY(outb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outb	%al,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Input a byte from an IO port.
 */
ENTRY(inb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inb	%dx,%al			/* get the byte */
	POP_FRAME
	ret

/*
 * void outw(unsigned short *io_port,
 *	     unsigned short word)
 *
 * Output a word to an IO port.
 */
ENTRY(outw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outw	%ax,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Input a word from an IO port.
 */
ENTRY(inw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inw	%dx,%ax			/* get the word */
	POP_FRAME
	ret

/*
 * void outl(unsigned int *io_port,
 *	     unsigned int byte)
 *
 * Output an int to an IO port.
 */
ENTRY(outl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outl	%eax,%dx		/* send it out */
	POP_FRAME
	ret

/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Input an int from an IO port.
 */
ENTRY(inl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	inl	%dx,%eax		/* get the int */
	POP_FRAME
	ret

#endif	/* MACH_KDB || MACH_ASSERT */
1198
/*
 * void loutb(unsigned byte *io_port,
 *	      unsigned byte *data,
 *	      unsigned int count)
 *
 * Output an array of bytes to an IO port.
 */
ENTRY(loutb)
ENTRY(outsb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* preserve %esi in a scratch register */
	movl	ARG0,%edx		/* io port number */
	movl	ARG1,%esi		/* source data address */
	movl	ARG2,%ecx		/* byte count */
	cld				/* count up */
	rep
	outsb				/* write the whole array */
	movl	%eax,%esi		/* restore %esi */
	POP_FRAME
	ret
1220
1221
/*
 * void loutw(unsigned short *io_port,
 *	      unsigned short *data,
 *	      unsigned int count)
 *
 * Output an array of shorts to an IO port.
 */
ENTRY(loutw)
ENTRY(outsw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* preserve %esi in a scratch register */
	movl	ARG0,%edx		/* io port number */
	movl	ARG1,%esi		/* source data address */
	movl	ARG2,%ecx		/* word count */
	cld				/* count up */
	rep
	outsw				/* write the whole array */
	movl	%eax,%esi		/* restore %esi */
	POP_FRAME
	ret
1243
/*
 * void loutl(unsigned short io_port,
 *	      unsigned int *data,
 *	      unsigned int count)
 *
 * Output an array of longs to an IO port.
 * (The original header comment said "loutw" - copy/paste typo.)
 */
ENTRY(loutl)
ENTRY(outsl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* preserve %esi in a scratch register */
	movl	ARG0,%edx		/* io port number */
	movl	ARG1,%esi		/* source data address */
	movl	ARG2,%ecx		/* long count */
	cld				/* count up */
	rep
	outsl				/* write the whole array */
	movl	%eax,%esi		/* restore %esi */
	POP_FRAME
	ret
1265
1266
/*
 * void linb(unsigned char *io_port,
 *	     unsigned char *data,
 *	     unsigned int count)
 *
 * Input an array of bytes from an IO port.
 */
ENTRY(linb)
ENTRY(insb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* preserve %edi in a scratch register */
	movl	ARG0,%edx		/* io port number */
	movl	ARG1,%edi		/* destination data address */
	movl	ARG2,%ecx		/* byte count */
	cld				/* count up */
	rep
	insb				/* read the whole array */
	movl	%eax,%edi		/* restore %edi */
	POP_FRAME
	ret
1288
1289
/*
 * void linw(unsigned short *io_port,
 *	     unsigned short *data,
 *	     unsigned int count)
 *
 * Input an array of shorts from an IO port.
 */
ENTRY(linw)
ENTRY(insw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* preserve %edi in a scratch register */
	movl	ARG0,%edx		/* io port number */
	movl	ARG1,%edi		/* destination data address */
	movl	ARG2,%ecx		/* word count */
	cld				/* count up */
	rep
	insw				/* read the whole array */
	movl	%eax,%edi		/* restore %edi */
	POP_FRAME
	ret
1311
1312
/*
 * void linl(unsigned short io_port,
 *	     unsigned int *data,
 *	     unsigned int count)
 *
 * Input an array of longs from an IO port.
 */
ENTRY(linl)
ENTRY(insl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* preserve %edi in a scratch register */
	movl	ARG0,%edx		/* io port number */
	movl	ARG1,%edi		/* destination data address */
	movl	ARG2,%ecx		/* long count */
	cld				/* count up */
	rep
	insl				/* read the whole array */
	movl	%eax,%edi		/* restore %edi */
	POP_FRAME
	ret
1334
91447636
A
/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 *
 * Read an MSR, tolerating the #GP that rdmsr raises on an invalid
 * MSR number via the recovery table.  On success stores the low and
 * high 32 bits through *lo and *hi and returns 0; returns 1 on fault.
 */
ENTRY(rdmsr_carefully)
	movl	S_ARG0, %ecx		/* MSR number */
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)		/* rdmsr may fault on a bad MSR */
	rdmsr
	movl	S_ARG1, %ecx
	movl	%eax, (%ecx)		/* *lo = low 32 bits */
	movl	S_ARG2, %ecx
	movl	%edx, (%ecx)		/* *hi = high 32 bits */
	movl	$0, %eax		/* success */
	ret

rdmsr_fail:
	movl	$1, %eax		/* faulted */
	ret
1c79356b
A
1353
/*
 * Done with recovery table - terminate it so the fault handler
 * stops scanning here.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END
1c79356b
A
1359
1360
1361
ENTRY(dr6)
	movl	%db6, %eax		/* return the debug status register */
	ret

/* dr<i>(address, type, len, persistence)
 *
 * Program hardware debug register i and rebuild %db7.  On entry to
 * the common tail, %ecx = 2*i: it indexes dr_msk (scale 2 -> one
 * 4-byte mask per register) and is the shift for the L<i>/G<i>
 * enable pair at DR7 bits 2i..2i+1.  The R/W<i> and LEN<i> control
 * fields live in the nibble at DR7 bits 16+4i (per the Intel SDM,
 * matching the dr_msk table below).
 *
 * BUG FIX: the original computed the control-field shift as
 * 16 + 2i (addb $0x10,%cl with %cl = 2i), so for dr1..dr3 the type
 * and len bits landed in the wrong DR7 fields (e.g. dr1's type
 * clobbered LEN0 at bits 18-19) while dr_msk cleared 16+4i.  The
 * shift is now doubled first so it is 16 + 4i, consistent with the
 * mask table and the architectural DR7 layout.  dr0 is unaffected.
 */
ENTRY(dr0)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)
	movl	%eax, %db0
	movl	$0, %ecx
	jmp	0f
ENTRY(dr1)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+1*4
	movl	%eax, %db1
	movl	$2, %ecx
	jmp	0f
ENTRY(dr2)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+2*4
	movl	%eax, %db2
	movl	$4, %ecx
	jmp	0f

ENTRY(dr3)
	movl	S_ARG0, %eax
	movl	%eax,EXT(dr_addr)+3*4
	movl	%eax, %db3
	movl	$6, %ecx

0:
	pushl	%ebp
	movl	%esp, %ebp

	movl	%db7, %edx
	movl	%edx,EXT(dr_addr)+4*4	/* log old %db7 */
	andl	dr_msk(,%ecx,2),%edx	/* clear out this register's fields */
	movl	%edx,EXT(dr_addr)+5*4	/* log masked %db7 */
	movzbl	B_ARG3, %eax		/* persistence -> L/G enable pair */
	andb	$3, %al
	shll	%cl, %eax		/* shift to bits 2i..2i+1 */
	orl	%eax, %edx

	movzbl	B_ARG1, %eax		/* type -> R/W<i> field */
	andb	$3, %al
	addb	%cl, %cl		/* %cl = 4i */
	addb	$0x10, %cl		/* %cl = 16 + 4i: R/W<i> position */
	shll	%cl, %eax
	orl	%eax, %edx

	movzbl	B_ARG2, %eax		/* len -> LEN<i> field */
	andb	$3, %al
	addb	$0x2, %cl		/* LEN<i> sits just above R/W<i> */
	shll	%cl, %eax
	orl	%eax, %edx

	movl	%edx, %db7
	movl	%edx,EXT(dr_addr)+7*4	/* log new %db7 */
	movl	%edx, %eax		/* return the new %db7 image */
	leave
	ret

	.data
/* One mask per debug register: clears its L/G enable pair and its
 * R/W/LEN nibble at bits 16+4i. */
dr_msk:
	.long	~0x000f0003
	.long	~0x00f0000c
	.long	~0x0f000030
	.long	~0xf00000c0
ENTRY(dr_addr)
	.long	0,0,0,0
	.long	0,0,0,0

	.text
1435
1c79356b
A
ENTRY(get_cr0)
	movl	%cr0, %eax		/* return %cr0 */
	ret

ENTRY(set_cr0)
	movl	4(%esp), %eax		/* fetch new value from the stack */
	movl	%eax, %cr0		/* and install it */
	ret
1444
#ifndef	SYMMETRY

/*
 * ffs(mask)
 *
 * Find-first-set: returns the 1-based index of the least significant
 * set bit, or 0 when the mask is 0.
 */
ENTRY(ffs)
	bsfl	S_ARG0, %eax		/* ZF set (and %eax undefined) if mask == 0 */
	jz	0f
	incl	%eax			/* bsf is 0-based; ffs is 1-based */
	ret
0:	xorl	%eax, %eax		/* no bit set: return 0 */
	ret

/*
 * cpu_shutdown()
 * Force reboot
 */

null_idtr:
	.word	0
	.long	0

Entry(cpu_shutdown)
	lidt	null_idtr		/* disable the interrupt handler */
	xor	%ecx,%ecx		/* generate a divide by zero */
	div	%ecx,%eax		/* fault with an empty IDT escalates to reset */
	ret				/* this will "never" be executed */

#endif	/* SYMMETRY */
1474
1475
/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
	movl	S_ARG1, %eax		/* address of bit string */
	movl	S_ARG0, %ecx		/* bit number */
	btsl	%ecx, (%eax)		/* set the bit */
	ret

/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
	movl	S_ARG1, %eax		/* address of bit string */
	movl	S_ARG0, %ecx		/* bit number */
	btrl	%ecx, (%eax)		/* clear the bit */
	ret
1493
/*
 * ffsbit(int *s) - find first set bit in bit string
 *
 * NOTE(review): scans word by word with no length bound, so it loops
 * (and walks memory) forever if no bit is set -- callers must
 * guarantee at least one set bit.
 */
ENTRY(ffsbit)
	movl	S_ARG0, %ecx		/* address */
	movl	$0, %edx		/* bits scanned so far */
0:
	bsfl	(%ecx), %eax		/* any bit in this word? */
	jnz	1f			/* yes - compute the result */
	addl	$4, %ecx		/* next word */
	addl	$32, %edx		/* 32 more bits scanned */
	jmp	0b			/* keep looking */
1:
	addl	%edx, %eax		/* index = word base + bit within word */
	ret

/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string.
 * Returns -1 (all ones) when set, 0 when clear.
 */
ENTRY(testbit)
	movl	S_ARG0,%eax		/* bit to test */
	movl	S_ARG1,%ecx		/* bit string address */
	btl	%eax,(%ecx)		/* CF = bit value */
	sbbl	%eax,%eax		/* broadcast CF: -1 if set, 0 if clear */
	ret

/*
 * get_pc() - return the caller's return address.
 *
 * NOTE(review): reads 4(%ebp), which assumes the caller built a
 * standard %ebp frame -- confirm at each call site.
 */
ENTRY(get_pc)
	movl	4(%ebp),%eax
	ret
1526
1c79356b
A
ENTRY(minsecurity)
	pushl	%ebp
	movl	%esp,%ebp
	/* falls through into jail below */
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 */
ENTRY(jail)
	jmp	EXT(jail)		/* spin here forever */
1536
1c79356b
A
/*
 * unsigned int
 * div_scale(unsigned int dividend,
 *	     unsigned int divisor,
 *	     unsigned int *scale)
 *
 * Returns (dividend << *scale) / divisor where *scale is the largest
 * possible value before overflow.  Used where precision is needed
 * without resorting to floating point.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (((dividend >> *scale) >= divisor))
 *		(*scale)++;
 *	*scale = 32 - *scale;
 *	return ((dividend << *scale) / divisor);
 */
ENTRY(div_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* shift count = 0 */
	xorl	%eax, %eax		/* low half of 64-bit dividend */
	movl	ARG0, %edx		/* high half = dividend */
0:
	cmpl	ARG1, %edx		/* while (dividend >= divisor) */
	jle	1f			/* NOTE(review): signed compare of unsigned
					 * operands -- confirm inputs stay < 2^31 */
	addl	$1, %ecx		/* one more shift */
	shrdl	$1, %edx, %eax		/* 64-bit dividend >>= 1 */
	shrl	$1, %edx
	jmp	0b
1:
	divl	ARG1			/* (dividend << (32 - shifts)) / divisor */
	movl	ARG2, %edx		/* get scale ptr */
	movl	$32, (%edx)		/* *scale = 32 */
	subl	%ecx, (%edx)		/* *scale -= shift count */
	POP_FRAME
	ret
1574
/*
 * unsigned int
 * mul_scale(unsigned int multiplicand,
 *	     unsigned int multiplier,
 *	     unsigned int *scale)
 *
 * Returns ((multiplicand * multiplier) >> *scale) where *scale is the
 * smallest shift that makes the 64-bit product fit in 32 bits.  Used
 * where precision is needed without resorting to floating point.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (overflow((multiplicand * multiplier) >> *scale))
 *		(*scale)++;
 *	return ((multiplicand * multiplier) >> *scale);
 */
ENTRY(mul_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* shift count = 0 */
	movl	ARG0, %eax		/* get multiplicand */
	mull	ARG1			/* %edx:%eax = multiplicand * multiplier */
0:
	cmpl	$0, %edx		/* high half clear -> fits in 32 bits */
	je	1f
	addl	$1, %ecx		/* one more shift */
	shrdl	$1, %edx, %eax		/* 64-bit product >>= 1 */
	shrl	$1, %edx
	jmp	0b
1:
	movl	ARG2, %edx		/* get scale ptr */
	movl	%ecx, (%edx)		/* report the shift used */
	POP_FRAME
	ret
1609
8f6c56a5 1610
89b3af67 1611
/*
 * Double-fault exception handler task. The last gasp...
 */
Entry(df_task_start)
	CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))
	hlt				/* panic should not return */

/*
 * Machine-check handler task. The last gasp...
 */
Entry(mc_task_start)
	CCALL1(panic_machine_check, $(T_MACHINE_CHECK))
	hlt				/* panic should not return */

/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl	%esp, %eax		/* pass current stack pointer */
	CCALL1(panic_double_fault64, %eax)
	hlt

Entry(lo_mc64)
	movl	%esp, %eax		/* pass current stack pointer */
	CCALL1(panic_machine_check64, %eax)
	hlt
1c79356b 1639