/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <stat_time.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/lapic.h>
#include <i386/rtclock.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>

#define	_ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

#include <i386/mp.h>

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDESHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,(APTDPTDI << PDESHIFT)
	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define	ETEXT_ADDR	(EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define	ETEXT_ADDR	$ EXT(etext)
#endif

#define	CX(addr,reg)	addr(,reg,4)

/*
 * The following macros make calls into C code.
 * They dynamically align the stack to 16 bytes.
 * Arguments are moved (not pushed) onto the correctly aligned stack.
 * NOTE: EDI is destroyed in the process, and hence cannot
 * be directly used as a parameter. Users of this macro must
 * independently preserve EDI (a non-volatile) if the routine is
 * intended to be called from C, for instance.
 */

#define CCALL(fn)			\
	movl	%esp, %edi		;\
	andl	$0xFFFFFFF0, %esp	;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL1(fn, arg1)		\
	movl	%esp, %edi		;\
	subl	$4, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL2(fn, arg1, arg2)		\
	movl	%esp, %edi		;\
	subl	$8, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg2, 4(%esp)		;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

/*
 * CCALL5 is used for callee functions with 3 arguments but
 * where arg2 (a3:a2) and arg3 (a5:a4) are 64-bit values.
 */
#define CCALL5(fn, a1, a2, a3, a4, a5)	\
	movl	%esp, %edi		;\
	subl	$20, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	a5, 16(%esp)		;\
	movl	a4, 12(%esp)		;\
	movl	a3,  8(%esp)		;\
	movl	a2,  4(%esp)		;\
	movl	a1,  0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

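/*
 * For illustration, CCALL1(user_trap, %ebx) expands to roughly:
 *
 *	movl	%esp, %edi		// remember the unaligned stack
 *	subl	$4, %esp		// room for one 32-bit argument
 *	andl	$0xFFFFFFF0, %esp	// force 16-byte alignment
 *	movl	%ebx, 0(%esp)		// argument at the aligned base
 *	call	EXT(user_trap)
 *	movl	%edi, %esp		// restore the original stack
 *
 * which is why %ebx, not %edi, is used to carry arguments below.
 */
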
	.text
locore_start:

/*
 * Fault recovery.
 */

#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section __VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif

#define	RECOVER_TABLE_START		\
	.align	2			;\
	.globl	EXT(recover_table)	;\
LEXT(recover_table)			;\
	.text

#define	RECOVER(addr)			\
	.align	2			;\
	.long	9f			;\
	.long	addr			;\
	.text				;\
9:

#define	RECOVER_TABLE_END		\
	.align	2			;\
	.globl	EXT(recover_table_end)	;\
LEXT(recover_table_end)			;\
	.text

/*
 * Allocate the recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START

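/*
 * Each RECOVER(addr) use emits a (fault address, recovery address) pair
 * into this table.  A minimal sketch, in C, of how the trap handler is
 * assumed to consume it (struct and variable names illustrative):
 *
 *	struct recovery { uint32_t fault_addr, recover_addr; };
 *	extern struct recovery recover_table[], recover_table_end[];
 *
 *	for (rp = recover_table; rp < recover_table_end; rp++)
 *		if (rp->fault_addr == regs->eip) {
 *			regs->eip = rp->recover_addr;	// e.g. copyin_fail
 *			return;				// resume, no panic
 *		}
 */
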
/*
 * Timing routines.
 */
Entry(timer_update)
	movl	4(%esp),%ecx
	movl	8(%esp),%eax
	movl	12(%esp),%edx
	movl	%eax,TIMER_HIGHCHK(%ecx)
	movl	%edx,TIMER_LOW(%ecx)
	movl	%eax,TIMER_HIGH(%ecx)
	ret

Entry(timer_grab)
	movl	4(%esp),%ecx
0:	movl	TIMER_HIGH(%ecx),%edx
	movl	TIMER_LOW(%ecx),%eax
	cmpl	TIMER_HIGHCHK(%ecx),%edx
	jne	0b
	ret

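/*
 * timer_update() and timer_grab() form a small seqlock-style protocol:
 * the writer stores HIGHCHK, then LOW, then HIGH; the reader retries
 * until HIGH and HIGHCHK agree, yielding a consistent 64-bit value
 * without locks.  An illustrative C equivalent (field names assumed
 * from the assym.s offsets):
 *
 *	uint64_t timer_grab(struct timer *t) {
 *		uint32_t hi, lo;
 *		do {
 *			hi = t->high;		// TIMER_HIGH
 *			lo = t->low;		// TIMER_LOW
 *		} while (hi != t->high_check);	// TIMER_HIGHCHK
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 */
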
#if STAT_TIME

#define	TIME_TRAP_UENTRY
#define	TIME_TRAP_UEXIT
#define	TIME_INT_ENTRY
#define	TIME_INT_EXIT

#else
/*
 * Nanosecond timing.
 */

/*
 * Nanotime returned in %edx:%eax.
 * Computed from tsc based on the scale factor
 * and an implicit 32 bit shift.
 *
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 */
#define NANOTIME					\
	mov	%gs:CPU_NANOTIME,%edi			;\
	RTC_NANOTIME_READ_FAST()

/*
 * Add 64-bit delta in register dreg : areg to timer pointed to by register treg.
 */
#define TIMER_UPDATE(treg,dreg,areg)					   \
	addl	TIMER_LOW(treg),areg	    /* add low bits */		  ;\
	adcl	dreg,TIMER_HIGH(treg)	    /* add carry high bits */	  ;\
	movl	areg,TIMER_LOW(treg)	    /* store updated low bit */	  ;\
	movl	TIMER_HIGH(treg),dreg	    /* copy high bits */	  ;\
	movl	dreg,TIMER_HIGHCHK(treg)    /* to high check */

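/*
 * A sketch of TIMER_UPDATE in C: the timer accumulates a 64-bit sum,
 * and HIGHCHK is republished last so a concurrent timer_grab() retries
 * rather than seeing a torn value:
 *
 *	uint64_t sum = (((uint64_t)t->high << 32) | t->low) + delta;
 *	t->high       = sum >> 32;	// adcl folds the carry into high
 *	t->low        = (uint32_t)sum;	// addl/movl update the low word
 *	t->high_check = t->high;	// reader validates against this
 */
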
/*
 * Add time delta to old timer and start new.
 */
#define TIMER_EVENT(old,new)						   \
	NANOTIME			    /* edx:eax nanosecs */	  ;\
	movl	%eax,%esi		    /* save timestamp */	  ;\
	movl	%edx,%edi		    /* save timestamp */	  ;\
	movl	%gs:CPU_PROCESSOR,%ebx	    /* get current processor */	  ;\
	movl	THREAD_TIMER(%ebx),%ecx	    /* get current timer */	  ;\
	subl	TIMER_TSTAMP(%ecx),%eax	    /* compute elapsed time */	  ;\
	sbbl	TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */	  ;\
	TIMER_UPDATE(%ecx,%edx,%eax)	    /* update timer */		  ;\
	addl	$(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\
	movl	%esi,TIMER_TSTAMP(%ecx)	    /* set timestamp */		  ;\
	movl	%edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */		  ;\
	movl	%ecx,THREAD_TIMER(%ebx)	    /* set current timer */	  ;\
	movl	%esi,%eax		    /* restore timestamp */	  ;\
	movl	%edi,%edx		    /* restore timestamp */	  ;\
	movl	CURRENT_STATE(%ebx),%ecx    /* current state */		  ;\
	subl	TIMER_TSTAMP(%ecx),%eax	    /* compute elapsed time */	  ;\
	sbbl	TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */	  ;\
	TIMER_UPDATE(%ecx,%edx,%eax)	    /* update timer */		  ;\
	addl	$(new##_STATE-old##_STATE),%ecx /* point to new state */ ;\
	movl	%ecx,CURRENT_STATE(%ebx)    /* set current state */	  ;\
	movl	%esi,TIMER_TSTAMP(%ecx)	    /* set timestamp */		  ;\
	movl	%edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */

/*
 * Update time on user trap entry.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 */
#define	TIME_TRAP_UENTRY	TIMER_EVENT(USER,SYSTEM)

/*
 * Update time on user trap exit.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 */
#define	TIME_TRAP_UEXIT		TIMER_EVENT(SYSTEM,USER)

/*
 * Update time on interrupt entry.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 * Saves processor state info on stack.
 */
#define	TIME_INT_ENTRY							   \
	NANOTIME			    /* edx:eax nanosecs */	  ;\
	movl	%eax,%gs:CPU_INT_EVENT_TIME /* save in cpu data */	  ;\
	movl	%edx,%gs:CPU_INT_EVENT_TIME+4 /* save in cpu data */	  ;\
	movl	%eax,%esi		    /* save timestamp */	  ;\
	movl	%edx,%edi		    /* save timestamp */	  ;\
	movl	%gs:CPU_PROCESSOR,%ebx	    /* get current processor */	  ;\
	movl	THREAD_TIMER(%ebx),%ecx	    /* get current timer */	  ;\
	subl	TIMER_TSTAMP(%ecx),%eax	    /* compute elapsed time */	  ;\
	sbbl	TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */	  ;\
	TIMER_UPDATE(%ecx,%edx,%eax)	    /* update timer */		  ;\
	movl	KERNEL_TIMER(%ebx),%ecx	    /* point to kernel timer */	  ;\
	movl	%esi,TIMER_TSTAMP(%ecx)	    /* set timestamp */		  ;\
	movl	%edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */		  ;\
	movl	%esi,%eax		    /* restore timestamp */	  ;\
	movl	%edi,%edx		    /* restore timestamp */	  ;\
	movl	CURRENT_STATE(%ebx),%ecx    /* get current state */	  ;\
	pushl	%ecx			    /* save state */		  ;\
	subl	TIMER_TSTAMP(%ecx),%eax	    /* compute elapsed time */	  ;\
	sbbl	TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */	  ;\
	TIMER_UPDATE(%ecx,%edx,%eax)	    /* update timer */		  ;\
	leal	IDLE_STATE(%ebx),%eax	    /* get idle state */	  ;\
	cmpl	%eax,%ecx		    /* compare current state */	  ;\
	je	0f			    /* skip if equal */		  ;\
	leal	SYSTEM_STATE(%ebx),%ecx	    /* get system state */	  ;\
	movl	%ecx,CURRENT_STATE(%ebx)    /* set current state */	  ;\
0:	movl	%esi,TIMER_TSTAMP(%ecx)	    /* set timestamp */		  ;\
	movl	%edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */

/*
 * Update time on interrupt exit.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 * Restores processor state info from stack.
 */
#define	TIME_INT_EXIT							   \
	NANOTIME			    /* edx:eax nanosecs */	  ;\
	movl	%eax,%gs:CPU_INT_EVENT_TIME /* save in cpu data */	  ;\
	movl	%edx,%gs:CPU_INT_EVENT_TIME+4 /* save in cpu data */	  ;\
	movl	%eax,%esi		    /* save timestamp */	  ;\
	movl	%edx,%edi		    /* save timestamp */	  ;\
	movl	%gs:CPU_PROCESSOR,%ebx	    /* get current processor */	  ;\
	movl	KERNEL_TIMER(%ebx),%ecx	    /* point to kernel timer */	  ;\
	subl	TIMER_TSTAMP(%ecx),%eax	    /* compute elapsed time */	  ;\
	sbbl	TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */	  ;\
	TIMER_UPDATE(%ecx,%edx,%eax)	    /* update timer */		  ;\
	movl	THREAD_TIMER(%ebx),%ecx	    /* interrupted timer */	  ;\
	movl	%esi,TIMER_TSTAMP(%ecx)	    /* set timestamp */		  ;\
	movl	%edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */		  ;\
	movl	%esi,%eax		    /* restore timestamp */	  ;\
	movl	%edi,%edx		    /* restore timestamp */	  ;\
	movl	CURRENT_STATE(%ebx),%ecx    /* get current state */	  ;\
	subl	TIMER_TSTAMP(%ecx),%eax	    /* compute elapsed time */	  ;\
	sbbl	TIMER_TSTAMP+4(%ecx),%edx   /* compute elapsed time */	  ;\
	TIMER_UPDATE(%ecx,%edx,%eax)	    /* update timer */		  ;\
	popl	%ecx			    /* restore state */		  ;\
	movl	%ecx,CURRENT_STATE(%ebx)    /* set current state */	  ;\
	movl	%esi,TIMER_TSTAMP(%ecx)	    /* set timestamp */		  ;\
	movl	%edi,TIMER_TSTAMP+4(%ecx)   /* set timestamp */

#endif /* STAT_TIME */

#undef PDEBUG

#ifdef PDEBUG

/*
 * Traditional, not ANSI.
 */
#define CAH(label)			 \
	.data				;\
	.globl	label/**/count		;\
label/**/count:				;\
	.long	0			;\
	.globl	label/**/limit		;\
label/**/limit:				;\
	.long	0			;\
	.text				;\
	addl	$1,%ss:label/**/count	;\
	cmpl	$0,label/**/limit	;\
	jz	label/**/exit		;\
	pushl	%eax			;\
label/**/loop:				;\
	movl	%ss:label/**/count,%eax	;\
	cmpl	%eax,%ss:label/**/limit	;\
	je	label/**/loop		;\
	popl	%eax			;\
label/**/exit:

#else	/* PDEBUG */

#define CAH(label)

#endif	/* PDEBUG */

#if	MACH_KDB
/*
 * Last-ditch debug code to handle faults that might result
 * from entering kernel (from collocated server) on an invalid
 * stack.  On collocated entry, there's no hardware-initiated
 * stack switch, so a valid stack must be in place when an
 * exception occurs, or we may double-fault.
 *
 * In case of a double-fault, our only recourse is to switch
 * hardware "tasks", so that we avoid using the current stack.
 *
 * The idea here is just to get the processor into the debugger,
 * post-haste.  No attempt is made to fix up whatever error got
 * us here, so presumably continuing from the debugger will
 * simply land us here again -- at best.
 */
#if	0
/*
 * Note that the per-fault entry points are not currently
 * functional.  The only way to make them work would be to
 * set up separate TSS's for each fault type, which doesn't
 * currently seem worthwhile.  (The offset part of a task
 * gate is always ignored.)  So all faults that task switch
 * currently resume at db_task_start.
 */
/*
 * Double fault (Murphy's point) - error code (0) on stack
 */
Entry(db_task_dbl_fault)
	popl	%eax
	movl	$(T_DOUBLE_FAULT),%ebx
	jmp	db_task_start
/*
 * Segment not present - error code on stack
 */
Entry(db_task_seg_np)
	popl	%eax
	movl	$(T_SEGMENT_NOT_PRESENT),%ebx
	jmp	db_task_start
/*
 * Stack fault - error code on (current) stack
 */
Entry(db_task_stk_fault)
	popl	%eax
	movl	$(T_STACK_FAULT),%ebx
	jmp	db_task_start
/*
 * General protection fault - error code on stack
 */
Entry(db_task_gen_prot)
	popl	%eax
	movl	$(T_GENERAL_PROTECTION),%ebx
	jmp	db_task_start
#endif	/* 0 */
/*
 * The entry point where execution resumes after last-ditch debugger task
 * switch.
 */
Entry(db_task_start)
	movl	%esp,%edx
	subl	$(ISS32_SIZE),%edx
	movl	%edx,%esp		/* allocate x86_saved_state on stack */
	movl	%eax,R_ERR(%esp)
	movl	%ebx,R_TRAPNO(%esp)
	pushl	%edx
	CPU_NUMBER(%edx)
	movl	CX(EXT(master_dbtss),%edx),%edx
	movl	TSS_LINK(%edx),%eax
	pushl	%eax			/* pass along selector of previous TSS */
	call	EXT(db_tss_to_frame)
	popl	%eax			/* get rid of TSS selector */
	call	EXT(db_trap_from_asm)
	addl	$0x4,%esp
	/*
	 * And now...?
	 */
	iret				/* ha, ha, ha... */
#endif	/* MACH_KDB */

/*
 * Called as a function, makes the current thread
 * return from the kernel as if from an exception.
 */

	.globl	EXT(thread_exception_return)
	.globl	EXT(thread_bootstrap_return)
LEXT(thread_exception_return)
LEXT(thread_bootstrap_return)
	cli
	movl	%gs:CPU_KERNEL_STACK,%ecx
	movl	(%ecx),%esp		/* switch back to PCB stack */
	jmp	EXT(return_from_trap)

Entry(call_continuation)
	movl	S_ARG0,%eax		/* get continuation */
	movl	S_ARG1,%edx		/* continuation param */
	movl	S_ARG2,%ecx		/* wait result */
	movl	%gs:CPU_KERNEL_STACK,%esp /* pop the stack */
	xorl	%ebp,%ebp		/* zero frame pointer */
	subl	$8,%esp			/* align the stack */
	pushl	%ecx
	pushl	%edx
	call	*%eax			/* call continuation */
	addl	$16,%esp
	movl	%gs:CPU_ACTIVE_THREAD,%eax
	pushl	%eax
	call	EXT(thread_terminate)

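/*
 * In C terms, call_continuation() behaves approximately as below (a
 * sketch; the real declaration lives with the scheduler code).  The old
 * kernel-stack frames are discarded, so the continuation must never
 * return:
 *
 *	void call_continuation(void (*cont)(void *, wait_result_t),
 *			       void *param, wait_result_t wresult) {
 *		// reset %esp to the stack top, clear %ebp
 *		cont(param, wresult);
 *		thread_terminate(current_thread());	// only if it returns
 *	}
 */
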

/*******************************************************************************************************
 *
 * All 64 bit task 'exceptions' enter lo_alltraps:
 *	esp	-> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *	cr3	-> kernel directory
 *	esp	-> low based stack
 *	gs	-> CPU_DATA_GS
 *	cs	-> KERNEL_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_alltraps)
	movl	R_CS(%esp),%eax		/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:
	testb	$3,%al
	jz	trap_from_kernel
					/* user mode trap */
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	ACT_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */
	sti

	CCALL1(user_trap, %ebx)		/* call user trap routine */
	cli				/* hold off intrs - critical section */
	popl	%esp			/* switch back to PCB stack */

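/*
 * The CS test above, sketched in C (saved-state field names assumed):
 *
 *	sel = (state->flavor == x86_SAVED_STATE64) ? state->ss_64.isf.cs
 *						   : state->ss_32.cs;
 *	if ((sel & 3) == 0)	// RPL 0 => trapped while in the kernel
 *		goto trap_from_kernel;
 *	// otherwise: user-mode trap, bill the time and call user_trap()
 */
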
/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled
 */
LEXT(return_from_trap)
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax
	je	EXT(return_to_user)	/* branch if no AST */

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */
	sti				/* interrupts always enabled on return to user mode */

	pushl	%ebx			/* save PCB stack */
	xorl	%ebp,%ebp		/* Clear framepointer */
	CCALL1(i386_astintr, $0)	/* take the AST */
	cli
	popl	%esp			/* switch back to PCB stack (w/exc link) */
	jmp	EXT(return_from_trap)	/* and check again (rare) */

LEXT(return_to_user)
	TIME_TRAP_UEXIT

LEXT(ret_to_user)
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(lo_ret_to_user)
	jmp	EXT(lo64_ret_to_user)


/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to state at time of trap
 * as soon as it's safe for us to do so and not recurse doing preemption
 */
trap_from_kernel:
	movl	%esp, %eax		/* saved state addr */
	pushl	R_EIP(%esp)		/* Simulate a CALL from fault point */
	pushl	%ebp			/* Extend framepointer chain */
	movl	%esp, %ebp
	CCALL1(kernel_trap, %eax)	/* Call kernel trap handler */
	popl	%ebp
	addl	$4, %esp
	cli

	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$ AST_URGENT,%eax		/* any urgent preemption? */
	je	ret_to_kernel			/* no, nothing to do */
	cmpl	$ T_PREEMPT,R_TRAPNO(%esp)
	je	ret_to_kernel			/* T_PREEMPT handled in kernel_trap() */
	testl	$ EFL_IF,R_EFLAGS(%esp)		/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	andl	$(-KERNEL_STACK_SIZE),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

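/*
 * The xorl/andl pair is a branch-free "same stack block?" test: kernel
 * stacks are KERNEL_STACK_SIZE-aligned and that size is a power of two,
 * so two addresses lie on the same stack iff their xor has no bits set
 * at or above the stack-size boundary.  In C:
 *
 *	if (((esp ^ kstack) & -KERNEL_STACK_SIZE) == 0)
 *		i386_astintr(1);	// safe to preempt here
 */
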
	CCALL1(i386_astintr, $1)	/* take the AST */

ret_to_kernel:
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(lo_ret_to_kernel)
	jmp	EXT(lo64_ret_to_kernel)


/*******************************************************************************************************
 *
 * All interrupts on all tasks enter here with:
 *	esp	-> x86_saved_state_t
 *
 *	cr3	-> kernel directory
 *	esp	-> low based stack
 *	gs	-> CPU_DATA_GS
 *	cs	-> KERNEL_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	cmpl	%esp,%ecx
	jb	1f
	leal	-INTSTACK_SIZE(%ecx),%edx
	cmpl	%esp,%edx
	jb	int_from_intstack
1:
	xchgl	%ecx,%esp		/* switch to interrupt stack */

	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */

	subl	$8, %esp		/* for 16-byte stack alignment */
	pushl	%ecx			/* save pointer to old stack */
	movl	%ecx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	ACT_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%gs:CPU_INT_STATE, %eax
	CCALL1(PE_incoming_interrupt, %eax)	/* call generic interrupt routine */

	cli			/* just in case we returned with intrs enabled */
	xorl	%eax,%eax
	movl	%eax,%gs:CPU_INT_STATE	/* clear intr state pointer */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

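/*
 * Restore lazy-FPU behavior on the way out.  CR0.TS was set on entry so
 * any FPU use inside the handler would trap; here TS is cleared again
 * only if the interrupted thread owns an FPU context whose live state
 * has not been saved yet.  A sketch in C (field names assumed from the
 * assym.s offsets):
 *
 *	if (pcb->ifps != NULL && pcb->ifps->fp_valid == 0)
 *		clts();				// thread's FPU state is live
 *	else
 *		set_cr0(get_cr0() | CR0_TS);	// keep FPU traps armed
 */
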
	movl	%gs:CPU_ACTIVE_THREAD,%eax
	movl	ACT_PCB(%eax),%eax	/* get act`s PCB */
	movl	PCB_FPS(%eax),%eax	/* get pcb's ims.ifps */
	cmpl	$0,%eax			/* Is there a context */
	je	1f			/* Branch if not */
	movl	FP_VALID(%eax),%eax	/* Load fp_valid */
	cmpl	$0,%eax			/* Check if valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
2:
	popl	%esp			/* switch back to old stack */

	/* Load interrupted code segment into %eax */
	movl	R_CS(%esp),%eax		/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne	3f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
3:
	testb	$3,%al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$ AST_URGENT,%eax		/* any urgent requests? */
	je	ret_to_kernel			/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel			/* yes, skip it */

	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	andl	$(-KERNEL_STACK_SIZE),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	/*
	 * Take an AST from kernel space.  We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%esp, %edx		/* x86_saved_state */
	CCALL1(PE_incoming_interrupt, %edx)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	EXT(ret_to_user)	/* no, nothing to do */

	TIME_TRAP_UENTRY

	jmp	EXT(return_from_trap)	/* return */


/*******************************************************************************************************
 *
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	esp	 -> x86_saved_state32_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_sysenter)
	/*
	 * We can be here either for a mach syscall or a unix syscall,
	 * as indicated by the sign of the code:
	 */
	movl	R_EAX(%esp),%eax
	testl	%eax,%eax
	js	EXT(lo_mach_scall)	/* < 0 => mach */
					/* > 0 => unix */

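/*
 * The 32-bit ABI thus encodes the class in the sign of %eax: Mach trap
 * numbers are negative, BSD syscall numbers positive.  In C terms:
 *
 *	(int)regs->eax < 0 ? lo_mach_scall() : lo_unix_scall();
 */
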
Entry(lo_unix_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_UNIX(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(unix_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mach_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_MACH(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(mach_call_munger, %ebx)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo_mdep_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(machdep_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_diag_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
	xchgl	%ebx,%esp		// Switch to it, saving the previous

	CCALL1(diagCall, %ebx)		// Call diagnostics

	cmpl	$0,%eax			// What kind of return is this?
	je	2f
	cli				// Disable interruptions just in case they were enabled
	popl	%esp			// Get back the original stack
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
2:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
		// pass what would be the diag syscall
		// error return - cause an exception
	/* no return */


/*******************************************************************************************************
 *
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	esp	 -> x86_saved_state64_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_syscall)
	TIME_TRAP_UENTRY

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl	R64_RAX(%esp), %eax		/* syscall number/class */
	movl	%eax, %ebx
	andl	$(SYSCALL_CLASS_MASK), %ebx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_mach_scall)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_unix_scall)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_mdep_scall)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_diag_scall)

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	/* Syscall class unknown */
	CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
	/* no return */

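/*
 * Sketched in C, the dispatch above (constants from
 * mach/i386/syscall_sw.h):
 *
 *	switch (rax & SYSCALL_CLASS_MASK) {
 *	case SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT: mach_call_munger64();
 *	case SYSCALL_CLASS_UNIX << SYSCALL_CLASS_SHIFT: unix_syscall64();
 *	case SYSCALL_CLASS_MDEP << SYSCALL_CLASS_SHIFT: machdep_syscall64();
 *	case SYSCALL_CLASS_DIAG << SYSCALL_CLASS_SHIFT: diagCall64();
 *	default: i386_exception(EXC_SYSCALL, rax, 0);	// unknown class
 *	}
 */
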

Entry(lo64_unix_scall)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_UNIX(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(unix_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo64_mach_scall)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_MACH(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(mach_call_munger64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo64_mdep_scall)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(machdep_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo64_diag_scall)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
	xchgl	%ebx,%esp		// Switch to it, saving the previous

	CCALL1(diagCall64, %ebx)	// Call diagnostics

	cmpl	$0,%eax			// What kind of return is this?
	je	2f
	cli				// Disable interruptions just in case they were enabled
	popl	%esp			// Get back the original stack
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
2:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
		// pass what would be the diag syscall
		// error return - cause an exception
	/* no return */

/*
 * Utility routines.
 */

/*
 * Copy from user/kernel address space.
 * arg0:	window offset or kernel address
 * arg1:	kernel address
 * arg2:	byte count
 */
Entry(copyinphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%ds

Entry(copyinphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%es
	jmp	copyin_common

Entry(copyin_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%ds

Entry(copyin_kern)

copyin_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb
	xorl	%eax,%eax		/* return 0 for success */
copyin_ret:
	mov	%ss,%cx			/* restore kernel data and extended segments */
	mov	%cx,%ds
	mov	%cx,%es

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyin_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyin_ret		/* pop frame and return */

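/*
 * The copy loop above is the classic rep-movs split: move count/4
 * longwords, then the count%4 tail bytes --
 *
 *	rep movsl with %ecx = count >> 2;
 *	rep movsb with %ecx = count & 3;
 *
 * with each rep instruction covered by a RECOVER() entry so that a
 * fault on the user window lands in copyin_fail and returns EFAULT
 * instead of panicking.
 */
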

/*
 * Copy string from user/kern address space.
 * arg0:	window offset or kernel address
 * arg1:	kernel address
 * arg2:	max byte count
 * arg3:	actual byte count (OUT)
 */
Entry(copyinstr_kern)
	mov	%ds,%cx
	jmp	copyinstr_common

Entry(copyinstr_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */

copyinstr_common:
	mov	%cx,%fs

	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	xorl	%eax,%eax		/* set to 0 here so that the high 24 bits */
					/* are 0 for the cmpl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(copystr_fail)		/* copy bytes... */
	movb	%fs:(%esi),%al
	incl	%esi
	testl	%edi,%edi		/* if kernel address is NULL ... */
	jz	3f			/* ... skip the store */
	movb	%al,(%edi)		/* copy the byte */
	incl	%edi
3:
	testl	%eax,%eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 status already in %eax */
	decl	%edx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full so copy in another byte */
	movl	$(ENAMETOOLONG),%eax	/* buffer full but no 0-byte: ENAMETOOLONG */
4:
	movl	8+S_ARG3,%edi		/* get OUT len ptr */
	cmpl	$0,%edi
	jz	copystr_ret		/* if null, just return */
	subl	8+S_ARG0,%esi
	movl	%esi,(%edi)		/* else set OUT arg to xfer len */
copystr_ret:
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copystr_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copystr_ret		/* pop frame and return */


/*
 * Copy to user/kern address space.
 * arg0:	kernel address
 * arg1:	window offset or kernel address
 * arg2:	byte count
 */
ENTRY(copyoutphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyoutphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%ds
	jmp	copyout_common

ENTRY(copyout_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyout_kern)

copyout_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - kernel address */
	movl	8+S_ARG1,%edi		/* get destination - window offset or kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsl
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsb				/* move */
	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%cx			/* restore kernel segment */
	mov	%cx,%es
	mov	%cx,%ds

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */

/*
 * io register must not be used on slaves (no AT bus)
 */
#define	ILL_ON_SLAVE


#if	MACH_ASSERT

#define ARG0		B_ARG0
#define ARG1		B_ARG1
#define ARG2		B_ARG2
#define PUSH_FRAME	FRAME
#define POP_FRAME	EMARF

#else	/* MACH_ASSERT */

#define ARG0		S_ARG0
#define ARG1		S_ARG1
#define ARG2		S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif	/* MACH_ASSERT */


#if	MACH_KDB || MACH_ASSERT

/*
 * Following routines are also defined as macros in i386/pio.h
 * Compile them when MACH_KDB is configured so that they
 * can be invoked from the debugger.
 */

/*
 * void outb(unsigned char *io_port,
 *	     unsigned char byte)
 *
 * Output a byte to an IO port.
 */
ENTRY(outb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outb	%al,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Input a byte from an IO port.
 */
ENTRY(inb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inb	%dx,%al			/* get the byte */
	POP_FRAME
	ret

/*
 * void outw(unsigned short *io_port,
 *	     unsigned short word)
 *
 * Output a word to an IO port.
 */
ENTRY(outw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outw	%ax,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Input a word from an IO port.
 */
ENTRY(inw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inw	%dx,%ax			/* get the word */
	POP_FRAME
	ret

/*
 * void outl(unsigned int *io_port,
 *	     unsigned int data)
 *
 * Output an int to an IO port.
 */
ENTRY(outl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outl	%eax,%dx		/* send it out */
	POP_FRAME
	ret

/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Input an int from an IO port.
 */
ENTRY(inl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	inl	%dx,%eax		/* get the int */
	POP_FRAME
	ret

#endif	/* MACH_KDB || MACH_ASSERT */

/*
 * void loutb(unsigned byte *io_port,
 *	      unsigned byte *data,
 *	      unsigned int count)
 *
 * Output an array of bytes to an IO port.
 */
ENTRY(loutb)
ENTRY(outsb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsb				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret


/*
 * void loutw(unsigned short *io_port,
 *	      unsigned short *data,
 *	      unsigned int count)
 *
 * Output an array of shorts to an IO port.
 */
ENTRY(loutw)
ENTRY(outsw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsw				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret

/*
 * void loutl(unsigned short io_port,
 *	      unsigned int *data,
 *	      unsigned int count)
 *
 * Output an array of longs to an IO port.
 */
ENTRY(loutl)
ENTRY(outsl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsl				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret


/*
 * void linb(unsigned char *io_port,
 *	     unsigned char *data,
 *	     unsigned int count)
 *
 * Input an array of bytes from an IO port.
 */
ENTRY(linb)
ENTRY(insb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insb				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret


/*
 * void linw(unsigned short *io_port,
 *	     unsigned short *data,
 *	     unsigned int count)
 *
 * Input an array of shorts from an IO port.
 */
ENTRY(linw)
ENTRY(insw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insw				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret


/*
 * void linl(unsigned short io_port,
 *	     unsigned int *data,
 *	     unsigned int count)
 *
 * Input an array of longs from an IO port.
 */
ENTRY(linl)
ENTRY(insl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insl				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl	S_ARG0, %ecx
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr
	movl	S_ARG1, %ecx
	movl	%eax, (%ecx)
	movl	S_ARG2, %ecx
	movl	%edx, (%ecx)
	movl	$0, %eax
	ret

rdmsr_fail:
	movl	$1, %eax
	ret

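/*
 * Typical use from C: probe an MSR that may not be implemented, relying
 * on the recovery table to absorb the #GP (a sketch):
 *
 *	uint32_t lo, hi;
 *	if (rdmsr_carefully(msr, &lo, &hi) == 0)
 *		value = ((uint64_t)hi << 32) | lo;
 *	else
 *		;	// MSR faulted; rdmsr_fail returned 1
 */
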
/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END

	.data
dr_msk:
	.long	~0x000f0003
	.long	~0x00f0000c
	.long	~0x0f000030
	.long	~0xf00000c0
ENTRY(dr_addr)
	.long	0,0,0,0
	.long	0,0,0,0

	.text

#ifndef	SYMMETRY

/*
 * ffs(mask)
 */
ENTRY(ffs)
	bsfl	S_ARG0, %eax
	jz	0f
	incl	%eax
	ret
0:	xorl	%eax, %eax
	ret

/*
 * cpu_shutdown()
 * Force reboot
 */

null_idtr:
	.word	0
	.long	0

Entry(cpu_shutdown)
	lidt	null_idtr	/* disable the interrupt handler */
	xor	%ecx,%ecx	/* generate a divide by zero */
	div	%ecx,%eax	/* reboot now */
	ret			/* this will "never" be executed */

#endif	/* SYMMETRY */


/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btsl	%ecx, (%eax)		/* set bit */
	ret

/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btrl	%ecx, (%eax)		/* clear bit */
	ret

/*
 * ffsbit(int *s) - find first set bit in bit string
 */
ENTRY(ffsbit)
	movl	S_ARG0, %ecx		/* address */
	movl	$0, %edx		/* base offset */
0:
	bsfl	(%ecx), %eax		/* check argument bits */
	jnz	1f			/* found bit, return */
	addl	$4, %ecx		/* increment address */
	addl	$32, %edx		/* increment offset */
	jmp	0b			/* try again */
1:
	addl	%edx, %eax		/* return offset */
	ret

/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string
 */
ENTRY(testbit)
	movl	S_ARG0,%eax		/* Get the bit to test */
	movl	S_ARG1,%ecx		/* get the array string */
	btl	%eax,(%ecx)
	sbbl	%eax,%eax
	ret

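/*
 * testbit() relies on btl setting the carry flag to the tested bit;
 * sbbl %eax,%eax then yields 0 (bit clear) or -1 (bit set) without a
 * branch.  Equivalent C:
 *
 *	return (((const uint32_t *)array)[nr >> 5] >> (nr & 31)) & 1
 *		? -1 : 0;
 */
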
ENTRY(get_pc)
	movl	4(%ebp),%eax
	ret

ENTRY(minsecurity)
	pushl	%ebp
	movl	%esp,%ebp
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 */
ENTRY(jail)
	jmp	EXT(jail)

/*
 * unsigned int
 * div_scale(unsigned int dividend,
 *	     unsigned int divisor,
 *	     unsigned int *scale)
 *
 * This function returns (dividend << *scale) / divisor where *scale
 * is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (((dividend >> *scale) >= divisor))
 *		(*scale)++;
 *	*scale = 32 - *scale;
 *	return ((dividend << *scale) / divisor);
 */
ENTRY(div_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* *scale = 0 */
	xorl	%eax, %eax
	movl	ARG0, %edx		/* get dividend */
0:
	cmpl	ARG1, %edx		/* if (divisor > dividend) */
	jle	1f			/* goto 1f */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* dividend >> 1 */
	shrl	$1, %edx		/* dividend >> 1 */
	jmp	0b			/* goto 0b */
1:
	divl	ARG1			/* (dividend << (32 - *scale)) / divisor */
	movl	ARG2, %edx		/* get scale */
	movl	$32, (%edx)		/* *scale = 32 */
	subl	%ecx, (%edx)		/* *scale -= %ecx */
	POP_FRAME
	ret

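/*
 * Worked example: div_scale(10, 3, &scale) shifts the dividend right
 * twice before 10 >> 2 <= 3, so *scale = 32 - 2 = 30 and the return
 * value is (10 << 30) / 3 = 0xD5555555 -- i.e. 10/3 in 2.30 fixed
 * point, for the caller to rescale.
 */
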
/*
 * unsigned int
 * mul_scale(unsigned int multiplicand,
 *	     unsigned int multiplier,
 *	     unsigned int *scale)
 *
 * This function returns ((multiplicand * multiplier) >> *scale) where
 * scale is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (overflow((multiplicand * multiplier) >> *scale))
 *		(*scale)++;
 *	return ((multiplicand * multiplier) >> *scale);
 */
ENTRY(mul_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* *scale = 0 */
	movl	ARG0, %eax		/* get multiplicand */
	mull	ARG1			/* multiplicand * multiplier */
0:
	cmpl	$0, %edx		/* if (!overflow()) */
	je	1f			/* goto 1 */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* (multiplicand * multiplier) >> 1 */
	shrl	$1, %edx		/* (multiplicand * multiplier) >> 1 */
	jmp	0b
1:
	movl	ARG2, %edx		/* get scale */
	movl	%ecx, (%edx)		/* set *scale */
	POP_FRAME
	ret

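/*
 * Worked example: mul_scale(0x80000000, 4, &scale) forms the 64-bit
 * product 0x200000000 in %edx:%eax; two right shifts clear the high
 * word, so the routine returns 0x80000000 with *scale = 2, i.e. the
 * true product shifted right by *scale.
 */
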

/*
 * Double-fault exception handler task. The last gasp...
 */
Entry(df_task_start)
	CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))
	hlt

/*
 * machine-check handler task. The last gasp...
 */
Entry(mc_task_start)
	CCALL1(panic_machine_check, $(T_MACHINE_CHECK))
	hlt

/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl	%esp, %eax
	CCALL1(panic_double_fault64, %eax)
	hlt

Entry(lo_mc64)
	movl	%esp, %eax
	CCALL1(panic_machine_check64, %eax)
	hlt