/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <stat_time.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/lapic.h>
#include <i386/rtclock.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>
#include <config_dtrace.h>

#define	_ARCH_I386_ASM_HELP_H_		/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

#include <i386/mp.h>

#define CLI cli
#define STI sti

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDESHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,(APTDPTDI << PDESHIFT)
	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define	ETEXT_ADDR	(EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define	ETEXT_ADDR	$ EXT(etext)
#endif

#define	CX(addr,reg)	addr(,reg,4)
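
/*
 * Illustrative note (annotation, not in the original source): CX() scales
 * a CPU number into a table of 32-bit entries.  For example,
 *	movl	CX(EXT(master_dbtss),%edx),%edx
 * assembles as "movl master_dbtss(,%edx,4),%edx", loading the %edx'th
 * longword of the master_dbtss array.
 */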

/*
 * The following macros make calls into C code.
 * They dynamically align the stack to 16 bytes.
 * Arguments are moved (not pushed) onto the correctly aligned stack.
 * NOTE: EDI is destroyed in the process, and hence cannot
 * be directly used as a parameter. Users of this macro must
 * independently preserve EDI (a non-volatile) if the routine is
 * intended to be called from C, for instance.
 */

#define CCALL(fn)			\
	movl	%esp, %edi		;\
	andl	$0xFFFFFFF0, %esp	;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL1(fn, arg1)		\
	movl	%esp, %edi		;\
	subl	$4, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL2(fn, arg1, arg2)		\
	movl	%esp, %edi		;\
	subl	$8, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg2, 4(%esp)		;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

/*
 * CCALL5 is used for callee functions with 3 arguments but
 * where arg2 (a3:a2) and arg3 (a5:a4) are 64-bit values.
 */
#define CCALL5(fn, a1, a2, a3, a4, a5)	\
	movl	%esp, %edi		;\
	subl	$20, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	a5, 16(%esp)		;\
	movl	a4, 12(%esp)		;\
	movl	a3, 8(%esp)		;\
	movl	a2, 4(%esp)		;\
	movl	a1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

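/*
 * Worked example (annotation, not in the original source): CCALL1(fn, arg)
 * expands to the sequence
 *	movl	%esp, %edi		- remember the unaligned stack in EDI
 *	subl	$4, %esp		- reserve room for one 32-bit argument
 *	andl	$0xFFFFFFF0, %esp	- round down to a 16-byte boundary
 *	movl	arg, 0(%esp)		- store (not push) the argument
 *	call	EXT(fn)
 *	movl	%edi, %esp		- restore the caller's stack pointer
 * The argument must be stored after the andl: a push done first would be
 * undone by the alignment, which is why arguments are moved, not pushed.
 */
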
	.text
locore_start:

/*
 * Fault recovery.
 */

#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif

#define	RECOVER_TABLE_START	\
	.align	2		; \
	.globl	EXT(recover_table) ;\
LEXT(recover_table)		;\
	.text

#define	RECOVER(addr)		\
	.align	2;		\
	.long	9f		;\
	.long	addr		;\
	.text			;\
9:

#define	RECOVER_TABLE_END		\
	.align	2			;\
	.globl	EXT(recover_table_end)	;\
LEXT(recover_table_end)			;\
	.long 0  /* workaround, see comment below */ ;\
	.text	;

/* TODO FIXME
 * The .long 0 works around a linker bug (insert radar# here):
 * recover_table_end has zero size and bumps up right against saved_esp
 * in acpi_wakeup.s.  recover_table_end is in __RECOVER,__vectors and
 * saved_esp is in __SLEEP,__data, but they're right next to each other,
 * so the linker combines them and incorrectly relocates everything
 * referencing recover_table_end to point into the SLEEP section.
 */
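
/*
 * Usage sketch (annotation, not in the original source): a routine that
 * may fault brackets the risky instruction with a recovery entry, e.g.
 *	RECOVERY_SECTION
 *	RECOVER(copyin_fail)
 *	rep
 *	movsl
 * If the movsl faults, the trap handler searches recover_table for the
 * faulting EIP and resumes execution at copyin_fail instead of panicking.
 */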

/*
 * Allocate recovery and table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START

/*
 * Timing routines.
 */
Entry(timer_update)
	movl	4(%esp),%ecx
	movl	8(%esp),%eax
	movl	12(%esp),%edx
	movl	%eax,TIMER_HIGHCHK(%ecx)
	movl	%edx,TIMER_LOW(%ecx)
	movl	%eax,TIMER_HIGH(%ecx)
	ret

Entry(timer_grab)
	movl	4(%esp),%ecx
0:	movl	TIMER_HIGH(%ecx),%edx
	movl	TIMER_LOW(%ecx),%eax
	cmpl	TIMER_HIGHCHK(%ecx),%edx
	jne	0b
	ret
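
/*
 * Note (annotation, not in the original source): timer_grab retries until
 * TIMER_HIGH and TIMER_HIGHCHK agree, so a 64-bit value can be read
 * consistently on a 32-bit CPU; timer_update writes TIMER_HIGHCHK first
 * and TIMER_HIGH last, so a reader that races an update sees the
 * mismatch and spins until the two high words match again.
 */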

#if STAT_TIME

#define	TIME_TRAP_UENTRY
#define	TIME_TRAP_UEXIT
#define	TIME_INT_ENTRY
#define	TIME_INT_EXIT

#else
/*
 * Nanosecond timing.
 */

/*
 * Nanotime returned in %edx:%eax.
 * Computed from tsc based on the scale factor
 * and an implicit 32 bit shift.
 *
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 */
#define NANOTIME			\
	mov	%gs:CPU_NANOTIME,%edi	; \
	RTC_NANOTIME_READ_FAST()
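
/*
 * Worked equation (annotation, not in the original source, and assuming
 * the rtclock code keeps a (base_tsc, base_ns) snapshot): with a 32.32
 * fixed-point scale factor, the conversion is roughly
 *	ns = base_ns + (((tsc - base_tsc) * scale) >> 32)
 * where the >> 32 is the "implicit 32 bit shift" mentioned above,
 * realized by taking the high half of the 64-bit mull result.
 */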

/*
 * Add 64-bit delta in register dreg : areg to timer pointed to by register treg.
 */
#define TIMER_UPDATE(treg,dreg,areg,offset)					\
	addl	(TIMER_LOW+(offset))(treg),areg		/* add low bits */	;\
	adcl	dreg,(TIMER_HIGH+(offset))(treg)	/* add carry high bits */ ;\
	movl	areg,(TIMER_LOW+(offset))(treg)		/* store updated low bit */ ;\
	movl	(TIMER_HIGH+(offset))(treg),dreg	/* copy high bits */	;\
	movl	dreg,(TIMER_HIGHCHK+(offset))(treg)	/* to high check */
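
/*
 * C sketch of TIMER_UPDATE (annotation, not in the original source),
 * treating the timer as a 64-bit value split into low/high words:
 *	timer->low  += delta_lo;		- addl, sets carry
 *	timer->high += delta_hi + carry;	- adcl
 *	timer->highchk = timer->high;		- republish the check word
 * The highchk copy is what lets timer_grab detect a torn read.
 */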

/*
 * Add time delta to old timer and start new.
 */
#define TIMER_EVENT(old,new)						  \
	NANOTIME				/* edx:eax nanosecs */	; \
	movl	%eax,%esi			/* save timestamp */	; \
	movl	%edx,%edi			/* save timestamp */	; \
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */ ; \
	subl	(old##_TIMER)+TIMER_TSTAMP(%ecx),%eax	/* compute elapsed time */ ; \
	sbbl	(old##_TIMER)+TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax,old##_TIMER)	/* update timer */ ; \
	movl	%esi,(new##_TIMER)+TIMER_TSTAMP(%ecx)	/* set timestamp */ ; \
	movl	%edi,(new##_TIMER)+TIMER_TSTAMP+4(%ecx)	/* set timestamp */ ; \
	leal	(new##_TIMER)(%ecx), %ecx	/* compute new timer pointer */ ; \
	movl	%gs:CPU_PROCESSOR,%ebx		/* get current processor */ ; \
	movl	%ecx,THREAD_TIMER(%ebx)		/* set current timer */	; \
	movl	%esi,%eax			/* restore timestamp */	; \
	movl	%edi,%edx			/* restore timestamp */	; \
	subl	(old##_STATE)+TIMER_TSTAMP(%ebx),%eax	/* compute elapsed time */ ; \
	sbbl	(old##_STATE)+TIMER_TSTAMP+4(%ebx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ebx,%edx,%eax,old##_STATE)	/* update timer */ ; \
	leal	(new##_STATE)(%ebx),%ecx	/* compute new state pointer */ ; \
	movl	%ecx,CURRENT_STATE(%ebx)	/* set current state */	; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */
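
/*
 * Equivalent C sketch of TIMER_EVENT (annotation, not in the original
 * source):
 *	now = nanotime();
 *	thread->old_timer += now - thread->old_timer.tstamp;
 *	thread->new_timer.tstamp = now;
 *	processor->thread_timer = &thread->new_timer;
 *	processor->old_state += now - processor->old_state.tstamp;
 *	processor->current_state = &processor->new_state;
 *	processor->new_state.tstamp = now;
 * i.e. one timestamp read charges the elapsed interval to both the
 * per-thread timer and the per-processor state timer.
 */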

/*
 * Update time on user trap entry.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 */
#define	TIME_TRAP_UENTRY	TIMER_EVENT(USER,SYSTEM)

/*
 * Update time on user trap exit.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 */
#define	TIME_TRAP_UEXIT		TIMER_EVENT(SYSTEM,USER)

/*
 * Update time on interrupt entry.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 * Saves processor state info on stack.
 */
#define	TIME_INT_ENTRY							  \
	NANOTIME				/* edx:eax nanosecs */	; \
	movl	%eax,%gs:CPU_INT_EVENT_TIME	/* save in cpu data */	; \
	movl	%edx,%gs:CPU_INT_EVENT_TIME+4	/* save in cpu data */	; \
	movl	%eax,%esi			/* save timestamp */	; \
	movl	%edx,%edi			/* save timestamp */	; \
	movl	%gs:CPU_PROCESSOR,%ebx		/* get current processor */ ; \
	movl	THREAD_TIMER(%ebx),%ecx		/* get current timer */	; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax,0)		/* update timer */	; \
	movl	KERNEL_TIMER(%ebx),%ecx		/* point to kernel timer */ ; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */	; \
	movl	%esi,%eax			/* restore timestamp */	; \
	movl	%edi,%edx			/* restore timestamp */	; \
	movl	CURRENT_STATE(%ebx),%ecx	/* get current state */	; \
	pushl	%ecx				/* save state */	; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax,0)		/* update timer */	; \
	leal	IDLE_STATE(%ebx),%eax		/* get idle state */	; \
	cmpl	%eax,%ecx			/* compare current state */ ; \
	je	0f				/* skip if equal */	; \
	leal	SYSTEM_STATE(%ebx),%ecx		/* get system state */	; \
	movl	%ecx,CURRENT_STATE(%ebx)	/* set current state */	; \
0:	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */

/*
 * Update time on interrupt exit.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 * Restores processor state info from stack.
 */
#define	TIME_INT_EXIT							  \
	NANOTIME				/* edx:eax nanosecs */	; \
	movl	%eax,%gs:CPU_INT_EVENT_TIME	/* save in cpu data */	; \
	movl	%edx,%gs:CPU_INT_EVENT_TIME+4	/* save in cpu data */	; \
	movl	%eax,%esi			/* save timestamp */	; \
	movl	%edx,%edi			/* save timestamp */	; \
	movl	%gs:CPU_PROCESSOR,%ebx		/* get current processor */ ; \
	movl	KERNEL_TIMER(%ebx),%ecx		/* point to kernel timer */ ; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax,0)		/* update timer */	; \
	movl	THREAD_TIMER(%ebx),%ecx		/* interrupted timer */	; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */	; \
	movl	%esi,%eax			/* restore timestamp */	; \
	movl	%edi,%edx			/* restore timestamp */	; \
	movl	CURRENT_STATE(%ebx),%ecx	/* get current state */	; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax,0)		/* update timer */	; \
	popl	%ecx				/* restore state */	; \
	movl	%ecx,CURRENT_STATE(%ebx)	/* set current state */	; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */

#endif /* STAT_TIME */

#undef PDEBUG

#ifdef PDEBUG

/*
 * Traditional, not ANSI.
 */
#define CAH(label)			 \
	.data				;\
	.globl	label/**/count		;\
label/**/count:				;\
	.long	0			;\
	.globl	label/**/limit		;\
label/**/limit:				;\
	.long	0			;\
	.text				;\
	addl	$1,%ss:label/**/count	;\
	cmpl	$0,label/**/limit	;\
	jz	label/**/exit		;\
	pushl	%eax			;\
label/**/loop:				;\
	movl	%ss:label/**/count,%eax	;\
	cmpl	%eax,%ss:label/**/limit	;\
	je	label/**/loop		;\
	popl	%eax			;\
label/**/exit:

#else	/* PDEBUG */

#define CAH(label)

#endif	/* PDEBUG */

#if	MACH_KDB
/*
 * Last-ditch debug code to handle faults that might result
 * from entering kernel (from collocated server) on an invalid
 * stack.  On collocated entry, there's no hardware-initiated
 * stack switch, so a valid stack must be in place when an
 * exception occurs, or we may double-fault.
 *
 * In case of a double-fault, our only recourse is to switch
 * hardware "tasks", so that we avoid using the current stack.
 *
 * The idea here is just to get the processor into the debugger,
 * post-haste.  No attempt is made to fix up whatever error got
 * us here, so presumably continuing from the debugger will
 * simply land us here again -- at best.
 */
#if	0
/*
 * Note that the per-fault entry points are not currently
 * functional.  The only way to make them work would be to
 * set up separate TSS's for each fault type, which doesn't
 * currently seem worthwhile.  (The offset part of a task
 * gate is always ignored.)  So all faults that task switch
 * currently resume at db_task_start.
 */
/*
 * Double fault (Murphy's point) - error code (0) on stack
 */
Entry(db_task_dbl_fault)
	popl	%eax
	movl	$(T_DOUBLE_FAULT),%ebx
	jmp	db_task_start
/*
 * Segment not present - error code on stack
 */
Entry(db_task_seg_np)
	popl	%eax
	movl	$(T_SEGMENT_NOT_PRESENT),%ebx
	jmp	db_task_start
/*
 * Stack fault - error code on (current) stack
 */
Entry(db_task_stk_fault)
	popl	%eax
	movl	$(T_STACK_FAULT),%ebx
	jmp	db_task_start
/*
 * General protection fault - error code on stack
 */
Entry(db_task_gen_prot)
	popl	%eax
	movl	$(T_GENERAL_PROTECTION),%ebx
	jmp	db_task_start
#endif	/* 0 */
/*
 * The entry point where execution resumes after last-ditch debugger task
 * switch.
 */
Entry(db_task_start)
	movl	%esp,%edx
	subl	$(ISS32_SIZE),%edx
	movl	%edx,%esp		/* allocate x86_saved_state on stack */
	movl	%eax,R32_ERR(%esp)
	movl	%ebx,R32_TRAPNO(%esp)
	pushl	%edx
	CPU_NUMBER(%edx)
	movl	CX(EXT(master_dbtss),%edx),%edx
	movl	TSS_LINK(%edx),%eax
	pushl	%eax			/* pass along selector of previous TSS */
	call	EXT(db_tss_to_frame)
	popl	%eax			/* get rid of TSS selector */
	call	EXT(db_trap_from_asm)
	addl	$0x4,%esp
	/*
	 * And now...?
	 */
	iret				/* ha, ha, ha... */
#endif	/* MACH_KDB */

/*
 * Called as a function, makes the current thread
 * return from the kernel as if from an exception.
 * We will consult with DTrace if this is a
 * newly created thread and we need to fire a probe.
 */

	.globl	EXT(thread_exception_return)
	.globl	EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	call	EXT(dtrace_thread_bootstrap)
#endif

LEXT(thread_exception_return)
	CLI
	movl	%gs:CPU_KERNEL_STACK,%ecx

	movl	(%ecx),%esp		/* switch back to PCB stack */
	xorl	%ecx,%ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)

Entry(call_continuation)
	movl	S_ARG0,%eax			/* get continuation */
	movl	S_ARG1,%edx			/* continuation param */
	movl	S_ARG2,%ecx			/* wait result */
	movl	%gs:CPU_KERNEL_STACK,%esp	/* pop the stack */
	xorl	%ebp,%ebp			/* zero frame pointer */
	subl	$8,%esp				/* align the stack */
	pushl	%ecx
	pushl	%edx
	call	*%eax				/* call continuation */
	addl	$16,%esp
	movl	%gs:CPU_ACTIVE_THREAD,%eax
	pushl	%eax
	call	EXT(thread_terminate)



/*******************************************************************************************************
 *
 * All task 'exceptions' (32- and 64-bit) enter lo_alltraps:
 *	esp	-> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_alltraps)
	movl	R32_CS(%esp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:
	testb	$3,%al
	jz	trap_from_kernel
					/* user mode trap */
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	ACT_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */
	sti

	CCALL1(user_trap, %ebx)		/* call user trap routine */
	cli				/* hold off intrs - critical section */
	popl	%esp			/* switch back to PCB stack */
	xorl	%ecx,%ecx		/* don't check if we're in the PFZ */

/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled.
 */
LEXT(return_from_trap)
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax
	je	EXT(return_to_user)	/* branch if no AST */

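/*
 * Note (annotation, not in the original source): %ecx carried into this
 * path tells us whether the user EIP/RIP must be checked against the
 * commpage "preemption-free zone" (PFZ).  If the thread was interrupted
 * inside the PFZ, the AST is not serviced now; instead %ebx in the saved
 * state is set non-zero so the PFZ code can notice the pended AST and
 * trigger it once it leaves the zone.
 */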
LEXT(return_from_trap_with_ast)
	movl	%gs:CPU_KERNEL_STACK, %ebx
	xchgl	%ebx, %esp		/* switch to kernel stack */

	testl	%ecx, %ecx		/* see if we need to check for an EIP in the PFZ */
	je	2f			/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%ebx)	/* are we a 64-bit task? */
	je	1f
					/* no... 32-bit user mode */
	movl	R32_EIP(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL1(commpage_is_in_pfz32, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	EXT(return_to_user)
1:					/* 64-bit user mode */
	movl	R64_RIP(%ebx), %ecx
	movl	R64_RIP+4(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL2(commpage_is_in_pfz64, %ecx, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	EXT(return_to_user)
2:
	STI				/* interrupts always enabled on return to user mode */
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear framepointer */
	CCALL1(i386_astintr, $0)	/* take the AST */
	CLI

	popl	%esp			/* switch back to PCB stack (w/exc link) */

	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */

LEXT(return_to_user)
	TIME_TRAP_UEXIT

LEXT(ret_to_user)
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(lo_ret_to_user)
	jmp	EXT(lo64_ret_to_user)


/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to the state at time of trap
 * as soon as it's safe for us to do so and not recurse doing preemption.
 */
trap_from_kernel:
	movl	%esp, %eax		/* saved state addr */
	pushl	R32_EIP(%esp)		/* Simulate a CALL from fault point */
	pushl	%ebp			/* Extend framepointer chain */
	movl	%esp, %ebp
	CCALL1(kernel_trap, %eax)	/* Call kernel trap handler */
	popl	%ebp
	addl	$4, %esp
	cli

	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$ AST_URGENT,%eax		/* any urgent preemption? */
	je	ret_to_kernel			/* no, nothing to do */
	cmpl	$ T_PREEMPT,R32_TRAPNO(%esp)
	je	ret_to_kernel			/* T_PREEMPT handled in kernel_trap() */
	testl	$ EFL_IF,R32_EFLAGS(%esp)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	CCALL1(i386_astintr, $1)	/* take the AST */

ret_to_kernel:
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(lo_ret_to_kernel)
	jmp	EXT(lo64_ret_to_kernel)

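/*
 * Note (annotation, not in the original source): the xorl/and sequence
 * above checks whether %esp lies on the current kernel stack without a
 * range compare: if %esp and CPU_KERNEL_STACK differ only in the bits
 * below kernel_stack_mask, they are within the same stack and the masked
 * XOR is zero.  ASTs are only taken when running on that stack.
 */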


/*******************************************************************************************************
 *
 * All interrupts on all tasks enter here with:
 *	esp	-> x86_saved_state_t
 *
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	cmpl	%esp,%ecx
	jb	1f
	leal	-INTSTACK_SIZE(%ecx),%edx
	cmpl	%esp,%edx
	jb	int_from_intstack
1:
	xchgl	%ecx,%esp		/* switch to interrupt stack */

	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */

	subl	$8, %esp		/* for 16-byte stack alignment */
	pushl	%ecx			/* save pointer to old stack */
	movl	%ecx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	ACT_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%gs:CPU_INT_STATE, %eax
	CCALL1(interrupt, %eax)		/* call generic interrupt routine */

	cli				/* just in case we returned with intrs enabled */
	xorl	%eax,%eax
	movl	%eax,%gs:CPU_INT_STATE	/* clear intr state pointer */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%eax
	movl	ACT_PCB(%eax),%eax	/* get act`s PCB */
	movl	PCB_FPS(%eax),%eax	/* get pcb's ims.ifps */
	cmpl	$0,%eax			/* Is there a context? */
	je	1f			/* Branch if not */
	movl	FP_VALID(%eax),%eax	/* Load fp_valid */
	cmpl	$0,%eax			/* Check if valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
2:
	popl	%esp			/* switch back to old stack */

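/*
 * Note (annotation, not in the original source): CR0.TS was set on
 * interrupt entry so that any floating-point use inside the handler
 * would fault rather than clobber the interrupted thread's live FPU
 * state.  On exit, TS is cleared (clts) only if the thread has an FPU
 * context loaded and not yet saved; otherwise TS is left set so the
 * next FPU use takes a device-not-available trap and reloads lazily.
 */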
0c530ab8 760 /* Load interrupted code segment into %eax */
b0d623f7 761 movl R32_CS(%esp),%eax /* assume 32-bit state */
0c530ab8
A
762 cmpl $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
763 jne 3f
764 movl R64_CS(%esp),%eax /* 64-bit user mode */
7653:
2d21ac55 766 testb $3,%al /* user mode, */
0c530ab8
A
767 jnz ast_from_interrupt_user /* go handle potential ASTs */
768 /*
769 * we only want to handle preemption requests if
770 * the interrupt fell in the kernel context
771 * and preemption isn't disabled
772 */
773 movl %gs:CPU_PENDING_AST,%eax
774 testl $ AST_URGENT,%eax /* any urgent requests? */
775 je ret_to_kernel /* no, nothing to do */
776
777 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
778 jne ret_to_kernel /* yes, skip it */
1c79356b 779
91447636 780 movl %gs:CPU_KERNEL_STACK,%eax
1c79356b
A
781 movl %esp,%ecx
782 xorl %eax,%ecx
b0d623f7 783 and EXT(kernel_stack_mask),%ecx
0c530ab8
A
784 testl %ecx,%ecx /* are we on the kernel stack? */
785 jne ret_to_kernel /* no, skip it */
1c79356b 786
0c530ab8
A
787 /*
788 * Take an AST from kernel space. We don't need (and don't want)
789 * to do as much as the case where the interrupt came from user
790 * space.
791 */
792 CCALL1(i386_astintr, $1)
1c79356b 793
0c530ab8 794 jmp ret_to_kernel
1c79356b 795

/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%esp, %edx		/* x86_saved_state */
	CCALL1(interrupt, %edx)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	EXT(ret_to_user)	/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	EXT(return_from_trap_with_ast)	/* return */


/*******************************************************************************************************
 *
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	esp	 -> x86_saved_state32_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_sysenter)
	/*
	 * We can be here either for a mach syscall or a unix syscall,
	 * as indicated by the sign of the code:
	 */
	movl	R32_EAX(%esp),%eax
	testl	%eax,%eax
	js	EXT(lo_mach_scall)	/* < 0 => mach */
					/* > 0 => unix */

Entry(lo_unix_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_UNIX(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(unix_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mach_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_MACH(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(mach_call_munger, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mdep_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(machdep_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_diag_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx	// Get the address of the kernel stack
	xchgl	%ebx,%esp			// Switch to it, saving the previous

	CCALL1(diagCall, %ebx)			// Call diagnostics

	cmpl	$0,%eax				// What kind of return is this?
	je	2f
	cli					// Disable interruptions just in case they were enabled
	popl	%esp				// Get back the original stack
	jmp	EXT(return_to_user)		// Normal return, do not check asts...
2:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
						// pass what would be the diag syscall
						// error return - cause an exception
	/* no return */


/*******************************************************************************************************
 *
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	esp	 -> x86_saved_state64_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_syscall)
	TIME_TRAP_UENTRY

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl	R64_RAX(%esp), %eax		/* syscall number/class */
	movl	%eax, %ebx
	andl	$(SYSCALL_CLASS_MASK), %ebx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_mach_scall)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_unix_scall)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_mdep_scall)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_diag_scall)

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	/* Syscall class unknown */
	CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
	/* no return */
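
/*
 * Note (annotation, not in the original source): for 64-bit tasks the
 * syscall class lives in the high byte of %rax (SYSCALL_CLASS_SHIFT is
 * 24 in <mach/i386/syscall_sw.h>), e.g. a Mach trap is issued as
 *	(SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT) | trap_number
 * so masking with SYSCALL_CLASS_MASK above selects the dispatch target.
 */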


Entry(lo64_unix_scall)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_UNIX(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(unix_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_mach_scall)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_MACH(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(mach_call_munger64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */



Entry(lo64_mdep_scall)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(machdep_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_diag_scall)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx	// Get the address of the kernel stack
	xchgl	%ebx,%esp			// Switch to it, saving the previous

	CCALL1(diagCall64, %ebx)		// Call diagnostics

	cmpl	$0,%eax				// What kind of return is this?
	je	2f
	cli					// Disable interruptions just in case they were enabled
	popl	%esp				// Get back the original stack
	jmp	EXT(return_to_user)		// Normal return, do not check asts...
2:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
						// pass what would be the diag syscall
						// error return - cause an exception
	/* no return */


/*
 * Utility routines.
 */


/*
 * Copy from user/kernel address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: byte count
 */
Entry(copyinphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%ds

Entry(copyinphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%es
	jmp	copyin_common

Entry(copyin_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%ds

Entry(copyin_kern)

copyin_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb
	xorl	%eax,%eax		/* return 0 for success */
copyin_ret:
	mov	%ss,%cx			/* restore kernel data and extended segments */
	mov	%cx,%ds
	mov	%cx,%es

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyin_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyin_ret		/* pop frame and return */

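/*
 * Note (annotation, not in the original source): the movs instructions
 * copy from %ds:(%esi) to %es:(%edi), so the entry points above select
 * the source/destination address space purely by loading segment
 * selectors: USER_WINDOW_SEL maps the user address space through the
 * kernel's window, PHYS_WINDOW_SEL maps physical memory, and copyin_ret
 * reloads %ds/%es from %ss to get back to plain kernel addressing.
 */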

/*
 * Copy string from user/kern address space.
 * arg0: window offset or kernel address
 * arg1: kernel address
 * arg2: max byte count
 * arg3: actual byte count (OUT)
 */
Entry(copyinstr_kern)
	mov	%ds,%cx
	jmp	copyinstr_common

Entry(copyinstr_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */

copyinstr_common:
	mov	%cx,%fs

	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	xorl	%eax,%eax		/* set to 0 here so that the high 24 bits */
					/* are 0 for the cmpl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(copystr_fail)		/* copy bytes... */
	movb	%fs:(%esi),%al
	incl	%esi
	testl	%edi,%edi		/* if kernel address is NULL ... */
	jz	3f			/* ... skip the store */
	movb	%al,(%edi)		/* copy the byte */
	incl	%edi
3:
	testl	%eax,%eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 status already in %eax */
	decl	%edx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full so copy in another byte */
	movl	$(ENAMETOOLONG),%eax	/* buffer full but no 0-byte: ENAMETOOLONG */
4:
	movl	8+S_ARG3,%edi		/* get OUT len ptr */
	cmpl	$0,%edi
	jz	copystr_ret		/* if null, just return */
	subl	8+S_ARG0,%esi
	movl	%esi,(%edi)		/* else set OUT arg to xfer len */
copystr_ret:
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copystr_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copystr_ret		/* pop frame and return */


/*
 * Copy to user/kern address space.
 * arg0: kernel address
 * arg1: window offset or kernel address
 * arg2: byte count
 */
ENTRY(copyoutphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyoutphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%ds
	jmp	copyout_common

ENTRY(copyout_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyout_kern)

copyout_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - kernel address */
	movl	8+S_ARG1,%edi		/* get destination - window offset or kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsl
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsb				/* move */
	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%cx			/* restore kernel segment */
	mov	%cx,%es
	mov	%cx,%ds

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */

/*
 * io register must not be used on slaves (no AT bus)
 */
#define	ILL_ON_SLAVE


#if	MACH_ASSERT

#define ARG0		B_ARG0
#define ARG1		B_ARG1
#define ARG2		B_ARG2
#define PUSH_FRAME	FRAME
#define POP_FRAME	EMARF

#else	/* MACH_ASSERT */

#define ARG0		S_ARG0
#define ARG1		S_ARG1
#define ARG2		S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif	/* MACH_ASSERT */

/*
 * void loutb(unsigned byte *io_port,
 *	      unsigned byte *data,
 *	      unsigned int count)
 *
 * Output an array of bytes to an IO port.
 */
ENTRY(loutb)
ENTRY(outsb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsb				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret


/*
 * void loutw(unsigned short *io_port,
 *	      unsigned short *data,
 *	      unsigned int count)
 *
 * Output an array of shorts to an IO port.
 */
ENTRY(loutw)
ENTRY(outsw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsw				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret

/*
 * void loutl(unsigned short io_port,
 *	      unsigned int *data,
 *	      unsigned int count)
 *
 * Output an array of longs to an IO port.
 */
ENTRY(loutl)
ENTRY(outsl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsl				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret


/*
 * void linb(unsigned char *io_port,
 *	     unsigned char *data,
 *	     unsigned int count)
 *
 * Input an array of bytes from an IO port.
 */
ENTRY(linb)
ENTRY(insb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insb				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret


/*
 * void linw(unsigned short *io_port,
 *	     unsigned short *data,
 *	     unsigned int count)
 *
 * Input an array of shorts from an IO port.
 */
ENTRY(linw)
ENTRY(insw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insw				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret


/*
 * void linl(unsigned short io_port,
 *	     unsigned int *data,
 *	     unsigned int count)
 *
 * Input an array of longs from an IO port.
 */
ENTRY(linl)
ENTRY(insl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insl				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl	S_ARG0, %ecx
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr
	movl	S_ARG1, %ecx
	movl	%eax, (%ecx)
	movl	S_ARG2, %ecx
	movl	%edx, (%ecx)
	movl	$0, %eax
	ret

rdmsr_fail:
	movl	$1, %eax
	ret
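
/*
 * Usage sketch (annotation, not in the original source): rdmsr normally
 * takes a #GP fault if the MSR does not exist; the RECOVER entry above
 * turns that fault into an error return instead.  From C:
 *	uint32_t lo, hi;
 *	if (rdmsr_carefully(msr, &lo, &hi) != 0)
 *		... MSR not implemented on this CPU ...
 */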

/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END

	.data
dr_msk:
	.long	~0x000f0003
	.long	~0x00f0000c
	.long	~0x0f000030
	.long	~0xf00000c0
ENTRY(dr_addr)
	.long	0,0,0,0
	.long	0,0,0,0

	.text

#ifndef	SYMMETRY

/*
 * ffs(mask)
 */
ENTRY(ffs)
	bsfl	S_ARG0, %eax
	jz	0f
	incl	%eax
	ret
0:	xorl	%eax, %eax
	ret

/*
 * cpu_shutdown()
 * Force reboot
 */

null_idtr:
	.word	0
	.long	0

Entry(cpu_shutdown)
	lidt	null_idtr	/* disable the interrupt handler */
	xor	%ecx,%ecx	/* generate a divide by zero */
	div	%ecx,%eax	/* reboot now */
	ret			/* this will "never" be executed */

#endif	/* SYMMETRY */


/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btsl	%ecx, (%eax)		/* set bit */
	ret

/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btrl	%ecx, (%eax)		/* clear bit */
	ret

/*
 * ffsbit(int *s) - find first set bit in bit string
 */
ENTRY(ffsbit)
	movl	S_ARG0, %ecx		/* address */
	movl	$0, %edx		/* base offset */
0:
	bsfl	(%ecx), %eax		/* check argument bits */
	jnz	1f			/* found bit, return */
	addl	$4, %ecx		/* increment address */
	addl	$32, %edx		/* increment offset */
	jmp	0b			/* try again */
1:
	addl	%edx, %eax		/* return offset */
	ret

/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string
 */

ENTRY(testbit)
	movl	S_ARG0,%eax		/* Get the bit to test */
	movl	S_ARG1,%ecx		/* get the array string */
	btl	%eax,(%ecx)
	sbbl	%eax,%eax
	ret

ENTRY(get_pc)
	movl	4(%ebp),%eax
	ret

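/*
 * Note (annotation, not in the original source): in testbit above, btl
 * copies the tested bit into the carry flag, and "sbbl %eax,%eax"
 * computes %eax - %eax - CF, so the routine returns 0 if the bit was
 * clear and -1 (all ones) if it was set, without any branch.
 */
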
ENTRY(minsecurity)
	pushl	%ebp
	movl	%esp,%ebp
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 */
ENTRY(jail)
	jmp	EXT(jail)

/*
 * unsigned int
 * div_scale(unsigned int dividend,
 *	     unsigned int divisor,
 *	     unsigned int *scale)
 *
 * This function returns (dividend << *scale) / divisor where *scale
 * is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (((dividend >> *scale) >= divisor))
 *		(*scale)++;
 *	*scale = 32 - *scale;
 *	return ((dividend << *scale) / divisor);
 */
ENTRY(div_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* *scale = 0 */
	xorl	%eax, %eax
	movl	ARG0, %edx		/* get dividend */
0:
	cmpl	ARG1, %edx		/* if (divisor > dividend) */
	jle	1f			/* goto 1f */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* dividend >> 1 */
	shrl	$1, %edx		/* dividend >> 1 */
	jmp	0b			/* goto 0b */
1:
	divl	ARG1			/* (dividend << (32 - *scale)) / divisor */
	movl	ARG2, %edx		/* get scale */
	movl	$32, (%edx)		/* *scale = 32 */
	subl	%ecx, (%edx)		/* *scale -= %ecx */
	POP_FRAME
	ret
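
/*
 * Worked example (annotation, not in the original source): for
 * div_scale(3, 10, &s) the loop never shifts (3 < 10), so s = 32 and the
 * routine returns (3 << 32) / 10 = 0x4CCCCCCC, i.e. 0.3 as a 0.32
 * fixed-point fraction.  Callers can then multiply by this constant and
 * shift right by *scale instead of dividing.
 */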

/*
 * unsigned int
 * mul_scale(unsigned int multiplicand,
 *	     unsigned int multiplier,
 *	     unsigned int *scale)
 *
 * This function returns ((multiplicand * multiplier) >> *scale) where
 * *scale is the smallest shift for which the product fits in 32 bits.
 * This is used in computation where precision must be achieved in order
 * to avoid floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (overflow((multiplicand * multiplier) >> *scale))
 *		(*scale)++;
 *	return ((multiplicand * multiplier) >> *scale);
 */
ENTRY(mul_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* *scale = 0 */
	movl	ARG0, %eax		/* get multiplicand */
	mull	ARG1			/* multiplicand * multiplier */
0:
	cmpl	$0, %edx		/* if (!overflow()) */
	je	1f			/* goto 1 */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* (multiplicand * multiplier) >> 1 */
	shrl	$1, %edx		/* (multiplicand * multiplier) >> 1 */
	jmp	0b
1:
	movl	ARG2, %edx		/* get scale */
	movl	%ecx, (%edx)		/* set *scale */
	POP_FRAME
	ret


/*
 * Double-fault exception handler task. The last gasp...
 */
Entry(df_task_start)
	CCALL1(panic_double_fault32, $(T_DOUBLE_FAULT))
	hlt


/*
 * machine-check handler task. The last gasp...
 */
Entry(mc_task_start)
	CCALL1(panic_machine_check32, $(T_MACHINE_CHECK))
	hlt

/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl	%esp, %eax
	CCALL1(panic_double_fault64, %eax)
	hlt

Entry(lo_mc64)
	movl	%esp, %eax
	CCALL1(panic_machine_check64, %eax)
	hlt