]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/locore.s
xnu-1228.7.58.tar.gz
[apple/xnu.git] / osfmk / i386 / locore.s
CommitLineData
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
1c79356b
A
57#include <mach_rt.h>
58#include <platforms.h>
59#include <mach_kdb.h>
60#include <mach_kgdb.h>
61#include <mach_kdp.h>
62#include <stat_time.h>
63#include <mach_assert.h>
64
65#include <sys/errno.h>
66#include <i386/asm.h>
67#include <i386/cpuid.h>
68#include <i386/eflags.h>
69#include <i386/proc_reg.h>
70#include <i386/trap.h>
71#include <assym.s>
72#include <mach/exception_types.h>
73
0c530ab8
A
74#define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
75#include <mach/i386/syscall_sw.h>
6601e61a 76
0c530ab8 77#include <i386/mp.h>
6601e61a 78
91447636
A
/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
        .globl  _PTmap,_PTD,_PTDpde
        .set    _PTmap,(PTDPTDI << PDESHIFT)
        .set    _PTD,_PTmap + (PTDPTDI * NBPG)
        .set    _PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 * NOTE(review): _APTDpde is based on _PTD (not _APTD) because the
 * alternate-map PDE itself lives in the primary page directory --
 * this matches the historical BSD locore convention; confirm intentional.
 */
        .globl  _APTmap,_APTD,_APTDpde
        .set    _APTmap,(APTDPTDI << PDESHIFT)
        .set    _APTD,_APTmap + (APTDPTDI * NBPG)
        .set    _APTDpde,_PTD + (APTDPTDI * PDESIZE)

#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define ETEXT_ADDR      (EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define ETEXT_ADDR      $ EXT(etext)
#endif

/* Index a 4-byte-element per-CPU array: addr[reg] */
#define CX(addr,reg)    addr(,reg,4)
/*
 * The following macros make calls into C code.
 * They dynamically align the stack to 16 bytes.
 * Arguments are moved (not pushed) onto the correctly aligned stack.
 * NOTE: EDI is destroyed in the process, and hence cannot
 * be directly used as a parameter. Users of this macro must
 * independently preserve EDI (a non-volatile) if the routine is
 * intended to be called from C, for instance.
 */

#define CCALL(fn)                       \
        movl    %esp, %edi              ;\
        andl    $0xFFFFFFF0, %esp       ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp

#define CCALL1(fn, arg1)                \
        movl    %esp, %edi              ;\
        subl    $4, %esp                ;\
        andl    $0xFFFFFFF0, %esp       ;\
        movl    arg1, 0(%esp)           ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp

#define CCALL2(fn, arg1, arg2)          \
        movl    %esp, %edi              ;\
        subl    $8, %esp                ;\
        andl    $0xFFFFFFF0, %esp       ;\
        movl    arg2, 4(%esp)           ;\
        movl    arg1, 0(%esp)           ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp

/*
 * CCALL5 is used for callee functions with 3 arguments but
 * where arg2 (a3:a2) and arg3 (a5:a4) are 64-bit values.
 */
#define CCALL5(fn, a1, a2, a3, a4, a5)  \
        movl    %esp, %edi              ;\
        subl    $20, %esp               ;\
        andl    $0xFFFFFFF0, %esp       ;\
        movl    a5, 16(%esp)            ;\
        movl    a4, 12(%esp)            ;\
        movl    a3, 8(%esp)             ;\
        movl    a2, 4(%esp)             ;\
        movl    a1, 0(%esp)             ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp
160 .text
161locore_start:
162
163/*
164 * Fault recovery.
165 */
166
167#ifdef __MACHO__
168#define RECOVERY_SECTION .section __VECTORS, __recover
1c79356b
A
169#else
170#define RECOVERY_SECTION .text
171#define RECOVERY_SECTION .text
172#endif
173
174#define RECOVER_TABLE_START \
175 .align 2 ; \
176 .globl EXT(recover_table) ;\
177LEXT(recover_table) ;\
178 .text
179
180#define RECOVER(addr) \
181 .align 2; \
182 .long 9f ;\
183 .long addr ;\
184 .text ;\
1859:
186
187#define RECOVER_TABLE_END \
188 .align 2 ;\
189 .globl EXT(recover_table_end) ;\
190LEXT(recover_table_end) ;\
191 .text
192
193/*
0c530ab8 194 * Allocate recovery and table.
1c79356b
A
195 */
196 RECOVERY_SECTION
197 RECOVER_TABLE_START
1c79356b
A
198
/*
 * Timing routines.
 *
 * 64-bit timers are protected seqlock-style: the writer stores
 * HIGHCHK, then LOW, then HIGH; the reader loads HIGH, LOW and
 * retries until HIGH == HIGHCHK, guaranteeing a consistent pair.
 */
Entry(timer_update)
        movl    4(%esp),%ecx            /* ecx = timer pointer */
        movl    8(%esp),%eax            /* eax = new high word */
        movl    12(%esp),%edx           /* edx = new low word */
        movl    %eax,TIMER_HIGHCHK(%ecx)
        movl    %edx,TIMER_LOW(%ecx)
        movl    %eax,TIMER_HIGH(%ecx)
        ret

Entry(timer_grab)
        movl    4(%esp),%ecx            /* ecx = timer pointer */
0:      movl    TIMER_HIGH(%ecx),%edx
        movl    TIMER_LOW(%ecx),%eax
        cmpl    TIMER_HIGHCHK(%ecx),%edx
        jne     0b                      /* torn read - retry */
        ret                             /* returns edx:eax */
#if STAT_TIME

#define TIME_TRAP_UENTRY
#define TIME_TRAP_UEXIT
#define TIME_INT_ENTRY
#define TIME_INT_EXIT

#else
/*
 * Nanosecond timing.
 */

/*
 * Nanotime returned in %edx:%eax.
 * Computed from tsc based on the scale factor
 * and an implicit 32 bit shift.
 * This code must match what _rtc_nanotime_read does in
 * i386/machine_routines_asm.s.  Failure to do so can
 * result in "weird" timing results.
 *
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 */
#define RNT_INFO                _rtc_nanotime_info
#define NANOTIME                                                        \
        lea     RNT_INFO,%edi                                           ; \
0:                                                                      ; \
        movl    RNT_GENERATION(%edi),%esi  /* being updated? */         ; \
        testl   %esi,%esi                                               ; \
        jz      0b                      /* wait until done */           ; \
        rdtsc                                                           ; \
        subl    RNT_TSC_BASE(%edi),%eax                                 ; \
        sbbl    RNT_TSC_BASE+4(%edi),%edx  /* tsc - tsc_base */         ; \
        movl    RNT_SCALE(%edi),%ecx    /* * scale factor */            ; \
        movl    %edx,%ebx                                               ; \
        mull    %ecx                                                    ; \
        movl    %ebx,%eax                                               ; \
        movl    %edx,%ebx                                               ; \
        mull    %ecx                                                    ; \
        addl    %ebx,%eax                                               ; \
        adcl    $0,%edx                                                 ; \
        addl    RNT_NS_BASE(%edi),%eax  /* + ns_base */                 ; \
        adcl    RNT_NS_BASE+4(%edi),%edx                                ; \
        cmpl    RNT_GENERATION(%edi),%esi  /* check for update */       ; \
        jne     0b                      /* do it all again */

/*
 * Add 64-bit delta in register dreg : areg to timer pointed to by register treg.
 * Writes HIGH before LOW-consistency check word (HIGHCHK) per the
 * timer_grab protocol above.
 */
#define TIMER_UPDATE(treg,dreg,areg)                                    \
        addl    TIMER_LOW(treg),areg    /* add low bits */            ; \
        adcl    dreg,TIMER_HIGH(treg)   /* add carry high bits */     ; \
        movl    areg,TIMER_LOW(treg)    /* store updated low bit */   ; \
        movl    TIMER_HIGH(treg),dreg   /* copy high bits */          ; \
        movl    dreg,TIMER_HIGHCHK(treg)        /* to high check */

/*
 * Add time delta to old timer and start new.
 */
#define TIMER_EVENT(old,new)                                            \
        NANOTIME                        /* edx:eax nanosecs */        ; \
        movl    %eax,%esi               /* save timestamp */          ; \
        movl    %edx,%edi               /* save timestamp */          ; \
        movl    %gs:CPU_PROCESSOR,%ebx  /* get current processor */   ; \
        movl    THREAD_TIMER(%ebx),%ecx /* get current timer */       ; \
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */    ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */  ; \
        TIMER_UPDATE(%ecx,%edx,%eax)    /* update timer */            ; \
        addl    $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ; \
        movl    %esi,TIMER_TSTAMP(%ecx) /* set timestamp */           ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx) /* set timestamp */         ; \
        movl    %ecx,THREAD_TIMER(%ebx) /* set current timer */       ; \
        movl    %esi,%eax               /* restore timestamp */       ; \
        movl    %edi,%edx               /* restore timestamp */       ; \
        movl    CURRENT_STATE(%ebx),%ecx        /* current state */   ; \
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */    ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */  ; \
        TIMER_UPDATE(%ecx,%edx,%eax)    /* update timer */            ; \
        addl    $(new##_STATE-old##_STATE),%ecx /* point to new state */ ; \
        movl    %ecx,CURRENT_STATE(%ebx)        /* set current state */ ; \
        movl    %esi,TIMER_TSTAMP(%ecx) /* set timestamp */           ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */

/*
 * Update time on user trap entry.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 */
#define TIME_TRAP_UENTRY        TIMER_EVENT(USER,SYSTEM)

/*
 * update time on user trap exit.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 */
#define TIME_TRAP_UEXIT         TIMER_EVENT(SYSTEM,USER)

/*
 * update time on interrupt entry.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 * Saves processor state info on stack.
 */
#define TIME_INT_ENTRY                                                  \
        NANOTIME                        /* edx:eax nanosecs */        ; \
        movl    %eax,%gs:CPU_INT_EVENT_TIME     /* save in cpu data */ ; \
        movl    %edx,%gs:CPU_INT_EVENT_TIME+4   /* save in cpu data */ ; \
        movl    %eax,%esi               /* save timestamp */          ; \
        movl    %edx,%edi               /* save timestamp */          ; \
        movl    %gs:CPU_PROCESSOR,%ebx  /* get current processor */   ; \
        movl    THREAD_TIMER(%ebx),%ecx /* get current timer */       ; \
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */    ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */  ; \
        TIMER_UPDATE(%ecx,%edx,%eax)    /* update timer */            ; \
        movl    KERNEL_TIMER(%ebx),%ecx /* point to kernel timer */   ; \
        movl    %esi,TIMER_TSTAMP(%ecx) /* set timestamp */           ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */   ; \
        movl    %esi,%eax               /* restore timestamp */       ; \
        movl    %edi,%edx               /* restore timestamp */       ; \
        movl    CURRENT_STATE(%ebx),%ecx        /* get current state */ ; \
        pushl   %ecx                    /* save state */              ; \
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */    ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */  ; \
        TIMER_UPDATE(%ecx,%edx,%eax)    /* update timer */            ; \
        leal    IDLE_STATE(%ebx),%eax   /* get idle state */          ; \
        cmpl    %eax,%ecx               /* compare current state */   ; \
        je      0f                      /* skip if equal */           ; \
        leal    SYSTEM_STATE(%ebx),%ecx /* get system state */        ; \
        movl    %ecx,CURRENT_STATE(%ebx)        /* set current state */ ; \
0:      movl    %esi,TIMER_TSTAMP(%ecx) /* set timestamp */           ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */

/*
 * update time on interrupt exit.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 * Restores processor state info from stack (pushed by TIME_INT_ENTRY).
 */
#define TIME_INT_EXIT                                                   \
        NANOTIME                        /* edx:eax nanosecs */        ; \
        movl    %eax,%gs:CPU_INT_EVENT_TIME     /* save in cpu data */ ; \
        movl    %edx,%gs:CPU_INT_EVENT_TIME+4   /* save in cpu data */ ; \
        movl    %eax,%esi               /* save timestamp */          ; \
        movl    %edx,%edi               /* save timestamp */          ; \
        movl    %gs:CPU_PROCESSOR,%ebx  /* get current processor */   ; \
        movl    KERNEL_TIMER(%ebx),%ecx /* point to kernel timer */   ; \
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */    ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */  ; \
        TIMER_UPDATE(%ecx,%edx,%eax)    /* update timer */            ; \
        movl    THREAD_TIMER(%ebx),%ecx /* interrupted timer */       ; \
        movl    %esi,TIMER_TSTAMP(%ecx) /* set timestamp */           ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */   ; \
        movl    %esi,%eax               /* restore timestamp */       ; \
        movl    %edi,%edx               /* restore timestamp */       ; \
        movl    CURRENT_STATE(%ebx),%ecx        /* get current state */ ; \
        subl    TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */    ; \
        sbbl    TIMER_TSTAMP+4(%ecx),%edx /* compute elapsed time */  ; \
        TIMER_UPDATE(%ecx,%edx,%eax)    /* update timer */            ; \
        popl    %ecx                    /* restore state */           ; \
        movl    %ecx,CURRENT_STATE(%ebx)        /* set current state */ ; \
        movl    %esi,TIMER_TSTAMP(%ecx) /* set timestamp */           ; \
        movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */

#endif /* STAT_TIME */
#undef PDEBUG

#ifdef PDEBUG

/*
 * Traditional, not ANSI.
 * Debug "checkpoint" macro: counts passes through the tagged site and,
 * when a non-zero limit is set, spins until the limit is advanced.
 */
#define CAH(label)                      \
        .data                           ;\
        .globl  label/**/count          ;\
label/**/count:                         ;\
        .long   0                       ;\
        .globl  label/**/limit          ;\
label/**/limit:                         ;\
        .long   0                       ;\
        .text                           ;\
        addl    $1,%ss:label/**/count   ;\
        cmpl    $0,label/**/limit       ;\
        jz      label/**/exit           ;\
        pushl   %eax                    ;\
label/**/loop:                          ;\
        movl    %ss:label/**/count,%eax ;\
        cmpl    %eax,%ss:label/**/limit ;\
        je      label/**/loop           ;\
        popl    %eax                    ;\
label/**/exit:

#else   /* PDEBUG */

#define CAH(label)

#endif  /* PDEBUG */
#if     MACH_KDB
/*
 * Last-ditch debug code to handle faults that might result
 * from entering kernel (from collocated server) on an invalid
 * stack.  On collocated entry, there's no hardware-initiated
 * stack switch, so a valid stack must be in place when an
 * exception occurs, or we may double-fault.
 *
 * In case of a double-fault, our only recourse is to switch
 * hardware "tasks", so that we avoid using the current stack.
 *
 * The idea here is just to get the processor into the debugger,
 * post-haste.  No attempt is made to fix up whatever error got
 * us here, so presumably continuing from the debugger will
 * simply land us here again -- at best.
 */
#if     0
/*
 * Note that the per-fault entry points are not currently
 * functional.  The only way to make them work would be to
 * set up separate TSS's for each fault type, which doesn't
 * currently seem worthwhile.  (The offset part of a task
 * gate is always ignored.)  So all faults that task switch
 * currently resume at db_task_start.
 */
/*
 * Double fault (Murphy's point) - error code (0) on stack
 */
Entry(db_task_dbl_fault)
        popl    %eax
        movl    $(T_DOUBLE_FAULT),%ebx
        jmp     db_task_start
/*
 * Segment not present - error code on stack
 */
Entry(db_task_seg_np)
        popl    %eax
        movl    $(T_SEGMENT_NOT_PRESENT),%ebx
        jmp     db_task_start
/*
 * Stack fault - error code on (current) stack
 */
Entry(db_task_stk_fault)
        popl    %eax
        movl    $(T_STACK_FAULT),%ebx
        jmp     db_task_start
/*
 * General protection fault - error code on stack
 */
Entry(db_task_gen_prot)
        popl    %eax
        movl    $(T_GENERAL_PROTECTION),%ebx
        jmp     db_task_start
#endif  /* 0 */
/*
 * The entry point where execution resumes after last-ditch debugger task
 * switch.  On entry: %eax = error code, %ebx = trap number.
 */
Entry(db_task_start)
        movl    %esp,%edx
        subl    $(ISS32_SIZE),%edx
        movl    %edx,%esp               /* allocate x86_saved_state on stack */
        movl    %eax,R_ERR(%esp)
        movl    %ebx,R_TRAPNO(%esp)
        pushl   %edx
        CPU_NUMBER(%edx)
        movl    CX(EXT(master_dbtss),%edx),%edx
        movl    TSS_LINK(%edx),%eax
        pushl   %eax                    /* pass along selector of previous TSS */
        call    EXT(db_tss_to_frame)
        popl    %eax                    /* get rid of TSS selector */
        call    EXT(db_trap_from_asm)
        addl    $0x4,%esp
        /*
         * And now...?
         */
        iret                            /* ha, ha, ha... */
#endif  /* MACH_KDB */
/*
 * Called as a function, makes the current thread
 * return from the kernel as if from an exception.
 */

        .globl  EXT(thread_exception_return)
        .globl  EXT(thread_bootstrap_return)
LEXT(thread_exception_return)
LEXT(thread_bootstrap_return)
        cli
        movl    %gs:CPU_KERNEL_STACK,%ecx
        movl    (%ecx),%esp             /* switch back to PCB stack */
        jmp     EXT(return_from_trap)

/*
 * void call_continuation(routine, param, wait_result)
 * Discards the current kernel stack frame and invokes the continuation;
 * if the continuation returns, the thread is terminated.
 */
Entry(call_continuation)
        movl    S_ARG0,%eax             /* get continuation */
        movl    S_ARG1,%edx             /* continuation param */
        movl    S_ARG2,%ecx             /* wait result */
        movl    %gs:CPU_KERNEL_STACK,%esp       /* pop the stack */
        xorl    %ebp,%ebp               /* zero frame pointer */
        subl    $8,%esp                 /* align the stack */
        pushl   %ecx
        pushl   %edx
        call    *%eax                   /* call continuation */
        addl    $16,%esp
        movl    %gs:CPU_ACTIVE_THREAD,%eax
        pushl   %eax
        call    EXT(thread_terminate)
/*******************************************************************************************************
 *
 * All 64 bit task 'exceptions' enter lo_alltraps:
 *      esp     -> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *      cr3      -> kernel directory
 *      esp      -> low based stack
 *      gs       -> CPU_DATA_GS
 *      cs       -> KERNEL_CS
 *      ss/ds/es -> KERNEL_DS
 *
 *      interrupts disabled
 *      direction flag cleared
 */
Entry(lo_alltraps)
        movl    R_CS(%esp),%eax         /* assume 32-bit state */
        cmpl    $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
        jne     1f
        movl    R64_CS(%esp),%eax       /* 64-bit user mode */
1:
        testb   $3,%al                  /* RPL bits: ring 0 => kernel */
        jz      trap_from_kernel
                                        /* user mode trap */
        TIME_TRAP_UENTRY

        movl    %gs:CPU_ACTIVE_THREAD,%ecx
        movl    ACT_TASK(%ecx),%ebx

        /* Check for active vtimers in the current task */
        cmpl    $0,TASK_VTIMERS(%ebx)
        jz      1f

        /* Set a pending AST */
        orl     $(AST_BSD),%gs:CPU_PENDING_AST

        /* Set a thread AST (atomic) */
        lock
        orl     $(AST_BSD),ACT_AST(%ecx)

1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti

        CCALL1(user_trap, %ebx)         /* call user trap routine */
        cli                             /* hold off intrs - critical section */
        popl    %esp                    /* switch back to PCB stack */

/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled
 */
LEXT(return_from_trap)
        movl    %gs:CPU_PENDING_AST,%eax
        testl   %eax,%eax
        je      EXT(return_to_user)     /* branch if no AST */

        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
        sti                             /* interrupts always enabled on return to user mode */

        pushl   %ebx                    /* save PCB stack */
        xorl    %ebp,%ebp               /* Clear framepointer */
        CCALL1(i386_astintr, $0)        /* take the AST */
        cli
        popl    %esp                    /* switch back to PCB stack (w/exc link) */
        jmp     EXT(return_from_trap)   /* and check again (rare) */

LEXT(return_to_user)
        TIME_TRAP_UEXIT

LEXT(ret_to_user)
        cmpl    $0, %gs:CPU_IS64BIT
        je      EXT(lo_ret_to_user)
        jmp     EXT(lo64_ret_to_user)
/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to state at time of trap
 * as soon as it's safe for us to do so and not recurse doing preemption
 */
trap_from_kernel:
        movl    %esp, %eax              /* saved state addr */
        pushl   R_EIP(%esp)             /* Simulate a CALL from fault point */
        pushl   %ebp                    /* Extend framepointer chain */
        movl    %esp, %ebp
        CCALL1(kernel_trap, %eax)       /* Call kernel trap handler */
        popl    %ebp
        addl    $4, %esp
        cli

        movl    %gs:CPU_PENDING_AST,%eax        /* get pending asts */
        testl   $ AST_URGENT,%eax       /* any urgent preemption? */
        je      ret_to_kernel           /* no, nothing to do */
        cmpl    $ T_PREEMPT,R_TRAPNO(%esp)
        je      ret_to_kernel           /* T_PREEMPT handled in kernel_trap() */
        testl   $ EFL_IF,R_EFLAGS(%esp) /* interrupts disabled? */
        je      ret_to_kernel
        cmpl    $0,%gs:CPU_PREEMPTION_LEVEL     /* preemption disabled? */
        jne     ret_to_kernel
        movl    %gs:CPU_KERNEL_STACK,%eax
        movl    %esp,%ecx
        xorl    %eax,%ecx
        andl    $(-KERNEL_STACK_SIZE),%ecx
        testl   %ecx,%ecx               /* are we on the kernel stack? */
        jne     ret_to_kernel           /* no, skip it */

        CCALL1(i386_astintr, $1)        /* take the AST */

ret_to_kernel:
        cmpl    $0, %gs:CPU_IS64BIT
        je      EXT(lo_ret_to_kernel)
        jmp     EXT(lo64_ret_to_kernel)
/*******************************************************************************************************
 *
 * All interrupts on all tasks enter here with:
 *      esp->    -> x86_saved_state_t
 *
 *      cr3      -> kernel directory
 *      esp      -> low based stack
 *      gs       -> CPU_DATA_GS
 *      cs       -> KERNEL_CS
 *      ss/ds/es -> KERNEL_DS
 *
 *      interrupts disabled
 *      direction flag cleared
 */
Entry(lo_allintrs)
        /*
         * test whether already on interrupt stack
         */
        movl    %gs:CPU_INT_STACK_TOP,%ecx
        cmpl    %esp,%ecx
        jb      1f
        leal    -INTSTACK_SIZE(%ecx),%edx
        cmpl    %esp,%edx
        jb      int_from_intstack
1:
        xchgl   %ecx,%esp               /* switch to interrupt stack */

        movl    %cr0,%eax               /* get cr0 */
        orl     $(CR0_TS),%eax          /* or in TS bit */
        movl    %eax,%cr0               /* set cr0 */

        subl    $8, %esp                /* for 16-byte stack alignment */
        pushl   %ecx                    /* save pointer to old stack */
        movl    %ecx,%gs:CPU_INT_STATE  /* save intr state */

        TIME_INT_ENTRY                  /* do timing */

        movl    %gs:CPU_ACTIVE_THREAD,%ecx
        movl    ACT_TASK(%ecx),%ebx

        /* Check for active vtimers in the current task */
        cmpl    $0,TASK_VTIMERS(%ebx)
        jz      1f

        /* Set a pending AST */
        orl     $(AST_BSD),%gs:CPU_PENDING_AST

        /* Set a thread AST (atomic) */
        lock
        orl     $(AST_BSD),ACT_AST(%ecx)

1:
        incl    %gs:CPU_PREEMPTION_LEVEL
        incl    %gs:CPU_INTERRUPT_LEVEL

        movl    %gs:CPU_INT_STATE, %eax
        CCALL1(PE_incoming_interrupt, %eax)     /* call generic interrupt routine */

        cli                     /* just in case we returned with intrs enabled */
        xorl    %eax,%eax
        movl    %eax,%gs:CPU_INT_STATE  /* clear intr state pointer */

        decl    %gs:CPU_INTERRUPT_LEVEL
        decl    %gs:CPU_PREEMPTION_LEVEL

        TIME_INT_EXIT                   /* do timing */

        /* Restore CR0.TS: clear it only if the thread has a valid,
         * live FP context; otherwise leave TS set so first FP use traps. */
        movl    %gs:CPU_ACTIVE_THREAD,%eax
        movl    ACT_PCB(%eax),%eax      /* get act`s PCB */
        movl    PCB_FPS(%eax),%eax      /* get pcb's ims.ifps */
        cmpl    $0,%eax                 /* Is there a context */
        je      1f                      /* Branch if not */
        movl    FP_VALID(%eax),%eax     /* Load fp_valid */
        cmpl    $0,%eax                 /* Check if valid */
        jne     1f                      /* Branch if valid */
        clts                            /* Clear TS */
        jmp     2f
1:
        movl    %cr0,%eax               /* get cr0 */
        orl     $(CR0_TS),%eax          /* or in TS bit */
        movl    %eax,%cr0               /* set cr0 */
2:
        popl    %esp                    /* switch back to old stack */

        /* Load interrupted code segment into %eax */
        movl    R_CS(%esp),%eax         /* assume 32-bit state */
        cmpl    $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
        jne     3f
        movl    R64_CS(%esp),%eax       /* 64-bit user mode */
3:
        testb   $3,%al                  /* user mode, */
        jnz     ast_from_interrupt_user /* go handle potential ASTs */
        /*
         * we only want to handle preemption requests if
         * the interrupt fell in the kernel context
         * and preemption isn't disabled
         */
        movl    %gs:CPU_PENDING_AST,%eax
        testl   $ AST_URGENT,%eax               /* any urgent requests? */
        je      ret_to_kernel                   /* no, nothing to do */

        cmpl    $0,%gs:CPU_PREEMPTION_LEVEL     /* preemption disabled? */
        jne     ret_to_kernel                   /* yes, skip it */

        movl    %gs:CPU_KERNEL_STACK,%eax
        movl    %esp,%ecx
        xorl    %eax,%ecx
        andl    $(-KERNEL_STACK_SIZE),%ecx
        testl   %ecx,%ecx                       /* are we on the kernel stack? */
        jne     ret_to_kernel                   /* no, skip it */

        /*
         * Take an AST from kernel space.  We don't need (and don't want)
         * to do as much as the case where the interrupt came from user
         * space.
         */
        CCALL1(i386_astintr, $1)

        jmp     ret_to_kernel

/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
        incl    %gs:CPU_PREEMPTION_LEVEL
        incl    %gs:CPU_INTERRUPT_LEVEL

        movl    %esp, %edx              /* x86_saved_state */
        CCALL1(PE_incoming_interrupt, %edx)

        decl    %gs:CPU_INTERRUPT_LEVEL
        decl    %gs:CPU_PREEMPTION_LEVEL

        jmp     ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
        movl    %gs:CPU_PENDING_AST,%eax
        testl   %eax,%eax               /* pending ASTs? */
        je      EXT(ret_to_user)        /* no, nothing to do */

        TIME_TRAP_UENTRY

        jmp     EXT(return_from_trap)   /* return */
/*******************************************************************************************************
 *
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *      esp      -> x86_saved_state32_t
 *      cr3      -> kernel directory
 *      esp      -> low based stack
 *      gs       -> CPU_DATA_GS
 *      cs       -> KERNEL_CS
 *      ss/ds/es -> KERNEL_DS
 *
 *      interrupts disabled
 *      direction flag cleared
 */

Entry(lo_sysenter)
        /*
         * We can be here either for a mach syscall or a unix syscall,
         * as indicated by the sign of the code:
         */
        movl    R_EAX(%esp),%eax
        testl   %eax,%eax
        js      EXT(lo_mach_scall)      /* < 0 => mach */
                                        /* > 0 => unix */

Entry(lo_unix_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread */
        movl    ACT_TASK(%ecx),%ebx             /* point to current task */
        addl    $1,TASK_SYSCALLS_UNIX(%ebx)     /* increment call count */

        /* Check for active vtimers in the current task */
        cmpl    $0,TASK_VTIMERS(%ebx)
        jz      1f

        /* Set a pending AST */
        orl     $(AST_BSD),%gs:CPU_PENDING_AST

        /* Set a thread AST (atomic) */
        lock
        orl     $(AST_BSD),ACT_AST(%ecx)

1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */

        sti

        CCALL1(unix_syscall, %ebx)
        /*
         * always returns through thread_exception_return
         */

Entry(lo_mach_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread */
        movl    ACT_TASK(%ecx),%ebx             /* point to current task */
        addl    $1,TASK_SYSCALLS_MACH(%ebx)     /* increment call count */

        /* Check for active vtimers in the current task */
        cmpl    $0,TASK_VTIMERS(%ebx)
        jz      1f

        /* Set a pending AST */
        orl     $(AST_BSD),%gs:CPU_PENDING_AST

        /* Set a thread AST (atomic) */
        lock
        orl     $(AST_BSD),ACT_AST(%ecx)

1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */

        sti

        CCALL1(mach_call_munger, %ebx)
        /*
         * always returns through thread_exception_return
         */

Entry(lo_mdep_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread */
        movl    ACT_TASK(%ecx),%ebx             /* point to current task */

        /* Check for active vtimers in the current task */
        cmpl    $0,TASK_VTIMERS(%ebx)
        jz      1f

        /* Set a pending AST */
        orl     $(AST_BSD),%gs:CPU_PENDING_AST

        /* Set a thread AST (atomic) */
        lock
        orl     $(AST_BSD),ACT_AST(%ecx)

1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */

        sti

        CCALL1(machdep_syscall, %ebx)
        /*
         * always returns through thread_exception_return
         */

Entry(lo_diag_scall)
        TIME_TRAP_UENTRY

        movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread */
        movl    ACT_TASK(%ecx),%ebx             /* point to current task */

        /* Check for active vtimers in the current task */
        cmpl    $0,TASK_VTIMERS(%ebx)
        jz      1f

        /* Set a pending AST */
        orl     $(AST_BSD),%gs:CPU_PENDING_AST

        /* Set a thread AST (atomic) */
        lock
        orl     $(AST_BSD),ACT_AST(%ecx)

1:
        movl    %gs:CPU_KERNEL_STACK,%ebx       // Get the address of the kernel stack
        xchgl   %ebx,%esp               // Switch to it, saving the previous

        CCALL1(diagCall, %ebx)          // Call diagnostics

        cmpl    $0,%eax                 // What kind of return is this?
        je      2f
        cli                             // Disable interruptions just in case they were enabled
        popl    %esp                    // Get back the original stack
        jmp     EXT(return_to_user)     // Normal return, do not check asts...
2:
        CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
                                        // pass what would be the diag syscall
                                        // error return - cause an exception
        /* no return */
942/*******************************************************************************************************
943 *
944 * 64bit Tasks
945 * System call entries via syscall only:
946 *
947 * esp -> x86_saved_state64_t
948 * cr3 -> kernel directory
949 * esp -> low based stack
950 * gs -> CPU_DATA_GS
951 * cs -> KERNEL_CS
952 * ss/ds/es -> KERNEL_DS
953 *
954 * interrupts disabled
955 * direction flag cleared
1c79356b 956 */
1c79356b 957
0c530ab8 958Entry(lo_syscall)
935ed37a
A
959 TIME_TRAP_UENTRY
960
0c530ab8
A
961 /*
962 * We can be here either for a mach, unix machdep or diag syscall,
963 * as indicated by the syscall class:
964 */
965 movl R64_RAX(%esp), %eax /* syscall number/class */
966 movl %eax, %ebx
967 andl $(SYSCALL_CLASS_MASK), %ebx /* syscall class */
968 cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
969 je EXT(lo64_mach_scall)
970 cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
971 je EXT(lo64_unix_scall)
972 cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
973 je EXT(lo64_mdep_scall)
974 cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
975 je EXT(lo64_diag_scall)
976
2d21ac55
A
977 movl %gs:CPU_KERNEL_STACK,%ebx
978 xchgl %ebx,%esp /* switch to kernel stack */
979
980 sti
981
0c530ab8 982 /* Syscall class unknown */
935ed37a 983 CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
0c530ab8 984 /* no return */
1c79356b 985
2d21ac55 986
/*
 * 64-bit BSD/unix syscall entry: bump the per-task counter, post
 * AST_BSD if the task has active vtimers, then hand the saved-state
 * pointer to unix_syscall64 on the kernel stack.
 */
0c530ab8 987Entry(lo64_unix_scall)
2d21ac55
A
 988	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
 989	movl	ACT_TASK(%ecx),%ebx	/* point to current task */
 990	addl	$1,TASK_SYSCALLS_UNIX(%ebx)	/* increment call count */
 991
 992	/* Check for active vtimers in the current task */
 993	cmpl	$0,TASK_VTIMERS(%ebx)
 994	jz	1f
 995
 996	/* Set a pending AST */
 997	orl	$(AST_BSD),%gs:CPU_PENDING_AST
1c79356b 998
2d21ac55
A
 999	/* Set a thread AST (atomic) */
1000	lock
1001	orl	$(AST_BSD),ACT_AST(%ecx)
1002
10031:
0c530ab8
A
1004	movl	%gs:CPU_KERNEL_STACK,%ebx
1005	xchgl	%ebx,%esp		/* switch to kernel stack; %ebx = old %esp (saved state) */
1c79356b 1006
0c530ab8 1007	sti
1c79356b 1008
0c530ab8
A
1009	CCALL1(unix_syscall64, %ebx)
1010	/*
1011	 * always returns through thread_exception_return
1012	 */
2d21ac55 1013
55e303ae 1014
/*
 * 64-bit Mach syscall entry: same shape as lo64_unix_scall, but the
 * counter is TASK_SYSCALLS_MACH and the handler is mach_call_munger64.
 */
0c530ab8 1015Entry(lo64_mach_scall)
2d21ac55
A
1016	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
1017	movl	ACT_TASK(%ecx),%ebx	/* point to current task */
1018	addl	$1,TASK_SYSCALLS_MACH(%ebx)	/* increment call count */
1019
1020	/* Check for active vtimers in the current task */
1021	cmpl	$0,TASK_VTIMERS(%ebx)
1022	jz	1f
1023
1024	/* Set a pending AST */
1025	orl	$(AST_BSD),%gs:CPU_PENDING_AST
1026
1027	/* Set a thread AST (atomic) */
1027	lock
1028	orl	$(AST_BSD),ACT_AST(%ecx)
1029
10301:
0c530ab8
A
1031	movl	%gs:CPU_KERNEL_STACK,%ebx
1032	xchgl	%ebx,%esp		/* switch to kernel stack; %ebx = old %esp (saved state) */
1c79356b 1033
0c530ab8 1034	sti
1c79356b 1035
0c530ab8
A
1036	CCALL1(mach_call_munger64, %ebx)
1037	/*
1038	 * always returns through thread_exception_return
1039	 */
1c79356b 1040
2d21ac55
A
1041
1042
/*
 * 64-bit machine-dependent syscall entry (no per-task call counter);
 * posts AST_BSD for active vtimers, then calls machdep_syscall64 with
 * the saved-state pointer.
 */
0c530ab8 1043Entry(lo64_mdep_scall)
2d21ac55
A
1044	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
1045	movl	ACT_TASK(%ecx),%ebx	/* point to current task */
1046
1047	/* Check for active vtimers in the current task */
1048	cmpl	$0,TASK_VTIMERS(%ebx)
1049	jz	1f
1050
1051	/* Set a pending AST */
1052	orl	$(AST_BSD),%gs:CPU_PENDING_AST
1053
1054	/* Set a thread AST (atomic) */
1055	lock
1056	orl	$(AST_BSD),ACT_AST(%ecx)
1057
10581:
91447636 1059	movl	%gs:CPU_KERNEL_STACK,%ebx
0c530ab8
A
1060	xchgl	%ebx,%esp		/* switch to kernel stack; %ebx = old %esp (saved state) */
1061
1062	sti
1063
1064	CCALL1(machdep_syscall64, %ebx)
1065	/*
1066	 * always returns through thread_exception_return
1067	 */
2d21ac55 1068
0c530ab8
A
1069
/*
 * 64-bit diagnostics syscall entry.  Unlike the other classes, the
 * return value of diagCall64 selects the exit path: non-zero returns
 * straight to user mode (skipping AST checks), zero raises EXC_SYSCALL.
 */
 1070Entry(lo64_diag_scall)
2d21ac55
A
1071	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
1072	movl	ACT_TASK(%ecx),%ebx	/* point to current task */
1073
1074	/* Check for active vtimers in the current task */
1075	cmpl	$0,TASK_VTIMERS(%ebx)
1076	jz	1f
1077
1078	/* Set a pending AST */
1079	orl	$(AST_BSD),%gs:CPU_PENDING_AST
1080
1081	/* Set a thread AST (atomic) */
1082	lock
1083	orl	$(AST_BSD),ACT_AST(%ecx)
0c530ab8 1084
2d21ac55
A
10851:
1086	movl	%gs:CPU_KERNEL_STACK,%ebx	// Get the address of the kernel stack
1087	xchgl	%ebx,%esp		// Switch to it, saving the previous
1088
0c530ab8 1089	CCALL1(diagCall64, %ebx)	// Call diagnostics
2d21ac55
A
1090
1091	cmpl	$0,%eax			// What kind of return is this?
1092	je	2f			// zero: diagCall64 failed, raise the exception below
0c530ab8
A
1093	cli				// Disable interruptions just in case they were enabled
1094	popl	%esp			// Get back the original stack
2d21ac55
A
1095	jmp	EXT(return_to_user)	// Normal return, do not check asts...
10962:
935ed37a 1097	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
2d21ac55
A
1098	// pass what would be the diag syscall
1099	// error return - cause an exception
1100	/* no return */
1c79356b 1101
1c79356b
A
1102/*\f*/
1103/*
1104 * Utility routines.
1105 */
1106
1107
1108/*
0c530ab8
A
1109 * Copy from user/kernel address space.
1110 * arg0: window offset or kernel address
1c79356b
A
1111 * arg1: kernel address
1112 * arg2: byte count
1113 */
/*
 * copyin entry points.  The *_user entries fall through into the
 * *_kern entries: copyinphys_user sets %ds (source) to the user
 * window, copyinphys_kern sets %es (destination) to the physical
 * window; copyin_user sets only %ds.  copyin_common moves
 * %ds:(%esi) -> %es:(%edi) and returns 0, or EFAULT via RECOVER.
 */
2d21ac55 1114Entry(copyinphys_user)
0c530ab8
A
1115	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
1116	mov	%cx,%ds
1117
2d21ac55 1118Entry(copyinphys_kern)
0c530ab8
A
1119	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
1120	mov	%cx,%es
1121	jmp	copyin_common
1122
2d21ac55 1123Entry(copyin_user)
0c530ab8
A
1124	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
1125	mov	%cx,%ds
1126
2d21ac55 1127Entry(copyin_kern)
0c530ab8
A
1128
1129copyin_common:
1c79356b
A
1130	pushl	%esi
1131	pushl	%edi		/* save registers */
1132
0c530ab8
A
1133	movl	8+S_ARG0,%esi	/* get source - window offset or kernel address */
1134	movl	8+S_ARG1,%edi	/* get destination - kernel address */
1c79356b
A
1135	movl	8+S_ARG2,%edx	/* get count */
1136
1c79356b
A
1137	cld			/* count up */
1138	movl	%edx,%ecx	/* move by longwords first */
1139	shrl	$2,%ecx
1140	RECOVERY_SECTION
1141	RECOVER(copyin_fail)	/* a fault in the rep below resumes at copyin_fail */
1142	rep
1143	movsl			/* move longwords */
1144	movl	%edx,%ecx	/* now move remaining bytes */
1145	andl	$3,%ecx
1146	RECOVERY_SECTION
1147	RECOVER(copyin_fail)
1148	rep
1149	movsb
1150	xorl	%eax,%eax	/* return 0 for success */
0c530ab8
A
1151copyin_ret:
1152	mov	%ss,%cx		/* restore kernel data and extended segments */
1153	mov	%cx,%ds
1154	mov	%cx,%es
1c79356b
A
1155
1156	popl	%edi		/* restore registers */
1157	popl	%esi
1158	ret			/* and return */
1159
1160copyin_fail:
0c530ab8
A
1161	movl	$(EFAULT),%eax	/* return error for failure */
1162	jmp	copyin_ret	/* pop frame and return */
1163
1c79356b 1164
0c530ab8 1165
1c79356b 1166/*
0c530ab8
A
1167 * Copy string from user/kern address space.
1168 * arg0: window offset or kernel address
1c79356b
A
1169 * arg1: kernel address
1170 * arg2: max byte count
1171 * arg3: actual byte count (OUT)
1172 */
0c530ab8
A
/*
 * copyinstr entry points.  The source segment is loaded into %fs:
 * the current %ds for copyinstr_kern, the user window selector for
 * copyinstr_user.  Returns 0 on success, ENAMETOOLONG if the buffer
 * fills before the NUL, or EFAULT via RECOVER.
 */
 1173Entry(copyinstr_kern)
1174	mov	%ds,%cx
1175	jmp	copyinstr_common
1176
1177Entry(copyinstr_user)
1178	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
1179
1180copyinstr_common:
1181	mov	%cx,%fs
1182
1c79356b
A
1183	pushl	%esi
1184	pushl	%edi		/* save registers */
1185
0c530ab8
A
1186	movl	8+S_ARG0,%esi	/* get source - window offset or kernel address */
1187	movl	8+S_ARG1,%edi	/* get destination - kernel address */
1c79356b
A
1188	movl	8+S_ARG2,%edx	/* get count */
1189
0c530ab8
A
1190	xorl	%eax,%eax	/* set to 0 here so that the high 24 bits */
1191				/* are 0 for the cmpl against 0 */
1c79356b
A
11922:
1193	RECOVERY_SECTION
1194	RECOVER(copystr_fail)	/* copy bytes... */
c0fea474 1195	movb	%fs:(%esi),%al
1c79356b
A
1196	incl	%esi
1197	testl	%edi,%edi	/* if kernel dest address is NULL... */
1198	jz	3f		/* ...skip the store (just measure the string) */
c0fea474 1199	movb	%al,(%edi)	/* copy the byte */
1c79356b
A
1200	incl	%edi
12013:
0c530ab8
A
1202	testl	%eax,%eax	/* did we just stuff the 0-byte? */
1203	jz	4f		/* yes, return 0 status already in %eax */
1204	decl	%edx		/* decrement #bytes left in buffer */
1205	jnz	2b		/* buffer not full so copy in another byte */
1206	movl	$(ENAMETOOLONG),%eax	/* buffer full but no 0-byte: ENAMETOOLONG */
1c79356b 12074:
1c79356b
A
1208	movl	8+S_ARG3,%edi	/* get OUT len ptr */
1209	cmpl	$0,%edi
1210	jz	copystr_ret	/* if null, just return */
1211	subl	8+S_ARG0,%esi	/* xfer len = current source - original source */
1212	movl	%esi,(%edi)	/* else set OUT arg to xfer len */
1213copystr_ret:
1214	popl	%edi		/* restore registers */
1215	popl	%esi
1216	ret			/* and return */
1217
1218copystr_fail:
0c530ab8
A
1219	movl	$(EFAULT),%eax	/* return error for failure */
1220	jmp	copystr_ret	/* pop frame and return */
1221
1c79356b
A
1222
1223/*
0c530ab8 1224 * Copy to user/kern address space.
1c79356b 1225 * arg0: kernel address
0c530ab8 1226 * arg1: window offset or kernel address
1c79356b
A
1227 * arg2: byte count
1228 */
0c530ab8
A
/*
 * copyout entry points (mirror of copyin): the *_user entries set the
 * destination segment %es to the user window and fall through; the
 * phys variants route the source through the physical window in %ds.
 * copyout_common moves %ds:(%esi) -> %es:(%edi); returns 0 or EFAULT.
 */
 1229ENTRY(copyoutphys_user)
1230	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
1231	mov	%cx,%es
89b3af67 1232
0c530ab8
A
1233ENTRY(copyoutphys_kern)
1234	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
1235	mov	%cx,%ds
1236	jmp	copyout_common
4452a7af 1237
0c530ab8
A
1238ENTRY(copyout_user)
1239	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
6601e61a 1240	mov	%cx,%es
4452a7af 1241
0c530ab8
A
1242ENTRY(copyout_kern)
1243
1244copyout_common:
1245	pushl	%esi
1246	pushl	%edi		/* save registers */
1247
1248	movl	8+S_ARG0,%esi	/* get source - kernel address */
1249	movl	8+S_ARG1,%edi	/* get destination - window offset or kernel address */
1250	movl	8+S_ARG2,%edx	/* get count */
1c79356b 1251
1c79356b 1252	cld			/* count up */
0c530ab8 1253	movl	%edx,%ecx	/* move by longwords first */
1c79356b
A
1254	shrl	$2,%ecx
1255	RECOVERY_SECTION
1256	RECOVER(copyout_fail)	/* a fault in the rep below resumes at copyout_fail */
1c79356b
A
1257	rep
1258	movsl
0c530ab8 1259	movl	%edx,%ecx	/* now move remaining bytes */
1c79356b
A
1260	andl	$3,%ecx
1261	RECOVERY_SECTION
1262	RECOVER(copyout_fail)
1c79356b
A
1263	rep
1264	movsb			/* move */
1c79356b
A
1265	xorl	%eax,%eax	/* return 0 for success */
1266copyout_ret:
0c530ab8
A
1267	mov	%ss,%cx		/* restore kernel segment */
1268	mov	%cx,%es
1269	mov	%cx,%ds
1c79356b 1270
1c79356b
A
1271	popl	%edi		/* restore registers */
1272	popl	%esi
1273	ret			/* and return */
1274
1275copyout_fail:
0c530ab8 1276	movl	$(EFAULT),%eax	/* return error for failure */
1c79356b
A
1277	jmp	copyout_ret	/* pop frame and return */
1278
1c79356b
A
1279/*
1280 * io register must not be used on slaves (no AT bus)
1281 */
1282#define ILL_ON_SLAVE
1283
1284
1285#if MACH_ASSERT
1286
1287#define ARG0 B_ARG0
1288#define ARG1 B_ARG1
1289#define ARG2 B_ARG2
1290#define PUSH_FRAME FRAME
1291#define POP_FRAME EMARF
1292
1293#else /* MACH_ASSERT */
1294
1295#define ARG0 S_ARG0
1296#define ARG1 S_ARG1
1297#define ARG2 S_ARG2
1298#define PUSH_FRAME
1299#define POP_FRAME
1300
1301#endif /* MACH_ASSERT */
1302
1303
1304#if MACH_KDB || MACH_ASSERT
1305
1306/*
1307 * Following routines are also defined as macros in i386/pio.h
1308 * Compile then when MACH_KDB is configured so that they
1309 * can be invoked from the debugger.
1310 */
1311
/*
 * void outb(unsigned char *io_port,
 *	     unsigned char byte)
 *
 * Write a single byte to an IO port.
 */
ENTRY(outb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG1,%eax		/* byte to send (in %al) */
	movl	ARG0,%edx		/* port number (in %dx) */
	outb	%al,%dx			/* issue the port write */
	POP_FRAME
	ret
1326
/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Read a single byte from an IO port.
 */
ENTRY(inb)
	PUSH_FRAME
	ILL_ON_SLAVE
	xor	%eax,%eax		/* pre-clear: inb only writes %al */
	movl	ARG0,%edx		/* port number (in %dx) */
	inb	%dx,%al			/* fetch the byte */
	POP_FRAME
	ret
1340
/*
 * void outw(unsigned short *io_port,
 *	     unsigned short word)
 *
 * Write a 16-bit word to an IO port.
 */
ENTRY(outw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG1,%eax		/* word to send (in %ax) */
	movl	ARG0,%edx		/* port number (in %dx) */
	outw	%ax,%dx			/* issue the port write */
	POP_FRAME
	ret
1355
/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Read a 16-bit word from an IO port.
 */
ENTRY(inw)
	PUSH_FRAME
	ILL_ON_SLAVE
	xor	%eax,%eax		/* pre-clear: inw only writes %ax */
	movl	ARG0,%edx		/* port number (in %dx) */
	inw	%dx,%ax			/* fetch the word */
	POP_FRAME
	ret
1369
/*
 * void outl(unsigned int *io_port,
 *	     unsigned int byte)
 *
 * Write a 32-bit value to an IO port.
 */
ENTRY(outl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG1,%eax		/* value to send */
	movl	ARG0,%edx		/* port number (in %dx) */
	outl	%eax,%dx		/* issue the port write */
	POP_FRAME
	ret
1384
/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Read a 32-bit value from an IO port.  No pre-clear needed:
 * inl writes all of %eax.
 */
ENTRY(inl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* port number (in %dx) */
	inl	%dx,%eax		/* fetch the 32-bit value */
	POP_FRAME
	ret
1397
1398#endif /* MACH_KDB || MACH_ASSERT*/
1399
/*
 * void loutb(unsigned byte *io_port,
 *	      unsigned byte *data,
 *	      unsigned int count)
 *
 * Stream an array of bytes out to an IO port.
 */
ENTRY(loutb)
ENTRY(outsb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* stash %esi in a scratch reg */
	movl	ARG2,%ecx		/* element count */
	movl	ARG1,%esi		/* source buffer */
	movl	ARG0,%edx		/* port number */
	cld				/* walk the buffer upward */
	rep
	outsb				/* stream the bytes out */
	movl	%eax,%esi		/* recover %esi */
	POP_FRAME
	ret
1421
1422
/*
 * void loutw(unsigned short *io_port,
 *	      unsigned short *data,
 *	      unsigned int count)
 *
 * Stream an array of 16-bit words out to an IO port.
 */
ENTRY(loutw)
ENTRY(outsw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* stash %esi in a scratch reg */
	movl	ARG2,%ecx		/* element count */
	movl	ARG1,%esi		/* source buffer */
	movl	ARG0,%edx		/* port number */
	cld				/* walk the buffer upward */
	rep
	outsw				/* stream the words out */
	movl	%eax,%esi		/* recover %esi */
	POP_FRAME
	ret
1444
/*
 * void loutl(unsigned short io_port,
 *	      unsigned int *data,
 *	      unsigned int count)
 *
 * Stream an array of 32-bit values out to an IO port.
 * (Header previously mis-named this routine "loutw".)
 */
ENTRY(loutl)
ENTRY(outsl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* stash %esi in a scratch reg */
	movl	ARG2,%ecx		/* element count */
	movl	ARG1,%esi		/* source buffer */
	movl	ARG0,%edx		/* port number */
	cld				/* walk the buffer upward */
	rep
	outsl				/* stream the longs out */
	movl	%eax,%esi		/* recover %esi */
	POP_FRAME
	ret
1466
1467
/*
 * void linb(unsigned char *io_port,
 *	     unsigned char *data,
 *	     unsigned int count)
 *
 * Stream an array of bytes in from an IO port.
 */
ENTRY(linb)
ENTRY(insb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* stash %edi in a scratch reg */
	movl	ARG2,%ecx		/* element count */
	movl	ARG1,%edi		/* destination buffer */
	movl	ARG0,%edx		/* port number */
	cld				/* walk the buffer upward */
	rep
	insb				/* stream the bytes in */
	movl	%eax,%edi		/* recover %edi */
	POP_FRAME
	ret
1489
1490
/*
 * void linw(unsigned short *io_port,
 *	     unsigned short *data,
 *	     unsigned int count)
 *
 * Stream an array of 16-bit words in from an IO port.
 */
ENTRY(linw)
ENTRY(insw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* stash %edi in a scratch reg */
	movl	ARG2,%ecx		/* element count */
	movl	ARG1,%edi		/* destination buffer */
	movl	ARG0,%edx		/* port number */
	cld				/* walk the buffer upward */
	rep
	insw				/* stream the words in */
	movl	%eax,%edi		/* recover %edi */
	POP_FRAME
	ret
1512
1513
/*
 * void linl(unsigned short io_port,
 *	     unsigned int *data,
 *	     unsigned int count)
 *
 * Stream an array of 32-bit values in from an IO port.
 */
ENTRY(linl)
ENTRY(insl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* stash %edi in a scratch reg */
	movl	ARG2,%ecx		/* element count */
	movl	ARG1,%edi		/* destination buffer */
	movl	ARG0,%edx		/* port number */
	cld				/* walk the buffer upward */
	rep
	insl				/* stream the longs in */
	movl	%eax,%edi		/* recover %edi */
	POP_FRAME
	ret
1535
91447636
A
1536/*
1537 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
1538 */
/*
 * Read an MSR that may not exist: a #GP from rdmsr is redirected by
 * the RECOVER entry to rdmsr_fail.  Returns 0 on success (with the
 * low/high halves stored through arg1/arg2), 1 if the read faulted.
 */
 1539ENTRY(rdmsr_carefully)
1540	movl	S_ARG0, %ecx		/* MSR index */
1541	RECOVERY_SECTION
1542	RECOVER(rdmsr_fail)		/* a fault in rdmsr resumes at rdmsr_fail */
1543	rdmsr				/* %edx:%eax = MSR value */
1544	movl	S_ARG1, %ecx
1545	movl	%eax, (%ecx)		/* *lo = low 32 bits */
1546	movl	S_ARG2, %ecx
1547	movl	%edx, (%ecx)		/* *hi = high 32 bits */
1548	movl	$0, %eax		/* success */
1549	ret
1550
1551rdmsr_fail:
1552	movl	$1, %eax		/* MSR read faulted */
1553	ret
1c79356b
A
1554
1555/*
0c530ab8 1556 * Done with recovery table.
1c79356b
A
1557 */
1558 RECOVERY_SECTION
1559 RECOVER_TABLE_END
1c79356b 1560
1c79356b 1561	.data
1c79356b
A
/*
 * dr_msk[i]: mask clearing the DR7 bits that belong to debug register i
 * (local/global enable bits and the R/W + LEN control fields) —
 * NOTE(review): assumed from the DR7 field layout; confirm against the
 * debug-register setup code that indexes this table.
 */
1562dr_msk:
1563	.long	~0x000f0003
1564	.long	~0x00f0000c
1565	.long	~0x0f000030
1566	.long	~0xf00000c0
/* dr_addr: saved debug-register address values, zero-initialized */
1567ENTRY(dr_addr)
1568	.long	0,0,0,0
1569	.long	0,0,0,0
0c530ab8 1570
1c79356b
A
1571 .text
1572
1c79356b
A
1573#ifndef SYMMETRY
1574
/*
 * ffs(mask) - return the 1-based index of the lowest set bit,
 * or 0 when the mask is empty.
 */
ENTRY(ffs)
	bsfl	S_ARG0, %eax		/* %eax = 0-based index; ZF set iff mask == 0 */
	jnz	0f			/* found a set bit */
	xorl	%eax, %eax		/* empty mask: return 0 */
	ret
0:	incl	%eax			/* convert to 1-based index */
	ret
1585
 1586/*
1587 * cpu_shutdown()
1588 * Force reboot
1589 */
1590
/* An empty IDT: any exception taken with this loaded cannot be delivered. */
1591null_idtr:
1592	.word	0
1593	.long	0
1594
/*
 * Load the empty IDT, then divide by zero: the resulting #DE cannot be
 * delivered, which escalates through double fault to a triple fault and
 * resets the processor.
 */
1595Entry(cpu_shutdown)
1596	lidt	null_idtr	/* disable the interrupt handler */
1597	xor	%ecx,%ecx	/* generate a divide by zero */
1598	div	%ecx,%eax	/* reboot now */
1599	ret			/* this will "never" be executed */
1600
1601#endif /* SYMMETRY */
1602
1603
/*
 * setbit(int bitno, int *s) - set the given bit in a bit string.
 */
ENTRY(setbit)
	movl	S_ARG1, %eax		/* bit-string base address */
	movl	S_ARG0, %ecx		/* bit index */
	btsl	%ecx, (%eax)		/* set that bit */
	ret
1612
/*
 * clrbit(int bitno, int *s) - clear the given bit in a bit string.
 */
ENTRY(clrbit)
	movl	S_ARG1, %eax		/* bit-string base address */
	movl	S_ARG0, %ecx		/* bit index */
	btrl	%ecx, (%eax)		/* clear that bit */
	ret
1621
 1622/*
1623 * ffsbit(int *s) - find first set bit in bit string
1624 *
1625 * NOTE(review): there is no length bound - if no bit is set the loop
1625 * scans successive words forever; callers must guarantee a set bit.
1624 */
1625ENTRY(ffsbit)
1626	movl	S_ARG0, %ecx		/* address */
1627	movl	$0, %edx		/* base offset */
16280:
1629	bsfl	(%ecx), %eax		/* check argument bits */
1630	jnz	1f			/* found bit, return */
1631	addl	$4, %ecx		/* increment address */
1632	addl	$32, %edx		/* increment offset */
1633	jmp	0b			/* try again */
16341:
1635	addl	%edx, %eax		/* return offset */
1636	ret
1637
/*
 * testbit(int nr, volatile void *array)
 *
 * Probe bit nr of the bit string; returns -1 if set, 0 if clear.
 */
ENTRY(testbit)
	movl	S_ARG1,%ecx		/* bit-string base address */
	movl	S_ARG0,%eax		/* bit index to probe */
	btl	%eax,(%ecx)		/* CF = selected bit */
	sbbl	%eax,%eax		/* %eax = CF ? -1 : 0 */
	ret
1650
/*
 * get_pc() - fetch the return address stored in the caller's stack
 * frame.  NOTE(review): assumes %ebp holds a conventional frame
 * pointer at the call site - confirm callers are compiled with frames.
 */
 1651ENTRY(get_pc)
1652	movl	4(%ebp),%eax
1653	ret
1654
1c79356b
A
/*
 * minsecurity: builds a frame and deliberately falls through into
 * jail below (no ret) - the thread parks there permanently.
 */
 1655ENTRY(minsecurity)
1656	pushl	%ebp
1657	movl	%esp,%ebp
1658/*
1659 * jail: set the EIP to "jail" to block a kernel thread.
1660 * Useful to debug synchronization problems on MPs.
1661 */
1662ENTRY(jail)
1663	jmp	EXT(jail)	/* spin here forever */
1664
1c79356b
A
1665/*
1666 * unsigned int
1667 * div_scale(unsigned int dividend,
1668 * unsigned int divisor,
1669 * unsigned int *scale)
1670 *
1671 * This function returns (dividend << *scale) //divisor where *scale
1672 * is the largest possible value before overflow. This is used in
1673 * computation where precision must be achieved in order to avoid
1674 * floating point usage.
1675 *
1676 * Algorithm:
1677 * *scale = 0;
1678 * while (((dividend >> *scale) >= divisor))
1679 * (*scale)++;
1680 * *scale = 32 - *scale;
1681 * return ((dividend << *scale) / divisor);
1682 */
 1683ENTRY(div_scale)
1684	PUSH_FRAME
1685	xorl	%ecx, %ecx		/* *scale = 0 */
1686	xorl	%eax, %eax		/* low half of the shifted dividend */
1687	movl	ARG0, %edx		/* get dividend */
16880:
1689	cmpl	ARG1, %edx		/* if (divisor > dividend) */
1690	jle	1f			/* goto 1f */
					/* NOTE(review): jle is a signed compare;
					 * verify callers never pass values >= 2^31 */
1691	addl	$1, %ecx		/* (*scale)++ */
1692	shrdl	$1, %edx, %eax		/* dividend >> 1 (bit shifted into %eax) */
1693	shrl	$1, %edx		/* dividend >> 1 */
1694	jmp	0b			/* goto 0b */
16951:
1696	divl	ARG1			/* (dividend << (32 - *scale)) / divisor */
1697	movl	ARG2, %edx		/* get scale */
1698	movl	$32, (%edx)		/* *scale = 32 */
1699	subl	%ecx, (%edx)		/* *scale -= %ecx */
1700	POP_FRAME
1701	ret
1702
1703/*
1704 * unsigned int
1705 * mul_scale(unsigned int multiplicand,
1706 * unsigned int multiplier,
1707 * unsigned int *scale)
1708 *
1709 * This function returns ((multiplicand * multiplier) >> *scale) where
1710 * scale is the largest possible value before overflow. This is used in
1711 * computation where precision must be achieved in order to avoid
1712 * floating point usage.
1713 *
1714 * Algorithm:
1715 * *scale = 0;
1716 * while (overflow((multiplicand * multiplier) >> *scale))
1717 * (*scale)++;
1718 * return ((multiplicand * multiplier) >> *scale);
1719 */
 1720ENTRY(mul_scale)
1721	PUSH_FRAME
1722	xorl	%ecx, %ecx		/* *scale = 0 */
1723	movl	ARG0, %eax		/* get multiplicand */
1724	mull	ARG1			/* 64-bit product in %edx:%eax */
17250:
1726	cmpl	$0, %edx		/* if (!overflow()) - high half empty */
1727	je	1f			/* goto 1 */
1728	addl	$1, %ecx		/* (*scale)++ */
1729	shrdl	$1, %edx, %eax		/* (multiplicand * multiplier) >> 1 */
1730	shrl	$1, %edx		/* (multiplicand * multiplier) >> 1 */
1731	jmp	0b
17321:
1733	movl	ARG2, %edx		/* get scale */
1734	movl	%ecx, (%edx)		/* set *scale */
1735	POP_FRAME
1736	ret				/* result (fits 32 bits) is in %eax */
1737
6601e61a 1738
0c530ab8 1739
6601e61a 1740/*
0c530ab8 1741 * Double-fault exception handler task. The last gasp...
1c79356b 1742 */
0c530ab8
A
1743Entry(df_task_start)
1744	CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))
1745	hlt			/* should never get here; halt if panic returns */
1c79356b 1746
1c79356b
A
1747
 1748/*
1749 * machine-check handler task. The last gasp...
1750 */
0c530ab8
A
1751Entry(mc_task_start)
1752	CCALL1(panic_machine_check, $(T_MACHINE_CHECK))
1753	hlt			/* should never get here; halt if panic returns */
1c79356b
A
1754
 1755/*
1756 * Compatibility mode's last gasp...
1757 */
0c530ab8
A
1758Entry(lo_df64)
1759	movl	%esp, %eax		/* pass the saved-state pointer to the panic routine */
1760	CCALL1(panic_double_fault64, %eax)
1761	hlt			/* should never get here; halt if panic returns */
1c79356b 1762
0c530ab8
A
/* 64-bit machine-check last gasp: panic with the saved state, then halt. */
 1763Entry(lo_mc64)
1764	movl	%esp, %eax		/* pass the saved-state pointer to the panic routine */
1765	CCALL1(panic_machine_check64, %eax)
1766	hlt			/* should never get here; halt if panic returns */
1c79356b 1767