/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <stat_time.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>

#define _ARCH_I386_ASM_HELP_H_		/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

#include <i386/mp.h>

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDESHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,(APTDPTDI << PDESHIFT)
	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

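/*
 * The self-map arithmetic above, as a sketch (illustration only;
 * 4K pages and 4-byte entries assumed, i.e. non-PAE):
 *
 *	pde for va:	_PTD   + (va >> 22) * 4
 *	pte for va:	_PTmap + (va >> 12) * 4
 *
 * This works because slot PTDPTDI of the page directory points back
 * at the directory itself.
 */
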
#if __MACHO__
/* Under Mach-O, etext is a variable which contains
 * the last text address
 */
#define	ETEXT_ADDR	(EXT(etext))
#else
/* Under ELF and other non-Mach-O formats, the address of
 * etext represents the last text address
 */
#define ETEXT_ADDR	$ EXT(etext)
#endif

#define	CX(addr,reg)	addr(,reg,4)

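/*
 * CX(addr,reg) selects the reg'th 32-bit element of the array at
 * addr: it assembles to the scaled-index form addr(,reg,4), and is
 * used below with CPU_NUMBER to index per-CPU arrays such as
 * master_dbtss.
 */
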
/*
 * The following macros make calls into C code.
 * They dynamically align the stack to 16 bytes.
 * Arguments are moved (not pushed) onto the correctly aligned stack.
 * NOTE: EDI is destroyed in the process, and hence cannot
 * be directly used as a parameter. Users of this macro must
 * independently preserve EDI (a non-volatile) if the routine is
 * intended to be called from C, for instance.
 */

#define CCALL(fn)			\
	movl	%esp, %edi		;\
	andl	$0xFFFFFFF0, %esp	;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL1(fn, arg1)		\
	movl	%esp, %edi		;\
	subl	$4, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL2(fn, arg1, arg2)		\
	movl	%esp, %edi		;\
	subl	$8, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg2, 4(%esp)		;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

#define CCALL3(fn, arg1, arg2, arg3)	\
	movl	%esp, %edi		;\
	subl	$12, %esp		;\
	andl	$0xFFFFFFF0, %esp	;\
	movl	arg3, 8(%esp)		;\
	movl	arg2, 4(%esp)		;\
	movl	arg1, 0(%esp)		;\
	call	EXT(fn)			;\
	movl	%edi, %esp

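/*
 * Usage sketch (illustrative): to call a two-argument C function
 * from this file,
 *
 *	CCALL2(kernel_trap, %eax, %ebx)
 *
 * saves %esp in %edi, reserves 8 bytes, rounds %esp down to a
 * 16-byte boundary, stores the arguments at 0(%esp)/4(%esp), makes
 * the call, and restores the original %esp from %edi.
 */
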
	.text
locore_start:

/*
 * Fault recovery.
 */

#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif

#define	RECOVER_TABLE_START		\
	.align	2			; \
	.globl	EXT(recover_table)	;\
LEXT(recover_table)			;\
	.text

#define	RECOVER(addr)			\
	.align	2;			\
	.long	9f			;\
	.long	addr			;\
	.text				;\
9:

#define	RECOVER_TABLE_END		\
	.align	2			;\
	.globl	EXT(recover_table_end)	;\
LEXT(recover_table_end)			;\
	.text

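/*
 * Each RECOVER(addr) emits one table entry; conceptually (a sketch,
 * the struct and field names are assumed, not from this file):
 *
 *	struct recovery {
 *		uint32_t fault_addr;	// the 9: label, i.e. the
 *					// instruction that may fault
 *		uint32_t recover_addr;	// where to resume instead
 *	};
 *
 * The trap handler scans recover_table..recover_table_end for an
 * entry matching the faulting EIP and, if found, resumes execution
 * at recover_addr rather than panicking.
 */
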
/*
 * Allocate the recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START

/*
 * Timing routines.
 */
Entry(timer_update)
	movl	4(%esp),%ecx
	movl	8(%esp),%eax
	movl	12(%esp),%edx
	movl	%eax,TIMER_HIGHCHK(%ecx)
	movl	%edx,TIMER_LOW(%ecx)
	movl	%eax,TIMER_HIGH(%ecx)
	ret

Entry(timer_grab)
	movl	4(%esp),%ecx
0:	movl	TIMER_HIGH(%ecx),%edx
	movl	TIMER_LOW(%ecx),%eax
	cmpl	TIMER_HIGHCHK(%ecx),%edx
	jne	0b
	ret

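/*
 * TIMER_HIGHCHK and TIMER_HIGH form a consistency check that lets
 * timer_grab read a 64-bit value lock-free; in C terms (a sketch,
 * field names assumed from the TIMER_* offsets):
 *
 *	do {
 *		high = t->high;
 *		low  = t->low;
 *	} while (high != t->highchk);
 *
 * timer_update stores highchk first and high last, so a reader that
 * races an update sees the mismatch and retries.
 */
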
#if STAT_TIME

#define	TIME_TRAP_UENTRY
#define	TIME_TRAP_UEXIT
#define	TIME_INT_ENTRY
#define	TIME_INT_EXIT

#else
/*
 * Nanosecond timing.
 */

/*
 * Nanotime returned in %edx:%eax.
 * Computed from tsc based on the scale factor
 * and an implicit 32 bit shift.
 *
 * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
 */
#define RNT_INFO	_rtc_nanotime_info
#define NANOTIME							  \
0:	movl	RNT_INFO+RNT_TSC_BASE,%esi				; \
	movl	RNT_INFO+RNT_TSC_BASE+4,%edi				; \
	rdtsc								; \
	subl	%esi,%eax		/* tsc - tsc_base */		; \
	sbbl	%edi,%edx						; \
	movl	RNT_INFO+RNT_SCALE,%ecx					; \
	movl	%edx,%ebx		/* delta * scale */		; \
	mull	%ecx							; \
	movl	%ebx,%eax						; \
	movl	%edx,%ebx						; \
	mull	%ecx							; \
	addl	%ebx,%eax						; \
	adcl	$0,%edx			/* add carry into hi */		; \
	addl	RNT_INFO+RNT_NS_BASE,%eax	/* add ns_base lo */	; \
	adcl	RNT_INFO+RNT_NS_BASE+4,%edx	/* add ns_base hi */	; \
	cmpl	RNT_INFO+RNT_TSC_BASE,%esi				; \
	jne	0b			/* repeat if changed */		; \
	cmpl	RNT_INFO+RNT_TSC_BASE+4,%edi				; \
	jne	0b

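/*
 * In C terms, NANOTIME computes (a sketch; field names follow the
 * RNT_* offsets into _rtc_nanotime_info):
 *
 *	do {
 *		tsc_base = rnt.tsc_base;
 *		delta    = rdtsc() - tsc_base;
 *		ns       = rnt.ns_base + ((delta * rnt.scale) >> 32);
 *	} while (tsc_base != rnt.tsc_base);
 *
 * The two mull instructions form the 64x32-bit product whose upper
 * 64 bits are (delta * scale) >> 32, i.e. the implicit 32-bit shift
 * mentioned above.
 */
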
/*
 * Add 64-bit delta in register dreg : areg to timer pointed to by register treg.
 */
#define TIMER_UPDATE(treg,dreg,areg)					  \
	addl	TIMER_LOW(treg),areg		/* add low bits */	; \
	adcl	dreg,TIMER_HIGH(treg)		/* add carry high bits */ ; \
	movl	areg,TIMER_LOW(treg)		/* store updated low bit */ ; \
	movl	TIMER_HIGH(treg),dreg		/* copy high bits */	; \
	movl	dreg,TIMER_HIGHCHK(treg)	/* to high check */

/*
 * Add time delta to old timer and start new.
 */
#define TIMER_EVENT(old,new)						  \
	NANOTIME				/* edx:eax nanosecs */	; \
	movl	%eax,%esi			/* save timestamp */	; \
	movl	%edx,%edi			/* save timestamp */	; \
	movl	%gs:CPU_PROCESSOR,%ebx		/* get current processor */ ; \
	movl	THREAD_TIMER(%ebx),%ecx		/* get current timer */	; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax)		/* update timer */	; \
	addl	$(new##_TIMER-old##_TIMER),%ecx	/* point to new timer */ ; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */	; \
	movl	%ecx,THREAD_TIMER(%ebx)		/* set current timer */	; \
	movl	%esi,%eax			/* restore timestamp */	; \
	movl	%edi,%edx			/* restore timestamp */	; \
	movl	CURRENT_STATE(%ebx),%ecx	/* current state */	; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax)		/* update timer */	; \
	addl	$(new##_STATE-old##_STATE),%ecx	/* point to new state */ ; \
	movl	%ecx,CURRENT_STATE(%ebx)	/* set current state */	; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */

/*
 * Update time on user trap entry.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 */
#define	TIME_TRAP_UENTRY	TIMER_EVENT(USER,SYSTEM)

/*
 * Update time on user trap exit.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 */
#define	TIME_TRAP_UEXIT		TIMER_EVENT(SYSTEM,USER)

/*
 * Update time on interrupt entry.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 * Saves processor state info on stack.
 */
#define	TIME_INT_ENTRY							  \
	NANOTIME				/* edx:eax nanosecs */	; \
	movl	%eax,%gs:CPU_INT_EVENT_TIME	/* save in cpu data */	; \
	movl	%edx,%gs:CPU_INT_EVENT_TIME+4	/* save in cpu data */	; \
	movl	%eax,%esi			/* save timestamp */	; \
	movl	%edx,%edi			/* save timestamp */	; \
	movl	%gs:CPU_PROCESSOR,%ebx		/* get current processor */ ; \
	movl	THREAD_TIMER(%ebx),%ecx		/* get current timer */	; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax)		/* update timer */	; \
	movl	KERNEL_TIMER(%ebx),%ecx		/* point to kernel timer */ ; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */	; \
	movl	%esi,%eax			/* restore timestamp */	; \
	movl	%edi,%edx			/* restore timestamp */	; \
	movl	CURRENT_STATE(%ebx),%ecx	/* get current state */	; \
	pushl	%ecx				/* save state */	; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax)		/* update timer */	; \
	leal	IDLE_STATE(%ebx),%eax		/* get idle state */	; \
	cmpl	%eax,%ecx			/* compare current state */ ; \
	je	0f				/* skip if equal */	; \
	leal	SYSTEM_STATE(%ebx),%ecx		/* get system state */	; \
	movl	%ecx,CURRENT_STATE(%ebx)	/* set current state */	; \
0:	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */

/*
 * Update time on interrupt exit.
 * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
 * Restores processor state info from stack.
 */
#define	TIME_INT_EXIT							  \
	NANOTIME				/* edx:eax nanosecs */	; \
	movl	%eax,%gs:CPU_INT_EVENT_TIME	/* save in cpu data */	; \
	movl	%edx,%gs:CPU_INT_EVENT_TIME+4	/* save in cpu data */	; \
	movl	%eax,%esi			/* save timestamp */	; \
	movl	%edx,%edi			/* save timestamp */	; \
	movl	%gs:CPU_PROCESSOR,%ebx		/* get current processor */ ; \
	movl	KERNEL_TIMER(%ebx),%ecx		/* point to kernel timer */ ; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax)		/* update timer */	; \
	movl	THREAD_TIMER(%ebx),%ecx		/* interrupted timer */	; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */	; \
	movl	%esi,%eax			/* restore timestamp */	; \
	movl	%edi,%edx			/* restore timestamp */	; \
	movl	CURRENT_STATE(%ebx),%ecx	/* get current state */	; \
	subl	TIMER_TSTAMP(%ecx),%eax		/* compute elapsed time */ ; \
	sbbl	TIMER_TSTAMP+4(%ecx),%edx	/* compute elapsed time */ ; \
	TIMER_UPDATE(%ecx,%edx,%eax)		/* update timer */	; \
	popl	%ecx				/* restore state */	; \
	movl	%ecx,CURRENT_STATE(%ebx)	/* set current state */	; \
	movl	%esi,TIMER_TSTAMP(%ecx)		/* set timestamp */	; \
	movl	%edi,TIMER_TSTAMP+4(%ecx)	/* set timestamp */

#endif /* STAT_TIME */

#undef PDEBUG

#ifdef PDEBUG

/*
 * Traditional, not ANSI.
 */
#define CAH(label)			 \
	.data				;\
	.globl	label/**/count		;\
label/**/count:				;\
	.long	0			;\
	.globl	label/**/limit		;\
label/**/limit:				;\
	.long	0			;\
	.text				;\
	addl	$1,%ss:label/**/count	;\
	cmpl	$0,label/**/limit	;\
	jz	label/**/exit		;\
	pushl	%eax			;\
label/**/loop:				;\
	movl	%ss:label/**/count,%eax	;\
	cmpl	%eax,%ss:label/**/limit	;\
	je	label/**/loop		;\
	popl	%eax			;\
label/**/exit:

#else	/* PDEBUG */

#define CAH(label)

#endif	/* PDEBUG */

#if	MACH_KDB
/*
 * Last-ditch debug code to handle faults that might result
 * from entering kernel (from collocated server) on an invalid
 * stack.  On collocated entry, there's no hardware-initiated
 * stack switch, so a valid stack must be in place when an
 * exception occurs, or we may double-fault.
 *
 * In case of a double-fault, our only recourse is to switch
 * hardware "tasks", so that we avoid using the current stack.
 *
 * The idea here is just to get the processor into the debugger,
 * post-haste.  No attempt is made to fix up whatever error got
 * us here, so presumably continuing from the debugger will
 * simply land us here again -- at best.
 */
#if	0
/*
 * Note that the per-fault entry points are not currently
 * functional.  The only way to make them work would be to
 * set up separate TSS's for each fault type, which doesn't
 * currently seem worthwhile.  (The offset part of a task
 * gate is always ignored.)  So all faults that task switch
 * currently resume at db_task_start.
 */
/*
 * Double fault (Murphy's point) - error code (0) on stack
 */
Entry(db_task_dbl_fault)
	popl	%eax
	movl	$(T_DOUBLE_FAULT),%ebx
	jmp	db_task_start
/*
 * Segment not present - error code on stack
 */
Entry(db_task_seg_np)
	popl	%eax
	movl	$(T_SEGMENT_NOT_PRESENT),%ebx
	jmp	db_task_start
/*
 * Stack fault - error code on (current) stack
 */
Entry(db_task_stk_fault)
	popl	%eax
	movl	$(T_STACK_FAULT),%ebx
	jmp	db_task_start
/*
 * General protection fault - error code on stack
 */
Entry(db_task_gen_prot)
	popl	%eax
	movl	$(T_GENERAL_PROTECTION),%ebx
	jmp	db_task_start
#endif	/* 0 */
/*
 * The entry point where execution resumes after last-ditch debugger task
 * switch.
 */
Entry(db_task_start)
	movl	%esp,%edx
	subl	$(ISS32_SIZE),%edx
	movl	%edx,%esp		/* allocate x86_saved_state on stack */
	movl	%eax,R_ERR(%esp)
	movl	%ebx,R_TRAPNO(%esp)
	pushl	%edx
	CPU_NUMBER(%edx)
	movl	CX(EXT(master_dbtss),%edx),%edx
	movl	TSS_LINK(%edx),%eax
	pushl	%eax			/* pass along selector of previous TSS */
	call	EXT(db_tss_to_frame)
	popl	%eax			/* get rid of TSS selector */
	call	EXT(db_trap_from_asm)
	addl	$0x4,%esp
	/*
	 * And now...?
	 */
	iret				/* ha, ha, ha... */
#endif	/* MACH_KDB */

/*
 * Called as a function, makes the current thread
 * return from the kernel as if from an exception.
 */

	.globl	EXT(thread_exception_return)
	.globl	EXT(thread_bootstrap_return)
LEXT(thread_exception_return)
LEXT(thread_bootstrap_return)
	cli
	movl	%gs:CPU_KERNEL_STACK,%ecx
	movl	(%ecx),%esp		/* switch back to PCB stack */
	jmp	EXT(return_from_trap)

Entry(call_continuation)
	movl	S_ARG0,%eax		/* get continuation */
	movl	S_ARG1,%edx		/* continuation param */
	movl	S_ARG2,%ecx		/* wait result */
	movl	%gs:CPU_KERNEL_STACK,%esp /* pop the stack */
	xorl	%ebp,%ebp		/* zero frame pointer */
	subl	$8,%esp			/* align the stack */
	pushl	%ecx
	pushl	%edx
	call	*%eax			/* call continuation */
	addl	$16,%esp
	movl	%gs:CPU_ACTIVE_THREAD,%eax
	pushl	%eax
	call	EXT(thread_terminate)


/*******************************************************************************************************
 *
 * All task 'exceptions' (32- and 64-bit) enter lo_alltraps:
 *	esp	-> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *	cr3	-> kernel directory
 *	esp	-> low based stack
 *	gs	-> CPU_DATA_GS
 *	cs	-> KERNEL_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_alltraps)
	movl	R_CS(%esp),%eax		/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:
	testb	$3,%al
	jz	trap_from_kernel
					/* user mode trap */
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	ACT_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

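	/*
	 * The vtimer check above, in C terms (a sketch; the same
	 * pattern recurs in the syscall entries below):
	 *
	 *	if (task->vtimers) {
	 *		cpu_pending_ast |= AST_BSD;
	 *		thread->ast     |= AST_BSD;	// atomically
	 *	}
	 *
	 * so the BSD AST handler runs on the way back to user mode.
	 */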
1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */
	sti

	CCALL1(user_trap, %ebx)		/* call user trap routine */
	cli				/* hold off intrs - critical section */
	popl	%esp			/* switch back to PCB stack */

/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled
 */
LEXT(return_from_trap)
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax
	je	EXT(return_to_user)	/* branch if no AST */

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */
	sti				/* interrupts always enabled on return to user mode */

	pushl	%ebx			/* save PCB stack */
	xorl	%ebp,%ebp		/* Clear framepointer */
	CCALL1(i386_astintr, $0)	/* take the AST */
	cli
	popl	%esp			/* switch back to PCB stack (w/exc link) */
	jmp	EXT(return_from_trap)	/* and check again (rare) */

LEXT(return_to_user)
	TIME_TRAP_UEXIT

LEXT(ret_to_user)
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(lo_ret_to_user)
	jmp	EXT(lo64_ret_to_user)


/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to state at time of trap
 * as soon as it's safe for us to do so and not recurse doing preemption
 */
trap_from_kernel:
	movl	%esp, %eax		/* saved state addr */
	pushl	R_EIP(%esp)		/* Simulate a CALL from fault point */
	pushl	%ebp			/* Extend framepointer chain */
	movl	%esp, %ebp
	CCALL1(kernel_trap, %eax)	/* Call kernel trap handler */
	popl	%ebp
	addl	$4, %esp
	cli

	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$ AST_URGENT,%eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$ T_PREEMPT,R_TRAPNO(%esp)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$ EFL_IF,R_EFLAGS(%esp)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	andl	$(-KERNEL_STACK_SIZE),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */
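	/*
	 * The xorl/andl pair above tests stack identity without a
	 * range compare: (%esp ^ kernel_stack) & -KERNEL_STACK_SIZE
	 * is zero exactly when both pointers lie in the same
	 * KERNEL_STACK_SIZE-aligned region, i.e. when we are already
	 * on this CPU's kernel stack.
	 */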

	CCALL1(i386_astintr, $1)	/* take the AST */

ret_to_kernel:
	cmpl	$0, %gs:CPU_IS64BIT
	je	EXT(lo_ret_to_kernel)
	jmp	EXT(lo64_ret_to_kernel)



/*******************************************************************************************************
 *
 * All interrupts on all tasks enter here with:
 *	esp	-> x86_saved_state_t
 *
 *	cr3	-> kernel directory
 *	esp	-> low based stack
 *	gs	-> CPU_DATA_GS
 *	cs	-> KERNEL_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	cmpl	%esp,%ecx
	jb	1f
	leal	-INTSTACK_SIZE(%ecx),%edx
	cmpl	%esp,%edx
	jb	int_from_intstack
1:
	xchgl	%ecx,%esp		/* switch to interrupt stack */

	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */

	subl	$8, %esp		/* for 16-byte stack alignment */
	pushl	%ecx			/* save pointer to old stack */
	movl	%ecx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	ACT_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%gs:CPU_INT_STATE, %eax
	CCALL1(PE_incoming_interrupt, %eax)	/* call generic interrupt routine */

	cli				/* just in case we returned with intrs enabled */
	xorl	%eax,%eax
	movl	%eax,%gs:CPU_INT_STATE	/* clear intr state pointer */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%eax
	movl	ACT_PCB(%eax),%eax	/* get act`s PCB */
	movl	PCB_FPS(%eax),%eax	/* get pcb's ims.ifps */
	cmpl	$0,%eax			/* Is there a context */
	je	1f			/* Branch if not */
	movl	FP_VALID(%eax),%eax	/* Load fp_valid */
	cmpl	$0,%eax			/* Check if valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
2:
	popl	%esp			/* switch back to old stack */

	/* Load interrupted code segment into %eax */
	movl	R_CS(%esp),%eax		/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne	3f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
3:
	testb	$3,%al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$ AST_URGENT,%eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	andl	$(-KERNEL_STACK_SIZE),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	/*
	 * Take an AST from kernel space.  We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%esp, %edx		/* x86_saved_state */
	CCALL1(PE_incoming_interrupt, %edx)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	EXT(ret_to_user)	/* no, nothing to do */

	TIME_TRAP_UENTRY

	jmp	EXT(return_from_trap)	/* return */


/*******************************************************************************************************
 *
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	esp	 -> x86_saved_state32_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_sysenter)
	/*
	 * We can be here either for a mach syscall or a unix syscall,
	 * as indicated by the sign of the code:
	 */
	movl	R_EAX(%esp),%eax
	testl	%eax,%eax
	js	EXT(lo_mach_scall)	/* < 0 => mach */
					/* > 0 => unix */

Entry(lo_unix_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_UNIX(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(unix_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mach_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_MACH(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(mach_call_munger, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mdep_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(machdep_syscall, %ebx)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo_diag_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx	// Get the address of the kernel stack
	xchgl	%ebx,%esp		// Switch to it, saving the previous

	CCALL1(diagCall, %ebx)		// Call diagnostics

	cmpl	$0,%eax			// What kind of return is this?
	je	2f
	cli				// Disable interruptions just in case they were enabled
	popl	%esp			// Get back the original stack
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
2:
	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
					// pass what would be the diag syscall
					// error return - cause an exception
	/* no return */


/*******************************************************************************************************
 *
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	esp	 -> x86_saved_state64_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_syscall)
	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl	R64_RAX(%esp), %eax	/* syscall number/class */
	movl	%eax, %ebx
	andl	$(SYSCALL_CLASS_MASK), %ebx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_mach_scall)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_unix_scall)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_mdep_scall)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
	je	EXT(lo64_diag_scall)

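	/*
	 * In C terms the dispatch above is (a sketch; the constants
	 * come from mach/i386/syscall_sw.h, included at the top):
	 *
	 *	switch (rax & SYSCALL_CLASS_MASK) {
	 *	case SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT: ...
	 *	case SYSCALL_CLASS_UNIX << SYSCALL_CLASS_SHIFT: ...
	 *	case SYSCALL_CLASS_MDEP << SYSCALL_CLASS_SHIFT: ...
	 *	case SYSCALL_CLASS_DIAG << SYSCALL_CLASS_SHIFT: ...
	 *	default:	i386_exception(EXC_SYSCALL, rax, 1);
	 *	}
	 */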
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	/* Syscall class unknown */
	CCALL3(i386_exception, $(EXC_SYSCALL), %eax, $1)
	/* no return */


Entry(lo64_unix_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_UNIX(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(unix_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo64_mach_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */
	addl	$1,TASK_SYSCALLS_MACH(%ebx)	/* increment call count */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(mach_call_munger64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */



Entry(lo64_mdep_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	sti

	CCALL1(machdep_syscall64, %ebx)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_diag_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	ACT_TASK(%ecx),%ebx		/* point to current task */

	/* Check for active vtimers in the current task */
	cmpl	$0,TASK_VTIMERS(%ebx)
	jz	1f

	/* Set a pending AST */
	orl	$(AST_BSD),%gs:CPU_PENDING_AST

	/* Set a thread AST (atomic) */
	lock
	orl	$(AST_BSD),ACT_AST(%ecx)

1:
	movl	%gs:CPU_KERNEL_STACK,%ebx	// Get the address of the kernel stack
	xchgl	%ebx,%esp		// Switch to it, saving the previous

	CCALL1(diagCall64, %ebx)	// Call diagnostics

	cmpl	$0,%eax			// What kind of return is this?
	je	2f
	cli				// Disable interruptions just in case they were enabled
	popl	%esp			// Get back the original stack
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
2:
	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
					// pass what would be the diag syscall
					// error return - cause an exception
	/* no return */


/*
 * Utility routines.
 */

/*
 * Copy from user/kernel address space.
 * arg0:	window offset or kernel address
 * arg1:	kernel address
 * arg2:	byte count
 */
Entry(copyinphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%ds

Entry(copyinphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%es
	jmp	copyin_common

Entry(copyin_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%ds

Entry(copyin_kern)

copyin_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb
	xorl	%eax,%eax		/* return 0 for success */
copyin_ret:
	mov	%ss,%cx			/* restore kernel data and extended segments */
	mov	%cx,%ds
	mov	%cx,%es

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyin_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyin_ret		/* pop frame and return */

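/*
 * C-level contract of the copyin routines above (a sketch; the exact
 * prototypes live in the C headers, not here):
 *
 *	int copyin(const user_addr_t uaddr, void *kaddr, size_t len);
 *
 * returns 0 on success, or EFAULT if a fault is taken inside the
 * RECOVER()-bracketed string moves; the recovery table transfers
 * control to copyin_fail instead of panicking.
 */
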

/*
 * Copy string from user/kern address space.
 * arg0:	window offset or kernel address
 * arg1:	kernel address
 * arg2:	max byte count
 * arg3:	actual byte count (OUT)
 */
Entry(copyinstr_kern)
	mov	%ds,%cx
	jmp	copyinstr_common

Entry(copyinstr_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */

copyinstr_common:
	mov	%cx,%fs

	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - window offset or kernel address */
	movl	8+S_ARG1,%edi		/* get destination - kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	xorl	%eax,%eax		/* set to 0 here so that the high 24 bits */
					/* are 0 for the cmpl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(copystr_fail)		/* copy bytes... */
	movb	%fs:(%esi),%al
	incl	%esi
	testl	%edi,%edi		/* if kernel address is NULL... */
	jz	3f			/* ...skip the store */
	movb	%al,(%edi)		/* copy the byte */
	incl	%edi
3:
	testl	%eax,%eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 status already in %eax */
	decl	%edx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full so copy in another byte */
	movl	$(ENAMETOOLONG),%eax	/* buffer full but no 0-byte: ENAMETOOLONG */
4:
	movl	8+S_ARG3,%edi		/* get OUT len ptr */
	cmpl	$0,%edi
	jz	copystr_ret		/* if null, just return */
	subl	8+S_ARG0,%esi
	movl	%esi,(%edi)		/* else set OUT arg to xfer len */
copystr_ret:
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copystr_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copystr_ret		/* pop frame and return */

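/*
 * C-level contract of copyinstr above (a sketch):
 *
 *	int copyinstr(const user_addr_t uaddr, void *kaddr,
 *		      size_t max, size_t *actual);
 *
 * copies up to max bytes including the terminating NUL, stores the
 * number of bytes transferred through actual (when non-NULL), and
 * returns 0 on success, ENAMETOOLONG if no NUL fits in max bytes,
 * or EFAULT on a recovered fault.
 */
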

/*
 * Copy to user/kern address space.
 * arg0:	kernel address
 * arg1:	window offset or kernel address
 * arg2:	byte count
 */
ENTRY(copyoutphys_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyoutphys_kern)
	movl	$(PHYS_WINDOW_SEL),%ecx	/* physical access through kernel window */
	mov	%cx,%ds
	jmp	copyout_common

ENTRY(copyout_user)
	movl	$(USER_WINDOW_SEL),%ecx	/* user data segment access through kernel window */
	mov	%cx,%es

ENTRY(copyout_kern)

copyout_common:
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get source - kernel address */
	movl	8+S_ARG1,%edi		/* get destination - window offset or kernel address */
	movl	8+S_ARG2,%edx		/* get count */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsl
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	rep
	movsb				/* move */
	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%cx			/* restore kernel segment */
	mov	%cx,%es
	mov	%cx,%ds

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */

/*
 * io register must not be used on slaves (no AT bus)
 */
#define	ILL_ON_SLAVE


#if	MACH_ASSERT

#define ARG0		B_ARG0
#define ARG1		B_ARG1
#define ARG2		B_ARG2
#define PUSH_FRAME	FRAME
#define POP_FRAME	EMARF

#else	/* MACH_ASSERT */

#define ARG0		S_ARG0
#define ARG1		S_ARG1
#define ARG2		S_ARG2
#define PUSH_FRAME
#define POP_FRAME

#endif	/* MACH_ASSERT */


#if	MACH_KDB || MACH_ASSERT

/*
 * Following routines are also defined as macros in i386/pio.h.
 * Compile them when MACH_KDB is configured so that they
 * can be invoked from the debugger.
 */

/*
 * void outb(unsigned char *io_port,
 *	     unsigned char byte)
 *
 * Output a byte to an IO port.
 */
ENTRY(outb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outb	%al,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned char inb(unsigned char *io_port)
 *
 * Input a byte from an IO port.
 */
ENTRY(inb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inb	%dx,%al			/* get the byte */
	POP_FRAME
	ret

/*
 * void outw(unsigned short *io_port,
 *	     unsigned short word)
 *
 * Output a word to an IO port.
 */
ENTRY(outw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outw	%ax,%dx			/* send it out */
	POP_FRAME
	ret

/*
 * unsigned short inw(unsigned short *io_port)
 *
 * Input a word from an IO port.
 */
ENTRY(inw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	xor	%eax,%eax		/* clear high bits of register */
	inw	%dx,%ax			/* get the word */
	POP_FRAME
	ret

/*
 * void outl(unsigned int *io_port,
 *	     unsigned int data)
 *
 * Output an int to an IO port.
 */
ENTRY(outl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	movl	ARG1,%eax		/* data to output */
	outl	%eax,%dx		/* send it out */
	POP_FRAME
	ret

/*
 * unsigned int inl(unsigned int *io_port)
 *
 * Input an int from an IO port.
 */
ENTRY(inl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	ARG0,%edx		/* IO port address */
	inl	%dx,%eax		/* get the int */
	POP_FRAME
	ret

#endif /* MACH_KDB || MACH_ASSERT */

/*
 * void loutb(unsigned byte *io_port,
 *	      unsigned byte *data,
 *	      unsigned int count)
 *
 * Output an array of bytes to an IO port.
 */
ENTRY(loutb)
ENTRY(outsb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsb				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret


/*
 * void loutw(unsigned short *io_port,
 *	      unsigned short *data,
 *	      unsigned int count)
 *
 * Output an array of shorts to an IO port.
 */
ENTRY(loutw)
ENTRY(outsw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsw				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret

/*
 * void loutl(unsigned short io_port,
 *	      unsigned int *data,
 *	      unsigned int count)
 *
 * Output an array of longs to an IO port.
 */
ENTRY(loutl)
ENTRY(outsl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%esi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%esi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	outsl				/* output */
	movl	%eax,%esi		/* restore register */
	POP_FRAME
	ret


/*
 * void linb(unsigned char *io_port,
 *	     unsigned char *data,
 *	     unsigned int count)
 *
 * Input an array of bytes from an IO port.
 */
ENTRY(linb)
ENTRY(insb)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insb				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret


/*
 * void linw(unsigned short *io_port,
 *	     unsigned short *data,
 *	     unsigned int count)
 *
 * Input an array of shorts from an IO port.
 */
ENTRY(linw)
ENTRY(insw)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insw				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret


/*
 * void linl(unsigned short io_port,
 *	     unsigned int *data,
 *	     unsigned int count)
 *
 * Input an array of longs from an IO port.
 */
ENTRY(linl)
ENTRY(insl)
	PUSH_FRAME
	ILL_ON_SLAVE
	movl	%edi,%eax		/* save register */
	movl	ARG0,%edx		/* get io port number */
	movl	ARG1,%edi		/* get data address */
	movl	ARG2,%ecx		/* get count */
	cld				/* count up */
	rep
	insl				/* input */
	movl	%eax,%edi		/* restore register */
	POP_FRAME
	ret

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl	S_ARG0, %ecx
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr
	movl	S_ARG1, %ecx
	movl	%eax, (%ecx)
	movl	S_ARG2, %ecx
	movl	%edx, (%ecx)
	movl	$0, %eax
	ret

rdmsr_fail:
	movl	$1, %eax
	ret
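
/*
 * Usage sketch from C: rdmsr_carefully reads an MSR without
 * panicking when the MSR is unimplemented, since the #GP raised by
 * rdmsr is caught via the recovery table:
 *
 *	uint32_t lo, hi;
 *	if (rdmsr_carefully(msr, &lo, &hi) != 0)
 *		;	// MSR not present; fault was recovered
 */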

/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END

	.data
dr_msk:
	.long	~0x000f0003
	.long	~0x00f0000c
	.long	~0x0f000030
	.long	~0xf00000c0
ENTRY(dr_addr)
	.long	0,0,0,0
	.long	0,0,0,0

	.text

#ifndef	SYMMETRY

/*
 * ffs(mask)
 */
ENTRY(ffs)
	bsfl	S_ARG0, %eax
	jz	0f
	incl	%eax
	ret
0:	xorl	%eax, %eax
	ret
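
/*
 * ffs follows the C library contract (a sketch):
 *
 *	int ffs(int mask);	// 1 + index of least significant set
 *				// bit; 0 when mask == 0
 *
 * bsfl sets ZF when its source is zero, which the jz above turns
 * into the 0 return.
 */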

/*
 * cpu_shutdown()
 * Force reboot
 */

null_idtr:
	.word	0
	.long	0

Entry(cpu_shutdown)
	lidt	null_idtr		/* disable the interrupt handler */
	xor	%ecx,%ecx		/* generate a divide by zero */
	div	%ecx,%eax		/* reboot now */
	ret				/* this will "never" be executed */
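
/*
 * With the IDT replaced by a null descriptor, the divide fault
 * cannot be delivered; the resulting escalation to a triple fault
 * resets the processor, which is the intended "reboot".
 */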

#endif	/* SYMMETRY */


/*
 * setbit(int bitno, int *s) - set bit in bit string
 */
ENTRY(setbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btsl	%ecx, (%eax)		/* set bit */
	ret

/*
 * clrbit(int bitno, int *s) - clear bit in bit string
 */
ENTRY(clrbit)
	movl	S_ARG0, %ecx		/* bit number */
	movl	S_ARG1, %eax		/* address */
	btrl	%ecx, (%eax)		/* clear bit */
	ret

/*
 * ffsbit(int *s) - find first set bit in bit string
 */
ENTRY(ffsbit)
	movl	S_ARG0, %ecx		/* address */
	movl	$0, %edx		/* base offset */
0:
	bsfl	(%ecx), %eax		/* check argument bits */
	jnz	1f			/* found bit, return */
	addl	$4, %ecx		/* increment address */
	addl	$32, %edx		/* increment offset */
	jmp	0b			/* try again */
1:
	addl	%edx, %eax		/* return offset */
	ret
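
/*
 * Note: ffsbit assumes at least one bit is set somewhere in the
 * string; given an all-zero string, the scan above runs off the end
 * of the array.
 */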

/*
 * testbit(int nr, volatile void *array)
 *
 * Test to see if the bit is set within the bit string
 */

ENTRY(testbit)
	movl	S_ARG0,%eax		/* Get the bit to test */
	movl	S_ARG1,%ecx		/* get the array string */
	btl	%eax,(%ecx)
	sbbl	%eax,%eax
	ret
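
/*
 * btl copies the tested bit into CF, and sbbl %eax,%eax turns CF
 * into 0 (bit clear) or -1 (bit set), so testbit returns a C-truthy
 * value without a branch.
 */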

ENTRY(get_pc)
	movl	4(%ebp),%eax
	ret

ENTRY(minsecurity)
	pushl	%ebp
	movl	%esp,%ebp
/*
 * jail: set the EIP to "jail" to block a kernel thread.
 * Useful to debug synchronization problems on MPs.
 */
ENTRY(jail)
	jmp	EXT(jail)

/*
 * unsigned int
 * div_scale(unsigned int dividend,
 *	     unsigned int divisor,
 *	     unsigned int *scale)
 *
 * This function returns (dividend << *scale) / divisor where *scale
 * is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (((dividend >> *scale) >= divisor))
 *		(*scale)++;
 *	*scale = 32 - *scale;
 *	return ((dividend << *scale) / divisor);
 */
ENTRY(div_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* *scale = 0 */
	xorl	%eax, %eax
	movl	ARG0, %edx		/* get dividend */
0:
	cmpl	ARG1, %edx		/* if (divisor > dividend) */
	jle	1f			/* goto 1f */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* dividend >> 1 */
	shrl	$1, %edx		/* dividend >> 1 */
	jmp	0b			/* goto 0b */
1:
	divl	ARG1			/* (dividend << (32 - *scale)) / divisor */
	movl	ARG2, %edx		/* get scale */
	movl	$32, (%edx)		/* *scale = 32 */
	subl	%ecx, (%edx)		/* *scale -= %ecx */
	POP_FRAME
	ret

/*
 * unsigned int
 * mul_scale(unsigned int multiplicand,
 *	     unsigned int multiplier,
 *	     unsigned int *scale)
 *
 * This function returns ((multiplicand * multiplier) >> *scale) where
 * scale is the largest possible value before overflow. This is used in
 * computation where precision must be achieved in order to avoid
 * floating point usage.
 *
 * Algorithm:
 *	*scale = 0;
 *	while (overflow((multiplicand * multiplier) >> *scale))
 *		(*scale)++;
 *	return ((multiplicand * multiplier) >> *scale);
 */
ENTRY(mul_scale)
	PUSH_FRAME
	xorl	%ecx, %ecx		/* *scale = 0 */
	movl	ARG0, %eax		/* get multiplicand */
	mull	ARG1			/* multiplicand * multiplier */
0:
	cmpl	$0, %edx		/* if (!overflow()) */
	je	1f			/* goto 1f */
	addl	$1, %ecx		/* (*scale)++ */
	shrdl	$1, %edx, %eax		/* (multiplicand * multiplier) >> 1 */
	shrl	$1, %edx		/* (multiplicand * multiplier) >> 1 */
	jmp	0b
1:
	movl	ARG2, %edx		/* get scale */
	movl	%ecx, (%edx)		/* set *scale */
	POP_FRAME
	ret


/*
 * Double-fault exception handler task. The last gasp...
 */
Entry(df_task_start)
	CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))
	hlt


/*
 * machine-check handler task. The last gasp...
 */
Entry(mc_task_start)
	CCALL1(panic_machine_check, $(T_MACHINE_CHECK))
	hlt

/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl	%esp, %eax
	CCALL1(panic_double_fault64, %eax)
	hlt

Entry(lo_mc64)
	movl	%esp, %eax
	CCALL1(panic_machine_check64, %eax)
	hlt