/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <i386/asm64.h>
#include <assym.s>
#include <mach_kdb.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#define	_ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>

/*
 * Locore handlers.
 */
#define	LO_ALLINTRS		EXT(lo_allintrs)
#define	LO_ALLTRAPS		EXT(lo_alltraps)
#define	LO_SYSCALL		EXT(lo_syscall)
#define	LO_UNIX_SCALL		EXT(lo_unix_scall)
#define	LO_MACH_SCALL		EXT(lo_mach_scall)
#define	LO_MDEP_SCALL		EXT(lo_mdep_scall)
#define	LO_DIAG_SCALL		EXT(lo_diag_scall)
#define	LO_DOUBLE_FAULT		EXT(lo_df64)
#define	LO_MACHINE_CHECK	EXT(lo_mc64)

/*
 * Interrupt descriptor table and code vectors for it.
 *
 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
 * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space.
 * Special vectors (e.g. double-fault) use a non-0 IST.
 */
#define	IDT64_BASE_ENTRY(vec,seg,ist,type)	 \
	.data					;\
	.long	vec				;\
	.long	KERNEL_UBER_BASE_HI32		;\
	.word	seg				;\
	.byte	ist*16				;\
	.byte	type				;\
	.long	0				;\
	.text

#define	IDT64_ENTRY(vec,ist,type)		\
	IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
#define	IDT64_ENTRY_LOCAL(vec,ist,type)		\
	IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
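
/*
 * For reference, a sketch (illustrative C only, not used by this file) of
 * the hardware 16-byte IDT gate that the boot-time "fixing" must produce
 * from the fake descriptor above. In the hardware format the handler
 * address is split across three fields, whereas the fake layout stores it
 * as two contiguous longs (the vector's low half, then
 * KERNEL_UBER_BASE_HI32):
 *
 *	struct idt64_hw_gate {
 *		uint16_t offset_low;	// handler address bits 0..15
 *		uint16_t selector;	// KERNEL64_CS
 *		uint8_t  ist_index;	// IST index in bits 0..2
 *		uint8_t  access;	// present/DPL/gate type
 *		uint16_t offset_mid;	// handler address bits 16..31
 *		uint32_t offset_high;	// handler address bits 32..63
 *		uint32_t reserved;
 *	};
 */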

/*
 * Push trap number and address of compatibility mode handler,
 * then branch to common trampoline. Error already pushed.
 */
#define	EXCEP64_ERR(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)		;\
Entry(name)					;\
	push	$(n)				;\
	movl	$(LO_ALLTRAPS), 4(%rsp)		;\
	jmp	L_enter_lohandler


/*
 * Push error(0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEPTION64(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)		;\
Entry(name)					;\
	push	$0				;\
	push	$(n)				;\
	movl	$(LO_ALLTRAPS), 4(%rsp)		;\
	jmp	L_enter_lohandler


/*
 * Interrupt from user.
 * Push error (0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEP64_USR(n,name)			 \
	IDT64_ENTRY(name,0,U_INTR_GATE)		;\
Entry(name)					;\
	push	$0				;\
	push	$(n)				;\
	movl	$(LO_ALLTRAPS), 4(%rsp)		;\
	jmp	L_enter_lohandler


/*
 * Special interrupt code from user.
 */
#define	EXCEP64_SPC_USR(n,name)			 \
	IDT64_ENTRY(name,0,U_INTR_GATE)


/*
 * Special interrupt code.
 * In 64-bit mode we may use an IST slot instead of task gates.
 */
#define	EXCEP64_IST(n,name,ist)			 \
	IDT64_ENTRY(name,ist,K_INTR_GATE)
#define	EXCEP64_SPC(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)


/*
 * Interrupt.
 * Push zero err, interrupt vector and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	INTERRUPT64(n)				 \
	IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
	.align	FALIGN				;\
L_ ## n:					;\
	push	$0				;\
	push	$(n)				;\
	movl	$(LO_ALLINTRS), 4(%rsp)		;\
	jmp	L_enter_lohandler


	.data
	.align	12
Entry(master_idt64)
Entry(hi64_data_base)
	.text
	.code64
Entry(hi64_text_base)

EXCEPTION64(0x00,t64_zero_div)
EXCEP64_SPC(0x01,hi64_debug)
INTERRUPT64(0x02)			/* NMI */
EXCEP64_USR(0x03,t64_int3)
EXCEP64_USR(0x04,t64_into)
EXCEP64_USR(0x05,t64_bounds)
EXCEPTION64(0x06,t64_invop)
EXCEPTION64(0x07,t64_nofpu)
#if	MACH_KDB
EXCEP64_IST(0x08,db_task_dbl_fault64,1)
#else
EXCEP64_IST(0x08,hi64_double_fault,1)
#endif
EXCEPTION64(0x09,a64_fpu_over)
EXCEPTION64(0x0a,a64_inv_tss)
EXCEP64_SPC(0x0b,hi64_segnp)
#if	MACH_KDB
EXCEP64_IST(0x0c,db_task_stk_fault64,1)
#else
EXCEP64_SPC(0x0c,hi64_stack_fault)
#endif
EXCEP64_SPC(0x0d,hi64_gen_prot)
EXCEP64_SPC(0x0e,hi64_page_fault)
EXCEPTION64(0x0f,t64_trap_0f)
EXCEPTION64(0x10,t64_fpu_err)
EXCEPTION64(0x11,t64_trap_11)
EXCEP64_IST(0x12,mc64,1)
EXCEPTION64(0x13,t64_sse_err)
EXCEPTION64(0x14,t64_trap_14)
EXCEPTION64(0x15,t64_trap_15)
EXCEPTION64(0x16,t64_trap_16)
EXCEPTION64(0x17,t64_trap_17)
EXCEPTION64(0x18,t64_trap_18)
EXCEPTION64(0x19,t64_trap_19)
EXCEPTION64(0x1a,t64_trap_1a)
EXCEPTION64(0x1b,t64_trap_1b)
EXCEPTION64(0x1c,t64_trap_1c)
EXCEPTION64(0x1d,t64_trap_1d)
EXCEPTION64(0x1e,t64_trap_1e)
EXCEPTION64(0x1f,t64_trap_1f)

INTERRUPT64(0x20)
INTERRUPT64(0x21)
INTERRUPT64(0x22)
INTERRUPT64(0x23)
INTERRUPT64(0x24)
INTERRUPT64(0x25)
INTERRUPT64(0x26)
INTERRUPT64(0x27)
INTERRUPT64(0x28)
INTERRUPT64(0x29)
INTERRUPT64(0x2a)
INTERRUPT64(0x2b)
INTERRUPT64(0x2c)
INTERRUPT64(0x2d)
INTERRUPT64(0x2e)
INTERRUPT64(0x2f)

INTERRUPT64(0x30)
INTERRUPT64(0x31)
INTERRUPT64(0x32)
INTERRUPT64(0x33)
INTERRUPT64(0x34)
INTERRUPT64(0x35)
INTERRUPT64(0x36)
INTERRUPT64(0x37)
INTERRUPT64(0x38)
INTERRUPT64(0x39)
INTERRUPT64(0x3a)
INTERRUPT64(0x3b)
INTERRUPT64(0x3c)
INTERRUPT64(0x3d)
INTERRUPT64(0x3e)
INTERRUPT64(0x3f)

INTERRUPT64(0x40)
INTERRUPT64(0x41)
INTERRUPT64(0x42)
INTERRUPT64(0x43)
INTERRUPT64(0x44)
INTERRUPT64(0x45)
INTERRUPT64(0x46)
INTERRUPT64(0x47)
INTERRUPT64(0x48)
INTERRUPT64(0x49)
INTERRUPT64(0x4a)
INTERRUPT64(0x4b)
INTERRUPT64(0x4c)
INTERRUPT64(0x4d)
INTERRUPT64(0x4e)
INTERRUPT64(0x4f)

INTERRUPT64(0x50)
INTERRUPT64(0x51)
INTERRUPT64(0x52)
INTERRUPT64(0x53)
INTERRUPT64(0x54)
INTERRUPT64(0x55)
INTERRUPT64(0x56)
INTERRUPT64(0x57)
INTERRUPT64(0x58)
INTERRUPT64(0x59)
INTERRUPT64(0x5a)
INTERRUPT64(0x5b)
INTERRUPT64(0x5c)
INTERRUPT64(0x5d)
INTERRUPT64(0x5e)
INTERRUPT64(0x5f)

INTERRUPT64(0x60)
INTERRUPT64(0x61)
INTERRUPT64(0x62)
INTERRUPT64(0x63)
INTERRUPT64(0x64)
INTERRUPT64(0x65)
INTERRUPT64(0x66)
INTERRUPT64(0x67)
INTERRUPT64(0x68)
INTERRUPT64(0x69)
INTERRUPT64(0x6a)
INTERRUPT64(0x6b)
INTERRUPT64(0x6c)
INTERRUPT64(0x6d)
INTERRUPT64(0x6e)
INTERRUPT64(0x6f)

INTERRUPT64(0x70)
INTERRUPT64(0x71)
INTERRUPT64(0x72)
INTERRUPT64(0x73)
INTERRUPT64(0x74)
INTERRUPT64(0x75)
INTERRUPT64(0x76)
INTERRUPT64(0x77)
INTERRUPT64(0x78)
INTERRUPT64(0x79)
INTERRUPT64(0x7a)
INTERRUPT64(0x7b)
INTERRUPT64(0x7c)
INTERRUPT64(0x7d)
INTERRUPT64(0x7e)
EXCEP64_USR(0x7f,t64_dtrace_ret)

EXCEP64_SPC_USR(0x80,hi64_unix_scall)
EXCEP64_SPC_USR(0x81,hi64_mach_scall)
EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
EXCEP64_SPC_USR(0x83,hi64_diag_scall)

INTERRUPT64(0x84)
INTERRUPT64(0x85)
INTERRUPT64(0x86)
INTERRUPT64(0x87)
INTERRUPT64(0x88)
INTERRUPT64(0x89)
INTERRUPT64(0x8a)
INTERRUPT64(0x8b)
INTERRUPT64(0x8c)
INTERRUPT64(0x8d)
INTERRUPT64(0x8e)
INTERRUPT64(0x8f)

INTERRUPT64(0x90)
INTERRUPT64(0x91)
INTERRUPT64(0x92)
INTERRUPT64(0x93)
INTERRUPT64(0x94)
INTERRUPT64(0x95)
INTERRUPT64(0x96)
INTERRUPT64(0x97)
INTERRUPT64(0x98)
INTERRUPT64(0x99)
INTERRUPT64(0x9a)
INTERRUPT64(0x9b)
INTERRUPT64(0x9c)
INTERRUPT64(0x9d)
INTERRUPT64(0x9e)
INTERRUPT64(0x9f)

INTERRUPT64(0xa0)
INTERRUPT64(0xa1)
INTERRUPT64(0xa2)
INTERRUPT64(0xa3)
INTERRUPT64(0xa4)
INTERRUPT64(0xa5)
INTERRUPT64(0xa6)
INTERRUPT64(0xa7)
INTERRUPT64(0xa8)
INTERRUPT64(0xa9)
INTERRUPT64(0xaa)
INTERRUPT64(0xab)
INTERRUPT64(0xac)
INTERRUPT64(0xad)
INTERRUPT64(0xae)
INTERRUPT64(0xaf)

INTERRUPT64(0xb0)
INTERRUPT64(0xb1)
INTERRUPT64(0xb2)
INTERRUPT64(0xb3)
INTERRUPT64(0xb4)
INTERRUPT64(0xb5)
INTERRUPT64(0xb6)
INTERRUPT64(0xb7)
INTERRUPT64(0xb8)
INTERRUPT64(0xb9)
INTERRUPT64(0xba)
INTERRUPT64(0xbb)
INTERRUPT64(0xbc)
INTERRUPT64(0xbd)
INTERRUPT64(0xbe)
INTERRUPT64(0xbf)

INTERRUPT64(0xc0)
INTERRUPT64(0xc1)
INTERRUPT64(0xc2)
INTERRUPT64(0xc3)
INTERRUPT64(0xc4)
INTERRUPT64(0xc5)
INTERRUPT64(0xc6)
INTERRUPT64(0xc7)
INTERRUPT64(0xc8)
INTERRUPT64(0xc9)
INTERRUPT64(0xca)
INTERRUPT64(0xcb)
INTERRUPT64(0xcc)
INTERRUPT64(0xcd)
INTERRUPT64(0xce)
INTERRUPT64(0xcf)

INTERRUPT64(0xd0)
INTERRUPT64(0xd1)
INTERRUPT64(0xd2)
INTERRUPT64(0xd3)
INTERRUPT64(0xd4)
INTERRUPT64(0xd5)
INTERRUPT64(0xd6)
INTERRUPT64(0xd7)
INTERRUPT64(0xd8)
INTERRUPT64(0xd9)
INTERRUPT64(0xda)
INTERRUPT64(0xdb)
INTERRUPT64(0xdc)
INTERRUPT64(0xdd)
INTERRUPT64(0xde)
INTERRUPT64(0xdf)

INTERRUPT64(0xe0)
INTERRUPT64(0xe1)
INTERRUPT64(0xe2)
INTERRUPT64(0xe3)
INTERRUPT64(0xe4)
INTERRUPT64(0xe5)
INTERRUPT64(0xe6)
INTERRUPT64(0xe7)
INTERRUPT64(0xe8)
INTERRUPT64(0xe9)
INTERRUPT64(0xea)
INTERRUPT64(0xeb)
INTERRUPT64(0xec)
INTERRUPT64(0xed)
INTERRUPT64(0xee)
INTERRUPT64(0xef)

INTERRUPT64(0xf0)
INTERRUPT64(0xf1)
INTERRUPT64(0xf2)
INTERRUPT64(0xf3)
INTERRUPT64(0xf4)
INTERRUPT64(0xf5)
INTERRUPT64(0xf6)
INTERRUPT64(0xf7)
INTERRUPT64(0xf8)
INTERRUPT64(0xf9)
INTERRUPT64(0xfa)
INTERRUPT64(0xfb)
INTERRUPT64(0xfc)
INTERRUPT64(0xfd)
INTERRUPT64(0xfe)
EXCEPTION64(0xff,t64_preempt)


	.text
/*
 *
 * Trap/interrupt entry points.
 *
 * All traps must create the following 32-bit save area on the PCB "stack"
 * - this is identical to the legacy mode 32-bit case:
 *
 *	gs
 *	fs
 *	es
 *	ds
 *	edi
 *	esi
 *	ebp
 *	cr2 (defined only for page fault)
 *	ebx
 *	edx
 *	ecx
 *	eax
 *	trap number
 *	error code
 *	eip
 *	cs
 *	eflags
 *	user esp - if from user
 *	user ss  - if from user
 *
 * Above this is the trap number and compatibility mode handler address
 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
 *
 *	(trapno, trapfn)
 *	err
 *	rip
 *	cs
 *	rflags
 *	rsp
 *	ss
 *
 */
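
/*
 * Illustrative sketch (hypothetical C, names assumed) of how those two
 * areas nest on the PCB stack once a compat frame has been built; the
 * R32_* offsets index the save area and the ISC32_* offsets index the
 * interrupt stack frame sitting above it:
 *
 *	struct compat_pcb_frame {
 *		saved_state32	regs;		// gs..ss, as listed above
 *		uint32_t	trapno, trapfn;	// packed 8-byte entry
 *		uint64_t	err, rip, cs, rflags, rsp, ss;
 *	};
 */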

	.code32
/*
 * Control is passed here to return to the compatibility mode user.
 * At this stage we're in kernel space in compatibility mode
 * but we need to switch into 64-bit mode in the 4G-based trampoline
 * space before performing the iret.
 */
Entry(lo64_ret_to_user)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	movl	ACT_PCB_IDS(%ecx),%eax	/* Obtain this thread's debug state */
	cmpl	$0,%eax			/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%eax), %ecx	/* If so, load the 32 bit DRs */
	movl	%ecx, %db0
	movl	DS_DR1(%eax), %ecx
	movl	%ecx, %db1
	movl	DS_DR2(%eax), %ecx
	movl	%ecx, %db2
	movl	DS_DR3(%eax), %ecx
	movl	%ecx, %db3
	movl	DS_DR7(%eax), %ecx
	movl	%ecx, %gs:CPU_DR7
	movl	$0, %gs:CPU_DR7 + 4
	jmp	2f
1:
	ENTER_64BIT_MODE()		/* Enter long mode */
	mov	DS64_DR0(%eax), %rcx	/* Load the full width DRs */
	mov	%rcx, %dr0
	mov	DS64_DR1(%eax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%eax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%eax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%eax), %rcx
	mov	%rcx, %gs:CPU_DR7
	jmp	3f			/* Enter uberspace */
2:
	ENTER_64BIT_MODE()
3:
	ENTER_UBERSPACE()

	/*
	 * Now switch %cr3, if necessary.
	 */
	swapgs				/* switch back to uber-kernel gs base */
	mov	%gs:CPU_TASK_CR3,%rcx
	mov	%rcx,%gs:CPU_ACTIVE_CR3
	mov	%cr3, %rax
	cmp	%rcx, %rax
	je	1f
	/* flag the copyio engine state as WINDOWS_CLEAN */
	mov	%gs:CPU_ACTIVE_THREAD,%eax
	movl	$(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
	mov	%rcx,%cr3		/* switch to user's address space */
1:

	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register? */
	cmp	$0, %rax
	je	1f
	mov	%rax, %dr7		/* Set DR7 */
	movq	$0, %gs:CPU_DR7
1:

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp		/* relocate into uber-space */
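	/*
	 * (The shl/shrd pair replaces the upper 32 bits of %rsp with
	 * KERNEL_UBER_BASE_HI32 while preserving the lower 32: shl moves
	 * the low half of %rsp into the high half, then shrd shifts it
	 * back down, pulling %rax's low 32 bits in at the top. E.g. a
	 * stack at 0x0000000012345000 becomes
	 * KERNEL_UBER_BASE_HI32:12345000.)
	 */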

	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	jmp	L_32bit_return

Entry(lo64_ret_to_kernel)
	ENTER_64BIT_MODE()
	ENTER_UBERSPACE()

	swapgs				/* switch back to uber-kernel gs base */

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp		/* relocate into uber-space */

	/* Check for return to 64-bit kernel space (EFI today) */
	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	/* fall through for 32-bit return */

L_32bit_return:
	/*
	 * Restore registers into the machine state for iret.
	 */
	movl	R32_EIP(%rsp), %eax
	movl	%eax, ISC32_RIP(%rsp)
	movl	R32_EFLAGS(%rsp), %eax
	movl	%eax, ISC32_RFLAGS(%rsp)
	movl	R32_CS(%rsp), %eax
	movl	%eax, ISC32_CS(%rsp)
	movl	R32_UESP(%rsp), %eax
	movl	%eax, ISC32_RSP(%rsp)
	movl	R32_SS(%rsp), %eax
	movl	%eax, ISC32_SS(%rsp)

	/*
	 * Restore general 32-bit registers
	 */
	movl	R32_EAX(%rsp), %eax
	movl	R32_EBX(%rsp), %ebx
	movl	R32_ECX(%rsp), %ecx
	movl	R32_EDX(%rsp), %edx
	movl	R32_EBP(%rsp), %ebp
	movl	R32_ESI(%rsp), %esi
	movl	R32_EDI(%rsp), %edi

	/*
	 * Restore segment registers. We may take an exception here, but
	 * we've got enough space left in the save frame area to absorb
	 * a hardware frame plus the trapfn and trapno.
	 */
	swapgs
EXT(ret32_set_ds):
	movw	R32_DS(%rsp), %ds
EXT(ret32_set_es):
	movw	R32_ES(%rsp), %es
EXT(ret32_set_fs):
	movw	R32_FS(%rsp), %fs
EXT(ret32_set_gs):
	movw	R32_GS(%rsp), %gs

	add	$(ISC32_OFFSET)+8+8, %rsp	/* pop compat frame +
						   trapno/trapfn and error */
	cmp	$(SYSENTER_CS),ISF64_CS-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_fast_exit
EXT(ret32_iret):
	iretq				/* return from interrupt */

L_fast_exit:
	pop	%rdx			/* user return eip */
	pop	%rcx			/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupts enable, sti below */
	popf				/* flags - carry denotes failure */
	pop	%rcx			/* user return esp */
	.code32
	sti				/* interrupts enabled after sysexit */
	sysexit				/* 32-bit sysexit */
	.code64

L_64bit_return:
	/*
	 * Set the GS Base MSR with the user's gs base.
	 */
	movl	%gs:CPU_UBER_USER_GS_BASE, %eax
	movl	%gs:CPU_UBER_USER_GS_BASE+4, %edx
	movl	$(MSR_IA32_GS_BASE), %ecx
	swapgs
	testb	$3, R64_CS(%rsp)	/* returning to user-space? */
	jz	1f
	wrmsr				/* set 64-bit base */
1:

	/*
	 * Restore general 64-bit registers
	 */
	mov	R64_R15(%rsp), %r15
	mov	R64_R14(%rsp), %r14
	mov	R64_R13(%rsp), %r13
	mov	R64_R12(%rsp), %r12
	mov	R64_R11(%rsp), %r11
	mov	R64_R10(%rsp), %r10
	mov	R64_R9(%rsp), %r9
	mov	R64_R8(%rsp), %r8
	mov	R64_RSI(%rsp), %rsi
	mov	R64_RDI(%rsp), %rdi
	mov	R64_RBP(%rsp), %rbp
	mov	R64_RDX(%rsp), %rdx
	mov	R64_RBX(%rsp), %rbx
	mov	R64_RCX(%rsp), %rcx
	mov	R64_RAX(%rsp), %rax

	add	$(ISS64_OFFSET)+8+8, %rsp	/* pop saved state frame +
						   trapno/trapfn and error */
	cmpl	$(SYSCALL_CS),ISF64_CS-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_sysret
EXT(ret64_iret):
	iretq				/* return from interrupt */

L_sysret:
	/*
	 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
	 *	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	mov	ISF64_RIP-16(%rsp), %rcx
	mov	ISF64_RFLAGS-16(%rsp), %r11
	mov	ISF64_RSP-16(%rsp), %rsp
	sysretq				/* return from system call */

/*
 * Common path to enter locore handlers.
 */
L_enter_lohandler:
	swapgs				/* switch to kernel gs (cpu_data) */
L_enter_lohandler_continue:
	cmpl	$(USER64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* this is a 64-bit user task */
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* we're in 64-bit (EFI) code */
	jmp	L_32bit_enter

/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */
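
/*
 * Illustrative only -- one plausible user-mode stub (not part of this
 * file) entering the UNIX gate at vector 0x80; the other gates follow
 * the same shape:
 *
 *	movl	$SYS_something, %eax	# syscall number in %eax
 *	int	$0x80			# vectors to hi64_unix_scall above
 *
 * Mach traps (vector 0x81) use negative numbers in %eax by convention,
 * machine-dependent calls use vector 0x82 and diagnostics calls 0x83.
 */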

Entry(hi64_unix_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_unix_scall_continue:
	push	%rax			/* save system call number */
	push	$(UNIX_INT)
	movl	$(LO_UNIX_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check


Entry(hi64_mach_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mach_scall_continue:
	push	%rax			/* save system call number */
	push	$(MACH_INT)
	movl	$(LO_MACH_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check


Entry(hi64_mdep_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mdep_scall_continue:
	push	%rax			/* save system call number */
	push	$(MACHDEP_INT)
	movl	$(LO_MDEP_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check


Entry(hi64_diag_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_diag_scall_continue:
	push	%rax			/* save system call number */
	push	$(DIAG_INT)
	movl	$(LO_DIAG_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check

Entry(hi64_syscall)
	swapgs				/* Kapow! get per-cpu data area */
L_syscall_continue:
	mov	%rsp, %gs:CPU_UBER_TMP	/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp	/* switch stack to pcb */

	/*
	 * Save values in the ISF frame in the PCB
	 * to cons up the saved machine state.
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movl	$(T_SYSCALL), ISF64_TRAPNO(%rsp)	/* trapno */
	movl	$(LO_SYSCALL), ISF64_TRAPFN(%rsp)
	jmp	L_64bit_enter		/* this can only be a 64-bit task */


L_32bit_enter_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	jmp	L_32bit_enter
/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *	 or requiring ecx to be preserved.
 */
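/*
 * One plausible user-side sequence (hypothetical, not from this file)
 * meeting those conventions, reached via call with the syscall number
 * already in %eax:
 *
 *	popl	%edx		# return address becomes the sysexit target
 *	movl	%esp, %ecx	# user stack: ret addr, then the arguments
 *	sysenter		# enters hi64_sysenter; resumes after sysexit
 */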
Entry(hi64_sysenter)
	mov	(%rsp), %rsp		/* switch from temporary stack to pcb */
	/*
	 * Push values on to the PCB stack
	 * to cons up the saved machine state.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flags bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
	swapgs				/* switch to kernel gs (cpu_data) */
L_sysenter_continue:
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	push	$(T_SYSENTER)
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)
	movl	$(LO_MACH_SCALL), ISF64_TRAPFN(%rsp)
	testl	%eax, %eax
	js	L_32bit_enter_check
	movl	$(LO_UNIX_SCALL), ISF64_TRAPFN(%rsp)
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
/* If the caller (typically LibSystem) has recorded the cumulative size of
 * the arguments in EAX, copy them over from the user stack directly.
 * We recover from exceptions inline--if the copy loop doesn't complete
 * due to an exception, we fall back to copyin from compatibility mode.
 * We can potentially extend this mechanism to mach traps as well (DRK).
 */
L_sysenter_copy_args:
	testl	$(I386_SYSCALL_ARG_BYTES_MASK), %eax
	jz	L_32bit_enter
	xor	%r9, %r9
	mov	%gs:CPU_UBER_ARG_STORE, %r8	/* per-cpu argument staging area */
	movl	%eax, %r9d
	mov	%gs:CPU_UBER_ARG_STORE_VALID, %r12 /* pointer to validity flag */
	xor	%r10, %r10
	shrl	$(I386_SYSCALL_ARG_DWORDS_SHIFT), %r9d	/* extract the number of */
	andl	$(I386_SYSCALL_ARG_DWORDS_MASK), %r9d	/* argument dwords to copy */
	movl	$0, (%r12)		/* invalidate the staging area first */
EXT(hi64_sysenter_user_arg_copy):
0:
	movl	4(%rcx, %r10, 4), %r11d	/* fetch arg dword, skipping ret addr */
	movl	%r11d, (%r8, %r10, 4)	/* stash it in the staging area */
	incl	%r10d
	decl	%r9d
	jnz	0b
	movl	$1, (%r12)		/* copy completed: mark area valid */
	/* Fall through to 32-bit handler */

L_32bit_enter:
	/*
	 * Make space for the compatibility save area.
	 */
	sub	$(ISC32_OFFSET), %rsp
	movl	$(SS_32), SS_FLAVOR(%rsp)

	/*
	 * Save segment regs
	 */
	mov	%ds, R32_DS(%rsp)
	mov	%es, R32_ES(%rsp)
	mov	%fs, R32_FS(%rsp)
	mov	%gs, R32_GS(%rsp)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R32_EAX(%rsp)
	mov	%ebx, R32_EBX(%rsp)
	mov	%ecx, R32_ECX(%rsp)
	mov	%edx, R32_EDX(%rsp)
	mov	%ebp, R32_EBP(%rsp)
	mov	%esi, R32_ESI(%rsp)
	mov	%edi, R32_EDI(%rsp)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R32_CR2(%rsp)

	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	ISC32_RIP(%rsp), %eax
	mov	%eax, R32_EIP(%rsp)
	mov	ISC32_RFLAGS(%rsp), %eax
	mov	%eax, R32_EFLAGS(%rsp)
	mov	ISC32_CS(%rsp), %eax
	mov	%eax, R32_CS(%rsp)
	testb	$3, %al
	jz	1f
	xor	%ebp, %ebp
1:
	mov	ISC32_RSP(%rsp), %eax
	mov	%eax, R32_UESP(%rsp)
	mov	ISC32_SS(%rsp), %eax
	mov	%eax, R32_SS(%rsp)
L_32bit_enter_after_fault:
	mov	ISC32_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R32_TRAPNO(%rsp)
	mov	ISC32_ERR(%rsp), %eax
	mov	%eax, R32_ERR(%rsp)
	mov	ISC32_TRAPFN(%rsp), %edx

/*
 * Common point to enter lo_handler in compatibility mode:
 *	%ebx	trapno
 *	%edx	locore handler address
 */
L_enter_lohandler2:
	/*
	 * Switch address space to kernel
	 * if not shared space and not already mapped.
	 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
	 */
	mov	%cr3, %rax
	mov	%gs:CPU_TASK_CR3, %rcx
	cmp	%rax, %rcx		/* is the task's cr3 loaded? */
	jne	1f
	cmpl	$(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
	je	2f
1:
	mov	%gs:CPU_KERNEL_CR3, %rcx
	cmp	%rax, %rcx
	je	2f
	mov	%rcx, %cr3
	mov	%rcx, %gs:CPU_ACTIVE_CR3
2:
	/*
	 * Switch to compatibility mode.
	 * Then establish kernel segments.
	 */
	swapgs				/* Done with uber-kernel gs */
	ENTER_COMPAT_MODE()

	/*
	 * Now in compatibility mode and running in compatibility space,
	 * prepare to enter the locore handler.
	 *	%ebx	trapno
	 *	%edx	lo_handler pointer
	 * Note: the stack pointer (now 32-bit) is now directly addressing
	 * the kernel below 4G and therefore is automagically re-based.
	 */
	mov	$(KERNEL_DS), %eax
	mov	%eax, %ss
	mov	%eax, %ds
	mov	%eax, %es
	mov	%eax, %fs
	mov	$(CPU_DATA_GS), %eax
	mov	%eax, %gs

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* Get the active thread */
	cmpl	$0, ACT_PCB_IDS(%ecx)	/* Is there a debug register state? */
	je	1f
	movl	$0, %ecx		/* If so, reset DR7 (the control) */
	movl	%ecx, %dr7
1:
	addl	$1,%gs:hwIntCnt(,%ebx,4)	// Bump the trap/intr count
	/* Dispatch the designated lo handler */
	jmp	*%edx

	.code64
L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
	movl	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	/* Fall through... */

L_64bit_enter:
	/*
	 * Here for a 64-bit user task, or special 64-bit kernel code.
	 * Make space for the save area.
	 */
	sub	$(ISS64_OFFSET), %rsp
	movl	$(SS_64), SS_FLAVOR(%rsp)

	/*
	 * Save segment regs
	 */
	mov	%fs, R64_FS(%rsp)
	mov	%gs, R64_GS(%rsp)

	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%rsp)
	mov	%rcx, R64_RCX(%rsp)
	mov	%rbx, R64_RBX(%rsp)
	mov	%rbp, R64_RBP(%rsp)
	mov	%r11, R64_R11(%rsp)
	mov	%r12, R64_R12(%rsp)
	mov	%r13, R64_R13(%rsp)
	mov	%r14, R64_R14(%rsp)
	mov	%r15, R64_R15(%rsp)

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%rsp)

	/* Other registers (which may contain syscall args) */
	mov	%rdi, R64_RDI(%rsp)	/* arg0 .. */
	mov	%rsi, R64_RSI(%rsp)
	mov	%rdx, R64_RDX(%rsp)
	mov	%r10, R64_R10(%rsp)
	mov	%r8, R64_R8(%rsp)
	mov	%r9, R64_R9(%rsp)	/* .. arg5 */

L_64bit_enter_after_fault:
	/*
	 * At this point we're almost ready to join the common lo-entry code.
	 */
	mov	R64_TRAPNO(%rsp), %ebx
	mov	R64_TRAPFN(%rsp), %edx

	testb	$3, ISF64_CS+ISS64_OFFSET(%rsp)
	jz	1f
	xor	%rbp, %rbp
1:
	jmp	L_enter_lohandler2

Entry(hi64_page_fault)
	push	$(T_PAGE_FAULT)
	movl	$(LO_ALLTRAPS), 4(%rsp)
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler
	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler
	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_32bit_enter

/*
 * Debug trap. Check for single-stepping across system call into
 * kernel. If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(hi64_debug)
	swapgs				/* set %gs for cpu data */
	push	$0			/* error code */
	push	$(T_DEBUG)
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_enter_lohandler_continue

	/*
	 * trap came from kernel mode
	 */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
	jne	6f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mach_scall_continue	/* continue system call entry */
6:
	cmpl	$(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
	jne	5f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mdep_scall_continue	/* continue system call entry */
5:
	cmpl	$(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
	jne	4f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_unix_scall_continue	/* continue system call entry */
4:
	cmpl	$(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue
	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and copy eflags.
	 */
	add	$32,%rsp		/* remove trapno/trapfn/err/rip/cs */
	push	%rcx			/* save %rcx - user stack pointer */
	mov	32(%rsp),%rcx		/* top of intr stack -> pcb stack */
	xchg	%rcx,%rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	(%rcx)			/* saved %rcx into rsp slot */
	push	8(%rcx)			/* rflags */
	mov	(%rcx),%rcx		/* restore %rcx */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	jmp	L_sysenter_continue	/* continue sysenter entry */


Entry(hi64_double_fault)
	swapgs				/* set %gs for cpu data */
	push	$(T_DOUBLE_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)

	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_syscall)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue


/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->	 0:	trap code (NP or GP) and trap function
 *		 8:	segment number in error (error code)
 *		16:	rip
 *		24:	cs
 *		32:	rflags
 *		40:	rsp
 *		48:	ss
 *		56:	old registers (trap is from kernel)
 */
Entry(hi64_gen_prot)
	push	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_stack_fault)
	push	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_segnp)
	push	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	movl	$(LO_ALLTRAPS), 4(%rsp)
	testb	$3,24(%rsp)
	jnz	hi64_take_trap
					/* trap was from kernel mode, so */
					/* check for the kernel exit sequence */
	cmpl	$(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
	jne	hi64_take_trap		/* trap not in uber-space */

	cmpl	$(EXT(ret32_iret)), 16(%rsp)
	je	L_fault_iret32
	cmpl	$(EXT(ret32_set_ds)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_es)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_fs)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_gs)), 16(%rsp)
	je	L_32bit_fault_set_seg

	cmpl	$(EXT(ret64_iret)), 16(%rsp)
	je	L_fault_iret64

	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	hi64_take_trap
	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_32bit_enter
hi64_take_trap:
	jmp	L_enter_lohandler


/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *	 0:	trap number/function
 *	 8:	errcode
 *	16:	rip
 *	24:	cs
 *	32:	rflags
 *	40:	rsp
 *	48:	ss			--> new trapno/trapfn
 *	56:	(16-byte padding)	--> new errcode
 *	64:	user rip
 *	72:	user cs
 *	80:	user rflags
 *	88:	user rsp
 *	96:	user ss
 */
L_fault_iret32:
	mov	%rax, 16(%rsp)		/* save rax (we don't need saved rip) */
	mov	0(%rsp), %rax		/* get trap number */
	mov	%rax, 48(%rsp)		/* put in user trap number */
	mov	8(%rsp), %rax		/* get error code */
	mov	%rax, 56(%rsp)		/* put in user errcode */
	mov	16(%rsp), %rax		/* restore rax */
	add	$48, %rsp		/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_32bit_enter

L_fault_iret64:
	mov	%rax, 16(%rsp)		/* save rax (we don't need saved rip) */
	mov	0(%rsp), %rax		/* get trap number */
	mov	%rax, 48(%rsp)		/* put in user trap number */
	mov	8(%rsp), %rax		/* get error code */
	mov	%rax, 56(%rsp)		/* put in user errcode */
	mov	16(%rsp), %rax		/* restore rax */
	add	$48, %rsp		/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_64bit_enter

/*
 * Fault restoring a segment register. All of the saved state is still
 * on the stack untouched since we didn't move the stack pointer.
 */
L_32bit_fault_set_seg:
	mov	0(%rsp), %rax		/* get trap number/function */
	mov	8(%rsp), %rdx		/* get error code */
	mov	40(%rsp), %rsp		/* reload stack prior to fault */
	mov	%rax,ISC32_TRAPNO(%rsp)
	mov	%rdx,ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	swapgs
	jmp	L_32bit_enter_after_fault


/*
 * Fatal exception handlers:
 */
Entry(db_task_dbl_fault64)
	push	$(T_DOUBLE_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
	jmp	L_enter_lohandler

Entry(db_task_stk_fault64)
	push	$(T_STACK_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
	jmp	L_enter_lohandler

Entry(mc64)
	push	$(0)			/* Error */
	push	$(T_MACHINE_CHECK)
	movl	$(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
	jmp	L_enter_lohandler