/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <assym.s>

#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/acpi.h>
	.code32


/*
 * Interrupt and bootup stack for initial processor.
 */

	/* in the __HIB section since the hibernate restore code uses this stack. */
	.section __HIB, __data
	.align	12

	.globl	EXT(low_intstack)
EXT(low_intstack):
	.globl	EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):

	.space	INTSTACK_SIZE

	.globl	EXT(low_eintstack)
EXT(low_eintstack):
	.globl	EXT(gIOHibernateRestoreStackEnd)
EXT(gIOHibernateRestoreStackEnd):

/* back to the regular __DATA section. */

	.section __DATA, __data

/*
 * Stack for last-gasp double-fault handler.
 */
	.align	12
	.globl	EXT(df_task_stack)
EXT(df_task_stack):
	.space	INTSTACK_SIZE
	.globl	EXT(df_task_stack_end)
EXT(df_task_stack_end):


/*
 * Stack for machine-check handler.
 */
	.align	12
	.globl	EXT(mc_task_stack)
EXT(mc_task_stack):
	.space	INTSTACK_SIZE
	.globl	EXT(mc_task_stack_end)
EXT(mc_task_stack_end):

/*
 * The BSP CPU starts here.
 * eax points to kernbootstruct
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */

#define SWITCH_TO_64BIT_MODE					\
	movl	$(CR4_PAE),%eax		/* enable PAE */	;\
	movl	%eax,%cr4					;\
	movl	$MSR_IA32_EFER,%ecx				;\
	rdmsr							;\
	orl	$MSR_IA32_EFER_LME,%eax	/* enable long mode */	;\
	wrmsr							;\
	movl	$INITPT_SEG_BASE,%eax				;\
	movl	%eax,%cr3					;\
	movl	%cr0,%eax					;\
	orl	$(CR0_PG|CR0_WP),%eax	/* enable paging */	;\
	movl	%eax,%cr0					;\
	/* "The Aussie Maneuver" ("Myria" variant) */		;\
	pushl	$(0xcb<<24)|KERNEL64_CS	/* reload CS with 0x08 */ ;\
	call	.-1						;\
	.code64
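
/*
 * How the maneuver works: the pushl immediate is stored little-endian,
 * so its final byte in memory is 0xcb (lret). "call .-1" pushes a return
 * address and jumps to that byte; the lret then pops the return address
 * into EIP and pops KERNEL64_CS into CS, completing the far transfer
 * into 64-bit mode without needing a relocated far jump.
 */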

/*
 * [ We used to have a reason for the following statement; ]
 * [ but the issue has been fixed. The line is true        ]
 * [ nevertheless, therefore it should remain there.       ]
 * This proves that Little Endian is superior to Big Endian.
 */

	.text
	.align	ALIGN
	.globl	EXT(_start)
	.globl	EXT(_pstart)
LEXT(_start)
LEXT(_pstart)

	.code32

#if 0
	mov	$0x3f8, %dx
	mov	$0x4D, %al;	out	%al, %dx
	mov	$0x49, %al;	out	%al, %dx
	mov	$0x53, %al;	out	%al, %dx
	mov	$0x54, %al;	out	%al, %dx
	mov	$0x0D, %al;	out	%al, %dx
	mov	$0x0A, %al;	out	%al, %dx
#endif
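	/* (When enabled, the disabled block above writes "MIST\r\n" to the
	 * COM1 serial port at I/O address 0x3f8 as an early sign of life.) */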

/*
 * Here we do the minimal setup to switch from 32-bit mode to 64-bit long mode.
 *
 * Initial memory layout:
 *
 *	-------------------------
 *	|			|
 *	| Kernel text/data	|
 *	|			|
 *	------------------------- Kernel start addr
 *	|			|
 *	|			|
 *	------------------------- 0
 *
 */
	mov	%eax, %edi	/* save kernbootstruct */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	/*
	 * Set up segmentation
	 */
	movl	$EXT(protected_mode_gdtr), %eax
	lgdtl	(%eax)

	/* the following code is shared by the master CPU and all slave CPUs */
L_pstart_common:
	/*
	 * switch to 64-bit mode
	 */
	SWITCH_TO_64BIT_MODE

	/* Flush data segment selectors */
	xor	%eax, %eax
	mov	%ax, %ss
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %fs
	mov	%ax, %gs

	/* %edi = boot_args_start */

	leaq	_vstart(%rip), %rcx
	movq	$0xffffff8000000000, %rax	/* adjust the pointer to be up high */
	or	%rax, %rsp			/* and stack pointer up there too */
	or	%rcx, %rax
	andq	$0xfffffffffffffff0, %rsp	/* align stack */
	xorq	%rbp, %rbp			/* zero frame pointer */
	callq	*%rax
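
/*
 * The kernel is linked at the high canonical base 0xffffff8000000000 and
 * physical memory is also mapped into that range at this point, so OR-ing
 * the base into %rsp and into the _vstart address rebases both from their
 * low identity-mapped addresses to kernel virtual addresses before the
 * call transfers to vstart().
 */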

/*
 * AP (slave) CPUs enter here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */
	.align	ALIGN
	.globl	EXT(slave_pstart)
LEXT(slave_pstart)
	.code32
	cli			/* disable interrupts, so we don't */
				/* need IDT for a while */
	POSTCODE(SLAVE_PSTART_ENTRY)

	movl	$EXT(mp_slave_stack) + PAGE_SIZE, %esp

	/* set up identity mapping of page tables */
	movl	$INITPT_SEG_BASE,%eax
	movl	(KERNEL_PML4_INDEX*8)(%eax), %esi
	movl	%esi, (0)(%eax)
	movl	(KERNEL_PML4_INDEX*8+4)(%eax), %esi
	movl	%esi, (0+4)(%eax)
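	/*
	 * The two 32-bit moves above copy the 64-bit PML4 entry for the
	 * kernel's high mapping (slot KERNEL_PML4_INDEX) into slot 0, so
	 * that the low addresses this CPU is executing at stay mapped once
	 * paging comes on inside SWITCH_TO_64BIT_MODE.
	 */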

	movl	$0, %edi	/* "no kernbootstruct" */

	jmp	L_pstart_common	/* hop a ride to vstart() */

/* BEGIN HIBERNATE CODE */

.section __HIB, __text
/*
 * This code is linked into the kernel but is part of the "__HIB" section,
 * which means it is used by code running in the special context of restoring
 * the kernel text and data from the hibernation image read by the booter.
 * hibernate_kernel_entrypoint() and everything it calls or references
 * (i.e. hibernate_restore_phys_page()) needs to be careful to only touch
 * memory that is also in the "__HIB" section.
 */

	.align	ALIGN
	.globl	EXT(hibernate_machine_entrypoint)
.code32
LEXT(hibernate_machine_entrypoint)
	movl	%eax, %edi	/* regparm(1) calling convention */

	/* restore gdt */
	mov	$(SLEEP_SEG_BASE)+20, %eax	// load saved_gdt, this may break
	lgdtl	(%eax)
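	/*
	 * The hard-coded +20 is the offset of saved_gdt in the __SLEEP save
	 * area at the end of this file (temp_stack occupies 16 bytes and
	 * saved_eip 4); hence the warning above that this may break if that
	 * layout changes.
	 */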

	/* set up the protected mode segment registers */
	mov	$KERNEL_DS, %eax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	xor	%eax,%eax
	movw	%ax, %fs
	movw	%ax, %gs

	/* set up the page tables to use BootstrapPTD
	 * as done in idle_pt.c, but this must be done programmatically */
	mov	$(INITPT_SEG_BASE + PAGE_SIZE), %eax
	mov	$(INITPT_SEG_BASE + 2*PAGE_SIZE | INTEL_PTE_WRITE | INTEL_PTE_VALID), %ecx
	mov	$0x0, %edx
	mov	%ecx, (0*8+0)(%eax)
	mov	%edx, (0*8+4)(%eax)
	add	$(PAGE_SIZE), %ecx
	mov	%ecx, (1*8+0)(%eax)
	mov	%edx, (1*8+4)(%eax)
	add	$(PAGE_SIZE), %ecx
	mov	%ecx, (2*8+0)(%eax)
	mov	%edx, (2*8+4)(%eax)
	add	$(PAGE_SIZE), %ecx
	mov	%ecx, (3*8+0)(%eax)
	mov	%edx, (3*8+4)(%eax)
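	/*
	 * The unrolled stores above fill the first four 8-byte entries of
	 * the page-directory-pointer table at INITPT_SEG_BASE + PAGE_SIZE,
	 * pointing them at four consecutive page-directory pages beginning
	 * at INITPT_SEG_BASE + 2*PAGE_SIZE, each marked writable and valid;
	 * %edx supplies the zero upper half of each entry.
	 */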

	/* Temporary stack */
	mov	$(REAL_MODE_BOOTSTRAP_OFFSET + PROT_MODE_START), %esp

	SWITCH_TO_64BIT_MODE

	leaq	EXT(hibernate_kernel_entrypoint)(%rip),%rcx
	leaq	EXT(gIOHibernateRestoreStackEnd)(%rip),%rsp	/* switch to the bootup stack */
	movq	$0xffffff8000000000, %rax	/* adjust the pointer to be up high */
	orq	%rax, %rsp			/* and stack pointer up there too :D */
	orq	%rcx, %rax			/* put entrypoint in %rax */
	/* %edi is already filled with the header pointer */
	xorl	%esi, %esi			/* zero 2nd arg */
	xorl	%edx, %edx			/* zero 3rd arg */
	xorl	%ecx, %ecx			/* zero 4th arg */
	andq	$0xfffffffffffffff0, %rsp	/* align stack */
	/* (future-proofing; the stack should already be aligned) */
	xorq	%rbp, %rbp			/* zero frame pointer */
	call	*%rax				/* call instead of jmp to keep the required stack alignment */
	/* NOTREACHED */
	hlt

/* END HIBERNATE CODE */

#if CONFIG_SLEEP
/* BEGIN ACPI WAKEUP CODE */

#include <i386/acpi.h>


#define PA(addr) (addr)

/*
 * acpi_wake_start
 *
 * The code from acpi_wake_start to acpi_wake_end is copied to
 * memory below 1MB. The firmware waking vector is updated to
 * point at acpi_wake_start in low memory before sleeping.
 */

	.section __TEXT,__text
	.text
	.align	12	/* Page align for single bcopy_phys() */
	.code32
	.globl	EXT(acpi_wake_prot)
EXT(acpi_wake_prot):
	/* protected mode, paging disabled */

	/* jump to acpi_temp_reloc (stored in saved_eip) */
	mov	$(SLEEP_SEG_BASE)+16, %eax
	mov	(%eax), %ecx	// Load acpi_temp_reloc from saved_eip
	jmp	*%ecx
acpi_temp_reloc:
	mov	$(SLEEP_SEG_BASE)+16, %esp	/* set up stack for 64-bit */

	SWITCH_TO_64BIT_MODE

	lea	Lwake_64(%rip), %rax
	movq	$0xffffff8000000000, %rdx
	orq	%rdx, %rax
	jmp	*%rax
	.code32

	.code64

/*
 * acpi_sleep_cpu(acpi_sleep_callback func, void * refcon)
 *
 * Save CPU state before platform sleep. Restore CPU state
 * following wake up.
 */

ENTRY(acpi_sleep_cpu)
	push	%rbp
	mov	%rsp, %rbp

	/* save flags */
	pushf

	/* save general purpose registers */
	push	%rax
	push	%rbx
	push	%rcx
	push	%rdx
	push	%rbp
	push	%rsi
	push	%rdi
	push	%r8
	push	%r9
	push	%r10
	push	%r11
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	%rsp, saved_rsp(%rip)

	/* make sure tlb is flushed */
	mov	%cr3,%rax
	mov	%rax,%cr3
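	/* (Rewriting %cr3 with its own value flushes all non-global TLB
	 * entries, so no stale translations survive into the saved state.) */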

	/* save control registers */
	mov	%cr0, %rax
	mov	%rax, saved_cr0(%rip)
	mov	%cr2, %rax
	mov	%rax, saved_cr2(%rip)
	mov	%cr4, %rax
	mov	%rax, saved_cr4(%rip)

	/* save segment registers */
	movw	%es, saved_es(%rip)
	movw	%fs, saved_fs(%rip)
	movw	%gs, saved_gs(%rip)
	movw	%ss, saved_ss(%rip)

	/* save the 64-bit user and kernel gs base */
	/* note: the user's base is currently swapped into the kernel base MSR */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	rdmsr
	movl	%eax, saved_ugs_base(%rip)
	movl	%edx, saved_ugs_base+4(%rip)
	swapgs
	rdmsr
	movl	%eax, saved_kgs_base(%rip)
	movl	%edx, saved_kgs_base+4(%rip)
	swapgs
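	/*
	 * MSR_IA32_KERNEL_GS_BASE holds whichever gs base is inactive, so
	 * the first rdmsr reads the user base; swapgs exchanges the active
	 * and inactive bases so the second rdmsr reads the kernel base, and
	 * the final swapgs puts things back the way they were.
	 */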

	/* save descriptor table registers */
	sgdt	saved_gdt(%rip)
	sldt	saved_ldt(%rip)
	sidt	saved_idt(%rip)
	str	saved_tr(%rip)

	/*
	 * When the system wakes up, the real mode wake handler will revert to
	 * protected mode, then jump to the address stored at saved_eip.
	 */
	leaq	acpi_temp_reloc(%rip), %rax
	mov	%eax, saved_eip(%rip)
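	/*
	 * Only the low 32 bits of the address are stored, since the wake
	 * handler jumps to it from 32-bit protected mode with paging off;
	 * this works because the kernel's physical load address presumably
	 * matches the low 32 bits of its high virtual address here.
	 */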

	/*
	 * Call the ACPI function provided by the caller to sleep the platform.
	 * This call will not return on success.
	 */

	xchgq	%rdi, %rsi
	call	*%rsi
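	/*
	 * Per the calling convention, func arrived in %rdi and refcon in
	 * %rsi; the xchgq moves refcon into %rdi so it becomes func's
	 * argument, and func itself is then called through %rsi.
	 */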

	/* sleep failed, no cpu context lost */
	jmp	wake_restore

	.globl	EXT(acpi_wake_prot_entry)
EXT(acpi_wake_prot_entry):
	POSTCODE(ACPI_WAKE_PROT_ENTRY)
	/*
	 * Entry from the hibernate code in iokit/Kernel/IOHibernateRestoreKernel.c
	 *
	 * Reset the first 4 PDEs to point to entries in IdlePTD, as done
	 * in Idle_PTs_init() during startup.
	 */
	leaq	_IdlePDPT(%rip), %rax
	movq	_IdlePTD(%rip), %rcx
	mov	%ecx, %ecx	/* zero top 32 bits of %rcx */
	orq	$(INTEL_PTE_WRITE|INTEL_PTE_VALID), %rcx
	movq	%rcx, 0x0(%rax)
	add	$0x1000, %rcx
	movq	%rcx, 0x8(%rax)
	add	$0x1000, %rcx
	movq	%rcx, 0x10(%rax)
	add	$0x1000, %rcx
	movq	%rcx, 0x18(%rax)
	mov	%cr3, %rax
	mov	%rax, %cr3

Lwake_64:
	/*
	 * restore cr4, PAE and NXE states in an orderly fashion
	 */
	mov	saved_cr4(%rip), %rcx
	mov	%rcx, %cr4

	mov	$(MSR_IA32_EFER), %ecx		/* MSR number in ecx */
	rdmsr					/* MSR value returned in edx:eax */
	or	$(MSR_IA32_EFER_NXE), %eax	/* set NXE bit in low 32 bits */
	wrmsr					/* update Extended Feature Enable reg */
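	/*
	 * NXE is set before CR0 re-enables paging below; otherwise any NX
	 * bits already present in the page tables would be treated as
	 * reserved bits and fault.
	 */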

	/* restore kernel GDT */
	lgdt	EXT(protected_mode_gdtr)(%rip)

	movq	saved_cr2(%rip), %rax
	mov	%rax, %cr2

	/* restore CR0, paging enabled */
	mov	saved_cr0(%rip), %rax
	mov	%rax, %cr0

	/* protected mode, paging enabled */
	POSTCODE(ACPI_WAKE_PAGED_ENTRY)

	/* load null segment selectors */
	xor	%eax, %eax
	movw	%ax, %ss
	movw	%ax, %ds

	/* restore local and interrupt descriptor tables */
	lldt	saved_ldt(%rip)
	lidt	saved_idt(%rip)

	/* restore segment registers */
	movw	saved_es(%rip), %es
	movw	saved_fs(%rip), %fs
	movw	saved_gs(%rip), %gs
	movw	saved_ss(%rip), %ss

	/* restore the 64-bit kernel and user gs base */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	movl	saved_kgs_base(%rip), %eax
	movl	saved_kgs_base+4(%rip), %edx
	wrmsr
	swapgs
	movl	saved_ugs_base(%rip), %eax
	movl	saved_ugs_base+4(%rip), %edx
	wrmsr

	/*
	 * Restore the task register. Before doing this, clear the busy flag
	 * in the TSS descriptor set by the CPU.
	 */
	lea	saved_gdt(%rip), %rax
	movq	2(%rax), %rdx		/* GDT base, skip limit word */
	movl	$(KERNEL_TSS), %eax	/* TSS segment selector */
	movb	$(K_TSS), 5(%rdx, %rax)	/* clear busy flag */
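	/*
	 * Byte 5 of a descriptor is its access/type field. The CPU marked
	 * the TSS descriptor busy when TR was last loaded, and ltr faults
	 * on a busy TSS, so the type byte is rewritten to the non-busy
	 * K_TSS value first.
	 */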

	ltr	saved_tr(%rip)		/* restore TR */

wake_restore:
	mov	saved_rsp(%rip), %rsp

	/* restore general purpose registers */
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%r11
	pop	%r10
	pop	%r9
	pop	%r8
	pop	%rdi
	pop	%rsi
	pop	%rbp
	pop	%rdx
	pop	%rcx
	pop	%rbx
	pop	%rax

	/* restore flags */
	popf

	leave
	ret

/* END ACPI WAKEUP CODE */
#endif /* CONFIG_SLEEP */

/* Code to get from real mode to protected mode */

#define	operand_size_prefix	.byte 0x66
#define	address_size_prefix	.byte 0x67
#define	cs_base_prefix		.byte 0x2e

#define	LJMP(segment, address)				\
	operand_size_prefix				;\
	.byte	0xea					;\
	.long	address - EXT(real_mode_bootstrap_base)	;\
	.word	segment

#define	LGDT(address)					\
	cs_base_prefix					;\
	address_size_prefix				;\
	operand_size_prefix				;\
	.word	0x010f					;\
	.byte	0x15					;\
	.long	address - EXT(real_mode_bootstrap_base)
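
/*
 * These macros hand-assemble 32-bit-operand forms of ljmp and lgdt so
 * they execute correctly while the CPU is still in real mode. Operands
 * are emitted relative to real_mode_bootstrap_base because this page is
 * copied to a low-memory trampoline at REAL_MODE_BOOTSTRAP_OFFSET before
 * it runs.
 */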

	.section __TEXT,__text
	.align	12	/* Page align for single bcopy_phys() */
	.code32
Entry(real_mode_bootstrap_base)
	cli

	LGDT(EXT(protected_mode_gdtr))

	/* set the PE bit of CR0 */
	mov	%cr0, %eax
	inc	%eax
	mov	%eax, %cr0
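	/* (CR0.PE is bit 0 and is clear in real mode, so the inc is a
	 * compact way of setting it.) */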

	/* reload CS register */
	LJMP(KERNEL32_CS, 1f + REAL_MODE_BOOTSTRAP_OFFSET)
1:

	/* we are in protected mode now */
	/* set up the segment registers */
	mov	$KERNEL_DS, %eax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	xor	%eax,%eax
	movw	%ax, %fs
	movw	%ax, %gs

	POSTCODE(SLAVE_STARTPROG_ENTRY);

	mov	PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, %ecx
	jmp	*%ecx

Entry(protected_mode_gdtr)
	.short	160		/* limit (8 bytes * 20 segs) */
	.quad	EXT(master_gdt)

Entry(real_mode_bootstrap_end)

/* Save area used across sleep/wake */
	.section __SLEEP, __data
	.align	2

temp_stack:	.quad 0
		.quad 0
saved_eip:	.long 0
saved_gdt:	.word 0
		.quad 0
saved_rsp:	.quad 0
saved_es:	.word 0
saved_fs:	.word 0
saved_gs:	.word 0
saved_ss:	.word 0
saved_cr0:	.quad 0
saved_cr2:	.quad 0
saved_cr4:	.quad 0
saved_idt:	.word 0
		.quad 0
saved_ldt:	.word 0
saved_tr:	.word 0
saved_kgs_base:	.quad 0
saved_ugs_base:	.quad 0
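
/*
 * Note: the wake path above hard-codes offsets into this save area
 * (SLEEP_SEG_BASE+16 for saved_eip, SLEEP_SEG_BASE+20 for saved_gdt);
 * field order and sizes here must stay in sync with those constants.
 */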