/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <debug.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <assym.s>

#include <i386/cpuid.h>
#include <i386/acpi.h>

	.code32

/*
 * Interrupt and bootup stack for initial processor.
 * Note: we switch to a dynamically allocated interrupt stack once VM is up.
 */

/* in the __HIB section since the hibernate restore code uses this stack. */
	.section __HIB, __data
	.align	12

	.globl	EXT(low_intstack)
EXT(low_intstack):
	.globl	EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):

	.space	INTSTACK_SIZE

	.globl	EXT(low_eintstack)
EXT(low_eintstack):
	.globl	EXT(gIOHibernateRestoreStackEnd)
EXT(gIOHibernateRestoreStackEnd):

/* back to the regular __DATA section. */

	.section __DATA, __data

/*
 * Stack for machine-check handler.
 */
	.align	12
	.globl	EXT(mc_task_stack)
EXT(mc_task_stack):
	.space	INTSTACK_SIZE
	.globl	EXT(mc_task_stack_end)
EXT(mc_task_stack_end):

/* Must not clobber EDI */
#define SWITCH_TO_64BIT_MODE						 \
	movl	$(CR4_PAE), %eax	/* enable PAE */		;\
	movl	%eax, %cr4						;\
	movl	$MSR_IA32_EFER, %ecx					;\
	rdmsr								;\
	/* enable long mode, NX */					;\
	orl	$(MSR_IA32_EFER_LME | MSR_IA32_EFER_NXE), %eax		;\
	wrmsr								;\
	movl	$EXT(BootPML4), %eax					;\
	movl	%eax, %cr3						;\
	movl	%cr0, %eax						;\
	orl	$(CR0_PG|CR0_WP), %eax	/* enable paging */		;\
	movl	%eax, %cr0						;\
	ljmpl	$KERNEL64_CS, $64f					;\
64:									;\
	.code64

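/*
 * Note on the sequence above: long mode must be entered in this order --
 * set CR4.PAE, set EFER.LME (and here NXE) via wrmsr, load CR3 with a
 * valid PML4, then set CR0.PG.  The far jump through KERNEL64_CS reloads
 * CS with a 64-bit code descriptor, after which .code64 instructions are
 * legal.  The local label is numeric ("64:") so the macro can be expanded
 * more than once without a duplicate-symbol error.
 */
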
/*
 * BSP CPU start here.
 *	eax points to kernbootstruct
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */

	.code32
	.text
	.section __HIB, __text
	.align	ALIGN
	.globl	EXT(_start)
	.globl	EXT(pstart)
LEXT(_start)
LEXT(pstart)

/*
 * Here we do the minimal setup to switch from 32 bit mode to 64 bit long mode.
 *
 * Initial memory layout:
 *
 *	-------------------------
 *	|			|
 *	| Kernel text/data	|
 *	|			|
 *	|-----------------------| Kernel text base addr - 2MB-aligned
 *	| padding		|
 *	|-----------------------|
 *	| __HIB section		|
 *	|-----------------------| Page-aligned
 *	|			|
 *	| padding		|
 *	|			|
 *	------------------------- 0
 *
 */
	mov	%eax, %edi	/* save kernbootstruct */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	POSTCODE(PSTART_ENTRY)

	/*
	 * Set up segmentation
	 */
	movl	$EXT(protected_mode_gdtr), %eax
	lgdtl	(%eax)

	/*
	 * Rebase Boot page tables to kernel base address.
	 */
	movl	$EXT(BootPML4), %eax			// Level 4:
	add	%eax, 0*8+0(%eax)			//  - 1:1
	add	%eax, KERNEL_PML4_INDEX*8+0(%eax)	//  - kernel space

	movl	$EXT(BootPDPT), %edx			// Level 3:
	add	%eax, 0*8+0(%edx)
	add	%eax, 1*8+0(%edx)
	add	%eax, 2*8+0(%edx)
	add	%eax, 3*8+0(%edx)

	POSTCODE(PSTART_REBASE)

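/*
 * The adds above assume the Boot page-table entries were assembled as
 * image-relative offsets rather than final physical addresses: adding the
 * actual load address of BootPML4 (in %eax) turns each entry into the
 * physical address the MMU requires, wherever the booter placed the kernel.
 */
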
/* the following code is shared by the master CPU and all slave CPUs */
L_pstart_common:
	/*
	 * switch to 64 bit mode
	 */
	SWITCH_TO_64BIT_MODE

	/* Flush data segment selectors */
	xor	%eax, %eax
	mov	%ax, %ss
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %fs
	mov	%ax, %gs

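/*
 * A null selector is legal in SS/DS/ES/FS/GS in 64-bit mode: the CPU
 * ignores the base and limit of these segments (the FS/GS bases come from
 * their MSRs), so zeroing them simply discards the stale 32-bit
 * descriptors loaded before the mode switch.
 */
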
	test	%edi, %edi	/* Populate stack canary on BSP */
	jz	Lvstartshim

	mov	$1, %eax
	cpuid
	test	$(1 << 30), %ecx
	jz	Lnon_rdrand
	rdrand	%rax		/* RAX := 64 bits of DRBG entropy */
	jnc	Lnon_rdrand	/* TODO: complain if DRBG fails at this stage */

Lstore_random_guard:
	xor	%ah, %ah	/* Security: zero second byte of stack canary */
	movq	%rax, ___stack_chk_guard(%rip)
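	/*
	 * Keeping a zero byte in the guard is a common "terminator canary"
	 * design: a canary containing a NUL stops C string operations early,
	 * making it harder to overwrite or leak the value via
	 * unterminated-string bugs.
	 */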
	/* %edi = boot_args_start if BSP */
Lvstartshim:

	POSTCODE(PSTART_VSTART)

	/* %edi = boot_args_start */

	leaq	_vstart(%rip), %rcx
	movq	$0xffffff8000000000, %rax	/* adjust pointer up high */
	or	%rax, %rsp			/* and stack pointer up there */
	or	%rcx, %rax
	andq	$0xfffffffffffffff0, %rsp	/* align stack */
	xorq	%rbp, %rbp			/* zero frame pointer */
	callq	*%rax

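/*
 * At this point the kernel is reachable both through the 1:1 (low) mapping
 * and through its high kernel-space alias; OR-ing the call target and %rsp
 * with the high base (0xffffff8000000000) moves execution and the stack
 * into the high mapping before vstart() runs.  A plain OR suffices because
 * the low addresses have no bits in common with the high base.
 */
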
Lnon_rdrand:
	rdtsc			/* EDX:EAX := TSC */
	/* Distribute low order bits */
	mov	%eax, %ecx
	xor	%al, %ah
	shl	$16, %rcx
	xor	%rcx, %rax
	xor	%eax, %edx

	/* Incorporate ASLR entropy, if any */
	lea	(%rip), %rcx
	shr	$21, %rcx
	movzbl	%cl, %ecx
	shl	$16, %ecx
	xor	%ecx, %edx

	mov	%ah, %cl
	ror	%cl, %edx	/* Right rotate EDX (TSC&0xFF ^ (TSC>>8 & 0xFF))&1F */
	shl	$32, %rdx
	xor	%rdx, %rax
	mov	%cl, %al
	jmp	Lstore_random_guard
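
/*
 * This fallback is best-effort when RDRAND is absent: it folds together
 * TSC bits and, through the %rip value shifted by 21 (the 2MB slide
 * granularity), whatever randomness kernel ASLR contributed.  Weaker than
 * DRBG output, but better than a constant guard.
 */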
/*
 * AP (slave) CPUs enter here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */
	.align	ALIGN
	.globl	EXT(slave_pstart)
LEXT(slave_pstart)
	.code32
	cli				/* disable interrupts, so we don't */
					/* need IDT for a while */
	POSTCODE(SLAVE_PSTART)

	movl	$EXT(mp_slave_stack) + PAGE_SIZE, %esp

	xor	%edi, %edi		/* AP, no "kernbootstruct" */

	jmp	L_pstart_common		/* hop a ride to vstart() */


/* BEGIN HIBERNATE CODE */

	.section __HIB, __text
/*
 * This code is linked into the kernel but part of the "__HIB" section,
 * which means it's used by code running in the special context of restoring
 * the kernel text and data from the hibernation image read by the booter.
 * hibernate_kernel_entrypoint() and everything it calls or references
 * (i.e. hibernate_restore_phys_page()) needs to be careful to only touch
 * memory also in the "__HIB" section.
 */

	.align	ALIGN
	.globl	EXT(hibernate_machine_entrypoint)
	.code32
LEXT(hibernate_machine_entrypoint)
	movl	%eax, %edi	/* regparm(1) calling convention */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	/*
	 * Set up GDT
	 */
	movl	$EXT(master_gdtr), %eax
	lgdtl	(%eax)

	/* Switch to 64-bit on the Boot PTs */
	SWITCH_TO_64BIT_MODE

	leaq	EXT(hibernate_kernel_entrypoint)(%rip), %rcx

	/* adjust the pointers to be up high */
	movq	$0xffffff8000000000, %rax
	orq	%rax, %rsp
	orq	%rcx, %rax

	/* %edi is already filled with header pointer */
	xorl	%esi, %esi	/* zero 2nd arg */
	xorl	%edx, %edx	/* zero 3rd arg */
	xorl	%ecx, %ecx	/* zero 4th arg */
	andq	$0xfffffffffffffff0, %rsp	/* align stack */

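	/*
	 * The SysV AMD64 ABI expects the stack to be 8 bytes past a
	 * 16-byte boundary at function entry: the andq above gives a
	 * 16-byte-aligned stack, and the call below pushes the 8-byte
	 * return address.  That is why a call (not a jmp) is used even
	 * though the entrypoint never returns.
	 */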
	/* call instead of jmp to keep the required stack alignment */
	xorq	%rbp, %rbp	/* zero frame pointer */
	call	*%rax

	/* NOTREACHED */
	hlt

/* END HIBERNATE CODE */

#if CONFIG_SLEEP
/* BEGIN ACPI WAKEUP CODE */

#include <i386/acpi.h>


/*
 * acpi_wake_start
 */

	.section __TEXT, __text
	.code64

/*
 * acpi_sleep_cpu(acpi_sleep_callback func, void * refcon)
 *
 * Save CPU state before platform sleep. Restore CPU state
 * following wake up.
 */

ENTRY(acpi_sleep_cpu)
	push	%rbp
	mov	%rsp, %rbp

	/* save flags */
	pushf

	/* save general purpose registers */
	push	%rax
	push	%rbx
	push	%rcx
	push	%rdx
	push	%rbp
	push	%rsi
	push	%rdi
	push	%r8
	push	%r9
	push	%r10
	push	%r11
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	%rsp, saved_rsp(%rip)

	/* make sure tlb is flushed */
	mov	%cr3, %rax
	mov	%rax, %cr3

	/* save control registers */
	mov	%cr0, %rax
	mov	%rax, saved_cr0(%rip)
	mov	%cr2, %rax
	mov	%rax, saved_cr2(%rip)
	mov	%cr3, %rax
	mov	%rax, saved_cr3(%rip)
	mov	%cr4, %rax
	mov	%rax, saved_cr4(%rip)

	/* save segment registers */
	movw	%es, saved_es(%rip)
	movw	%fs, saved_fs(%rip)
	movw	%gs, saved_gs(%rip)
	movw	%ss, saved_ss(%rip)

	/* save the 64bit user and kernel gs base */
	/* note: user's currently swapped into kernel base MSR */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	rdmsr
	movl	%eax, saved_ugs_base(%rip)
	movl	%edx, saved_ugs_base+4(%rip)
	swapgs
	rdmsr
	movl	%eax, saved_kgs_base(%rip)
	movl	%edx, saved_kgs_base+4(%rip)
	swapgs
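	/*
	 * While running in the kernel, the user's gs base sits parked in
	 * the KERNEL_GS_BASE MSR, so the first rdmsr above read the user
	 * value.  swapgs exchanges it with the active gs base, letting the
	 * second rdmsr read the kernel value; the trailing swapgs restores
	 * the original arrangement.
	 */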

	/* save descriptor table registers */
	sgdt	saved_gdt(%rip)
	sldt	saved_ldt(%rip)
	sidt	saved_idt(%rip)
	str	saved_tr(%rip)

	/*
	 * Call ACPI function provided by the caller to sleep the platform.
	 * This call will not return on success.
	 */
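	/*
	 * func arrived in %rdi and refcon in %rsi (SysV AMD64 argument
	 * order); the xchg makes refcon the callback's first argument and
	 * leaves the function pointer in %rsi for the indirect call.
	 */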

	xchgq	%rdi, %rsi
	call	*%rsi

	/* sleep failed, no cpu context lost */
	jmp	wake_restore

	.section __HIB, __text
	.code32
	.globl	EXT(acpi_wake_prot)
EXT(acpi_wake_prot):
	/* protected mode, paging disabled */
	movl	$EXT(low_eintstack), %esp

	SWITCH_TO_64BIT_MODE

	jmp	Lwake_64

	.section __TEXT, __text
	.code64


	.globl	EXT(acpi_wake_prot_entry)
EXT(acpi_wake_prot_entry):
	POSTCODE(ACPI_WAKE_PROT_ENTRY)
	/*
	 * Return from hibernate code in iokit/Kernel/IOHibernateRestoreKernel.c
	 */
Lwake_64:
	/*
	 * restore cr4, PAE and NXE states in an orderly fashion
	 */
	mov	saved_cr4(%rip), %rcx
	mov	%rcx, %cr4

	mov	$(MSR_IA32_EFER), %ecx		/* MSR number in ecx */
	rdmsr					/* MSR value in edx:eax */
	or	$(MSR_IA32_EFER_NXE), %eax	/* Set NXE bit in low 32-bits */
	wrmsr					/* Update */

	movq	saved_cr2(%rip), %rax
	mov	%rax, %cr2

	/* restore CR0, paging enabled */
	mov	saved_cr0(%rip), %rax
	mov	%rax, %cr0

	/* restore the page tables */
	mov	saved_cr3(%rip), %rax
	mov	%rax, %cr3

	/* protected mode, paging enabled */
	POSTCODE(ACPI_WAKE_PAGED_ENTRY)

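/*
 * Ordering note: NXE is set before the saved CR3 is reloaded because,
 * with NXE clear, the NX bit in the restored kernel mappings would be a
 * reserved bit and accesses through them would raise reserved-bit page
 * faults.
 */
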
	/* load null segment selectors */
	xor	%eax, %eax
	movw	%ax, %ss
	movw	%ax, %ds

	/* restore descriptor tables */
	lgdt	saved_gdt(%rip)
	lldt	saved_ldt(%rip)
	lidt	saved_idt(%rip)

	/* restore segment registers */
	movw	saved_es(%rip), %es
	movw	saved_fs(%rip), %fs
	movw	saved_gs(%rip), %gs
	movw	saved_ss(%rip), %ss

	/* restore the 64bit kernel and user gs base */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	movl	saved_kgs_base(%rip), %eax
	movl	saved_kgs_base+4(%rip), %edx
	wrmsr
	swapgs
	movl	saved_ugs_base(%rip), %eax
	movl	saved_ugs_base+4(%rip), %edx
	wrmsr

	/*
	 * Restore task register. Before doing this, clear the busy flag
	 * in the TSS descriptor set by the CPU.
	 */
	lea	saved_gdt(%rip), %rax
	movq	2(%rax), %rdx			/* GDT base, skip limit word */
	movl	$(KERNEL_TSS), %eax		/* TSS segment selector */
	movb	$(K_TSS), 5(%rdx, %rax)		/* clear busy flag */

	ltr	saved_tr(%rip)			/* restore TR */

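	/*
	 * ltr raises #GP if the referenced TSS descriptor is already marked
	 * busy, and the CPU set that busy bit when TR was first loaded;
	 * rewriting byte 5 (the type field) to K_TSS above makes the
	 * descriptor "available" again so it can be reloaded.
	 */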
wake_restore:
	mov	saved_rsp(%rip), %rsp

	/* restore general purpose registers */
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%r11
	pop	%r10
	pop	%r9
	pop	%r8
	pop	%rdi
	pop	%rsi
	pop	%rbp
	pop	%rdx
	pop	%rcx
	pop	%rbx
	pop	%rax

	/* restore flags */
	popf

	leave
	ret

/* END ACPI WAKEUP CODE */
#endif /* CONFIG_SLEEP */

/* Code to get from real mode to protected mode */

#define operand_size_prefix	.byte 0x66
#define address_size_prefix	.byte 0x67
#define cs_base_prefix		.byte 0x2e

#define LJMP(segment, address)					\
	operand_size_prefix					;\
	.byte	0xea						;\
	.long	address - EXT(real_mode_bootstrap_base)		;\
	.word	segment

#define LGDT(address)						\
	cs_base_prefix						;\
	address_size_prefix					;\
	operand_size_prefix					;\
	.word	0x010f						;\
	.byte	0x15						;\
	.long	address - EXT(real_mode_bootstrap_base)

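/*
 * These macros hand-assemble instructions because the code below executes
 * in real mode while the assembler emits .code32 output: 0xEA is a far jmp
 * with an immediate segment:offset, and 0x0F 0x01 with ModRM byte 0x15 is
 * lgdt with a 32-bit absolute operand.  The explicit operand/address-size
 * prefixes select the 32-bit forms from 16-bit mode, and offsets are taken
 * relative to real_mode_bootstrap_base since this page is copied to low
 * physical memory at runtime.
 */
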
	.section __HIB, __text
	.align	12	/* Page align for single bcopy_phys() */
	.code32
Entry(real_mode_bootstrap_base)
	cli

	LGDT(EXT(protected_mode_gdtr))

	/* set the PE bit of CR0 */
	mov	%cr0, %eax
	inc	%eax
	mov	%eax, %cr0

	/* reload CS register */
	LJMP(KERNEL32_CS, 1f + REAL_MODE_BOOTSTRAP_OFFSET)
1:

	/* we are in protected mode now */
	/* set up the segment registers */
	mov	$KERNEL_DS, %eax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	xor	%eax, %eax
	movw	%ax, %fs
	movw	%ax, %gs

	POSTCODE(SLAVE_STARTPROG_ENTRY);

	mov	PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, %ecx
	jmp	*%ecx

Entry(protected_mode_gdtr)
	.short	160		/* limit (8*20 segs) */
	.quad	EXT(master_gdt)

Entry(real_mode_bootstrap_end)

/* Save area used across sleep/wake */
	.section __HIB, __data
	.align	2

/* gdtr for real address of master_gdt in HIB (not the aliased address) */
Entry(master_gdtr)
	.word	160		/* limit (8*20 segs) */
	.quad	EXT(master_gdt)

saved_gdt:	.word	0
		.quad	0
saved_rsp:	.quad	0
saved_es:	.word	0
saved_fs:	.word	0
saved_gs:	.word	0
saved_ss:	.word	0
saved_cr0:	.quad	0
saved_cr2:	.quad	0
saved_cr3:	.quad	0
saved_cr4:	.quad	0
saved_idt:	.word	0
		.quad	0
saved_ldt:	.word	0
saved_tr:	.word	0
saved_kgs_base:	.quad	0
saved_ugs_base:	.quad	0
b0d623f7 | 620 |