/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <platforms.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <i386/cpuid.h>
#include <i386/acpi.h>
/*
 * Interrupt and bootup stack for initial processor.
 * Note: we switch to a dynamically allocated interrupt stack once VM is up.
 */

/* in the __HIB section since the hibernate restore code uses this stack. */
        .section __HIB, __data

        .globl  EXT(low_intstack)
        .globl  EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):

        .globl  EXT(low_eintstack)
        .globl  EXT(gIOHibernateRestoreStackEnd)
EXT(gIOHibernateRestoreStackEnd):

/* back to the regular __DATA section. */
        .section __DATA, __data

/*
 * Stack for machine-check handler.
 */
        .globl  EXT(mc_task_stack)
        .globl  EXT(mc_task_stack_end)
EXT(mc_task_stack_end):
/* Must not clobber EDI */
#define SWITCH_TO_64BIT_MODE                                     \
        movl    $(CR4_PAE), %eax        /* enable PAE */        ;\
        movl    %eax, %cr4                                      ;\
        movl    $MSR_IA32_EFER, %ecx                            ;\
        rdmsr                                                   ;\
        /* enable long mode, NX */                              ;\
        orl     $(MSR_IA32_EFER_LME | MSR_IA32_EFER_NXE), %eax  ;\
        wrmsr                                                   ;\
        movl    $EXT(BootPML4), %eax                            ;\
        movl    %eax, %cr3                                      ;\
        movl    %cr0, %eax                                      ;\
        orl     $(CR0_PG|CR0_WP), %eax  /* enable paging */     ;\
        movl    %eax, %cr0                                      ;\
        /* "The Aussie Maneuver" ("Myria" variant) */           ;\
        pushl   $(0xcb<<24)|KERNEL64_CS /* reload CS with 0x08 */;\
        call    .-1
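/*
 * How the switch completes: with CR4.PAE, EFER.LME/NXE, CR3 and CR0.PG set,
 * the CPU is in long mode but still executing on a 32-bit (compatibility) CS.
 * The pushl leaves KERNEL64_CS in the low word of the pushed dword, and its
 * immediate is encoded so that the byte just before the call is 0xcb, a far
 * return opcode.  "call .-1" pushes a return address and executes that byte;
 * the far return then pops the return address together with KERNEL64_CS,
 * reloading CS with the 64-bit code segment and landing in 64-bit mode.
 */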
/*
 * BSP CPU starts here.
 *      eax points to kernbootstruct
 *
 * Environment:
 *      protected mode, no paging, flat 32-bit address space.
 *      (Code/data/stack segments have base == 0, limit == 4G)
 */

        .section __HIB, __text

/*
 * Here we do the minimal setup to switch from 32 bit mode to 64 bit long mode.
 *
 * Initial memory layout:
 *
 *      -------------------------
 *      | Kernel text/data      |
 *      |-----------------------| Kernel text base addr - 2MB-aligned
 *      |-----------------------|
 *      |-----------------------| Page-aligned
 *      ------------------------- 0
 */
        mov     %eax, %edi      /* save kernbootstruct */

        /* Use low 32-bits of address as 32-bit stack */
        movl    $EXT(low_eintstack), %esp

        POSTCODE(PSTART_ENTRY)

        /*
         * Set up segmentation
         */
        movl    $EXT(protected_mode_gdtr), %eax

        /*
         * Rebase Boot page tables to kernel base address.
         */
        movl    $EXT(BootPML4), %eax                    // Level 4:
        add     %eax, 0*8+0(%eax)                       //  - 1:1
        add     %eax, KERNEL_PML4_INDEX*8+0(%eax)       //  - kernel space

        movl    $EXT(BootPDPT), %edx                    // Level 3:
        add     %eax, 0*8+0(%edx)
        add     %eax, 1*8+0(%edx)
        add     %eax, 2*8+0(%edx)
        add     %eax, 3*8+0(%edx)

        POSTCODE(PSTART_REBASE)
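        /*
         * The adds above patch the statically-built boot page tables in
         * place: each touched PML4/PDPT entry has the tables' runtime base
         * address added in, so the entries end up holding the physical
         * locations of the next-level tables for both the 1:1 (identity)
         * slot and the kernel-space slot.
         */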
/* the following code is shared by the master CPU and all slave CPUs */
L_pstart_common:
        /*
         * switch to 64 bit mode
         */
        SWITCH_TO_64BIT_MODE

        /* Flush data segment selectors */

        test    %edi, %edi      /* Populate stack canary on BSP */

        test    $(1 << 30), %ecx
        RDRAND_RAX              /* RAX := 64 bits of DRBG entropy */
        jnc     Lnon_rdrand     /* TODO: complain if DRBG fails at this stage */

Lstore_random_guard:
        xor     %ah, %ah        /* Security: zero second byte of stack canary */
        movq    %rax, ___stack_chk_guard(%rip)
        /* %edi = boot_args_start if BSP */

        POSTCODE(PSTART_VSTART)

        /* %edi = boot_args_start */
        leaq    _vstart(%rip), %rcx
        movq    $0xffffff8000000000, %rax       /* adjust pointer up high */
        or      %rax, %rsp                      /* and stack pointer up there */
        andq    $0xfffffffffffffff0, %rsp       /* align stack */
        xorq    %rbp, %rbp                      /* zero frame pointer */
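        /*
         * The boot page tables map the kernel both 1:1 and at the canonical
         * kernel base 0xffffff8000000000, so OR-ing that base into the stack
         * pointer (and, similarly, into the vstart entry pointer) simply
         * moves execution onto the high-address alias of the same physical
         * pages before any C code runs at its linked addresses.
         */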
Lnon_rdrand:
        rdtsc                   /* EDX:EAX := TSC */
        /* Distribute low order bits */

        /* Incorporate ASLR entropy, if any */

        ror     %cl, %edx       /* Right rotate EDX (TSC&0xFF ^ (TSC>>8 & 0xFF))&1F */

        jmp     Lstore_random_guard
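        /*
         * Stack-canary seeding, in brief: when CPUID reports RDRAND support
         * (ECX bit 30), the guard value comes straight from the hardware
         * DRBG; otherwise the Lnon_rdrand path mixes TSC bits (plus any ASLR
         * entropy) into a weaker fallback value.  In both cases the second
         * byte of the guard is zeroed so that an embedded NUL terminator
         * keeps string-based overflows from reproducing the full canary.
         */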
/*
 * AP (slave) CPUs enter here.
 *
 * Environment:
 *      protected mode, no paging, flat 32-bit address space.
 *      (Code/data/stack segments have base == 0, limit == 4G)
 */
        .globl  EXT(slave_pstart)
        cli                             /* disable interrupts, so we don't */
                                        /* need IDT for a while */
        POSTCODE(SLAVE_PSTART)

        movl    $EXT(mp_slave_stack) + PAGE_SIZE, %esp

        xor     %edi, %edi              /* AP, no "kernbootstruct" */

        jmp     L_pstart_common         /* hop a ride to vstart() */
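        /*
         * APs take the same L_pstart_common path as the BSP; the zeroed %edi
         * (no kernbootstruct) is what later distinguishes a slave processor
         * from the boot processor, e.g. when deciding whether to populate
         * the stack canary or pass boot_args on to vstart().
         */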
/* BEGIN HIBERNATE CODE */

        .section __HIB, __text
/*
 * This code is linked into the kernel but part of the "__HIB" section,
 * which means it's used by code running in the special context of restoring
 * the kernel text and data from the hibernation image read by the booter.
 * hibernate_kernel_entrypoint() and everything it calls or references
 * (i.e. hibernate_restore_phys_page()) need to be careful to only touch
 * memory also in the "__HIB" section.
 */
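/*
 * The reason for that restriction: when this entry point runs, only the
 * __HIB pages are guaranteed to hold valid contents; the rest of the kernel
 * image is still being reconstructed from the hibernation image, so touching
 * memory outside __HIB would read or clobber stale data.
 */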
        .globl  EXT(hibernate_machine_entrypoint)
LEXT(hibernate_machine_entrypoint)
        movl    %eax, %edi      /* regparm(1) calling convention */

        /* Use low 32-bits of address as 32-bit stack */
        movl    $EXT(low_eintstack), %esp

        movl    $EXT(master_gdtr), %eax

        /* Switch to 64-bit on the Boot PTs */
        SWITCH_TO_64BIT_MODE

        leaq    EXT(hibernate_kernel_entrypoint)(%rip), %rcx

        /* adjust the pointers to be up high */
        movq    $0xffffff8000000000, %rax
        orq     %rax, %rsp
        orq     %rcx, %rax

        /* %edi is already filled with header pointer */
        xorl    %esi, %esi      /* zero 2nd arg */
        xorl    %edx, %edx      /* zero 3rd arg */
        xorl    %ecx, %ecx      /* zero 4th arg */
        andq    $0xfffffffffffffff0, %rsp       /* align stack */

        /* call instead of jmp to keep the required stack alignment */
        xorq    %rbp, %rbp      /* zero frame pointer */
        call    *%rax
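        /*
         * The SysV AMD64 ABI passes the first four integer arguments in
         * %rdi/%rsi/%rdx/%rcx: %rdi already holds the header pointer handed
         * over by the booter (the regparm(1) value saved at entry) and the
         * other three are zeroed above.  The stack is 16-byte aligned and a
         * call (rather than a jmp) is used so hibernate_kernel_entrypoint()
         * sees the stack alignment the ABI requires.
         */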
/* END HIBERNATE CODE */
/* BEGIN ACPI WAKEUP CODE */

#include <i386/acpi.h>

        .section __TEXT,__text

/*
 * acpi_sleep_cpu(acpi_sleep_callback func, void * refcon)
 *
 * Save CPU state before platform sleep. Restore CPU state
 * following wake up.
 */
ENTRY(acpi_sleep_cpu)

        /* save general purpose registers */

        mov     %rsp, saved_rsp(%rip)

        /* make sure tlb is flushed */

        /* save control registers */
        mov     %cr0, %rax
        mov     %rax, saved_cr0(%rip)
        mov     %cr2, %rax
        mov     %rax, saved_cr2(%rip)
        mov     %cr3, %rax
        mov     %rax, saved_cr3(%rip)
        mov     %cr4, %rax
        mov     %rax, saved_cr4(%rip)

        /* save segment registers */
        movw    %es, saved_es(%rip)
        movw    %fs, saved_fs(%rip)
        movw    %gs, saved_gs(%rip)
        movw    %ss, saved_ss(%rip)
        /* save the 64bit user and kernel gs base */
        /* note: user's currently swapped into kernel base MSR */
        mov     $MSR_IA32_KERNEL_GS_BASE, %rcx
        rdmsr
        movl    %eax, saved_ugs_base(%rip)
        movl    %edx, saved_ugs_base+4(%rip)
        swapgs
        rdmsr
        movl    %eax, saved_kgs_base(%rip)
        movl    %edx, saved_kgs_base+4(%rip)
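        /*
         * rdmsr returns each 64-bit MSR value in %edx:%eax, which is why the
         * user and kernel GS bases are stored as two 32-bit halves apiece;
         * the swapgs in between exchanges the two bases so that both can be
         * read through MSR_IA32_KERNEL_GS_BASE.
         */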
        /* save descriptor table registers */

        /*
         * Call ACPI function provided by the caller to sleep the platform.
         * This call will not return on success.
         */

        /* sleep failed, no cpu context lost */
        .section __HIB, __text

        .globl  EXT(acpi_wake_prot)
        /* protected mode, paging disabled */
        movl    $EXT(low_eintstack), %esp

        .section __TEXT,__text

        .globl  EXT(acpi_wake_prot_entry)
EXT(acpi_wake_prot_entry):
        POSTCODE(ACPI_WAKE_PROT_ENTRY)
        /* Return from hibernate code in iokit/Kernel/IOHibernateRestoreKernel.c */

        /*
         * restore cr4, PAE and NXE states in an orderly fashion
         */
        mov     saved_cr4(%rip), %rcx
        mov     %rcx, %cr4

        mov     $(MSR_IA32_EFER), %ecx  /* MSR number in ecx */
        rdmsr                           /* MSR value in edx:eax */
        or      $(MSR_IA32_EFER_NXE), %eax      /* Set NXE bit in low 32-bits */
        wrmsr
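        /*
         * Ordering matters here: CR4.PAE and EFER.NXE have to be back in
         * place before paging is re-enabled below, otherwise page-table
         * entries with the NX bit set would be treated as having reserved
         * bits and would fault.
         */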
        movq    saved_cr2(%rip), %rax
        mov     %rax, %cr2

        /* restore CR0, paging enabled */
        mov     saved_cr0(%rip), %rax
        mov     %rax, %cr0

        /* restore the page tables */
        mov     saved_cr3(%rip), %rax
        mov     %rax, %cr3

        /* protected mode, paging enabled */
        POSTCODE(ACPI_WAKE_PAGED_ENTRY)

        /* load null segment selectors */

        /* restore descriptor tables */

        /* restore segment registers */
        movw    saved_es(%rip), %es
        movw    saved_fs(%rip), %fs
        movw    saved_gs(%rip), %gs
        movw    saved_ss(%rip), %ss

        /* restore the 64bit kernel and user gs base */
        mov     $MSR_IA32_KERNEL_GS_BASE, %rcx
        movl    saved_kgs_base(%rip), %eax
        movl    saved_kgs_base+4(%rip), %edx
        wrmsr
        swapgs
        movl    saved_ugs_base(%rip), %eax
        movl    saved_ugs_base+4(%rip), %edx
        wrmsr

        /*
         * Restore task register. Before doing this, clear the busy flag
         * in the TSS descriptor set by the CPU.
         */
        lea     saved_gdt(%rip), %rax
        movq    2(%rax), %rdx           /* GDT base, skip limit word */
        movl    $(KERNEL_TSS), %eax     /* TSS segment selector */
        movb    $(K_TSS), 5(%rdx, %rax) /* clear busy flag */

        ltr     saved_tr(%rip)          /* restore TR */
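        /*
         * ltr would fault (#GP) if the referenced TSS descriptor were still
         * marked busy, and the saved GDT image carries the busy type that
         * was set when the TSS was loaded before sleep; rewriting the type
         * byte to K_TSS marks the descriptor available again so TR can be
         * reloaded.
         */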
        mov     saved_rsp(%rip), %rsp

        /* restore general purpose registers */

/* END ACPI WAKEUP CODE */
#endif /* CONFIG_SLEEP */
/* Code to get from real mode to protected mode */

#define operand_size_prefix     .byte 0x66
#define address_size_prefix     .byte 0x67
#define cs_base_prefix          .byte 0x2e

#define LJMP(segment, address)                           \
        operand_size_prefix                             ;\
        .byte   0xea                                    ;\
        .long   address - EXT(real_mode_bootstrap_base) ;\
        .word   segment

#define LGDT(address)                                    \
        cs_base_prefix                                  ;\
        address_size_prefix                             ;\
        operand_size_prefix                             ;\
        .word   0x010f                                  ;\
        .byte   0x15                                    ;\
        .long   address - EXT(real_mode_bootstrap_base)
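/*
 * These macros hand-assemble the 32-bit forms of ljmp and lgdt so they can
 * be executed while the CPU is still in 16-bit real mode: the operand- and
 * address-size (and %cs-override) prefixes are emitted as raw bytes, and the
 * operands are expressed as offsets from real_mode_bootstrap_base so the
 * code keeps working after it is copied down to its low-memory execution
 * address.
 */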
        .section __HIB, __text
        .align  12      /* Page align for single bcopy_phys() */
Entry(real_mode_bootstrap_base)

        LGDT(EXT(protected_mode_gdtr))

        /* set the PE bit of CR0 */
        mov     %cr0, %eax
        or      $(CR0_PE), %eax
        mov     %eax, %cr0

        /* reload CS register */
        LJMP(KERNEL32_CS, 1f + REAL_MODE_BOOTSTRAP_OFFSET)
1:

        /* we are in protected mode now */
        /* set up the segment registers */

        POSTCODE(SLAVE_STARTPROG_ENTRY);

        mov     PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, %ecx
        jmp     *%ecx

Entry(protected_mode_gdtr)
        .short  160             /* limit (8*20 segs) */
        .quad   EXT(master_gdt)

Entry(real_mode_bootstrap_end)
/* Save area used across sleep/wake */
        .section __HIB, __data

/* gdtr for real address of master_gdt in HIB (not the aliased address) */
        .word   160             /* limit (8*20 segs) */
        .quad   EXT(master_gdt)

saved_kgs_base: .quad   0
saved_ugs_base: .quad   0