/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <platforms.h>
#include <debug.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <assym.s>

#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/acpi.h>

.code32

/*
 * Interrupt and bootup stack for initial processor.
 * Note: we switch to a dynamically allocated interrupt stack once VM is up.
 */

/* in the __HIB section since the hibernate restore code uses this stack. */
	.section __HIB, __data
	.align	12

	.globl	EXT(low_intstack)
EXT(low_intstack):
	.globl	EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):

	.space	INTSTACK_SIZE

	.globl	EXT(low_eintstack)
EXT(low_eintstack):
	.globl	EXT(gIOHibernateRestoreStackEnd)
EXT(gIOHibernateRestoreStackEnd):

	/* back to the regular __DATA section. */

	.section __DATA, __data

/*
 * Stack for machine-check handler.
 */
	.align	12
	.globl	EXT(mc_task_stack)
EXT(mc_task_stack):
	.space	INTSTACK_SIZE
	.globl	EXT(mc_task_stack_end)
EXT(mc_task_stack_end):

	/* Must not clobber EDI */
#define SWITCH_TO_64BIT_MODE					 \
	movl	$(CR4_PAE),%eax		/* enable PAE */	;\
	movl	%eax,%cr4					;\
	movl	$MSR_IA32_EFER,%ecx				;\
	rdmsr							;\
	/* enable long mode, NX */				;\
	orl	$(MSR_IA32_EFER_LME | MSR_IA32_EFER_NXE),%eax	;\
	wrmsr							;\
	movl	$EXT(BootPML4),%eax				;\
	movl	%eax,%cr3					;\
	movl	%cr0,%eax					;\
	orl	$(CR0_PG|CR0_WP),%eax	/* enable paging */	;\
	movl	%eax,%cr0					;\
	/* "The Aussie Maneuver" ("Myria" variant) */		;\
	pushl	$(0xcb<<24)|KERNEL64_CS	/* reload CS with 0x08 */ ;\
	call	.-1						;\
	.code64

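/*
 * How the CS reload above works: the pushl leaves a dword on the stack
 * whose low word is KERNEL64_CS and whose top byte is 0xcb -- the opcode
 * for a far return (lret). "call .-1" targets that 0xcb byte (the last
 * byte of the pushl immediate), so the call pushes the address of the
 * following instruction and immediately executes lret, which pops that
 * address into RIP and pops KERNEL64_CS into CS. This reloads CS for
 * long mode without needing a relocated far-jump target in this code.
 */
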
/*
 * The BSP CPU starts here.
 *	eax points to kernbootstruct
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */

.code32
	.text
	.section __HIB, __text
	.align	ALIGN
	.globl	EXT(_start)
	.globl	EXT(pstart)
LEXT(_start)
LEXT(pstart)

/*
 * Here we do the minimal setup to switch from 32-bit mode to 64-bit long mode.
 *
 * Initial memory layout:
 *
 *	-------------------------
 *	|			|
 *	| Kernel text/data	|
 *	|			|
 *	|-----------------------| Kernel text base addr - 2MB-aligned
 *	| padding		|
 *	|-----------------------|
 *	| __HIB section		|
 *	|-----------------------| Page-aligned
 *	|			|
 *	| padding		|
 *	|			|
 *	------------------------- 0
 *
 */
	mov	%eax, %edi	/* save kernbootstruct */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	POSTCODE(PSTART_ENTRY)

	/*
	 * Set up segmentation
	 */
	movl	$EXT(protected_mode_gdtr), %eax
	lgdtl	(%eax)

	/*
	 * Rebase Boot page tables to kernel base address.
	 */
	movl	$EXT(BootPML4), %eax			// Level 4:
	add	%eax, 0*8+0(%eax)			//  - 1:1
	add	%eax, KERNEL_PML4_INDEX*8+0(%eax)	//  - kernel space

	movl	$EXT(BootPDPT), %edx			// Level 3:
	add	%eax, 0*8+0(%edx)
	add	%eax, 1*8+0(%edx)
	add	%eax, 2*8+0(%edx)
	add	%eax, 3*8+0(%edx)

	POSTCODE(PSTART_REBASE)

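/*
 * The Boot page table entries above start out holding offsets rather than
 * absolute physical addresses; adding the tables' runtime address converts
 * both the 1:1 (identity) slot and the kernel-space slot into the physical
 * pointers the MMU expects, so the same static tables work wherever the
 * kernel text is actually loaded.
 */
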
/* the following code is shared by the master CPU and all slave CPUs */
L_pstart_common:
	/*
	 * switch to 64 bit mode
	 */
	SWITCH_TO_64BIT_MODE

	/* Flush data segment selectors */
	xor	%eax, %eax
	mov	%ax, %ss
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %fs
	mov	%ax, %gs

	test	%edi, %edi	/* Populate stack canary on BSP */
	jz	Lvstartshim

	mov	$1, %eax
	cpuid
	test	$(1 << 30), %ecx
	jz	Lnon_rdrand
	RDRAND_RAX		/* RAX := 64 bits of DRBG entropy */
	jnc	Lnon_rdrand	/* TODO: complain if DRBG fails at this stage */

Lstore_random_guard:
	xor	%ah, %ah	/* Security: zero second byte of stack canary */
	movq	%rax, ___stack_chk_guard(%rip)
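	/*
	 * The xor above forces the guard's second byte to zero so the
	 * canary always contains a NUL: an overflowing C-string write
	 * cannot reproduce it, and string reads cannot leak past it.
	 * (This is the usual rationale for a NUL byte in stack guards.)
	 */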
	/* %edi = boot_args_start if BSP */
Lvstartshim:

	POSTCODE(PSTART_VSTART)

	/* %edi = boot_args_start */

	leaq	_vstart(%rip), %rcx
	movq	$0xffffff8000000000, %rax	/* adjust pointer up high */
	or	%rax, %rsp			/* and stack pointer up there */
	or	%rcx, %rax
	andq	$0xfffffffffffffff0, %rsp	/* align stack */
	xorq	%rbp, %rbp			/* zero frame pointer */
	callq	*%rax

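/*
 * Note on the OR-based adjustment above: 0xffffff8000000000 is the base
 * of the kernel's high virtual mapping, and the low (physical-alias)
 * addresses of the stack and of _vstart are small enough that no bits
 * overlap the base, so OR behaves as ADD and yields the high virtual
 * alias. Both aliases are mapped at this point via the Boot page tables.
 */
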
Lnon_rdrand:
	rdtsc			/* EDX:EAX := TSC */
	/* Distribute low order bits */
	mov	%eax, %ecx
	xor	%al, %ah
	shl	$16, %rcx
	xor	%rcx, %rax
	xor	%eax, %edx

	/* Incorporate ASLR entropy, if any */
	lea	(%rip), %rcx
	shr	$21, %rcx
	movzbl	%cl, %ecx
	shl	$16, %ecx
	xor	%ecx, %edx

	mov	%ah, %cl
	ror	%cl, %edx	/* Right rotate EDX (TSC&0xFF ^ (TSC>>8 & 0xFF))&1F */
	shl	$32, %rdx
	xor	%rdx, %rax
	mov	%cl, %al
	jmp	Lstore_random_guard
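
/*
 * The sequence above is the fallback when RDRAND is absent or fails:
 * the guard is seeded from the TSC, mixed with whatever ASLR slide bits
 * are visible in RIP. This is weak entropy, but still preferable to a
 * compile-time constant canary.
 */
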
/*
 * AP (slave) CPUs enter here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */
	.align	ALIGN
	.globl	EXT(slave_pstart)
LEXT(slave_pstart)
	.code32
	cli				/* disable interrupts, so we don't */
					/* need IDT for a while */
	POSTCODE(SLAVE_PSTART)

	movl	$EXT(mp_slave_stack) + PAGE_SIZE, %esp

	xor	%edi, %edi		/* AP, no "kernbootstruct" */

	jmp	L_pstart_common		/* hop a ride to vstart() */

/* BEGIN HIBERNATE CODE */

.section __HIB, __text
/*
 * This code is linked into the kernel but part of the "__HIB" section,
 * which means it's used by code running in the special context of restoring
 * the kernel text and data from the hibernation image read by the booter.
 * hibernate_kernel_entrypoint() and everything it calls or references
 * (i.e., hibernate_restore_phys_page()) needs to be careful to only touch
 * memory also in the "__HIB" section.
 */

	.align	ALIGN
	.globl	EXT(hibernate_machine_entrypoint)
.code32
LEXT(hibernate_machine_entrypoint)
	movl	%eax, %edi	/* regparm(1) calling convention */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	/*
	 * Set up GDT
	 */
	movl	$EXT(master_gdtr), %eax
	lgdtl	(%eax)

	/* Switch to 64-bit on the Boot PTs */
	SWITCH_TO_64BIT_MODE

	leaq	EXT(hibernate_kernel_entrypoint)(%rip), %rcx

	/* adjust the pointers to be up high */
	movq	$0xffffff8000000000, %rax
	orq	%rax, %rsp
	orq	%rcx, %rax

	/* %edi is already filled with header pointer */
	xorl	%esi, %esi			/* zero 2nd arg */
	xorl	%edx, %edx			/* zero 3rd arg */
	xorl	%ecx, %ecx			/* zero 4th arg */
	andq	$0xfffffffffffffff0, %rsp	/* align stack */

	/* call instead of jmp to keep the required stack alignment */
	xorq	%rbp, %rbp			/* zero frame pointer */
	call	*%rax

	/* NOTREACHED */
	hlt

/* END HIBERNATE CODE */

#if CONFIG_SLEEP
/* BEGIN ACPI WAKEUP CODE */

#include <i386/acpi.h>

/*
 * acpi_wake_start
 */

.section __TEXT,__text
.code64

/*
 * acpi_sleep_cpu(acpi_sleep_callback func, void * refcon)
 *
 * Save CPU state before platform sleep. Restore CPU state
 * following wake up.
 */

ENTRY(acpi_sleep_cpu)
	push	%rbp
	mov	%rsp, %rbp

	/* save flags */
	pushf

	/* save general purpose registers */
	push	%rax
	push	%rbx
	push	%rcx
	push	%rdx
	push	%rbp
	push	%rsi
	push	%rdi
	push	%r8
	push	%r9
	push	%r10
	push	%r11
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	%rsp, saved_rsp(%rip)

	/* make sure tlb is flushed */
	mov	%cr3, %rax
	mov	%rax, %cr3
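	/*
	 * Rewriting CR3 with its current value invalidates all non-global
	 * TLB entries; entries for global pages survive unless CR4.PGE is
	 * toggled as well.
	 */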

	/* save control registers */
	mov	%cr0, %rax
	mov	%rax, saved_cr0(%rip)
	mov	%cr2, %rax
	mov	%rax, saved_cr2(%rip)
	mov	%cr3, %rax
	mov	%rax, saved_cr3(%rip)
	mov	%cr4, %rax
	mov	%rax, saved_cr4(%rip)

	/* save segment registers */
	movw	%es, saved_es(%rip)
	movw	%fs, saved_fs(%rip)
	movw	%gs, saved_gs(%rip)
	movw	%ss, saved_ss(%rip)

	/* save the 64-bit user and kernel gs base */
	/* note: the user's base is currently swapped into the kernel base MSR */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	rdmsr
	movl	%eax, saved_ugs_base(%rip)
	movl	%edx, saved_ugs_base+4(%rip)
	swapgs
	rdmsr
	movl	%eax, saved_kgs_base(%rip)
	movl	%edx, saved_kgs_base+4(%rip)
	swapgs

	/* save descriptor table registers */
	sgdt	saved_gdt(%rip)
	sldt	saved_ldt(%rip)
	sidt	saved_idt(%rip)
	str	saved_tr(%rip)

	/*
	 * Call ACPI function provided by the caller to sleep the platform.
	 * This call will not return on success.
	 */
	xchgq	%rdi, %rsi
	call	*%rsi

	/* sleep failed, no cpu context lost */
	jmp	wake_restore

.section __HIB, __text
.code32
.globl EXT(acpi_wake_prot)
EXT(acpi_wake_prot):
	/* protected mode, paging disabled */
	movl	$EXT(low_eintstack), %esp

	SWITCH_TO_64BIT_MODE

	jmp	Lwake_64

.section __TEXT,__text
.code64

.globl EXT(acpi_wake_prot_entry)
EXT(acpi_wake_prot_entry):
	POSTCODE(ACPI_WAKE_PROT_ENTRY)
	/*
	 * Return from the hibernate code in
	 * iokit/Kernel/IOHibernateRestoreKernel.c.
	 */
Lwake_64:
	/*
	 * restore cr4, PAE and NXE states in an orderly fashion
	 */
	mov	saved_cr4(%rip), %rcx
	mov	%rcx, %cr4

	mov	$(MSR_IA32_EFER), %ecx	/* MSR number in ecx */
	rdmsr				/* MSR value in edx:eax */
	or	$(MSR_IA32_EFER_NXE), %eax /* Set NXE bit in low 32-bits */
	wrmsr				/* Update */
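	/*
	 * EFER.NXE is set before the saved CR3 is reinstated below:
	 * page-table entries may carry the XD/NX bit, which is a reserved
	 * bit (and would trigger reserved-bit page faults) while NXE is 0.
	 */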

	movq	saved_cr2(%rip), %rax
	mov	%rax, %cr2

	/* restore CR0, paging enabled */
	mov	saved_cr0(%rip), %rax
	mov	%rax, %cr0

	/* restore the page tables */
	mov	saved_cr3(%rip), %rax
	mov	%rax, %cr3

	/* protected mode, paging enabled */
	POSTCODE(ACPI_WAKE_PAGED_ENTRY)

	/* load null segment selectors */
	xor	%eax, %eax
	movw	%ax, %ss
	movw	%ax, %ds

	/* restore descriptor tables */
	lgdt	saved_gdt(%rip)
	lldt	saved_ldt(%rip)
	lidt	saved_idt(%rip)

	/* restore segment registers */
	movw	saved_es(%rip), %es
	movw	saved_fs(%rip), %fs
	movw	saved_gs(%rip), %gs
	movw	saved_ss(%rip), %ss

	/* restore the 64-bit kernel and user gs base */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	movl	saved_kgs_base(%rip), %eax
	movl	saved_kgs_base+4(%rip), %edx
	wrmsr
	swapgs
	movl	saved_ugs_base(%rip), %eax
	movl	saved_ugs_base+4(%rip), %edx
	wrmsr

	/*
	 * Restore task register. Before doing this, clear the busy flag
	 * in the TSS descriptor set by the CPU.
	 */
	lea	saved_gdt(%rip), %rax
	movq	2(%rax), %rdx		/* GDT base, skip limit word */
	movl	$(KERNEL_TSS), %eax	/* TSS segment selector */
	movb	$(K_TSS), 5(%rdx, %rax)	/* clear busy flag */
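	/*
	 * ltr faults with #GP if the referenced TSS descriptor is marked
	 * busy, and the descriptor saved across sleep was busy because TR
	 * pointed at it. Byte 5 of a descriptor holds the access/type
	 * field, so storing K_TSS (the "available TSS" type) clears the
	 * busy bit before TR is reloaded.
	 */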

	ltr	saved_tr(%rip)		/* restore TR */

wake_restore:
	mov	saved_rsp(%rip), %rsp

	/* restore general purpose registers */
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%r11
	pop	%r10
	pop	%r9
	pop	%r8
	pop	%rdi
	pop	%rsi
	pop	%rbp
	pop	%rdx
	pop	%rcx
	pop	%rbx
	pop	%rax

	/* restore flags */
	popf

	leave
	ret

/* END ACPI WAKEUP CODE */
#endif /* CONFIG_SLEEP */

/* Code to get from real mode to protected mode */

#define operand_size_prefix	.byte 0x66
#define address_size_prefix	.byte 0x67
#define cs_base_prefix		.byte 0x2e

#define LJMP(segment,address)					 \
	operand_size_prefix					;\
	.byte	0xea						;\
	.long	address-EXT(real_mode_bootstrap_base)		;\
	.word	segment

#define LGDT(address)						 \
	cs_base_prefix						;\
	address_size_prefix					;\
	operand_size_prefix					;\
	.word	0x010f						;\
	.byte	0x15						;\
	.long	address-EXT(real_mode_bootstrap_base)

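/*
 * These macros hand-encode two instructions executed while still in
 * real mode. LJMP emits "jmp far ptr16:32" (opcode 0xea, with an
 * operand-size prefix so the target offset is 32 bits). LGDT emits
 * "lgdt" (two-byte opcode 0x0f 0x01, laid down little-endian as
 * .word 0x010f, with ModRM byte 0x15 selecting /2 and a disp32
 * operand), prefixed so operand and address size are 32-bit and the
 * memory operand is CS-relative. Offsets are taken relative to
 * real_mode_bootstrap_base because this page is copied and executed
 * from a different physical location.
 */
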
.section __HIB, __text
.align 12	/* Page align for single bcopy_phys() */
.code32
Entry(real_mode_bootstrap_base)
	cli

	LGDT(EXT(protected_mode_gdtr))

	/* set the PE bit of CR0 */
	mov	%cr0, %eax
	inc	%eax
	mov	%eax, %cr0
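	/*
	 * The inc works as "set CR0.PE" only because PE is bit 0 and is
	 * guaranteed clear while executing in real mode, so the increment
	 * cannot carry into any other CR0 bit.
	 */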

	/* reload CS register */
	LJMP(KERNEL32_CS, 1f + REAL_MODE_BOOTSTRAP_OFFSET)
1:

	/* we are in protected mode now */
	/* set up the segment registers */
	mov	$KERNEL_DS, %eax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	xor	%eax, %eax
	movw	%ax, %fs
	movw	%ax, %gs

	POSTCODE(SLAVE_STARTPROG_ENTRY);

	mov	PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, %ecx
	jmp	*%ecx

Entry(protected_mode_gdtr)
	.short	160		/* limit (8*20 segs) */
	.quad	EXT(master_gdt)

Entry(real_mode_bootstrap_end)

/* Save area used across sleep/wake */
.section __HIB, __data
.align 2

/* gdtr for real address of master_gdt in HIB (not the aliased address) */
Entry(master_gdtr)
	.word	160		/* limit (8*20 segs) */
	.quad	EXT(master_gdt)

saved_gdt:	.word 0
		.quad 0
saved_rsp:	.quad 0
saved_es:	.word 0
saved_fs:	.word 0
saved_gs:	.word 0
saved_ss:	.word 0
saved_cr0:	.quad 0
saved_cr2:	.quad 0
saved_cr3:	.quad 0
saved_cr4:	.quad 0
saved_idt:	.word 0
		.quad 0
saved_ldt:	.word 0
saved_tr:	.word 0
saved_kgs_base:	.quad 0
saved_ugs_base:	.quad 0