/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <platforms.h>
#include <debug.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <assym.s>

#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/acpi.h>

.code32


/*
 * Interrupt and bootup stack for initial processor.
 * Note: we switch to a dynamically allocated interrupt stack once VM is up.
 */

/* in the __HIB section since the hibernate restore code uses this stack. */
	.section __HIB, __data
	.align	12

	.globl	EXT(low_intstack)
EXT(low_intstack):
	.globl	EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):

	.space	INTSTACK_SIZE

	.globl	EXT(low_eintstack)
EXT(low_eintstack):
	.globl	EXT(gIOHibernateRestoreStackEnd)
EXT(gIOHibernateRestoreStackEnd):

	/* back to the regular __DATA section. */

	.section __DATA, __data

/*
 * Stack for machine-check handler.
 */
	.align	12
	.globl	EXT(mc_task_stack)
EXT(mc_task_stack):
	.space	INTSTACK_SIZE
	.globl	EXT(mc_task_stack_end)
EXT(mc_task_stack_end):

	/* Must not clobber EDI */
#define SWITCH_TO_64BIT_MODE					 \
	movl	$(CR4_PAE),%eax		/* enable PAE */	;\
	movl	%eax,%cr4					;\
	movl	$MSR_IA32_EFER,%ecx				;\
	rdmsr							;\
	/* enable long mode, NX */				;\
	orl	$(MSR_IA32_EFER_LME | MSR_IA32_EFER_NXE),%eax	;\
	wrmsr							;\
	movl	$EXT(BootPML4),%eax				;\
	movl	%eax,%cr3					;\
	movl	%cr0,%eax					;\
	orl	$(CR0_PG|CR0_WP),%eax	/* enable paging */	;\
	movl	%eax,%cr0					;\
	ljmpl	$KERNEL64_CS,$64f				;\
64:								;\
	.code64
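
/*
 * SWITCH_TO_64BIT_MODE follows the canonical IA-32e enable sequence:
 * set CR4.PAE, set EFER.LME (and NXE) via wrmsr, point CR3 at the boot
 * PML4, then set CR0.PG. That leaves the CPU in compatibility mode;
 * the far jump through KERNEL64_CS loads a 64-bit code segment and
 * completes the switch, which is why .code64 takes effect at the end.
 */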

/*
 * BSP CPU start here.
 *	eax points to kernbootstruct
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */

.code32
	.text
	.section __HIB, __text
	.align	ALIGN
	.globl	EXT(_start)
	.globl	EXT(pstart)
LEXT(_start)
LEXT(pstart)

/*
 * Here we do the minimal setup to switch from 32 bit mode to 64 bit long mode.
 *
 * Initial memory layout:
 *
 *	-------------------------
 *	|			|
 *	| Kernel text/data	|
 *	|			|
 *	|-----------------------| Kernel text base addr - 2MB-aligned
 *	| padding		|
 *	|-----------------------|
 *	| __HIB section		|
 *	|-----------------------| Page-aligned
 *	|			|
 *	| padding		|
 *	|			|
 *	------------------------- 0
 *
 */
	mov	%eax, %edi	/* save kernbootstruct */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	POSTCODE(PSTART_ENTRY)

	/*
	 * Set up segmentation
	 */
	movl	$EXT(protected_mode_gdtr), %eax
	lgdtl	(%eax)

	/*
	 * Rebase Boot page tables to kernel base address.
	 */
	movl	$EXT(BootPML4), %eax			// Level 4:
	add	%eax, 0*8+0(%eax)			//  - 1:1
	add	%eax, KERNEL_PML4_INDEX*8+0(%eax)	//  - kernel space

	movl	$EXT(BootPDPT), %edx			// Level 3:
	add	%eax, 0*8+0(%edx)
	add	%eax, 1*8+0(%edx)
	add	%eax, 2*8+0(%edx)
	add	%eax, 3*8+0(%edx)

	POSTCODE(PSTART_REBASE)
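
	/*
	 * The entries in BootPML4 and BootPDPT start out holding offsets
	 * relative to BootPML4 rather than final addresses; adding its
	 * address (still in %eax) turns the identity-map and kernel-space
	 * slots into the absolute physical pointers the MMU requires.
	 */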

/* the following code is shared by the master CPU and all slave CPUs */
L_pstart_common:
	/*
	 * switch to 64 bit mode
	 */
	SWITCH_TO_64BIT_MODE

	/* Flush data segment selectors */
	xor	%eax, %eax
	mov	%ax, %ss
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %fs
	mov	%ax, %gs

	test	%edi, %edi	/* Populate stack canary on BSP */
	jz	Lvstartshim

	mov	$1, %eax
	cpuid
	test	$(1 << 30), %ecx
	jz	Lnon_rdrand
	RDRAND_RAX		/* RAX := 64 bits of DRBG entropy */
	jnc	Lnon_rdrand	/* TODO: complain if DRBG fails at this stage */

Lstore_random_guard:
	xor	%ah, %ah	/* Security: zero second byte of stack canary */
	movq	%rax, ___stack_chk_guard(%rip)
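	/*
	 * The zero byte just planted in %ah means the full canary can
	 * never be reproduced by an overflow of NUL-terminated string
	 * operations, at the cost of one byte of entropy.
	 */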
	/* %edi = boot_args_start if BSP */
Lvstartshim:

	POSTCODE(PSTART_VSTART)

	/* %edi = boot_args_start */

	leaq	_vstart(%rip), %rcx
	movq	$0xffffff8000000000, %rax	/* adjust pointer up high */
	or	%rax, %rsp			/* and stack pointer up there */
	or	%rcx, %rax
	andq	$0xfffffffffffffff0, %rsp	/* align stack */
	xorq	%rbp, %rbp			/* zero frame pointer */
	callq	*%rax
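
	/*
	 * The boot page tables map the kernel both 1:1 and at the base of
	 * kernel virtual space, so OR-ing 0xffffff8000000000 into the
	 * stack pointer and the vstart() address above moves execution
	 * from the physical window into the high kernel mapping.
	 */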

Lnon_rdrand:
	rdtsc		/* EDX:EAX := TSC */
	/* Distribute low order bits */
	mov	%eax, %ecx
	xor	%al, %ah
	shl	$16, %rcx
	xor	%rcx, %rax
	xor	%eax, %edx

	/* Incorporate ASLR entropy, if any */
	lea	(%rip), %rcx
	shr	$21, %rcx
	movzbl	%cl, %ecx
	shl	$16, %ecx
	xor	%ecx, %edx

	mov	%ah, %cl
	ror	%cl, %edx	/* Right rotate EDX (TSC&0xFF ^ (TSC>>8 & 0xFF))&1F */
	shl	$32, %rdx
	xor	%rdx, %rax
	mov	%cl, %al
	jmp	Lstore_random_guard
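
/*
 * The non-RDRAND fallback canary mixes the TSC with what appear to be
 * bits 28:21 of %rip (i.e. the kernel's ASLR slide, when present); this
 * is far weaker than DRBG output but still varies from boot to boot.
 */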
/*
 * AP (slave) CPUs enter here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */
	.align	ALIGN
	.globl	EXT(slave_pstart)
LEXT(slave_pstart)
	.code32
	cli			/* disable interrupts, so we don't */
				/* need IDT for a while */
	POSTCODE(SLAVE_PSTART)

	movl	$EXT(mp_slave_stack) + PAGE_SIZE, %esp

	xor	%edi, %edi	/* AP, no "kernbootstruct" */

	jmp	L_pstart_common	/* hop a ride to vstart() */


/* BEGIN HIBERNATE CODE */

.section __HIB, __text
/*
 * This code is linked into the kernel but part of the "__HIB" section,
 * which means it's used by code running in the special context of restoring
 * the kernel text and data from the hibernation image read by the booter.
 * hibernate_kernel_entrypoint() and everything it calls or references
 * (i.e. hibernate_restore_phys_page()) need to be careful to only touch
 * memory also in the "__HIB" section.
 */

	.align	ALIGN
	.globl	EXT(hibernate_machine_entrypoint)
.code32
LEXT(hibernate_machine_entrypoint)
	movl	%eax, %edi	/* regparm(1) calling convention */

	/* Use low 32-bits of address as 32-bit stack */
	movl	$EXT(low_eintstack), %esp

	/*
	 * Set up GDT
	 */
	movl	$EXT(master_gdtr), %eax
	lgdtl	(%eax)

	/* Switch to 64-bit on the Boot PTs */
	SWITCH_TO_64BIT_MODE

	leaq	EXT(hibernate_kernel_entrypoint)(%rip),%rcx

	/* adjust the pointers to be up high */
	movq	$0xffffff8000000000, %rax
	orq	%rax, %rsp
	orq	%rcx, %rax

	/* %edi is already filled with header pointer */
	xorl	%esi, %esi			/* zero 2nd arg */
	xorl	%edx, %edx			/* zero 3rd arg */
	xorl	%ecx, %ecx			/* zero 4th arg */
	andq	$0xfffffffffffffff0, %rsp	/* align stack */

	/* call instead of jmp to keep the required stack alignment */
	xorq	%rbp, %rbp			/* zero frame pointer */
	call	*%rax

	/* NOTREACHED */
	hlt

/* END HIBERNATE CODE */

#if CONFIG_SLEEP
/* BEGIN ACPI WAKEUP CODE */

#include <i386/acpi.h>


/*
 * acpi_wake_start
 */

.section __TEXT,__text
.code64

/*
 * acpi_sleep_cpu(acpi_sleep_callback func, void * refcon)
 *
 * Save CPU state before platform sleep. Restore CPU state
 * following wake up.
 */

ENTRY(acpi_sleep_cpu)
	push	%rbp
	mov	%rsp, %rbp

	/* save flags */
	pushf

	/* save general purpose registers */
	push	%rax
	push	%rbx
	push	%rcx
	push	%rdx
	push	%rbp
	push	%rsi
	push	%rdi
	push	%r8
	push	%r9
	push	%r10
	push	%r11
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	%rsp, saved_rsp(%rip)

	/* make sure tlb is flushed */
	mov	%cr3,%rax
	mov	%rax,%cr3
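	/* (reloading CR3 invalidates all non-global TLB entries) */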

	/* save control registers */
	mov	%cr0, %rax
	mov	%rax, saved_cr0(%rip)
	mov	%cr2, %rax
	mov	%rax, saved_cr2(%rip)
	mov	%cr3, %rax
	mov	%rax, saved_cr3(%rip)
	mov	%cr4, %rax
	mov	%rax, saved_cr4(%rip)

	/* save segment registers */
	movw	%es, saved_es(%rip)
	movw	%fs, saved_fs(%rip)
	movw	%gs, saved_gs(%rip)
	movw	%ss, saved_ss(%rip)

	/* save the 64bit user and kernel gs base */
	/* note: user's currently swapped into kernel base MSR */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	rdmsr
	movl	%eax, saved_ugs_base(%rip)
	movl	%edx, saved_ugs_base+4(%rip)
	swapgs
	rdmsr
	movl	%eax, saved_kgs_base(%rip)
	movl	%edx, saved_kgs_base+4(%rip)
	swapgs
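	/*
	 * While in the kernel the bases are swapped, so the first rdmsr
	 * above reads the user base out of KERNEL_GS_BASE; the swapgs
	 * exposes the kernel base for the second rdmsr, and the final
	 * swapgs puts things back the way they were.
	 */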

	/* save descriptor table registers */
	sgdt	saved_gdt(%rip)
	sldt	saved_ldt(%rip)
	sidt	saved_idt(%rip)
	str	saved_tr(%rip)

	/*
	 * Call ACPI function provided by the caller to sleep the platform.
	 * This call will not return on success.
	 */

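	/* func arrives in %rdi and refcon in %rsi; swap them so the
	   callback below receives refcon as its first argument */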
	xchgq	%rdi, %rsi
	call	*%rsi

	/* sleep failed, no cpu context lost */
	jmp	wake_restore

.section __HIB, __text
.code32
.globl	EXT(acpi_wake_prot)
EXT(acpi_wake_prot):
	/* protected mode, paging disabled */
	movl	$EXT(low_eintstack), %esp

	SWITCH_TO_64BIT_MODE

	jmp	Lwake_64

.section __TEXT,__text
.code64

.globl	EXT(acpi_wake_prot_entry)
EXT(acpi_wake_prot_entry):
	POSTCODE(ACPI_WAKE_PROT_ENTRY)
	/* Return from hibernate code in iokit/Kernel/IOHibernateRestoreKernel.c
	 */
Lwake_64:
	/*
	 * restore cr4, PAE and NXE states in an orderly fashion
	 */
	mov	saved_cr4(%rip), %rcx
	mov	%rcx, %cr4

	mov	$(MSR_IA32_EFER), %ecx		/* MSR number in ecx */
	rdmsr					/* MSR value in edx:eax */
	or	$(MSR_IA32_EFER_NXE), %eax	/* Set NXE bit in low 32-bits */
	wrmsr					/* Update */
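	/*
	 * NXE must be set before the saved CR3 is reloaded below, since
	 * the saved page tables carry NX bits and those bits are treated
	 * as reserved (faulting) while EFER.NXE is clear.
	 */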

	movq	saved_cr2(%rip), %rax
	mov	%rax, %cr2

	/* restore CR0, paging enabled */
	mov	saved_cr0(%rip), %rax
	mov	%rax, %cr0

	/* restore the page tables */
	mov	saved_cr3(%rip), %rax
	mov	%rax, %cr3

	/* protected mode, paging enabled */
	POSTCODE(ACPI_WAKE_PAGED_ENTRY)

	/* load null segment selectors */
	xor	%eax, %eax
	movw	%ax, %ss
	movw	%ax, %ds

	/* restore descriptor tables */
	lgdt	saved_gdt(%rip)
	lldt	saved_ldt(%rip)
	lidt	saved_idt(%rip)

	/* restore segment registers */
	movw	saved_es(%rip), %es
	movw	saved_fs(%rip), %fs
	movw	saved_gs(%rip), %gs
	movw	saved_ss(%rip), %ss

	/* restore the 64bit kernel and user gs base */
	mov	$MSR_IA32_KERNEL_GS_BASE, %rcx
	movl	saved_kgs_base(%rip), %eax
	movl	saved_kgs_base+4(%rip), %edx
	wrmsr
	swapgs
	movl	saved_ugs_base(%rip), %eax
	movl	saved_ugs_base+4(%rip), %edx
	wrmsr

	/*
	 * Restore task register. Before doing this, clear the busy flag
	 * in the TSS descriptor set by the CPU.
	 */
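	/*
	 * (ltr takes a #GP on a busy TSS; byte 5 of a descriptor is its
	 * type/access byte, so storing K_TSS below marks the TSS available.)
	 */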
	lea	saved_gdt(%rip), %rax
	movq	2(%rax), %rdx			/* GDT base, skip limit word */
	movl	$(KERNEL_TSS), %eax		/* TSS segment selector */
	movb	$(K_TSS), 5(%rdx, %rax)		/* clear busy flag */

	ltr	saved_tr(%rip)			/* restore TR */

wake_restore:
	mov	saved_rsp(%rip), %rsp

	/* restore general purpose registers */
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%r11
	pop	%r10
	pop	%r9
	pop	%r8
	pop	%rdi
	pop	%rsi
	pop	%rbp
	pop	%rdx
	pop	%rcx
	pop	%rbx
	pop	%rax

	/* restore flags */
	popf

	leave
	ret

/* END ACPI WAKEUP CODE */
#endif /* CONFIG_SLEEP */

/* Code to get from real mode to protected mode */

#define	operand_size_prefix	.byte 0x66
#define	address_size_prefix	.byte 0x67
#define	cs_base_prefix		.byte 0x2e

#define LJMP(segment,address)				 \
	operand_size_prefix				;\
	.byte	0xea					;\
	.long	address-EXT(real_mode_bootstrap_base)	;\
	.word	segment

#define LGDT(address)					 \
	cs_base_prefix					;\
	address_size_prefix				;\
	operand_size_prefix				;\
	.word	0x010f					;\
	.byte	0x15					;\
	.long	address-EXT(real_mode_bootstrap_base)
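
/*
 * These macros hand-assemble the two instructions: 0xea is the direct
 * far-jmp opcode and 0x0f 0x01 with modrm 0x15 is lgdt with a 32-bit
 * displacement. The explicit operand/address-size prefixes let the same
 * encodings do 32-bit work while executing in a 16-bit real-mode segment.
 */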

.section __HIB, __text
.align	12	/* Page align for single bcopy_phys() */
.code32
Entry(real_mode_bootstrap_base)
	cli

	LGDT(EXT(protected_mode_gdtr))

	/* set the PE bit of CR0 */
	mov	%cr0, %eax
	inc	%eax
	mov	%eax, %cr0
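	/* (PE is bit 0 of CR0 and is clear in real mode, so inc sets it) */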

	/* reload CS register */
	LJMP(KERNEL32_CS, 1f + REAL_MODE_BOOTSTRAP_OFFSET)
1:

	/* we are in protected mode now */
	/* set up the segment registers */
	mov	$KERNEL_DS, %eax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	xor	%eax,%eax
	movw	%ax, %fs
	movw	%ax, %gs

	POSTCODE(SLAVE_STARTPROG_ENTRY);

	mov	PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, %ecx
	jmp	*%ecx

Entry(protected_mode_gdtr)
	.short	160		/* limit (8*20 segs) */
	.quad	EXT(master_gdt)

Entry(real_mode_bootstrap_end)

/* Save area used across sleep/wake */
.section __HIB, __data
.align	2

/* gdtr for real address of master_gdt in HIB (not the aliased address) */
Entry(master_gdtr)
	.word	160		/* limit (8*20 segs) */
	.quad	EXT(master_gdt)

saved_gdt:	.word	0
		.quad	0
saved_rsp:	.quad	0
saved_es:	.word	0
saved_fs:	.word	0
saved_gs:	.word	0
saved_ss:	.word	0
saved_cr0:	.quad	0
saved_cr2:	.quad	0
saved_cr3:	.quad	0
saved_cr4:	.quad	0
saved_idt:	.word	0
		.quad	0
saved_ldt:	.word	0
saved_tr:	.word	0
saved_kgs_base:	.quad	0
saved_ugs_base:	.quad	0