.globl EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):
- .set ., .+INTSTACK_SIZE
+ .space INTSTACK_SIZE
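+ /*
+  * Note: .space reserves INTSTACK_SIZE zero-filled bytes; the old
+  * ".set ., .+INTSTACK_SIZE" advanced the location counter by
+  * assigning to ".", a form not every assembler accepts. The same
+  * substitution is made for the stacks below. (".align 12" pads to
+  * a 2^12 = 4KiB page boundary on this target.)
+  */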
.globl EXT(low_eintstack)
EXT(low_eintstack):
.align 12
.globl EXT(df_task_stack)
EXT(df_task_stack):
- .set ., .+INTSTACK_SIZE
+ .space INTSTACK_SIZE
.globl EXT(df_task_stack_end)
EXT(df_task_stack_end):
.align 12
.globl EXT(mc_task_stack)
EXT(mc_task_stack):
- .set ., .+INTSTACK_SIZE
+ .space INTSTACK_SIZE
.globl EXT(mc_task_stack_end)
EXT(mc_task_stack_end):
-
-#if MACH_KDB
-/*
- * Kernel debugger stack for each processor.
- */
- .align 12
- .globl EXT(db_stack_store)
-EXT(db_stack_store):
- .set ., .+(INTSTACK_SIZE*MAX_CPUS)
-
-/*
- * Stack for last-ditch debugger task for each processor.
- */
- .align 12
- .globl EXT(db_task_stack_store)
-EXT(db_task_stack_store):
- .set ., .+(INTSTACK_SIZE*MAX_CPUS)
-
-/*
- * per-processor kernel debugger stacks
- */
- .align ALIGN
- .globl EXT(kgdb_stack_store)
-EXT(kgdb_stack_store):
- .set ., .+(INTSTACK_SIZE*MAX_CPUS)
-#endif /* MACH_KDB */
-
/*
* BSP CPU starts here.
* eax points to kernbootstruct
*/
-
.text
.align ALIGN
.globl EXT(_start)
EXT(_start):
movl $EXT(protected_mode_gdtr), %eax
lgdtl (%eax)
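+ /*
+  * lgdtl loads the 6-byte limit/base image at protected_mode_gdtr;
+  * presumably this GDT also carries the 64-bit code descriptor used
+  * by the mode switch below.
+  */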
- mov $(KERNEL_DS), %ax
- mov %ax, %ds
- mov %ax, %es
- mov %ax, %ss
- xor %eax, %eax
- mov %ax, %fs
- mov %ax, %gs
-
/* the following code is shared by the master CPU and all slave CPUs */
L_pstart_common:
/*
 * switch to 64-bit mode
 */
SWITCH_TO_64BIT_MODE
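+ /*
+  * (SWITCH_TO_64BIT_MODE is assumed to enable long mode -- CR4.PAE,
+  * EFER.LME, CR0.PG -- and to reload %cs with a 64-bit code segment;
+  * the 64-bit instructions below depend on that.)
+  */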
+ /* Flush data segment selectors */
+ xor %eax, %eax
+ mov %ax, %ss
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %fs
+ mov %ax, %gs
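+ /*
+  * In 64-bit mode the CPU ignores the base and limit of the data
+  * segment selectors (the %fs/%gs bases come from their MSRs
+  * instead), so null selectors suffice here.
+  */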
+
/* %edi = boot_args_start */
leaq _vstart(%rip), %rcx
movw %gs, saved_gs(%rip)
movw %ss, saved_ss(%rip)
- /* save the 64bit kernel gs base */
+ /* save the 64bit user and kernel gs base */
+ /* note: the user base is currently swapped into the kernel gs base MSR */
mov $MSR_IA32_KERNEL_GS_BASE, %rcx
+ rdmsr
+ movl %eax, saved_ugs_base(%rip)
+ movl %edx, saved_ugs_base+4(%rip)
swapgs
rdmsr
movl %eax, saved_kgs_base(%rip)
movl %edx, saved_kgs_base+4(%rip)
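+ /*
+  * swapgs exchanges GS.base with MSR_IA32_KERNEL_GS_BASE, so while
+  * running on the kernel gs the MSR holds the user base: the first
+  * rdmsr above captures the user base, and the rdmsr after swapgs
+  * captures the kernel base.
+  */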
/* protected mode, paging enabled */
POSTCODE(ACPI_WAKE_PAGED_ENTRY)
- /* switch to kernel data segment */
- movw $(KERNEL_DS), %ax
+ /* load null segment selectors */
+ xor %eax, %eax
+ movw %ax, %ss
movw %ax, %ds
/* restore local and interrupt descriptor tables */
lldt saved_ldt(%rip)
/* restore segment registers */
movw saved_gs(%rip), %gs
movw saved_ss(%rip), %ss
- /* save the 64bit kernel gs base */
+ /* restore the 64bit kernel and user gs base */
mov $MSR_IA32_KERNEL_GS_BASE, %rcx
movl saved_kgs_base(%rip), %eax
movl saved_kgs_base+4(%rip), %edx
wrmsr
swapgs
+ movl saved_ugs_base(%rip), %eax
+ movl saved_ugs_base+4(%rip), %edx
+ wrmsr
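+ /*
+  * Restore order matters: wrmsr puts the kernel base in the MSR,
+  * swapgs moves it into GS.base (parking the stale GS.base value in
+  * the MSR), and the second wrmsr overwrites that with the saved
+  * user base. %rcx still selects MSR_IA32_KERNEL_GS_BASE throughout.
+  */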
- //K64todo verify this TSS stuff
/*
* Restore task register. Before doing this, clear the busy flag
* in the TSS descriptor set by the CPU.
*/
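+ /*
+  * A minimal sketch of that sequence (saved_gdt and KERNEL_TSS are
+  * hypothetical here; only saved_tr is declared below). ltr takes a
+  * #GP fault if the TSS descriptor is already marked busy, hence the
+  * busy bit -- bit 1 of descriptor byte 5 -- is cleared first:
+  *
+  *	lea	saved_gdt(%rip), %rax
+  *	movq	2(%rax), %rdx		; GDT base follows the limit word
+  *	movl	$(KERNEL_TSS), %eax	; TSS selector
+  *	andl	$~7, %eax		; strip TI/RPL bits
+  *	addq	%rax, %rdx		; %rdx -> TSS descriptor
+  *	andb	$(~0x02), 5(%rdx)	; clear the busy type bit
+  *	ltr	saved_tr(%rip)		; reload TR
+  */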
saved_ldt: .word 0
saved_tr: .word 0
saved_kgs_base: .quad 0
+saved_ugs_base: .quad 0