/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <mach/machine/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <i386/bit_routines.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>

#include <i386/pmap.h>
#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap_internal.h>
#endif /* i386 */
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#include <kern/misc_protos.h>
#define K_INTR_GATE	(ACC_P|ACC_PL_K|ACC_INTR_GATE)
#define U_INTR_GATE	(ACC_P|ACC_PL_U|ACC_INTR_GATE)
// Declare macros that will declare the externs
#define TRAP(n, name)		extern void *name ;
#define TRAP_ERR(n, name)	extern void *name ;
#define TRAP_SPC(n, name)	extern void *name ;
#define TRAP_IST1(n, name)	extern void *name ;
#define TRAP_IST2(n, name)	extern void *name ;
#define INTERRUPT(n)		extern void *_intr_ ## n ;
#define USER_TRAP(n, name)	extern void *name ;
#define USER_TRAP_SPC(n, name)	extern void *name ;

// Include the table to declare the externs
#include "../x86_64/idt_table.h"
// Undef the macros, then redefine them so we can declare the table
#undef	TRAP
#undef	TRAP_ERR
#undef	TRAP_SPC
#undef	TRAP_IST1
#undef	TRAP_IST2
#undef	INTERRUPT
#undef	USER_TRAP
#undef	USER_TRAP_SPC

#define TRAP(n, name)		[n] = { (uintptr_t)&name,        KERNEL64_CS, 0, K_INTR_GATE, 0 },
#define TRAP_ERR		TRAP
#define TRAP_SPC		TRAP
#define TRAP_IST1(n, name)	[n] = { (uintptr_t)&name,        KERNEL64_CS, 1, K_INTR_GATE, 0 },
#define TRAP_IST2(n, name)	[n] = { (uintptr_t)&name,        KERNEL64_CS, 2, K_INTR_GATE, 0 },
#define INTERRUPT(n)		[n] = { (uintptr_t)&_intr_ ## n, KERNEL64_CS, 0, K_INTR_GATE, 0 },
#define USER_TRAP(n, name)	[n] = { (uintptr_t)&name,        KERNEL64_CS, 0, U_INTR_GATE, 0 },
#define USER_TRAP_SPC		USER_TRAP
// Declare the table using the macros we just set up
struct fake_descriptor64 master_idt64[IDTSZ]
	__attribute__ ((section("__HIB,__desc")))
	__attribute__ ((aligned(PAGE_SIZE))) = {
#include "../x86_64/idt_table.h"
};
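/*
 * Aside: the double inclusion of idt_table.h above is the classic
 * "X-macro" pattern -- one table, expanded twice under different macro
 * definitions.  A minimal, self-contained sketch of the same technique
 * (hypothetical names, not part of this file):
 *
 *	// events.h -- the table, written once
 *	EVENT(0, reset)
 *	EVENT(1, timer)
 *
 *	// Pass 1: declare the handlers
 *	#define EVENT(n, name)	extern void name ## _handler(void);
 *	#include "events.h"
 *	#undef EVENT
 *
 *	// Pass 2: build a dispatch table indexed by event number
 *	#define EVENT(n, name)	[n] = name ## _handler,
 *	void (*dispatch[])(void) = {
 *	#include "events.h"
 *	};
 *	#undef EVENT
 */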
/*
 * First CPU's interrupt stack.
 */
extern uint32_t		low_intstack[];		/* bottom */
extern uint32_t		low_eintstack[];	/* top */
/*
 * Per-cpu data area pointers.
 * The master cpu (cpu 0) has its data area statically allocated;
 * others are allocated dynamically and this array is updated at runtime.
 */
static cpu_data_t cpu_data_master = {
	.cpu_this = &cpu_data_master,
	.cpu_nanotime = &pal_rtc_nanotime_info,
	.cpu_int_stack_top = (vm_offset_t) low_eintstack,
};
cpu_data_t	*cpu_data_ptr[MAX_CPUS] = { [0] = &cpu_data_master };

decl_simple_lock_data(,ncpus_lock);	/* protects real_ncpus */
unsigned int	real_ncpus = 1;
unsigned int	max_ncpus = MAX_CPUS;
extern void hi64_sysenter(void);
extern void hi64_syscall(void);

/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor.  The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables.  The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */

/*
 * Allocate and initialize the per-processor descriptor tables.
 */
/*
 * This is the expanded, 64-bit variant of the kernel LDT descriptor.
 * When switching to 64-bit mode this replaces the KERNEL_LDT entry
 * and the following empty slot.  This enables the LDT to be referenced
 * in the uber-space remapping window on the kernel.
 */
struct fake_descriptor64 kernel_ldt_desc64 = {
	0,
	LDTSZ_MIN*sizeof(struct fake_descriptor)-1,
	0,
	ACC_P|ACC_PL_K|ACC_LDT,
	0
};
/*
 * This is the expanded, 64-bit variant of the kernel TSS descriptor.
 * It follows the same pattern as the KERNEL_LDT.
 */
struct fake_descriptor64 kernel_tss_desc64 = {
	0,
	sizeof(struct x86_64_tss)-1,
	0,
	ACC_P|ACC_PL_K|ACC_TSS,
	0
};
/*
 * Convert a descriptor from fake to real format.
 *
 * Fake descriptor format:
 *	bytes 0..3		base 31..0
 *	bytes 4..5		limit 15..0
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			access byte 1
 *
 * Real descriptor format:
 *	bytes 0..1		limit 15..0
 *	bytes 2..3		base 15..0
 *	byte  4			base 23..16
 *	byte  5			access byte 1
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			base 31..24
 *
 * Fake gate format:
 *	bytes 0..3		offset
 *	bytes 4..5		selector
 *	byte  6			word count << 4 (to match fake descriptor)
 *	byte  7			access byte 1
 *
 * Real gate format:
 *	bytes 0..1		offset 15..0
 *	bytes 2..3		selector
 *	byte  4			word count
 *	byte  5			access byte 1
 *	bytes 6..7		offset 31..16
 */
void
fix_desc(void *d, int num_desc) {
	//early_kprintf("fix_desc(%x, %x)\n", d, num_desc);
	uint8_t *desc = (uint8_t*) d;

	do {
		if ((desc[7] & 0x14) == 0x04) {	/* gate */
			uint32_t	offset;
			uint16_t	selector;
			uint8_t		wordcount;
			uint8_t		access;

			offset = *((uint32_t*)(desc));
			selector = *((uint32_t*)(desc+4));
			wordcount = desc[6] >> 4;
			access = desc[7];

			*((uint16_t*)desc) = offset & 0xFFFF;
			*((uint16_t*)(desc+2)) = selector;
			desc[4] = wordcount;
			desc[5] = access;
			*((uint16_t*)(desc+6)) = offset >> 16;
		} else {	/* descriptor */
			uint32_t	base;
			uint16_t	limit;
			uint8_t		access1;
			uint8_t		access2;

			base = *((uint32_t*)(desc));
			limit = *((uint16_t*)(desc+4));
			access1 = desc[7];
			access2 = desc[6];

			*((uint16_t*)(desc)) = limit;
			*((uint16_t*)(desc+2)) = base & 0xFFFF;
			desc[4] = (base >> 16) & 0xFF;
			desc[5] = access1;
			desc[6] = access2;
			desc[7] = base >> 24;
		}
		desc += 8;
	} while (--num_desc);
}
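#if 0	/* Illustrative self-check, not compiled: one fake data descriptor
	 * with hypothetical values, converted in place by fix_desc().
	 * Assumes the little-endian byte order of i386/x86_64. */
static void
fix_desc_example(void)
{
	/* Fake format: base 0x00123456, limit 0xFFFF, access 0xCF/0x92 */
	uint8_t d[8] = { 0x56, 0x34, 0x12, 0x00, 0xFF, 0xFF, 0xCF, 0x92 };

	fix_desc(d, 1);

	assert(d[0] == 0xFF && d[1] == 0xFF);	/* limit 15..0 */
	assert(d[2] == 0x56 && d[3] == 0x34);	/* base 15..0 */
	assert(d[4] == 0x12);			/* base 23..16 */
	assert(d[5] == 0x92);			/* access byte 1 */
	assert(d[6] == 0xCF);			/* access byte 2 | limit 19..16 */
	assert(d[7] == 0x00);			/* base 31..24 */
}
#endif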
void
fix_desc64(void *descp, int count)
{
	struct fake_descriptor64	*fakep;
	union {
		struct real_gate64		gate;
		struct real_descriptor64	desc;
	}				real;
	int				i;

	fakep = (struct fake_descriptor64 *) descp;

	for (i = 0; i < count; i++, fakep++) {
		/*
		 * Construct the real descriptor locally.
		 */
		bzero((void *) &real, sizeof(real));

		switch (fakep->access & ACC_TYPE) {
		case 0:
			break;
		case ACC_CALL_GATE:
		case ACC_INTR_GATE:
		case ACC_TRAP_GATE:
			real.gate.offset_low16 = (uint16_t)(fakep->offset64 & 0xFFFF);
			real.gate.selector16 = fakep->lim_or_seg & 0xFFFF;
			real.gate.IST = fakep->size_or_IST & 0x7;
			real.gate.access8 = fakep->access;
			real.gate.offset_high16 = (uint16_t)((fakep->offset64>>16) & 0xFFFF);
			real.gate.offset_top32 = (uint32_t)(fakep->offset64>>32);
			break;
		default:	/* Otherwise */
			real.desc.limit_low16 = fakep->lim_or_seg & 0xFFFF;
			real.desc.base_low16 = (uint16_t)(fakep->offset64 & 0xFFFF);
			real.desc.base_med8 = (uint8_t)((fakep->offset64 >> 16) & 0xFF);
			real.desc.access8 = fakep->access;
			real.desc.limit_high4 = (fakep->lim_or_seg >> 16) & 0xFF;
			real.desc.granularity4 = fakep->size_or_IST;
			real.desc.base_high8 = (uint8_t)((fakep->offset64 >> 24) & 0xFF);
			real.desc.base_top32 = (uint32_t)(fakep->offset64>>32);
		}

		/*
		 * Now copy back over the fake structure.
		 */
		bcopy((void *) &real, (void *) fakep, sizeof(real));
	}
}
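#if 0	/* Illustrative sketch, not compiled: how an IDT entry built by a
	 * TRAP_IST1() initializer flows through fix_desc64().
	 * hypothetical_handler is a made-up symbol. */
static void
fix_desc64_example(void)
{
	extern void *hypothetical_handler;
	struct fake_descriptor64 fake = {
		(uintptr_t) &hypothetical_handler,	/* offset64 */
		KERNEL64_CS,				/* lim_or_seg: gate selector */
		1,					/* size_or_IST: use IST1 */
		K_INTR_GATE,				/* access */
		0
	};

	/* K_INTR_GATE contains ACC_INTR_GATE, so the gate cases above
	 * rewrite these 16 bytes as a hardware real_gate64: the handler
	 * address split into offset_low16/offset_high16/offset_top32,
	 * plus selector, IST index, and access byte. */
	fix_desc64((void *) &fake, 1);
}
#endif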
static void
cpu_gdt_alias(vm_map_offset_t gdt, vm_map_offset_t alias)
{
	pt_entry_t *pte = NULL;

	/* Require page alignment */
	assert(page_aligned(gdt));
	assert(page_aligned(alias));

	pte = pmap_pte(kernel_pmap, alias);
	pmap_store_pte(pte, kvtophys(gdt) | INTEL_PTE_REF
					  | INTEL_PTE_MOD
					  | INTEL_PTE_WIRED
					  | INTEL_PTE_VALID
					  | INTEL_PTE_WRITE
					  | INTEL_PTE_NX);

	/* TLB flush unnecessary because target processor isn't running yet */
}
void
cpu_desc_init64(cpu_data_t *cdp)
{
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	if (cdp == &cpu_data_master) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the index pointers to the low memory space.
		 */
		cdi->cdi_ktss    = (void *)&master_ktss64;
		cdi->cdi_sstk    = (vm_offset_t) &master_sstk.top;
		cdi->cdi_gdt.ptr = (void *)MASTER_GDT_ALIAS;
		cdi->cdi_idt.ptr = (void *)MASTER_IDT_ALIAS;
		cdi->cdi_ldt     = (struct fake_descriptor *) master_ldt;

		/* Replace the expanded LDTs and TSS slots in the GDT */
		kernel_ldt_desc64.offset64 = (uintptr_t) &master_ldt;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] =
			kernel_ldt_desc64;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(USER_LDT)] =
			kernel_ldt_desc64;
		kernel_tss_desc64.offset64 = (uintptr_t) &master_ktss64;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] =
			kernel_tss_desc64;

		/* Fix up the expanded descriptors for 64-bit. */
		fix_desc64((void *) &master_idt64, IDTSZ);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_LDT)], 1);
		fix_desc64((void *) &master_gdt[sel_idx(USER_LDT)], 1);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_TSS)], 1);

		/*
		 * Set the NMI/fault stacks as IST2/IST1 in the 64-bit TSS
		 * Note: this will be dynamically re-allocated in VM later.
		 */
		master_ktss64.ist2 = (uintptr_t) low_eintstack;
		master_ktss64.ist1 = (uintptr_t) low_eintstack
					- sizeof(x86_64_intr_stack_frame_t);

	} else if (cdi->cdi_ktss == NULL) {	/* Skipping re-init on wake */
		cpu_desc_table64_t	*cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;

		/*
		 * Per-cpu GDT, IDT, KTSS descriptors are allocated in kernel
		 * heap (cpu_desc_table).
		 * LDT descriptors are mapped into a separate area.
		 * GDT descriptors are addressed by alias to avoid sgdt leaks to user-space.
		 */
		cdi->cdi_idt.ptr  = (void *)MASTER_IDT_ALIAS;
		cdi->cdi_gdt.ptr  = (void *)CPU_GDT_ALIAS(cdp->cpu_number);
		cdi->cdi_ktss     = (void *)&cdt->ktss;
		cdi->cdi_sstk     = (vm_offset_t)&cdt->sstk.top;
		cdi->cdi_ldt      = cdp->cpu_ldtp;

		/* Make the virtual alias address for the GDT */
		cpu_gdt_alias((vm_map_offset_t) &cdt->gdt,
			      (vm_map_offset_t) cdi->cdi_gdt.ptr);

		/*
		 * Copy the tables
		 */
		bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
		bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, sizeof(master_ldt));
		bcopy((char *)&master_ktss64, (char *)&cdt->ktss, sizeof(struct x86_64_tss));

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 */
		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldt;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] =
			kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);

		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldt;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] =
			kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1);

		kernel_tss_desc64.offset64 = (uintptr_t) cdi->cdi_ktss;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] =
			kernel_tss_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);

		/* Set (zeroed) fault stack as IST1, NMI intr stack IST2 */
		bzero((void *) cdt->fstk, sizeof(cdt->fstk));
		cdt->ktss.ist2 = (unsigned long)cdt->fstk + sizeof(cdt->fstk);
		cdt->ktss.ist1 = cdt->ktss.ist2
				 - sizeof(x86_64_intr_stack_frame_t);
	}

	/* Require that the top of the sysenter stack is 16-byte aligned */
	if ((cdi->cdi_sstk % 16) != 0)
		panic("cpu_desc_init64() sysenter stack not 16-byte aligned");
}
void
cpu_desc_load64(cpu_data_t *cdp)
{
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	/* Stuff the kernel per-cpu data area address into the MSRs */
	wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
	wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);

	/*
	 * Ensure the TSS segment's busy bit is clear. This is required
	 * for the case of reloading descriptors at wake to avoid
	 * their complete re-initialization.
	 */
	gdt_desc_p(KERNEL_TSS)->access &= ~ACC_TSS_BUSY;

	/* Load the GDT, LDT, IDT and TSS */
	cdi->cdi_gdt.size = sizeof(struct real_descriptor)*GDTSZ - 1;
	cdi->cdi_idt.size = 0x1000 + cdp->cpu_number;
	lgdt((uintptr_t *) &cdi->cdi_gdt);
	lidt((uintptr_t *) &cdi->cdi_idt);
	lldt(KERNEL_LDT);
	set_tr(KERNEL_TSS);

#if GPROF // Hack to enable mcount to work on K64
	__asm__ volatile("mov %0, %%gs" : : "rm" ((unsigned short)(KERNEL_DS)));
#endif
}
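/*
 * Aside on the two MSR writes above: on x86_64 the swapgs instruction
 * exchanges IA32_GS_BASE with IA32_KERNEL_GS_BASE.  Writing the cpu_data
 * pointer to both means %gs-relative per-cpu accesses resolve correctly
 * whether or not a swapgs pair has executed since the load.
 */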
/*
 * Set MSRs for sysenter/sysexit and syscall/sysret for 64-bit.
 */
static void
fast_syscall_init64(__unused cpu_data_t *cdp)
{
	wrmsr64(MSR_IA32_SYSENTER_CS, SYSENTER_CS);
	wrmsr64(MSR_IA32_SYSENTER_EIP, (uintptr_t) hi64_sysenter);
	wrmsr64(MSR_IA32_SYSENTER_ESP, current_sstk());
	/* Enable syscall/sysret */
	wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_SCE);

	/*
	 * MSRs for 64-bit syscall/sysret
	 * Note USER_CS because sysret uses this + 16 when returning to
	 * 64-bit code.
	 */
	wrmsr64(MSR_IA32_LSTAR, (uintptr_t) hi64_syscall);
	wrmsr64(MSR_IA32_STAR, (((uint64_t)USER_CS) << 48) |
				(((uint64_t)KERNEL64_CS) << 32));

	/*
	 * Emulate eflags cleared by sysenter but note that
	 * we also clear the trace trap to avoid the complications
	 * of single-stepping into a syscall. The nested task bit
	 * is also cleared to avoid a spurious "task switch"
	 * should we choose to return via an IRET.
	 */
	wrmsr64(MSR_IA32_FMASK, EFL_DF|EFL_IF|EFL_TF|EFL_NT);
}
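/*
 * Aside on the IA32_STAR layout programmed above (per the Intel SDM):
 * bits 47:32 hold the syscall CS (KERNEL64_CS; SS is implicitly +8) and
 * bits 63:48 the sysret base selector.  A 64-bit sysret loads
 * CS = base + 16 and SS = base + 8, so programming USER_CS works because
 * the GDT orders the user selectors as USER_CS, USER_DS (+8),
 * USER64_CS (+16).
 */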
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	int		ret;
	cpu_data_t	*cdp;

	if (is_boot_cpu) {
		assert(real_ncpus == 1);
		cdp = cpu_datap(0);
		if (cdp->cpu_processor == NULL) {
			simple_lock_init(&ncpus_lock, 0);
			cdp->cpu_processor = cpu_processor_alloc(TRUE);
#if NCOPY_WINDOWS > 0
			cdp->cpu_pmap = pmap_cpu_alloc(TRUE);
#endif
		}
		return cdp;
	}

	/*
	 * Allocate per-cpu data:
	 */
	ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU);
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() failed, ret=%d\n", ret);
		goto abort;
	}
	bzero((void*) cdp, sizeof(cpu_data_t));
	cdp->cpu_this = cdp;

	/*
	 * Allocate interrupt stack:
	 */
	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *) &cdp->cpu_int_stack_top,
			 INTSTACK_SIZE, VM_KERN_MEMORY_CPU);
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() int stack failed, ret=%d\n", ret);
		goto abort;
	}
	bzero((void*) cdp->cpu_int_stack_top, INTSTACK_SIZE);
	cdp->cpu_int_stack_top += INTSTACK_SIZE;

	/*
	 * Allocate descriptor table:
	 */
	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *) &cdp->cpu_desc_tablep,
			 sizeof(cpu_desc_table64_t),
			 VM_KERN_MEMORY_CPU);
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() desc_table failed, ret=%d\n", ret);
		goto abort;
	}

	/*
	 * Allocate LDT:
	 */
	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *) &cdp->cpu_ldtp,
			 sizeof(struct real_descriptor) * LDTSZ,
			 VM_KERN_MEMORY_CPU);
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() ldt failed, ret=%d\n", ret);
		goto abort;
	}

#if CONFIG_MCA
	/* Machine-check shadow register allocation. */
	mca_cpu_alloc(cdp);
#endif

	simple_lock(&ncpus_lock);

	cpu_data_ptr[real_ncpus] = cdp;
	cdp->cpu_number = real_ncpus;
	real_ncpus++;
	simple_unlock(&ncpus_lock);

	/*
	 * Before this cpu has been assigned a real thread context,
	 * we give it a fake, unique, non-zero thread id which the locking
	 * primitives use as their lock value.
	 * Note that this does not apply to the boot processor, cpu 0, which
	 * transitions to a thread context well before other processors are
	 * started.
	 */
	cdp->cpu_active_thread = (thread_t) (uintptr_t) cdp->cpu_number;

	cdp->cpu_nanotime = &pal_rtc_nanotime_info;

	kprintf("cpu_data_alloc(%d) %p desc_table: %p "
		"ldt: %p "
		"int_stack: 0x%lx-0x%lx\n",
		cdp->cpu_number, cdp, cdp->cpu_desc_tablep, cdp->cpu_ldtp,
		(long)(cdp->cpu_int_stack_top - INTSTACK_SIZE), (long)(cdp->cpu_int_stack_top));

	return cdp;

abort:
	if (cdp) {
		if (cdp->cpu_desc_tablep)
			kfree((void *) cdp->cpu_desc_tablep,
				sizeof(cpu_desc_table64_t));
		if (cdp->cpu_int_stack_top)
			kfree((void *) (cdp->cpu_int_stack_top - INTSTACK_SIZE),
				INTSTACK_SIZE);
		kfree((void *) cdp, sizeof(*cdp));
	}
	return NULL;
}
boolean_t
valid_user_data_selector(uint16_t selector)
{
	sel_t	sel = selector_to_sel(selector);

	if (selector == 0)
		return (TRUE);

	if (sel.ti == SEL_LDT)
		return (TRUE);
	else if (sel.index < GDTSZ) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
			return (TRUE);
	}

	return (FALSE);
}

boolean_t
valid_user_code_selector(uint16_t selector)
{
	sel_t	sel = selector_to_sel(selector);

	if (selector == 0)
		return (FALSE);

	if (sel.ti == SEL_LDT) {
		if (sel.rpl == USER_PRIV)
			return (TRUE);
	}
	else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
			return (TRUE);
		/* Explicitly validate the system code selectors
		 * even if not instantaneously privileged,
		 * since they are dynamically re-privileged at context switch
		 */
		if ((selector == USER_CS) || (selector == USER64_CS))
			return (TRUE);
	}

	return (FALSE);
}

boolean_t
valid_user_stack_selector(uint16_t selector)
{
	sel_t	sel = selector_to_sel(selector);

	if (selector == 0)
		return (FALSE);

	if (sel.ti == SEL_LDT) {
		if (sel.rpl == USER_PRIV)
			return (TRUE);
	}
	else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
			return (TRUE);
	}

	return (FALSE);
}

boolean_t
valid_user_segment_selectors(uint16_t cs,
			     uint16_t ss,
			     uint16_t ds,
			     uint16_t es,
			     uint16_t fs,
			     uint16_t gs)
{
	return valid_user_code_selector(cs)  &&
	       valid_user_stack_selector(ss) &&
	       valid_user_data_selector(ds)  &&
	       valid_user_data_selector(es)  &&
	       valid_user_data_selector(fs)  &&
	       valid_user_data_selector(gs);
}
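/*
 * Aside: a segment selector packs (index << 3) | (table << 2) | rpl,
 * which is what selector_to_sel() unpacks into sel.index, sel.ti and
 * sel.rpl above.  E.g. a hypothetical selector 0x1b decodes as index 3,
 * TI 0 (GDT) and RPL 3, so it is accepted only if GDT entry 3 is marked
 * ACC_PL_U.
 */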
#if NCOPY_WINDOWS > 0

static vm_offset_t user_window_base = 0;

void
cpu_userwindow_init(int cpu)
{
	cpu_data_t	*cdp = cpu_data_ptr[cpu];
	vm_offset_t	user_window;
	vm_offset_t	vaddr;
	int		num_cpus;

	num_cpus = ml_get_max_cpus();

	if (cpu >= num_cpus)
		panic("cpu_userwindow_init: cpu > num_cpus");

	if (user_window_base == 0) {

		if (vm_allocate(kernel_map, &vaddr,
					(NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE,
					VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
			panic("cpu_userwindow_init: "
					"couldn't allocate user map window");

		/*
		 * window must start on a page table boundary
		 * in the virtual address space
		 */
		user_window_base = (vaddr + (NBPDE - 1)) & ~(NBPDE - 1);

		/*
		 * get rid of any allocation leading up to our
		 * starting boundary
		 */
		vm_deallocate(kernel_map, vaddr, user_window_base - vaddr);

		/*
		 * get rid of tail that we don't need
		 */
		user_window = user_window_base +
					(NBPDE * NCOPY_WINDOWS * num_cpus);

		vm_deallocate(kernel_map, user_window,
				(vaddr +
				 ((NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE)) -
				 user_window);
	}

	user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE);

	cdp->cpu_copywindow_base = user_window;
	/*
	 * Abuse this pdp entry, the pdp now actually points to
	 * an array of copy windows addresses.
	 */
	cdp->cpu_copywindow_pdp = pmap_pde(kernel_pmap, user_window);
}
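/*
 * Aside on the alignment arithmetic above:
 * (vaddr + (NBPDE - 1)) & ~(NBPDE - 1) rounds vaddr up to the next
 * page-directory boundary.  With NBPDE = 2MB (0x200000), a hypothetical
 * vaddr of 0x12345678 becomes (0x12345678 + 0x1FFFFF) & ~0x1FFFFF
 * = 0x12400000; the over-allocated head and tail are then returned
 * with vm_deallocate().
 */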
void
cpu_physwindow_init(int cpu)
{
	cpu_data_t	*cdp = cpu_data_ptr[cpu];
	vm_offset_t	phys_window = cdp->cpu_physwindow_base;

	if (phys_window == 0) {
		if (vm_allocate(kernel_map, &phys_window,
				PAGE_SIZE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU))
				!= KERN_SUCCESS)
			panic("cpu_physwindow_init: "
				"couldn't allocate phys map window");

		/*
		 * make sure the page that encompasses the
		 * pte pointer we're interested in actually
		 * exists in the page table
		 */
		pmap_expand(kernel_pmap, phys_window, PMAP_EXPAND_OPTIONS_NONE);

		cdp->cpu_physwindow_base = phys_window;
		cdp->cpu_physwindow_ptep = vtopte(phys_window);
	}
}
#endif /* NCOPY_WINDOWS > 0 */
/*
 * Load the segment descriptor tables for the current processor.
 */
void
cpu_mode_init(cpu_data_t *cdp)
{
	fast_syscall_init64(cdp);
}
/*
 * Allocate a new interrupt stack for the boot processor from the
 * heap rather than continue to use the statically allocated space.
 * Also switch to a dynamically allocated cpu data area.
 */
void
cpu_data_realloc(void)
{
	int		ret;
	vm_offset_t	istk;
	vm_offset_t	fstk;
	cpu_data_t	*cdp;
	boolean_t	istate;

	ret = kmem_alloc(kernel_map, &istk, INTSTACK_SIZE, VM_KERN_MEMORY_CPU);
	if (ret != KERN_SUCCESS) {
		panic("cpu_data_realloc() stack alloc, ret=%d\n", ret);
	}
	bzero((void*) istk, INTSTACK_SIZE);
	istk += INTSTACK_SIZE;

	ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU);
	if (ret != KERN_SUCCESS) {
		panic("cpu_data_realloc() cpu data alloc, ret=%d\n", ret);
	}

	/* Copy old contents into new area and make fix-ups */
	assert(cpu_number() == 0);
	bcopy((void *) cpu_data_ptr[0], (void*) cdp, sizeof(cpu_data_t));
	cdp->cpu_this = cdp;
	cdp->cpu_int_stack_top = istk;
	timer_call_queue_init(&cdp->rtclock_timer.queue);

	/* Allocate the separate fault stack */
	ret = kmem_alloc(kernel_map, &fstk, PAGE_SIZE, VM_KERN_MEMORY_CPU);
	if (ret != KERN_SUCCESS) {
		panic("cpu_data_realloc() fault stack alloc, ret=%d\n", ret);
	}
	bzero((void*) fstk, PAGE_SIZE);
	fstk += PAGE_SIZE;

	/*
	 * With interrupts disabled commit the new areas.
	 */
	istate = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr[0] = cdp;
	master_ktss64.ist2 = (uintptr_t) fstk;
	master_ktss64.ist1 = (uintptr_t) fstk
				- sizeof(x86_64_intr_stack_frame_t);
	wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
	wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);
	(void) ml_set_interrupts_enabled(istate);

	kprintf("Reallocated master cpu data: %p,"
		" interrupt stack: %p, fault stack: %p\n",
		(void *) cdp, (void *) istk, (void *) fstk);
}