/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <mach/machine/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <i386/lock.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/pmap.h>
#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap_internal.h>
#endif /* i386 */
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#include <kern/misc_protos.h>

#define K_INTR_GATE (ACC_P|ACC_PL_K|ACC_INTR_GATE)
#define U_INTR_GATE (ACC_P|ACC_PL_U|ACC_INTR_GATE)

// Declare macros that will declare the externs
#define TRAP(n, name)		extern void *name ;
#define TRAP_ERR(n, name)	extern void *name ;
#define TRAP_SPC(n, name)	extern void *name ;
#define TRAP_IST1(n, name)	extern void *name ;
#define TRAP_IST2(n, name)	extern void *name ;
#define INTERRUPT(n)		extern void *_intr_ ## n ;
#define USER_TRAP(n, name)	extern void *name ;
#define USER_TRAP_SPC(n, name)	extern void *name ;

// Include the table to declare the externs
#include "../x86_64/idt_table.h"

// Undef the macros, then redefine them so we can declare the table
#undef TRAP
#undef TRAP_ERR
#undef TRAP_SPC
#undef TRAP_IST1
#undef TRAP_IST2
#undef INTERRUPT
#undef USER_TRAP
#undef USER_TRAP_SPC

#define TRAP(n, name)			\
	[n] = {				\
		(uintptr_t)&name,	\
		KERNEL64_CS,		\
		0,			\
		K_INTR_GATE,		\
		0			\
	},

#define TRAP_ERR	TRAP
#define TRAP_SPC	TRAP

#define TRAP_IST1(n, name)		\
	[n] = {				\
		(uintptr_t)&name,	\
		KERNEL64_CS,		\
		1,			\
		K_INTR_GATE,		\
		0			\
	},

#define TRAP_IST2(n, name)		\
	[n] = {				\
		(uintptr_t)&name,	\
		KERNEL64_CS,		\
		2,			\
		K_INTR_GATE,		\
		0			\
	},

#define INTERRUPT(n)			\
	[n] = {				\
		(uintptr_t)&_intr_ ## n,\
		KERNEL64_CS,		\
		0,			\
		K_INTR_GATE,		\
		0			\
	},

#define USER_TRAP(n, name)		\
	[n] = {				\
		(uintptr_t)&name,	\
		KERNEL64_CS,		\
		0,			\
		U_INTR_GATE,		\
		0			\
	},

#define USER_TRAP_SPC	USER_TRAP

// Declare the table using the macros we just set up
struct fake_descriptor64 master_idt64[IDTSZ]
	__attribute__ ((section("__HIB,__desc")))
	__attribute__ ((aligned(PAGE_SIZE))) = {
#include "../x86_64/idt_table.h"
};
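
/*
 * Editor's sketch of the X-macro technique used above (names hypothetical,
 * guarded with #if 0 so it is illustration only). The kernel performs the
 * second expansion by re-including idt_table.h; this sketch uses a list
 * macro instead, but the mechanism is the same: one entry list, expanded
 * first as extern declarations and then as designated initializers.
 */
#if 0
#define ENTRY_LIST \
	ENTRY(0, example_div_err) \
	ENTRY(3, example_breakpoint)

#define ENTRY(n, name)	extern void *name;	/* pass 1: declare externs */
ENTRY_LIST
#undef ENTRY

#define ENTRY(n, name)	[n] = { (uintptr_t)&name },	/* pass 2: build table */
struct example_gate { uintptr_t off; } example_idt[4] = {
	ENTRY_LIST
};
#undef ENTRY
#endif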

/*
 * First cpu's interrupt stack.
 */
extern uint32_t	low_intstack[];		/* bottom */
extern uint32_t	low_eintstack[];	/* top */

/*
 * Per-cpu data area pointers.
 * The master cpu (cpu 0) has its data area statically allocated;
 * others are allocated dynamically and this array is updated at runtime.
 */
static cpu_data_t cpu_data_master = {
	.cpu_this = &cpu_data_master,
	.cpu_nanotime = &pal_rtc_nanotime_info,
	.cpu_int_stack_top = (vm_offset_t) low_eintstack,
};
cpu_data_t	*cpu_data_ptr[MAX_CPUS] = { [0] = &cpu_data_master };

decl_simple_lock_data(,ncpus_lock);	/* protects real_ncpus */
unsigned int	real_ncpus = 1;
unsigned int	max_ncpus = MAX_CPUS;

extern void hi64_sysenter(void);
extern void hi64_syscall(void);

/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor.  The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables.  The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */

/*
 * Allocate and initialize the per-processor descriptor tables.
 */

struct fake_descriptor ldt_desc_pattern = {
	(unsigned int) 0,
	LDTSZ_MIN * sizeof(struct fake_descriptor) - 1,
	0,
	ACC_P|ACC_PL_K|ACC_LDT
};

struct fake_descriptor tss_desc_pattern = {
	(unsigned int) 0,
	sizeof(struct i386_tss) - 1,
	0,
	ACC_P|ACC_PL_K|ACC_TSS
};

struct fake_descriptor cpudata_desc_pattern = {
	(unsigned int) 0,
	sizeof(cpu_data_t)-1,
	SZ_32,
	ACC_P|ACC_PL_K|ACC_DATA_W
};

#if NCOPY_WINDOWS > 0
struct fake_descriptor userwindow_desc_pattern = {
	(unsigned int) 0,
	((NBPDE * NCOPY_WINDOWS) / PAGE_SIZE) - 1,
	SZ_32 | SZ_G,
	ACC_P|ACC_PL_U|ACC_DATA_W
};
#endif

struct fake_descriptor physwindow_desc_pattern = {
	(unsigned int) 0,
	PAGE_SIZE - 1,
	SZ_32,
	ACC_P|ACC_PL_K|ACC_DATA_W
};

/*
 * This is the expanded, 64-bit variant of the kernel LDT descriptor.
 * When switching to 64-bit mode this replaces the KERNEL_LDT entry
 * and the following empty slot.  This enables the LDT to be referenced
 * in the uber-space remapping window on the kernel.
 */
struct fake_descriptor64 kernel_ldt_desc64 = {
	0,
	LDTSZ_MIN*sizeof(struct fake_descriptor)-1,
	0,
	ACC_P|ACC_PL_K|ACC_LDT,
	0
};

/*
 * This is the expanded, 64-bit variant of the kernel TSS descriptor.
 * It follows the pattern of the KERNEL_LDT.
 */
struct fake_descriptor64 kernel_tss_desc64 = {
	0,
	sizeof(struct x86_64_tss)-1,
	0,
	ACC_P|ACC_PL_K|ACC_TSS,
	0
};

/*
 * Convert a descriptor from fake to real format.
 *
 * Fake descriptor format:
 *	bytes 0..3		base 31..0
 *	bytes 4..5		limit 15..0
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			access byte 1
 *
 * Real descriptor format:
 *	bytes 0..1		limit 15..0
 *	bytes 2..3		base 15..0
 *	byte  4			base 23..16
 *	byte  5			access byte 1
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			base 31..24
 *
 * Fake gate format:
 *	bytes 0..3		offset
 *	bytes 4..5		selector
 *	byte  6			word count << 4 (to match fake descriptor)
 *	byte  7			access byte 1
 *
 * Real gate format:
 *	bytes 0..1		offset 15..0
 *	bytes 2..3		selector
 *	byte  4			word count
 *	byte  5			access byte 1
 *	bytes 6..7		offset 31..16
 */
void
fix_desc(void *d, int num_desc) {
	//early_kprintf("fix_desc(%x, %x)\n", d, num_desc);
	uint8_t *desc = (uint8_t*) d;

	do {
		if ((desc[7] & 0x14) == 0x04) {	/* gate */
			uint32_t offset;
			uint16_t selector;
			uint8_t wordcount;
			uint8_t acc;

			offset = *((uint32_t*)(desc));
			selector = *((uint16_t*)(desc+4));	/* selector occupies bytes 4..5 only */
			wordcount = desc[6] >> 4;
			acc = desc[7];

			*((uint16_t*)desc) = offset & 0xFFFF;
			*((uint16_t*)(desc+2)) = selector;
			desc[4] = wordcount;
			desc[5] = acc;
			*((uint16_t*)(desc+6)) = offset >> 16;

		} else {	/* descriptor */
			uint32_t base;
			uint16_t limit;
			uint8_t acc1, acc2;

			base = *((uint32_t*)(desc));
			limit = *((uint16_t*)(desc+4));
			acc2 = desc[6];
			acc1 = desc[7];

			*((uint16_t*)(desc)) = limit;
			*((uint16_t*)(desc+2)) = base & 0xFFFF;
			desc[4] = (base >> 16) & 0xFF;
			desc[5] = acc1;
			desc[6] = acc2;
			desc[7] = base >> 24;
		}
		desc += 8;
	} while (--num_desc);
}
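
/*
 * Worked example (editor's illustration, not part of the original source):
 * a fake descriptor with base 0x00100000, limit 0xFFFF and access bytes
 * 0x9A/0xCF occupies these little-endian bytes before and after fix_desc():
 *
 *	fake:  00 00 10 00 FF FF CF 9A
 *	real:  FF FF 00 00 10 9A CF 00
 *
 * Byte 7 is 0x9A here, so (0x9A & 0x14) != 0x04 and the descriptor (not
 * gate) branch is taken, exactly as the layout tables above describe.
 */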

void
fix_desc64(void *descp, int count)
{
	struct fake_descriptor64	*fakep;
	union {
		struct real_gate64		gate;
		struct real_descriptor64	desc;
	}				real;
	int				i;

	fakep = (struct fake_descriptor64 *) descp;

	for (i = 0; i < count; i++, fakep++) {
		/*
		 * Construct the real descriptor locally.
		 */

		bzero((void *) &real, sizeof(real));

		switch (fakep->access & ACC_TYPE) {
		case 0:
			break;
		case ACC_CALL_GATE:
		case ACC_INTR_GATE:
		case ACC_TRAP_GATE:
			real.gate.offset_low16 = (uint16_t)(fakep->offset64 & 0xFFFF);
			real.gate.selector16 = fakep->lim_or_seg & 0xFFFF;
			real.gate.IST = fakep->size_or_IST & 0x7;
			real.gate.access8 = fakep->access;
			real.gate.offset_high16 = (uint16_t)((fakep->offset64>>16) & 0xFFFF);
			real.gate.offset_top32 = (uint32_t)(fakep->offset64>>32);
			break;
		default:	/* Otherwise */
			real.desc.limit_low16 = fakep->lim_or_seg & 0xFFFF;
			real.desc.base_low16 = (uint16_t)(fakep->offset64 & 0xFFFF);
			real.desc.base_med8 = (uint8_t)((fakep->offset64 >> 16) & 0xFF);
			real.desc.access8 = fakep->access;
			real.desc.limit_high4 = (fakep->lim_or_seg >> 16) & 0xF;	/* 4-bit field */
			real.desc.granularity4 = fakep->size_or_IST;
			real.desc.base_high8 = (uint8_t)((fakep->offset64 >> 24) & 0xFF);
			real.desc.base_top32 = (uint32_t)(fakep->offset64>>32);
		}

		/*
		 * Now copy back over the fake structure.
		 */
		bcopy((void *) &real, (void *) fakep, sizeof(real));
	}
}

static void
cpu_gdt_alias(vm_map_offset_t gdt, vm_map_offset_t alias)
{
	pt_entry_t *pte = NULL;

	/* Require page alignment */
	assert(page_aligned(gdt));
	assert(page_aligned(alias));

	pte = pmap_pte(kernel_pmap, alias);
	pmap_store_pte(pte, kvtophys(gdt) | INTEL_PTE_REF
					  | INTEL_PTE_MOD
					  | INTEL_PTE_WIRED
					  | INTEL_PTE_VALID
					  | INTEL_PTE_WRITE
					  | INTEL_PTE_NX);

	/* TLB flush unnecessary because the target processor isn't running yet */
}


void
cpu_desc_init64(cpu_data_t *cdp)
{
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	if (cdp == &cpu_data_master) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the index pointers to the low memory space.
		 */
		cdi->cdi_ktss = (void *)&master_ktss64;
		cdi->cdi_sstk = (vm_offset_t) &master_sstk.top;
		cdi->cdi_gdt.ptr = (void *)MASTER_GDT_ALIAS;
		cdi->cdi_idt.ptr = (void *)MASTER_IDT_ALIAS;
		cdi->cdi_ldt = (struct fake_descriptor *) master_ldt;

		/* Replace the expanded LDTs and TSS slots in the GDT */
		kernel_ldt_desc64.offset64 = (uintptr_t) &master_ldt;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] =
			kernel_ldt_desc64;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(USER_LDT)] =
			kernel_ldt_desc64;
		kernel_tss_desc64.offset64 = (uintptr_t) &master_ktss64;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] =
			kernel_tss_desc64;

		/* Fix up the expanded descriptors for 64-bit. */
		fix_desc64((void *) &master_idt64, IDTSZ);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_LDT)], 1);
		fix_desc64((void *) &master_gdt[sel_idx(USER_LDT)], 1);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_TSS)], 1);

		/*
		 * Set the NMI/fault stacks as IST2/IST1 in the 64-bit TSS
		 * Note: this will be dynamically re-allocated in VM later.
		 */
		master_ktss64.ist2 = (uintptr_t) low_eintstack;
		master_ktss64.ist1 = (uintptr_t) low_eintstack
					- sizeof(x86_64_intr_stack_frame_t);

	} else if (cdi->cdi_ktss == NULL) {	/* Skipping re-init on wake */
		cpu_desc_table64_t	*cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;

		/*
		 * Per-cpu GDT, IDT, KTSS descriptors are allocated in kernel
		 * heap (cpu_desc_table).
		 * LDT descriptors are mapped into a separate area.
		 * GDT descriptors are addressed by alias to avoid sgdt leaks to user-space.
		 */
		cdi->cdi_idt.ptr = (void *)MASTER_IDT_ALIAS;
		cdi->cdi_gdt.ptr = (void *)CPU_GDT_ALIAS(cdp->cpu_number);
		cdi->cdi_ktss = (void *)&cdt->ktss;
		cdi->cdi_sstk = (vm_offset_t)&cdt->sstk.top;
		cdi->cdi_ldt = cdp->cpu_ldtp;

		/* Make the virtual alias address for the GDT */
		cpu_gdt_alias((vm_map_offset_t) &cdt->gdt,
			      (vm_map_offset_t) cdi->cdi_gdt.ptr);

		/*
		 * Copy the tables
		 */
		bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
		bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, sizeof(master_ldt));
		bcopy((char *)&master_ktss64, (char *)&cdt->ktss, sizeof(struct x86_64_tss));

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 */
		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldt;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] =
			kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);

		kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldt;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] =
			kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1);

		kernel_tss_desc64.offset64 = (uintptr_t) cdi->cdi_ktss;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] =
			kernel_tss_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);

		/* Set (zeroed) fault stack as IST1, NMI intr stack IST2 */
		bzero((void *) cdt->fstk, sizeof(cdt->fstk));
		cdt->ktss.ist2 = (unsigned long)cdt->fstk + sizeof(cdt->fstk);
		cdt->ktss.ist1 = cdt->ktss.ist2
					- sizeof(x86_64_intr_stack_frame_t);
	}

	/* Require that the top of the sysenter stack is 16-byte aligned */
	if ((cdi->cdi_sstk % 16) != 0)
		panic("cpu_desc_init64() sysenter stack not 16-byte aligned");
}


void
cpu_desc_load64(cpu_data_t *cdp)
{
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	/* Stuff the kernel per-cpu data area address into the MSRs */
	wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
	wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);

	/*
	 * Ensure the TSS segment's busy bit is clear. This is required
	 * for the case of reloading descriptors at wake to avoid
	 * their complete re-initialization.
	 */
	gdt_desc_p(KERNEL_TSS)->access &= ~ACC_TSS_BUSY;

	/* Load the GDT, LDT, IDT and TSS */
	cdi->cdi_gdt.size = sizeof(struct real_descriptor)*GDTSZ - 1;
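	/*
	 * Editor's note (an assumption, not stated in the original source):
	 * the IDT limit is deliberately inflated by the cpu number below,
	 * presumably so the owning cpu can later be recovered from a stored
	 * IDTR value; the extra limit bytes are harmless since no vector
	 * beyond the 256 real entries is ever taken.
	 */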
	cdi->cdi_idt.size = 0x1000 + cdp->cpu_number;
	lgdt((uintptr_t *) &cdi->cdi_gdt);
	lidt((uintptr_t *) &cdi->cdi_idt);
	lldt(KERNEL_LDT);
	set_tr(KERNEL_TSS);

#if GPROF // Hack to enable mcount to work on K64
	__asm__ volatile("mov %0, %%gs" : : "rm" ((unsigned short)(KERNEL_DS)));
#endif
}


/*
 * Set MSRs for sysenter/sysexit and syscall/sysret for 64-bit.
 */
static void
fast_syscall_init64(__unused cpu_data_t *cdp)
{
	wrmsr64(MSR_IA32_SYSENTER_CS, SYSENTER_CS);
	wrmsr64(MSR_IA32_SYSENTER_EIP, (uintptr_t) hi64_sysenter);
	wrmsr64(MSR_IA32_SYSENTER_ESP, current_sstk());
	/* Enable syscall/sysret */
	wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_SCE);

	/*
	 * MSRs for 64-bit syscall/sysret
	 * Note USER_CS because sysret uses this + 16 when returning to
	 * 64-bit code.
	 */
	wrmsr64(MSR_IA32_LSTAR, (uintptr_t) hi64_syscall);
	wrmsr64(MSR_IA32_STAR, (((uint64_t)USER_CS) << 48) |
			       (((uint64_t)KERNEL64_CS) << 32));
	/*
	 * Emulate eflags cleared by sysenter but note that
	 * we also clear the trace trap to avoid the complications
	 * of single-stepping into a syscall. The nested task bit
	 * is also cleared to avoid a spurious "task switch"
	 * should we choose to return via an IRET.
	 */
	wrmsr64(MSR_IA32_FMASK, EFL_DF|EFL_IF|EFL_TF|EFL_NT);
}
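
/*
 * Editor's sketch of the IA32_STAR layout assumed above (standard x86-64
 * architectural behavior, added here for reference):
 *
 *	STAR[63:48]	sysret: CS = this field + 16 when returning to
 *			64-bit code, SS = this field + 8
 *	STAR[47:32]	syscall: CS = this field, SS = this field + 8
 *
 * This is why USER_CS, rather than the 64-bit user code selector itself,
 * goes into bits 63:48: the hardware adds 16 to reach it.
 */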


cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	int		ret;
	cpu_data_t	*cdp;

	if (is_boot_cpu) {
		assert(real_ncpus == 1);
		cdp = cpu_datap(0);
		if (cdp->cpu_processor == NULL) {
			simple_lock_init(&ncpus_lock, 0);
			cdp->cpu_processor = cpu_processor_alloc(TRUE);
#if NCOPY_WINDOWS > 0
			cdp->cpu_pmap = pmap_cpu_alloc(TRUE);
#endif
		}
		return cdp;
	}

	/*
	 * Allocate per-cpu data:
	 */
	ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t));
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() failed, ret=%d\n", ret);
		goto abort;
	}
	bzero((void*) cdp, sizeof(cpu_data_t));
	cdp->cpu_this = cdp;

	/*
	 * Allocate interrupt stack:
	 */
	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *) &cdp->cpu_int_stack_top,
			 INTSTACK_SIZE);
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() int stack failed, ret=%d\n", ret);
		goto abort;
	}
	bzero((void*) cdp->cpu_int_stack_top, INTSTACK_SIZE);
	cdp->cpu_int_stack_top += INTSTACK_SIZE;

	/*
	 * Allocate descriptor table:
	 */
	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *) &cdp->cpu_desc_tablep,
			 sizeof(cpu_desc_table64_t));
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() desc_table failed, ret=%d\n", ret);
		goto abort;
	}

	/*
	 * Allocate LDT
	 */
	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *) &cdp->cpu_ldtp,
			 sizeof(struct real_descriptor) * LDTSZ);
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() ldt failed, ret=%d\n", ret);
		goto abort;
	}

#if CONFIG_MCA
	/* Machine-check shadow register allocation. */
	mca_cpu_alloc(cdp);
#endif

	simple_lock(&ncpus_lock);

	cpu_data_ptr[real_ncpus] = cdp;
	cdp->cpu_number = real_ncpus;
	real_ncpus++;
	simple_unlock(&ncpus_lock);

	cdp->cpu_nanotime = &pal_rtc_nanotime_info;

	kprintf("cpu_data_alloc(%d) %p desc_table: %p "
		"ldt: %p "
		"int_stack: 0x%lx-0x%lx\n",
		cdp->cpu_number, cdp, cdp->cpu_desc_tablep, cdp->cpu_ldtp,
		(long)(cdp->cpu_int_stack_top - INTSTACK_SIZE), (long)(cdp->cpu_int_stack_top));

	return cdp;

abort:
	if (cdp) {
		if (cdp->cpu_desc_tablep)
			kfree((void *) cdp->cpu_desc_tablep,
			      sizeof(cpu_desc_table64_t));
		if (cdp->cpu_int_stack_top)
			kfree((void *) (cdp->cpu_int_stack_top - INTSTACK_SIZE),
			      INTSTACK_SIZE);
		kfree((void *) cdp, sizeof(*cdp));
	}
	return NULL;
}

boolean_t
valid_user_data_selector(uint16_t selector)
{
	sel_t	sel = selector_to_sel(selector);

	if (selector == 0)
		return (TRUE);

	if (sel.ti == SEL_LDT)
		return (TRUE);
	else if (sel.index < GDTSZ) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
			return (TRUE);
	}

	return (FALSE);
}

boolean_t
valid_user_code_selector(uint16_t selector)
{
	sel_t	sel = selector_to_sel(selector);

	if (selector == 0)
		return (FALSE);

	if (sel.ti == SEL_LDT) {
		if (sel.rpl == USER_PRIV)
			return (TRUE);
	}
	else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
			return (TRUE);
	}

	return (FALSE);
}

boolean_t
valid_user_stack_selector(uint16_t selector)
{
	sel_t	sel = selector_to_sel(selector);

	if (selector == 0)
		return (FALSE);

	if (sel.ti == SEL_LDT) {
		if (sel.rpl == USER_PRIV)
			return (TRUE);
	}
	else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
		if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
			return (TRUE);
	}

	return (FALSE);
}

boolean_t
valid_user_segment_selectors(uint16_t cs,
			     uint16_t ss,
			     uint16_t ds,
			     uint16_t es,
			     uint16_t fs,
			     uint16_t gs)
{
	return valid_user_code_selector(cs)  &&
	       valid_user_stack_selector(ss) &&
	       valid_user_data_selector(ds)  &&
	       valid_user_data_selector(es)  &&
	       valid_user_data_selector(fs)  &&
	       valid_user_data_selector(gs);
}
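
/*
 * Editor's sketch of the x86 selector fields these checks decode (generic
 * architecture facts; the example value is illustrative, not taken from
 * this tree):
 *
 *	bits 15..3	index into GDT or LDT
 *	bit  2		table indicator (0 = GDT, 1 = LDT)
 *	bits 1..0	requested privilege level (3 = USER_PRIV)
 *
 * e.g. selector 0x001F -> index 3, ti = 1 (LDT), rpl = 3, which all three
 * predicates above would accept.
 */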

#if NCOPY_WINDOWS > 0

static vm_offset_t user_window_base = 0;

void
cpu_userwindow_init(int cpu)
{
	cpu_data_t	*cdp = cpu_data_ptr[cpu];
	vm_offset_t	user_window;
	vm_offset_t	vaddr;
	int		num_cpus;

	num_cpus = ml_get_max_cpus();

	if (cpu >= num_cpus)
		panic("cpu_userwindow_init: cpu >= num_cpus");

	if (user_window_base == 0) {

		if (vm_allocate(kernel_map, &vaddr,
				(NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE,
				VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
			panic("cpu_userwindow_init: "
			      "couldn't allocate user map window");

		/*
		 * window must start on a page table boundary
		 * in the virtual address space
		 */
		user_window_base = (vaddr + (NBPDE - 1)) & ~(NBPDE - 1);

		/*
		 * get rid of any allocation leading up to our
		 * starting boundary
		 */
		vm_deallocate(kernel_map, vaddr, user_window_base - vaddr);

		/*
		 * get rid of tail that we don't need
		 */
		user_window = user_window_base +
			(NBPDE * NCOPY_WINDOWS * num_cpus);

		vm_deallocate(kernel_map, user_window,
			      (vaddr +
			       ((NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE)) -
			      user_window);
	}

	user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE);

	cdp->cpu_copywindow_base = user_window;
	/*
	 * Abuse this pdp entry, the pdp now actually points to
	 * an array of copy windows addresses.
	 */
	cdp->cpu_copywindow_pdp = pmap_pde(kernel_pmap, user_window);
}
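
/*
 * Worked example of the sizing above (editor's illustration; the numbers
 * are hypothetical, not configuration values from this tree): with
 * NBPDE = 4MB, NCOPY_WINDOWS = 4 and num_cpus = 2, vm_allocate() reserves
 * 4MB*4*2 + 4MB = 36MB so that a 32MB window array can always be carved
 * out starting on an NBPDE boundary; the unaligned head and the unused
 * tail are then returned with vm_deallocate(). Each cpu's windows then
 * start at user_window_base + cpu * 16MB.
 */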

void
cpu_physwindow_init(int cpu)
{
	cpu_data_t	*cdp = cpu_data_ptr[cpu];
	vm_offset_t	phys_window = cdp->cpu_physwindow_base;

	if (phys_window == 0) {
		if (vm_allocate(kernel_map, &phys_window,
				PAGE_SIZE, VM_FLAGS_ANYWHERE)
		    != KERN_SUCCESS)
			panic("cpu_physwindow_init: "
			      "couldn't allocate phys map window");

		/*
		 * make sure the page that encompasses the
		 * pte pointer we're interested in actually
		 * exists in the page table
		 */
		pmap_expand(kernel_pmap, phys_window, PMAP_EXPAND_OPTIONS_NONE);

		cdp->cpu_physwindow_base = phys_window;
		cdp->cpu_physwindow_ptep = vtopte(phys_window);
	}
}
#endif /* NCOPY_WINDOWS > 0 */

/*
 * Set up the fast system call MSRs for the current processor.
 */
void
cpu_mode_init(cpu_data_t *cdp)
{
	fast_syscall_init64(cdp);
}

/*
 * Allocate a new interrupt stack for the boot processor from the
 * heap rather than continue to use the statically allocated space.
 * Also switch to a dynamically allocated cpu data area.
 */
void
cpu_data_realloc(void)
{
	int		ret;
	vm_offset_t	istk;
	vm_offset_t	fstk;
	cpu_data_t	*cdp;
	boolean_t	istate;

	ret = kmem_alloc(kernel_map, &istk, INTSTACK_SIZE);
	if (ret != KERN_SUCCESS) {
		panic("cpu_data_realloc() stack alloc, ret=%d\n", ret);
	}
	bzero((void*) istk, INTSTACK_SIZE);
	istk += INTSTACK_SIZE;

	ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t));
	if (ret != KERN_SUCCESS) {
		panic("cpu_data_realloc() cpu data alloc, ret=%d\n", ret);
	}

	/* Copy old contents into new area and make fix-ups */
	assert(cpu_number() == 0);
	bcopy((void *) cpu_data_ptr[0], (void*) cdp, sizeof(cpu_data_t));
	cdp->cpu_this = cdp;
	cdp->cpu_int_stack_top = istk;
	timer_call_queue_init(&cdp->rtclock_timer.queue);

	/* Allocate the separate fault stack */
	ret = kmem_alloc(kernel_map, &fstk, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {
		panic("cpu_data_realloc() fault stack alloc, ret=%d\n", ret);
	}
	bzero((void*) fstk, PAGE_SIZE);
	fstk += PAGE_SIZE;

	/*
	 * With interrupts disabled, commit the new areas.
	 */
	istate = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr[0] = cdp;
	master_ktss64.ist2 = (uintptr_t) fstk;
	master_ktss64.ist1 = (uintptr_t) fstk
				- sizeof(x86_64_intr_stack_frame_t);
	wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
	wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);
	(void) ml_set_interrupts_enabled(istate);

	kprintf("Reallocated master cpu data: %p,"
		" interrupt stack: %p, fault stack: %p\n",
		(void *) cdp, (void *) istk, (void *) fstk);
}