/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/lapic.h>
#include <i386/mp_events.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/pmCPU.h>
#include <i386/proc_reg.h>
#include <i386/tsc.h>
#include <i386/cpu_threads.h>
#include <mach/vm_param.h>
#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif

#if DEBUG
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif
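/*
 * Illustrative use (hypothetical call site; DBG() compiles away entirely
 * unless DEBUG is defined):
 *
 *	DBG("cpu %d: lapic id 0x%x\n", target_cpu, lapic_id);
 */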

extern thread_t Shutdown_context(thread_t thread, void (*doshutdown)(processor_t), processor_t processor);
extern void wakeup(void *);
extern unsigned KernelRelocOffset;

static int max_cpus_initialized = 0;

unsigned int LockTimeOut;
unsigned int LockTimeOutTSC;
unsigned int MutexSpin;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}
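/*
 * Example (hypothetical; the address and offset are illustrative only):
 * map one page of a device's registers and read a 32-bit register at
 * offset 0x20.
 *
 *	vm_offset_t regs = ml_io_map(0xFEC00000, PAGE_SIZE);
 *	uint32_t val = *(volatile uint32_t *)(regs + 0x20);
 */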

/* boot memory allocation */
vm_offset_t ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t)NULL);
}

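/* Report the base and size of the bounce pool carved out at boot. */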
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = bounce_pool_base;
	*size = bounce_pool_size;
}

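/*
 * Convert an early boot physical address to its kernel virtual address:
 * back out the kernel's relocation offset, then place the result in the
 * linear kernel mapping.
 */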
vm_offset_t
ml_boot_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((paddr - KernelRelocOffset) | LINEAR_KERNEL_ADDRESS);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}

/*
 * Routine:	ml_static_mfree
 * Function:	Return static (boot-allocated) memory to the VM free list.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;

//	if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

	assert((vaddr & (PAGE_SIZE-1)) == 0);	/* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
		if (ppn != (vm_offset_t)NULL) {
			kernel_pmap->stats.resident_count++;
			if (kernel_pmap->stats.resident_count >
			    kernel_pmap->stats.resident_max) {
				kernel_pmap->stats.resident_max =
					kernel_pmap->stats.resident_count;
			}
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
			vm_page_create(ppn, (ppn + 1));
			vm_page_wire_count--;
		}
	}
}

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/*
 * Routine:	ml_nofault_copy
 * Function:	Perform a physical-mode copy if the source and destination
 *		have valid translations in the kernel pmap. If translations
 *		are present, they are assumed to be wired; i.e. no attempt
 *		is made to guarantee that the translations obtained remain
 *		valid for the duration of the copy process.
 */

vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
			break;
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
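/*
 * Example (hypothetical): copy len bytes without taking faults; a short
 * return value indicates where a valid translation ran out.
 *
 *	vm_size_t done = ml_nofault_copy(src_va, dst_va, len);
 *	if (done != len)
 *		handle_partial_copy(done);	(illustrative helper)
 */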

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));

	if (enable) {
		ast_t *myast;

		myast = ast_pending();

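		/*
		 * With an urgent AST pending and preemption enabled, take a
		 * self-interrupt (int $0xff) immediately after sti so the
		 * AST is serviced as soon as interrupts come back on.
		 */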
		if ((get_preemption_level() == 0) && (*myast & AST_URGENT)) {
			__asm__ volatile("sti");
			__asm__ volatile("int $0xff");
		} else {
			__asm__ volatile("sti");
		}
	} else {
		__asm__ volatile("cli");
	}

	return (flags & EFL_IF) != 0;
}
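/*
 * Typical save/restore pattern (as used elsewhere in this file):
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	...critical section...
 *	(void) ml_set_interrupts_enabled(istate);
 */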

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}

void ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

/* Install an interrupt handler */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
	    (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}

void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(processor->cpu_num);
}

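/*
 * Prepare this processor for shutdown: suspend VMX operation, save the
 * outgoing thread's floating-point context, then switch to the shutdown
 * context.
 */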
thread_t
machine_processor_shutdown(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	vmx_suspend();
	fpu_save_context(thread);
	return (Shutdown_context(thread, doshutdown, processor));
}

kern_return_t
ml_processor_register(
	cpu_id_t cpu_id,
	uint32_t lapic_id,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler,
	boolean_t boot_cpu)
{
	int target_cpu;
	cpu_data_t *this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	    (!boot_cpu && (target_cpu != 0)));

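	/* Record the local APIC ID <-> kernel cpu number mapping. */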
	lapic_cpu_map(lapic_id, target_cpu);

	this_cpu_datap->cpu_id = cpu_id;
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_chud == NULL)
		goto failed;

	if (!boot_cpu) {
		cpu_thread_alloc(this_cpu_datap->cpu_number);
		if (this_cpu_datap->lcpu.core == NULL)
			goto failed;

		pmCPUStateInit();

		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		/*
		 * processor_init() deferred to topology start
		 * because "slot numbers" a.k.a. logical processor numbers
		 * are not yet finalized.
		 */
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = NULL;

	if (target_cpu == machine_info.max_cpus - 1) {
		/*
		 * All processors are now registered but not started (except
		 * for this "in-limbo" boot processor). We call to the machine
		 * topology code to finalize and activate the topology.
		 */
		cpu_topology_start();
	}

	return KERN_SUCCESS;

failed:
	cpu_processor_free(this_cpu_datap->cpu_processor);
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
	chudxnu_cpu_free(this_cpu_datap->cpu_chud);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	return KERN_FAILURE;
}

void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t os_supports_sse;
	i386_cpu_info_t *cpuid_infop;

	if (cpu_infop == NULL)
		return;

	/*
	 * Are we supporting MMX/SSE/SSE2/SSE3 in the OS?
	 * (As distinct from whether the cpu has these capabilities.)
	 */
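	/*
	 * vector_unit encodes the best supported level:
	 * 0 = none, 2 = MMX, 3 = SSE, 4 = SSE2, 5 = SSE3,
	 * 6 = SSSE3, 7 = SSE4.1, 8 = SSE4.2.
	 */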
	os_supports_sse = get_cr4() & CR4_XMM;
	if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse)
		cpu_infop->vector_unit = 8;
	else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse)
		cpu_infop->vector_unit = 7;
	else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse)
		cpu_infop->vector_unit = 6;
	else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
		cpu_infop->vector_unit = 5;
	else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_infop->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_infop->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_infop->vector_unit = 2;
	else
		cpu_infop->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}

void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

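/*
 * Return machine_info.max_cpus, blocking if necessary until
 * ml_init_max_cpus() has established it.
 */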
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}

/*
 * Routine:	ml_init_lock_timeout
 * Function:	Initialize the lock timeout and mutex spin values,
 *		honoring the "mtxspin" boot-arg if present.
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint32_t mtxspin;

	/* LockTimeOut is in absolute time; LockTimeOutTSC is in TSC ticks */
	nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &abstime);
	LockTimeOut = (uint32_t) abstime;
	LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t);

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
		if (mtxspin > USEC_PER_SEC >> 4)
			mtxspin = USEC_PER_SEC >> 4;
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int) abstime;
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	return;
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return (current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return (current_thread_fast());
}

boolean_t ml_is64bit(void)
{
	return (cpu_mode_is64bit());
}

boolean_t ml_thread_is64bit(thread_t thread)
{
	return (thread_is_64bit(thread));
}

boolean_t ml_state_is64bit(void *saved_state)
{
	return is_saved_state64(saved_state);
}

void ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL_LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT)
		return;

	/*
	 * If 64-bit, this requires a mode switch (and back).
	 */
	if (cpu_mode_is64bit())
		ml_64bit_lldt(selector);
	else
		lldt(selector);
	current_cpu_datap()->cpu_ldt = selector;
}

void ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}

uint64_t ml_cpu_int_event_time(void)
{
	return current_cpu_datap()->cpu_int_event_time;
}

#if MACH_KDB

/*
 * Display the global MSRs.
 *
 * ddb command: ms
 */
void
db_msr(__unused db_expr_t addr,
    __unused int have_addr,
    __unused db_expr_t count,
    __unused char *modif)
{
	uint32_t i, msrlow, msrhigh;

	/* Try all of the first 4096 msrs */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n", i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0x0C000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0x0C000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
			    0x0C000000 | i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0xC0000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0xC0000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
			    0xC0000000 | i, msrhigh, msrlow);
		}
	}
}

#endif