/* apple/xnu (xnu-792.10.96) - osfmk/i386/machine_routines.c */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/cpu_threads.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/pmCPU.h>
#include <i386/proc_reg.h>
#include <mach/vm_param.h>
#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif

#define MIN(a,b) ((a)<(b)? (a) : (b))

#if DEBUG
#define DBG(x...)        kprintf("DBG: " x)
#else
#define DBG(x...)
#endif

extern void initialize_screen(Boot_Video *, unsigned int);
extern thread_t Shutdown_context(thread_t thread, void (*doshutdown)(processor_t), processor_t processor);
extern void wakeup(void *);
extern unsigned KernelRelocOffset;

static int max_cpus_initialized = 0;

unsigned int LockTimeOut = 12500000;
unsigned int MutexSpin = 0;

#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
        vm_offset_t phys_addr,
        vm_size_t size)
{
        return (io_map(phys_addr, size, VM_WIMG_IO));
}
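
/*
 * Example (an illustrative sketch, not part of the original file; the
 * device address shown is hypothetical): a driver maps one page of
 * memory-mapped device registers into kernel virtual space:
 *
 *        vm_offset_t regs = ml_io_map(dev_phys_addr, PAGE_SIZE);
 */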

/* boot memory allocation */
vm_offset_t ml_static_malloc(
        __unused vm_size_t size)
{
        return((vm_offset_t)NULL);
}

void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
        *phys_addr = bounce_pool_base;
        *size = bounce_pool_size;
}

vm_offset_t
ml_boot_ptovirt(
        vm_offset_t paddr)
{
        return (vm_offset_t)((paddr - KernelRelocOffset) | LINEAR_KERNEL_ADDRESS);
}

vm_offset_t
ml_static_ptovirt(
        vm_offset_t paddr)
{
        return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}

/*
 * Routine:     ml_static_mfree
 * Function:    Free the given range of wired boot memory, returning
 *              the underlying physical pages to the VM free list.
 */
void
ml_static_mfree(
        vm_offset_t vaddr,
        vm_size_t size)
{
        vm_offset_t vaddr_cur;
        ppnum_t ppn;

//      if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

        assert((vaddr & (PAGE_SIZE-1)) == 0);   /* must be page aligned */

        for (vaddr_cur = vaddr;
             vaddr_cur < round_page_32(vaddr+size);
             vaddr_cur += PAGE_SIZE) {
                ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
                if (ppn != (vm_offset_t)NULL) {
                        pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
                        vm_page_create(ppn, (ppn+1));
                        vm_page_wire_count--;
                }
        }
}
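
/*
 * Illustrative call (a sketch; the names are hypothetical): once a
 * page-aligned boot-time buffer is no longer needed, its pages can be
 * returned to the VM free list:
 *
 *        ml_static_mfree(boot_buf_vaddr, boot_buf_size);
 */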

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
        vm_offset_t vaddr)
{
        return kvtophys(vaddr);
}

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
        (void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
        unsigned long flags;

        __asm__ volatile("pushf; popl %0" : "=r" (flags));
        return (flags & EFL_IF) != 0;
}

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
        unsigned long flags;

        __asm__ volatile("pushf; popl %0" : "=r" (flags));

        if (enable) {
                ast_t *myast;

                myast = ast_pending();

                if ((get_preemption_level() == 0) && (*myast & AST_URGENT)) {
                        __asm__ volatile("sti");
                        __asm__ volatile("int $0xff");
                } else {
                        __asm__ volatile("sti");
                }
        } else {
                __asm__ volatile("cli");
        }

        return (flags & EFL_IF) != 0;
}
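
/*
 * Typical save/restore usage (an illustrative sketch; the same pattern
 * appears in ml_init_max_cpus() below): the return value is the
 * previous interrupt state, so callers restore it rather than
 * unconditionally re-enabling:
 *
 *        boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *        ...code that must run with interrupts disabled...
 *        (void) ml_set_interrupts_enabled(istate);
 */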

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
        return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
        panic("ml_cause_interrupt not defined yet on Intel");
}

void ml_thread_policy(
        thread_t thread,
        unsigned policy_id,
        unsigned policy_info)
{
        if (policy_id == MACHINE_GROUP)
                thread_bind(thread, master_processor);

        if (policy_info & MACHINE_NETWORK_WORKLOOP) {
                spl_t s = splsched();

                thread_lock(thread);

                set_priority(thread, thread->priority + 1);

                thread_unlock(thread);
                splx(s);
        }
}
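
/*
 * Illustrative call (a sketch): a networking workloop thread is bound
 * to the master processor and boosted one priority level:
 *
 *        ml_thread_policy(thread, MACHINE_GROUP, MACHINE_NETWORK_WORKLOOP);
 */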

/* Install an interrupt handler */
void ml_install_interrupt_handler(
        void *nub,
        int source,
        void *target,
        IOInterruptHandler handler,
        void *refCon)
{
        boolean_t current_state;

        current_state = ml_get_interrupts_enabled();

        PE_install_interrupt_handler(nub, source, target,
                                     (IOInterruptHandler) handler, refCon);

        (void) ml_set_interrupts_enabled(current_state);

        initialize_screen(0, kPEAcquireScreen);
}

void
machine_idle(void)
{
        cpu_core_t *my_core = cpu_core();
        int others_active;

        /*
         * Halt this cpu thread unless the kernel parameter idlehalt is
         * false and no other thread in this core is active - in that
         * case don't halt, so that this core doesn't drop into a
         * low-power mode.
         * For 4/4, we set a null "active cr3" while idle.
         */
        others_active = !atomic_decl_and_test(
                                (long *) &my_core->active_threads, 1);
        if (idlehalt || others_active) {
                DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
                MARK_CPU_IDLE(cpu_number());
                machine_idle_cstate();
                MARK_CPU_ACTIVE(cpu_number());
                DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
        } else {
                __asm__ volatile("sti");
        }
        atomic_incl((long *) &my_core->active_threads, 1);
}

void
machine_signal_idle(
        processor_t processor)
{
        cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}

thread_t
machine_processor_shutdown(
        thread_t thread,
        void (*doshutdown)(processor_t),
        processor_t processor)
{
        fpu_save_context(thread);
        return(Shutdown_context(thread, doshutdown, processor));
}

kern_return_t
ml_processor_register(
        cpu_id_t cpu_id,
        uint32_t lapic_id,
        processor_t *processor_out,
        ipi_handler_t *ipi_handler,
        boolean_t boot_cpu)
{
        int target_cpu;
        cpu_data_t *this_cpu_datap;

        this_cpu_datap = cpu_data_alloc(boot_cpu);
        if (this_cpu_datap == NULL) {
                return KERN_FAILURE;
        }
        target_cpu = this_cpu_datap->cpu_number;
        assert((boot_cpu && (target_cpu == 0)) ||
               (!boot_cpu && (target_cpu != 0)));

        lapic_cpu_map(lapic_id, target_cpu);

        this_cpu_datap->cpu_id = cpu_id;
        this_cpu_datap->cpu_phys_number = lapic_id;

        this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
        if (this_cpu_datap->cpu_console_buf == NULL)
                goto failed;

        this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
        if (this_cpu_datap->cpu_chud == NULL)
                goto failed;

        if (!boot_cpu) {
                this_cpu_datap->cpu_core = cpu_thread_alloc(target_cpu);

                this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
                if (this_cpu_datap->cpu_pmap == NULL)
                        goto failed;

                this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
                if (this_cpu_datap->cpu_processor == NULL)
                        goto failed;
                processor_init(this_cpu_datap->cpu_processor, target_cpu);
        }

        *processor_out = this_cpu_datap->cpu_processor;
        *ipi_handler = NULL;

        return KERN_SUCCESS;

failed:
        cpu_processor_free(this_cpu_datap->cpu_processor);
        pmap_cpu_free(this_cpu_datap->cpu_pmap);
        chudxnu_cpu_free(this_cpu_datap->cpu_chud);
        console_cpu_free(this_cpu_datap->cpu_console_buf);
        return KERN_FAILURE;
}
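
/*
 * Illustrative registration flow (a hedged sketch; the startup code
 * that does this is not part of this file): each CPU discovered at
 * boot is registered and then started through the machine-independent
 * layer:
 *
 *        processor_t   processor;
 *        ipi_handler_t ipi_handler;
 *
 *        if (ml_processor_register(cpu_id, lapic_id, &processor,
 *                                  &ipi_handler, FALSE) == KERN_SUCCESS)
 *                processor_start(processor);
 */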

void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
        boolean_t os_supports_sse;
        i386_cpu_info_t *cpuid_infop;

        if (cpu_infop == NULL)
                return;

        /*
         * Are we supporting MMX/SSE/SSE2/SSE3?
         * As distinct from whether the cpu has these capabilities.
         */
        os_supports_sse = get_cr4() & CR4_XMM;
        if ((cpuid_features() & CPUID_FEATURE_MNI) && os_supports_sse)
                cpu_infop->vector_unit = 6;
        else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
                cpu_infop->vector_unit = 5;
        else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
                cpu_infop->vector_unit = 4;
        else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
                cpu_infop->vector_unit = 3;
        else if (cpuid_features() & CPUID_FEATURE_MMX)
                cpu_infop->vector_unit = 2;
        else
                cpu_infop->vector_unit = 0;

        cpuid_infop = cpuid_info();

        cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

        cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
        cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

        if (cpuid_infop->cache_size[L2U] > 0) {
                cpu_infop->l2_settings = 1;
                cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
        } else {
                cpu_infop->l2_settings = 0;
                cpu_infop->l2_cache_size = 0xFFFFFFFF;
        }

        if (cpuid_infop->cache_size[L3U] > 0) {
                cpu_infop->l3_settings = 1;
                cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
        } else {
                cpu_infop->l3_settings = 0;
                cpu_infop->l3_cache_size = 0xFFFFFFFF;
        }
}
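
/*
 * Illustrative caller (a sketch, not from this file): a vector_unit
 * value of 4 or more means the OS has enabled at least SSE2:
 *
 *        ml_cpu_info_t cpu_info;
 *
 *        ml_cpu_get_info(&cpu_info);
 *        if (cpu_info.vector_unit >= 4)
 *                ...use an SSE2 code path...
 */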

void
ml_init_max_cpus(unsigned long max_cpus)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
                        /*
                         * Note: max_cpus is the number of enabled processors
                         * that ACPI found; max_ncpus is the maximum number
                         * that the kernel supports or that the "cpus="
                         * boot-arg has set. Here we take the minimum.
                         */
                        machine_info.max_cpus = MIN(max_cpus, max_ncpus);
                }
                if (max_cpus_initialized == MAX_CPUS_WAIT)
                        wakeup((event_t)&max_cpus_initialized);
                max_cpus_initialized = MAX_CPUS_SET;
        }
        (void) ml_set_interrupts_enabled(current_state);
}

int
ml_get_max_cpus(void)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                max_cpus_initialized = MAX_CPUS_WAIT;
                assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
                (void)thread_block(THREAD_CONTINUE_NULL);
        }
        (void) ml_set_interrupts_enabled(current_state);
        return(machine_info.max_cpus);
}
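
/*
 * The two routines above form a simple handshake: ml_get_max_cpus()
 * blocks in MAX_CPUS_WAIT until ml_init_max_cpus() has published
 * machine_info.max_cpus and issued the wakeup. An illustrative
 * consumer (a sketch):
 *
 *        int ncpus = ml_get_max_cpus();   ...may block during early boot...
 */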

/*
 * Routine:     ml_init_lock_timeout
 * Function:    Compute the absolute-time values used for the lock
 *              timeout and the adaptive mutex spin time.
 */
void
ml_init_lock_timeout(void)
{
        uint64_t abstime;
        uint32_t mtxspin;

        /*
         * XXX As currently implemented for x86, LockTimeOut should be a
         * cycle (tsc) count not an absolute time (nanoseconds) -
         * but it's of the right order.
         */
        nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
        LockTimeOut = (unsigned int)abstime;

        if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
                if (mtxspin > USEC_PER_SEC>>4)
                        mtxspin = USEC_PER_SEC>>4;
                nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
        } else {
                nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
        }
        MutexSpin = (unsigned int)abstime;
}
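
/*
 * Worked values for the code above, derived from its constants:
 * NSEC_PER_SEC>>2 is 250,000,000 ns, so the lock timeout is roughly a
 * quarter second; the "mtxspin" boot-arg is capped at USEC_PER_SEC>>4
 * = 62,500 us; and the default mutex spin time is 10 us.
 */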

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
        return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
        return;
}

/* Stubs for pc tracing mechanism */

int *pc_trace_buf;
int pc_trace_cnt = 0;

int
set_be_bit(void)
{
        return(0);
}

int
clr_be_bit(void)
{
        return(0);
}

int
be_tracing(void)
{
        return(0);
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
        return(current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
        return(current_thread_fast());
}

/*
 * Set the worst-case time for the C4 to C2 transition.
 * The maxdelay parameter is in nanoseconds.
 */

void
ml_set_maxsnoop(uint32_t maxdelay)
{
        C4C2SnoopDelay = maxdelay;      /* Set the transition time */
        machine_nap_policy();           /* Adjust the current nap state */
}

/*
 * Get the worst-case time for the C4 to C2 transition. Returns nanoseconds.
 */

unsigned
ml_get_maxsnoop(void)
{
        return C4C2SnoopDelay;          /* Return the transition time */
}

uint32_t
ml_get_maxbusdelay(void)
{
        return maxBusDelay;
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */

void
ml_set_maxbusdelay(uint32_t mdelay)
{
        maxBusDelay = mdelay;           /* Set the delay */
        machine_nap_policy();           /* Adjust the current nap state */
}
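
/*
 * A hedged sketch of the comparison described above (the actual
 * decision is made in machine_nap_policy(), which is not in this
 * file):
 *
 *        if (maxBusDelay < C4C2SnoopDelay)
 *                ...the non-snooping C4 state is disallowed...
 *        else
 *                ...C4 may be entered...
 */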

boolean_t ml_is64bit(void)
{
        return (cpu_mode_is64bit());
}

boolean_t ml_thread_is64bit(thread_t thread)
{
        return (thread_is_64bit(thread));
}

boolean_t ml_state_is64bit(void *saved_state)
{
        return is_saved_state64(saved_state);
}

void ml_cpu_set_ldt(int selector)
{
        /*
         * Avoid loading the LDT
         * if we're setting the KERNEL LDT and it's already set.
         */
        if (selector == KERNEL_LDT &&
            current_cpu_datap()->cpu_ldt == KERNEL_LDT)
                return;

        /*
         * If 64bit this requires a mode switch (and back).
         */
        if (cpu_mode_is64bit())
                ml_64bit_lldt(selector);
        else
                lldt(selector);
        current_cpu_datap()->cpu_ldt = selector;
}
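
/*
 * Illustrative call (a sketch): reverting this cpu to the default
 * kernel LDT; the reload is skipped when KERNEL_LDT is already
 * current:
 *
 *        ml_cpu_set_ldt(KERNEL_LDT);
 */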

void ml_fp_setvalid(boolean_t value)
{
        fp_setvalid(value);
}

#if MACH_KDB

/*
 * Display the global msrs
 *
 * ms
 */
void
db_msr(__unused db_expr_t addr,
       __unused int have_addr,
       __unused db_expr_t count,
       __unused char *modif)
{
        uint32_t i, msrlow, msrhigh;

        /* Try all of the first 4096 msrs */
        for (i = 0; i < 4096; i++) {
                if (!rdmsr_carefully(i, &msrlow, &msrhigh)) {
                        db_printf("%08X - %08X.%08X\n", i, msrhigh, msrlow);
                }
        }

        /* Try all of the 4096 msrs at 0x0C000000 */
        for (i = 0; i < 4096; i++) {
                if (!rdmsr_carefully(0x0C000000 | i, &msrlow, &msrhigh)) {
                        db_printf("%08X - %08X.%08X\n",
                                0x0C000000 | i, msrhigh, msrlow);
                }
        }

        /* Try all of the 4096 msrs at 0xC0000000 */
        for (i = 0; i < 4096; i++) {
                if (!rdmsr_carefully(0xC0000000 | i, &msrlow, &msrhigh)) {
                        db_printf("%08X - %08X.%08X\n",
                                0xC0000000 | i, msrhigh, msrlow);
                }
        }
}

#endif