/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/cpu_threads.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/pmCPU.h>
#include <i386/proc_reg.h>
#include <mach/vm_param.h>
#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif

#define MIN(a,b)	((a)<(b)? (a) : (b))

#if DEBUG
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif

extern void	initialize_screen(Boot_Video *, unsigned int);
extern thread_t	Shutdown_context(thread_t thread, void (*doshutdown)(processor_t), processor_t processor);
extern void	wakeup(void *);
extern unsigned	KernelRelocOffset;

static int max_cpus_initialized = 0;

unsigned int	LockTimeOut = 12500000;
unsigned int	MutexSpin = 0;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

/* IO memory map services */

/* Map memory-mapped IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size, VM_WIMG_IO));
}

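/*
 * Illustrative use only (the address below is hypothetical, not taken
 * from this file): a driver mapping one page of device registers might
 * do
 *
 *	vm_offset_t regs = ml_io_map(0xFEC00000, PAGE_SIZE);
 *
 * and then access the device through the returned virtual address.
 */
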
/* boot memory allocation */
vm_offset_t ml_static_malloc(
	__unused vm_size_t size)
{
	return((vm_offset_t)NULL);
}


void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = bounce_pool_base;
	*size      = bounce_pool_size;
}


vm_offset_t
ml_boot_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((paddr - KernelRelocOffset) | LINEAR_KERNEL_ADDRESS);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}

/*
 *	Routine:	ml_static_mfree
 *	Function:	Release boot-time static memory: unmap the pages
 *			and return them to the free list.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;

//	if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

	assert((vaddr & (PAGE_SIZE-1)) == 0);	/* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
		if (ppn != (vm_offset_t)NULL) {
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
			vm_page_create(ppn, (ppn + 1));
			vm_page_wire_count--;
		}
	}
}

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return	kvtophys(vaddr);
}

/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));

	if (enable) {
		ast_t	*myast;

		myast = ast_pending();

		if ((get_preemption_level() == 0) && (*myast & AST_URGENT)) {
			__asm__ volatile("sti");
			__asm__ volatile("int $0xff");
		} else {
			__asm__ volatile("sti");
		}
	} else {
		__asm__ volatile("cli");
	}

	return (flags & EFL_IF) != 0;
}

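/*
 * A minimal usage sketch (not code from this file): callers save the
 * previous interrupt state and restore it, rather than unconditionally
 * re-enabling, as ml_install_interrupt_handler() below does:
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... critical section ...
 *	(void) ml_set_interrupts_enabled(istate);
 */
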
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if (policy_id == MACHINE_GROUP)
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t	s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

/* Install an interrupt handler */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
				     (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

void
machine_idle(void)
{
	cpu_core_t	*my_core = cpu_core();
	int		others_active;

	/*
	 * Halt this cpu thread unless the kernel parameter idlehalt is
	 * false and no other thread in the same core is active - in that
	 * case, don't halt, so that this core doesn't go into a low-power
	 * mode. For 4/4, we set a null "active cr3" while idle.
	 */
	others_active = !atomic_decl_and_test(
				(long *) &my_core->active_threads, 1);
	if (idlehalt || others_active) {
		DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
		MARK_CPU_IDLE(cpu_number());
		machine_idle_cstate();
		MARK_CPU_ACTIVE(cpu_number());
		DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
	} else {
		__asm__ volatile("sti");
	}
	atomic_incl((long *) &my_core->active_threads, 1);
}

void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
	fpu_save_context(thread);
	return(Shutdown_context(thread, doshutdown, processor));
}

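/*
 * Register a processor with the kernel: allocate its per-cpu data,
 * map its local APIC id to a logical cpu number, and set up its
 * console buffer, chud state and (for non-boot cpus) its pmap and
 * processor structures. On any allocation failure, everything
 * allocated so far is freed and KERN_FAILURE is returned.
 */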
kern_return_t
ml_processor_register(
	cpu_id_t	cpu_id,
	uint32_t	lapic_id,
	processor_t	*processor_out,
	ipi_handler_t	*ipi_handler,
	boolean_t	boot_cpu)
{
	int		target_cpu;
	cpu_data_t	*this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	this_cpu_datap->cpu_id = cpu_id;
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_chud == NULL)
		goto failed;

	if (!boot_cpu) {
		this_cpu_datap->cpu_core = cpu_thread_alloc(target_cpu);

		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		processor_init(this_cpu_datap->cpu_processor, target_cpu);
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = NULL;

	return KERN_SUCCESS;

failed:
	cpu_processor_free(this_cpu_datap->cpu_processor);
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
	chudxnu_cpu_free(this_cpu_datap->cpu_chud);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	return KERN_FAILURE;
}

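/*
 * Fill in an ml_cpu_info_t for the current cpu. The vector_unit field
 * reports the best vector extension that is both present in hardware
 * and enabled by the OS (0 none, 2 MMX, 3 SSE, 4 SSE2, 5 SSE3, 6 the
 * MNI extensions); caches that are absent report a size of 0xFFFFFFFF.
 */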
void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t	os_supports_sse;
	i386_cpu_info_t	*cpuid_infop;

	if (cpu_infop == NULL)
		return;

	/*
	 * Are we supporting MMX/SSE/SSE2/SSE3?
	 * As distinct from whether the cpu has these capabilities.
	 */
	os_supports_sse = get_cr4() & CR4_XMM;
	if ((cpuid_features() & CPUID_FEATURE_MNI) && os_supports_sse)
		cpu_infop->vector_unit = 6;
	else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
		cpu_infop->vector_unit = 5;
	else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_infop->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_infop->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_infop->vector_unit = 2;
	else
		cpu_infop->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}

void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

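/*
 * Return the configured maximum cpu count, blocking (uninterruptibly)
 * until ml_init_max_cpus() has been called to set it.
 */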
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return(machine_info.max_cpus);
}

/*
 *	Routine:	ml_init_lock_timeout
 *	Function:	Initialize the spin-lock timeout and the mutex
 *			spin duration, honoring the "mtxspin" boot-arg.
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	/*
	 * XXX As currently implemented for x86, LockTimeOut should be a
	 * cycle (tsc) count not an absolute time (nanoseconds) -
	 * but it's of the right order.
	 */
	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}

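/*
 * Worked example (illustrative arithmetic only): with the defaults
 * above, LockTimeOut corresponds to NSEC_PER_SEC>>2 = 250 ms, and
 * without a "mtxspin" boot-arg MutexSpin corresponds to 10 us; a
 * boot-arg value is clamped to USEC_PER_SEC>>4 = 62500 us.
 */
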
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	return;
}

/* Stubs for pc tracing mechanism */

int *pc_trace_buf;
int pc_trace_cnt = 0;

int
set_be_bit(void)
{
	return(0);
}

int
clr_be_bit(void)
{
	return(0);
}

int
be_tracing(void)
{
	return(0);
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return(current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return(current_thread_fast());
}

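/*
 * Nap-policy tunables: C4C2SnoopDelay and maxBusDelay together
 * determine whether machine_nap_policy() allows the deepest
 * (non-snooping) nap state. The accessors below let power management
 * code set and query them.
 */
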
/*
 * Set the worst-case time for the C4 to C2 transition.
 * The maxdelay parameter is in nanoseconds.
 */

void
ml_set_maxsnoop(uint32_t maxdelay)
{
	C4C2SnoopDelay = maxdelay;	/* Set the transition time */
	machine_nap_policy();		/* Adjust the current nap state */
}


/*
 * Get the worst-case time for the C4 to C2 transition. Returns nanoseconds.
 */

unsigned
ml_get_maxsnoop(void)
{
	return C4C2SnoopDelay;		/* Return the transition time */
}

uint32_t
ml_get_maxbusdelay(void)
{
	return maxBusDelay;
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */

void
ml_set_maxbusdelay(uint32_t mdelay)
{
	maxBusDelay = mdelay;		/* Set the delay */
	machine_nap_policy();		/* Adjust the current nap state */
}

boolean_t ml_is64bit(void)
{
	return (cpu_mode_is64bit());
}


boolean_t ml_thread_is64bit(thread_t thread)
{
	return (thread_is_64bit(thread));
}


boolean_t ml_state_is64bit(void *saved_state)
{
	return is_saved_state64(saved_state);
}

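/*
 * Load a new LDT selector on this cpu, skipping the reload when the
 * kernel LDT is already current. In 64-bit mode the load must be done
 * via ml_64bit_lldt(), which switches modes and back.
 */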
void ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT)
		return;

	/*
	 * If 64bit this requires a mode switch (and back).
	 */
	if (cpu_mode_is64bit())
		ml_64bit_lldt(selector);
	else
		lldt(selector);
	current_cpu_datap()->cpu_ldt = selector;
}

void ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}

#if MACH_KDB

/*
 *	Display the global msrs.
 *
 *	ddb command: ms
 */
void
db_msr(__unused db_expr_t addr,
       __unused int have_addr,
       __unused db_expr_t count,
       __unused char *modif)
{
	uint32_t i, msrlow, msrhigh;

	/* Try all of the first 4096 msrs */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n", i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0x0C000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0x0C000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
				  0x0C000000 | i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0xC0000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0xC0000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
				  0xC0000000 | i, msrhigh, msrlow);
		}
	}
}

#endif