]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/machine_routines.c
de4200e6e60397070cf870ee577ce28d72f83cf9
[apple/xnu.git] / osfmk / i386 / machine_routines.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 #include <i386/machine_routines.h>
31 #include <i386/io_map_entries.h>
32 #include <i386/cpuid.h>
33 #include <i386/fpu.h>
34 #include <kern/processor.h>
35 #include <kern/machine.h>
36 #include <kern/cpu_data.h>
37 #include <kern/cpu_number.h>
38 #include <kern/thread.h>
39 #include <i386/cpu_data.h>
40 #include <i386/machine_cpu.h>
41 #include <i386/mp.h>
42 #include <i386/mp_events.h>
43 #include <i386/cpu_threads.h>
44 #include <i386/pmap.h>
45 #include <i386/misc_protos.h>
46 #include <i386/pmCPU.h>
47 #include <i386/proc_reg.h>
48 #include <mach/vm_param.h>
49 #if MACH_KDB
50 #include <i386/db_machdep.h>
51 #include <ddb/db_aout.h>
52 #include <ddb/db_access.h>
53 #include <ddb/db_sym.h>
54 #include <ddb/db_variables.h>
55 #include <ddb/db_command.h>
56 #include <ddb/db_output.h>
57 #include <ddb/db_expr.h>
58 #endif
59
/*
 * Local minimum macro.
 * NOTE: each argument is evaluated twice -- do not pass expressions
 * with side effects (e.g. MIN(i++, j)).
 */
#define MIN(a,b) ((a)<(b)? (a) : (b))

#if DEBUG
/* Debug tracing to the kernel printf buffer; compiled away on release builds. */
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif
67
/* Console/video bring-up (implemented in pexpert). */
extern void initialize_screen(Boot_Video *, unsigned int);
/* Assembly continuation used by machine_processor_shutdown(). */
extern thread_t Shutdown_context(thread_t thread, void (*doshutdown)(processor_t),processor_t processor);
/* Wake threads blocked on an event (used by the max_cpus rendezvous below). */
extern void wakeup(void *);
/* Relocation offset applied to the kernel image at boot. */
extern unsigned KernelRelocOffset;

/* Rendezvous state shared by ml_init_max_cpus()/ml_get_max_cpus();
 * holds 0, MAX_CPUS_WAIT or MAX_CPUS_SET. */
static int max_cpus_initialized = 0;

/* Spin-lock timeout, in absolute-time units (set in ml_init_lock_timeout). */
unsigned int LockTimeOut = 12500000;
/* Mutex adaptive-spin duration, in absolute-time units. */
unsigned int MutexSpin = 0;

#define MAX_CPUS_SET    0x1	/* machine_info.max_cpus finalized */
#define MAX_CPUS_WAIT   0x2	/* a reader is blocked waiting for it */
80
81 /* IO memory map services */
82
83 /* Map memory map IO space */
84 vm_offset_t ml_io_map(
85 vm_offset_t phys_addr,
86 vm_size_t size)
87 {
88 return(io_map(phys_addr,size,VM_WIMG_IO));
89 }
90
91 /* boot memory allocation */
92 vm_offset_t ml_static_malloc(
93 __unused vm_size_t size)
94 {
95 return((vm_offset_t)NULL);
96 }
97
98
/* Report the base physical address and size of the DMA bounce pool
 * that was carved out at boot. */
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = bounce_pool_base;
	*size = bounce_pool_size;
}
104
105
106 vm_offset_t
107 ml_boot_ptovirt(
108 vm_offset_t paddr)
109 {
110 return (vm_offset_t)((paddr-KernelRelocOffset) | LINEAR_KERNEL_ADDRESS);
111 }
112
113 vm_offset_t
114 ml_static_ptovirt(
115 vm_offset_t paddr)
116 {
117 return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
118 }
119
120
121 /*
122 * Routine: ml_static_mfree
123 * Function:
124 */
125 void
126 ml_static_mfree(
127 vm_offset_t vaddr,
128 vm_size_t size)
129 {
130 vm_offset_t vaddr_cur;
131 ppnum_t ppn;
132
133 // if (vaddr < VM_MIN_KERNEL_ADDRESS) return;
134
135 assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */
136
137 for (vaddr_cur = vaddr;
138 vaddr_cur < round_page_32(vaddr+size);
139 vaddr_cur += PAGE_SIZE) {
140 ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
141 if (ppn != (vm_offset_t)NULL) {
142 pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
143 vm_page_create(ppn,(ppn+1));
144 vm_page_wire_count--;
145 }
146 }
147 }
148
149
150 /* virtual to physical on wired pages */
151 vm_offset_t ml_vtophys(
152 vm_offset_t vaddr)
153 {
154 return kvtophys(vaddr);
155 }
156
157 /* Interrupt handling */
158
/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	/* Interrupt initialization proper happens elsewhere; here we
	 * simply enable interrupt delivery on this processor. */
	(void) ml_set_interrupts_enabled(TRUE);
}
164
/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	/* Read EFLAGS; the IF bit reports whether maskable interrupts
	 * are currently deliverable. */
	__asm__ volatile("pushf; popl %0" :  "=r" (flags));
	return (flags & EFL_IF) != 0;
}
173
/* Set Interrupts Enabled.
 * Returns the previous interrupt-enable state so callers can restore it. */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;

	/* Snapshot EFLAGS first so we can report the prior IF state. */
	__asm__ volatile("pushf; popl %0" :  "=r" (flags));

	if (enable) {
		ast_t		*myast;

		myast = ast_pending();

		/* If we are preemptible and an urgent AST is pending,
		 * enable interrupts and immediately trap (int $0xff)
		 * so the AST is serviced right away rather than waiting
		 * for the next natural interrupt. */
		if ( (get_preemption_level() == 0) && (*myast & AST_URGENT) ) {
			__asm__ volatile("sti");
			__asm__ volatile ("int $0xff");
		} else {
			__asm__ volatile ("sti");
		}
	}
	else {
		__asm__ volatile("cli");
	}

	/* TRUE iff interrupts were enabled on entry. */
	return (flags & EFL_IF) != 0;
}
199
200 /* Check if running at interrupt context */
201 boolean_t ml_at_interrupt_context(void)
202 {
203 return get_interrupt_level() != 0;
204 }
205
/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	/* Not implemented on i386; any caller reaching here is a bug. */
	panic("ml_cause_interrupt not defined yet on Intel");
}
211
/* Apply a machine-level scheduling policy to a thread.
 * MACHINE_GROUP binds the thread to the master processor;
 * MACHINE_NETWORK_WORKLOOP boosts its priority by one. */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if (policy_id == MACHINE_GROUP)
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		/* Priority change requires sched spl and the thread lock. */
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
231
/* Initialize Interrupts */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	/* Remember the interrupt-enable state so it can be restored
	 * after the platform-expert registration. */
	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
	                             (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	/* NOTE(review): acquiring the screen here looks incidental to
	 * interrupt installation -- presumably tied to boot-time console
	 * handoff; confirm against callers before relocating it. */
	initialize_screen(0, kPEAcquireScreen);
}
251
252
void
machine_idle(void)
{
	cpu_core_t *my_core = cpu_core();
	int others_active;

	/*
	 * We halt this cpu thread
	 * unless kernel param idlehalt is false and no other thread
	 * in the same core is active - if so, don't halt so that this
	 * core doesn't go into a low-power mode.
	 * For 4/4, we set a null "active cr3" while idle.
	 */
	/* Atomically drop our per-core active count; a true result from
	 * atomic_decl_and_test means we were the last active thread. */
	others_active = !atomic_decl_and_test(
				(long *) &my_core->active_threads, 1);
	if (idlehalt || others_active) {
		/* Mark ourselves idle for trace/debug, halt in the
		 * appropriate C-state until an interrupt, then unmark. */
		DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
		MARK_CPU_IDLE(cpu_number());
		machine_idle_cstate();
		MARK_CPU_ACTIVE(cpu_number());
		DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
	} else {
		/* Don't halt (keeps the core out of low power), but do
		 * allow interrupts to be delivered. */
		__asm__ volatile("sti");
	}
	/* Active again: restore our count. */
	atomic_incl((long *) &my_core->active_threads, 1);
}
279
280 void
281 machine_signal_idle(
282 processor_t processor)
283 {
284 cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
285 }
286
287 thread_t
288 machine_processor_shutdown(
289 thread_t thread,
290 void (*doshutdown)(processor_t),
291 processor_t processor)
292 {
293 fpu_save_context(thread);
294 return(Shutdown_context(thread, doshutdown, processor));
295 }
296
/* Register a processor with the machine-dependent layer.
 * Allocates and wires up per-cpu data (console buffer, chud state,
 * and -- for non-boot cpus -- pmap state and a processor structure).
 * On any allocation failure, previously-allocated pieces are freed
 * and KERN_FAILURE is returned. */
kern_return_t
ml_processor_register(
	cpu_id_t cpu_id,
	uint32_t lapic_id,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler,
	boolean_t boot_cpu)
{
	int target_cpu;
	cpu_data_t *this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	/* cpu 0 is always (and only) the boot cpu. */
	assert((boot_cpu && (target_cpu == 0)) ||
	      (!boot_cpu && (target_cpu != 0)));

	/* Record the local-APIC-id <-> logical-cpu mapping. */
	lapic_cpu_map(lapic_id, target_cpu);

	this_cpu_datap->cpu_id = cpu_id;
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_chud == NULL)
		goto failed;

	if (!boot_cpu) {
		/* The boot cpu's equivalents were set up earlier in boot. */
		this_cpu_datap->cpu_core = cpu_thread_alloc(target_cpu);

		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		processor_init(this_cpu_datap->cpu_processor, target_cpu);
	}

	*processor_out = this_cpu_datap->cpu_processor;
	/* No machine-specific IPI handler on i386. */
	*ipi_handler = NULL;

	return KERN_SUCCESS;

failed:
	/* Free in reverse order of allocation; the free routines are
	 * expected to tolerate NULL for pieces never allocated. */
	cpu_processor_free(this_cpu_datap->cpu_processor);
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
	chudxnu_cpu_free(this_cpu_datap->cpu_chud);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	return KERN_FAILURE;
}
354
355 void
356 ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
357 {
358 boolean_t os_supports_sse;
359 i386_cpu_info_t *cpuid_infop;
360
361 if (cpu_infop == NULL)
362 return;
363
364 /*
365 * Are we supporting MMX/SSE/SSE2/SSE3?
366 * As distinct from whether the cpu has these capabilities.
367 */
368 os_supports_sse = get_cr4() & CR4_XMM;
369 if ((cpuid_features() & CPUID_FEATURE_MNI) && os_supports_sse)
370 cpu_infop->vector_unit = 6;
371 else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
372 cpu_infop->vector_unit = 5;
373 else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
374 cpu_infop->vector_unit = 4;
375 else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
376 cpu_infop->vector_unit = 3;
377 else if (cpuid_features() & CPUID_FEATURE_MMX)
378 cpu_infop->vector_unit = 2;
379 else
380 cpu_infop->vector_unit = 0;
381
382 cpuid_infop = cpuid_info();
383
384 cpu_infop->cache_line_size = cpuid_infop->cache_linesize;
385
386 cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
387 cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];
388
389 if (cpuid_infop->cache_size[L2U] > 0) {
390 cpu_infop->l2_settings = 1;
391 cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
392 } else {
393 cpu_infop->l2_settings = 0;
394 cpu_infop->l2_cache_size = 0xFFFFFFFF;
395 }
396
397 if (cpuid_infop->cache_size[L3U] > 0) {
398 cpu_infop->l3_settings = 1;
399 cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
400 } else {
401 cpu_infop->l3_settings = 0;
402 cpu_infop->l3_cache_size = 0xFFFFFFFF;
403 }
404 }
405
/* Record the number of cpus discovered by ACPI, clipped to the kernel
 * maximum, and wake any thread waiting for the value in ml_get_max_cpus(). */
void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}
428
/* Return machine_info.max_cpus, blocking (uninterruptibly) until
 * ml_init_max_cpus() has published the value. */
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		/* Flag that a waiter exists, then sleep until
		 * ml_init_max_cpus() issues the wakeup. */
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return(machine_info.max_cpus);
}
443
/*
 * Routine: ml_init_lock_timeout
 * Function: Initialize LockTimeOut (spin-lock timeout, ~1/4 second)
 *	and MutexSpin (mutex adaptive-spin duration, default 10us,
 *	overridable via the "mtxspin" boot-arg, capped at USEC_PER_SEC>>4).
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	/*
	 * XXX As currently implemented for x86, LockTimeOut should be a
	 * cycle (tsc) count not an absolute time (nanoseconds) -
	 * but it's of the right order.
	 */
	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		/* Clamp the boot-arg to a sane upper bound. */
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin =  USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}
471
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	/* Nothing machine-dependent to do here on i386. */
}
481
/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	/* Nothing machine-dependent to do here on i386. */
}
491
/* Stubs for pc tracing mechanism */

int *pc_trace_buf;	/* never allocated on i386; referenced by MI code */
int pc_trace_cnt = 0;
496
/* Branch-trace stub: tracing is unsupported on i386, always reports 0. */
int
set_be_bit(void)
{
	return 0;
}
502
/* Branch-trace stub: tracing is unsupported on i386, always reports 0. */
int
clr_be_bit(void)
{
	return 0;
}
508
/* Branch-trace stub: tracing is never active on i386. */
int
be_tracing(void)
{
	return 0;
}
514
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
/* Out-of-line wrapper over the fast inline accessor. */
extern thread_t current_act(void);
thread_t
current_act(void)
{
  return(current_thread_fast());
}

/* current_thread is normally a macro; undef it so we can provide a
 * real function definition with external linkage. */
#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
  return(current_thread_fast());
}
533
/*
 * Set the worst-case time for the C4 to C2 transition.
 * The maxdelay parameter is in nanoseconds.
 */

void
ml_set_maxsnoop(uint32_t maxdelay)
{
	C4C2SnoopDelay = maxdelay;	/* Set the transition time */
	machine_nap_policy();		/* Adjust the current nap state */
}
545
546
/*
 * Get the worst-case time for the C4 to C2 transition.  Returns nanoseconds.
 */

unsigned
ml_get_maxsnoop(void)
{
	return C4C2SnoopDelay;		/* Return the transition time */
}
556
557
/* Return the maximum allowed bus-snoop delay, in nanoseconds. */
uint32_t
ml_get_maxbusdelay(void)
{
	return maxBusDelay;
}
563
/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */

void
ml_set_maxbusdelay(uint32_t mdelay)
{
	maxBusDelay = mdelay;		/* Set the delay */
	machine_nap_policy();		/* Adjust the current nap state */
}
579
580
/* TRUE iff the cpu is currently running in 64-bit (long) mode. */
boolean_t ml_is64bit(void) {

        return (cpu_mode_is64bit());
}
585
586
/* TRUE iff the given thread runs with a 64-bit user context. */
boolean_t ml_thread_is64bit(thread_t thread) {

        return (thread_is_64bit(thread));
}
591
592
/* TRUE iff the given saved machine state is a 64-bit state. */
boolean_t ml_state_is64bit(void *saved_state) {

	return is_saved_state64(saved_state);
}
597
/* Load the LDT designated by selector on this cpu, tracking the current
 * selection in per-cpu data to skip redundant reloads. */
void ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT)
		return;

	/*
	 * If 64bit this requires a mode switch (and back).
	 */
	if (cpu_mode_is64bit())
		ml_64bit_lldt(selector);
	else
		lldt(selector);
	/* Remember what's loaded for the redundancy check above. */
	current_cpu_datap()->cpu_ldt = selector;
}
617
/* Mark the current thread's floating-point context valid/invalid. */
void ml_fp_setvalid(boolean_t value)
{
        fp_setvalid(value);
}
622
623 #if MACH_KDB
624
625 /*
626 * Display the global msrs
627 * *
628 * ms
629 */
630 void
631 db_msr(__unused db_expr_t addr,
632 __unused int have_addr,
633 __unused db_expr_t count,
634 __unused char *modif)
635 {
636
637 uint32_t i, msrlow, msrhigh;
638
639 /* Try all of the first 4096 msrs */
640 for (i = 0; i < 4096; i++) {
641 if (!rdmsr_carefully(i, &msrlow, &msrhigh)) {
642 db_printf("%08X - %08X.%08X\n", i, msrhigh, msrlow);
643 }
644 }
645
646 /* Try all of the 4096 msrs at 0x0C000000 */
647 for (i = 0; i < 4096; i++) {
648 if (!rdmsr_carefully(0x0C000000 | i, &msrlow, &msrhigh)) {
649 db_printf("%08X - %08X.%08X\n",
650 0x0C000000 | i, msrhigh, msrlow);
651 }
652 }
653
654 /* Try all of the 4096 msrs at 0xC0000000 */
655 for (i = 0; i < 4096; i++) {
656 if (!rdmsr_carefully(0xC0000000 | i, &msrlow, &msrhigh)) {
657 db_printf("%08X - %08X.%08X\n",
658 0xC0000000 | i, msrhigh, msrlow);
659 }
660 }
661 }
662
663 #endif