/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/cpu_threads.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/pmCPU.h>
#include <i386/proc_reg.h>
#include <mach/vm_param.h>
#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif /* MACH_KDB */
#define MIN(a,b) ((a)<(b)? (a) : (b))

#if DEBUG
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif
extern void initialize_screen(Boot_Video *, unsigned int);
extern thread_t Shutdown_context(thread_t thread, void (*doshutdown)(processor_t), processor_t processor);
extern void wakeup(void *);
extern unsigned KernelRelocOffset;

static int max_cpus_initialized = 0;

unsigned int LockTimeOut = 12500000;
unsigned int MutexSpin = 0;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}
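
/*
 * Illustrative sketch only (not part of the original source): a caller
 * maps a device's physical register window and reads a register through
 * the returned virtual address. The physical address and register
 * offset below are made-up values.
 */
#if 0
	vm_offset_t regs = ml_io_map(0xFEC00000, PAGE_SIZE);	/* hypothetical device base */
	uint32_t    ver  = *(volatile uint32_t *)(regs + 0x30);	/* hypothetical register */
#endif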

/* boot memory allocation */
vm_offset_t ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t)NULL);
}
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = bounce_pool_base;
	*size      = bounce_pool_size;
}
vm_offset_t
ml_boot_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((paddr - KernelRelocOffset) | LINEAR_KERNEL_ADDRESS);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((unsigned)paddr | LINEAR_KERNEL_ADDRESS);
}
/*
 *	Routine:	ml_static_mfree
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;

//	if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

	assert((vaddr & (PAGE_SIZE-1)) == 0);	/* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
		if (ppn != (vm_offset_t)NULL) {
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur,
				    (addr64_t)(vaddr_cur + PAGE_SIZE));
			vm_page_create(ppn, (ppn + 1));
			vm_page_wire_count--;
		}
	}
}
/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}
/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}
/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));

	if (enable) {
		ast_t *myast;

		myast = ast_pending();

		if ((get_preemption_level() == 0) && (*myast & AST_URGENT)) {
			__asm__ volatile("sti");
			__asm__ volatile("int $0xff");
		} else {
			__asm__ volatile("sti");
		}
	} else {
		__asm__ volatile("cli");
	}

	return (flags & EFL_IF) != 0;
}
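
/*
 * Illustrative sketch only (not part of the original source): the usual
 * calling pattern saves the previous interrupt state and restores it on
 * exit, rather than unconditionally re-enabling interrupts.
 */
#if 0
	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	/* ... critical section runs with interrupts masked ... */
	(void) ml_set_interrupts_enabled(istate);
#endif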

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if (policy_id == MACHINE_GROUP)
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
/* Initialize Interrupts */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
				     (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}
void
machine_idle(void)
{
	cpu_core_t	*my_core = cpu_core();
	int		others_active;

	/*
	 * We halt this cpu thread
	 * unless the kernel param idlehalt is false and no other thread
	 * in the same core is active - in that case, don't halt, so that
	 * this core doesn't go into a low-power mode.
	 * For 4/4, we set a null "active cr3" while idle.
	 */
	others_active = !atomic_decl_and_test(
				(long *) &my_core->active_threads, 1);
	if (idlehalt || others_active) {
		DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
		MARK_CPU_IDLE(cpu_number());
		machine_idle_cstate();
		MARK_CPU_ACTIVE(cpu_number());
		DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
	} else {
		__asm__ volatile("sti");
	}
	atomic_incl((long *) &my_core->active_threads, 1);
}
void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}
thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
	fpu_save_context(thread);
	return (Shutdown_context(thread, doshutdown, processor));
}
kern_return_t
ml_processor_register(
	cpu_id_t	cpu_id,
	uint32_t	lapic_id,
	processor_t	*processor_out,
	ipi_handler_t	*ipi_handler,
	boolean_t	boot_cpu)
{
	int		target_cpu;
	cpu_data_t	*this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	this_cpu_datap->cpu_id = cpu_id;
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_chud == NULL)
		goto failed;

	if (!boot_cpu) {
		this_cpu_datap->cpu_core = cpu_thread_alloc(target_cpu);

		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		processor_init(this_cpu_datap->cpu_processor, target_cpu);
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = NULL;

	return KERN_SUCCESS;

failed:
	cpu_processor_free(this_cpu_datap->cpu_processor);
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
	chudxnu_cpu_free(this_cpu_datap->cpu_chud);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	return KERN_FAILURE;
}
void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t	os_supports_sse;
	i386_cpu_info_t	*cpuid_infop;

	if (cpu_infop == NULL)
		return;

	/*
	 * Are we supporting MMX/SSE/SSE2/SSE3?
	 * As distinct from whether the cpu has these capabilities.
	 */
	os_supports_sse = get_cr4() & CR4_XMM;
	if ((cpuid_features() & CPUID_FEATURE_MNI) && os_supports_sse)
		cpu_infop->vector_unit = 6;
	else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
		cpu_infop->vector_unit = 5;
	else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_infop->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_infop->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_infop->vector_unit = 2;
	else
		cpu_infop->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}
void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}
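
/*
 * The two routines above form a simple handshake on &max_cpus_initialized:
 * a caller of ml_get_max_cpus() that arrives before ml_init_max_cpus() has
 * recorded the ACPI cpu count parks itself in the MAX_CPUS_WAIT state via
 * assert_wait()/thread_block(), and ml_init_max_cpus() later wakeup()s it
 * once machine_info.max_cpus is final.
 */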

/*
 *	Routine:	ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	/*
	 * XXX As currently implemented for x86, LockTimeOut should be a
	 * cycle (tsc) count not an absolute time (nanoseconds) -
	 * but it's of the right order.
	 */
	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}
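
/*
 * Worked example (values assumed for illustration): booting with
 * "mtxspin=100" requests a 100us mutex spin window; since
 * 100 <= USEC_PER_SEC>>4 (62500), it is used as given and converted to
 * an absolute-time delta for MutexSpin. With no boot-arg, the default
 * spin window is 10us.
 */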

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	return;
}

/* Stubs for pc tracing mechanism */

int *pc_trace_buf;
int pc_trace_cnt = 0;
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return (current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return (current_thread_fast());
}
/*
 * Set the worst-case time for the C4 to C2 transition.
 * The maxdelay parameter is in nanoseconds.
 */
void
ml_set_maxsnoop(uint32_t maxdelay)
{
	C4C2SnoopDelay = maxdelay;	/* Set the transition time */
	machine_nap_policy();		/* Adjust the current nap state */
}

/*
 * Get the worst-case time for the C4 to C2 transition. Returns nanoseconds.
 */
unsigned
ml_get_maxsnoop(void)
{
	return C4C2SnoopDelay;		/* Return the transition time */
}
uint32_t
ml_get_maxbusdelay(void)
{
	return maxBusDelay;
}

/*
 * Set the maximum delay time allowed for snoop on the bus.
 *
 * Note that this value will be compared to the amount of time that it takes
 * to transition from a non-snooping power state (C4) to a snooping state (C2).
 * If maxBusDelay is less than C4C2SnoopDelay,
 * we will not enter the lowest power state.
 */
void
ml_set_maxbusdelay(uint32_t mdelay)
{
	maxBusDelay = mdelay;		/* Set the delay */
	machine_nap_policy();		/* Adjust the current nap state */
}
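
/*
 * Worked example (numbers assumed for illustration): suppose the C4->C2
 * transition takes C4C2SnoopDelay = 50000ns. A driver calling
 * ml_set_maxbusdelay(40000) caps the tolerated snoop latency below that,
 * so machine_nap_policy() keeps the cpu out of C4; a cap of 60000ns
 * would still permit C4.
 */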

boolean_t ml_is64bit(void)
{
	return (cpu_mode_is64bit());
}

boolean_t ml_thread_is64bit(thread_t thread)
{
	return (thread_is_64bit(thread));
}

boolean_t ml_state_is64bit(void *saved_state)
{
	return is_saved_state64(saved_state);
}
void ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT)
		return;

	/*
	 * If 64bit this requires a mode switch (and back).
	 */
	if (cpu_mode_is64bit())
		ml_64bit_lldt(selector);
	else
		lldt(selector);
	current_cpu_datap()->cpu_ldt = selector;
}
void ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}
#if MACH_KDB

/*
 *	Display the global msrs.
 */
void
db_msr(__unused db_expr_t addr,
       __unused int have_addr,
       __unused db_expr_t count,
       __unused char *modif)
{
	uint32_t i, msrlow, msrhigh;

	/* Try all of the first 4096 msrs */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n", i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0x0C000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0x0C000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
				  0x0C000000 | i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0xC0000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0xC0000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
				  0xC0000000 | i, msrhigh, msrlow);
		}
	}
}

#endif /* MACH_KDB */