/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/mp_events.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/pmCPU.h>
#include <i386/proc_reg.h>
#include <i386/cpu_threads.h>
#include <mach/vm_param.h>

#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif
#define DBG(x...)	kprintf("DBG: " x)
extern thread_t Shutdown_context(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor);
extern void wakeup(void *);
extern unsigned KernelRelocOffset;

static int max_cpus_initialized = 0;

unsigned int LockTimeOut;
unsigned int LockTimeOutTSC;
unsigned int MutexSpin;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}
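
/*
 * Illustrative sketch (not compiled): a hypothetical driver mapping a
 * device's register window with ml_io_map(). The physical address and size
 * here are made up for the example; a real caller would take them from its
 * nub's memory descriptor.
 */
#if 0
static vm_offset_t
example_map_device_regs(void)
{
	vm_offset_t phys_addr = 0xFEC00000;	/* hypothetical MMIO base */
	vm_size_t   size      = PAGE_SIZE;	/* one page of registers */

	/* Returns a kernel virtual address backed by an uncached mapping. */
	return ml_io_map(phys_addr, size);
}
#endif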
/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t)NULL);
}
void
ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = bounce_pool_base;
	*size = bounce_pool_size;
}
/* Boot-time physical-to-virtual: account for the kernel's relocation offset. */
vm_offset_t
ml_boot_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((paddr - KernelRelocOffset) | LINEAR_KERNEL_ADDRESS);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}
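
/*
 * Illustrative sketch (not compiled): the static mapping arithmetic above
 * is meant to be invertible over the linear region; ORing in
 * LINEAR_KERNEL_ADDRESS turns a physical address into the kernel virtual
 * address that maps it 1:1, assuming the region is actually mapped.
 */
#if 0
static void
example_static_mapping_roundtrip(vm_offset_t paddr)
{
	vm_offset_t vaddr = ml_static_ptovirt(paddr);

	assert(kvtophys(vaddr) == (addr64_t) paddr);	/* identity round trip */
}
#endif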
/*
 *	Routine:        ml_static_mfree
 *	Function:	Return boot-time static memory to the VM system.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;

//	if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

	assert((vaddr & (PAGE_SIZE-1)) == 0);	/* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
		if (ppn != (vm_offset_t)NULL) {
			kernel_pmap->stats.resident_count++;
			if (kernel_pmap->stats.resident_count >
			    kernel_pmap->stats.resident_max) {
				kernel_pmap->stats.resident_max =
					kernel_pmap->stats.resident_count;
			}
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur,
				    (addr64_t)(vaddr_cur + PAGE_SIZE));
			/* Hand the page back to the VM as a new free page. */
			vm_page_create(ppn, (ppn + 1));
			vm_page_wire_count--;
		}
	}
}
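
/*
 * Illustrative sketch (not compiled): releasing a page-aligned boot-time
 * region back to the VM system. The symbol names are hypothetical; the
 * pattern mirrors how startup code returns memory it no longer needs.
 */
#if 0
static void
example_release_boot_table(vm_offset_t table_base, vm_size_t table_size)
{
	/* ml_static_mfree() asserts page alignment of the base address. */
	assert((table_base & (PAGE_SIZE - 1)) == 0);
	ml_static_mfree(table_base, table_size);
}
#endif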
/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(
	vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}
/*
 *	Routine:        ml_nofault_copy
 *	Function:	Perform a physical mode copy if the source and
 *			destination have valid translations in the kernel pmap.
 *			If translations are present, they are assumed to
 *			be wired; i.e., no attempt is made to guarantee that the
 *			translations obtained remain valid for
 *			the duration of the copy process.
 */

vm_size_t
ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_page(i386_btop(cur_phys_dst)) ||
		    !pmap_valid_page(i386_btop(cur_phys_src)))
			break;
		/* Copy at most to the end of whichever page ends first. */
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
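
/*
 * Illustrative sketch (not compiled): because ml_nofault_copy() stops at
 * the first address lacking a valid kernel translation, callers must check
 * the byte count it returns rather than assume the whole copy happened.
 * The helper name below is hypothetical.
 */
#if 0
static boolean_t
example_copy_all_or_nothing(vm_offset_t src, vm_offset_t dst, vm_size_t len)
{
	/* Returns TRUE only if every byte was copied. */
	return ml_nofault_copy(src, dst, len) == len;
}
#endif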
/* Interrupt handling */

/* Initialize Interrupts */
void
ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t
ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}
/* Set Interrupts Enabled */
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));

	if (enable) {
		ast_t *myast;

		myast = ast_pending();

		/*
		 * If an urgent AST is pending at base preemption level,
		 * take it via a self-interrupt as soon as interrupts are
		 * re-enabled.
		 */
		if ((get_preemption_level() == 0) && (*myast & AST_URGENT)) {
			__asm__ volatile("sti");
			__asm__ volatile("int $0xff");
		} else {
			__asm__ volatile("sti");
		}
	} else {
		__asm__ volatile("cli");
	}

	return (flags & EFL_IF) != 0;
}
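
/*
 * Illustrative sketch (not compiled): the save/restore idiom used
 * throughout this file. ml_set_interrupts_enabled() returns the *previous*
 * state, so a critical section can be bracketed without assuming
 * interrupts were enabled on entry.
 */
#if 0
static void
example_critical_section(void)
{
	boolean_t istate;

	istate = ml_set_interrupts_enabled(FALSE);	/* disable, save old state */
	/* ... code that must not be interrupted ... */
	(void) ml_set_interrupts_enabled(istate);	/* restore previous state */
}
#endif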
/* Check if running at interrupt context */
boolean_t
ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void
ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}
void
ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		/* Boost network workloop threads by one priority level. */
		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
/* Initialize Interrupts */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
				     (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}
void
machine_idle(void)
{
	x86_core_t	*my_core = x86_core();
	cpu_data_t	*my_cpu  = current_cpu_datap();
	int		others_active;

	/*
	 * We halt this cpu thread
	 * unless kernel param idlehalt is false and no other thread
	 * in the same core is active - if so, don't halt so that this
	 * core doesn't go into a low-power mode.
	 * For 4/4, we set a null "active cr3" while idle.
	 */
	if (my_core == NULL || my_cpu == NULL)
		goto out;

	others_active = !atomic_decl_and_test(
				(long *) &my_core->active_lcpus, 1);
	my_cpu->lcpu.idle = TRUE;
	if (idlehalt || others_active) {
		DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
		MARK_CPU_IDLE(cpu_number());
		machine_idle_cstate(FALSE);
		MARK_CPU_ACTIVE(cpu_number());
		DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
	}
	my_cpu->lcpu.idle = FALSE;
	atomic_incl((long *) &my_core->active_lcpus, 1);
out:
	__asm__ volatile("sti");
}
void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
	fpu_save_context(thread);
	return (Shutdown_context(thread, doshutdown, processor));
}
kern_return_t
ml_processor_register(
	cpu_id_t	cpu_id,
	uint32_t	lapic_id,
	processor_t	*processor_out,
	ipi_handler_t	*ipi_handler,
	boolean_t	boot_cpu)
{
	int		target_cpu;
	cpu_data_t	*this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	this_cpu_datap->cpu_id = cpu_id;
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_chud == NULL)
		goto failed;

	this_cpu_datap->lcpu.core = cpu_thread_alloc(this_cpu_datap->cpu_number);
	if (this_cpu_datap->lcpu.core == NULL)
		goto failed;

	this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_pmap == NULL)
		goto failed;

	this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
	if (this_cpu_datap->cpu_processor == NULL)
		goto failed;
	/*
	 * processor_init() deferred to topology start
	 * because "slot numbers" a.k.a. logical processor numbers
	 * are not yet finalized.
	 */
	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = NULL;

	if (target_cpu == machine_info.max_cpus - 1) {
		/*
		 * All processors are now registered but not started (except
		 * for this "in-limbo" boot processor). We call to the machine
		 * topology code to finalize and activate the topology.
		 */
		cpu_topology_start();
	}

	return KERN_SUCCESS;

failed:
	cpu_processor_free(this_cpu_datap->cpu_processor);
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
	chudxnu_cpu_free(this_cpu_datap->cpu_chud);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	return KERN_FAILURE;
}
void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t	os_supports_sse;
	i386_cpu_info_t	*cpuid_infop;

	if (cpu_infop == NULL)
		return;

	/*
	 * Are we supporting MMX/SSE/SSE2/SSE3?
	 * As distinct from whether the cpu has these capabilities.
	 */
	os_supports_sse = get_cr4() & CR4_XMM;
	if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse)
		cpu_infop->vector_unit = 8;
	else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse)
		cpu_infop->vector_unit = 7;
	else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse)
		cpu_infop->vector_unit = 6;
	else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
		cpu_infop->vector_unit = 5;
	else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_infop->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_infop->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_infop->vector_unit = 2;
	else
		cpu_infop->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}
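
/*
 * Illustrative sketch (not compiled): querying the per-CPU info this
 * routine fills in. Note that l2_settings/l3_settings act as "present"
 * flags, and the corresponding sizes read 0xFFFFFFFF when a cache level is
 * absent. The helper name is hypothetical.
 */
#if 0
static void
example_report_caches(void)
{
	ml_cpu_info_t info;

	ml_cpu_get_info(&info);
	kprintf("cacheline %lu, L1I %lu, L1D %lu\n",
		info.cache_line_size, info.l1_icache_size, info.l1_dcache_size);
	if (info.l2_settings)
		kprintf("L2 %lu bytes\n", info.l2_cache_size);
}
#endif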
void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		/* Block until ml_init_max_cpus() publishes the count. */
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}
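
/*
 * Illustrative sketch (not compiled): ml_get_max_cpus() may block until
 * ml_init_max_cpus() has been called by the platform expert, so it is only
 * safe to call from a context that can sleep. The caller below is
 * hypothetical.
 */
#if 0
static void
example_size_per_cpu_arrays(void)
{
	int ncpus = ml_get_max_cpus();	/* sleeps until the count is known */

	/* ... allocate ncpus-sized bookkeeping structures ... */
	(void) ncpus;
}
#endif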
/*
 *	Routine:        ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	/* LockTimeOut is in absolute time; LockTimeOutTSC is in TSC ticks */
	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (uint32_t) abstime;
	LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t);

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int) abstime;
}
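
/*
 * Illustrative sketch (not compiled): effect of the "mtxspin" boot-arg
 * handled above. A value in microseconds is clamped to USEC_PER_SEC>>4
 * before conversion to absolute time; e.g. booting with mtxspin=100 yields
 * a 100-usec mutex spin timeout.
 */
#if 0
static uint32_t
example_clamped_mtxspin(uint32_t mtxspin_usecs)
{
	if (mtxspin_usecs > USEC_PER_SEC>>4)
		mtxspin_usecs = USEC_PER_SEC>>4;	/* clamp as above */
	return mtxspin_usecs;
}
#endif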
/*
 *	This is called from the machine-independent routine cpu_up()
 *	to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 *	This is called from the machine-independent routine cpu_down()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	return;
}
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return (current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return (current_thread_fast());
}
boolean_t
ml_is64bit(void)
{
	return (cpu_mode_is64bit());
}

boolean_t
ml_thread_is64bit(thread_t thread)
{
	return (thread_is_64bit(thread));
}

boolean_t
ml_state_is64bit(void *saved_state)
{
	return is_saved_state64(saved_state);
}
void
ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT)
		return;

	/*
	 * If 64bit this requires a mode switch (and back).
	 */
	if (cpu_mode_is64bit())
		ml_64bit_lldt(selector);
	else
		lldt(selector);
	current_cpu_datap()->cpu_ldt = selector;
}
void
ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}
uint64_t
ml_cpu_int_event_time(void)
{
	return current_cpu_datap()->cpu_int_event_time;
}
#if MACH_KDB

/*
 *	Display the global msrs.
 */
void
db_msr(__unused db_expr_t addr,
       __unused int have_addr,
       __unused db_expr_t count,
       __unused char *modif)
{
	uint32_t i, msrlow, msrhigh;

	/* Try all of the first 4096 msrs */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n", i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0x0C000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0x0C000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
				  0x0C000000 | i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0xC0000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0xC0000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
				  0xC0000000 | i, msrhigh, msrlow);
		}
	}
}

#endif /* MACH_KDB */