/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/lapic.h>
#include <i386/mp_events.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/pmCPU.h>
#include <i386/proc_reg.h>
#include <i386/cpu_threads.h>
#include <mach/vm_param.h>
#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif
#define MIN(a,b) ((a)<(b)? (a) : (b))

#if DEBUG
#define DBG(x...)       kprintf("DBG: " x)
#else
#define DBG(x...)
#endif
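/*
 * DBG() compiles to nothing unless DEBUG is set; a call site would look
 * like the following (arguments illustrative only):
 *
 *      DBG("cpu %d registered, lapic_id %d\n", target_cpu, lapic_id);
 */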
extern thread_t Shutdown_context(thread_t thread, void (*doshutdown)(processor_t), processor_t processor);
extern void     wakeup(void *);
extern unsigned KernelRelocOffset;
static int max_cpus_initialized = 0;

unsigned int    LockTimeOut;
unsigned int    LockTimeOutTSC;
unsigned int    MutexSpin;

#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2
/* IO memory map services */

/* Map memory map IO space */
vm_offset_t
ml_io_map(
        vm_offset_t phys_addr,
        vm_size_t size)
{
        return (io_map(phys_addr, size, VM_WIMG_IO));
}
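/*
 * Usage sketch (hypothetical, not part of this file): a driver maps one
 * page of device registers as uncached IO space; the base address below
 * is purely illustrative.
 *
 *      vm_offset_t regs = ml_io_map((vm_offset_t)0xFEC00000, PAGE_SIZE);
 */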
/* boot memory allocation */
vm_offset_t
ml_static_malloc(
        __unused vm_size_t size)
{
        return ((vm_offset_t)NULL);
}
void
ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
        *phys_addr = bounce_pool_base;
        *size      = bounce_pool_size;
}
vm_offset_t
ml_boot_ptovirt(
        vm_offset_t paddr)
{
        return (vm_offset_t)((paddr - KernelRelocOffset) | LINEAR_KERNEL_ADDRESS);
}
vm_offset_t
ml_static_ptovirt(
        vm_offset_t paddr)
{
        return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}
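/*
 * Both translations above rely on the kernel's static linear window:
 * OR-ing LINEAR_KERNEL_ADDRESS onto a physical address yields that
 * memory's virtual address in the fixed kernel mapping, and
 * ml_boot_ptovirt() additionally backs out KernelRelocOffset for the
 * case where the booter relocated the kernel.
 */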
/*
 *      Routine:        ml_static_mfree
 *      Function:       Return static (boot-time) kernel memory to the
 *                      VM free list, page by page.
 */
void
ml_static_mfree(
        vm_offset_t vaddr,
        vm_size_t size)
{
        vm_offset_t vaddr_cur;
        ppnum_t ppn;

//      if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

        assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */

        for (vaddr_cur = vaddr;
             vaddr_cur < round_page_32(vaddr + size);
             vaddr_cur += PAGE_SIZE) {
                ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
                if (ppn != (vm_offset_t)NULL) {
                        kernel_pmap->stats.resident_count++;
                        if (kernel_pmap->stats.resident_count >
                            kernel_pmap->stats.resident_max) {
                                kernel_pmap->stats.resident_max =
                                        kernel_pmap->stats.resident_count;
                        }
                        pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
                        /* hand the page back to the VM as a new free page */
                        vm_page_create(ppn, (ppn + 1));
                        vm_page_wire_count--;
                }
        }
}
/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(
        vm_offset_t vaddr)
{
        return kvtophys(vaddr);
}
/*
 *      Routine:        ml_nofault_copy
 *      Function:       Perform a physical mode copy if the source and
 *                      destination have valid translations in the kernel pmap.
 *                      If translations are present, they are assumed to
 *                      be wired; i.e. no attempt is made to guarantee that the
 *                      translations obtained remain valid for
 *                      the duration of the copy process.
 */

vm_size_t
ml_nofault_copy(
        vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
        addr64_t cur_phys_dst, cur_phys_src;
        uint32_t count, nbytes = 0;

        while (size > 0) {
                if (!(cur_phys_src = kvtophys(virtsrc)))
                        break;
                if (!(cur_phys_dst = kvtophys(virtdst)))
                        break;
                if (!pmap_valid_page(i386_btop(cur_phys_dst)) ||
                    !pmap_valid_page(i386_btop(cur_phys_src)))
                        break;
                /* copy at most to the end of whichever page ends first */
                count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
                if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
                        count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
                if (count > size)
                        count = size;

                bcopy_phys(cur_phys_src, cur_phys_dst, count);

                nbytes += count;
                virtsrc += count;
                virtdst += count;
                size -= count;
        }

        return nbytes;
}
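/*
 * Usage sketch (hypothetical caller, not part of this file): probe a
 * possibly-unmapped kernel address without taking a fault; a short copy
 * means one side had no valid translation.
 *
 *      uint32_t word;
 *      if (ml_nofault_copy(addr, (vm_offset_t)&word,
 *                          sizeof (word)) != sizeof (word))
 *              return (KERN_FAILURE);
 */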
/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
        (void) ml_set_interrupts_enabled(TRUE);
}
/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
  unsigned long flags;

  /* EFLAGS.IF (bit 9) holds the interrupt-enable state */
  __asm__ volatile("pushf; popl %0" :  "=r" (flags));
  return (flags & EFL_IF) != 0;
}
/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
  unsigned long flags;

  __asm__ volatile("pushf; popl %0" :  "=r" (flags));

  if (enable) {
        ast_t           *myast;

        myast = ast_pending();

        if ( (get_preemption_level() == 0) && (*myast & AST_URGENT) ) {
          /* take the pending urgent AST via a self-induced interrupt */
          __asm__ volatile("sti");
          __asm__ volatile ("int $0xff");
        } else {
          __asm__ volatile ("sti");
        }
  }
  else {
        __asm__ volatile("cli");
  }

  /* return the previous interrupt-enable state */
  return (flags & EFL_IF) != 0;
}
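/*
 * The usual caller pattern (as in ml_init_max_cpus() below) saves the
 * previous state and restores it, rather than blindly re-enabling:
 *
 *      boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *      ... critical section ...
 *      (void) ml_set_interrupts_enabled(istate);
 */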
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
        return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
        panic("ml_cause_interrupt not defined yet on Intel");
}
void ml_thread_policy(
        thread_t thread,
__unused unsigned policy_id,
        unsigned policy_info)
{
        if (policy_info & MACHINE_NETWORK_WORKLOOP) {
                spl_t           s = splsched();

                thread_lock(thread);

                set_priority(thread, thread->priority + 1);

                thread_unlock(thread);
                splx(s);
        }
}
/* Initialize Interrupts */
void ml_install_interrupt_handler(
        void *nub,
        int source,
        void *target,
        IOInterruptHandler handler,
        void *refCon)
{
        boolean_t current_state;

        current_state = ml_get_interrupts_enabled();

        PE_install_interrupt_handler(nub, source, target,
                                     (IOInterruptHandler) handler, refCon);

        (void) ml_set_interrupts_enabled(current_state);

        initialize_screen(NULL, kPEAcquireScreen);
}
void
machine_signal_idle(
        processor_t processor)
{
        cpu_interrupt(processor->cpu_num);
}
thread_t
machine_processor_shutdown(
        thread_t        thread,
        void            (*doshutdown)(processor_t),
        processor_t     processor)
{
        fpu_save_context(thread);
        return (Shutdown_context(thread, doshutdown, processor));
}
kern_return_t
ml_processor_register(
        cpu_id_t        cpu_id,
        uint32_t        lapic_id,
        processor_t     *processor_out,
        ipi_handler_t   *ipi_handler,
        boolean_t       boot_cpu)
{
        int             target_cpu;
        cpu_data_t      *this_cpu_datap;

        this_cpu_datap = cpu_data_alloc(boot_cpu);
        if (this_cpu_datap == NULL) {
                return KERN_FAILURE;
        }
        target_cpu = this_cpu_datap->cpu_number;
        assert((boot_cpu && (target_cpu == 0)) ||
              (!boot_cpu && (target_cpu != 0)));

        lapic_cpu_map(lapic_id, target_cpu);

        this_cpu_datap->cpu_id = cpu_id;
        this_cpu_datap->cpu_phys_number = lapic_id;

        this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
        if (this_cpu_datap->cpu_console_buf == NULL)
                goto failed;

        this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
        if (this_cpu_datap->cpu_chud == NULL)
                goto failed;

        if (!boot_cpu) {
                this_cpu_datap->lcpu.core =
                        cpu_thread_alloc(this_cpu_datap->cpu_number);
                if (this_cpu_datap->lcpu.core == NULL)
                        goto failed;

                this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
                if (this_cpu_datap->cpu_pmap == NULL)
                        goto failed;

                this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
                if (this_cpu_datap->cpu_processor == NULL)
                        goto failed;
                /*
                 * processor_init() deferred to topology start
                 * because "slot numbers" a.k.a. logical processor numbers
                 * are not yet finalized.
                 */
        }

        *processor_out = this_cpu_datap->cpu_processor;
        *ipi_handler = NULL;

        if (target_cpu == machine_info.max_cpus - 1) {
                /*
                 * All processors are now registered but not started (except
                 * for this "in-limbo" boot processor). We call to the machine
                 * topology code to finalize and activate the topology.
                 */
                cpu_topology_start();
        }

        return KERN_SUCCESS;

failed:
        cpu_processor_free(this_cpu_datap->cpu_processor);
        pmap_cpu_free(this_cpu_datap->cpu_pmap);
        chudxnu_cpu_free(this_cpu_datap->cpu_chud);
        console_cpu_free(this_cpu_datap->cpu_console_buf);
        return KERN_FAILURE;
}
void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
        boolean_t       os_supports_sse;
        i386_cpu_info_t *cpuid_infop;

        if (cpu_infop == NULL)
                return;

        /*
         * Are we supporting MMX/SSE/SSE2/SSE3?
         * As distinct from whether the cpu has these capabilities.
         */
        os_supports_sse = get_cr4() & CR4_XMM;
        if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse)
                cpu_infop->vector_unit = 8;
        else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse)
                cpu_infop->vector_unit = 7;
        else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse)
                cpu_infop->vector_unit = 6;
        else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
                cpu_infop->vector_unit = 5;
        else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
                cpu_infop->vector_unit = 4;
        else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
                cpu_infop->vector_unit = 3;
        else if (cpuid_features() & CPUID_FEATURE_MMX)
                cpu_infop->vector_unit = 2;
        else
                cpu_infop->vector_unit = 0;

        cpuid_infop = cpuid_info();

        cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

        cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
        cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

        if (cpuid_infop->cache_size[L2U] > 0) {
            cpu_infop->l2_settings = 1;
            cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
        } else {
            cpu_infop->l2_settings = 0;
            cpu_infop->l2_cache_size = 0xFFFFFFFF;
        }

        if (cpuid_infop->cache_size[L3U] > 0) {
            cpu_infop->l3_settings = 1;
            cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
        } else {
            cpu_infop->l3_settings = 0;
            cpu_infop->l3_cache_size = 0xFFFFFFFF;
        }
}
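/*
 * Usage sketch (hypothetical caller): vector_unit is an ordinal, so a
 * value >= 4 means SSE2 or better is both present and OS-supported.
 *
 *      ml_cpu_info_t info;
 *      ml_cpu_get_info(&info);
 *      if (info.vector_unit >= 4)
 *              ... select an SSE2 code path ...
 */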
void
ml_init_max_cpus(unsigned long max_cpus)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
                        /*
                         * Note: max_cpus is the number of enabled processors
                         * that ACPI found; max_ncpus is the maximum number
                         * that the kernel supports or that the "cpus="
                         * boot-arg has set. Here we take the minimum.
                         */
                        machine_info.max_cpus = MIN(max_cpus, max_ncpus);
                }
                if (max_cpus_initialized == MAX_CPUS_WAIT)
                        wakeup((event_t)&max_cpus_initialized);
                max_cpus_initialized = MAX_CPUS_SET;
        }
        (void) ml_set_interrupts_enabled(current_state);
}
int
ml_get_max_cpus(void)
{
        boolean_t current_state;

        current_state = ml_set_interrupts_enabled(FALSE);
        if (max_cpus_initialized != MAX_CPUS_SET) {
                max_cpus_initialized = MAX_CPUS_WAIT;
                assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
                (void) thread_block(THREAD_CONTINUE_NULL);
        }
        (void) ml_set_interrupts_enabled(current_state);
        return (machine_info.max_cpus);
}
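/*
 * The two routines above form a small event handshake: a caller of
 * ml_get_max_cpus() that arrives before ml_init_max_cpus() has run parks
 * itself on &max_cpus_initialized via assert_wait()/thread_block(), and
 * ml_init_max_cpus() wakes it once machine_info.max_cpus is final.
 */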
/*
 *      Routine:        ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
        uint64_t        abstime;
        uint32_t        mtxspin;

        /* LockTimeOut is in absolute time, LockTimeOutTSC is in TSC ticks */
        nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
        LockTimeOut = (uint32_t) abstime;
        LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t);

        if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
                if (mtxspin > USEC_PER_SEC>>4)
                        mtxspin = USEC_PER_SEC>>4;
                nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
        } else {
                nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
        }
        MutexSpin = (unsigned int)abstime;
}
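/*
 * Worked out: NSEC_PER_SEC>>2 = 250,000,000 ns, so the lock timeout is a
 * quarter second.  The "mtxspin" boot-arg is in microseconds and is
 * capped at USEC_PER_SEC>>4 = 62,500 us; absent the boot-arg, mutexes
 * spin for 10 us before blocking.
 */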
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
        return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
        return;
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
  return (current_thread_fast());
}
#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
  return (current_thread_fast());
}
boolean_t ml_is64bit(void)
{
        return (cpu_mode_is64bit());
}


boolean_t ml_thread_is64bit(thread_t thread)
{
        return (thread_is_64bit(thread));
}


boolean_t ml_state_is64bit(void *saved_state)
{
        return is_saved_state64(saved_state);
}
void ml_cpu_set_ldt(int selector)
{
        /*
         * Avoid loading the LDT
         * if we're setting the KERNEL LDT and it's already set.
         */
        if (selector == KERNEL_LDT &&
            current_cpu_datap()->cpu_ldt == KERNEL_LDT)
                return;

        /*
         * If 64bit this requires a mode switch (and back).
         */
        if (cpu_mode_is64bit())
                ml_64bit_lldt(selector);
        else
                lldt(selector);
        current_cpu_datap()->cpu_ldt = selector;
}
void ml_fp_setvalid(boolean_t value)
{
        fp_setvalid(value);
}

uint64_t ml_cpu_int_event_time(void)
{
        return current_cpu_datap()->cpu_int_event_time;
}
#if MACH_KDB

/*
 *      Display the global msrs
 */
void
db_msr(__unused db_expr_t addr,
       __unused int have_addr,
       __unused db_expr_t count,
       __unused char *modif)
{
        uint32_t        i, msrlow, msrhigh;

        /* Try all of the first 4096 msrs */
        for (i = 0; i < 4096; i++) {
                if (!rdmsr_carefully(i, &msrlow, &msrhigh)) {
                        db_printf("%08X - %08X.%08X\n", i, msrhigh, msrlow);
                }
        }

        /* Try all of the 4096 msrs at 0x0C000000 */
        for (i = 0; i < 4096; i++) {
                if (!rdmsr_carefully(0x0C000000 | i, &msrlow, &msrhigh)) {
                        db_printf("%08X - %08X.%08X\n",
                                0x0C000000 | i, msrhigh, msrlow);
                }
        }

        /* Try all of the 4096 msrs at 0xC0000000 */
        for (i = 0; i < 4096; i++) {
                if (!rdmsr_carefully(0xC0000000 | i, &msrlow, &msrhigh)) {
                        db_printf("%08X - %08X.%08X\n",
                                0xC0000000 | i, msrhigh, msrlow);
                }
        }
}

#endif /* MACH_KDB */
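/*
 * Each db_printf() line above reads "index - high32.low32".  For example,
 * MSR 0x10 (IA32_TIME_STAMP_COUNTER) might print as (value hypothetical):
 *
 *      00000010 - 00000012.34567890
 */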