/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>
unsigned int	LockTimeOut = 1250000000;
unsigned int	MutexSpin = 0;

decl_mutex_data(static, mcpus_lock);
unsigned int	mcpus_lock_initialized = 0;
unsigned int	mcpus_state = 0;

uint32_t	warFlags = 0;
#define warDisMBpoff	0x80000000
#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int	spsLockInit = 0;
extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;

extern unsigned int entfsectPatch_isync;
extern unsigned int retfsectPatch_isync;
extern unsigned int retfsectPatch_eieio;
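/*
 * Note (editorial): each extern above labels an isync or eieio barrier
 * instruction embedded in a low-level lock primitive.  When the machine has
 * only one logical CPU, ml_init_max_cpus() below overwrites each patch site
 * with 0x60000000 -- the PowerPC encoding of nop (ori 0,0,0) -- since the
 * barriers have no other CPU to synchronize with.  The patch_up_table that
 * follows simply pairs each patch site with that replacement word.
 */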
struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;

patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
	{&entfsectPatch_isync,		0x60000000},
	{&retfsectPatch_isync,		0x60000000},
	{&retfsectPatch_eieio,		0x60000000},
	{NULL,				0x00000000}	/* terminator: the patch loop stops at a NULL addr */
};
extern boolean_t pmap_initialized;
/*
 *	Routine:	ml_io_map
 *	Function:	Map memory map IO space
 */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size, VM_WIMG_IO));
}
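/*
 * A caller would typically use the returned virtual address to reach device
 * registers, e.g. (hypothetical physical address and size):
 *
 *	vm_offset_t regs = ml_io_map(0xf8000000, PAGE_SIZE);
 *
 * The mapping is created with VM_WIMG_IO attributes (cache-inhibited and
 * guarded), as appropriate for memory-mapped I/O.
 */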
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	/* No bounce pool on this platform; report an empty range */
	*phys_addr = 0;
	*size = 0;
}
/*
 *	Routine:	ml_static_malloc
 *	Function:	static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr + size);
		return(vaddr);
	}
}
/*
 *	Routine:	ml_static_ptovirt
 *	Function:	physical to virtual for statically mapped memory
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ((vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr))
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}
/*
 *	Routine:	ml_static_mfree
 *	Function:	free statically allocated memory back to the VM system
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
			vm_page_create(paddr_cur >> 12, (paddr_cur + PAGE_SIZE) >> 12);
		}
	}
}
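/*
 * Each page in the freed range is unmapped from the kernel pmap, the wire
 * count is dropped, and the underlying physical page is handed back to the
 * page free list via vm_page_create(), which takes physical page numbers --
 * hence the >> 12 shifts for 4 KB pages.
 */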
/*
 *	Routine:	ml_vtophys
 *	Function:	virtual to physical on static pages
 */
vm_offset_t
ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}
/*
 *	Routine:	ml_install_interrupt_handler
 *	Function:	Initialize Interrupt Handler
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info	*proc_info;
	boolean_t		current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub     = nub;
	proc_info->interrupt_source  = source;
	proc_info->interrupt_target  = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon  = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}
/*
 *	Routine:	ml_init_interrupt
 *	Function:	Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:	ml_get_interrupts_enabled
 *	Function:	Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);	/* MSR[EE] is the external-interrupt enable bit */
}
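/*
 * Note on the idiom used throughout this file: callers save the current
 * enable state with ml_set_interrupts_enabled(FALSE), do their work with
 * interrupts masked, and then restore the saved state, so each routine
 * behaves correctly whether it is entered with interrupts on or off.
 */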
/*
 *	Routine:	ml_at_interrupt_context
 *	Function:	Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t	ret;
	boolean_t	current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);	/* istackptr is cleared while the interrupt stack is in use */
	ml_set_interrupts_enabled(current_state);
	return(ret);
}
/*
 *	Routine:	ml_cause_interrupt
 *	Function:	Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}
/*
 *	Routine:	ml_thread_policy
 *	Function:	Apply machine-specific scheduling policy to a thread
 */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if ((policy_id == MACHINE_GROUP) &&
	    ((PerProcTable[master_cpu].ppe_vaddr->pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
/*
 *	Routine:	machine_signal_idle
 *	Function:	Wake an idle (possibly napping) processor
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze | pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}
/*
 *	Routine:	ml_processor_register
 *	Function:	Register a boot or secondary processor with the platform layer
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t	*in_processor_info,
	processor_t		*processor_out,
	ipi_handler_t		*ipi_handler)
{
	struct per_proc_info	*proc_info;
	int			donap;
	boolean_t		current_state;
	boolean_t		boot_processor;

	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (!boot_processor)
		if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
			goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if (in_processor_info->time_base_enable != (void (*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void (*)(cpu_id_t, boolean_t))NULL;

	if ((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if (forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if ((proc_info->pf.Available & pfCanNap)
	    && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if (proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add((uint32_t *)&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor, proc_info->cpu_number);
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}
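/*
 * Note: the boot processor reuses the statically set-up master per_proc
 * entry (PerProcTable[master_cpu]), while each additional processor gets a
 * freshly allocated per_proc and console buffer, extra save areas, and a
 * processor_init() call of its own.
 */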
/*
 *	Routine:	ml_enable_nap
 *	Function:	Enable or disable nap on a processor; returns the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info	*proc_info;
	boolean_t		prev_value;
	boolean_t		current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if (forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if (proc_info->pf.Available & pfCanNap) {			/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Yes, set the requested state */
		else proc_info->pf.Available &= ~pfWillNap;		/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if (proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}
/*
 *	Routine:	ml_init_max_cpus
 *	Function:	Record the maximum CPU count and, on uniprocessors, patch out barriers
 */
void
ml_init_max_cpus(unsigned int mcpus)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus == 0)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;	/* clear the wait flag before waking waiters */
		thread_wakeup((event_t)&mcpus_state);
	}
	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}
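/*
 * The patch writes above go through bcopy_phys() because the kernel text is
 * mapped V=R (virtual equals real), so the virtual address of each patch
 * site is also its physical address; sync_cache64() then flushes the
 * modified word so instruction fetch sees the new nop.
 */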
/*
 *	Routine:	ml_get_max_cpus
 *	Function:	Return the maximum CPU count, waiting until it has been set
 */
unsigned int
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
				   &mcpus_lock, THREAD_UNINT);
	} else
		mutex_unlock(&mcpus_lock);
	return(machine_info.max_cpus);
}
/*
 *	This is called from the machine-independent routine cpu_up()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}
/*
 *	This is called from the machine-independent routine cpu_down()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);
}
/*
 *	Routine:	ml_cpu_get_info
 *	Function:	Fill in a caller-supplied ml_cpu_info_t from the master processor's features
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info	*proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}
/*
 *	Routine:	ml_enable_cache_level
 *	Function:	Enable or disable the L2/L3 cache; returns the previous state, or -1
 */
#define l2em 0x80000000
#define l3em 0x80000000
int
ml_enable_cache_level(int cache_level, int enable)
{
	boolean_t old_mode;
	unsigned long available, ccr;
	struct per_proc_info *proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
			cacheInit();
		}
		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
			cacheInit();
		}
		return old_mode;
	}

	return -1;
}
/*
 *	Routine:	ml_set_processor_speed
 *	Function:	Change processor speed using the platform-specific mechanism
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info	*proc_info;
	unsigned int		cpu;
	kern_return_t		result;
	boolean_t		current_state;
	unsigned int		i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;

	switch (proc_info->pf.pfPowerModes & pmType) {	/* Figure specific type */
	case pmDualPLL:

		ml_set_processor_speed_dpll(speed);
		break;

	case pmDFS:

		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i = 200; i > 0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != cpu_number()) {
					if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Fail to set cpu%d speed\n", cpu);
		}
		break;

	case pmPowerTune:

		ml_set_processor_speed_powertune(speed);
		break;

	default:
		break;
	}
	return;
}
/*
 *	Routine:	ml_set_processor_speed_slave
 *	Function:	Set DFS speed on this processor and wake the requesting thread
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}
/*
 *	Routine:	ml_init_lock_timeout
 *	Function:	Initialize the lock timeout and mutex spin limits
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC >> 4)
			mtxspin = USEC_PER_SEC >> 4;
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}
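/*
 * LockTimeOut therefore corresponds to NSEC_PER_SEC >> 2 = 250 ms expressed
 * in absolute-time units, and the "mtxspin" boot-arg (in microseconds) is
 * clamped to USEC_PER_SEC >> 4 = 62500 us; without the boot-arg, mutexes
 * spin for 10 us before blocking.
 */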
/*
 *	Routine:	init_ast_check
 *	Function:	Nothing to do on this platform
 */
void
init_ast_check(
	__unused processor_t processor)
{
}
/*
 *	Routine:	cause_ast_check
 *	Function:	Signal another processor to check for pending ASTs
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}
/*
 *	Routine:	machine_processor_shutdown
 *	Function:	Perform the machine-dependent processor shutdown
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	__unused void (*doshutdown)(processor_t),
	__unused processor_t processor)
{
	CreateShutdownCTX();				/* Create the shutdown context */
	return((thread_t)(getPerProc()->old_thread));
}
/*
 *	Routine:	set_be_bit
 *	Function:	Set the branch-trace-enable flag for this processor
 */
void
set_be_bit(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:	clr_be_bit
 *	Function:	Clear the branch-trace-enable flag for this processor
 */
void
clr_be_bit(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:	be_tracing
 *	Function:	Return non-zero if branch tracing is enabled on this processor
 */
int
be_tracing(void)
{
	return(getPerProc()->cpu_flags & traceBE);
}
void ml_mem_backoff(void) {

	if (warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");		/* Drain all pending memory operations */
	__asm__ volatile("isync");		/* Wait for completion before continuing */

	return;
}