/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <vm/vm_page.h>
unsigned int LockTimeOut = 12500000;
unsigned int MutexSpin = 0;
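/*
 * Spin/timeout defaults.  ml_init_lock_timeout() below recomputes both in
 * absolute-time units: LockTimeOut becomes roughly a quarter of a second,
 * and MutexSpin comes from the "mtxspin" boot-arg (capped at USEC_PER_SEC>>4)
 * or defaults to 10us.
 */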
decl_mutex_data(static, mcpus_lock);
unsigned int mcpus_lock_initialized = 0;
unsigned int mcpus_state = 0;
uint32_t warFlags = 0;
#define warDisMBpoff	0x80000000
#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02
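/*
 * warDisMBpoff, when set in warFlags, disables the memory back-off sequence
 * in ml_mem_backoff() at the end of this file.  MAX_CPUS_SET and
 * MAX_CPUS_WAIT are mcpus_state bits used by ml_init_max_cpus() and
 * ml_get_max_cpus() to hand the configured CPU count from one to the other.
 */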
decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;
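/*
 * The symbols below label individual isync/eieio instructions inside
 * low-level synchronization paths.  They exist so that ml_init_max_cpus()
 * can overwrite those barriers when the machine turns out to have a single
 * CPU (see patch_up_table below).
 */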
extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
extern unsigned int entfsectPatch_isync;
extern unsigned int retfsectPatch_isync;
extern unsigned int retfsectPatch_eieio;
typedef struct patch_up patch_up_t;
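/*
 * Each table entry pairs the address of a patchable instruction with the
 * word to write over it.  0x60000000 is the PowerPC no-op encoding
 * (ori 0,0,0), so applying the table turns the listed isync/eieio barriers
 * into nops on uniprocessor configurations.
 */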
patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
	{&entfsectPatch_isync,		0x60000000},
	{&retfsectPatch_isync,		0x60000000},
	{&retfsectPatch_eieio,		0x60000000},
	{NULL,				0x00000000}	/* terminator: the patch loop stops at a NULL address */
};
extern boolean_t pmap_initialized;
/* Map memory map IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size));
}
/*
 * Routine: ml_static_malloc
 * Function: static memory allocation
 */
	if (pmap_initialized)
		return((vm_offset_t)NULL);

	vaddr = static_memory_end;
	static_memory_end = round_page(vaddr + size);
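/*
 * Once pmap_initialized is set, static allocation is refused (NULL is
 * returned); before that, space is simply carved off the end of the V=R
 * static region and static_memory_end is advanced to a page boundary.
 */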
/*
 * Routine: ml_static_ptovirt
 */
	/* Static memory is mapped V=R */
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
/*
 * Routine: ml_static_mfree
 */
	vm_offset_t paddr_cur, vaddr_cur;
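	/*
	 * The loop below walks the freed range a page at a time: each wired
	 * page still mapped V=R has its wire count dropped, its mapping
	 * removed from the kernel pmap, and its physical page (note the >>12,
	 * i.e. 4KB page numbers) handed to vm_page_create() so it lands on
	 * the free list.
	 */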
	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
			vm_page_create(paddr_cur >> 12, (paddr_cur + PAGE_SIZE) >> 12);
		}
	}
/*
 * Routine: ml_vtophys
 * Function: virtual to physical on static pages
 */
vm_offset_t
ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}
/*
 * Routine: ml_install_interrupt_handler
 * Function: Initialize Interrupt Handler
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info *proc_info;
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub = nub;
	proc_info->interrupt_source = source;
	proc_info->interrupt_target = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}
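/*
 * Interrupt delivery for this CPU is marked enabled only after the nub,
 * source, target, handler and refCon fields have all been recorded in the
 * per_proc area, so the low-level interrupt path should never see a
 * partially filled-in handler record.
 */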
/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 * Routine: ml_get_interrupts_enabled
 * Function: Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}
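/*
 * MSR[EE] is the PowerPC external-interrupt-enable bit, so this simply
 * reports whether the current CPU is accepting external interrupts.
 */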
/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t ret;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}
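/*
 * istackptr is this CPU's interrupt-stack pointer in the per_proc area and
 * is zero while the interrupt stack is in use, so a zero value means we are
 * at interrupt context.  Interrupts are disabled around the test so the
 * answer cannot change out from under us.
 */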
/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void ml_cause_interrupt(void)
/*
 * Routine: ml_thread_policy
 */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if ((policy_id == MACHINE_GROUP) &&
	    ((PerProcTable[master_cpu].ppe_vaddr->pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);
		set_priority(thread, thread->priority + 1);
		thread_unlock(thread);

		splx(s);
	}
}
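/*
 * MACHINE_GROUP on an SMP-capable machine pins the thread to the master
 * processor; MACHINE_NETWORK_WORKLOOP just gives the thread a one-step
 * priority boost, taken under the thread lock at splsched.
 */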
/*
 * Routine: machine_signal_idle
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info *proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}
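/*
 * An idle CPU that can doze or nap may actually be asleep, so it is sent a
 * SIGPwake IPI to make it notice new work; CPUs that never nap need no kick.
 */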
/*
 * Routine: ml_processor_register
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t *in_processor_info,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler)
{
	struct per_proc_info *proc_info;
	int donap;
	boolean_t current_state;
	boolean_t boot_processor;

	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
		goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if (in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t))NULL;

	if (proc_info->pf.pfPowerModes & pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if (forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if ((proc_info->pf.Available & pfCanNap)
	    && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if (proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add((uint32_t *)&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor, proc_info->cpu_number);
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}
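/*
 * Registration summary: a non-boot CPU gets a freshly allocated per_proc
 * area and console buffer, every CPU gets a CHUD area, the nap policy is
 * applied (honoring the forcenap override), extra savearea targets are
 * reserved for secondary CPUs, and the caller receives the processor_t
 * plus the common IPI handler (cpu_signal_handler).
 */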
/*
 * Routine: ml_enable_nap
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info *proc_info;
	boolean_t prev_value;
	boolean_t current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if (forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if (proc_info->pf.Available & pfCanNap) {	/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Is nap supported on this machine? */
		else proc_info->pf.Available &= ~pfWillNap;		/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if (proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}
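/*
 * The "Set live value" mtsprg keeps a copy of this CPU's pf.Available word
 * in SPRG2; low-level assembly presumably tests the nap/doze bits there
 * rather than reloading them from the per_proc area.
 */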
/*
 * Routine: ml_init_max_cpus
 */
void
ml_init_max_cpus(unsigned int mcpus)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;	/* clear the waiter flag */
		thread_wakeup((event_t)&mcpus_state);
	}
	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}
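/*
 * With only one logical CPU possible, the isync/eieio barriers listed in
 * patch_up_table are overwritten with nops.  The kernel text is mapped V=R,
 * so bcopy_phys() can write through the physical alias, and sync_cache64()
 * then flushes the patched line so the instruction cache sees the new word.
 */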
/*
 * Routine: ml_get_max_cpus
 */
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
				   &mcpus_lock, THREAD_UNINT);
	} else
		mutex_unlock(&mcpus_lock);
	return(machine_info.max_cpus);
}
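/*
 * Callers block here (THREAD_UNINT) until ml_init_max_cpus() has published
 * the CPU count: the MAX_CPUS_WAIT flag asks for a wakeup, and
 * thread_sleep_mutex() drops mcpus_lock while sleeping, which is why the
 * explicit unlock is only done on the non-sleeping path.
 */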
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}
/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);
}
/*
 * Routine: ml_cpu_get_info
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info *proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}

	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}
/*
 * Routine: ml_enable_cache_level
 */
#define l2em 0x80000000
#define l3em 0x80000000

int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;
	struct per_proc_info *proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			proc_info->pf.l2cr = ccr;
		}
		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			proc_info->pf.l3cr = ccr;
		}
		return old_mode;
	}

	return -1;
}
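/*
 * l2em/l3em test the enable bit (the MSB) of L2CR/L3CR, and enabling a
 * cache restores the value saved at boot in l2crOriginal/l3crOriginal.
 * The real_ncpus check is there because poking cache control registers is
 * only attempted on uniprocessor machines.
 */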
decl_simple_lock_data(, spsLock);
/*
 * Routine: ml_set_processor_speed
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info *proc_info;
	uint32_t powerModes, cpu;
	kern_return_t result;
	boolean_t current_state;
	int i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	powerModes = proc_info->pf.pfPowerModes;

	if (powerModes & pmDualPLL) {

		ml_set_processor_speed_dpll(speed);

	} else if (powerModes & pmDFS) {

		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i = 200; i > 0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != cpu_number()) {
					if (PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady)
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Fail to set cpu%d speed\n", cpu);
		}

	} else if (powerModes & pmPowerTune) {

		ml_set_processor_speed_powertune(speed);

	}
}
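/*
 * Three speed-change mechanisms are supported: dual-PLL parts switch PLLs
 * directly, DFS parts have every CPU change its own divider (remote CPUs
 * are asked via a SIGPcpureq/CPRQsps signal and the caller sleeps on
 * spsLock until the slave acknowledges), and PowerTune parts are handled
 * by ml_set_processor_speed_powertune().
 */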
/*
 * Routine: ml_set_processor_speed_slave
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}
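/*
 * Runs on the target CPU when the SIGPcpureq/CPRQsps signal arrives: it
 * applies the new DFS setting locally and then wakes the requesting CPU,
 * which is sleeping on spsLock in ml_set_processor_speed().
 */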
/*
 * Routine: ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint32_t mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC >> 4)
			mtxspin = USEC_PER_SEC >> 4;
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}
/*
 * Routine: init_ast_check
 */
void
init_ast_check(
	__unused processor_t processor)
{
}
/*
 * Routine: cause_ast_check
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info *proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}
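/*
 * An AST check is delivered to another CPU as a SIGPast IPI; the local CPU
 * and CPUs that have not yet enabled interrupt handling are skipped.
 */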
/*
 * Routine: machine_processor_shutdown
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	__unused void (*doshutdown)(processor_t),
	__unused processor_t processor)
{
	return((thread_t)(getPerProc()->old_thread));
}
/*
 * Routine: set_be_bit
 */
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
/*
 * Routine: clr_be_bit
 */
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
/*
 * Routine: be_tracing
 */
	return(getPerProc()->cpu_flags & traceBE);
void ml_mem_backoff(void) {

	if (warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");
	__asm__ volatile("isync");
}