/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/processor_info.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>

#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/machine_cpu.h>
decl_mutex_data(static,ppt_lock);

unsigned int		real_ncpus = 1;
unsigned int		max_ncpus  = MAX_CPUS;

decl_simple_lock_data(static,rht_lock);

static unsigned int	rht_state = 0;
#define RHT_WAIT	0x01
#define RHT_BUSY	0x02

decl_simple_lock_data(static,SignalReadyLock);

struct SIGtimebase {
	volatile boolean_t	avail;
	volatile boolean_t	ready;
	volatile boolean_t	done;
	uint64_t		abstime;
};

perfCallback		perfCpuSigHook = 0;			/* Pointer to CHUD cpu signal hook routine */

extern int debugger_sync;

void	cpu_sync_timebase(void);
void	cpu_timebase_signal_handler(
			struct per_proc_info	*proc_info,
			struct SIGtimebase	*timebaseAddr);
/*
 *	Routine:	cpu_bootstrap
 *	Function:	Initialize the locks used by the cpu layer.
 */
void
cpu_bootstrap(void)
{
	simple_lock_init(&rht_lock, 0);
	simple_lock_init(&SignalReadyLock, 0);
	mutex_init(&ppt_lock, 0);
}


/*
 *	Routine:	cpu_init
 *	Function:	Initialize the per_proc block for the processor we are running on.
 */
void
cpu_init(void)
{
	struct per_proc_info *proc_info;

	proc_info = getPerProc();

	/*
	 * Restore the timebase saved before sleep, if any.
	 */
	if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
		mttbu(proc_info->save_tbu);
		mttb(proc_info->save_tbl);
	}

	proc_info->cpu_type = CPU_TYPE_POWERPC;
	proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
	proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
	proc_info->running = TRUE;
}
/*
 *	Routine:	cpu_machine_init
 *	Function:	Finish machine-dependent initialization of this processor and
 *			mark it ready for signals.
 */
void
cpu_machine_init(void)
{
	struct per_proc_info		*proc_info;
	volatile struct per_proc_info	*mproc_info;

	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {					/* Not the boot processor? */
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);			/* Release the reset handler */
		simple_unlock(&rht_lock);
	}

	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info != mproc_info) {
		while (!((mproc_info->cpu_flags) & SignalReady))	/* Wait for the master to be ready */
			continue;
		cpu_sync_timebase();					/* Then match its timebase */
	}

	ml_init_interrupt();
	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);		/* Tell cpu_start() we are up */
		}
		simple_unlock(&SignalReadyLock);
	}
}
/*
 *	Routine:	cpu_per_proc_alloc
 *	Function:	Allocate and initialize a per_proc block plus its interrupt
 *			and debugger stacks.
 */
struct per_proc_info *
cpu_per_proc_alloc(void)
{
	struct per_proc_info	*proc_info = 0;
	void			*interrupt_stack = 0;
	void			*debugger_stack = 0;

	if ((proc_info = (struct per_proc_info *)kalloc(PAGE_SIZE)) == (struct per_proc_info *)0)
		return (struct per_proc_info *)NULL;
	if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
		kfree(proc_info, PAGE_SIZE);
		return (struct per_proc_info *)NULL;
	}
#if MACH_KDP || MACH_KDB
	if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
		kfree(proc_info, PAGE_SIZE);
		kfree(interrupt_stack, INTSTACK_SIZE);
		return (struct per_proc_info *)NULL;
	}
#endif

	bzero((void *)proc_info, sizeof(struct per_proc_info));

	proc_info->next_savearea = (uint64_t)save_get_init();
	proc_info->pf = BootProcInfo.pf;
	proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
	proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
	proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
	proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */

	return proc_info;
}
/*
 *	Routine:	cpu_per_proc_free
 *	Function:	Release a per_proc block and its stacks (never the master's).
 */
void
cpu_per_proc_free(
	struct per_proc_info	*proc_info)
{
	if (proc_info->cpu_number == master_cpu)
		return;
	kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
	kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
	kfree((void *)proc_info, PAGE_SIZE);
}
/*
 *	Routine:	cpu_per_proc_register
 *	Function:	Assign a cpu number to a per_proc block and enter it in PerProcTable.
 */
kern_return_t
cpu_per_proc_register(
	struct per_proc_info	*proc_info)
{
	int	cpu;

	mutex_lock(&ppt_lock);
	if (real_ncpus >= max_ncpus) {
		mutex_unlock(&ppt_lock);
		return KERN_FAILURE;
	}
	cpu = real_ncpus;
	proc_info->cpu_number = cpu;
	PerProcTable[cpu].ppe_vaddr = proc_info;
	PerProcTable[cpu].ppe_paddr = ((addr64_t)pmap_find_phys(kernel_pmap, (vm_offset_t)proc_info)) << PAGE_SHIFT;
	eieio();
	real_ncpus++;
	mutex_unlock(&ppt_lock);
	return KERN_SUCCESS;
}
/*
 *	Routine:	cpu_start
 *	Function:	Start up a processor that is not already running.
 */
kern_return_t
cpu_start(
	int	cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;
	mapping_t		*mp;

	proc_info = PerProcTable[cpu].ppe_vaddr;

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;
		proc_info->istackptr = proc_info->intstack_top_ss;
		proc_info->rtcPop = 0xFFFFFFFFFFFFFFFFULL;
		mp = (mapping_t *)(&proc_info->ppUMWmp);
		mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
		mp->mpSpace = invalSpace;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			simple_lock(&rht_lock);
			while (rht_state & RHT_BUSY) {		/* Wait for any other user of the reset handler */
				rht_state |= RHT_WAIT;
				thread_sleep_usimple_lock((event_t)&rht_state,
						&rht_lock, THREAD_UNINT);
			}
			rht_state |= RHT_BUSY;
			simple_unlock(&rht_lock);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					(vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					(vm_offset_t)&PerProcTable[cpu]);
		}
/*
 *		Note: we pass the current time to the other processor here. He will load it
 *		as early as possible so that there is a chance that it is close to accurate.
 *		After the machine is up a while, we will officially resync the clocks so
 *		that all processors are the same. This is just to get close.
 */

		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

		__asm__ volatile("sync");			/* Commit to storage */
		__asm__ volatile("isync");			/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
				proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS) {
			if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
				simple_lock(&rht_lock);
				if (rht_state & RHT_WAIT)
					thread_wakeup(&rht_state);
				rht_state &= ~(RHT_BUSY|RHT_WAIT);	/* Give up the reset handler */
				simple_unlock(&rht_lock);
			}
		} else {
			simple_lock(&SignalReadyLock);
			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
				hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
						&SignalReadyLock, THREAD_UNINT);
			}
			simple_unlock(&SignalReadyLock);
		}
		return(ret);
	}
}
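/*
 *	A sketch of how the three routines above fit together (illustrative only,
 *	not taken from this file; the actual callers live in the platform and
 *	processor startup code): a new processor is brought up by allocating and
 *	registering a per_proc block and then starting it.
 *
 *		struct per_proc_info *proc_info;
 *
 *		if ((proc_info = cpu_per_proc_alloc()) != NULL &&
 *		    cpu_per_proc_register(proc_info) == KERN_SUCCESS)
 *			(void)cpu_start(proc_info->cpu_number);
 */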
/*
 *	Routine:	cpu_exit_wait
 *	Function:	Wait for a processor to reach its sleep state.
 */
void
cpu_exit_wait(
	int	cpu)
{
	struct per_proc_info	*tpproc;

	if (cpu != master_cpu) {
		tpproc = PerProcTable[cpu].ppe_vaddr;
		while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
	}
}
/*
 *	Routine:	cpu_doshutdown
 *	Function:	Shut down the current processor.
 */
void
cpu_doshutdown(void)
{
	enable_preemption();
	processor_offline(current_processor());
}
/*
 *	Routine:	cpu_sleep
 *	Function:	Put the current processor to sleep, saving the floating point,
 *			vector, and timebase state first.
 */
void
cpu_sleep(void)
{
	struct per_proc_info	*proc_info;
	unsigned int		i;
	unsigned int		wait_ncpus_sleep, ncpus_sleep;
	facility_context	*fowner;

	proc_info = getPerProc();

	proc_info->running = FALSE;

	fowner = proc_info->FPU_owner;				/* Cache this */
	if(fowner) fpu_save(fowner);				/* If anyone owns FPU, save it */
	proc_info->FPU_owner = 0;				/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;				/* Cache this */
	if(fowner) vec_save(fowner);				/* If anyone owns vectors, save it */
	proc_info->VMX_owner = 0;				/* Set no vector owner now */

	if (proc_info->cpu_number == master_cpu) {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					(vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					(vm_offset_t)&PerProcTable[master_cpu]);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}

		wait_ncpus_sleep = real_ncpus - 1;		/* The master waits for everyone else to sleep */
		ncpus_sleep = 0;
		while (wait_ncpus_sleep != ncpus_sleep) {
			ncpus_sleep = 0;
			for(i = 1; i < real_ncpus; i++) {
				if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
					ncpus_sleep++;
			}
		}
	}

	/*
	 * Save the TBR before stopping.
	 */
	do {
		proc_info->save_tbu = mftbu();
		proc_info->save_tbl = mftb();
	} while (mftbu() != proc_info->save_tbu);	/* Loop if the upper half ticked over mid-read */

	PE_cpu_machine_quiesce(proc_info->cpu_id);
}
/*
 *	Routine:	cpu_signal
 *	Function:
 *		Here is where we send a message to another processor. So far we only have two:
 *		SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
 *		currently disabled). SIGPdebug is used to enter the debugger.
 *
 *		We set up the SIGP function to indicate that this is a simple message and set the
 *		order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
 *		block for the target, we lock the message block. Then we set the parameter(s).
 *		Next we change the lock (also called "busy") to "passing" and finally signal
 *		the other processor. Note that we only wait about 1ms to get the message lock.
 *		If we time out, we return failure to our caller. It is their responsibility to
 *		recall us if they want to wait for the message to be delivered.
 */

kern_return_t
cpu_signal(
	int		target,
	int		signal,
	unsigned int	p1,
	unsigned int	p2)
{
	unsigned int		holdStat;
	struct per_proc_info	*tpproc, *mpproc;
	int			busybitset = 0;

	if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);

	mpproc = getPerProc();						/* Point to our block */
	tpproc = PerProcTable[target].ppe_vaddr;			/* Point to the target's block */
	if(mpproc == tpproc) return KERN_FAILURE;			/* Cannot signal ourselves */

	if(!tpproc->running) return KERN_FAILURE;

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {		/* Is there an unreceived message already pending? */

		if(signal == SIGPwake) {				/* SIGPwake can merge into all others... */
			mpproc->hwCtr.numSIGPmwake++;			/* Account for merged wakes */
			return KERN_SUCCESS;
		}

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
			mpproc->hwCtr.numSIGPmast++;			/* Account for merged ASTs */
			return KERN_SUCCESS;				/* Don't bother to send this one... */
		}

		if (tpproc->MPsigpParm0 == SIGPwake) {
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
					(MPsigpBusy | MPsigpPass), MPsigpBusy, 0)) {
				busybitset = 1;				/* We took over the pending wake's lock */
				mpproc->hwCtr.numSIGPmwake++;
			}
		}
	}

	if((busybitset == 0) &&
	   (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	   (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
		mpproc->hwCtr.numSIGPtimo++;				/* Account for timeouts */
		return KERN_FAILURE;					/* Timed out, take your ball and go home... */
	}

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;					/* Set message order */
	tpproc->MPsigpParm1 = p1;					/* Set additional parm */
	tpproc->MPsigpParm2 = p2;					/* Set additional parm */

	__asm__ volatile("sync");					/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;					/* Set status and pass the lock */
	__asm__ volatile("eieio");					/* I'm a paranoid freak */

	PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);			/* Kick the other processor */

	return KERN_SUCCESS;						/* All is goodness and rainbows... */
}
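/*
 *	For example (an illustrative sketch, mirroring the retry loop that
 *	cpu_sync_timebase() below already uses): because cpu_signal() gives up
 *	after its message-lock timeout, a caller that must get the message
 *	through simply retries until it succeeds ("target" here is whatever cpu
 *	number the caller wants to poke).
 *
 *		while (cpu_signal(target, SIGPast, 0, 0) != KERN_SUCCESS)
 *			continue;
 */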
/*
 *	Routine:	cpu_signal_handler
 *	Function:
 *		Here is where we implement the receiver of the signaling protocol.
 *		We wait for the signal status area to be passed to us. Then we snarf
 *		up the status, the sender, and the 3 potential parms. Next we release
 *		the lock and signal the other guy.
 */
void
cpu_signal_handler(void)
{
	unsigned int		holdStat, holdParm0, holdParm1, holdParm2, mtype;
	unsigned int		*parmAddr;
	struct per_proc_info	*proc_info;
	int			cpu;
	broadcastFunc		xfunc;

	cpu = cpu_number();						/* Get the CPU number */

	proc_info = getPerProc();

/*
 *	Since we've been signaled, wait about 31 ms for the signal lock to pass
 */
	if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	holdStat = proc_info->MPsigpStat;				/* Snarf stat word */
	holdParm0 = proc_info->MPsigpParm0;				/* Snarf parameter */
	holdParm1 = proc_info->MPsigpParm1;				/* Snarf parameter */
	holdParm2 = proc_info->MPsigpParm2;				/* Snarf parameter */

	__asm__ volatile("isync");					/* Make sure we don't unlock until memory is in */

	proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {				/* Decode function code */

		case MPsigpIdle:					/* Was function cancelled? */
			return;

		case MPsigpSigp:					/* Signal Processor message? */

			switch (holdParm0) {				/* Decode SIGP message order */

				case SIGPast:				/* Should we do an AST? */
					proc_info->hwCtr.numSIGPast++;	/* Count this one */
#if 0
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
					ast_check((processor_t)proc_info->processor);
					return;				/* All done... */

				case SIGPcpureq:			/* CPU specific function? */

					proc_info->hwCtr.numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {		/* Select specific function */

						case CPRQtimebase:	/* Hand our timebase to a starting processor */
							cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
							return;

						case CPRQsegload:
							return;

						case CPRQchud:		/* CHUD performance hook */
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							if(perfCpuSigHook) {
								struct savearea *ssp = current_thread()->machine.pcb;
								if(ssp) {
									(perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
								}
							}
							parmAddr[1] = 0;
							parmAddr[0] = 0;	/* Show we're done */
							return;

						case CPRQscom:		/* SCOM register access */
							if(((scomcomm *)holdParm2)->scomfunc) {	/* Are we writing */
								((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);	/* Write scom */
							}
							else {		/* No, reading... */
								((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);	/* Read scom */
							}
							return;

						case CPRQsps:		/* Slave side of a processor speed change */
							ml_set_processor_speed_slave(holdParm2);
							return;

						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
							return;
					}

				case SIGPdebug:				/* Enter the debugger? */

					proc_info->hwCtr.numSIGPdebug++;	/* Count this one */
					proc_info->debugger_is_slave++;		/* Bump up the count to show we're here */
					hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");		/* Enter the debugger */
					return;				/* All done now... */

				case SIGPwake:				/* Wake up CPU */
					proc_info->hwCtr.numSIGPwake++;	/* Count this one */
					return;				/* No need to do anything, the interrupt does it all... */

				case SIGPcall:				/* Call function on CPU */
					proc_info->hwCtr.numSIGPcall++;	/* Count this one */
					xfunc = holdParm1;		/* Do this since I can't seem to figure C out */
					xfunc(holdParm2);		/* Call the passed function */
					return;				/* Done... */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
					return;
			}

		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
			return;
	}
	panic("cpu_signal_handler: we should never get here\n");
}
/*
 *	Routine:	cpu_sync_timebase
 *	Function:	Ask the master processor for its timebase and copy it into ours.
 */
void
cpu_sync_timebase(void)
{
	natural_t		tbu, tbl;
	boolean_t		intr;
	struct SIGtimebase	syncClkSpot;

	intr = ml_set_interrupts_enabled(FALSE);		/* No interruptions in here */

	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
			(unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	while (*(volatile int *)&(syncClkSpot.avail) == FALSE)	/* Wait for the master to post its timebase */
		continue;

	isync();

	/*
	 * We do the following to keep the compiler from generating extra stuff
	 * in the timebase set sequence.
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;

	mttb(0);
	mttbu(tbu);
	mttb(tbl);

	syncClkSpot.ready = TRUE;				/* Tell the master we have it */

	while (*(volatile int *)&(syncClkSpot.done) == FALSE)	/* Wait for the master to finish up */
		continue;

	(void)ml_set_interrupts_enabled(intr);
}
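/*
 *	A sketch of the handshake between this routine and
 *	cpu_timebase_signal_handler() below, derived from the code itself
 *	(requesting slave on the left, master on the right):
 *
 *		slave: avail = ready = done = FALSE
 *		slave: cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase, &syncClkSpot)
 *				master: calls time_base_enable(..., FALSE) if provided,
 *					samples its TB into abstime, sets avail = TRUE
 *		slave: spins on avail, loads its TB from abstime, sets ready = TRUE
 *				master: spins on ready, calls time_base_enable(..., TRUE),
 *					sets done = TRUE
 *		slave: spins on done, restores interrupts
 */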
/*
 *	Routine:	cpu_timebase_signal_handler
 *	Function:	Master side of the timebase sync handshake: publish our
 *			timebase for the requesting processor.
 */
void
cpu_timebase_signal_handler(
	struct per_proc_info	*proc_info,
	struct SIGtimebase	*timebaseAddr)
{
	unsigned int	tbu, tbu2, tbl;

	if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, FALSE);	/* Stop the timebase while it is copied */

	timebaseAddr->abstime = 0;				/* Touch to force into cache */
	sync();

	do {							/* Read the 64-bit timebase without tearing */
		asm volatile("	mftbu %0" : "=r" (tbu));
		asm volatile("	mftb %0" : "=r" (tbl));
		asm volatile("	mftbu %0" : "=r" (tbu2));
	} while (tbu != tbu2);

	timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
	sync();							/* Force order */

	timebaseAddr->avail = TRUE;

	while (*(volatile int *)&(timebaseAddr->ready) == FALSE);

	if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, TRUE);

	timebaseAddr->done = TRUE;
}
/*
 *	Routine:	cpu_control
 *	Function:	Control the performance monitor facility of a processor.
 */
kern_return_t
cpu_control(
	int			slot_num,
	processor_info_t	info,
	unsigned int		count)
{
	struct per_proc_info	*proc_info;
	cpu_type_t		tcpu_type;
	cpu_subtype_t		tcpu_subtype;
	processor_pm_regs_t	perf_regs;
	processor_control_cmd_t	cmd;
	boolean_t		oldlevel;
#define MMCR0_SUPPORT_MASK	0xf83f1fff
#define MMCR1_SUPPORT_MASK	0xffc00000
#define MMCR2_SUPPORT_MASK	0x80000000

	proc_info = PerProcTable[slot_num].ppe_vaddr;
	tcpu_type = proc_info->cpu_type;
	tcpu_subtype = proc_info->cpu_subtype;
	cmd = (processor_control_cmd_t) info;

	if (count < PROCESSOR_CONTROL_CMD_COUNT)
		return(KERN_FAILURE);

	if ( tcpu_type != cmd->cmd_cpu_type ||
	     tcpu_subtype != cmd->cmd_cpu_subtype)
		return(KERN_FAILURE);

	if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
		return(KERN_RESOURCE_SHORTAGE);		/* cpu performance facility in use by another task */
	}

	switch (cmd->cmd_op) {
	case PROCESSOR_PM_CLR_PMC:			/* Clear Performance Monitor Counters */
		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtpmc1(0x0);
			mtpmc2(0x0);
			mtpmc3(0x0);
			mtpmc4(0x0);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		default:
			return(KERN_FAILURE);
		} /* switch tcpu_subtype */
	case PROCESSOR_PM_SET_REGS:			/* Set Performance Monitor Registers */
		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		default:
			return(KERN_FAILURE);
		} /* switch tcpu_subtype */
	case PROCESSOR_PM_SET_MMCR:			/* Set only the MMCR control registers */
		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		default:
			return(KERN_FAILURE);
		} /* switch tcpu_subtype */
	default:
		return(KERN_FAILURE);
	} /* switch cmd_op */
}
/*
 *	Routine:	cpu_info_count
 *	Function:	Return the size of the info structure for a processor_info flavor.
 */
kern_return_t
cpu_info_count(
	processor_flavor_t	flavor,
	unsigned int		*count)
{
	cpu_subtype_t	tcpu_subtype;

	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
	switch (flavor) {
		case PROCESSOR_PM_REGS_INFO:
			switch (tcpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:
					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
					return(KERN_SUCCESS);

				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
					return(KERN_SUCCESS);

				default:
					*count = 0;
					return(KERN_INVALID_ARGUMENT);
			} /* switch tcpu_subtype */

		case PROCESSOR_TEMPERATURE:
			*count = PROCESSOR_TEMPERATURE_COUNT;
			return (KERN_SUCCESS);

		default:
			*count = 0;
			return(KERN_INVALID_ARGUMENT);
	}
}
/*
 *	Routine:	cpu_info
 *	Function:	Return machine-dependent info for a processor_info flavor.
 */
kern_return_t
cpu_info(
	processor_flavor_t	flavor,
	int			slot_num,
	processor_info_t	info,
	unsigned int		*count)
{
	cpu_subtype_t		tcpu_subtype;
	processor_pm_regs_t	perf_regs;
	boolean_t		oldlevel;

	tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;

	switch (flavor) {
		case PROCESSOR_PM_REGS_INFO:

			perf_regs = (processor_pm_regs_t) info;

			switch (tcpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:

					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
						return(KERN_FAILURE);

					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					PERFMON_MMCR0(perf_regs) = mfmmcr0();
					PERFMON_PMC1(perf_regs)  = mfpmc1();
					PERFMON_PMC2(perf_regs)  = mfpmc2();
					PERFMON_MMCR1(perf_regs) = mfmmcr1();
					PERFMON_PMC3(perf_regs)  = mfpmc3();
					PERFMON_PMC4(perf_regs)  = mfpmc4();
					ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
					return(KERN_SUCCESS);

				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:

					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
						return(KERN_FAILURE);

					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					PERFMON_MMCR0(perf_regs) = mfmmcr0();
					PERFMON_PMC1(perf_regs)  = mfpmc1();
					PERFMON_PMC2(perf_regs)  = mfpmc2();
					PERFMON_MMCR1(perf_regs) = mfmmcr1();
					PERFMON_PMC3(perf_regs)  = mfpmc3();
					PERFMON_PMC4(perf_regs)  = mfpmc4();
					PERFMON_MMCR2(perf_regs) = mfmmcr2();
					ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
					return(KERN_SUCCESS);

				default:
					return(KERN_FAILURE);
			} /* switch tcpu_subtype */

		case PROCESSOR_TEMPERATURE:			/* Get the temperature of a processor */
			*info = -1;				/* Not supported; return a bogus temperature */
			return(KERN_FAILURE);

		default:
			return(KERN_INVALID_ARGUMENT);
	} /* flavor */
}
/*
 *	Routine:	cpu_to_processor
 *	Function:	Return the processor_t for a cpu number.
 */
processor_t
cpu_to_processor(
	int	cpu)
{
	return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
}

/*
 *	Routine:	slot_type
 *	Function:
 */
cpu_type_t
slot_type(
	int	slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
}

/*
 *	Routine:	slot_subtype
 *	Function:
 */
cpu_subtype_t
slot_subtype(
	int	slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
}

/*
 *	Routine:	slot_threadtype
 *	Function:
 */
cpu_threadtype_t
slot_threadtype(
	int	slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
}

/*
 *	Routine:	cpu_type
 *	Function:
 */
cpu_type_t
cpu_type(void)
{
	return (getPerProc()->cpu_type);
}

/*
 *	Routine:	cpu_subtype
 *	Function:
 */
cpu_subtype_t
cpu_subtype(void)
{
	return (getPerProc()->cpu_subtype);
}

/*
 *	Routine:	cpu_threadtype
 *	Function:
 */
cpu_threadtype_t
cpu_threadtype(void)
{
	return (getPerProc()->cpu_threadtype);
}
/*
 *	Call a function on all running processors
 *
 *	Note that the synch parameter is used to wait until all functions are complete.
 *	It is not passed to the other processor and must be known by the called function.
 *	The called function must do a thread_wakeup on the synch if it decrements the
 *	count to 0.
 */

int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {

	int	sigproc, cpu, ocpu;

	cpu = cpu_number();						/* Who are we? */
	sigproc = 0;							/* Clear called processor count */

	if(real_ncpus > 1) {						/* Are we just a uni? */

		assert_wait((event_t)synch, THREAD_UNINT);		/* If more than one processor, we may have to wait */

		for(ocpu = 0; ocpu < real_ncpus; ocpu++) {		/* Tell everyone to call */
			if(ocpu == cpu) continue;			/* If we talk to ourselves, people will wonder... */
			hw_atomic_add(synch, 1);			/* Tentatively bump synchronizer */
			sigproc++;					/* Tentatively bump signal sent count */
			if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {	/* Call the function on the other processor */
				hw_atomic_sub(synch, 1);		/* Other guy isn't really there, ignore it */
				sigproc--;				/* and don't count it */
			}
		}

		if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED);	/* Clear wait if we never signalled */
		else thread_block(THREAD_CONTINUE_NULL);		/* Wait for everyone to get into step... */
	}

	return sigproc;							/* Return the number of guys actually signalled */
}
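/*
 *	Example (an illustrative sketch; "rendezvous_sync" and "rendezvous_call"
 *	are hypothetical names, and the called function is assumed to take the
 *	single uint32_t parm shown above): the called function owns the synch
 *	counter, so it must decrement it and wake the broadcaster when the count
 *	reaches 0.
 *
 *		static uint32_t rendezvous_sync = 0;
 *
 *		static void rendezvous_call(uint32_t parm) {
 *			(void)parm;					-- do the per-cpu work here
 *			if(hw_atomic_sub(&rendezvous_sync, 1) == 0)	-- last processor to finish
 *				thread_wakeup((event_t)&rendezvous_sync);
 *		}
 *
 *		(void)cpu_broadcast(&rendezvous_sync, rendezvous_call, 0);
 */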