/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/processor_info.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/timer_queue.h>
#include <kern/processor.h>

#include <IOKit/IOHibernatePrivate.h>

#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/fpu_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/machine_cpu.h>
#include <ppc/rtclock.h>

#include <libkern/OSAtomic.h>
unsigned int		real_ncpus = 1;
unsigned int		max_ncpus  = MAX_CPUS;

decl_simple_lock_data(static,rht_lock);

static unsigned int	rht_state = 0;

decl_simple_lock_data(static,SignalReadyLock);

struct SIGtimebase {
	volatile boolean_t	avail;
	volatile boolean_t	ready;
	volatile boolean_t	done;
	uint64_t		abstime;
};

perfCallback	perfCpuSigHook;			/* Pointer to CHUD cpu signal hook routine */

extern uint32_t		debugger_sync;
void	cpu_sync_timebase(
			void);

void	cpu_timebase_signal_handler(
			struct per_proc_info	*proc_info,
			struct SIGtimebase	*timebaseAddr);
/*
 *	Routine:	cpu_bootstrap
 */
void
cpu_bootstrap(void)
{
	simple_lock_init(&rht_lock,0);
	simple_lock_init(&SignalReadyLock,0);
}
/*
 *	Routine:	cpu_init
 *	Function:	Per-processor initialization done each time a processor comes up.
 */
void
cpu_init(void)
{
	struct per_proc_info *proc_info;

	proc_info = getPerProc();

	/* Restore the time base saved at sleep, if any */
	if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
		mttbu(proc_info->save_tbu);
		mttb(proc_info->save_tbl);
	}

	proc_info->rtcPop = EndOfAllTime;			/* forget any existing decrementer setting */
	etimer_resync_deadlines();				/* Now that the time base is sort of correct, request the next timer pop */

	proc_info->cpu_type = CPU_TYPE_POWERPC;
	proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
	proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
	proc_info->running = TRUE;
}
/*
 *	Routine:	cpu_machine_init
 */
void
cpu_machine_init(void)
{
	struct per_proc_info		*proc_info;
	volatile struct per_proc_info	*mproc_info;

	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t	tbu, tbl;

		do {
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);			/* Retry if the upper half ticked during the read */

		proc_info->hibernate = 0;
		hibernate_machine_init();

		// hibernate_machine_init() could take minutes and we don't want timeouts
		// to fire as soon as scheduling starts. Reset timebase so it appears
		// no time has elapsed, as it would for regular sleep.
		mttb(0);
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			hw_atomic_and_noret(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
		pmsPark();					/* Timers should be cool now, park the power management stepper */
	}
}
/*
 *	Routine:	cpu_per_proc_alloc
 */
struct per_proc_info *
cpu_per_proc_alloc(void)
{
	struct per_proc_info	*proc_info = NULL;
	void			*interrupt_stack = NULL;
	void			*debugger_stack = NULL;

	if ((proc_info = (struct per_proc_info *)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info *)0)
		return (struct per_proc_info *)NULL;
	if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
		kfree(proc_info, sizeof(struct per_proc_info));
		return (struct per_proc_info *)NULL;
	}

	if ((debugger_stack = kalloc(kernel_stack_size)) == 0) {
		kfree(proc_info, sizeof(struct per_proc_info));
		kfree(interrupt_stack, INTSTACK_SIZE);
		return (struct per_proc_info *)NULL;
	}

	bzero((void *)proc_info, sizeof(struct per_proc_info));

	/* Set physical address of the second page */
	proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap,
				((addr64_t)(unsigned int)proc_info) + 0x1000)
				<< PAGE_SHIFT;
	proc_info->next_savearea = (uint64_t)save_get_init();
	proc_info->pf = BootProcInfo.pf;
	proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
	proc_info->intstack_top_ss = proc_info->istackptr;
	proc_info->debstackptr = (vm_offset_t)debugger_stack + kernel_stack_size - FM_SIZE;
	proc_info->debstack_top_ss = proc_info->debstackptr;

	queue_init(&proc_info->rtclock_timer.queue);
	proc_info->rtclock_timer.deadline = EndOfAllTime;

	return proc_info;
}
/*
 *	Routine:	cpu_per_proc_free
 */
void
cpu_per_proc_free(
	struct per_proc_info	*proc_info)
{
	if (proc_info->cpu_number == master_cpu)
		return;						/* Never free the boot processor's per_proc */
	kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
	kfree((void *)(proc_info->debstack_top_ss - kernel_stack_size + FM_SIZE), kernel_stack_size);
	kfree((void *)proc_info, sizeof(struct per_proc_info));	/* Release the per_proc */
}
/*
 *	Routine:	cpu_per_proc_register
 */
kern_return_t
cpu_per_proc_register(
	struct per_proc_info	*proc_info)
{
	int	cpu;

	cpu = OSIncrementAtomic(&real_ncpus);		/* Returns the previous count, which becomes our slot */

	if (real_ncpus > max_ncpus) {
		return KERN_FAILURE;
	}

	proc_info->cpu_number = cpu;
	PerProcTable[cpu].ppe_vaddr = proc_info;
	PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)(unsigned int)proc_info) << PAGE_SHIFT;

	return KERN_SUCCESS;
}
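
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * how the per_proc routines above and cpu_start() below fit together when a
 * secondary processor is brought up. The caller and the exact error returns
 * are assumptions; the platform layer normally drives this sequence.
 */
#if 0	/* example only, never compiled */
static kern_return_t
example_bringup_secondary(void)
{
	struct per_proc_info	*proc_info;

	if ((proc_info = cpu_per_proc_alloc()) == NULL)			/* Carve out the per_proc and its stacks */
		return KERN_RESOURCE_SHORTAGE;

	if (cpu_per_proc_register(proc_info) != KERN_SUCCESS) {		/* Claim a slot in PerProcTable */
		cpu_per_proc_free(proc_info);
		return KERN_FAILURE;
	}

	return cpu_start(proc_info->cpu_number);			/* Kick the processor via its reset vector */
}
#endif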
/*
 *	Routine:	cpu_start
 *	Function:	Start up a processor.
 */
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;
	mapping_t		*mp;

	proc_info = PerProcTable[cpu].ppe_vaddr;

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;
		proc_info->istackptr = proc_info->intstack_top_ss;
		proc_info->rtcPop = EndOfAllTime;
		proc_info->FPU_owner = NULL;
		proc_info->VMX_owner = NULL;
		proc_info->pms.pmsStamp = 0;				/* Dummy transition time */
		proc_info->pms.pmsPop = EndOfAllTime;			/* Set the pop way into the future */
		proc_info->pms.pmsState = pmsParked;			/* Park the stepper */
		proc_info->pms.pmsCSetCmd = pmsCInit;			/* Set dummy initial hardware state */
		mp = (mapping_t *)(&proc_info->ppUMWmp);
		mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
		mp->mpSpace = invalSpace;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			simple_lock(&rht_lock);
			while (rht_state & RHT_BUSY) {
				rht_state |= RHT_WAIT;
				thread_sleep_usimple_lock((event_t)&rht_state,
						&rht_lock, THREAD_UNINT);
			}
			rht_state |= RHT_BUSY;
			simple_unlock(&rht_lock);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					(vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					(vm_offset_t)&PerProcTable[cpu]);
		}
/*
 *		Note: we pass the current time to the other processor here. It will load it
 *		as early as possible so that there is a chance that it is close to accurate.
 *		After the machine is up a while, we will officially resync the clocks so
 *		that all processors are the same.  This is just to get close.
 */

		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

		__asm__ volatile("sync");				/* Commit to storage */
		__asm__ volatile("isync");				/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
				proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS) {
			if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
				simple_lock(&rht_lock);
				if (rht_state & RHT_WAIT)
					thread_wakeup(&rht_state);
				rht_state &= ~(RHT_BUSY|RHT_WAIT);
				simple_unlock(&rht_lock);
			}
		} else {
			simple_lock(&SignalReadyLock);
			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
				hw_atomic_or_noret(&proc_info->ppXFlags, SignalReadyWait);
				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
						&SignalReadyLock, THREAD_UNINT);
			}
			simple_unlock(&SignalReadyLock);
		}
		return(ret);
	}
}
/*
 *	Routine:	cpu_exit_wait
 *	Function:	Wait for a processor to reach its sleep state before proceeding.
 */
void
cpu_exit_wait(
	int	cpu)
{
	struct per_proc_info	*tpproc;

	if ( cpu != master_cpu) {
		tpproc = PerProcTable[cpu].ppe_vaddr;
		while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
	}
}


/*
 *	Routine:	cpu_doshutdown
 *	Function:	Take the current processor offline.
 */
void
cpu_doshutdown(void)
{
	processor_offline(current_processor());
}
/*
 *	Routine:	cpu_sleep
 */
void
cpu_sleep(void)
{
	struct per_proc_info	*proc_info;
	unsigned int		i;
	unsigned int		wait_ncpus_sleep, ncpus_sleep;
	facility_context	*fowner;

	proc_info = getPerProc();

	proc_info->running = FALSE;

	timer_queue_shutdown(&proc_info->rtclock_timer.queue);
	proc_info->rtclock_timer.deadline = EndOfAllTime;

	fowner = proc_info->FPU_owner;					/* Cache this */
	if(fowner)							/* If anyone owns FPU, save it */
		fpu_save(fowner);
	proc_info->FPU_owner = NULL;					/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;					/* Cache this */
	if(fowner) vec_save(fowner);					/* If anyone owns vectors, save it */
	proc_info->VMX_owner = NULL;					/* Set no vector owner now */

	if (proc_info->cpu_number == master_cpu)  {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					(vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					(vm_offset_t)&PerProcTable[master_cpu]);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}

		wait_ncpus_sleep = real_ncpus-1;
		ncpus_sleep = 0;
		while (wait_ncpus_sleep != ncpus_sleep) {		/* Spin until every other processor reports SleepState */
			ncpus_sleep = 0;
			for(i=1; i < real_ncpus ; i++) {
				if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
					ncpus_sleep++;
			}
		}
	}

	/*
	 * Save the TBR before stopping.
	 */
	do {
		proc_info->save_tbu = mftbu();
		proc_info->save_tbl = mftb();
	} while (mftbu() != proc_info->save_tbu);			/* Retry if the upper half ticked mid-read */

	PE_cpu_machine_quiesce(proc_info->cpu_id);
}
/*
 *	Routine:	cpu_signal
 *	Function:	Here is where we send a message to another processor.  So far we only have two:
 *			SIGPast and SIGPdebug.  SIGPast is used to preempt and kick off threads (this is
 *			currently disabled). SIGPdebug is used to enter the debugger.
 *
 *			We set up the SIGP function to indicate that this is a simple message and set the
 *			order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
 *			block for the target, we lock the message block. Then we set the parameter(s).
 *			Next we change the lock (also called "busy") to "passing" and finally signal
 *			the other processor. Note that we only wait about 1ms to get the message lock.
 *			If we time out, we return failure to our caller. It is their responsibility to
 *			recover. (A usage sketch follows this routine.)
 */
kern_return_t
cpu_signal(
	int target,
	int signal,
	unsigned int p1,
	unsigned int p2)
{
	unsigned int		holdStat;
	struct per_proc_info	*tpproc, *mpproc;
	int			busybitset = 0;

	if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);

	mpproc = getPerProc();						/* Point to our block */
	tpproc = PerProcTable[target].ppe_vaddr;			/* Point to the target's block */
	if(mpproc == tpproc) return KERN_FAILURE;			/* Cannot signal ourselves */

	if(!tpproc->running) return KERN_FAILURE;

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {		/* Is there an unreceived message already pending? */

		if(signal == SIGPwake) {				/* SIGPwake can merge into all others... */
			mpproc->hwCtr.numSIGPmwake++;			/* Account for merged wakes */
			return KERN_SUCCESS;
		}

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
			mpproc->hwCtr.numSIGPmast++;			/* Account for merged ASTs */
			return KERN_SUCCESS;				/* Don't bother to send this one... */
		}

		if (tpproc->MPsigpParm0 == SIGPwake) {
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
					(MPsigpBusy | MPsigpPass), MPsigpBusy, 0)) {
				busybitset = 1;				/* Reclaimed the pending wake's message block */
				mpproc->hwCtr.numSIGPmwake++;
			}
		}
	}

	if((busybitset == 0) &&
	   (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	   (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
		mpproc->hwCtr.numSIGPtimo++;				/* Account for timeouts */
		return KERN_FAILURE;					/* Timed out, take your ball and go home... */
	}

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;					/* Set message order */
	tpproc->MPsigpParm1 = p1;					/* Set additional parm */
	tpproc->MPsigpParm2 = p2;					/* Set additional parm */

	__asm__ volatile("sync");					/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;					/* Set status and pass the lock */
	__asm__ volatile("eieio");					/* I'm a paranoid freak */

	if (busybitset == 0)
		PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);		/* Kick the other processor */

	return KERN_SUCCESS;						/* All is goodness and rainbows... */
}
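
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a typical caller of cpu_signal() as described above -- post a SIGPwake to
 * another processor and retry briefly if the target's message block stays
 * busy. The retry count and the helper name are assumptions for
 * illustration only.
 */
#if 0	/* example only, never compiled */
static void
example_wake_cpu(int target)
{
	int	tries;

	for (tries = 0; tries < 10; tries++) {			/* cpu_signal itself waits about .5ms for the lock */
		if (cpu_signal(target, SIGPwake, 0, 0) == KERN_SUCCESS)
			return;					/* Message posted (or merged with a pending one) */
	}
	/* Still busy; the caller decides whether to panic, sleep, or drop the wake */
}
#endif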
/*
 *	Routine:	cpu_signal_handler
 *	Function:	Here is where we implement the receiver of the signaling protocol.
 *			We wait for the signal status area to be passed to us. Then we snarf
 *			up the status, the sender, and the 3 potential parms. Next we release
 *			the lock and signal the other guy.
 */
void
cpu_signal_handler(void)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2;
	unsigned int *parmAddr;
	struct per_proc_info	*proc_info;
	int cpu;
	broadcastFunc xfunc;

	cpu = cpu_number();						/* Get the CPU number */

	proc_info = getPerProc();

/*
 *	Since we've been signaled, wait about 31 ms for the signal lock to pass
 */
	if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	holdStat = proc_info->MPsigpStat;				/* Snarf stat word */
	holdParm0 = proc_info->MPsigpParm0;				/* Snarf parameter */
	holdParm1 = proc_info->MPsigpParm1;				/* Snarf parameter */
	holdParm2 = proc_info->MPsigpParm2;				/* Snarf parameter */

	__asm__ volatile("isync");					/* Make sure we don't unlock until memory is in */

	proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {				/* Decode function code */

		case MPsigpIdle:					/* Was function cancelled? */
			return;

		case MPsigpSigp:					/* Signal Processor message? */

			switch (holdParm0) {				/* Decode SIGP message order */

				case SIGPast:				/* Should we do an AST? */
					proc_info->hwCtr.numSIGPast++;	/* Count this one */
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
					ast_check((processor_t)proc_info->processor);
					return;				/* All done... */

				case SIGPcpureq:			/* CPU specific function? */

					proc_info->hwCtr.numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {		/* Select specific function */

						case CPRQtimebase:
							cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
							return;

						case CPRQchud:
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							if(perfCpuSigHook) {
								struct savearea *ssp = current_thread()->machine.pcb;
								if(ssp) {
									(perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
								}
							}
							parmAddr[0] = 0;	/* Show we're done */
							return;

						case CPRQscom:
							if(((scomcomm *)holdParm2)->scomfunc) {	/* Are we writing */
								((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);	/* Write scom */
							}
							else {	/* No, reading... */
								((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);	/* Read scom */
							}
							return;

						case CPRQsps:
							ml_set_processor_speed_slave(holdParm2);
							return;

						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
							return;
					}

				case SIGPdebug:				/* Enter the debugger? */

					proc_info->hwCtr.numSIGPdebug++;	/* Count this one */
					proc_info->debugger_is_slave++;		/* Bump up the count to show we're here */
					(void)hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");		/* Enter the debugger */
					return;				/* All done now... */

				case SIGPwake:				/* Wake up CPU */
					proc_info->hwCtr.numSIGPwake++;	/* Count this one */
					return;				/* No need to do anything, the interrupt does it all... */

				case SIGPcall:				/* Call function on CPU */
					proc_info->hwCtr.numSIGPcall++;	/* Count this one */
					xfunc = (broadcastFunc)holdParm1;	/* Do this since I can't seem to figure C out */
					xfunc(holdParm2);		/* Call the passed function */
					return;				/* Done... */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
					return;
			}

		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
			return;
	}
	panic("cpu_signal_handler: we should never get here\n");
}
/*
 *	Routine:	cpu_sync_timebase
 *	Function:	Ask the master processor for its time base and copy it locally.
 */
void
cpu_sync_timebase(void)
{
	uint32_t		tbu, tbl;
	boolean_t		intr;
	struct SIGtimebase	syncClkSpot;

	intr = ml_set_interrupts_enabled(FALSE);		/* No interruptions in here */

	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
			(unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	while (syncClkSpot.avail == FALSE)			/* Wait for the master to post its time base */
		continue;

	/*
	 * We do the following to keep the compiler from generating extra stuff
	 * when setting the time base.
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;

	mttb(0);						/* Clear the low half so the upper write can't carry */
	mttbu(tbu);
	mttb(tbl);

	syncClkSpot.ready = TRUE;				/* Tell the master we have its value */

	while (syncClkSpot.done == FALSE)			/* Wait for the master to finish up */
		continue;

	etimer_resync_deadlines();				/* Start the timer */
	(void)ml_set_interrupts_enabled(intr);
}
/*
 *	Routine:	cpu_timebase_signal_handler
 *	Function:	Master-side handler for CPRQtimebase: publish our time base for a slave.
 *			(A summary of the handshake with cpu_sync_timebase follows this routine.)
 */
void
cpu_timebase_signal_handler(
	struct per_proc_info	*proc_info,
	struct SIGtimebase	*timebaseAddr)
{
	unsigned int tbu, tbu2, tbl;

	if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, FALSE);	/* Hold the time base while the slave copies it */

	timebaseAddr->abstime = 0;	/* Touch to force into cache */

	do {
		asm volatile("	mftbu %0" : "=r" (tbu));
		asm volatile("	mftb %0" : "=r" (tbl));
		asm volatile("	mftbu %0" : "=r" (tbu2));
	} while (tbu != tbu2);					/* Retry if the upper half ticked mid-read */

	timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
	sync();							/* Force order */

	timebaseAddr->avail = TRUE;

	while (timebaseAddr->ready == FALSE)			/* Wait for the slave to copy the value */
		continue;

	if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, TRUE);

	timebaseAddr->done = TRUE;
}
/*
 *	Routine:	cpu_control
 */
kern_return_t
cpu_control(
	int			slot_num,
	processor_info_t	info,
	unsigned int		count)
{
	struct per_proc_info	*proc_info;
	cpu_type_t		tcpu_type;
	cpu_subtype_t		tcpu_subtype;
	processor_pm_regs_t	perf_regs;
	processor_control_cmd_t	cmd;
	boolean_t		oldlevel;
#define MMCR0_SUPPORT_MASK	0xf83f1fff
#define MMCR1_SUPPORT_MASK	0xffc00000
#define MMCR2_SUPPORT_MASK	0x80000000

	proc_info = PerProcTable[slot_num].ppe_vaddr;
	tcpu_type = proc_info->cpu_type;
	tcpu_subtype = proc_info->cpu_subtype;
	cmd = (processor_control_cmd_t) info;

	if (count < PROCESSOR_CONTROL_CMD_COUNT)
		return(KERN_FAILURE);

	if ( tcpu_type != cmd->cmd_cpu_type ||
	     tcpu_subtype != cmd->cmd_cpu_subtype)
		return(KERN_FAILURE);

	if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
		return(KERN_RESOURCE_SHORTAGE);		/* cpu performance facility in use by another task */
	}

	switch (cmd->cmd_op) {
	case PROCESSOR_PM_CLR_PMC:			/* Clear Performance Monitor Counters */
		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtpmc1(0x0);
			mtpmc2(0x0);
			mtpmc3(0x0);
			mtpmc4(0x0);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		default:
			return(KERN_FAILURE);
		} /* switch tcpu_subtype */

	case PROCESSOR_PM_SET_REGS:			/* Set Performance Monitor Registers */
		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
			      PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
			      PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch tcpu_subtype */

	case PROCESSOR_PM_SET_MMCR:
		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
			      PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
			      PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch tcpu_subtype */

	default:
		return(KERN_FAILURE);
	} /* switch cmd_op */
}
/*
 *	Routine:	cpu_info_count
 */
kern_return_t
cpu_info_count(
	processor_flavor_t	flavor,
	unsigned int		*count)
{
	cpu_subtype_t	tcpu_subtype;

	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
	switch (flavor) {
	case PROCESSOR_PM_REGS_INFO:
		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

		default:
			*count = 0;
			return(KERN_INVALID_ARGUMENT);
		} /* switch tcpu_subtype */

	case PROCESSOR_TEMPERATURE:
		*count = PROCESSOR_TEMPERATURE_COUNT;
		return (KERN_SUCCESS);

	default:
		*count = 0;
		return(KERN_INVALID_ARGUMENT);
	}
}
/*
 *	Routine:	cpu_info
 */
kern_return_t
cpu_info(
	processor_flavor_t	flavor,
	int			slot_num,
	processor_info_t	info,
	unsigned int		*count)
{
	cpu_subtype_t		tcpu_subtype;
	processor_pm_regs_t	perf_regs;
	boolean_t		oldlevel;

	tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;

	switch (flavor) {
	case PROCESSOR_PM_REGS_INFO:

		perf_regs = (processor_pm_regs_t) info;

		switch (tcpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs)  = mfpmc1();
			PERFMON_PMC2(perf_regs)  = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs)  = mfpmc3();
			PERFMON_PMC4(perf_regs)  = mfpmc4();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs)  = mfpmc1();
			PERFMON_PMC2(perf_regs)  = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs)  = mfpmc3();
			PERFMON_PMC4(perf_regs)  = mfpmc4();
			PERFMON_MMCR2(perf_regs) = mfmmcr2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch tcpu_subtype */

	case PROCESSOR_TEMPERATURE:			/* Get the temperature of a processor */
		*info = -1;				/* Temperature reporting not supported; return -1 */
		return(KERN_FAILURE);

	default:
		return(KERN_INVALID_ARGUMENT);
	} /* flavor */
}
/*
 *	Routine:	cpu_to_processor
 */
processor_t
cpu_to_processor(
	int		cpu)
{
	return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
}

/*
 *	Routine:	slot_type
 */
cpu_type_t
slot_type(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
}

/*
 *	Routine:	slot_subtype
 */
cpu_subtype_t
slot_subtype(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
}

/*
 *	Routine:	slot_threadtype
 */
cpu_threadtype_t
slot_threadtype(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
}

/*
 *	Routine:	cpu_type
 */
cpu_type_t
cpu_type(void)
{
	return (getPerProc()->cpu_type);
}

/*
 *	Routine:	cpu_subtype
 */
cpu_subtype_t
cpu_subtype(void)
{
	return (getPerProc()->cpu_subtype);
}

/*
 *	Routine:	cpu_threadtype
 */
cpu_threadtype_t
cpu_threadtype(void)
{
	return (getPerProc()->cpu_threadtype);
}
/*
 *	Call a function on all running processors.
 *
 *	Note that the synch parameter is used to wait until all functions are complete.
 *	It is not passed to the other processor and must be known by the called function.
 *	The called function must do a thread_wakeup on the synch if it decrements the
 *	synch to 0.
 *
 *	We start by initializing the synchronizer to the number of possible cpus.
 *	Then we signal each possible processor.
 *	If the signal fails, we count it.  We also skip our own.
 *	When we are finished signaling, we adjust the synchronizer count down by the number of failed signals.
 *	Because the signaled processors are also decrementing the synchronizer count, the adjustment may result in a 0.
 *	If this happens, all other processors are finished with the function.
 *	If so, we clear the wait and continue.
 *	Otherwise, we block waiting for the other processor(s) to finish.
 *
 *	Meanwhile, the other processors are decrementing the synchronizer when they are done.
 *	If it goes to zero, thread_wakeup is called to run the broadcaster.
 *
 *	Note that because we account for the broadcaster in the synchronization count, we will not get any
 *	premature wakeup calls.
 *
 *	Also note that when we do the adjustment of the synchronization count, if the result is 0, it means that
 *	all of the other processors are finished.  Otherwise, we know that there is at least one more.
 *	When that thread decrements the synchronizer to zero, it will do a thread_wakeup.
 *
 *	(A usage sketch follows this routine.)
 */
int32_t
cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm)
{
	int		failsig;
	unsigned int	cpu, ocpu;

	cpu = cpu_number();						/* Who are we? */
	failsig = 0;							/* Clear called processor count */

	if(real_ncpus > 1) {						/* Are we just a uni? */

		*synch = real_ncpus;					/* Set how many we are going to try */
		assert_wait((event_t)synch, THREAD_UNINT);		/* If more than one processor, we may have to wait */

		for(ocpu = 0; ocpu < real_ncpus; ocpu++) {		/* Tell everyone to call */

			if(ocpu == cpu) continue;			/* If we talk to ourselves, people will wonder... */

			if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {	/* Call the function on the other processor */
				failsig++;				/* Count failed signals */
			}
		}

		if (hw_atomic_sub(synch, failsig + 1) == 0)
			clear_wait(current_thread(), THREAD_AWAKENED);	/* Clear wait if we never signalled or all of the others finished */
		else
			thread_block(THREAD_CONTINUE_NULL);		/* Wait for everyone to get into step... */
	}

	return (real_ncpus - failsig - 1);				/* Return the number of guys actually signalled... */
}
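
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a minimal cpu_broadcast() caller following the rules in the comment above.
 * The called function decrements the synchronizer itself and wakes the
 * broadcaster when it reaches zero. The counter name and the example action
 * are assumptions for illustration only.
 */
#if 0	/* example only, never compiled */
static uint32_t example_sync;

static void
example_remote_action(uint32_t parm)
{
	(void)parm;					/* The real per-processor work would go here */
	if (hw_atomic_sub(&example_sync, 1) == 0)	/* Last one out wakes the broadcaster */
		thread_wakeup((event_t)&example_sync);
}

static void
example_broadcast(void)
{
	(void)cpu_broadcast(&example_sync, example_remote_action, 0);	/* Runs the action on every other running CPU */
}
#endif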