/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * cpu specific routines
 */
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>

/* TODO: BOGUS TO BE REMOVED */
resethandler_t resethandler_target;
decl_simple_lock_data(static,SignalReadyLock);
static unsigned int SignalReadyWait = 0xFFFFFFFFU;

#define MMCR0_SUPPORT_MASK 0xf83f1fff
#define MMCR1_SUPPORT_MASK 0xffc00000
#define MMCR2_SUPPORT_MASK 0x80000000
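
/*
 * These masks limit which bits of the caller-supplied MMCR0/MMCR1/MMCR2
 * values are passed through to the hardware: the PROCESSOR_PM_SET_REGS and
 * PROCESSOR_PM_SET_MMCR cases below AND each value with its mask before the
 * mtmmcr0/mtmmcr1/mtmmcr2 writes.
 */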

extern int debugger_pending[NCPUS];
extern int debugger_is_slave[NCPUS];
extern int debugger_holdoff[NCPUS];
extern int debugger_sync;

struct per_proc_info *pper_proc_info = per_proc_info;

extern struct SIGtimebase syncClkSpot;

void cpu_sync_timebase(void);
	processor_info_t	info,

	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	processor_control_cmd_t	cmd;
	cpu_type = machine_slot[slot_num].cpu_type;
	cpu_subtype = machine_slot[slot_num].cpu_subtype;
	cmd = (processor_control_cmd_t) info;

	if (count < PROCESSOR_CONTROL_CMD_COUNT)
		return(KERN_FAILURE);

	if ( cpu_type != cmd->cmd_cpu_type ||
	     cpu_subtype != cmd->cmd_cpu_subtype)
		return(KERN_FAILURE);

	if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
		return(KERN_RESOURCE_SHORTAGE);	/* cpu performance facility in use by another task */
	}
	switch (cmd->cmd_op) {
		case PROCESSOR_PM_CLR_PMC:	/* Clear Performance Monitor Counters */
			switch (cpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:
				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					mtpmc1(0x0);
					mtpmc2(0x0);
					mtpmc3(0x0);
					mtpmc4(0x0);
					ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
					return(KERN_SUCCESS);
				default:
					return(KERN_FAILURE);
			} /* switch cpu_subtype */
		case PROCESSOR_PM_SET_REGS:	/* Set Performance Monitor Registers */
			switch (cpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:
					if (count < (PROCESSOR_CONTROL_CMD_COUNT +
					    PROCESSOR_PM_REGS_COUNT_POWERPC_750))
						return(KERN_FAILURE);

					perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
					mtpmc1(PERFMON_PMC1(perf_regs));
					mtpmc2(PERFMON_PMC2(perf_regs));
					mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
					mtpmc3(PERFMON_PMC3(perf_regs));
					mtpmc4(PERFMON_PMC4(perf_regs));
					ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
					return(KERN_SUCCESS);
				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
					if (count < (PROCESSOR_CONTROL_CMD_COUNT +
					    PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
						return(KERN_FAILURE);

					perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
					mtpmc1(PERFMON_PMC1(perf_regs));
					mtpmc2(PERFMON_PMC2(perf_regs));
					mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
					mtpmc3(PERFMON_PMC3(perf_regs));
					mtpmc4(PERFMON_PMC4(perf_regs));
					mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
					ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
					return(KERN_SUCCESS);
				default:
					return(KERN_FAILURE);
			} /* switch cpu_subtype */
		case PROCESSOR_PM_SET_MMCR:
			switch (cpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:
					if (count < (PROCESSOR_CONTROL_CMD_COUNT +
					    PROCESSOR_PM_REGS_COUNT_POWERPC_750))
						return(KERN_FAILURE);

					perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
					mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
					ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
					return(KERN_SUCCESS);
				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
					if (count < (PROCESSOR_CONTROL_CMD_COUNT +
					    PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
						return(KERN_FAILURE);

					perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
					mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
					mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
					ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
					return(KERN_SUCCESS);
				default:
					return(KERN_FAILURE);
			} /* switch cpu_subtype */
		default:
			return(KERN_FAILURE);
	} /* switch cmd_op */
	processor_flavor_t	flavor,

	cpu_subtype_t		cpu_subtype;

	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	cpu_subtype = machine_slot[0].cpu_subtype;
	switch (flavor) {
		case PROCESSOR_PM_REGS_INFO:
			switch (cpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:
					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
					return(KERN_SUCCESS);

				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
					return(KERN_SUCCESS);

				default:
					return(KERN_INVALID_ARGUMENT);
			} /* switch cpu_subtype */

		case PROCESSOR_TEMPERATURE:
			*count = PROCESSOR_TEMPERATURE_COUNT;
			return(KERN_SUCCESS);

		default:
			return(KERN_INVALID_ARGUMENT);
	}
	processor_flavor_t	flavor,
	processor_info_t	info,

	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	unsigned int		temp[2];

	cpu_subtype = machine_slot[slot_num].cpu_subtype;
	switch (flavor) {
		case PROCESSOR_PM_REGS_INFO:

			perf_regs = (processor_pm_regs_t) info;

			switch (cpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:
					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
						return(KERN_FAILURE);

					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					PERFMON_MMCR0(perf_regs) = mfmmcr0();
					PERFMON_PMC1(perf_regs) = mfpmc1();
					PERFMON_PMC2(perf_regs) = mfpmc2();
					PERFMON_MMCR1(perf_regs) = mfmmcr1();
					PERFMON_PMC3(perf_regs) = mfpmc3();
					PERFMON_PMC4(perf_regs) = mfpmc4();
					ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
					return(KERN_SUCCESS);

				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
						return(KERN_FAILURE);

					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					PERFMON_MMCR0(perf_regs) = mfmmcr0();
					PERFMON_PMC1(perf_regs) = mfpmc1();
					PERFMON_PMC2(perf_regs) = mfpmc2();
					PERFMON_MMCR1(perf_regs) = mfmmcr1();
					PERFMON_PMC3(perf_regs) = mfpmc3();
					PERFMON_PMC4(perf_regs) = mfpmc4();
					PERFMON_MMCR2(perf_regs) = mfmmcr2();
					ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
					return(KERN_SUCCESS);

				default:
					return(KERN_FAILURE);
			} /* switch cpu_subtype */
		case PROCESSOR_TEMPERATURE:	/* Get the temperature of a processor */

			disable_preemption();	/* Don't move me now */

			if(slot_num == cpu_number()) {	/* Is this for the local CPU? */
				*info = ml_read_temp();	/* Get the temperature */
			}
			else {	/* For another CPU */
				temp[0] = -1;	/* Set sync flag */
				temp[1] = -1;	/* Set invalid temperature */
				(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp, (unsigned int)&temp);	/* Ask him to take his temperature */
				(void)hw_cpu_sync(temp, LockTimeOut);	/* Wait for the other processor to get its temperature */
				*info = temp[1];	/* Pass it back */
			}

			enable_preemption();	/* Ok to move now */
			return(KERN_SUCCESS);
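
			/*
			 * The two-word temp[] handshake above pairs with the CPRQtemp
			 * handler in cpu_signal_handler() below: the remote CPU writes
			 * its reading into temp[1] and then clears temp[0], which
			 * appears to be the sync word hw_cpu_sync() waits on.
			 */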
		default:
			return(KERN_INVALID_ARGUMENT);
	}
	machine_slot[cpu].running = TRUE;
	machine_slot[cpu].cpu_type = CPU_TYPE_POWERPC;
	machine_slot[cpu].cpu_subtype = (cpu_subtype_t)per_proc_info[cpu].pf.rptdProc;
	struct per_proc_info		*tproc_info;
	volatile struct per_proc_info	*mproc_info;

	/* TODO: release mutex lock reset_handler_lock */
	tproc_info = &per_proc_info[cpu];
	mproc_info = &per_proc_info[master_cpu];
	PE_cpu_machine_init(tproc_info->cpu_id, !(tproc_info->cpu_flags & BootDone));
	if (cpu != master_cpu) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	if (cpu != master_cpu)
		simple_lock(&SignalReadyLock);
	tproc_info->cpu_flags |= BootDone|SignalReady;
	if (cpu != master_cpu) {
		if (SignalReadyWait != 0) {
			thread_wakeup(&tproc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
	}
	/*
	 * - Run cpu_register() in exclusion mode
	 */
	for(cpu=0; cpu < wncpu; cpu++) {
		if(!machine_slot[cpu].is_cpu) {
			machine_slot[cpu].is_cpu = TRUE;
			*target_cpu = cpu;
			break;
		}
	}

	if (*target_cpu != -1) {
	struct per_proc_info	*proc_info;
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;

	proc_info = &per_proc_info[cpu];

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

		proc_info->cpu_flags |= BootDone|SignalReady;
		extern void _start_cpu(void);

		if (SignalReadyWait == 0xFFFFFFFFU) {
			simple_lock_init(&SignalReadyLock, 0);
		}
		proc_info->cpu_number = cpu;
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
		proc_info->interrupts_enabled = 0;
		proc_info->need_ast = (unsigned int)&need_ast[cpu];
		proc_info->FPU_owner = 0;
		proc_info->VMX_owner = 0;
		mp = (mapping *)(&proc_info->ppCIOmp);
		mp->mpFlags = 0x01000000 | mpSpecial | 1;
		mp->mpSpace = invalSpace;
		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: get mutex lock reset_handler_lock */

			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = (vm_offset_t)_start_cpu;	/* Note: these routines are always V=R */
			resethandler_target.arg__paddr = (vm_offset_t)proc_info;	/* Note: these routines are always V=R */

			ml_phys_write((vm_offset_t)&ResetHandler + 0, resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4, resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8, resethandler_target.arg__paddr);
/*
 *		Note: we pass the current time to the other processor here. He will load it
 *		as early as possible so that there is a chance that it is close to accurate.
 *		After the machine is up a while, we will officially resync the clocks so
 *		that all processors are the same. This is just to get close.
 */
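
/*
 *		The precise resync mentioned above is handled later through the
 *		CPRQtimebase request: cpu_sync_timebase() (at the end of this file)
 *		asks the master for its timebase and spins on the syncClkSpot
 *		avail/ready/done flags until the exchange completes.
 */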
		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);	/* Pass our current time to the other guy */

		__asm__ volatile("sync");	/* Commit to storage */
		__asm__ volatile("isync");	/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
				proc_info->start_paddr, (vm_offset_t)proc_info);
		if (ret != KERN_SUCCESS &&
		    proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: release mutex lock reset_handler_lock */
		}

		simple_lock(&SignalReadyLock);

		while (!((*(volatile short *)&per_proc_info[cpu].cpu_flags) & SignalReady)) {
			thread_sleep_simple_lock((event_t)&per_proc_info[cpu].cpu_flags,
					&SignalReadyLock, THREAD_UNINT);
		}
		simple_unlock(&SignalReadyLock);
	if ( cpu != master_cpu )
		while (!((*(volatile short *)&per_proc_info[cpu].cpu_flags) & SleepState)) {};
perfTrap perfCpuSigHook = 0;	/* Pointer to CHUD cpu signal hook routine */
/*
 *	Here is where we implement the receiver of the signaling protocol.
 *	We wait for the signal status area to be passed to us. Then we snarf
 *	up the status, the sender, and the 3 potential parms. Next we release
 *	the lock and signal the other guy.
 */
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
	unsigned int *parmAddr;
	struct per_proc_info *pproc;		/* Area for my per_proc address */
	struct SIGtimebase *timebaseAddr;
	natural_t tbu, tbu2, tbl;

	cpu = cpu_number();			/* Get the CPU number */
	pproc = &per_proc_info[cpu];		/* Point to our block */
/*
 *	Since we've been signaled, wait about 31 ms for the signal lock to pass
 */
	if(!hw_lock_mbits(&pproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
	    (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}
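
	/*
	 *	The timeout above is timebase_frequency_hz >> 5, i.e. the number of
	 *	timebase ticks in 1/32 of a second, which is roughly the 31 ms the
	 *	comment promises (for example, a 25 MHz timebase would give about
	 *	781,250 ticks; that frequency is only illustrative, not a value
	 *	taken from this file).
	 */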
	holdStat = pproc->MPsigpStat;		/* Snarf stat word */
	holdParm0 = pproc->MPsigpParm0;		/* Snarf parameter */
	holdParm1 = pproc->MPsigpParm1;		/* Snarf parameter */
	holdParm2 = pproc->MPsigpParm2;		/* Snarf parameter */

	__asm__ volatile("isync");		/* Make sure we don't unlock until memory is in */

	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {	/* Decode function code */
		case MPsigpIdle:		/* Was function cancelled? */
			return;

		case MPsigpSigp:		/* Signal Processor message? */

			switch (holdParm0) {	/* Decode SIGP message order */

				case SIGPast:	/* Should we do an AST? */
					pproc->hwCtr.numSIGPast++;	/* Count this one */
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
					ast_check(cpu_to_processor(cpu));
					return;		/* All done... */
				case SIGPcpureq:	/* CPU specific function? */

					pproc->hwCtr.numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {	/* Select specific function */

						case CPRQtemp:	/* Get the temperature */
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							parmAddr[1] = ml_read_temp();	/* Get the core temperature */
							eieio();	/* Force order */
							sync();		/* Force to memory */
							parmAddr[0] = 0;	/* Show we're done */
							return;
						case CPRQtimebase:

							timebaseAddr = (struct SIGtimebase *)holdParm2;

							if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
								pproc->time_base_enable(pproc->cpu_id, FALSE);

							timebaseAddr->abstime = 0;	/* Touch to force into cache */

							do {
								asm volatile(" mftbu %0" : "=r" (tbu));
								asm volatile(" mftb %0" : "=r" (tbl));
								asm volatile(" mftbu %0" : "=r" (tbu2));
							} while (tbu != tbu2);

							timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
							sync();		/* Force order */

							timebaseAddr->avail = TRUE;

							while (*(volatile int *)&(syncClkSpot.ready) == FALSE);

							if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
								pproc->time_base_enable(pproc->cpu_id, TRUE);

							timebaseAddr->done = TRUE;

							return;
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */

							struct savearea *ssp = current_act()->mact.pcb;

							(perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);

							parmAddr[0] = 0;	/* Show we're done */
							if(((scomcomm *)holdParm2)->scomfunc) {	/* Are we writing */
								((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);	/* Write scom */
							}
							else {	/* No, reading... */
								((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);	/* Read scom */
							}
							extern void ml_set_processor_speed_slave(unsigned long speed);

							ml_set_processor_speed_slave(holdParm2);
							return;

						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
					}
				case SIGPdebug:		/* Enter the debugger? */

					pproc->hwCtr.numSIGPdebug++;	/* Count this one */
					debugger_is_slave[cpu]++;	/* Bump up the count to show we're here */
					hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
					return;		/* All done now... */

				case SIGPwake:		/* Wake up CPU */
					pproc->hwCtr.numSIGPwake++;	/* Count this one */
					return;		/* No need to do anything, the interrupt does it all... */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
			}
		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
	}

	panic("cpu_signal_handler: we should never get here\n");
/*
 *	Here is where we send a message to another processor. So far we only have two:
 *	SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
 *	currently disabled). SIGPdebug is used to enter the debugger.
 *
 *	We set up the SIGP function to indicate that this is a simple message and set the
 *	order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
 *	block for the target, we lock the message block. Then we set the parameter(s).
 *	Next we change the lock (also called "busy") to "passing" and finally signal
 *	the other processor. Note that we only wait about 1ms to get the message lock.
 *	If we time out, we return failure to our caller. It is their responsibility to
 */
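
/*
 *	As the code below spells out, the status word stored into the target's
 *	MPsigpStat packs several things at once: the busy and pass bits, the
 *	function code shifted left by 8 (MPsigpSigp << 8), and the sender's cpu
 *	number in the low-order bits; the receiver recovers the function with
 *	(holdStat & MPsigpFunc) >> 8.
 */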
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
	struct per_proc_info *tpproc, *mpproc;	/* Area for per_proc addresses */
	unsigned int busybitset = 0;
	if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);

	cpu = cpu_number();			/* Get our CPU number */
	if(target == cpu) return KERN_FAILURE;	/* Don't play with ourselves */
	if(!machine_slot[target].running) return KERN_FAILURE;	/* These guys are too young */

	mpproc = &per_proc_info[cpu];		/* Point to our block */
	tpproc = &per_proc_info[target];	/* Point to the target's block */

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */

		if(signal == SIGPwake) {		/* SIGPwake can merge into all others... */
			mpproc->hwCtr.numSIGPmwake++;	/* Account for merged wakes */
			return KERN_SUCCESS;
		}

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
			mpproc->hwCtr.numSIGPmast++;	/* Account for merged ASTs */
			return KERN_SUCCESS;		/* Don't bother to send this one... */
		}

		if (tpproc->MPsigpParm0 == SIGPwake) {
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
			    (MPsigpBusy | MPsigpPass), MPsigpBusy, 0)) {
				busybitset = 1;
				mpproc->hwCtr.numSIGPmwake++;
			}
		}
	}
	if((busybitset == 0) &&
	    (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	    (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
		mpproc->hwCtr.numSIGPtimo++;	/* Account for timeouts */
		return KERN_FAILURE;		/* Timed out, take your ball and go home... */
	}
	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;		/* Set message order */
	tpproc->MPsigpParm1 = p1;		/* Set additional parm */
	tpproc->MPsigpParm2 = p2;		/* Set additional parm */

	__asm__ volatile("sync");		/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;		/* Set status and pass the lock */
	__asm__ volatile("eieio");		/* I'm a paranoid freak */

	PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

	return KERN_SUCCESS;			/* All is goodness and rainbows... */
	processor_offline(current_processor());
	struct per_proc_info	*proc_info;
	unsigned int		wait_ncpus_sleep, ncpus_sleep;
	facility_context	*fowner;
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;
	extern void		_restart_cpu(void);

	proc_info = &per_proc_info[cpu];
	fowner = proc_info->FPU_owner;		/* Cache this */
	if(fowner) fpu_save(fowner);		/* If anyone owns FPU, save it */
	proc_info->FPU_owner = 0;		/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;		/* Cache this */
	if(fowner) vec_save(fowner);		/* If anyone owns vectors, save it */
	proc_info->VMX_owner = 0;		/* Set no vector owner now */
	if (proc_info->cpu_number == 0) {
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
		proc_info->interrupts_enabled = 0;
		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			extern void _start_cpu(void);

			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = (vm_offset_t)_start_cpu;	/* Note: these routines are always V=R */
			resethandler_target.arg__paddr = (vm_offset_t)proc_info;	/* Note: these routines are always V=R */

			ml_phys_write((vm_offset_t)&ResetHandler + 0, resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4, resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8, resethandler_target.arg__paddr);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}
		wait_ncpus_sleep = real_ncpus-1;
		ncpus_sleep = 0;
		while (wait_ncpus_sleep != ncpus_sleep) {
			ncpus_sleep = 0;
			for(i=1; i < real_ncpus; i++) {
				if ((*(volatile short *)&per_proc_info[i].cpu_flags) & SleepState)
					ncpus_sleep++;
			}
		}
	}

	PE_cpu_machine_quiesce(proc_info->cpu_id);
	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */

	/* Note that syncClkSpot is in a cache aligned area */
	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;
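
	/*
	 * These three flags carry the handshake with the CPRQtimebase handler in
	 * cpu_signal_handler() above: the master sets avail once it has captured
	 * its timebase, this side sets ready after copying the value out, and
	 * the master sets done when it has seen ready.
	 */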
	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
			(unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
		continue;
	/*
	 * We do the following to keep the compiler from generating extra stuff
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;
	syncClkSpot.ready = TRUE;

	while (*(volatile int *)&(syncClkSpot.done) == FALSE)
		continue;

	(void)ml_set_interrupts_enabled(intr);