/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * cpu specific routines
 */
28 #include <kern/machine.h>
29 #include <kern/misc_protos.h>
30 #include <kern/thread.h>
31 #include <kern/processor.h>
32 #include <mach/machine.h>
33 #include <mach/processor_info.h>
34 #include <mach/mach_types.h>
35 #include <ppc/proc_reg.h>
36 #include <ppc/misc_protos.h>
37 #include <ppc/machine_routines.h>
38 #include <ppc/machine_cpu.h>
39 #include <ppc/exception.h>
41 #include <ppc/hw_perfmon.h>
42 #include <pexpert/pexpert.h>
43 #include <kern/cpu_data.h>
44 #include <ppc/mappings.h>
45 #include <ppc/Diagnostics.h>
48 /* TODO: BOGUS TO BE REMOVED */
52 resethandler_t resethandler_target
;
54 #define MMCR0_SUPPORT_MASK 0xf83f1fff
55 #define MMCR1_SUPPORT_MASK 0xffc00000
56 #define MMCR2_SUPPORT_MASK 0x80000000
58 extern int debugger_pending
[NCPUS
];
59 extern int debugger_is_slave
[NCPUS
];
60 extern int debugger_holdoff
[NCPUS
];
61 extern int debugger_sync
;
70 struct per_proc_info
*pper_proc_info
= per_proc_info
;
72 extern struct SIGtimebase syncClkSpot
;
74 void cpu_sync_timebase(void);
79 processor_info_t info
,
83 cpu_subtype_t cpu_subtype
;
84 processor_pm_regs_t perf_regs
;
85 processor_control_cmd_t cmd
;
88 cpu_type
= machine_slot
[slot_num
].cpu_type
;
89 cpu_subtype
= machine_slot
[slot_num
].cpu_subtype
;
90 cmd
= (processor_control_cmd_t
) info
;
92 if (count
< PROCESSOR_CONTROL_CMD_COUNT
)
95 if ( cpu_type
!= cmd
->cmd_cpu_type
||
96 cpu_subtype
!= cmd
->cmd_cpu_subtype
)
99 if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS
) {
100 return(KERN_RESOURCE_SHORTAGE
); /* cpu performance facility in use by another task */
105 case PROCESSOR_PM_CLR_PMC
: /* Clear Performance Monitor Counters */
108 case CPU_SUBTYPE_POWERPC_750
:
109 case CPU_SUBTYPE_POWERPC_7400
:
110 case CPU_SUBTYPE_POWERPC_7450
:
112 oldlevel
= ml_set_interrupts_enabled(FALSE
); /* disable interrupts */
117 ml_set_interrupts_enabled(oldlevel
); /* enable interrupts */
118 return(KERN_SUCCESS
);
121 return(KERN_FAILURE
);
123 case PROCESSOR_PM_SET_REGS
: /* Set Performance Monitor Registors */
126 case CPU_SUBTYPE_POWERPC_750
:
127 if (count
< (PROCESSOR_CONTROL_CMD_COUNT
+
128 PROCESSOR_PM_REGS_COUNT_POWERPC_750
))
129 return(KERN_FAILURE
);
132 perf_regs
= (processor_pm_regs_t
)cmd
->cmd_pm_regs
;
133 oldlevel
= ml_set_interrupts_enabled(FALSE
); /* disable interrupts */
134 mtmmcr0(PERFMON_MMCR0(perf_regs
) & MMCR0_SUPPORT_MASK
);
135 mtpmc1(PERFMON_PMC1(perf_regs
));
136 mtpmc2(PERFMON_PMC2(perf_regs
));
137 mtmmcr1(PERFMON_MMCR1(perf_regs
) & MMCR1_SUPPORT_MASK
);
138 mtpmc3(PERFMON_PMC3(perf_regs
));
139 mtpmc4(PERFMON_PMC4(perf_regs
));
140 ml_set_interrupts_enabled(oldlevel
); /* enable interrupts */
141 return(KERN_SUCCESS
);
143 case CPU_SUBTYPE_POWERPC_7400
:
144 case CPU_SUBTYPE_POWERPC_7450
:
145 if (count
< (PROCESSOR_CONTROL_CMD_COUNT
+
146 PROCESSOR_PM_REGS_COUNT_POWERPC_7400
))
147 return(KERN_FAILURE
);
150 perf_regs
= (processor_pm_regs_t
)cmd
->cmd_pm_regs
;
151 oldlevel
= ml_set_interrupts_enabled(FALSE
); /* disable interrupts */
152 mtmmcr0(PERFMON_MMCR0(perf_regs
) & MMCR0_SUPPORT_MASK
);
153 mtpmc1(PERFMON_PMC1(perf_regs
));
154 mtpmc2(PERFMON_PMC2(perf_regs
));
155 mtmmcr1(PERFMON_MMCR1(perf_regs
) & MMCR1_SUPPORT_MASK
);
156 mtpmc3(PERFMON_PMC3(perf_regs
));
157 mtpmc4(PERFMON_PMC4(perf_regs
));
158 mtmmcr2(PERFMON_MMCR2(perf_regs
) & MMCR2_SUPPORT_MASK
);
159 ml_set_interrupts_enabled(oldlevel
); /* enable interrupts */
160 return(KERN_SUCCESS
);
163 return(KERN_FAILURE
);
164 } /* switch cpu_subtype */
165 case PROCESSOR_PM_SET_MMCR
:
168 case CPU_SUBTYPE_POWERPC_750
:
169 if (count
< (PROCESSOR_CONTROL_CMD_COUNT
+
170 PROCESSOR_PM_REGS_COUNT_POWERPC_750
))
171 return(KERN_FAILURE
);
174 perf_regs
= (processor_pm_regs_t
)cmd
->cmd_pm_regs
;
175 oldlevel
= ml_set_interrupts_enabled(FALSE
); /* disable interrupts */
176 mtmmcr0(PERFMON_MMCR0(perf_regs
) & MMCR0_SUPPORT_MASK
);
177 mtmmcr1(PERFMON_MMCR1(perf_regs
) & MMCR1_SUPPORT_MASK
);
178 ml_set_interrupts_enabled(oldlevel
); /* enable interrupts */
179 return(KERN_SUCCESS
);
181 case CPU_SUBTYPE_POWERPC_7400
:
182 case CPU_SUBTYPE_POWERPC_7450
:
183 if (count
< (PROCESSOR_CONTROL_CMD_COUNT
+
184 PROCESSOR_PM_REGS_COUNT_POWERPC_7400
))
185 return(KERN_FAILURE
);
188 perf_regs
= (processor_pm_regs_t
)cmd
->cmd_pm_regs
;
189 oldlevel
= ml_set_interrupts_enabled(FALSE
); /* disable interrupts */
190 mtmmcr0(PERFMON_MMCR0(perf_regs
) & MMCR0_SUPPORT_MASK
);
191 mtmmcr1(PERFMON_MMCR1(perf_regs
) & MMCR1_SUPPORT_MASK
);
192 mtmmcr2(PERFMON_MMCR2(perf_regs
) & MMCR2_SUPPORT_MASK
);
193 ml_set_interrupts_enabled(oldlevel
); /* enable interrupts */
194 return(KERN_SUCCESS
);
197 return(KERN_FAILURE
);
200 return(KERN_FAILURE
);
201 } /* switch cmd_op */
206 processor_flavor_t flavor
,
209 cpu_subtype_t cpu_subtype
;
212 * For now, we just assume that all CPUs are of the same type
214 cpu_subtype
= machine_slot
[0].cpu_subtype
;
216 case PROCESSOR_PM_REGS_INFO
:
217 switch (cpu_subtype
) {
218 case CPU_SUBTYPE_POWERPC_750
:
220 *count
= PROCESSOR_PM_REGS_COUNT_POWERPC_750
;
221 return(KERN_SUCCESS
);
223 case CPU_SUBTYPE_POWERPC_7400
:
224 case CPU_SUBTYPE_POWERPC_7450
:
226 *count
= PROCESSOR_PM_REGS_COUNT_POWERPC_7400
;
227 return(KERN_SUCCESS
);
231 return(KERN_INVALID_ARGUMENT
);
232 } /* switch cpu_subtype */
234 case PROCESSOR_TEMPERATURE
:
235 *count
= PROCESSOR_TEMPERATURE_COUNT
;
236 return (KERN_SUCCESS
);
240 return(KERN_INVALID_ARGUMENT
);
247 processor_flavor_t flavor
,
249 processor_info_t info
,
252 cpu_subtype_t cpu_subtype
;
253 processor_pm_regs_t perf_regs
;
255 unsigned int temp
[2];
257 cpu_subtype
= machine_slot
[slot_num
].cpu_subtype
;
260 case PROCESSOR_PM_REGS_INFO
:
262 perf_regs
= (processor_pm_regs_t
) info
;
264 switch (cpu_subtype
) {
265 case CPU_SUBTYPE_POWERPC_750
:
267 if (*count
< PROCESSOR_PM_REGS_COUNT_POWERPC_750
)
268 return(KERN_FAILURE
);
270 oldlevel
= ml_set_interrupts_enabled(FALSE
); /* disable interrupts */
271 PERFMON_MMCR0(perf_regs
) = mfmmcr0();
272 PERFMON_PMC1(perf_regs
) = mfpmc1();
273 PERFMON_PMC2(perf_regs
) = mfpmc2();
274 PERFMON_MMCR1(perf_regs
) = mfmmcr1();
275 PERFMON_PMC3(perf_regs
) = mfpmc3();
276 PERFMON_PMC4(perf_regs
) = mfpmc4();
277 ml_set_interrupts_enabled(oldlevel
); /* enable interrupts */
279 *count
= PROCESSOR_PM_REGS_COUNT_POWERPC_750
;
280 return(KERN_SUCCESS
);
282 case CPU_SUBTYPE_POWERPC_7400
:
283 case CPU_SUBTYPE_POWERPC_7450
:
285 if (*count
< PROCESSOR_PM_REGS_COUNT_POWERPC_7400
)
286 return(KERN_FAILURE
);
288 oldlevel
= ml_set_interrupts_enabled(FALSE
); /* disable interrupts */
289 PERFMON_MMCR0(perf_regs
) = mfmmcr0();
290 PERFMON_PMC1(perf_regs
) = mfpmc1();
291 PERFMON_PMC2(perf_regs
) = mfpmc2();
292 PERFMON_MMCR1(perf_regs
) = mfmmcr1();
293 PERFMON_PMC3(perf_regs
) = mfpmc3();
294 PERFMON_PMC4(perf_regs
) = mfpmc4();
295 PERFMON_MMCR2(perf_regs
) = mfmmcr2();
296 ml_set_interrupts_enabled(oldlevel
); /* enable interrupts */
298 *count
= PROCESSOR_PM_REGS_COUNT_POWERPC_7400
;
299 return(KERN_SUCCESS
);
302 return(KERN_FAILURE
);
303 } /* switch cpu_subtype */
305 case PROCESSOR_TEMPERATURE
: /* Get the temperature of a processor */
307 disable_preemption(); /* Don't move me now */
309 if(slot_num
== cpu_number()) { /* Is this for the local CPU? */
310 *info
= ml_read_temp(); /* Get the temperature */
312 else { /* For another CPU */
313 temp
[0] = -1; /* Set sync flag */
316 temp
[1] = -1; /* Set invalid temperature */
317 (void)cpu_signal(slot_num
, SIGPcpureq
, CPRQtemp
,(unsigned int)&temp
); /* Ask him to take his temperature */
318 (void)hw_cpu_sync(temp
, LockTimeOut
); /* Wait for the other processor to get its temperature */
319 *info
= temp
[1]; /* Pass it back */
322 enable_preemption(); /* Ok to move now */
323 return(KERN_SUCCESS
);
326 return(KERN_INVALID_ARGUMENT
);
339 machine_slot
[cpu
].running
= TRUE
;
340 machine_slot
[cpu
].cpu_type
= CPU_TYPE_POWERPC
;
341 machine_slot
[cpu
].cpu_subtype
= (cpu_subtype_t
)per_proc_info
[cpu
].pf
.rptdProc
;
349 struct per_proc_info
*tproc_info
;
350 volatile struct per_proc_info
*mproc_info
;
353 /* TODO: realese mutex lock reset_handler_lock */
356 tproc_info
= &per_proc_info
[cpu
];
357 mproc_info
= &per_proc_info
[master_cpu
];
358 PE_cpu_machine_init(tproc_info
->cpu_id
, !(tproc_info
->cpu_flags
& BootDone
));
359 if (cpu
!= master_cpu
) {
360 while (!((mproc_info
->cpu_flags
) & SignalReady
))
365 tproc_info
->cpu_flags
|= BootDone
|SignalReady
;
377 * - Run cpu_register() in exclusion mode
381 for(cpu
=0; cpu
< wncpu
; cpu
++) {
382 if(!machine_slot
[cpu
].is_cpu
) {
383 machine_slot
[cpu
].is_cpu
= TRUE
;
388 if (*target_cpu
!= -1) {
399 struct per_proc_info
*proc_info
;
403 extern vm_offset_t intstack
;
404 extern vm_offset_t debstack
;
406 proc_info
= &per_proc_info
[cpu
];
408 if (cpu
== cpu_number()) {
409 PE_cpu_machine_init(proc_info
->cpu_id
, !(proc_info
->cpu_flags
& BootDone
));
411 proc_info
->cpu_flags
|= BootDone
|SignalReady
;
415 extern void _start_cpu(void);
417 proc_info
->cpu_number
= cpu
;
418 proc_info
->cpu_flags
&= BootDone
;
419 proc_info
->istackptr
= (vm_offset_t
)&intstack
+ (INTSTACK_SIZE
*(cpu
+1)) - FM_SIZE
;
420 proc_info
->intstack_top_ss
= proc_info
->istackptr
;
421 #if MACH_KDP || MACH_KDB
422 proc_info
->debstackptr
= (vm_offset_t
)&debstack
+ (KERNEL_STACK_SIZE
*(cpu
+1)) - FM_SIZE
;
423 proc_info
->debstack_top_ss
= proc_info
->debstackptr
;
424 #endif /* MACH_KDP || MACH_KDB */
425 proc_info
->interrupts_enabled
= 0;
426 proc_info
->need_ast
= (unsigned int)&need_ast
[cpu
];
427 proc_info
->FPU_owner
= 0;
428 proc_info
->VMX_owner
= 0;
429 mp
= (mapping
*)(&proc_info
->ppCIOmp
);
430 mp
->mpFlags
= 0x01000000 | mpSpecial
| 1;
431 mp
->mpSpace
= invalSpace
;
433 if (proc_info
->start_paddr
== EXCEPTION_VECTOR(T_RESET
)) {
435 /* TODO: get mutex lock reset_handler_lock */
437 resethandler_target
.type
= RESET_HANDLER_START
;
438 resethandler_target
.call_paddr
= (vm_offset_t
)_start_cpu
; /* Note: these routines are always V=R */
439 resethandler_target
.arg__paddr
= (vm_offset_t
)proc_info
; /* Note: these routines are always V=R */
441 ml_phys_write((vm_offset_t
)&ResetHandler
+ 0,
442 resethandler_target
.type
);
443 ml_phys_write((vm_offset_t
)&ResetHandler
+ 4,
444 resethandler_target
.call_paddr
);
445 ml_phys_write((vm_offset_t
)&ResetHandler
+ 8,
446 resethandler_target
.arg__paddr
);
450 * Note: we pass the current time to the other processor here. He will load it
451 * as early as possible so that there is a chance that it is close to accurate.
452 * After the machine is up a while, we will officially resync the clocks so
453 * that all processors are the same. This is just to get close.
456 ml_get_timebase((unsigned long long *)&proc_info
->ruptStamp
); /* Pass our current time to the other guy */
458 __asm__
volatile("sync"); /* Commit to storage */
459 __asm__
volatile("isync"); /* Wait a second */
460 ret
= PE_cpu_start(proc_info
->cpu_id
,
461 proc_info
->start_paddr
, (vm_offset_t
)proc_info
);
463 if (ret
!= KERN_SUCCESS
&&
464 proc_info
->start_paddr
== EXCEPTION_VECTOR(T_RESET
)) {
466 /* TODO: realese mutex lock reset_handler_lock */
472 perfTrap perfCpuSigHook
= 0; /* Pointer to CHUD cpu signal hook routine */
475 * Here is where we implement the receiver of the signaling protocol.
476 * We wait for the signal status area to be passed to us. Then we snarf
477 * up the status, the sender, and the 3 potential parms. Next we release
478 * the lock and signal the other guy.
486 unsigned int holdStat
, holdParm0
, holdParm1
, holdParm2
, mtype
;
487 unsigned int *parmAddr
;
488 struct per_proc_info
*pproc
; /* Area for my per_proc address */
490 struct SIGtimebase
*timebaseAddr
;
491 natural_t tbu
, tbu2
, tbl
;
493 cpu
= cpu_number(); /* Get the CPU number */
494 pproc
= &per_proc_info
[cpu
]; /* Point to our block */
497 * Since we've been signaled, wait about 31 ms for the signal lock to pass
499 if(!hw_lock_mbits(&pproc
->MPsigpStat
, (MPsigpMsgp
| MPsigpAck
), (MPsigpBusy
| MPsigpPass
),
500 (MPsigpBusy
| MPsigpPass
| MPsigpAck
), (gPEClockFrequencyInfo
.timebase_frequency_hz
>> 5))) {
501 panic("cpu_signal_handler: Lock pass timed out\n");
504 holdStat
= pproc
->MPsigpStat
; /* Snarf stat word */
505 holdParm0
= pproc
->MPsigpParm0
; /* Snarf parameter */
506 holdParm1
= pproc
->MPsigpParm1
; /* Snarf parameter */
507 holdParm2
= pproc
->MPsigpParm2
; /* Snarf parameter */
509 __asm__
volatile("isync"); /* Make sure we don't unlock until memory is in */
511 pproc
->MPsigpStat
= holdStat
& ~(MPsigpMsgp
| MPsigpAck
| MPsigpFunc
); /* Release lock */
513 switch ((holdStat
& MPsigpFunc
) >> 8) { /* Decode function code */
515 case MPsigpIdle
: /* Was function cancelled? */
518 case MPsigpSigp
: /* Signal Processor message? */
520 switch (holdParm0
) { /* Decode SIGP message order */
522 case SIGPast
: /* Should we do an AST? */
523 pproc
->hwCtr
.numSIGPast
++; /* Count this one */
525 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
527 ast_check(cpu_to_processor(cpu
));
528 return; /* All done... */
530 case SIGPcpureq
: /* CPU specific function? */
532 pproc
->hwCtr
.numSIGPcpureq
++; /* Count this one */
533 switch (holdParm1
) { /* Select specific function */
535 case CPRQtemp
: /* Get the temperature */
536 parmAddr
= (unsigned int *)holdParm2
; /* Get the destination address */
537 parmAddr
[1] = ml_read_temp(); /* Get the core temperature */
538 eieio(); /* Force order */
539 sync(); /* Force to memory */
540 parmAddr
[0] = 0; /* Show we're done */
545 timebaseAddr
= (struct SIGtimebase
*)holdParm2
;
547 if(pproc
->time_base_enable
!= (void(*)(cpu_id_t
, boolean_t
))NULL
)
548 pproc
->time_base_enable(pproc
->cpu_id
, FALSE
);
550 timebaseAddr
->abstime
= 0; /* Touch to force into cache */
554 asm volatile(" mftbu %0" : "=r" (tbu
));
555 asm volatile(" mftb %0" : "=r" (tbl
));
556 asm volatile(" mftbu %0" : "=r" (tbu2
));
557 } while (tbu
!= tbu2
);
559 timebaseAddr
->abstime
= ((uint64_t)tbu
<< 32) | tbl
;
560 sync(); /* Force order */
562 timebaseAddr
->avail
= TRUE
;
564 while (*(volatile int *)&(syncClkSpot
.ready
) == FALSE
);
566 if(pproc
->time_base_enable
!= (void(*)(cpu_id_t
, boolean_t
))NULL
)
567 pproc
->time_base_enable(pproc
->cpu_id
, TRUE
);
569 timebaseAddr
->done
= TRUE
;
577 parmAddr
= (unsigned int *)holdParm2
; /* Get the destination address */
579 struct savearea
*ssp
= current_act()->mact
.pcb
;
581 (perfCpuSigHook
)(parmAddr
[1] /* request */, ssp
, 0, 0);
585 parmAddr
[0] = 0; /* Show we're done */
589 if(((scomcomm
*)holdParm2
)->scomfunc
) { /* Are we writing */
590 ((scomcomm
*)holdParm2
)->scomstat
= ml_scom_write(((scomcomm
*)holdParm2
)->scomreg
, ((scomcomm
*)holdParm2
)->scomdata
); /* Write scom */
592 else { /* No, reading... */
593 ((scomcomm
*)holdParm2
)->scomstat
= ml_scom_read(((scomcomm
*)holdParm2
)->scomreg
, &((scomcomm
*)holdParm2
)->scomdata
); /* Read scom */
598 panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1
);
603 case SIGPdebug
: /* Enter the debugger? */
605 pproc
->hwCtr
.numSIGPdebug
++; /* Count this one */
606 debugger_is_slave
[cpu
]++; /* Bump up the count to show we're here */
607 hw_atomic_sub(&debugger_sync
, 1); /* Show we've received the 'rupt */
608 __asm__
volatile("tw 4,r3,r3"); /* Enter the debugger */
609 return; /* All done now... */
611 case SIGPwake
: /* Wake up CPU */
612 pproc
->hwCtr
.numSIGPwake
++; /* Count this one */
613 return; /* No need to do anything, the interrupt does it all... */
616 panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0
);
622 panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat
& MPsigpFunc
) >> 8);
626 panic("cpu_signal_handler: we should never get here\n");
630 * Here is where we send a message to another processor. So far we only have two:
631 * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
632 * currently disabled). SIGPdebug is used to enter the debugger.
634 * We set up the SIGP function to indicate that this is a simple message and set the
635 * order code (MPsigpParm0) to SIGPast or SIGPdebug). After finding the per_processor
636 * block for the target, we lock the message block. Then we set the parameter(s).
637 * Next we change the lock (also called "busy") to "passing" and finally signal
638 * the other processor. Note that we only wait about 1ms to get the message lock.
639 * If we time out, we return failure to our caller. It is their responsibility to
651 unsigned int holdStat
, holdParm0
, holdParm1
, holdParm2
, mtype
;
652 struct per_proc_info
*tpproc
, *mpproc
; /* Area for per_proc addresses */
657 if(target
> NCPUS
) panic("cpu_signal: invalid target CPU - %08X\n", target
);
660 cpu
= cpu_number(); /* Get our CPU number */
661 if(target
== cpu
) return KERN_FAILURE
; /* Don't play with ourselves */
662 if(!machine_slot
[target
].running
) return KERN_FAILURE
; /* These guys are too young */
664 mpproc
= &per_proc_info
[cpu
]; /* Point to our block */
665 tpproc
= &per_proc_info
[target
]; /* Point to the target's block */
667 if (!(tpproc
->cpu_flags
& SignalReady
)) return KERN_FAILURE
;
669 if((tpproc
->MPsigpStat
& MPsigpMsgp
) == MPsigpMsgp
) { /* Is there an unreceived message already pending? */
671 if(signal
== SIGPwake
) { /* SIGPwake can merge into all others... */
672 mpproc
->hwCtr
.numSIGPmwake
++; /* Account for merged wakes */
676 if((signal
== SIGPast
) && (tpproc
->MPsigpParm0
== SIGPast
)) { /* We can merge ASTs */
677 mpproc
->hwCtr
.numSIGPmast
++; /* Account for merged ASTs */
678 return KERN_SUCCESS
; /* Don't bother to send this one... */
681 if (tpproc
->MPsigpParm0
== SIGPwake
) {
682 if (hw_lock_mbits(&tpproc
->MPsigpStat
, (MPsigpMsgp
| MPsigpAck
),
683 (MPsigpBusy
| MPsigpPass
), MPsigpBusy
, 0)) {
685 mpproc
->hwCtr
.numSIGPmwake
++;
690 if((busybitset
== 0) &&
691 (!hw_lock_mbits(&tpproc
->MPsigpStat
, MPsigpMsgp
, 0, MPsigpBusy
,
692 (gPEClockFrequencyInfo
.timebase_frequency_hz
>> 11)))) { /* Try to lock the message block with a .5ms timeout */
693 mpproc
->hwCtr
.numSIGPtimo
++; /* Account for timeouts */
694 return KERN_FAILURE
; /* Timed out, take your ball and go home... */
697 holdStat
= MPsigpBusy
| MPsigpPass
| (MPsigpSigp
<< 8) | cpu
; /* Set up the signal status word */
698 tpproc
->MPsigpParm0
= signal
; /* Set message order */
699 tpproc
->MPsigpParm1
= p1
; /* Set additional parm */
700 tpproc
->MPsigpParm2
= p2
; /* Set additional parm */
702 __asm__
volatile("sync"); /* Make sure it's all there */
704 tpproc
->MPsigpStat
= holdStat
; /* Set status and pass the lock */
705 __asm__
volatile("eieio"); /* I'm a paraniod freak */
708 PE_cpu_signal(mpproc
->cpu_id
, tpproc
->cpu_id
); /* Kick the other processor */
710 return KERN_SUCCESS
; /* All is goodness and rainbows... */
718 processor_offline(current_processor());
725 struct per_proc_info
*proc_info
;
727 unsigned int wait_ncpus_sleep
, ncpus_sleep
;
728 facility_context
*fowner
;
729 extern vm_offset_t intstack
;
730 extern vm_offset_t debstack
;
731 extern void _restart_cpu(void);
735 proc_info
= &per_proc_info
[cpu
];
737 fowner
= proc_info
->FPU_owner
; /* Cache this */
738 if(fowner
) fpu_save(fowner
); /* If anyone owns FPU, save it */
739 proc_info
->FPU_owner
= 0; /* Set no fpu owner now */
741 fowner
= proc_info
->VMX_owner
; /* Cache this */
742 if(fowner
) vec_save(fowner
); /* If anyone owns vectors, save it */
743 proc_info
->VMX_owner
= 0; /* Set no vector owner now */
745 if (proc_info
->cpu_number
== 0) {
746 proc_info
->cpu_flags
&= BootDone
;
747 proc_info
->istackptr
= (vm_offset_t
)&intstack
+ (INTSTACK_SIZE
*(cpu
+1)) - FM_SIZE
;
748 proc_info
->intstack_top_ss
= proc_info
->istackptr
;
749 #if MACH_KDP || MACH_KDB
750 proc_info
->debstackptr
= (vm_offset_t
)&debstack
+ (KERNEL_STACK_SIZE
*(cpu
+1)) - FM_SIZE
;
751 proc_info
->debstack_top_ss
= proc_info
->debstackptr
;
752 #endif /* MACH_KDP || MACH_KDB */
753 proc_info
->interrupts_enabled
= 0;
755 if (proc_info
->start_paddr
== EXCEPTION_VECTOR(T_RESET
)) {
756 extern void _start_cpu(void);
758 resethandler_target
.type
= RESET_HANDLER_START
;
759 resethandler_target
.call_paddr
= (vm_offset_t
)_start_cpu
; /* Note: these routines are always V=R */
760 resethandler_target
.arg__paddr
= (vm_offset_t
)proc_info
; /* Note: these routines are always V=R */
762 ml_phys_write((vm_offset_t
)&ResetHandler
+ 0,
763 resethandler_target
.type
);
764 ml_phys_write((vm_offset_t
)&ResetHandler
+ 4,
765 resethandler_target
.call_paddr
);
766 ml_phys_write((vm_offset_t
)&ResetHandler
+ 8,
767 resethandler_target
.arg__paddr
);
769 __asm__
volatile("sync");
770 __asm__
volatile("isync");
773 wait_ncpus_sleep
= real_ncpus
-1;
775 while (wait_ncpus_sleep
!= ncpus_sleep
) {
777 for(i
=1; i
< real_ncpus
; i
++) {
778 if ((*(volatile short *)&per_proc_info
[i
].cpu_flags
) & SleepState
)
784 PE_cpu_machine_quiesce(proc_info
->cpu_id
);
794 intr
= ml_set_interrupts_enabled(FALSE
); /* No interruptions in here */
796 /* Note that syncClkSpot is in a cache aligned area */
797 syncClkSpot
.avail
= FALSE
;
798 syncClkSpot
.ready
= FALSE
;
799 syncClkSpot
.done
= FALSE
;
801 while (cpu_signal(master_cpu
, SIGPcpureq
, CPRQtimebase
,
802 (unsigned int)&syncClkSpot
) != KERN_SUCCESS
)
805 while (*(volatile int *)&(syncClkSpot
.avail
) == FALSE
)
811 * We do the following to keep the compiler from generating extra stuff
814 tbu
= syncClkSpot
.abstime
>> 32;
815 tbl
= (uint32_t)syncClkSpot
.abstime
;
821 syncClkSpot
.ready
= TRUE
;
823 while (*(volatile int *)&(syncClkSpot
.done
) == FALSE
)
826 (void)ml_set_interrupts_enabled(intr
);