/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * cpu specific routines
 */
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <pexpert/pexpert.h>
//#include <pexpert/ppc/powermac.h>
/* TODO: BOGUS TO BE REMOVED */
resethandler_t	resethandler_target;
#define MMCR0_SUPPORT_MASK	0xf83f1fff
#define MMCR1_SUPPORT_MASK	0xffc00000
#define MMCR2_SUPPORT_MASK	0x80000000
extern int debugger_pending[NCPUS];
extern int debugger_is_slave[NCPUS];
extern int debugger_holdoff[NCPUS];
extern int debugger_sync;

extern struct SIGtimebase syncClkSpot;

void cpu_sync_timebase(void);
kern_return_t
cpu_control(
	int			slot_num,
	processor_info_t	info,
	unsigned int		count)
{
	cpu_type_t		cpu_type;
	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	processor_control_cmd_t	cmd;
	boolean_t		oldlevel;

	cpu_type	= machine_slot[slot_num].cpu_type;
	cpu_subtype	= machine_slot[slot_num].cpu_subtype;
	cmd		= (processor_control_cmd_t) info;

	if (count < PROCESSOR_CONTROL_CMD_COUNT)
		return(KERN_FAILURE);

	if ( cpu_type != cmd->cmd_cpu_type ||
	     cpu_subtype != cmd->cmd_cpu_subtype)
		return(KERN_FAILURE);
	switch (cmd->cmd_op) {

	case PROCESSOR_PM_CLR_PMC:		/* Clear Performance Monitor Counters */
		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtpmc1(0x0);
			mtpmc2(0x0);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtpmc1(0x0);
			mtpmc2(0x0);
			mtpmc3(0x0);
			mtpmc4(0x0);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch cpu_subtype */
	case PROCESSOR_PM_SET_REGS:		/* Set Performance Monitor Registers */
		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT
				+ PROCESSOR_PM_REGS_COUNT_POWERPC_604))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch cpu_subtype */
	case PROCESSOR_PM_SET_MMCR:
		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_604))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch cpu_subtype */

	default:
		return(KERN_FAILURE);
	} /* switch cmd_op */

	return(KERN_SUCCESS);
}
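/*
 *	Illustrative only, not a call site from this file: a caller of the
 *	Mach processor_control() path would package the request as a
 *	processor_control_cmd followed by the performance-monitor register
 *	values.  The field and count names below are the ones checked by
 *	cpu_control() above; the exact user-side buffer layout is an
 *	assumption.
 *
 *		cmd->cmd_op          = PROCESSOR_PM_SET_REGS;
 *		cmd->cmd_cpu_type    = CPU_TYPE_POWERPC;
 *		cmd->cmd_cpu_subtype = CPU_SUBTYPE_POWERPC_750;
 *		... fill cmd->cmd_pm_regs with MMCR0/MMCR1/PMC1..PMC4 values ...
 *
 *	with count at least PROCESSOR_CONTROL_CMD_COUNT plus
 *	PROCESSOR_PM_REGS_COUNT_POWERPC_750.
 */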
kern_return_t
cpu_info_count(
	processor_flavor_t	flavor,
	unsigned int		*count)
{
	cpu_subtype_t	cpu_subtype;

	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	cpu_subtype = machine_slot[0].cpu_subtype;

	switch (flavor) {
	case PROCESSOR_PM_REGS_INFO:
		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

		default:
			return(KERN_INVALID_ARGUMENT);
		} /* switch cpu_subtype */

	case PROCESSOR_TEMPERATURE:
		*count = PROCESSOR_TEMPERATURE_COUNT;
		return(KERN_SUCCESS);

	default:
		return(KERN_INVALID_ARGUMENT);
	}
}
kern_return_t
cpu_info(
	processor_flavor_t	flavor,
	int			slot_num,
	processor_info_t	info,
	unsigned int		*count)
{
	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	boolean_t		oldlevel;
	unsigned int		temp[2];

	cpu_subtype = machine_slot[slot_num].cpu_subtype;

	switch (flavor) {
	case PROCESSOR_PM_REGS_INFO:

		perf_regs = (processor_pm_regs_t) info;

		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs) = mfpmc3();
			PERFMON_PMC4(perf_regs) = mfpmc4();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs) = mfpmc3();
			PERFMON_PMC4(perf_regs) = mfpmc4();
			PERFMON_MMCR2(perf_regs) = mfmmcr2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch cpu_subtype */

	case PROCESSOR_TEMPERATURE:			/* Get the temperature of a processor */

		disable_preemption();			/* Don't move me now */

		if(slot_num == cpu_number()) {		/* Is this for the local CPU? */
			*info = ml_read_temp();		/* Get the temperature */
		}
		else {					/* For another CPU */
			temp[0] = -1;			/* Set sync flag */
			eieio();			/* Force order */
			sync();				/* Force to memory */
			temp[1] = -1;			/* Set invalid temperature */
			(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp, (unsigned int)&temp);	/* Ask him to take his temperature */
			(void)hw_cpu_sync(temp, LockTimeOut);	/* Wait for the other processor to get its temperature */
			*info = temp[1];		/* Pass it back */
		}

		enable_preemption();			/* Ok to move now */
		return(KERN_SUCCESS);

	default:
		return(KERN_INVALID_ARGUMENT);
	}
}
void
cpu_init(
	void)
{
	int cpu;

	cpu = cpu_number();

	machine_slot[cpu].running = TRUE;
	machine_slot[cpu].cpu_type = CPU_TYPE_POWERPC;
	machine_slot[cpu].cpu_subtype = (cpu_subtype_t)per_proc_info[cpu].pf.rptdProc;
}
void
cpu_machine_init(
	void)
{
	struct per_proc_info		*tproc_info;
	volatile struct per_proc_info	*mproc_info;
	int cpu;

	/* TODO: release mutex lock reset_handler_lock */

	cpu = cpu_number();
	tproc_info = &per_proc_info[cpu];
	mproc_info = &per_proc_info[master_cpu];
	PE_cpu_machine_init(tproc_info->cpu_id, !(tproc_info->cpu_flags & BootDone));
	if (cpu != master_cpu) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	tproc_info->cpu_flags |= BootDone|SignalReady;
}
kern_return_t
cpu_register(
	int *target_cpu)
{
	int cpu;

	/*
	 *	- Run cpu_register() in exclusion mode
	 */

	*target_cpu = -1;
	for(cpu=0; cpu < wncpu; cpu++) {
		if(!machine_slot[cpu].is_cpu) {
			machine_slot[cpu].is_cpu = TRUE;
			*target_cpu = cpu;
			break;
		}
	}

	if (*target_cpu != -1) {
		return KERN_SUCCESS;
	} else
		return KERN_FAILURE;
}
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;

	extern void (*exception_handlers[])(void);
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;

	proc_info = &per_proc_info[cpu];

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		extern void _start_cpu(void);

		proc_info->cpu_number = cpu;
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->intstack_top_ss = proc_info->istackptr;
#if	MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif	/* MACH_KDP || MACH_KDB */
		proc_info->interrupts_enabled = 0;
		proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu];
		proc_info->cpu_data = (unsigned int)&cpu_data[cpu];
		proc_info->active_stacks = (unsigned int)&active_stacks[cpu];
		proc_info->need_ast = (unsigned int)&need_ast[cpu];
		proc_info->FPU_thread = 0;
		proc_info->FPU_vmmCtx = 0;
		proc_info->VMX_thread = 0;
		proc_info->VMX_vmmCtx = 0;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: get mutex lock reset_handler_lock */

			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					resethandler_target.arg__paddr);
		}

/*
 *		Note: we pass the current time to the other processor here. He will load it
 *		as early as possible so that there is a chance that it is close to accurate.
 *		After the machine is up a while, we will officially resync the clocks so
 *		that all processors are the same.  This is just to get close.
 */

		ml_get_timebase(&proc_info->ruptStamp);	/* Pass our current time to the other guy */

		__asm__ volatile("sync");		/* Commit to storage */
		__asm__ volatile("isync");		/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
				   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS &&
		    proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: release mutex lock reset_handler_lock */
		}
		return(ret);
	}
}
/*
 *	Here is where we implement the receiver of the signaling protocol.
 *	We wait for the signal status area to be passed to us. Then we snarf
 *	up the status, the sender, and the 3 potential parms. Next we release
 *	the lock and signal the other guy.
 */
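/*
 *	Layout of the signal status word, as can be inferred from cpu_signal()
 *	below: the low-order bits carry the sending CPU number, the byte
 *	selected by MPsigpFunc (extracted with a >> 8) carries the function
 *	code such as MPsigpSigp, and MPsigpBusy/MPsigpPass are the lock and
 *	handoff bits that hw_lock_mbits() waits on.
 */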
void
cpu_signal_handler(
	void)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
	unsigned int *parmAddr;
	struct per_proc_info *pproc;		/* Area for my per_proc address */
	int cpu;
	struct SIGtimebase *timebaseAddr;
	natural_t tbu, tbu2, tbl;

	cpu = cpu_number();			/* Get the CPU number */
	pproc = &per_proc_info[cpu];		/* Point to our block */

/*
 *	Since we've been signaled, wait just under 1ms for the signal lock to pass
 */
	if(!hw_lock_mbits(&pproc->MPsigpStat, MPsigpMsgp, (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass), (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	holdStat = pproc->MPsigpStat;		/* Snarf stat word */
	holdParm0 = pproc->MPsigpParm0;		/* Snarf parameter */
	holdParm1 = pproc->MPsigpParm1;		/* Snarf parameter */
	holdParm2 = pproc->MPsigpParm2;		/* Snarf parameter */

	__asm__ volatile("isync");		/* Make sure we don't unlock until memory is in */

	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {	/* Decode function code */

		case MPsigpIdle:		/* Was function cancelled? */
			return;

		case MPsigpSigp:		/* Signal Processor message? */

			switch (holdParm0) {	/* Decode SIGP message order */

				case SIGPast:	/* Should we do an AST? */
					pproc->numSIGPast++;		/* Count this one */
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
					ast_check();			/* Yes, do it */
					/* XXX: Should check if AST_URGENT is needed */
					return;				/* All done... */

				case SIGPcpureq:			/* CPU specific function? */

					pproc->numSIGPcpureq++;		/* Count this one */
					switch (holdParm1) {		/* Select specific function */

						case CPRQtemp:		/* Get the temperature */
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							parmAddr[1] = ml_read_temp();		/* Get the core temperature */
							eieio();				/* Force order */
							sync();					/* Force to memory */
							parmAddr[0] = 0;			/* Show we're done */
							return;

						case CPRQtimebase:	/* Pass our timebase to a requesting processor */

							timebaseAddr = (struct SIGtimebase *)holdParm2;

							if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
								pproc->time_base_enable(pproc->cpu_id, FALSE);

							timebaseAddr->abstime = 0;	/* Touch to force into cache */
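							/*
							 * The loop below is the usual PowerPC idiom for reading the
							 * 64-bit timebase with 32-bit moves: read TBU, then TBL, then
							 * TBU again, and retry if the upper half ticked over between
							 * the two reads.
							 */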
							do {
								asm volatile("	mftbu %0" : "=r" (tbu));
								asm volatile("	mftb %0" : "=r" (tbl));
								asm volatile("	mftbu %0" : "=r" (tbu2));
							} while (tbu != tbu2);

							timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
							sync();				/* Force order */

							timebaseAddr->avail = TRUE;

							while (*(volatile int *)&(syncClkSpot.ready) == FALSE);

							if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
								pproc->time_base_enable(pproc->cpu_id, TRUE);

							timebaseAddr->done = TRUE;
							return;

						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
							return;
					}

				case SIGPdebug:				/* Enter the debugger? */

					pproc->numSIGPdebug++;		/* Count this one */
					debugger_is_slave[cpu]++;	/* Bump up the count to show we're here */
					hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
					return;				/* All done now... */

				case SIGPwake:				/* Wake up CPU */
					pproc->numSIGPwake++;		/* Count this one */
					return;				/* No need to do anything, the interrupt does it all... */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
					return;
			}

		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
			return;
	}
	panic("cpu_signal_handler: we should never get here\n");
}
/*
 *	Here is where we send a message to another processor. So far we only have two:
 *	SIGPast and SIGPdebug.  SIGPast is used to preempt and kick off threads (this is
 *	currently disabled). SIGPdebug is used to enter the debugger.
 *
 *	We set up the SIGP function to indicate that this is a simple message and set the
 *	order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
 *	block for the target, we lock the message block. Then we set the parameter(s).
 *	Next we change the lock (also called "busy") to "passing" and finally signal
 *	the other processor. Note that we only wait about 1ms to get the message lock.
 *	If we time out, we return failure to our caller. It is their responsibility to
 *	retry the send if they still need it.
 */
kern_return_t
cpu_signal(
	int target,
	int signal,
	unsigned int p1,
	unsigned int p2)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
	struct per_proc_info *tpproc, *mpproc;	/* Area for per_proc addresses */
	int cpu;

	if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);

	cpu = cpu_number();			/* Get our CPU number */
	if(target == cpu) return KERN_FAILURE;	/* Don't play with ourselves */
	if(!machine_slot[target].running) return KERN_FAILURE;	/* These guys are too young */

	mpproc = &per_proc_info[cpu];		/* Point to our block */
	tpproc = &per_proc_info[target];	/* Point to the target's block */

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */

		if(signal == SIGPwake) return KERN_SUCCESS;	/* SIGPwake can merge into all others... */

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
			return KERN_SUCCESS;			/* Don't bother to send this one... */
		}
	}

	if(!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	  (gPEClockFrequencyInfo.bus_clock_rate_hz >> 13))) {	/* Try to lock the message block with a .5ms timeout */
		return KERN_FAILURE;		/* Timed out, take your ball and go home... */
	}

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;		/* Set message order */
	tpproc->MPsigpParm1 = p1;		/* Set additional parm */
	tpproc->MPsigpParm2 = p2;		/* Set additional parm */

	__asm__ volatile("sync");		/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;		/* Set status and pass the lock */
	__asm__ volatile("eieio");		/* I'm a paranoid freak */

	PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

	return KERN_SUCCESS;			/* All is goodness and rainbows... */
}
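/*
 *	Typical use (a sketch, not a call site in this file): to ask another
 *	processor to run its AST check, a caller that already knows the target
 *	slot would do something like
 *
 *		if (cpu_signal(target_cpu, SIGPast, 0, 0) != KERN_SUCCESS) {
 *			... retry later; the target's message block was busy ...
 *		}
 *
 *	cpu_info() above shows the request/response pattern for SIGPcpureq,
 *	where p1 selects the specific function (e.g. CPRQtemp) and p2 carries
 *	the address of a status/result area that is then polled with
 *	hw_cpu_sync().
 */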
void
cpu_doshutdown(
	void)
{
	processor_doshutdown(current_processor());
}
void
cpu_sleep(
	void)
{
	struct per_proc_info	*proc_info;
	unsigned int		cpu;

	extern void (*exception_handlers[])(void);
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;
	extern void _restart_cpu(void);

	cpu = cpu_number();

	kprintf("******* About to sleep cpu %d\n", cpu);

	proc_info = &per_proc_info[cpu];

	if(proc_info->FPU_thread) fpu_save(proc_info->FPU_thread);	/* If anyone owns FPU, save it */
	proc_info->FPU_thread = 0;				/* Set no fpu owner now */

	if(proc_info->VMX_thread) vec_save(proc_info->VMX_thread);	/* If anyone owns vectors, save it */
	proc_info->VMX_thread = 0;				/* Set no vector owner now */

	if (proc_info->cpu_number == 0) {
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->intstack_top_ss = proc_info->istackptr;
#if	MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif	/* MACH_KDP || MACH_KDB */
		proc_info->interrupts_enabled = 0;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			extern void _start_cpu(void);

			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					resethandler_target.arg__paddr);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}
	}

	PE_cpu_machine_quiesce(proc_info->cpu_id);
}
void
cpu_sync_timebase(
	void)
{
	natural_t	tbu, tbl;
	boolean_t	intr;

	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */

	/* Note that syncClkSpot is in a cache aligned area */
	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
		(unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
		continue;

	/*
	 * We do the following to keep the compiler from generating extra stuff
	 * in the timebase set sequence
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;

	mttb(0);
	mttbu(tbu);
	mttb(tbl);

	syncClkSpot.ready = TRUE;

	while (*(volatile int *)&(syncClkSpot.done) == FALSE)
		continue;

	(void)ml_set_interrupts_enabled(intr);
}
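/*
 *	Summary of the timebase handshake above, with the flag names taken from
 *	struct SIGtimebase: the requesting processor clears avail/ready/done in
 *	syncClkSpot and sends SIGPcpureq/CPRQtimebase to the master.  The master
 *	(in cpu_signal_handler) calls time_base_enable(..., FALSE), publishes its
 *	64-bit timebase in abstime, and sets avail.  The requester copies abstime
 *	into its own timebase registers and sets ready; the master then calls
 *	time_base_enable(..., TRUE) and sets done, releasing the requester.
 */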