/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * File: ppc/cpu.c
 *
 * CPU-specific routines
 */

#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>

/* TODO: BOGUS TO BE REMOVED */
int real_ncpus = 1;

int wncpu = NCPUS;
resethandler_t resethandler_target;

#define MMCR0_SUPPORT_MASK 0xf83f1fff
#define MMCR1_SUPPORT_MASK 0xffc00000
#define MMCR2_SUPPORT_MASK 0x80000000

extern int debugger_pending[NCPUS];
extern int debugger_is_slave[NCPUS];
extern int debugger_holdoff[NCPUS];
extern int debugger_sync;

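/*
 * Handshake area used to pass a timebase value between processors:
 * cpu_sync_timebase() spins on avail and done while the CPRQtimebase
 * handler on the other side publishes abstime and waits for ready.
 */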
struct SIGtimebase {
	boolean_t	avail;
	boolean_t	ready;
	boolean_t	done;
	uint64_t	abstime;
};

struct per_proc_info *pper_proc_info = per_proc_info;

extern struct SIGtimebase syncClkSpot;

void cpu_sync_timebase(void);

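/*
 * cpu_control
 *
 * Apply a processor control command to the CPU in the given slot: clear the
 * performance monitor counters, load the full set of PMC/MMCR registers, or
 * load only the MMCRs. Which registers exist depends on the CPU subtype, so
 * each command is dispatched per subtype and unsupported subtypes fail.
 */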
kern_return_t
cpu_control(
	int slot_num,
	processor_info_t info,
	unsigned int count)
{
	cpu_type_t cpu_type;
	cpu_subtype_t cpu_subtype;
	processor_pm_regs_t perf_regs;
	processor_control_cmd_t cmd;
	boolean_t oldlevel;

	cpu_type = machine_slot[slot_num].cpu_type;
	cpu_subtype = machine_slot[slot_num].cpu_subtype;
	cmd = (processor_control_cmd_t) info;

	if (count < PROCESSOR_CONTROL_CMD_COUNT)
		return(KERN_FAILURE);

	if ( cpu_type != cmd->cmd_cpu_type ||
	     cpu_subtype != cmd->cmd_cpu_subtype)
		return(KERN_FAILURE);

	switch (cmd->cmd_op)
	{
	case PROCESSOR_PM_CLR_PMC:		/* Clear Performance Monitor Counters */
		switch (cpu_subtype)
		{
		case CPU_SUBTYPE_POWERPC_604:
			{
				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				mtpmc1(0x0);
				mtpmc2(0x0);
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
				return(KERN_SUCCESS);
			}
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				mtpmc1(0x0);
				mtpmc2(0x0);
				mtpmc3(0x0);
				mtpmc4(0x0);
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
				return(KERN_SUCCESS);
			}
		default:
			return(KERN_FAILURE);
		} /* cpu_subtype */
	case PROCESSOR_PM_SET_REGS:		/* Set Performance Monitor Registers */
		switch (cpu_subtype)
		{
		case CPU_SUBTYPE_POWERPC_604:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT
				+ PROCESSOR_PM_REGS_COUNT_POWERPC_604))
				return(KERN_FAILURE);
			else
			{
				perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
				mtpmc1(PERFMON_PMC1(perf_regs));
				mtpmc2(PERFMON_PMC2(perf_regs));
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
				return(KERN_SUCCESS);
			}
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);
			else
			{
				perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
				mtpmc1(PERFMON_PMC1(perf_regs));
				mtpmc2(PERFMON_PMC2(perf_regs));
				mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
				mtpmc3(PERFMON_PMC3(perf_regs));
				mtpmc4(PERFMON_PMC4(perf_regs));
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
				return(KERN_SUCCESS);
			}
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);
			else
			{
				perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
				mtpmc1(PERFMON_PMC1(perf_regs));
				mtpmc2(PERFMON_PMC2(perf_regs));
				mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
				mtpmc3(PERFMON_PMC3(perf_regs));
				mtpmc4(PERFMON_PMC4(perf_regs));
				mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
				return(KERN_SUCCESS);
			}
		default:
			return(KERN_FAILURE);
		} /* switch cpu_subtype */
	case PROCESSOR_PM_SET_MMCR:
		switch (cpu_subtype)
		{
		case CPU_SUBTYPE_POWERPC_604:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_604))
				return(KERN_FAILURE);
			else
			{
				perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
				mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
				return(KERN_SUCCESS);
			}
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);
			else
			{
				perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
				mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
				return(KERN_SUCCESS);
			}
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);
			else
			{
				perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
				mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
				mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
				return(KERN_SUCCESS);
			}
		default:
			return(KERN_FAILURE);
		} /* cpu_subtype */
	default:
		return(KERN_FAILURE);
	} /* switch cmd_op */
}

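/*
 * cpu_info_count
 *
 * Return, in *count, the size of the info structure for the given flavor.
 * All CPUs are assumed to be of the same subtype as slot 0.
 */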
kern_return_t
cpu_info_count(
	processor_flavor_t flavor,
	unsigned int *count)
{
	cpu_subtype_t cpu_subtype;

	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	cpu_subtype = machine_slot[0].cpu_subtype;
	switch (flavor) {
	case PROCESSOR_PM_REGS_INFO:
		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

		default:
			*count = 0;
			return(KERN_INVALID_ARGUMENT);
		} /* switch cpu_subtype */

	case PROCESSOR_TEMPERATURE:
		*count = PROCESSOR_TEMPERATURE_COUNT;
		return(KERN_SUCCESS);

	default:
		*count = 0;
		return(KERN_INVALID_ARGUMENT);
	}
}

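/*
 * cpu_info
 *
 * Return processor information for the given slot: a snapshot of the
 * performance monitor registers, or the processor temperature. A remote
 * CPU's temperature is obtained by signaling it with a CPRQtemp request.
 */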
kern_return_t
cpu_info(
	processor_flavor_t flavor,
	int slot_num,
	processor_info_t info,
	unsigned int *count)
{
	cpu_subtype_t cpu_subtype;
	processor_pm_regs_t perf_regs;
	boolean_t oldlevel;
	unsigned int temp[2];

	cpu_subtype = machine_slot[slot_num].cpu_subtype;

	switch (flavor) {
	case PROCESSOR_PM_REGS_INFO:

		perf_regs = (processor_pm_regs_t) info;

		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs) = mfpmc3();
			PERFMON_PMC4(perf_regs) = mfpmc4();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs) = mfpmc3();
			PERFMON_PMC4(perf_regs) = mfpmc4();
			PERFMON_MMCR2(perf_regs) = mfmmcr2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch cpu_subtype */

	case PROCESSOR_TEMPERATURE:			/* Get the temperature of a processor */

		disable_preemption();			/* Don't move me now */

		if(slot_num == cpu_number()) {		/* Is this for the local CPU? */
			*info = ml_read_temp();		/* Get the temperature */
		}
		else {					/* For another CPU */
			temp[0] = -1;			/* Set sync flag */
			eieio();
			sync();
			temp[1] = -1;			/* Set invalid temperature */
			(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp, (unsigned int)&temp);	/* Ask him to take his temperature */
			(void)hw_cpu_sync(temp, LockTimeOut);	/* Wait for the other processor to get its temperature */
			*info = temp[1];		/* Pass it back */
		}

		enable_preemption();			/* Ok to move now */
		return(KERN_SUCCESS);

	default:
		return(KERN_INVALID_ARGUMENT);

	} /* flavor */
}

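/*
 * cpu_init
 *
 * Mark the current processor's machine slot as running and fill in its
 * CPU type and subtype from the per_proc information.
 */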
void
cpu_init(
	void)
{
	int cpu;

	cpu = cpu_number();

	machine_slot[cpu].running = TRUE;
	machine_slot[cpu].cpu_type = CPU_TYPE_POWERPC;
	machine_slot[cpu].cpu_subtype = (cpu_subtype_t)per_proc_info[cpu].pf.rptdProc;
}

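/*
 * cpu_machine_init
 *
 * Finish machine-dependent initialization of this processor. A slave
 * processor waits until the master is signal-ready and then syncs its
 * timebase with the master's before marking itself BootDone|SignalReady.
 */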
void
cpu_machine_init(
	void)
{
	struct per_proc_info *tproc_info;
	volatile struct per_proc_info *mproc_info;
	int cpu;

	/* TODO: release mutex lock reset_handler_lock */

	cpu = cpu_number();
	tproc_info = &per_proc_info[cpu];
	mproc_info = &per_proc_info[master_cpu];
	PE_cpu_machine_init(tproc_info->cpu_id, !(tproc_info->cpu_flags & BootDone));
	if (cpu != master_cpu) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}
	ml_init_interrupt();
	tproc_info->cpu_flags |= BootDone|SignalReady;
}

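/*
 * cpu_register
 *
 * Claim the first free machine slot for a new processor, returning its
 * number in *target_cpu, or -1 and KERN_FAILURE if all slots are taken.
 */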
kern_return_t
cpu_register(
	int *target_cpu
)
{
	int cpu;

	/*
	 * TODO:
	 * - Run cpu_register() in exclusion mode
	 */

	*target_cpu = -1;
	for(cpu=0; cpu < wncpu; cpu++) {
		if(!machine_slot[cpu].is_cpu) {
			machine_slot[cpu].is_cpu = TRUE;
			*target_cpu = cpu;
			break;
		}
	}
	if (*target_cpu != -1) {
		real_ncpus++;
		return KERN_SUCCESS;
	} else
		return KERN_FAILURE;
}

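/*
 * cpu_start
 *
 * Start the processor in the given slot. Starting ourselves just completes
 * initialization; starting another CPU sets up its per_proc block and
 * stacks, installs the reset handler if the CPU starts at the reset vector,
 * passes it our current timebase, and asks the platform to kick it off.
 */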
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info *proc_info;
	kern_return_t ret;

	extern void (*exception_handlers[])(void);
	extern vm_offset_t intstack;
	extern vm_offset_t debstack;

	proc_info = &per_proc_info[cpu];

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		extern void _start_cpu(void);

		proc_info->cpu_number = cpu;
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
		proc_info->interrupts_enabled = 0;
		proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu];
		proc_info->active_stacks = (unsigned int)&active_stacks[cpu];
		proc_info->need_ast = (unsigned int)&need_ast[cpu];
		proc_info->FPU_owner = 0;
		proc_info->VMX_owner = 0;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: get mutex lock reset_handler_lock */

			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      resethandler_target.arg__paddr);

		}
/*
 * Note: we pass the current time to the other processor here. He will load it
 * as early as possible so that there is a chance that it is close to accurate.
 * After the machine is up a while, we will officially resync the clocks so
 * that all processors are the same. This is just to get close.
 */

		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);	/* Pass our current time to the other guy */

		__asm__ volatile("sync");	/* Commit to storage */
		__asm__ volatile("isync");	/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
				   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS &&
		    proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: release mutex lock reset_handler_lock */
		}
		return(ret);
	}
}

/*
 * Here is where we implement the receiver of the signaling protocol.
 * We wait for the signal status area to be passed to us. Then we snarf
 * up the status, the sender, and the 3 potential parms. Next we release
 * the lock and signal the other guy.
 */

void
cpu_signal_handler(
	void)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
	unsigned int *parmAddr;
	struct per_proc_info *pproc;		/* Area for my per_proc address */
	int cpu;
	struct SIGtimebase *timebaseAddr;
	natural_t tbu, tbu2, tbl;

	cpu = cpu_number();			/* Get the CPU number */
	pproc = &per_proc_info[cpu];		/* Point to our block */

/*
 * Since we've been signaled, wait about 31 ms for the signal lock to pass
 */
	if(!hw_lock_mbits(&pproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	holdStat = pproc->MPsigpStat;		/* Snarf stat word */
	holdParm0 = pproc->MPsigpParm0;		/* Snarf parameter */
	holdParm1 = pproc->MPsigpParm1;		/* Snarf parameter */
	holdParm2 = pproc->MPsigpParm2;		/* Snarf parameter */

	__asm__ volatile("isync");		/* Make sure we don't unlock until memory is in */

	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {	/* Decode function code */

		case MPsigpIdle:		/* Was function cancelled? */
			return;			/* Yup... */

		case MPsigpSigp:		/* Signal Processor message? */

			switch (holdParm0) {	/* Decode SIGP message order */

				case SIGPast:	/* Should we do an AST? */
					pproc->numSIGPast++;	/* Count this one */
#if 0
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
					ast_check(cpu_to_processor(cpu));
					return;	/* All done... */

				case SIGPcpureq:	/* CPU specific function? */

					pproc->numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {	/* Select specific function */

						case CPRQtemp:	/* Get the temperature */
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							parmAddr[1] = ml_read_temp();	/* Get the core temperature */
							eieio();	/* Force order */
							sync();		/* Force to memory */
							parmAddr[0] = 0;	/* Show we're done */
							return;

						case CPRQtimebase:

							timebaseAddr = (struct SIGtimebase *)holdParm2;

							if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
								pproc->time_base_enable(pproc->cpu_id, FALSE);

							timebaseAddr->abstime = 0;	/* Touch to force into cache */
							sync();

							do {
								asm volatile("	mftbu %0" : "=r" (tbu));
								asm volatile("	mftb %0" : "=r" (tbl));
								asm volatile("	mftbu %0" : "=r" (tbu2));
							} while (tbu != tbu2);

							timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
							sync();		/* Force order */

							timebaseAddr->avail = TRUE;

							while (*(volatile int *)&(syncClkSpot.ready) == FALSE);

							if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
								pproc->time_base_enable(pproc->cpu_id, TRUE);

							timebaseAddr->done = TRUE;

							return;

						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
							return;
					}

				case SIGPdebug:	/* Enter the debugger? */

					pproc->numSIGPdebug++;	/* Count this one */
					debugger_is_slave[cpu]++;	/* Bump up the count to show we're here */
					hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
					return;		/* All done now... */

				case SIGPwake:	/* Wake up CPU */
					pproc->numSIGPwake++;	/* Count this one */
					return;		/* No need to do anything, the interrupt does it all... */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
					return;

			}

		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
			return;

	}
	panic("cpu_signal_handler: we should never get here\n");
}

/*
 * Here is where we send a message to another processor. So far we only have two:
 * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
 * currently disabled). SIGPdebug is used to enter the debugger.
 *
 * We set up the SIGP function to indicate that this is a simple message and set the
 * order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
 * block for the target, we lock the message block. Then we set the parameter(s).
 * Next we change the lock (also called "busy") to "passing" and finally signal
 * the other processor. Note that we only wait about 1ms to get the message lock.
 * If we time out, we return failure to our caller. It is their responsibility to
 * recover.
 */

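/*
 * As a concrete example of the send side, the temperature path in cpu_info()
 * above issues:
 *
 *	(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp, (unsigned int)&temp);
 *
 * and then waits in hw_cpu_sync() for the target's handler to deposit the
 * reading and clear the sync flag.
 */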
kern_return_t
cpu_signal(
	int target,
	int signal,
	unsigned int p1,
	unsigned int p2)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
	struct per_proc_info *tpproc, *mpproc;	/* Area for per_proc addresses */
	int cpu;
	int busybitset = 0;

#if DEBUG
	if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

	cpu = cpu_number();			/* Get our CPU number */
	if(target == cpu) return KERN_FAILURE;	/* Don't play with ourselves */
	if(!machine_slot[target].running) return KERN_FAILURE;	/* These guys are too young */

	mpproc = &per_proc_info[cpu];		/* Point to our block */
	tpproc = &per_proc_info[target];	/* Point to the target's block */

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */

		if(signal == SIGPwake) {	/* SIGPwake can merge into all others... */
			mpproc->numSIGPmwake++;	/* Account for merged wakes */
			return KERN_SUCCESS;
		}

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
			mpproc->numSIGPmast++;	/* Account for merged ASTs */
			return KERN_SUCCESS;	/* Don't bother to send this one... */
		}

		if (tpproc->MPsigpParm0 == SIGPwake) {
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
			  (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
				busybitset = 1;
				mpproc->numSIGPmwake++;
			}
		}
	}

	if((busybitset == 0) &&
	  (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	  (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
		mpproc->numSIGPtimo++;		/* Account for timeouts */
		return KERN_FAILURE;		/* Timed out, take your ball and go home... */
	}

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;		/* Set message order */
	tpproc->MPsigpParm1 = p1;		/* Set additional parm */
	tpproc->MPsigpParm2 = p2;		/* Set additional parm */

	__asm__ volatile("sync");		/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;		/* Set status and pass the lock */
	__asm__ volatile("eieio");		/* I'm a paranoid freak */

	if (busybitset == 0)
		PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

	return KERN_SUCCESS;			/* All is goodness and rainbows... */
}

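/*
 * cpu_doshutdown
 *
 * Shut down the current processor via processor_doshutdown().
 */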
void
cpu_doshutdown(
	void)
{
	processor_doshutdown(current_processor());
}

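/*
 * cpu_sleep
 *
 * Prepare the current processor for sleep: save any live FPU and vector
 * contexts, and on the boot processor reset the stacks and reinstall the
 * reset handler so the CPU comes back up cleanly through _start_cpu().
 */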
void
cpu_sleep(
	void)
{
	struct per_proc_info *proc_info;
	unsigned int cpu;
	facility_context *fowner;
	extern void (*exception_handlers[])(void);
	extern vm_offset_t intstack;
	extern vm_offset_t debstack;
	extern void _restart_cpu(void);

	cpu = cpu_number();
#if 0
	kprintf("******* About to sleep cpu %d\n", cpu);
#endif

	proc_info = &per_proc_info[cpu];

	fowner = proc_info->FPU_owner;		/* Cache this */
	if(fowner) fpu_save(fowner);		/* If anyone owns FPU, save it */
	proc_info->FPU_owner = 0;		/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;		/* Cache this */
	if(fowner) vec_save(fowner);		/* If anyone owns vectors, save it */
	proc_info->VMX_owner = 0;		/* Set no vector owner now */

	if (proc_info->cpu_number == 0) {
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
		proc_info->interrupts_enabled = 0;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			extern void _start_cpu(void);

			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      resethandler_target.arg__paddr);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}
	}

	PE_cpu_machine_quiesce(proc_info->cpu_id);
}

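/*
 * cpu_sync_timebase
 *
 * Sync this processor's timebase with the master's: send a CPRQtimebase
 * request, wait for the master to publish its timebase in syncClkSpot,
 * load it into our own timebase registers, then complete the handshake
 * so the master can re-enable its timebase.
 */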
void
cpu_sync_timebase(
	void)
{
	natural_t tbu, tbl;
	boolean_t intr;

	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */

	/* Note that syncClkSpot is in a cache aligned area */
	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
		(unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
		continue;

	isync();

	/*
	 * We do the following to keep the compiler from generating extra stuff
	 * in the timebase-setting sequence
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;

	mttb(0);
	mttbu(tbu);
	mttb(tbl);

	syncClkSpot.ready = TRUE;

	while (*(volatile int *)&(syncClkSpot.done) == FALSE)
		continue;

	(void)ml_set_interrupts_enabled(intr);
}