/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/processor_info.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>
#include <kern/pms.h>

#include <vm/pmap.h>
#include <IOKit/IOHibernatePrivate.h>

#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/trap.h>
#include <ppc/machine_cpu.h>
#include <ppc/rtclock.h>

decl_mutex_data(static, ppt_lock);

unsigned int        real_ncpus = 1;
unsigned int        max_ncpus  = MAX_CPUS;

decl_simple_lock_data(static, rht_lock);

static unsigned int rht_state = 0;
#define RHT_WAIT    0x01
#define RHT_BUSY    0x02

decl_simple_lock_data(static, SignalReadyLock);

struct SIGtimebase {
    volatile boolean_t  avail;
    volatile boolean_t  ready;
    volatile boolean_t  done;
    uint64_t            abstime;
};

perfCallback        perfCpuSigHook = 0;     /* Pointer to CHUD cpu signal hook routine */

extern int          debugger_sync;

/*
 * Forward definitions
 */

void    cpu_sync_timebase(
            void);

void    cpu_timebase_signal_handler(
            struct per_proc_info    *proc_info,
            struct SIGtimebase      *timebaseAddr);

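/*
 * Note on the SIGtimebase handshake (cpu_sync_timebase() and
 * cpu_timebase_signal_handler() below): a slave allocates a SIGtimebase on
 * its own stack and passes its address to the master via SIGPcpureq/
 * CPRQtimebase.  The master publishes its timebase in abstime and sets
 * avail; the slave copies the value into its TBR and sets ready; the
 * master then sets done.  The volatile flags make the spin loops on each
 * side safe without any further locking.
 */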

/*
 *  Routine:    cpu_bootstrap
 *  Function:   Initialize the locks that guard CPU registration and bring-up.
 */
void
cpu_bootstrap(
    void)
{
    simple_lock_init(&rht_lock, 0);
    simple_lock_init(&SignalReadyLock, 0);
    mutex_init(&ppt_lock, 0);
}


/*
 *  Routine:    cpu_init
 *  Function:   Per-processor initialization: restore the timebase saved at
 *              sleep and set the processor's type, subtype, and running state.
 */
void
cpu_init(
    void)
{
    struct per_proc_info *proc_info;

    proc_info = getPerProc();

    /*
     * Restore the TBR.
     */
    if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
        mttb(0);
        mttbu(proc_info->save_tbu);
        mttb(proc_info->save_tbl);
    }

    proc_info->rtcPop = EndOfAllTime;   /* forget any existing decrementer setting */
    etimer_resync_deadlines();          /* Now that the time base is sort of correct, request the next timer pop */

    proc_info->cpu_type = CPU_TYPE_POWERPC;
    proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
    proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
    proc_info->running = TRUE;
}

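/*
 * Note: the mttb(0)/mttbu()/mttb() sequence in cpu_init() is the standard
 * idiom for writing a 64-bit timebase with two 32-bit moves: zeroing TBL
 * first guarantees the low half cannot carry into TBU between the writes.
 */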

/*
 *  Routine:    cpu_machine_init
 *  Function:   Finish bringing this processor up: release the reset-handler
 *              lock (slaves), do platform init, handle wake-from-hibernate,
 *              sync the timebase with the master, and mark the processor
 *              SignalReady.
 */
void
cpu_machine_init(
    void)
{
    struct per_proc_info            *proc_info;
    volatile struct per_proc_info   *mproc_info;

    proc_info = getPerProc();
    mproc_info = PerProcTable[master_cpu].ppe_vaddr;

    if (proc_info != mproc_info) {
        simple_lock(&rht_lock);
        if (rht_state & RHT_WAIT)
            thread_wakeup(&rht_state);
        rht_state &= ~(RHT_BUSY|RHT_WAIT);
        simple_unlock(&rht_lock);
    }

    PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

    if (proc_info->hibernate) {
        uint32_t    tbu, tbl;

        do {
            tbu = mftbu();
            tbl = mftb();
        } while (mftbu() != tbu);

        proc_info->hibernate = 0;
        hibernate_machine_init();

        // hibernate_machine_init() could take minutes and we don't want timeouts
        // to fire as soon as scheduling starts. Reset timebase so it appears
        // no time has elapsed, as it would for regular sleep.
        mttb(0);
        mttbu(tbu);
        mttb(tbl);
    }

    if (proc_info != mproc_info) {
        while (!((mproc_info->cpu_flags) & SignalReady))
            continue;
        cpu_sync_timebase();
    }

    ml_init_interrupt();
    if (proc_info != mproc_info)
        simple_lock(&SignalReadyLock);
    proc_info->cpu_flags |= BootDone|SignalReady;
    if (proc_info != mproc_info) {
        if (proc_info->ppXFlags & SignalReadyWait) {
            hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
            thread_wakeup(&proc_info->cpu_flags);
        }
        simple_unlock(&SignalReadyLock);
        pmsPark();      /* Timers should be cool now, park the power management stepper */
    }
}

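/*
 * The do/while loop in cpu_machine_init() (also used in cpu_sleep() and
 * cpu_timebase_signal_handler()) is the race-free way to read a 64-bit
 * timebase with 32-bit reads: if TBU changed while TBL was being read, the
 * pair is inconsistent, so the read retries until TBU is stable.
 */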

/*
 *  Routine:    cpu_per_proc_alloc
 *  Function:   Allocate and initialize a per_proc block, its interrupt
 *              stack, and its debugger stack for a new processor.
 */
struct per_proc_info *
cpu_per_proc_alloc(
    void)
{
    struct per_proc_info    *proc_info = 0;
    void                    *interrupt_stack = 0;
    void                    *debugger_stack = 0;

    if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
        return (struct per_proc_info *)NULL;
    if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
        kfree(proc_info, sizeof(struct per_proc_info));
        return (struct per_proc_info *)NULL;
    }

    if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
        kfree(proc_info, sizeof(struct per_proc_info));
        kfree(interrupt_stack, INTSTACK_SIZE);
        return (struct per_proc_info *)NULL;
    }

    bzero((void *)proc_info, sizeof(struct per_proc_info));

    proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT;   /* Set physical address of the second page */
    proc_info->next_savearea = (uint64_t)save_get_init();
    proc_info->pf = BootProcInfo.pf;
    proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
    proc_info->intstack_top_ss = proc_info->istackptr;
    proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
    proc_info->debstack_top_ss = proc_info->debstackptr;

    return proc_info;
}

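/*
 * Each stack pointer above is set FM_SIZE bytes below the top of its
 * allocation, leaving room for an initial PowerPC stack frame;
 * cpu_per_proc_free() reverses the same arithmetic to recover the original
 * kalloc() addresses before freeing.
 */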

/*
 *  Routine:    cpu_per_proc_free
 *  Function:   Release a per_proc block and its stacks; the master
 *              processor's block is never freed.
 */
void
cpu_per_proc_free(
    struct per_proc_info    *proc_info
)
{
    if (proc_info->cpu_number == master_cpu)
        return;
    kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
    kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
    kfree((void *)proc_info, sizeof(struct per_proc_info));     /* Release the per_proc */
}


/*
 *  Routine:    cpu_per_proc_register
 *  Function:   Enter a newly allocated per_proc block into PerProcTable
 *              and assign it the next free cpu number.
 */
kern_return_t
cpu_per_proc_register(
    struct per_proc_info    *proc_info
)
{
    int cpu;

    mutex_lock(&ppt_lock);
    if (real_ncpus >= max_ncpus) {
        mutex_unlock(&ppt_lock);
        return KERN_FAILURE;
    }
    cpu = real_ncpus;
    proc_info->cpu_number = cpu;
    PerProcTable[cpu].ppe_vaddr = proc_info;
    PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;
    eieio();
    real_ncpus++;
    mutex_unlock(&ppt_lock);
    return KERN_SUCCESS;
}

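/*
 * The eieio() in cpu_per_proc_register() orders the PerProcTable stores
 * ahead of the real_ncpus increment, so any reader that observes the new
 * count also observes a fully initialized table entry.
 */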

/*
 *  Routine:    cpu_start
 *  Function:   Start a processor: either finish initializing the current
 *              one, or set up the reset handler and kick a secondary
 *              processor out of reset.
 */
kern_return_t
cpu_start(
    int cpu)
{
    struct per_proc_info    *proc_info;
    kern_return_t           ret;
    mapping_t               *mp;

    proc_info = PerProcTable[cpu].ppe_vaddr;

    if (cpu == cpu_number()) {
        PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
        ml_init_interrupt();
        proc_info->cpu_flags |= BootDone|SignalReady;

        return KERN_SUCCESS;
    } else {
        proc_info->cpu_flags &= BootDone;
        proc_info->interrupts_enabled = 0;
        proc_info->pending_ast = AST_NONE;
        proc_info->istackptr = proc_info->intstack_top_ss;
        proc_info->rtcPop = EndOfAllTime;
        proc_info->FPU_owner = 0;
        proc_info->VMX_owner = 0;
        proc_info->pms.pmsStamp = 0;                /* Dummy transition time */
        proc_info->pms.pmsPop = EndOfAllTime;       /* Set the pop way into the future */
        proc_info->pms.pmsState = pmsParked;        /* Park the stepper */
        proc_info->pms.pmsCSetCmd = pmsCInit;       /* Set dummy initial hardware state */
        mp = (mapping_t *)(&proc_info->ppUMWmp);
        mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
        mp->mpSpace = invalSpace;

        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

            simple_lock(&rht_lock);
            while (rht_state & RHT_BUSY) {
                rht_state |= RHT_WAIT;
                thread_sleep_usimple_lock((event_t)&rht_state,
                                          &rht_lock, THREAD_UNINT);
            }
            rht_state |= RHT_BUSY;
            simple_unlock(&rht_lock);

            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          RESET_HANDLER_START);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          (vm_offset_t)_start_cpu);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          (vm_offset_t)&PerProcTable[cpu]);
        }
        /*
         *  Note: we pass the current time to the other processor here. He will load it
         *  as early as possible so that there is a chance that it is close to accurate.
         *  After the machine is up a while, we will officially resync the clocks so
         *  that all processors are the same.  This is just to get close.
         */

        ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

        __asm__ volatile("sync");                   /* Commit to storage */
        __asm__ volatile("isync");                  /* Wait a second */
        ret = PE_cpu_start(proc_info->cpu_id,
                           proc_info->start_paddr, (vm_offset_t)proc_info);

        if (ret != KERN_SUCCESS) {
            if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
                simple_lock(&rht_lock);
                if (rht_state & RHT_WAIT)
                    thread_wakeup(&rht_state);
                rht_state &= ~(RHT_BUSY|RHT_WAIT);
                simple_unlock(&rht_lock);
            }
        } else {
            simple_lock(&SignalReadyLock);
            if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
                hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
                thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
                                         &SignalReadyLock, THREAD_UNINT);
            }
            simple_unlock(&SignalReadyLock);
        }
        return(ret);
    }
}

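/*
 * The rht_state/rht_lock dance in cpu_start() serializes ownership of the
 * shared reset handler vector: only one processor at a time may have the
 * T_RESET vector pointed at _start_cpu (RHT_BUSY).  cpu_machine_init() on
 * the newly started processor, or the failure path above, clears the busy
 * bit and wakes any waiter.
 */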

/*
 *  Routine:    cpu_exit_wait
 *  Function:   Spin until the given processor has marked itself asleep.
 */
void
cpu_exit_wait(
    int cpu)
{
    struct per_proc_info    *tpproc;

    if (cpu != master_cpu) {
        tpproc = PerProcTable[cpu].ppe_vaddr;
        while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {}
    }
}


/*
 *  Routine:    cpu_doshutdown
 *  Function:   Shut down the current processor via processor_offline().
 */
void
cpu_doshutdown(
    void)
{
    enable_preemption();
    processor_offline(current_processor());
}


/*
 *  Routine:    cpu_sleep
 *  Function:   Put the current processor to sleep, saving the timebase and
 *              any live floating point/vector state; the master additionally
 *              installs the reset handler and waits for all other processors
 *              to reach sleep first.
 */
void
cpu_sleep(
    void)
{
    struct per_proc_info    *proc_info;
    unsigned int            i;
    unsigned int            wait_ncpus_sleep, ncpus_sleep;
    facility_context        *fowner;

    proc_info = getPerProc();

    proc_info->running = FALSE;

    fowner = proc_info->FPU_owner;      /* Cache this */
    if(fowner) fpu_save(fowner);        /* If anyone owns FPU, save it */
    proc_info->FPU_owner = 0;           /* Set no fpu owner now */

    fowner = proc_info->VMX_owner;      /* Cache this */
    if(fowner) vec_save(fowner);        /* If anyone owns vectors, save it */
    proc_info->VMX_owner = 0;           /* Set no vector owner now */

    if (proc_info->cpu_number == master_cpu) {
        proc_info->cpu_flags &= BootDone;
        proc_info->interrupts_enabled = 0;
        proc_info->pending_ast = AST_NONE;

        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          RESET_HANDLER_START);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          (vm_offset_t)_start_cpu);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          (vm_offset_t)&PerProcTable[master_cpu]);

            __asm__ volatile("sync");
            __asm__ volatile("isync");
        }

        wait_ncpus_sleep = real_ncpus-1;
        ncpus_sleep = 0;
        while (wait_ncpus_sleep != ncpus_sleep) {
            ncpus_sleep = 0;
            for(i=1; i < real_ncpus ; i++) {
                if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
                    ncpus_sleep++;
            }
        }
    }

    /*
     * Save the TBR before stopping.
     */
    do {
        proc_info->save_tbu = mftbu();
        proc_info->save_tbl = mftb();
    } while (mftbu() != proc_info->save_tbu);

    PE_cpu_machine_quiesce(proc_info->cpu_id);
}

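/*
 * PPC context-switches floating point and vector state lazily, so at sleep
 * time some thread may still own the live FPU or VMX registers; cpu_sleep()
 * flushes that state to its savearea because register contents do not
 * survive the processor being quiesced.
 */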

/*
 *  Routine:    cpu_signal
 *  Function:
 *  Here is where we send a message to another processor.  The basic orders
 *  are SIGPast (preempt and kick off threads), SIGPdebug (enter the
 *  debugger), SIGPwake (wake from nap), SIGPcall (call a function), and
 *  SIGPcpureq (CPU-specific requests).
 *
 *  We set up the SIGP function to indicate that this is a simple message and
 *  set the order code (MPsigpParm0) to the desired order, e.g., SIGPast or
 *  SIGPdebug.  After finding the per_proc block for the target, we lock the
 *  message block.  Then we set the parameter(s).  Next we change the lock
 *  (also called "busy") to "passing" and finally signal the other processor.
 *  Note that we only wait about half a millisecond to get the message lock.
 *  If we time out, we return failure to our caller.  It is their
 *  responsibility to recover.
 */
kern_return_t
cpu_signal(
    int             target,
    int             signal,
    unsigned int    p1,
    unsigned int    p2)
{
    unsigned int            holdStat;
    struct per_proc_info    *tpproc, *mpproc;
    int                     busybitset = 0;

#if DEBUG
    if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

    mpproc = getPerProc();                          /* Point to our block */
    tpproc = PerProcTable[target].ppe_vaddr;        /* Point to the target's block */
    if(mpproc == tpproc) return KERN_FAILURE;       /* Cannot signal ourselves */

    if(!tpproc->running) return KERN_FAILURE;

    if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

    if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {   /* Is there an unreceived message already pending? */

        if(signal == SIGPwake) {                    /* SIGPwake can merge into all others... */
            mpproc->hwCtr.numSIGPmwake++;           /* Account for merged wakes */
            return KERN_SUCCESS;
        }

        if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {   /* We can merge ASTs */
            mpproc->hwCtr.numSIGPmast++;            /* Account for merged ASTs */
            return KERN_SUCCESS;                    /* Don't bother to send this one... */
        }

        if (tpproc->MPsigpParm0 == SIGPwake) {
            if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
                              (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
                busybitset = 1;
                mpproc->hwCtr.numSIGPmwake++;
            }
        }
    }

    if((busybitset == 0) &&
       (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
       (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {     /* Try to lock the message block with a .5ms timeout */
        mpproc->hwCtr.numSIGPtimo++;                /* Account for timeouts */
        return KERN_FAILURE;                        /* Timed out, take your ball and go home... */
    }

    holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number;    /* Set up the signal status word */
    tpproc->MPsigpParm0 = signal;                   /* Set message order */
    tpproc->MPsigpParm1 = p1;                       /* Set additional parm */
    tpproc->MPsigpParm2 = p2;                       /* Set additional parm */

    __asm__ volatile("sync");                       /* Make sure it's all there */

    tpproc->MPsigpStat = holdStat;                  /* Set status and pass the lock */
    __asm__ volatile("eieio");                      /* I'm a paranoid freak */

    if (busybitset == 0)
        PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);  /* Kick the other processor */

    return KERN_SUCCESS;                            /* All is goodness and rainbows... */
}

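/*
 * The per-processor message area used above is a single-slot mailbox:
 * MPsigpStat doubles as the lock word and the status word.  "Busy" means a
 * sender owns the slot, "passing" means a message is in flight, and the
 * receiver clears both once it has copied out the parameters.  The merging
 * cases in cpu_signal() keep redundant wake and AST orders from tying up
 * the slot.
 */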

/*
 *  Routine:    cpu_signal_handler
 *  Function:
 *  Here is where we implement the receiver of the signaling protocol.
 *  We wait for the signal status area to be passed to us. Then we snarf
 *  up the status, the sender, and the 3 potential parms. Next we release
 *  the lock and signal the other guy.
 */
void
cpu_signal_handler(
    void)
{
    unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
    unsigned int *parmAddr;
    struct per_proc_info    *proc_info;
    int cpu;
    broadcastFunc xfunc;
    cpu = cpu_number();                             /* Get the CPU number */

    proc_info = getPerProc();

    /*
     *  Since we've been signaled, wait about 31 ms for the signal lock to pass
     */
    if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
      (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
        panic("cpu_signal_handler: Lock pass timed out\n");
    }

    holdStat = proc_info->MPsigpStat;               /* Snarf stat word */
    holdParm0 = proc_info->MPsigpParm0;             /* Snarf parameter */
    holdParm1 = proc_info->MPsigpParm1;             /* Snarf parameter */
    holdParm2 = proc_info->MPsigpParm2;             /* Snarf parameter */

    __asm__ volatile("isync");                      /* Make sure we don't unlock until memory is in */

    proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);  /* Release lock */

    switch ((holdStat & MPsigpFunc) >> 8) {         /* Decode function code */

        case MPsigpIdle:                            /* Was function cancelled? */
            return;                                 /* Yup... */

        case MPsigpSigp:                            /* Signal Processor message? */

            switch (holdParm0) {                    /* Decode SIGP message order */

                case SIGPast:                       /* Should we do an AST? */
                    proc_info->hwCtr.numSIGPast++;  /* Count this one */
#if 0
                    kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
                    ast_check((processor_t)proc_info->processor);
                    return;                         /* All done... */

                case SIGPcpureq:                    /* CPU specific function? */

                    proc_info->hwCtr.numSIGPcpureq++;   /* Count this one */
                    switch (holdParm1) {            /* Select specific function */

                        case CPRQtimebase:

                            cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
                            return;

                        case CPRQsegload:
                            return;

                        case CPRQchud:
                            parmAddr = (unsigned int *)holdParm2;   /* Get the destination address */
                            if(perfCpuSigHook) {
                                struct savearea *ssp = current_thread()->machine.pcb;
                                if(ssp) {
                                    (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
                                }
                            }
                            parmAddr[1] = 0;
                            parmAddr[0] = 0;        /* Show we're done */
                            return;

                        case CPRQscom:
                            if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing? */
                                ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
                            }
                            else {                  /* No, reading... */
                                ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
                            }
                            return;

                        case CPRQsps:
                            {
                                ml_set_processor_speed_slave(holdParm2);
                                return;
                            }
                        default:
                            panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
                            return;
                    }

                case SIGPdebug:                     /* Enter the debugger? */

                    proc_info->hwCtr.numSIGPdebug++;    /* Count this one */
                    proc_info->debugger_is_slave++;     /* Bump up the count to show we're here */
                    hw_atomic_sub(&debugger_sync, 1);   /* Show we've received the 'rupt */
                    __asm__ volatile("tw 4,r3,r3");     /* Enter the debugger */
                    return;                         /* All done now... */

                case SIGPwake:                      /* Wake up CPU */
                    proc_info->hwCtr.numSIGPwake++; /* Count this one */
                    return;                         /* No need to do anything, the interrupt does it all... */

                case SIGPcall:                      /* Call function on CPU */
                    proc_info->hwCtr.numSIGPcall++; /* Count this one */
                    xfunc = (broadcastFunc)holdParm1;   /* Recover the function pointer from the parameter word */
                    xfunc(holdParm2);               /* Call the passed function */
                    return;                         /* Done... */

                default:
                    panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
                    return;

            }

        default:
            panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
            return;

    }
    panic("cpu_signal_handler: we should never get here\n");
}

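/*
 * Note: the lock-pass timeout in cpu_signal_handler() is
 * timebase_frequency_hz >> 5, i.e. 1/32 of a second of timebase ticks,
 * which is where "about 31 ms" comes from; the sender side in cpu_signal()
 * uses >> 11, roughly half a millisecond.
 */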

/*
 *  Routine:    cpu_sync_timebase
 *  Function:   Called on a slave processor to copy the master's timebase.
 */
void
cpu_sync_timebase(
    void)
{
    natural_t tbu, tbl;
    boolean_t   intr;
    struct SIGtimebase  syncClkSpot;

    intr = ml_set_interrupts_enabled(FALSE);        /* No interruptions in here */

    syncClkSpot.avail = FALSE;
    syncClkSpot.ready = FALSE;
    syncClkSpot.done = FALSE;

    while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
                      (unsigned int)&syncClkSpot) != KERN_SUCCESS)
        continue;

    while (syncClkSpot.avail == FALSE)
        continue;

    isync();

    /*
     * We do the following to keep the compiler from generating extra stuff
     * in the tb set part.
     */
    tbu = syncClkSpot.abstime >> 32;
    tbl = (uint32_t)syncClkSpot.abstime;

    mttb(0);
    mttbu(tbu);
    mttb(tbl);

    syncClkSpot.ready = TRUE;

    while (syncClkSpot.done == FALSE)
        continue;

    etimer_resync_deadlines();                      /* Start the timer */
    (void)ml_set_interrupts_enabled(intr);
}


/*
 *  Routine:    cpu_timebase_signal_handler
 *  Function:   Master-side handler for CPRQtimebase: publish the current
 *              timebase for a slave that is synchronizing its clock.
 */
void
cpu_timebase_signal_handler(
    struct per_proc_info    *proc_info,
    struct SIGtimebase      *timebaseAddr)
{
    unsigned int        tbu, tbu2, tbl;

    if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
        proc_info->time_base_enable(proc_info->cpu_id, FALSE);

    timebaseAddr->abstime = 0;                      /* Touch to force into cache */
    sync();

    do {
        asm volatile("  mftbu %0" : "=r" (tbu));
        asm volatile("  mftb %0" : "=r" (tbl));
        asm volatile("  mftbu %0" : "=r" (tbu2));
    } while (tbu != tbu2);

    timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
    sync();                                         /* Force order */

    timebaseAddr->avail = TRUE;

    while (timebaseAddr->ready == FALSE)
        continue;

    if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
        proc_info->time_base_enable(proc_info->cpu_id, TRUE);

    timebaseAddr->done = TRUE;
}

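/*
 * When the platform supplies a time_base_enable callback, the master's
 * timebase is actually frozen in cpu_timebase_signal_handler() while the
 * slave copies it, so the two clocks restart closely in step; platforms
 * without the callback simply tolerate the small skew of an unfrozen read.
 */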

/*
 *  Routine:    cpu_control
 *  Function:   Manipulate the performance monitor registers of a processor
 *              on behalf of a task that has acquired the perfmon facility.
 */
kern_return_t
cpu_control(
    int                 slot_num,
    processor_info_t    info,
    unsigned int        count)
{
    struct per_proc_info    *proc_info;
    cpu_type_t              tcpu_type;
    cpu_subtype_t           tcpu_subtype;
    processor_pm_regs_t     perf_regs;
    processor_control_cmd_t cmd;
    boolean_t               oldlevel;
#define MMCR0_SUPPORT_MASK  0xf83f1fff
#define MMCR1_SUPPORT_MASK  0xffc00000
#define MMCR2_SUPPORT_MASK  0x80000000

    proc_info = PerProcTable[slot_num].ppe_vaddr;
    tcpu_type = proc_info->cpu_type;
    tcpu_subtype = proc_info->cpu_subtype;
    cmd = (processor_control_cmd_t) info;

    if (count < PROCESSOR_CONTROL_CMD_COUNT)
        return(KERN_FAILURE);

    if ( tcpu_type != cmd->cmd_cpu_type ||
         tcpu_subtype != cmd->cmd_cpu_subtype)
        return(KERN_FAILURE);

    if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
        return(KERN_RESOURCE_SHORTAGE);     /* cpu performance facility in use by another task */
    }

    switch (cmd->cmd_op)
    {
        case PROCESSOR_PM_CLR_PMC:          /* Clear Performance Monitor Counters */
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                {
                    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                    mtpmc1(0x0);
                    mtpmc2(0x0);
                    mtpmc3(0x0);
                    mtpmc4(0x0);
                    ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                    return(KERN_SUCCESS);
                }
                default:
                    return(KERN_FAILURE);
            } /* tcpu_subtype */
        case PROCESSOR_PM_SET_REGS:         /* Set Performance Monitor Registers */
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_750))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtpmc1(PERFMON_PMC1(perf_regs));
                        mtpmc2(PERFMON_PMC2(perf_regs));
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtpmc3(PERFMON_PMC3(perf_regs));
                        mtpmc4(PERFMON_PMC4(perf_regs));
                        ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtpmc1(PERFMON_PMC1(perf_regs));
                        mtpmc2(PERFMON_PMC2(perf_regs));
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtpmc3(PERFMON_PMC3(perf_regs));
                        mtpmc4(PERFMON_PMC4(perf_regs));
                        mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                default:
                    return(KERN_FAILURE);
            } /* switch tcpu_subtype */
        case PROCESSOR_PM_SET_MMCR:
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_750))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                default:
                    return(KERN_FAILURE);
            } /* tcpu_subtype */
        default:
            return(KERN_FAILURE);
    } /* switch cmd_op */
}

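/*
 * The MMCRn_SUPPORT_MASK values in cpu_control() limit which bits of the
 * user-supplied MMCR images may reach the hardware, keeping callers from
 * setting reserved or privileged control bits in the performance monitor
 * registers.
 */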

/*
 *  Routine:    cpu_info_count
 *  Function:   Return the number of words of info returned for the given
 *              processor_info flavor.
 */
kern_return_t
cpu_info_count(
    processor_flavor_t  flavor,
    unsigned int        *count)
{
    cpu_subtype_t   tcpu_subtype;

    /*
     * For now, we just assume that all CPUs are of the same type
     */
    tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
    switch (flavor) {
        case PROCESSOR_PM_REGS_INFO:
            switch (tcpu_subtype) {
                case CPU_SUBTYPE_POWERPC_750:

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
                    return(KERN_SUCCESS);

                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
                    return(KERN_SUCCESS);

                default:
                    *count = 0;
                    return(KERN_INVALID_ARGUMENT);
            } /* switch tcpu_subtype */

        case PROCESSOR_TEMPERATURE:
            *count = PROCESSOR_TEMPERATURE_COUNT;
            return (KERN_SUCCESS);

        default:
            *count = 0;
            return(KERN_INVALID_ARGUMENT);

    }
}

/*
 *  Routine:    cpu_info
 *  Function:   Return processor information for the given flavor
 *              (performance monitor registers or temperature).
 */
kern_return_t
cpu_info(
    processor_flavor_t  flavor,
    int                 slot_num,
    processor_info_t    info,
    unsigned int        *count)
{
    cpu_subtype_t       tcpu_subtype;
    processor_pm_regs_t perf_regs;
    boolean_t           oldlevel;

    tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;

    switch (flavor) {
        case PROCESSOR_PM_REGS_INFO:

            perf_regs = (processor_pm_regs_t) info;

            switch (tcpu_subtype) {
                case CPU_SUBTYPE_POWERPC_750:

                    if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
                        return(KERN_FAILURE);

                    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                    PERFMON_MMCR0(perf_regs) = mfmmcr0();
                    PERFMON_PMC1(perf_regs)  = mfpmc1();
                    PERFMON_PMC2(perf_regs)  = mfpmc2();
                    PERFMON_MMCR1(perf_regs) = mfmmcr1();
                    PERFMON_PMC3(perf_regs)  = mfpmc3();
                    PERFMON_PMC4(perf_regs)  = mfpmc4();
                    ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
                    return(KERN_SUCCESS);

                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:

                    if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
                        return(KERN_FAILURE);

                    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                    PERFMON_MMCR0(perf_regs) = mfmmcr0();
                    PERFMON_PMC1(perf_regs)  = mfpmc1();
                    PERFMON_PMC2(perf_regs)  = mfpmc2();
                    PERFMON_MMCR1(perf_regs) = mfmmcr1();
                    PERFMON_PMC3(perf_regs)  = mfpmc3();
                    PERFMON_PMC4(perf_regs)  = mfpmc4();
                    PERFMON_MMCR2(perf_regs) = mfmmcr2();
                    ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
                    return(KERN_SUCCESS);

                default:
                    return(KERN_FAILURE);
            } /* switch tcpu_subtype */

        case PROCESSOR_TEMPERATURE:     /* Get the temperature of a processor */

            *info = -1;                 /* Temperature not available */
            return(KERN_FAILURE);

        default:
            return(KERN_INVALID_ARGUMENT);

    } /* flavor */
}


/*
 *  Routine:    cpu_to_processor
 *  Function:   Map a cpu number to its processor_t.
 */
processor_t
cpu_to_processor(
    int cpu)
{
    return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
}


/*
 *  Routine:    slot_type
 *  Function:   Return the cpu type of the processor in the given slot.
 */
cpu_type_t
slot_type(
    int slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
}


/*
 *  Routine:    slot_subtype
 *  Function:   Return the cpu subtype of the processor in the given slot.
 */
cpu_subtype_t
slot_subtype(
    int slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
}


/*
 *  Routine:    slot_threadtype
 *  Function:   Return the cpu threadtype of the processor in the given slot.
 */
cpu_threadtype_t
slot_threadtype(
    int slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
}


/*
 *  Routine:    cpu_type
 *  Function:   Return the cpu type of the current processor.
 */
cpu_type_t
cpu_type(void)
{
    return (getPerProc()->cpu_type);
}


/*
 *  Routine:    cpu_subtype
 *  Function:   Return the cpu subtype of the current processor.
 */
cpu_subtype_t
cpu_subtype(void)
{
    return (getPerProc()->cpu_subtype);
}


/*
 *  Routine:    cpu_threadtype
 *  Function:   Return the cpu threadtype of the current processor.
 */
cpu_threadtype_t
cpu_threadtype(void)
{
    return (getPerProc()->cpu_threadtype);
}

/*
 *  Call a function on all running processors.
 *
 *  Note that the synch parameter is used to wait until all functions are complete.
 *  It is not passed to the other processor and must be known by the called function.
 *  The called function must do a thread_wakeup on the synch if it decrements the
 *  synch count to 0.
 */

int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {

    int sigproc, cpu, ocpu;

    cpu = cpu_number();                 /* Who are we? */
    sigproc = 0;                        /* Clear called processor count */

    if(real_ncpus > 1) {                /* Are we just a uni? */

        assert_wait((event_t)synch, THREAD_UNINT);  /* If more than one processor, we may have to wait */

        for(ocpu = 0; ocpu < real_ncpus; ocpu++) {  /* Tell everyone to call */
            if(ocpu == cpu) continue;   /* If we talk to ourselves, people will wonder... */
            hw_atomic_add(synch, 1);    /* Tentatively bump synchronizer */
            sigproc++;                  /* Tentatively bump signal sent count */
            if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {  /* Call the function on the other processor */
                hw_atomic_sub(synch, 1);    /* Other guy isn't really there, ignore it */
                sigproc--;              /* and don't count it */
            }
        }

        if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled */
        else thread_block(THREAD_CONTINUE_NULL);    /* Wait for everyone to get into step... */
    }

    return sigproc;                     /* Return the number of guys actually signalled */
}
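
/*
 * Typical use of cpu_broadcast() (a hypothetical sketch; myFunc and myParm
 * are illustrative names, not symbols defined elsewhere in this file):
 *
 *      uint32_t synch = 0;
 *      (void)cpu_broadcast(&synch, myFunc, myParm);
 *
 * where myFunc(myParm) runs on each other processor, decrements synch with
 * hw_atomic_sub(), and calls thread_wakeup() on synch when the count
 * reaches zero, releasing the broadcaster from its thread_block().
 */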