/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/processor_info.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>
#include <kern/pms.h>

#include <vm/pmap.h>
#include <IOKit/IOHibernatePrivate.h>

#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/fpu_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/trap.h>
#include <ppc/machine_cpu.h>
#include <ppc/rtclock.h>

decl_mutex_data(static,ppt_lock);

unsigned int		real_ncpus = 1;
unsigned int		max_ncpus = MAX_CPUS;

decl_simple_lock_data(static,rht_lock);

static unsigned int	rht_state = 0;
#define RHT_WAIT	0x01
#define RHT_BUSY	0x02

decl_simple_lock_data(static,SignalReadyLock);

struct SIGtimebase {
	volatile boolean_t	avail;
	volatile boolean_t	ready;
	volatile boolean_t	done;
	uint64_t		abstime;
};

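/*
 * The three flags implement a simple rendezvous (see cpu_sync_timebase() and
 * cpu_timebase_signal_handler() below): the master fills in abstime and sets
 * avail; the requester loads that value into its own timebase and sets ready;
 * the master then re-enables the timebase and sets done, releasing both sides.
 */
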
2d21ac55 81perfCallback perfCpuSigHook; /* Pointer to CHUD cpu signal hook routine */
1c79356b 82
2d21ac55 83extern uint32_t debugger_sync;
1c79356b 84
91447636
A
85/*
86 * Forward definitions
87 */
1c79356b 88
91447636
A
89void cpu_sync_timebase(
90 void);
55e303ae 91
91447636
A
92void cpu_timebase_signal_handler(
93 struct per_proc_info *proc_info,
94 struct SIGtimebase *timebaseAddr);
1c79356b 95
/*
 *	Routine:	cpu_bootstrap
 *	Function:	Initialize the locks used by the CPU management code.
 */
void
cpu_bootstrap(
	void)
{
	simple_lock_init(&rht_lock,0);
	simple_lock_init(&SignalReadyLock,0);
	mutex_init(&ppt_lock,0);
}


/*
 *	Routine:	cpu_init
 *	Function:	Initialize the per-processor state for the running CPU.
 */
void
cpu_init(
	void)
{
	struct per_proc_info *proc_info;

	proc_info = getPerProc();

	/*
	 * Restore the TBR.  TBL is zeroed first so a carry out of the low
	 * half cannot corrupt TBU between the two writes.
	 */
	if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
		mttb(0);
		mttbu(proc_info->save_tbu);
		mttb(proc_info->save_tbl);
	}

	proc_info->rtcPop = EndOfAllTime;			/* forget any existing decrementer setting */
	etimer_resync_deadlines();				/* Now that the time base is sort of correct, request the next timer pop */

	proc_info->cpu_type = CPU_TYPE_POWERPC;
	proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
	proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
	proc_info->running = TRUE;
}

/*
 *	Routine:	cpu_machine_init
 *	Function:	Machine-dependent initialization; rendezvous a processor
 *			with the master and resynchronize the timebase.
 */
void
cpu_machine_init(
	void)
{
	struct per_proc_info			*proc_info;
	volatile struct per_proc_info	*mproc_info;


	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t	tbu, tbl;

		do {
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);

		proc_info->hibernate = 0;
		hibernate_machine_init();

		// hibernate_machine_init() could take minutes and we don't want timeouts
		// to fire as soon as scheduling starts. Reset timebase so it appears
		// no time has elapsed, as it would for regular sleep.
		mttb(0);
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	ml_init_interrupt();
	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
		pmsPark();					/* Timers should be cool now, park the power management stepper */
	}
}


/*
 *	Routine:	cpu_per_proc_alloc
 *	Function:	Allocate and initialize a per_proc_info block, along with
 *			its interrupt and debugger stacks, for a new processor.
 */
struct per_proc_info *
cpu_per_proc_alloc(
		void)
{
	struct per_proc_info	*proc_info = NULL;
	void			*interrupt_stack = NULL;
	void			*debugger_stack = NULL;

	if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
		return (struct per_proc_info *)NULL;
	if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
		kfree(proc_info, sizeof(struct per_proc_info));
		return (struct per_proc_info *)NULL;
	}

	if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
		kfree(proc_info, sizeof(struct per_proc_info));
		kfree(interrupt_stack, INTSTACK_SIZE);
		return (struct per_proc_info *)NULL;
	}

	bzero((void *)proc_info, sizeof(struct per_proc_info));

	/* Set physical address of the second page */
	proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap,
					((addr64_t)(unsigned int)proc_info) + 0x1000)
					<< PAGE_SHIFT;
	proc_info->next_savearea = (uint64_t)save_get_init();
	proc_info->pf = BootProcInfo.pf;
	proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
	proc_info->intstack_top_ss = proc_info->istackptr;
	proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
	proc_info->debstack_top_ss = proc_info->debstackptr;

	return proc_info;

}


/*
 *	Routine:	cpu_per_proc_free
 *	Function:	Release a per_proc_info block and its stacks.
 */
void
cpu_per_proc_free(
	struct per_proc_info	*proc_info
)
{
	if (proc_info->cpu_number == master_cpu)
		return;
	kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
	kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
	kfree((void *)proc_info, sizeof(struct per_proc_info));	/* Release the per_proc */
}


/*
 *	Routine:	cpu_per_proc_register
 *	Function:	Enter a new processor's per_proc block into PerProcTable
 *			and assign it a CPU number.
 */
kern_return_t
cpu_per_proc_register(
	struct per_proc_info	*proc_info
)
{
	int	cpu;

	mutex_lock(&ppt_lock);
	if (real_ncpus >= max_ncpus) {
		mutex_unlock(&ppt_lock);
		return KERN_FAILURE;
	}
	cpu = real_ncpus;
	proc_info->cpu_number = cpu;
	PerProcTable[cpu].ppe_vaddr = proc_info;
	PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)(unsigned int)proc_info) << PAGE_SHIFT;
	eieio();				/* Make sure the table entry is visible before we bump the count */
	real_ncpus++;
	mutex_unlock(&ppt_lock);
	return KERN_SUCCESS;
}


/*
 *	Routine:	cpu_start
 *	Function:	Start up a processor, either for the first time or after
 *			a reset or sleep.
 */
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;
	mapping_t		*mp;

	proc_info = PerProcTable[cpu].ppe_vaddr;

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;
		proc_info->istackptr = proc_info->intstack_top_ss;
		proc_info->rtcPop = EndOfAllTime;
		proc_info->FPU_owner = NULL;
		proc_info->VMX_owner = NULL;
		proc_info->pms.pmsStamp = 0;				/* Dummy transition time */
		proc_info->pms.pmsPop = EndOfAllTime;			/* Set the pop way into the future */
		proc_info->pms.pmsState = pmsParked;			/* Park the stepper */
		proc_info->pms.pmsCSetCmd = pmsCInit;			/* Set dummy initial hardware state */
		mp = (mapping_t *)(&proc_info->ppUMWmp);
		mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
		mp->mpSpace = invalSpace;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			simple_lock(&rht_lock);
			while (rht_state & RHT_BUSY) {
				rht_state |= RHT_WAIT;
				thread_sleep_usimple_lock((event_t)&rht_state,
						    &rht_lock, THREAD_UNINT);
			}
			rht_state |= RHT_BUSY;
			simple_unlock(&rht_lock);

			/* Install the reset vector: start marker, entry point, and the new CPU's per_proc table entry */
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					  RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					  (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					  (vm_offset_t)&PerProcTable[cpu]);
		}
/*
 *		Note: we pass the current time to the other processor here. He will load it
 *		as early as possible so that there is a chance that it is close to accurate.
 *		After the machine is up a while, we will officially resync the clocks so
 *		that all processors are the same. This is just to get close.
 */

		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

		__asm__ volatile("sync");				/* Commit to storage */
		__asm__ volatile("isync");				/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
						   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS) {
			if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
				simple_lock(&rht_lock);
				if (rht_state & RHT_WAIT)
					thread_wakeup(&rht_state);
				rht_state &= ~(RHT_BUSY|RHT_WAIT);
				simple_unlock(&rht_lock);
			}
		} else {
			simple_lock(&SignalReadyLock);
			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
				(void)hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
				                          &SignalReadyLock, THREAD_UNINT);
			}
			simple_unlock(&SignalReadyLock);

		}
		return(ret);
	}
}

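/*
 * Illustrative bring-up sequence for a secondary processor (a sketch pieced
 * together from the routines in this file; the platform expert supplies the
 * actual start mechanism):
 *
 *	struct per_proc_info *pp = cpu_per_proc_alloc();
 *	if (pp != NULL && cpu_per_proc_register(pp) == KERN_SUCCESS)
 *		(void) cpu_start(pp->cpu_number);
 *
 * The started processor later runs cpu_init() and cpu_machine_init() to
 * finish setting itself up and to rendezvous with the master.
 */
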
/*
 *	Routine:	cpu_exit_wait
 *	Function:	Spin until the target processor has marked itself asleep.
 */
void
cpu_exit_wait(
	int	cpu)
{
	struct per_proc_info	*tpproc;

	if ( cpu != master_cpu) {
		tpproc = PerProcTable[cpu].ppe_vaddr;
		while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
	}
}


/*
 *	Routine:	cpu_doshutdown
 *	Function:	Take the current processor offline.
 */
void
cpu_doshutdown(
	void)
{
	enable_preemption();
	processor_offline(current_processor());
}


/*
 *	Routine:	cpu_sleep
 *	Function:	Prepare this processor for sleep: save floating point and
 *			vector state, set up the reset handler on the master, and
 *			save the timebase.
 */
void
cpu_sleep(
	void)
{
	struct per_proc_info	*proc_info;
	unsigned int		i;
	unsigned int		wait_ncpus_sleep, ncpus_sleep;
	facility_context	*fowner;

	proc_info = getPerProc();

	proc_info->running = FALSE;

	fowner = proc_info->FPU_owner;				/* Cache this */
	if(fowner)						/* If anyone owns FPU, save it */
		fpu_save(fowner);
	proc_info->FPU_owner = NULL;				/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;				/* Cache this */
	if(fowner) vec_save(fowner);				/* If anyone owns vectors, save it */
	proc_info->VMX_owner = NULL;				/* Set no vector owner now */

	if (proc_info->cpu_number == master_cpu)  {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					  RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					  (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					  (vm_offset_t)&PerProcTable[master_cpu]);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}

		/* The master must wait for every other processor to reach SleepState first */
		wait_ncpus_sleep = real_ncpus-1;
		ncpus_sleep = 0;
		while (wait_ncpus_sleep != ncpus_sleep) {
			ncpus_sleep = 0;
			for(i=1; i < real_ncpus ; i++) {
				if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
					ncpus_sleep++;
			}
		}

	}

	/*
	 * Save the TBR before stopping.
	 */
	do {
		proc_info->save_tbu = mftbu();
		proc_info->save_tbl = mftb();
	} while (mftbu() != proc_info->save_tbu);

	PE_cpu_machine_quiesce(proc_info->cpu_id);
}


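/*
 * Note: the timebase halves saved above are consumed by cpu_init() when the
 * processor comes back from sleep; cpu_machine_init() performs the analogous
 * preservation across hibernation.
 */
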
/*
 *	Routine:	cpu_signal
 *	Function:
 *	Here is where we send a message to another processor. So far we only have two:
 *	SIGPast and SIGPdebug.  SIGPast is used to preempt and kick off threads (this is
 *	currently disabled). SIGPdebug is used to enter the debugger.
 *
 *	We set up the SIGP function to indicate that this is a simple message and set the
 *	order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
 *	block for the target, we lock the message block. Then we set the parameter(s).
 *	Next we change the lock (also called "busy") to "passing" and finally signal
 *	the other processor. Note that we only wait about half a millisecond to get the
 *	message lock.  If we time out, we return failure to our caller. It is their
 *	responsibility to recover.
 */
kern_return_t
cpu_signal(
	int target,
	int signal,
	unsigned int p1,
	unsigned int p2)
{

	unsigned int holdStat;
	struct per_proc_info *tpproc, *mpproc;
	int busybitset=0;

#if DEBUG
	if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

	mpproc = getPerProc();					/* Point to our block */
	tpproc = PerProcTable[target].ppe_vaddr;		/* Point to the target's block */
	if(mpproc == tpproc) return KERN_FAILURE;		/* Cannot signal ourselves */

	if(!tpproc->running) return KERN_FAILURE;

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */

		if(signal == SIGPwake) {			/* SIGPwake can merge into all others... */
			mpproc->hwCtr.numSIGPmwake++;		/* Account for merged wakes */
			return KERN_SUCCESS;
		}

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
			mpproc->hwCtr.numSIGPmast++;		/* Account for merged ASTs */
			return KERN_SUCCESS;			/* Don't bother to send this one... */
		}

		if (tpproc->MPsigpParm0 == SIGPwake) {
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
			                  (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
				busybitset = 1;
				mpproc->hwCtr.numSIGPmwake++;
			}
		}
	}

	if((busybitset == 0) &&
	   (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	   (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
		mpproc->hwCtr.numSIGPtimo++;			/* Account for timeouts */
		return KERN_FAILURE;				/* Timed out, take your ball and go home... */
	}

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;				/* Set message order */
	tpproc->MPsigpParm1 = p1;				/* Set additional parm */
	tpproc->MPsigpParm2 = p2;				/* Set additional parm */

	__asm__ volatile("sync");				/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;				/* Set status and pass the lock */
	__asm__ volatile("eieio");				/* I'm a paranoid freak */

	if (busybitset == 0)
		PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

	return KERN_SUCCESS;					/* All is goodness and rainbows... */
}

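/*
 * Illustrative use: to ask another processor to check for a pending AST, a
 * caller would issue
 *
 *	(void) cpu_signal(target_cpu, SIGPast, 0, 0);
 *
 * relying on the merge logic above to coalesce the request if an identical
 * signal is already pending on the target.
 */
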

/*
 *	Routine:	cpu_signal_handler
 *	Function:
 *	Here is where we implement the receiver of the signaling protocol.
 *	We wait for the signal status area to be passed to us. Then we snarf
 *	up the status, the sender, and the 3 potential parms. Next we release
 *	the lock and signal the other guy.
 */
void
cpu_signal_handler(void)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2;
	unsigned int *parmAddr;
	struct per_proc_info	*proc_info;
	int cpu;
	broadcastFunc xfunc;

	cpu = cpu_number();					/* Get the CPU number */

	proc_info = getPerProc();

/*
 *	Since we've been signaled, wait about 31 ms for the signal lock to pass
 */
	if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	holdStat = proc_info->MPsigpStat;			/* Snarf stat word */
	holdParm0 = proc_info->MPsigpParm0;			/* Snarf parameter */
	holdParm1 = proc_info->MPsigpParm1;			/* Snarf parameter */
	holdParm2 = proc_info->MPsigpParm2;			/* Snarf parameter */

	__asm__ volatile("isync");				/* Make sure we don't unlock until memory is in */

	proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {			/* Decode function code */

		case MPsigpIdle:				/* Was function cancelled? */
			return;					/* Yup... */

		case MPsigpSigp:				/* Signal Processor message? */

			switch (holdParm0) {			/* Decode SIGP message order */

				case SIGPast:			/* Should we do an AST? */
					proc_info->hwCtr.numSIGPast++;	/* Count this one */
#if 0
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
					ast_check((processor_t)proc_info->processor);
					return;			/* All done... */

				case SIGPcpureq:		/* CPU specific function? */

					proc_info->hwCtr.numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {	/* Select specific function */

						case CPRQtimebase:

							cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
							return;

						case CPRQsegload:
							return;

						case CPRQchud:
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							if(perfCpuSigHook) {
								struct savearea *ssp = current_thread()->machine.pcb;
								if(ssp) {
									(perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
								}
							}
							parmAddr[1] = 0;
							parmAddr[0] = 0;	/* Show we're done */
							return;

						case CPRQscom:
							if(((scomcomm *)holdParm2)->scomfunc) {	/* Are we writing? */
								((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);	/* Write scom */
							}
							else {		/* No, reading... */
								((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);	/* Read scom */
							}
							return;

						case CPRQsps:
							{
								ml_set_processor_speed_slave(holdParm2);
								return;
							}
						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
							return;
					}


				case SIGPdebug:			/* Enter the debugger? */

					proc_info->hwCtr.numSIGPdebug++;	/* Count this one */
					proc_info->debugger_is_slave++;		/* Bump up the count to show we're here */
					(void)hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");		/* Enter the debugger */
					return;			/* All done now... */

				case SIGPwake:			/* Wake up CPU */
					proc_info->hwCtr.numSIGPwake++;		/* Count this one */
					return;			/* No need to do anything, the interrupt does it all... */

				case SIGPcall:			/* Call function on CPU */
					proc_info->hwCtr.numSIGPcall++;		/* Count this one */
					xfunc = (broadcastFunc)holdParm1;	/* Do this since I can't seem to figure C out */
					xfunc(holdParm2);	/* Call the passed function */
					return;			/* Done... */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
					return;

			}

		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
			return;

	}
	panic("cpu_signal_handler: we should never get here\n");
}


/*
 *	Routine:	cpu_sync_timebase
 *	Function:	Ask the master processor for its timebase and set ours to match.
 */
void
cpu_sync_timebase(
	void)
{
	natural_t tbu, tbl;
	boolean_t	intr;
	struct SIGtimebase	syncClkSpot;

	intr = ml_set_interrupts_enabled(FALSE);		/* No interruptions in here */

	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
							(unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	while (syncClkSpot.avail == FALSE)
		continue;

	isync();

	/*
	 * We do the following to keep the compiler from generating extra stuff
	 * in tb set part
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;

	mttb(0);
	mttbu(tbu);
	mttb(tbl);

	syncClkSpot.ready = TRUE;

	while (syncClkSpot.done == FALSE)
		continue;

	etimer_resync_deadlines();				/* Start the timer */
	(void)ml_set_interrupts_enabled(intr);
}


/*
 *	Routine:	cpu_timebase_signal_handler
 *	Function:	Master-side handler for CPRQtimebase: hand our timebase
 *			value to the requesting processor.
 */
void
cpu_timebase_signal_handler(
	struct per_proc_info	*proc_info,
	struct SIGtimebase	*timebaseAddr)
{
	unsigned int tbu, tbu2, tbl;

	if(proc_info->time_base_enable !=  (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, FALSE);

	timebaseAddr->abstime = 0;				/* Touch to force into cache */
	sync();

	/* Read the 64-bit timebase in halves, retrying if the upper half ticks over mid-read */
	do {
		asm volatile("	mftbu %0" : "=r" (tbu));
		asm volatile("	mftb %0" : "=r" (tbl));
		asm volatile("	mftbu %0" : "=r" (tbu2));
	} while (tbu != tbu2);

	timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
	sync();							/* Force order */

	timebaseAddr->avail = TRUE;

	while (timebaseAddr->ready == FALSE)
		continue;

	if(proc_info->time_base_enable !=  (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, TRUE);

	timebaseAddr->done = TRUE;
}

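/*
 * Design note (as read from the code above): on machines that supply a
 * time_base_enable callback, the handler gates the timebase off for the
 * whole exchange, so the value placed in abstime is still exact when the
 * requester loads it; both processors then resume counting together.
 */
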

/*
 *	Routine:	cpu_control
 *	Function:	Program the performance monitor registers of a processor.
 */
kern_return_t
cpu_control(
	int			slot_num,
	processor_info_t	info,
	unsigned int		count)
{
	struct per_proc_info	*proc_info;
	cpu_type_t		tcpu_type;
	cpu_subtype_t		tcpu_subtype;
	processor_pm_regs_t	perf_regs;
	processor_control_cmd_t	cmd;
	boolean_t		oldlevel;
#define MMCR0_SUPPORT_MASK	0xf83f1fff
#define MMCR1_SUPPORT_MASK	0xffc00000
#define MMCR2_SUPPORT_MASK	0x80000000

	proc_info = PerProcTable[slot_num].ppe_vaddr;
	tcpu_type = proc_info->cpu_type;
	tcpu_subtype = proc_info->cpu_subtype;
	cmd = (processor_control_cmd_t) info;

	if (count < PROCESSOR_CONTROL_CMD_COUNT)
		return(KERN_FAILURE);

	if ( tcpu_type != cmd->cmd_cpu_type ||
	     tcpu_subtype != cmd->cmd_cpu_subtype)
		return(KERN_FAILURE);

	if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
		return(KERN_RESOURCE_SHORTAGE);		/* cpu performance facility in use by another task */
	}

	switch (cmd->cmd_op)
	{
		case PROCESSOR_PM_CLR_PMC:		/* Clear Performance Monitor Counters */
			switch (tcpu_subtype)
			{
				case CPU_SUBTYPE_POWERPC_750:
				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
				{
					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					mtpmc1(0x0);
					mtpmc2(0x0);
					mtpmc3(0x0);
					mtpmc4(0x0);
					ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
					return(KERN_SUCCESS);
				}
				default:
					return(KERN_FAILURE);
			} /* tcpu_subtype */
		case PROCESSOR_PM_SET_REGS:		/* Set Performance Monitor Registers */
			switch (tcpu_subtype)
			{
				case CPU_SUBTYPE_POWERPC_750:
					if (count <  (PROCESSOR_CONTROL_CMD_COUNT +
					       PROCESSOR_PM_REGS_COUNT_POWERPC_750))
						return(KERN_FAILURE);
					else
					{
						perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
						oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
						mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
						mtpmc1(PERFMON_PMC1(perf_regs));
						mtpmc2(PERFMON_PMC2(perf_regs));
						mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
						mtpmc3(PERFMON_PMC3(perf_regs));
						mtpmc4(PERFMON_PMC4(perf_regs));
						ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
						return(KERN_SUCCESS);
					}
				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
					if (count <  (PROCESSOR_CONTROL_CMD_COUNT +
					       PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
						return(KERN_FAILURE);
					else
					{
						perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
						oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
						mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
						mtpmc1(PERFMON_PMC1(perf_regs));
						mtpmc2(PERFMON_PMC2(perf_regs));
						mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
						mtpmc3(PERFMON_PMC3(perf_regs));
						mtpmc4(PERFMON_PMC4(perf_regs));
						mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
						ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
						return(KERN_SUCCESS);
					}
				default:
					return(KERN_FAILURE);
			} /* switch tcpu_subtype */
		case PROCESSOR_PM_SET_MMCR:
			switch (tcpu_subtype)
			{
				case CPU_SUBTYPE_POWERPC_750:
					if (count < (PROCESSOR_CONTROL_CMD_COUNT +
					      PROCESSOR_PM_REGS_COUNT_POWERPC_750))
						return(KERN_FAILURE);
					else
					{
						perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
						oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
						mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
						mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
						ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
						return(KERN_SUCCESS);
					}
				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:
					if (count < (PROCESSOR_CONTROL_CMD_COUNT +
					      PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
						return(KERN_FAILURE);
					else
					{
						perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
						oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
						mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
						mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
						mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
						ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
						return(KERN_SUCCESS);
					}
				default:
					return(KERN_FAILURE);
			} /* tcpu_subtype */
		default:
			return(KERN_FAILURE);
	} /* switch cmd_op */
}

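/*
 * Note: cpu_control() above and the info routines below are the PowerPC
 * backends reached through the Mach processor_control() and
 * processor_info() interfaces.
 */
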

/*
 *	Routine:	cpu_info_count
 *	Function:	Return the size of the info structure for a given flavor.
 */
kern_return_t
cpu_info_count(
	processor_flavor_t	flavor,
	unsigned int		*count)
{
	cpu_subtype_t	tcpu_subtype;

	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
	switch (flavor) {
		case PROCESSOR_PM_REGS_INFO:
			switch (tcpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
					return(KERN_SUCCESS);

				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
					return(KERN_SUCCESS);

				default:
					*count = 0;
					return(KERN_INVALID_ARGUMENT);
			} /* switch tcpu_subtype */

		case PROCESSOR_TEMPERATURE:
			*count = PROCESSOR_TEMPERATURE_COUNT;
			return (KERN_SUCCESS);

		default:
			*count = 0;
			return(KERN_INVALID_ARGUMENT);

	}
}


/*
 *	Routine:	cpu_info
 *	Function:	Return performance monitor or temperature info for a processor.
 */
kern_return_t
cpu_info(
	processor_flavor_t	flavor,
	int			slot_num,
	processor_info_t	info,
	unsigned int		*count)
{
	cpu_subtype_t	tcpu_subtype;
	processor_pm_regs_t  perf_regs;
	boolean_t oldlevel;

	tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;

	switch (flavor) {
		case PROCESSOR_PM_REGS_INFO:

			perf_regs = (processor_pm_regs_t) info;

			switch (tcpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:

					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
						return(KERN_FAILURE);

					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					PERFMON_MMCR0(perf_regs) = mfmmcr0();
					PERFMON_PMC1(perf_regs)  = mfpmc1();
					PERFMON_PMC2(perf_regs)  = mfpmc2();
					PERFMON_MMCR1(perf_regs) = mfmmcr1();
					PERFMON_PMC3(perf_regs)  = mfpmc3();
					PERFMON_PMC4(perf_regs)  = mfpmc4();
					ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
					return(KERN_SUCCESS);

				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:

					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
						return(KERN_FAILURE);

					oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
					PERFMON_MMCR0(perf_regs) = mfmmcr0();
					PERFMON_PMC1(perf_regs)  = mfpmc1();
					PERFMON_PMC2(perf_regs)  = mfpmc2();
					PERFMON_MMCR1(perf_regs) = mfmmcr1();
					PERFMON_PMC3(perf_regs)  = mfpmc3();
					PERFMON_PMC4(perf_regs)  = mfpmc4();
					PERFMON_MMCR2(perf_regs) = mfmmcr2();
					ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
					return(KERN_SUCCESS);

				default:
					return(KERN_FAILURE);
			} /* switch tcpu_subtype */

		case PROCESSOR_TEMPERATURE:		/* Get the temperature of a processor */

			*info = -1;			/* Temperature reading is not supported */
			return(KERN_FAILURE);

		default:
			return(KERN_INVALID_ARGUMENT);

	} /* flavor */
}


/*
 *	Routine:	cpu_to_processor
 *	Function:	Return the processor_t for a given CPU number.
 */
processor_t
cpu_to_processor(
	int			cpu)
{
	return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
}


/*
 *	Routine:	slot_type
 *	Function:	Return the CPU type of a given slot.
 */
cpu_type_t
slot_type(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
}


/*
 *	Routine:	slot_subtype
 *	Function:	Return the CPU subtype of a given slot.
 */
cpu_subtype_t
slot_subtype(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
}


/*
 *	Routine:	slot_threadtype
 *	Function:	Return the CPU thread type of a given slot.
 */
cpu_threadtype_t
slot_threadtype(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
}


/*
 *	Routine:	cpu_type
 *	Function:	Return the CPU type of the current processor.
 */
cpu_type_t
cpu_type(void)
{
	return (getPerProc()->cpu_type);
}


/*
 *	Routine:	cpu_subtype
 *	Function:	Return the CPU subtype of the current processor.
 */
cpu_subtype_t
cpu_subtype(void)
{
	return (getPerProc()->cpu_subtype);
}


/*
 *	Routine:	cpu_threadtype
 *	Function:	Return the CPU thread type of the current processor.
 */
cpu_threadtype_t
cpu_threadtype(void)
{
	return (getPerProc()->cpu_threadtype);
}

/*
 *	Call a function on all running processors
 *
 *	Note that the synch parameter is used to wait until all functions are complete.
 *	It is not passed to the other processor and must be known by the called function.
 *	The called function must do a thread_wakeup on the synch if it decrements the
 *	synch count to 0.
 *
 *	We start by initializing the synchronizer to the number of possible cpus.
 *	Then we signal each possible processor.
 *	If the signal fails, we count it.  We also skip our own.
 *	When we are finished signaling, we adjust the synchronizer count down by the number of failed signals.
 *	Because the signaled processors are also decrementing the synchronizer count, the adjustment may result in a 0.
 *	If this happens, all other processors are finished with the function.
 *	If so, we clear the wait and continue.
 *	Otherwise, we block waiting for the other processor(s) to finish.
 *
 *	Meanwhile, the other processors are decrementing the synchronizer when they are done.
 *	If it goes to zero, thread_wakeup is called to run the broadcaster.
 *
 *	Note that because we account for the broadcaster in the synchronization count, we will not get any
 *	premature wakeup calls.
 *
 *	Also note that when we do the adjustment of the synchronization count, if the result is 0, it means that
 *	all of the other processors are finished.  Otherwise, we know that there is at least one more.
 *	When that thread decrements the synchronizer to zero, it will do a thread_wakeup.
 */

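/*
 * Worked example (illustrative): with real_ncpus == 4 and one failed signal,
 * *synch starts at 4; the broadcaster subtracts failsig + 1 = 2, and each of
 * the two processors actually reached subtracts 1 as it finishes.  Whoever
 * brings the count to 0 wakes the broadcaster, and the routine reports
 * 4 - 1 - 1 = 2 processors signalled.
 */
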
int32_t
cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm)
{
	int failsig;
	unsigned int cpu, ocpu;

	cpu = cpu_number();					/* Who are we? */
	failsig = 0;						/* Clear called processor count */

	if(real_ncpus > 1) {					/* Are we just a uni? */

		*synch = real_ncpus;				/* Set how many we are going to try */
		assert_wait((event_t)synch, THREAD_UNINT);	/* If more than one processor, we may have to wait */

		for(ocpu = 0; ocpu < real_ncpus; ocpu++) {	/* Tell everyone to call */

			if(ocpu == cpu) continue;		/* If we talk to ourselves, people will wonder... */

			if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {	/* Call the function on the other processor */
				failsig++;			/* Count failed signals */
			}
		}

		if (hw_atomic_sub(synch, failsig + 1) == 0)
			clear_wait(current_thread(), THREAD_AWAKENED);	/* Clear wait if we never signalled or all of the others finished */
		else
			thread_block(THREAD_CONTINUE_NULL);	/* Wait for everyone to get into step... */
	}

	return (real_ncpus - failsig - 1);			/* Return the number of guys actually signalled... */
}