X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0c530ab8987f0ae6a1a3d9284f40182b88852816..4a3eedf9ecc9bbe3f3a5c6ce5e53ad199d639d32:/osfmk/ppc/cpu.c?ds=sidebyside

diff --git a/osfmk/ppc/cpu.c b/osfmk/ppc/cpu.c
index 5326e5dcf..3a77bccb6 100644
--- a/osfmk/ppc/cpu.c
+++ b/osfmk/ppc/cpu.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */

 #include
@@ -38,6 +44,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -71,9 +78,9 @@ struct SIGtimebase {
 	uint64_t	abstime;
 };

-perfCallback	perfCpuSigHook = 0;			/* Pointer to CHUD cpu signal hook routine */
+perfCallback	perfCpuSigHook;				/* Pointer to CHUD cpu signal hook routine */

-extern int		debugger_sync;
+extern uint32_t		debugger_sync;

 /*
  * Forward definitions
@@ -187,7 +194,7 @@ cpu_machine_init(
 	proc_info->cpu_flags |= BootDone|SignalReady;
 	if (proc_info != mproc_info) {
 		if (proc_info->ppXFlags & SignalReadyWait) {
-			hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
+			(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
 			thread_wakeup(&proc_info->cpu_flags);
 		}
 		simple_unlock(&SignalReadyLock);
@@ -204,9 +211,9 @@ struct per_proc_info *
 cpu_per_proc_alloc(
 		void)
 {
-	struct per_proc_info	*proc_info=0;
-	void			*interrupt_stack=0;
-	void			*debugger_stack=0;
+	struct per_proc_info	*proc_info = NULL;
+	void			*interrupt_stack = NULL;
+	void			*debugger_stack = NULL;

 	if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
 		return (struct per_proc_info *)NULL;
@@ -223,7 +230,10 @@ cpu_per_proc_alloc(

 	bzero((void *)proc_info, sizeof(struct per_proc_info));

-	proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT;	/* Set physical address of the second page */
+	/* Set physical address of the second page */
+	proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap,
+					((addr64_t)(unsigned int)proc_info) + 0x1000)
+				<< PAGE_SHIFT;
 	proc_info->next_savearea = (uint64_t)save_get_init();
 	proc_info->pf = BootProcInfo.pf;
 	proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
@@ -272,7 +282,7 @@ cpu_per_proc_register(
 		cpu = real_ncpus;
 		proc_info->cpu_number = cpu;
 		PerProcTable[cpu].ppe_vaddr = proc_info;
-		PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;
+		PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)(unsigned int)proc_info) << PAGE_SHIFT;
 		eieio();
 		real_ncpus++;
 	mutex_unlock(&ppt_lock);
@@ -306,8 +316,8 @@ cpu_start(
 		proc_info->pending_ast = AST_NONE;
 		proc_info->istackptr = proc_info->intstack_top_ss;
 		proc_info->rtcPop = EndOfAllTime;
-		proc_info->FPU_owner = 0;
-		proc_info->VMX_owner = 0;
+		proc_info->FPU_owner = NULL;
+		proc_info->VMX_owner = NULL;
 		proc_info->pms.pmsStamp = 0;				/* Dummy transition time */
 		proc_info->pms.pmsPop = EndOfAllTime;			/* Set the pop way into the future */
 		proc_info->pms.pmsState = pmsParked;			/* Park the stepper */
@@ -359,7 +369,7 @@ cpu_start(
 		} else {
 			simple_lock(&SignalReadyLock);
 			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
-				hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
+				(void)hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
 				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
				                          &SignalReadyLock, THREAD_UNINT);
 			}
@@ -418,12 +428,13 @@ cpu_sleep(
 	proc_info->running = FALSE;

 	fowner = proc_info->FPU_owner;					/* Cache this */
-	if(fowner) fpu_save(fowner);					/* If anyone owns FPU, save it */
-	proc_info->FPU_owner = 0;					/* Set no fpu owner now */
+	if(fowner)						/* If anyone owns FPU, save it */
+		fpu_save(fowner);
+	proc_info->FPU_owner = NULL;					/* Set no fpu owner now */

 	fowner = proc_info->VMX_owner;					/* Cache this */
 	if(fowner) vec_save(fowner);					/* If anyone owns vectors, save it */
-	proc_info->VMX_owner = 0;					/* Set no vector owner now */
+	proc_info->VMX_owner = NULL;					/* Set no vector owner now */

 	if (proc_info->cpu_number == master_cpu) {
 		proc_info->cpu_flags &= BootDone;
@@ -559,11 +570,9 @@ cpu_signal(
  *	the lock and signal the other guy.
  */
 void
-cpu_signal_handler(
-	void)
+cpu_signal_handler(void)
 {
-
-	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
+	unsigned int holdStat, holdParm0, holdParm1, holdParm2;
 	unsigned int *parmAddr;
 	struct per_proc_info *proc_info;
 	int cpu;
@@ -655,7 +664,7 @@ cpu_signal_handler(

 			proc_info->hwCtr.numSIGPdebug++;	/* Count this one */
 			proc_info->debugger_is_slave++;		/* Bump up the count to show we're here */
-			hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
+			(void)hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
 			__asm__ volatile("tw 4,r3,r3");		/* Enter the debugger */
 			return;					/* All done now... */

@@ -665,7 +674,7 @@ cpu_signal_handler(

 		case SIGPcall:					/* Call function on CPU */
 			proc_info->hwCtr.numSIGPcall++;		/* Count this one */
-			xfunc = holdParm1;			/* Do this since I can't seem to figure C out */
+			xfunc = (broadcastFunc)holdParm1;	/* Do this since I can't seem to figure C out */
 			xfunc(holdParm2);			/* Call the passed function */
 			return;					/* Done... */

@@ -1116,34 +1125,56 @@ cpu_threadtype(void)
 *	It is not passed to the other processor and must be known by the called function.
 *	The called function must do a thread_wakeup on the synch if it decrements the
 *	synch count to 0.
+ *
+ *	We start by initializing the synchronizer to the number of possible cpus.
+ *	Then we signal each possible processor.
+ *	If the signal fails, we count it. We also skip our own.
+ *	When we are finished signaling, we adjust the synchronizer count down by the number of failed signals.
+ *	Because the signaled processors are also decrementing the synchronizer count, the adjustment may result in 0.
+ *	If this happens, all other processors are finished with the function.
+ *	If so, we clear the wait and continue.
+ *	Otherwise, we block waiting for the other processor(s) to finish.
+ *
+ *	Meanwhile, the other processors decrement the synchronizer when they are done.
+ *	If it goes to zero, thread_wakeup is called to run the broadcaster.
+ *
+ *	Note that because we account for the broadcaster in the synchronization count, we will not get any
+ *	premature wakeup calls.
+ *
+ *	Also note that when we do the adjustment of the synchronization count, if the result is 0, it means that
+ *	all of the other processors are finished. Otherwise, we know that there is at least one more.
+ *	When that thread decrements the synchronizer to zero, it will do a thread_wakeup.
+ *
 */
-
-int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {
-
-	int sigproc, cpu, ocpu;
-
-	cpu = cpu_number();						/* Who are we? */
-	sigproc = 0;							/* Clear called processor count */
-
-	if(real_ncpus > 1) {						/* Are we just a uni? */
+int32_t
+cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm)
+{
+	int failsig;
+	unsigned int cpu, ocpu;
+
+	cpu = cpu_number();						/* Who are we? */
+	failsig = 0;							/* Clear called processor count */
+
+	if(real_ncpus > 1) {						/* Are we just a uni? */
+
+		*synch = real_ncpus;					/* Set how many we are going to try */
 		assert_wait((event_t)synch, THREAD_UNINT);		/* If more than one processor, we may have to wait */
-
+
 		for(ocpu = 0; ocpu < real_ncpus; ocpu++) {		/* Tell everyone to call */
-			if(ocpu == cpu) continue;			/* If we talk to ourselves, people will wonder... */
-			hw_atomic_add(synch, 1);			/* Tentatively bump synchronizer */
-			sigproc++;					/* Tentatively bump signal sent count */
+
+			if(ocpu == cpu) continue;			/* If we talk to ourselves, people will wonder... */
+
 			if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {	/* Call the function on the other processor */
-				hw_atomic_sub(synch, 1);		/* Other guy isn't really there, ignore it */
-				sigproc--;				/* and don't count it */
+				failsig++;				/* Count failed signals */
 			}
 		}
-
-		if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED);	/* Clear wait if we never signalled */
-		else thread_block(THREAD_CONTINUE_NULL);		/* Wait for everyone to get into step... */
+
+		if (hw_atomic_sub(synch, failsig + 1) == 0)
+			clear_wait(current_thread(), THREAD_AWAKENED);	/* Clear wait if we never signalled or all of the others finished */
+		else
+			thread_block(THREAD_CONTINUE_NULL);		/* Wait for everyone to get into step... */
 	}
-
-	return sigproc;							/* Return the number of guys actually signalled */
-
+
+	return (real_ncpus - failsig - 1);				/* Return the number of guys actually signalled... */
 }
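
The main functional change above is cpu_broadcast(): the old per-signal bookkeeping (bump synch and sigproc for each target, undo both on a failed signal) is replaced by a counting rendezvous. The synchronizer is preloaded with real_ncpus, every processor that runs the broadcast function decrements it once, and the broadcaster removes itself plus every failed signal in a single hw_atomic_sub(); whichever side drives the count to zero last does the wakeup, or skips the wait entirely. Below is a minimal user-space sketch of the same pattern, not xnu code: it assumes POSIX threads and C11 atomics, NCPUS, remote_cpu() and broadcast_target() are invented stand-ins for the SIGPcall targets, and a condition variable plays the role of assert_wait()/thread_block()/thread_wakeup().

/*
 * Hedged sketch of the cpu_broadcast() counting rendezvous (illustrative only).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4                                 /* pretend machine size; cpu 0 is "us" */

static atomic_uint      synch;                  /* the synchronizer counter */
static pthread_mutex_t  lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   wake = PTHREAD_COND_INITIALIZER;

static void broadcast_target(unsigned int parm)
{
	printf("target ran with parm %u\n", parm);      /* the work the broadcast function would do */
}

static void *remote_cpu(void *arg)
{
	broadcast_target((unsigned int)(uintptr_t)arg);

	/* Each signalled "cpu" drops the synchronizer when it is done; the one	*/
	/* that drives it to zero plays the role of thread_wakeup() on the synch.	*/
	if (atomic_fetch_sub(&synch, 1) == 1) {
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&wake);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t    t[NCPUS];
	int          started[NCPUS] = { 0 };
	unsigned int failsig = 0, ocpu;

	atomic_store(&synch, NCPUS);            /* count everyone, including ourselves */

	for (ocpu = 1; ocpu < NCPUS; ocpu++) {  /* "signal" every cpu except ourselves */
		if (pthread_create(&t[ocpu], NULL, remote_cpu, (void *)(uintptr_t)ocpu) == 0)
			started[ocpu] = 1;
		else
			failsig++;              /* a failed signal is only counted, never retried */
	}

	/* Drop ourselves plus every cpu we failed to reach.  If that empties the	*/
	/* counter, everyone else already finished (clear_wait in the real code);	*/
	/* otherwise wait for the last target to do the wakeup (thread_block).		*/
	pthread_mutex_lock(&lock);
	atomic_fetch_sub(&synch, failsig + 1);
	while (atomic_load(&synch) != 0)
		pthread_cond_wait(&wake, &lock);
	pthread_mutex_unlock(&lock);

	for (ocpu = 1; ocpu < NCPUS; ocpu++)
		if (started[ocpu])
			pthread_join(t[ocpu], NULL);

	printf("signalled %u cpus\n", NCPUS - failsig - 1);
	return 0;
}

Counting the broadcaster itself in the synchronizer is what rules out premature wakeups: no target can drive the count to zero until the broadcaster has made its own subtraction, which is exactly the point the added block comment makes.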
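
A smaller but recurring change is the (unsigned int) step inserted before widening the per_proc pointer to addr64_t in the pp2ndPage and ppe_paddr computations, presumably a guard against sign extension: on 32-bit PPC the kernel's virtual addresses sit above 0x80000000, so a widening that passes through a signed 32-bit value would smear the high bit into the upper word, while an explicit unsigned 32-bit intermediate forces zero extension. The sketch below is illustrative only; it assumes addr64_t is a 64-bit unsigned type and uses an invented sample address.

/*
 * Hedged sketch: widening a 32-bit kernel address with the high bit set.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t addr64_t;                      /* assumed shape of the osfmk type */

int main(void)
{
	uint32_t kva = 0x90001000u;             /* invented 32-bit kernel virtual address above 2GB */

	addr64_t signed_widen   = (addr64_t)(int32_t)kva;      /* widening through a signed type */
	addr64_t unsigned_widen = (addr64_t)(uint32_t)kva;     /* what the (unsigned int) cast forces */

	printf("signed widening:   0x%016llx\n", (unsigned long long)signed_widen);
	printf("unsigned widening: 0x%016llx\n", (unsigned long long)unsigned_widen);
	/* 0xffffffff90001000 vs 0x0000000090001000: only the second is an address	*/
	/* that a physical-address lookup like pmap_find_phys() should ever see.	*/
	return 0;
}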