X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/e5568f75972dfc723778653c11cb6b4dc825716a..4a3eedf9ecc9bbe3f3a5c6ce5e53ad199d639d32:/osfmk/ppc/commpage/commpage.c

diff --git a/osfmk/ppc/commpage/commpage.c b/osfmk/ppc/commpage/commpage.c
index 544a0bbb2..6b0227322 100644
--- a/osfmk/ppc/commpage/commpage.c
+++ b/osfmk/ppc/commpage/commpage.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 /*
@@ -33,64 +39,198 @@
  *      The source files should be in osfmk/ppc/commpage/.
  *
  *      3. Add a ptr to your new commpage_descriptor(s) in the "routines"
- *      array in commpage_populate().  Of course, you'll also have to
- *      declare them "extern" in commpage_populate().
+ *      static array below.  Of course, you'll also have to declare them
+ *      "extern".
  *
  *      4. Write the code in Libc to use the new routine.
  */
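For orientation, the descriptors referenced in step 3 are laid out roughly as below. This is an illustrative sketch only -- the authoritative definition lives in the commpage headers under osfmk/ppc/commpage/, and the exact types and field order here are assumptions; the field names are the ones commpage_stuff_routine() dereferences later in this file.

    /* Illustrative sketch only -- not the real definition.  Field names are
     * taken from how commpage_stuff_routine() uses them below; the types and
     * ordering are assumptions. */
    typedef struct commpage_descriptor {
        short   code_offset;        // offset from descriptor to first byte of code
        short   code_length;        // length of the code in bytes
        short   commpage_address;   // _COMM_PAGE_* slot this routine occupies
        short   special;            // kCommPageSYNC, kCommPageISYNC, kPort32to64, etc.
        int     musthave;           // _cpu_capabilities bits that must all be set
        int     canthave;           // _cpu_capabilities bits that must all be clear
    } commpage_descriptor;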
 
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
-#include
+#include
+#include
 
-static  char    *next = NULL;               // next available byte in comm page
-static  int     cur_routine = 0;            // comm page address of "current" routine
-static  int     matched;                    // true if we've found a match for "current" routine
+extern  vm_map_t    commpage32_map;         // the 32-bit shared submap, set up in vm init
+extern  vm_map_t    commpage64_map;         // the 64-bit shared submap
+
+char    *commPagePtr32 = NULL;              // virtual address of 32-bit comm page in kernel map
+char    *commPagePtr64 = NULL;              // and 64-bit commpage
 
 int     _cpu_capabilities = 0;              // define the capability vector
 
-char    *commPagePtr = NULL;                // virtual address of comm page in kernel map
-
-
-/* Allocate the commpages and add to the shared submap created by vm:
+static  char    *next;                      // next available byte in comm page
+static  int     cur_routine;                // comm page address of "current" routine
+static  int     matched;                    // true if we've found a match for "current" routine
+static  char    *commPagePtr;               // virtual address in kernel of commpage we are working on
+
+extern  commpage_descriptor    compare_and_swap32_on32;
+extern  commpage_descriptor    compare_and_swap32_on64;
+extern  commpage_descriptor    compare_and_swap64;
+extern  commpage_descriptor    atomic_enqueue32;
+extern  commpage_descriptor    atomic_enqueue64;
+extern  commpage_descriptor    atomic_dequeue32_on32;
+extern  commpage_descriptor    atomic_dequeue32_on64;
+extern  commpage_descriptor    atomic_dequeue64;
+extern  commpage_descriptor    memory_barrier_up;
+extern  commpage_descriptor    memory_barrier_mp32;
+extern  commpage_descriptor    memory_barrier_mp64;
+extern  commpage_descriptor    atomic_add32;
+extern  commpage_descriptor    atomic_add64;
+extern  commpage_descriptor    mach_absolute_time_32;
+extern  commpage_descriptor    mach_absolute_time_64;
+extern  commpage_descriptor    mach_absolute_time_lp64;
+extern  commpage_descriptor    spinlock_32_try_mp;
+extern  commpage_descriptor    spinlock_32_try_up;
+extern  commpage_descriptor    spinlock_64_try_mp;
+extern  commpage_descriptor    spinlock_64_try_up;
+extern  commpage_descriptor    spinlock_32_lock_mp;
+extern  commpage_descriptor    spinlock_32_lock_up;
+extern  commpage_descriptor    spinlock_64_lock_mp;
+extern  commpage_descriptor    spinlock_64_lock_up;
+extern  commpage_descriptor    spinlock_32_unlock_mp;
+extern  commpage_descriptor    spinlock_32_unlock_up;
+extern  commpage_descriptor    spinlock_64_unlock_mp;
+extern  commpage_descriptor    spinlock_64_unlock_up;
+extern  commpage_descriptor    pthread_getspecific_sprg3_32;
+extern  commpage_descriptor    pthread_getspecific_sprg3_64;
+extern  commpage_descriptor    pthread_getspecific_uftrap;
+extern  commpage_descriptor    gettimeofday_32;
+extern  commpage_descriptor    gettimeofday_g5_32;
+extern  commpage_descriptor    gettimeofday_g5_64;
+extern  commpage_descriptor    commpage_flush_dcache;
+extern  commpage_descriptor    commpage_flush_icache;
+extern  commpage_descriptor    pthread_self_sprg3;
+extern  commpage_descriptor    pthread_self_uftrap;
+extern  commpage_descriptor    spinlock_relinquish;
+extern  commpage_descriptor    bzero_32;
+extern  commpage_descriptor    bzero_128;
+extern  commpage_descriptor    bcopy_g3;
+extern  commpage_descriptor    bcopy_g4;
+extern  commpage_descriptor    bcopy_970;
+extern  commpage_descriptor    bcopy_64;
+extern  commpage_descriptor    compare_and_swap32_on32b;
+extern  commpage_descriptor    compare_and_swap32_on64b;
+extern  commpage_descriptor    compare_and_swap64b;
+extern  commpage_descriptor    memset_64;
+extern  commpage_descriptor    memset_g3;
+extern  commpage_descriptor    memset_g4;
+extern  commpage_descriptor    memset_g5;
+extern  commpage_descriptor    bigcopy_970;
+
+/* The list of all possible commpage routines.  WARNING: the check for overlap
+ * assumes that these routines are in strictly ascending order, sorted by address
+ * in the commpage.  We panic if not.
+ */
+static  commpage_descriptor    *routines[] = {
+    &compare_and_swap32_on32,
+    &compare_and_swap32_on64,
+    &compare_and_swap64,
+    &atomic_enqueue32,
+    &atomic_enqueue64,
+    &atomic_dequeue32_on32,
+    &atomic_dequeue32_on64,
+    &atomic_dequeue64,
+    &memory_barrier_up,
+    &memory_barrier_mp32,
+    &memory_barrier_mp64,
+    &atomic_add32,
+    &atomic_add64,
+    &mach_absolute_time_32,
+    &mach_absolute_time_64,
+    &mach_absolute_time_lp64,
+    &spinlock_32_try_mp,
+    &spinlock_32_try_up,
+    &spinlock_64_try_mp,
+    &spinlock_64_try_up,
+    &spinlock_32_lock_mp,
+    &spinlock_32_lock_up,
+    &spinlock_64_lock_mp,
+    &spinlock_64_lock_up,
+    &spinlock_32_unlock_mp,
+    &spinlock_32_unlock_up,
+    &spinlock_64_unlock_mp,
+    &spinlock_64_unlock_up,
+    &pthread_getspecific_sprg3_32,
+    &pthread_getspecific_sprg3_64,
+    &pthread_getspecific_uftrap,
+    &gettimeofday_32,
+    &gettimeofday_g5_32,
+    &gettimeofday_g5_64,
+    &commpage_flush_dcache,
+    &commpage_flush_icache,
+    &pthread_self_sprg3,
+    &pthread_self_uftrap,
+    &spinlock_relinquish,
+    &bzero_32,
+    &bzero_128,
+    &bcopy_g3,
+    &bcopy_g4,
+    &bcopy_970,
+    &bcopy_64,
+    &compare_and_swap32_on32b,
+    &compare_and_swap32_on64b,
+    &compare_and_swap64b,
+    &memset_64,
+    &memset_g3,
+    &memset_g4,
+    &memset_g5,
+    &bigcopy_970,
+    NULL };
+
+
+/* Allocate the commpages and add to one of the shared submaps created by vm.
+ * Called once each for the 32 and 64-bit submaps.
+ *
  * 1. allocate pages in the kernel map (RW)
  * 2. wire them down
  * 3. make a memory entry out of them
  * 4. map that entry into the shared comm region map (R-only)
  */
 static  void*
-commpage_allocate( void )
+commpage_allocate(
+    vm_map_t    submap )                    // commpage32_map or commpage64_map
 {
-    extern  vm_map_t    com_region_map;     // the shared submap, set up in vm init
-    vm_offset_t kernel_addr;                // address of commpage in kernel map
+    vm_offset_t kernel_addr = 0;            // address of commpage in kernel map
     vm_offset_t zero = 0;
     vm_size_t   size = _COMM_PAGE_AREA_USED;    // size actually populated
+    vm_map_entry_t  entry;
     ipc_port_t  handle;
 
-    if (com_region_map == NULL)
-        panic("commpage map is null");
+    if (submap == NULL)
+        panic("commpage submap is null");
 
-    if (vm_allocate(kernel_map,&kernel_addr,_COMM_PAGE_AREA_USED,VM_FLAGS_ANYWHERE))
+    if (vm_map(kernel_map,&kernel_addr,_COMM_PAGE_AREA_USED,0,VM_FLAGS_ANYWHERE,NULL,0,FALSE,VM_PROT_ALL,VM_PROT_ALL,VM_INHERIT_NONE))
         panic("cannot allocate commpage");
 
     if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+_COMM_PAGE_AREA_USED,VM_PROT_DEFAULT,FALSE))
         panic("cannot wire commpage");
-
+
+    /*
+     * Now that the object is created and wired into the kernel map, mark it so that no delay
+     * copy-on-write will ever be performed on it as a result of mapping it into user-space.
+     * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
+     * that would be a real disaster.
+     *
+     * JMM - What we really need is a way to create it like this in the first place.
+     */
+    if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr), &entry) || entry->is_sub_map)
+        panic("cannot find commpage entry");
+    entry->object.vm_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+
     if (mach_make_memory_entry( kernel_map,     // target map
                 &size,                          // size
                 kernel_addr,                    // offset (address in kernel map)
-                VM_PROT_DEFAULT,                // map it RW
+                VM_PROT_ALL,                    // map it RWX
                 &handle,                        // this is the object handle we get
                 NULL ))                         // parent_entry
         panic("cannot make entry for commpage");
 
-    if (vm_map_64( com_region_map,              // target map (shared submap)
+    if (vm_map_64( submap,                      // target map (shared submap)
             &zero,                              // address (map into 1st page in submap)
             _COMM_PAGE_AREA_USED,               // size
             0,                                  // mask
@@ -98,8 +238,8 @@ commpage_allocate( void )
             handle,                             // port is the memory entry we just made
             0,                                  // offset (map 1st page in memory entry)
             FALSE,                              // copy
-            VM_PROT_READ,                       // cur_protection (R-only in user map)
-            VM_PROT_READ,                       // max_protection
+            VM_PROT_READ|VM_PROT_EXECUTE,       // cur_protection (R-only in user map)
+            VM_PROT_READ|VM_PROT_EXECUTE,       // max_protection
             VM_INHERIT_SHARE ))                 // inheritance
         panic("cannot map commpage");
 
@@ -143,13 +283,11 @@ commpage_cpus( void )
 static void
 commpage_init_cpu_capabilities( void )
 {
-    struct per_proc_info *pp;
     procFeatures *pfp;
     int cpus;
    int available;
 
-    pp = per_proc_info;                         // use CPU 0's per-proc
-    pfp = &pp->pf;                              // point to features in per-proc
+    pfp = &(PerProcTable[0].ppe_vaddr->pf);     // point to features in per-proc
     available = pfp->Available;
 
     // If AltiVec is disabled make sure it is not reported as available.
@@ -161,26 +299,29 @@ commpage_init_cpu_capabilities( void )
         _cpu_capabilities |= commpage_time_dcba();  // ...and set kDcbaRecommended if it helps.
     }
 
-    cpus = commpage_cpus();                     // how many CPUs do we have
+    cpus = commpage_cpus();                     // how many CPUs do we have
     if (cpus == 1)
         _cpu_capabilities |= kUP;
 
     _cpu_capabilities |= (cpus << kNumCPUsShift);
+
+    if (_cpu_capabilities & k64Bit)             // 64-bit processors use SPRG3 for TLS
+        _cpu_capabilities |= kFastThreadLocalStorage;
 }
 
 
 /* Copy data into commpage. */
 
-void
+static void
 commpage_stuff(
-    int     address,
-    void    *source,
-    int     length )
+    int         address,
+    const void  *source,
+    int         length )
 {
     char    *dest = commpage_addr_of(address);
 
     if (dest < next)
-        panic("commpage overlap: %08 - %08X", dest, next);
+        panic("commpage overlap: %p - %p", dest, next);
 
-    bcopy((char*)source,dest,length);
+    bcopy((const char*)source,dest,length);
 
     next = (dest + length);
 }
@@ -200,13 +341,11 @@ commpage_change(
 {
     int         words = bytes >> 2;
     uint32_t    word;
-    int         found_one = 0;
 
     while( (--words) >= 0 ) {
         word = *ptr;
         if ((word & search_mask)==search_pattern) {
             if ((check==NULL) || (check(word))) {   // check instruction if necessary
-                found_one = 1;
                 word &= ~new_mask;
                 word |= new_pattern;
                 *ptr = word;
@@ -214,9 +353,6 @@ commpage_change(
         }
         ptr++;
     }
-
-    if (!found_one)
-        panic("commpage opcode not found");
 }
 
 
@@ -235,7 +371,21 @@ commpage_onebit(
 }
 
 
+/* Check to see if a RLWINM (whose ME is 31) is a SRWI.  Since to shift right n bits
+ * you must "RLWINM ra,rs,32-n,n,31", if (SH+MB)==32 then we have a SRWI.
+ */
+static int
+commpage_srwi(
+    uint32_t rlwinm )
+{
+    int     sh = (rlwinm >> 11) & 0x1F;     // extract SH field of RLWINM, ie bits 16-20
+    int     mb = (rlwinm >> 6 ) & 0x1F;     // extract MB field of RLWINM, ie bits 21-25
+
+    return  (sh + mb) == 32;                // it is a SRWI if (SH+MB)==32
+}
+
+
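For reference, a standalone check of the SRWI rule that commpage_srwi() encodes, using real RLWINM encodings (illustrative, not part of the diff):

    #include <assert.h>
    #include <stdint.h>

    /* Same extraction as commpage_srwi() above: an RLWINM with ME=31 is a
     * "srwi ra,rs,n" exactly when SH+MB == 32. */
    static int is_srwi(uint32_t rlwinm) {
        int sh = (rlwinm >> 11) & 0x1F;     // SH field, instruction bits 16-20
        int mb = (rlwinm >>  6) & 0x1F;     // MB field, instruction bits 21-25
        return (sh + mb) == 32;
    }

    int main(void) {
        /* "srwi r3,r3,8" == "rlwinm r3,r3,24,8,31": SH=24, MB=8, SH+MB=32. */
        uint32_t srwi_r3_r3_8   = 0x5463C23E;
        /* "rlwinm r3,r3,8,0,31" is a plain rotate, not a shift: SH=8, MB=0. */
        uint32_t rotlwi_r3_r3_8 = 0x5463403E;
        assert(is_srwi(srwi_r3_r3_8));
        assert(!is_srwi(rotlwi_r3_r3_8));
        return 0;
    }

commpage_change() applies exactly this kind of match-and-patch arithmetic word by word, with commpage_srwi() passed as its check callback so that only true shifts get rewritten.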
-/* Handle kCommPageDCBA bit: this routine uses DCBA.  If the machine we're
+/* Handle kCommPageDCBA bit: the commpage routine uses DCBA.  If the machine we're
  * running on doesn't benefit from use of that instruction, map them to NOPs
  * in the commpage.
  */
@@ -246,7 +396,7 @@ commpage_handle_dcbas(
 {
     uint32_t    *ptr, search_mask, search, replace_mask, replace;
 
-    if ((_cpu_capabilities & kDcbaAvailable) == 0) {
+    if ( (_cpu_capabilities & kDcbaRecommended) == 0 ) {
         ptr = commpage_addr_of(address);
 
         search_mask =   0xFC0007FE;         // search x-form opcode bits
@@ -259,7 +409,7 @@ commpage_handle_dcbas(
 }
 
 
-/* Handle kCommPageSYNC bit: this routine uses SYNC or LWSYNC.  If we're
+/* Handle kCommPageSYNC bit: this routine uses SYNC, LWSYNC, or EIEIO.  If we're
  * running on a UP machine, map them to NOPs.
  */
 static void
@@ -272,8 +422,31 @@ commpage_handle_syncs(
     if (_NumCPUs() == 1) {
         ptr = commpage_addr_of(address);
 
-        search_mask =   0xFC0007FE;         // search x-form opcode bits
-        search =        0x7C0004AC;         // for a SYNC or LWSYNC
+        search_mask =   0xFC0005FE;         // search x-form opcode bits (but ignore bit 0x00000200)
+        search =        0x7C0004AC;         // for a SYNC, LWSYNC, or EIEIO
+        replace_mask =  0xFFFFFFFF;         // replace all bits...
+        replace =       0x60000000;         // ...with a NOP
+
+        commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
+    }
+}
+
+
+/* Handle kCommPageISYNC bit: this routine uses ISYNCs.  If we're running on a UP machine,
+ * map them to NOPs.
+ */
+static void
+commpage_handle_isyncs(
+    int address,
+    int length )
+{
+    uint32_t    *ptr, search_mask, search, replace_mask, replace;
+
+    if (_NumCPUs() == 1) {
+        ptr = commpage_addr_of(address);
+
+        search_mask =   0xFC0007FE;         // search xl-form opcode bits
+        search =        0x4C00012C;         // for an ISYNC
         replace_mask =  0xFFFFFFFF;         // replace all bits...
         replace =       0x60000000;         // ...with a NOP
 
         commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
     }
 }
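Why one search pattern catches all three barriers: a standalone sanity check of the mask arithmetic (illustrative, not part of the diff), using the PowerPC encodings of sync, lwsync, and eieio:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        /* PowerPC encodings: sync = 0x7C0004AC, lwsync = sync with bit
         * 0x00200000 set, eieio = 0x7C0006AC.  The mask 0xFC0005FE used by
         * commpage_handle_syncs() ignores both distinguishing bits, so a
         * single pattern matches all three. */
        uint32_t sync = 0x7C0004AC, lwsync = 0x7C2004AC, eieio = 0x7C0006AC;
        uint32_t search_mask = 0xFC0005FE, search = 0x7C0004AC;
        assert((sync   & search_mask) == search);
        assert((lwsync & search_mask) == search);
        assert((eieio  & search_mask) == search);
        /* isync (0x4C00012C) is xl-form and does not match, hence the
         * separate commpage_handle_isyncs() pass. */
        assert((0x4C00012C & search_mask) != search);
        return 0;
    }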
@@ -308,18 +481,70 @@ commpage_handle_mtcrfs(
 }
 
 
+/* Port 32-bit code to 64-bit for use in the 64-bit commpage.  This sounds fancier than
+ * it is.  We do the following:
+ *      - map "cmpw*" into "cmpd*"
+ *      - map "srwi" into "srdi"
+ * Perhaps surprisingly, this is enough to permit lots of code to run in 64-bit mode, as
+ * long as it is written with this in mind.
+ */
+static void
+commpage_port_32_to_64(
+    int address,
+    int length )
+{
+    uint32_t    *ptr, search_mask, search, replace_mask, replace;
+
+    ptr = commpage_addr_of(address);
+
+    search_mask =   0xFC2007FE;     // search x-form opcode bits (and L bit)
+    search =        0x7C000000;     // for a CMPW
+    replace_mask =  0x00200000;     // replace bit 10 (L)...
+    replace =       0x00200000;     // ...with a 1-bit, converting word to doubleword compares
+    commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
+
+    search_mask =   0xFC2007FE;     // search x-form opcode bits (and L bit)
+    search =        0x7C000040;     // for a CMPLW
+    replace_mask =  0x00200000;     // replace bit 10 (L)...
+    replace =       0x00200000;     // ...with a 1-bit, converting word to doubleword compares
+    commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
+
+    search_mask =   0xFC200000;     // search d-form opcode bits (and L bit)
+    search =        0x28000000;     // for a CMPLWI
+    replace_mask =  0x00200000;     // replace bit 10 (L)...
+    replace =       0x00200000;     // ...with a 1-bit, converting word to doubleword compares
+    commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
+
+    search_mask =   0xFC200000;     // search d-form opcode bits (and L bit)
+    search =        0x2C000000;     // for a CMPWI
+    replace_mask =  0x00200000;     // replace bit 10 (L)...
+    replace =       0x00200000;     // ...with a 1-bit, converting word to doubleword compares
+    commpage_change(ptr,length,search_mask,search,replace_mask,replace,NULL);
+
+    search_mask =   0xFC00003E;     // search d-form opcode bits and ME (mask end) field
+    search =        0x5400003E;     // for an RLWINM with ME=31 (which might be a "srwi")
+    replace_mask =  0xFC00003E;     // then replace RLWINM's opcode and ME field to make a RLDICL
+    replace =       0x78000002;     // opcode is 30, ME is 0, except we add 32 to SH amount
+    commpage_change(ptr,length,search_mask,search,replace_mask,replace,commpage_srwi);
+}
+
+
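A worked instance of the L-bit flip performed by the first commpage_change() call above, applied to a real cmpw encoding (illustrative, not part of the diff):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        /* "cmpw cr0,r3,r4" encodes as 0x7C032000.  Setting the L bit
         * (0x00200000), exactly as commpage_port_32_to_64() does, yields
         * 0x7C232000, which is "cmpd cr0,r3,r4" -- the doubleword compare. */
        uint32_t cmpw_r3_r4  = 0x7C032000;
        uint32_t search_mask = 0xFC2007FE, search  = 0x7C000000;
        uint32_t replace_mask = 0x00200000, replace = 0x00200000;

        assert((cmpw_r3_r4 & search_mask) == search);       // pattern matches
        uint32_t patched = (cmpw_r3_r4 & ~replace_mask) | replace;
        assert(patched == 0x7C232000);                      // now a cmpd
        return 0;
    }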
 /* Copy a routine into comm page if it matches running machine. */
 
 static void
 commpage_stuff_routine(
-    commpage_descriptor *rd )
+    commpage_descriptor *rd,
+    int                 mode )      // kCommPage32 or kCommPage64
 {
     char    *routine_code;
     int     must,cant;
+
+    if ( (rd->special & mode) == 0 )    // is this routine usable in this mode?
+        return;
 
     if (rd->commpage_address != cur_routine) {
         if ((cur_routine!=0) && (matched==0))
-            panic("commpage no match");
+            panic("commpage no match for last, next address %08x", rd->commpage_address);
         cur_routine = rd->commpage_address;
         matched = 0;
     }
@@ -329,7 +554,7 @@ commpage_stuff_routine(
 
     if ((must == rd->musthave) && (cant == 0)) {
         if (matched)
-            panic("commpage duplicate matches");
+            panic("commpage multiple matches for address %08x", rd->commpage_address);
         matched = 1;
 
         routine_code = ((char*)rd) + rd->code_offset;
@@ -341,38 +566,49 @@ commpage_stuff_routine(
 
         if (rd->special & kCommPageSYNC)
             commpage_handle_syncs(rd->commpage_address,rd->code_length);
 
+        if (rd->special & kCommPageISYNC)
+            commpage_handle_isyncs(rd->commpage_address,rd->code_length);
+
         if (rd->special & kCommPageMTCRF)
             commpage_handle_mtcrfs(rd->commpage_address,rd->code_length);
+
+        if ((mode == kCommPage64) && (rd->special & kPort32to64))
+            commpage_port_32_to_64(rd->commpage_address,rd->code_length);
     }
 }
 
 
-/* Fill in commpage: called once, during kernel initialization, from the
- * startup thread before user-mode code is running.
- * See the top of this file for a list of what you have to do to add
- * a new routine to the commpage.
- */
-void
-commpage_populate( void )
+/* Fill in the 32- or 64-bit commpage.  Called once for each. */
+
+static void
+commpage_populate_one(
+    vm_map_t    submap,             // the map to populate
+    char **     kernAddressPtr,     // address within kernel of this commpage
+    int         mode,               // either kCommPage32 or kCommPage64
+    const char* signature )         // "commpage 32-bit" or "commpage 64-bit"
 {
     char    c1;
     short   c2;
     addr64_t c8;
     static double two52 = 1048576.0 * 1048576.0 * 4096.0;  // 2**52
     static double ten6 = 1000000.0;                        // 10**6
+    static uint64_t magicFE = 0xFEFEFEFEFEFEFEFFLL;        // used to find 0s in strings
+    static uint64_t magic80 = 0x8080808080808080LL;        // also used to find 0s
     commpage_descriptor **rd;
     short   version = _COMM_PAGE_THIS_VERSION;
-
-    commPagePtr = (char*) commpage_allocate();
-
-    commpage_init_cpu_capabilities();
-
+
+    next = NULL;                    // initialize next available byte in the commpage
+    cur_routine = 0;                // initialize comm page address of "current" routine
+
+    commPagePtr = (char*) commpage_allocate( submap );
+    *kernAddressPtr = commPagePtr;  // save address either in commPagePtr32 or 64
 
     /* Stuff in the constants.  We move things into the comm page in strictly
      * ascending order, so we can check for overlap and panic if so.
     */
-
+
+    commpage_stuff(_COMM_PAGE_SIGNATURE,signature,strlen(signature));
+
     commpage_stuff(_COMM_PAGE_VERSION,&version,2);
 
     commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int));
 
@@ -391,100 +627,53 @@ commpage_populate( void )
     c2 = 128;
     commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);
 
-    commpage_stuff(_COMM_PAGE_2_TO_52,&two52,8);
-
+    commpage_stuff(_COMM_PAGE_2_TO_52,&two52,8);
     commpage_stuff(_COMM_PAGE_10_TO_6,&ten6,8);
+    commpage_stuff(_COMM_PAGE_MAGIC_FE,&magicFE,8);
+    commpage_stuff(_COMM_PAGE_MAGIC_80,&magic80,8);
 
     c8 = 0;                                     // 0 timestamp means "disabled"
     commpage_stuff(_COMM_PAGE_TIMEBASE,&c8,8);
     commpage_stuff(_COMM_PAGE_TIMESTAMP,&c8,8);
     commpage_stuff(_COMM_PAGE_SEC_PER_TICK,&c8,8);
 
-
     /* Now the routines.  We try each potential routine in turn,
      * and copy in any that "match" the platform we are running on.
      * We require that exactly one routine match for each slot in the
      * comm page, and panic if not.
-     *
-     * The check for overlap assumes that these routines are
-     * in strictly ascending order, sorted by address in the
-     * comm page.
      */
-
-    extern  commpage_descriptor mach_absolute_time_32;
-    extern  commpage_descriptor mach_absolute_time_64;
-    extern  commpage_descriptor spinlock_32_try_mp;
-    extern  commpage_descriptor spinlock_32_try_up;
-    extern  commpage_descriptor spinlock_64_try_mp;
-    extern  commpage_descriptor spinlock_64_try_up;
-    extern  commpage_descriptor spinlock_32_lock_mp;
-    extern  commpage_descriptor spinlock_32_lock_up;
-    extern  commpage_descriptor spinlock_64_lock_mp;
-    extern  commpage_descriptor spinlock_64_lock_up;
-    extern  commpage_descriptor spinlock_32_unlock_mp;
-    extern  commpage_descriptor spinlock_32_unlock_up;
-    extern  commpage_descriptor spinlock_64_unlock_mp;
-    extern  commpage_descriptor spinlock_64_unlock_up;
-    extern  commpage_descriptor pthread_getspecific_sprg3;
-    extern  commpage_descriptor pthread_getspecific_uftrap;
-    extern  commpage_descriptor gettimeofday_32;
-    extern  commpage_descriptor gettimeofday_64;
-    extern  commpage_descriptor commpage_flush_dcache;
-    extern  commpage_descriptor commpage_flush_icache;
-    extern  commpage_descriptor pthread_self_sprg3;
-    extern  commpage_descriptor pthread_self_uftrap;
-    extern  commpage_descriptor spinlock_relinquish;
-    extern  commpage_descriptor bzero_32;
-    extern  commpage_descriptor bzero_128;
-    extern  commpage_descriptor bcopy_g3;
-    extern  commpage_descriptor bcopy_g4;
-    extern  commpage_descriptor bcopy_970;
-    extern  commpage_descriptor bcopy_64;
-    extern  commpage_descriptor bigcopy_970;
-
-    static  commpage_descriptor *routines[] = {
-        &mach_absolute_time_32,
-        &mach_absolute_time_64,
-        &spinlock_32_try_mp,
-        &spinlock_32_try_up,
-        &spinlock_64_try_mp,
-        &spinlock_64_try_up,
-        &spinlock_32_lock_mp,
-        &spinlock_32_lock_up,
-        &spinlock_64_lock_mp,
-        &spinlock_64_lock_up,
-        &spinlock_32_unlock_mp,
-        &spinlock_32_unlock_up,
-        &spinlock_64_unlock_mp,
-        &spinlock_64_unlock_up,
-        &pthread_getspecific_sprg3,
-        &pthread_getspecific_uftrap,
-        &gettimeofday_32,
-        &gettimeofday_64,
-        &commpage_flush_dcache,
-        &commpage_flush_icache,
-        &pthread_self_sprg3,
-        &pthread_self_uftrap,
-        &spinlock_relinquish,
-        &bzero_32,
-        &bzero_128,
-        &bcopy_g3,
-        &bcopy_g4,
-        &bcopy_970,
-        &bcopy_64,
-        &bigcopy_970,
-        NULL };
 
     for( rd = routines; *rd != NULL ; rd++ )
-        commpage_stuff_routine(*rd);
+        commpage_stuff_routine(*rd,mode);
 
     if (!matched)
         panic("commpage no match on last routine");
 
     if (next > (commPagePtr + _COMM_PAGE_AREA_USED))
         panic("commpage overflow");
-
-    sync_cache_virtual((vm_offset_t) commPagePtr,_COMM_PAGE_AREA_USED);     // make all that new code executable
-
+
+
+    // make all that new code executable
+
+    sync_cache_virtual((vm_offset_t) commPagePtr,_COMM_PAGE_AREA_USED);
 }
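The comments above say magicFE and magic80 are "used to find 0s in strings": they feed the classic SWAR zero-byte test, presumably used by the commpage string routines (their assembly lives elsewhere). A standalone check of the underlying arithmetic (illustrative, not part of the diff) -- note that magicFE is the two's complement of 0x0101010101010101:

    #include <assert.h>
    #include <stdint.h>

    /* A byte of x is zero iff (x + magicFE) & ~x & magic80 is nonzero,
     * because adding magicFE is the same as subtracting 0x0101...01. */
    static int has_zero_byte(uint64_t x) {
        uint64_t magicFE = 0xFEFEFEFEFEFEFEFFULL;
        uint64_t magic80 = 0x8080808080808080ULL;
        return ((x + magicFE) & ~x & magic80) != 0;
    }

    int main(void) {
        assert(has_zero_byte(0x1122330044556677ULL));   // contains a 0x00 byte
        assert(!has_zero_byte(0x1122334455667788ULL));  // no zero bytes
        return 0;
    }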
 
 
+/* Fill in commpage: called once, during kernel initialization, from the
+ * startup thread before user-mode code is running.
+ *
+ * See the top of this file for a list of what you have to do to add
+ * a new routine to the commpage.
+ */
+
+void
+commpage_populate( void )
+{
+    commpage_init_cpu_capabilities();
+
+    commpage_populate_one( commpage32_map, &commPagePtr32, kCommPage32, "commpage 32-bit");
+    if (_cpu_capabilities & k64Bit) {
+        commpage_populate_one( commpage64_map, &commPagePtr64, kCommPage64, "commpage 64-bit");
+        pmap_init_sharedpage((vm_offset_t)commPagePtr64);   // Do the 64-bit version
+    }
+
+}
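For context on what this buys user space: once commpage_populate() has run, Libc reaches the routines just stuffed in by jumping to their fixed _COMM_PAGE_* addresses. A loose, hypothetical C rendering of that idea (the real callers are assembly stubs, the real addresses come from cpu_capabilities.h, and the address below is a placeholder, not the real slot):

    #include <stdint.h>

    /* Hypothetical placeholder -- NOT the real slot address. */
    #define HYPOTHETICAL_COMM_PAGE_ABSOLUTE_TIME 0xFFFF8200UL

    typedef uint64_t (*commpage_time_fn)(void);

    static inline uint64_t commpage_mach_absolute_time(void) {
        /* Jump through the fixed, kernel-populated commpage slot. */
        return ((commpage_time_fn)HYPOTHETICAL_COMM_PAGE_ABSOLUTE_TIME)();
    }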