/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Here's what to do if you want to add a new routine to the comm page:
 *
 *    1. Add a definition for its address in osfmk/ppc/cpu_capabilities.h,
 *       being careful to reserve room for future expansion.
 *
 *    2. Write one or more versions of the routine, each with its own
 *       commpage_descriptor.  The tricky part is getting the "special",
 *       "musthave", and "canthave" fields right, so that exactly one
 *       version of the routine is selected for every machine.
 *       The source files should be in osfmk/ppc/commpage/.
 *
 *    3. Add a ptr to your new commpage_descriptor(s) in the "routines"
 *       static array below.  Of course, you'll also have to declare them
 *       extern.
 *
 *    4. Write the code in Libc to use the new routine.
 */
48 #include <mach/mach_types.h>
49 #include <mach/machine.h>
50 #include <mach/vm_map.h>
51 #include <ppc/exception.h>
52 #include <ppc/machine_routines.h>
53 #include <machine/cpu_capabilities.h>
54 #include <machine/commpage.h>
55 #include <machine/pmap.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_map.h>
58 #include <ipc/ipc_port.h>
60 extern vm_map_t commpage32_map
; // the 32-bit shared submap, set up in vm init
61 extern vm_map_t commpage64_map
; // the 64-bit shared submap
63 char *commPagePtr32
= NULL
; // virtual address of 32-bit comm page in kernel map
64 char *commPagePtr64
= NULL
; // and 64-bit commpage
65 int _cpu_capabilities
= 0; // define the capability vector
67 static char *next
; // next available byte in comm page
68 static int cur_routine
; // comm page address of "current" routine
69 static int matched
; // true if we've found a match for "current" routine
70 static char *commPagePtr
; // virtual address in kernel of commpage we are working on
72 extern commpage_descriptor compare_and_swap32_on32
;
73 extern commpage_descriptor compare_and_swap32_on64
;
74 extern commpage_descriptor compare_and_swap64
;
75 extern commpage_descriptor atomic_enqueue32
;
76 extern commpage_descriptor atomic_enqueue64
;
77 extern commpage_descriptor atomic_dequeue32_on32
;
78 extern commpage_descriptor atomic_dequeue32_on64
;
79 extern commpage_descriptor atomic_dequeue64
;
80 extern commpage_descriptor memory_barrier_up
;
81 extern commpage_descriptor memory_barrier_mp32
;
82 extern commpage_descriptor memory_barrier_mp64
;
83 extern commpage_descriptor atomic_add32
;
84 extern commpage_descriptor atomic_add64
;
85 extern commpage_descriptor mach_absolute_time_32
;
86 extern commpage_descriptor mach_absolute_time_64
;
87 extern commpage_descriptor mach_absolute_time_lp64
;
88 extern commpage_descriptor spinlock_32_try_mp
;
89 extern commpage_descriptor spinlock_32_try_up
;
90 extern commpage_descriptor spinlock_64_try_mp
;
91 extern commpage_descriptor spinlock_64_try_up
;
92 extern commpage_descriptor spinlock_32_lock_mp
;
93 extern commpage_descriptor spinlock_32_lock_up
;
94 extern commpage_descriptor spinlock_64_lock_mp
;
95 extern commpage_descriptor spinlock_64_lock_up
;
96 extern commpage_descriptor spinlock_32_unlock_mp
;
97 extern commpage_descriptor spinlock_32_unlock_up
;
98 extern commpage_descriptor spinlock_64_unlock_mp
;
99 extern commpage_descriptor spinlock_64_unlock_up
;
100 extern commpage_descriptor pthread_getspecific_sprg3_32
;
101 extern commpage_descriptor pthread_getspecific_sprg3_64
;
102 extern commpage_descriptor pthread_getspecific_uftrap
;
103 extern commpage_descriptor gettimeofday_32
;
104 extern commpage_descriptor gettimeofday_g5_32
;
105 extern commpage_descriptor gettimeofday_g5_64
;
106 extern commpage_descriptor commpage_flush_dcache
;
107 extern commpage_descriptor commpage_flush_icache
;
108 extern commpage_descriptor pthread_self_sprg3
;
109 extern commpage_descriptor pthread_self_uftrap
;
110 extern commpage_descriptor spinlock_relinquish
;
111 extern commpage_descriptor bzero_32
;
112 extern commpage_descriptor bzero_128
;
113 extern commpage_descriptor bcopy_g3
;
114 extern commpage_descriptor bcopy_g4
;
115 extern commpage_descriptor bcopy_970
;
116 extern commpage_descriptor bcopy_64
;
117 extern commpage_descriptor compare_and_swap32_on32b
;
118 extern commpage_descriptor compare_and_swap32_on64b
;
119 extern commpage_descriptor compare_and_swap64b
;
120 extern commpage_descriptor memset_64
;
121 extern commpage_descriptor memset_g3
;
122 extern commpage_descriptor memset_g4
;
123 extern commpage_descriptor memset_g5
;
124 extern commpage_descriptor bigcopy_970
;
126 /* The list of all possible commpage routines. WARNING: the check for overlap
127 * assumes that these routines are in strictly ascending order, sorted by address
128 * in the commpage. We panic if not.
130 static commpage_descriptor
*routines
[] = {
131 &compare_and_swap32_on32
,
132 &compare_and_swap32_on64
,
136 &atomic_dequeue32_on32
,
137 &atomic_dequeue32_on64
,
140 &memory_barrier_mp32
,
141 &memory_barrier_mp64
,
144 &mach_absolute_time_32
,
145 &mach_absolute_time_64
,
146 &mach_absolute_time_lp64
,
151 &spinlock_32_lock_mp
,
152 &spinlock_32_lock_up
,
153 &spinlock_64_lock_mp
,
154 &spinlock_64_lock_up
,
155 &spinlock_32_unlock_mp
,
156 &spinlock_32_unlock_up
,
157 &spinlock_64_unlock_mp
,
158 &spinlock_64_unlock_up
,
159 &pthread_getspecific_sprg3_32
,
160 &pthread_getspecific_sprg3_64
,
161 &pthread_getspecific_uftrap
,
165 &commpage_flush_dcache
,
166 &commpage_flush_icache
,
168 &pthread_self_uftrap
,
169 &spinlock_relinquish
,
176 &compare_and_swap32_on32b
,
177 &compare_and_swap32_on64b
,
178 &compare_and_swap64b
,
187 /* Allocate the commpages and add to one of the shared submaps created by vm.
188 * Called once each for the 32 and 64-bit submaps.
189 * 1. allocate pages in the kernel map (RW)
191 * 3. make a memory entry out of them
192 * 4. map that entry into the shared comm region map (R-only)
196 vm_map_t submap
) // commpage32_map or commpage64_map
198 vm_offset_t kernel_addr
= 0; // address of commpage in kernel map
199 vm_offset_t zero
= 0;
200 vm_size_t size
= _COMM_PAGE_AREA_USED
; // size actually populated
201 vm_map_entry_t entry
;
205 panic("commpage submap is null");
207 if (vm_map(kernel_map
,&kernel_addr
,_COMM_PAGE_AREA_USED
,0,VM_FLAGS_ANYWHERE
,NULL
,0,FALSE
,VM_PROT_ALL
,VM_PROT_ALL
,VM_INHERIT_NONE
))
208 panic("cannot allocate commpage");
210 if (vm_map_wire(kernel_map
,kernel_addr
,kernel_addr
+_COMM_PAGE_AREA_USED
,VM_PROT_DEFAULT
,FALSE
))
211 panic("cannot wire commpage");
214 * Now that the object is created and wired into the kernel map, mark it so that no delay
215 * copy-on-write will ever be performed on it as a result of mapping it into user-space.
216 * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
217 * that would be a real disaster.
219 * JMM - What we really need is a way to create it like this in the first place.
221 if (!vm_map_lookup_entry( kernel_map
, vm_map_trunc_page(kernel_addr
), &entry
) || entry
->is_sub_map
)
222 panic("cannot find commpage entry");
223 entry
->object
.vm_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
225 if (mach_make_memory_entry( kernel_map
, // target map
227 kernel_addr
, // offset (address in kernel map)
228 VM_PROT_ALL
, // map it RWX
229 &handle
, // this is the object handle we get
230 NULL
)) // parent_entry
231 panic("cannot make entry for commpage");
233 if (vm_map_64( submap
, // target map (shared submap)
234 &zero
, // address (map into 1st page in submap)
235 _COMM_PAGE_AREA_USED
, // size
237 VM_FLAGS_FIXED
, // flags (it must be 1st page in submap)
238 handle
, // port is the memory entry we just made
239 0, // offset (map 1st page in memory entry)
241 VM_PROT_READ
|VM_PROT_EXECUTE
, // cur_protection (R-only in user map)
242 VM_PROT_READ
|VM_PROT_EXECUTE
, // max_protection
243 VM_INHERIT_SHARE
)) // inheritance
244 panic("cannot map commpage");
246 ipc_port_release(handle
);
248 return (void*) kernel_addr
; // return address in kernel map
252 /* Get address (in kernel map) of a commpage field. */
256 int addr_at_runtime
)
258 return (void*) (commPagePtr
+ addr_at_runtime
- _COMM_PAGE_BASE_ADDRESS
);
/* Determine number of CPUs on this system.  We cannot rely on
 * machine_info.max_cpus this early in the boot.
 */
static int
commpage_cpus( void )
{
    int cpus;

    cpus = ml_get_max_cpus();   // NB: this call can block

    if (cpus == 0)              // NOTE(review): guard reconstructed from the panic text — confirm
        panic("commpage cpus==0");

    return cpus;
}
281 /* Initialize kernel version of _cpu_capabilities vector (used by KEXTs.) */
284 commpage_init_cpu_capabilities( void )
290 pfp
= &(PerProcTable
[0].ppe_vaddr
->pf
); // point to features in per-proc
291 available
= pfp
->Available
;
293 // If AltiVec is disabled make sure it is not reported as available.
294 if ((available
& pfAltivec
) == 0) {
295 _cpu_capabilities
&= ~kHasAltivec
;
298 if (_cpu_capabilities
& kDcbaAvailable
) { // if this processor has DCBA, time it...
299 _cpu_capabilities
|= commpage_time_dcba(); // ...and set kDcbaRecomended if it helps.
302 cpus
= commpage_cpus(); // how many CPUs do we have
303 if (cpus
== 1) _cpu_capabilities
|= kUP
;
304 _cpu_capabilities
|= (cpus
<< kNumCPUsShift
);
306 if (_cpu_capabilities
& k64Bit
) // 64-bit processors use SPRG3 for TLS
307 _cpu_capabilities
|= kFastThreadLocalStorage
;
311 /* Copy data into commpage. */
319 char *dest
= commpage_addr_of(address
);
322 panic("commpage overlap: %p - %p", dest
, next
);
324 bcopy((const char*)source
,dest
,length
);
326 next
= (dest
+ length
);
330 /* Modify commpage code in-place for this specific platform. */
336 uint32_t search_mask
,
337 uint32_t search_pattern
,
339 uint32_t new_pattern
,
340 int (*check
)(uint32_t instruction
) )
342 int words
= bytes
>> 2;
345 while( (--words
) >= 0 ) {
347 if ((word
& search_mask
)==search_pattern
) {
348 if ((check
==NULL
) || (check(word
))) { // check instruction if necessary
/* Check to see if exactly one bit is set in a MTCRF instruction's FXM field.
 * Used as a `check` callback for commpage_change().  Returns 1 iff exactly
 * one bit is set.
 */
static int
commpage_onebit( uint32_t mtcrf )
{
    int x = (mtcrf >> 12) & 0xFF;   // isolate the FXM field of the MTCRF

    if (x == 0)                     // NOTE(review): guard reconstructed — an FXM of 0 is malformed
        panic("commpage bad mtcrf");

    return (x & (x-1))==0 ? 1 : 0;  // return 1 iff exactly 1 bit set in FXM field
}
/* Check to see if a RLWINM (whose ME is 31) is a SRWI.  Since to shift right n bits
 * you must "RLWINM ra,rs,32-n,n,31", if (SH+MB)==32 then we have a SRWI.
 * Used as a `check` callback for commpage_change().
 */
static int
commpage_srwi( uint32_t rlwinm )
{
    int sh = (rlwinm >> 11) & 0x1F; // extract SH field of RLWINM, ie bits 16-20
    int mb = (rlwinm >> 6 ) & 0x1F; // extract MB field of RLWINM, ie bits 21-25

    return (sh + mb) == 32;         // it is a SRWI if (SH+MB)==32
}
388 /* Handle kCommPageDCBA bit: the commpage routine uses DCBA. If the machine we're
389 * running on doesn't benefit from use of that instruction, map them to NOPs
393 commpage_handle_dcbas(
397 uint32_t *ptr
, search_mask
, search
, replace_mask
, replace
;
399 if ( (_cpu_capabilities
& kDcbaRecommended
) == 0 ) {
400 ptr
= commpage_addr_of(address
);
402 search_mask
= 0xFC0007FE; // search x-form opcode bits
403 search
= 0x7C0005EC; // for a DCBA
404 replace_mask
= 0xFFFFFFFF; // replace all bits...
405 replace
= 0x60000000; // ...with a NOP
407 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,NULL
);
412 /* Handle kCommPageSYNC bit: this routine uses SYNC, LWSYNC, or EIEIO. If we're
413 * running on a UP machine, map them to NOPs.
416 commpage_handle_syncs(
420 uint32_t *ptr
, search_mask
, search
, replace_mask
, replace
;
422 if (_NumCPUs() == 1) {
423 ptr
= commpage_addr_of(address
);
425 search_mask
= 0xFC0005FE; // search x-form opcode bits (but ignore bit 0x00000200)
426 search
= 0x7C0004AC; // for a SYNC, LWSYNC, or EIEIO
427 replace_mask
= 0xFFFFFFFF; // replace all bits...
428 replace
= 0x60000000; // ...with a NOP
430 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,NULL
);
435 /* Handle kCommPageISYNC bit: this routine uses ISYNCs. If we're running on a UP machine,
439 commpage_handle_isyncs(
443 uint32_t *ptr
, search_mask
, search
, replace_mask
, replace
;
445 if (_NumCPUs() == 1) {
446 ptr
= commpage_addr_of(address
);
448 search_mask
= 0xFC0007FE; // search xl-form opcode bits
449 search
= 0x4C00012C; // for an ISYNC
450 replace_mask
= 0xFFFFFFFF; // replace all bits...
451 replace
= 0x60000000; // ...with a NOP
453 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,NULL
);
458 /* Handle kCommPageMTCRF bit. When this was written (3/03), the assembler did not
459 * recognize the special form of MTCRF instructions, in which exactly one bit is set
460 * in the 8-bit mask field. Bit 11 of the instruction should be set in this case,
461 * since the 970 and probably other 64-bit processors optimize it. Once the assembler
462 * has been updated this code can be removed, though it need not be.
465 commpage_handle_mtcrfs(
469 uint32_t *ptr
, search_mask
, search
, replace_mask
, replace
;
471 if (_cpu_capabilities
& k64Bit
) {
472 ptr
= commpage_addr_of(address
);
474 search_mask
= 0xFC0007FE; // search x-form opcode bits
475 search
= 0x7C000120; // for a MTCRF
476 replace_mask
= 0x00100000; // replace bit 11...
477 replace
= 0x00100000; // ...with a 1-bit
479 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,commpage_onebit
);
484 /* Port 32-bit code to 64-bit for use in the 64-bit commpage. This sounds fancier than
485 * it is. We do the following:
486 * - map "cmpw*" into "cmpd*"
487 * - map "srwi" into "srdi"
488 * Perhaps surprisingly, this is enough to permit lots of code to run in 64-bit mode, as
489 * long as it is written with this in mind.
492 commpage_port_32_to_64(
496 uint32_t *ptr
, search_mask
, search
, replace_mask
, replace
;
498 ptr
= commpage_addr_of(address
);
500 search_mask
= 0xFC2007FE; // search x-form opcode bits (and L bit)
501 search
= 0x7C000000; // for a CMPW
502 replace_mask
= 0x00200000; // replace bit 10 (L)...
503 replace
= 0x00200000; // ...with a 1-bit, converting word to doubleword compares
504 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,NULL
);
506 search_mask
= 0xFC2007FE; // search x-form opcode bits (and L bit)
507 search
= 0x7C000040; // for a CMPLW
508 replace_mask
= 0x00200000; // replace bit 10 (L)...
509 replace
= 0x00200000; // ...with a 1-bit, converting word to doubleword compares
510 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,NULL
);
512 search_mask
= 0xFC200000; // search d-form opcode bits (and L bit)
513 search
= 0x28000000; // for a CMPLWI
514 replace_mask
= 0x00200000; // replace bit 10 (L)...
515 replace
= 0x00200000; // ...with a 1-bit, converting word to doubleword compares
516 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,NULL
);
518 search_mask
= 0xFC200000; // search d-form opcode bits (and L bit)
519 search
= 0x2C000000; // for a CMPWI
520 replace_mask
= 0x00200000; // replace bit 10 (L)...
521 replace
= 0x00200000; // ...with a 1-bit, converting word to doubleword compares
522 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,NULL
);
524 search_mask
= 0xFC00003E; // search d-form opcode bits and ME (mask end) field
525 search
= 0x5400003E; // for an RLWINM with ME=31 (which might be a "srwi")
526 replace_mask
= 0xFC00003E; // then replace RLWINM's opcode and ME field to make a RLDICL
527 replace
= 0x78000002; // opcode is 30, ME is 0, except we add 32 to SH amount
528 commpage_change(ptr
,length
,search_mask
,search
,replace_mask
,replace
,commpage_srwi
);
532 /* Copy a routine into comm page if it matches running machine.
535 commpage_stuff_routine(
536 commpage_descriptor
*rd
,
537 int mode
) // kCommPage32 or kCommPage64
542 if ( (rd
->special
& mode
) == 0 ) // is this routine useable in this mode?
545 if (rd
->commpage_address
!= cur_routine
) {
546 if ((cur_routine
!=0) && (matched
==0))
547 panic("commpage no match for last, next address %08x", rd
->commpage_address
);
548 cur_routine
= rd
->commpage_address
;
552 must
= _cpu_capabilities
& rd
->musthave
;
553 cant
= _cpu_capabilities
& rd
->canthave
;
555 if ((must
== rd
->musthave
) && (cant
== 0)) {
557 panic("commpage multiple matches for address %08x", rd
->commpage_address
);
559 routine_code
= ((char*)rd
) + rd
->code_offset
;
561 commpage_stuff(rd
->commpage_address
,routine_code
,rd
->code_length
);
563 if (rd
->special
& kCommPageDCBA
)
564 commpage_handle_dcbas(rd
->commpage_address
,rd
->code_length
);
566 if (rd
->special
& kCommPageSYNC
)
567 commpage_handle_syncs(rd
->commpage_address
,rd
->code_length
);
569 if (rd
->special
& kCommPageISYNC
)
570 commpage_handle_isyncs(rd
->commpage_address
,rd
->code_length
);
572 if (rd
->special
& kCommPageMTCRF
)
573 commpage_handle_mtcrfs(rd
->commpage_address
,rd
->code_length
);
575 if ((mode
== kCommPage64
) && (rd
->special
& kPort32to64
))
576 commpage_port_32_to_64(rd
->commpage_address
,rd
->code_length
);
581 /* Fill in the 32- or 64-bit commpage. Called once for each. */
584 commpage_populate_one(
585 vm_map_t submap
, // the map to populate
586 char ** kernAddressPtr
, // address within kernel of this commpage
587 int mode
, // either kCommPage32 or kCommPage64
588 const char* signature
) // "commpage 32-bit" or "commpage 64-bit"
593 static double two52
= 1048576.0 * 1048576.0 * 4096.0; // 2**52
594 static double ten6
= 1000000.0; // 10**6
595 static uint64_t magicFE
= 0xFEFEFEFEFEFEFEFFLL
; // used to find 0s in strings
596 static uint64_t magic80
= 0x8080808080808080LL
; // also used to find 0s
597 commpage_descriptor
**rd
;
598 short version
= _COMM_PAGE_THIS_VERSION
;
600 next
= NULL
; // initialize next available byte in the commpage
601 cur_routine
= 0; // initialize comm page address of "current" routine
603 commPagePtr
= (char*) commpage_allocate( submap
);
604 *kernAddressPtr
= commPagePtr
; // save address either in commPagePtr32 or 64
606 /* Stuff in the constants. We move things into the comm page in strictly
607 * ascending order, so we can check for overlap and panic if so.
610 commpage_stuff(_COMM_PAGE_SIGNATURE
,signature
,strlen(signature
));
612 commpage_stuff(_COMM_PAGE_VERSION
,&version
,2);
614 commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES
,&_cpu_capabilities
,sizeof(int));
616 c1
= (_cpu_capabilities
& kHasAltivec
) ? -1 : 0;
617 commpage_stuff(_COMM_PAGE_ALTIVEC
,&c1
,1);
619 c1
= (_cpu_capabilities
& k64Bit
) ? -1 : 0;
620 commpage_stuff(_COMM_PAGE_64_BIT
,&c1
,1);
622 if (_cpu_capabilities
& kCache32
)
624 else if (_cpu_capabilities
& kCache64
)
626 else if (_cpu_capabilities
& kCache128
)
628 commpage_stuff(_COMM_PAGE_CACHE_LINESIZE
,&c2
,2);
630 commpage_stuff(_COMM_PAGE_2_TO_52
,&two52
,8);
631 commpage_stuff(_COMM_PAGE_10_TO_6
,&ten6
,8);
632 commpage_stuff(_COMM_PAGE_MAGIC_FE
,&magicFE
,8);
633 commpage_stuff(_COMM_PAGE_MAGIC_80
,&magic80
,8);
635 c8
= 0; // 0 timestamp means "disabled"
636 commpage_stuff(_COMM_PAGE_TIMEBASE
,&c8
,8);
637 commpage_stuff(_COMM_PAGE_TIMESTAMP
,&c8
,8);
638 commpage_stuff(_COMM_PAGE_SEC_PER_TICK
,&c8
,8);
640 /* Now the routines. We try each potential routine in turn,
641 * and copy in any that "match" the platform we are running on.
642 * We require that exactly one routine match for each slot in the
643 * comm page, and panic if not.
646 for( rd
= routines
; *rd
!= NULL
; rd
++ )
647 commpage_stuff_routine(*rd
,mode
);
650 panic("commpage no match on last routine");
652 if (next
> (commPagePtr
+ _COMM_PAGE_AREA_USED
))
653 panic("commpage overflow");
656 // make all that new code executable
658 sync_cache_virtual((vm_offset_t
) commPagePtr
,_COMM_PAGE_AREA_USED
);
662 /* Fill in commpage: called once, during kernel initialization, from the
663 * startup thread before user-mode code is running.
665 * See the top of this file for a list of what you have to do to add
666 * a new routine to the commpage.
670 commpage_populate( void )
672 commpage_init_cpu_capabilities();
673 commpage_populate_one( commpage32_map
, &commPagePtr32
, kCommPage32
, "commpage 32-bit");
674 if (_cpu_capabilities
& k64Bit
) {
675 commpage_populate_one( commpage64_map
, &commPagePtr64
, kCommPage64
, "commpage 64-bit");
676 pmap_init_sharedpage((vm_offset_t
)commPagePtr64
); // Do the 64-bit version