X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d1ecb069dfe24481e4a83f44cb5217a2b06746d7..3903760236c30e3b5ace7a4eefac3a269d68957c:/osfmk/i386/commpage/commpage.c

diff --git a/osfmk/i386/commpage/commpage.c b/osfmk/i386/commpage/commpage.c
index 53030645b..6dae08567 100644
--- a/osfmk/i386/commpage/commpage.c
+++ b/osfmk/i386/commpage/commpage.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -52,7 +52,7 @@
 #include <mach/machine.h>
 #include <i386/cpuid.h>
 #include <i386/tsc.h>
-#include <i386/rtclock.h>
+#include <i386/rtclock_protos.h>
 #include <i386/cpu_data.h>
 #include <i386/machine_routines.h>
 #include <i386/misc_protos.h>
@@ -66,29 +66,34 @@
 #include <ipc/ipc_port.h>
 
 #include <kern/page_decrypt.h>
+#include <kern/processor.h>
+
+#include <sys/kdebug.h>
+
+#if CONFIG_ATM
+#include <atm/atm_internal.h>
+#endif
 
 /* the lists of commpage routines are in commpage_asm.s  */
 extern	commpage_descriptor*	commpage_32_routines[];
 extern	commpage_descriptor*	commpage_64_routines[];
 
-/* translated commpage descriptors from commpage_sigs.c  */
-extern	commpage_descriptor sigdata_descriptor;
-extern	commpage_descriptor *ba_descriptors[];
-
 extern vm_map_t	commpage32_map;	// the shared submap, set up in vm init
 extern vm_map_t	commpage64_map;	// the shared submap, set up in vm init
+extern vm_map_t	commpage_text32_map;	// the shared submap, set up in vm init
+extern vm_map_t	commpage_text64_map;	// the shared submap, set up in vm init
+
 
 char	*commPagePtr32 = NULL;		// virtual addr in kernel map of 32-bit commpage
 char	*commPagePtr64 = NULL;		// ...and of 64-bit commpage
-int     _cpu_capabilities = 0;          // define the capability vector
+char	*commPageTextPtr32 = NULL;	// virtual addr in kernel map of 32-bit commpage
+char	*commPageTextPtr64 = NULL;	// ...and of 64-bit commpage
 
-int	noVMX = 0;		/* if true, do not set kHasAltivec in ppc _cpu_capabilities */
+uint64_t     _cpu_capabilities = 0;     // define the capability vector
 
 typedef uint32_t commpage_address_t;
 
-static commpage_address_t	next;			// next available address in comm page
-static commpage_address_t	cur_routine;		// comm page address of "current" routine
-static boolean_t		matched;		// true if we've found a match for "current" routine
+static commpage_address_t	next;	// next available address in comm page
 
 static char    *commPagePtr;		// virtual addr in kernel map of commpage we are working on
 static commpage_address_t	commPageBaseOffset; // subtract from 32-bit runtime address to get offset in virtual commpage in kernel map
@@ -96,6 +101,8 @@ static commpage_address_t	commPageBaseOffset; // subtract from 32-bit runtime ad
 static	commpage_time_data	*time_data32 = NULL;
 static	commpage_time_data	*time_data64 = NULL;
 
+decl_simple_lock_data(static,commpage_active_cpus_lock);
+
 /* Allocate the commpage and add to the shared submap created by vm:
  * 	1. allocate a page in the kernel map (RW)
  *	2. wire it down
@@ -106,22 +113,38 @@ static	commpage_time_data	*time_data64 = NULL;
 static  void*
 commpage_allocate( 
	vm_map_t	submap,			// commpage32_map or commpage64_map
-	size_t		area_used )		// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
+	size_t		area_used,		// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
+	vm_prot_t	uperm)
 {
 	vm_offset_t	kernel_addr = 0;	// address of commpage in kernel map
 	vm_offset_t	zero = 0;
 	vm_size_t	size = area_used;	// size actually populated
 	vm_map_entry_t	entry;
 	ipc_port_t	handle;
+	kern_return_t	kr;
 
 	if (submap == NULL)
 		panic("commpage submap is null");
 
-	if (vm_map(kernel_map,&kernel_addr,area_used,0,VM_FLAGS_ANYWHERE,NULL,0,FALSE,VM_PROT_ALL,VM_PROT_ALL,VM_INHERIT_NONE))
-		panic("cannot allocate commpage");
-
-	if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+area_used,VM_PROT_DEFAULT,FALSE))
-		panic("cannot wire commpage");
+	if ((kr = vm_map(kernel_map,
+			 &kernel_addr,
+			 area_used,
+			 0,
+			 VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
+			 NULL,
+			 0,
+			 FALSE,
+			 VM_PROT_ALL,
+			 VM_PROT_ALL,
+			 VM_INHERIT_NONE)))
+		panic("cannot allocate commpage %d", kr);
+
+	if ((kr = vm_map_wire(kernel_map,
+			      kernel_addr,
+			      kernel_addr+area_used,
+			      VM_PROT_DEFAULT|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
+			      FALSE)))
+		panic("cannot wire commpage: %d", kr);
 
 	/* 
 	 * Now that the object is created and wired into the kernel map, mark it so that no delay
@@ -131,19 +154,19 @@ commpage_allocate(
 	 *
 	 * JMM - What we really need is a way to create it like this in the first place.
 	 */
-	if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr), &entry) || entry->is_sub_map)
-		panic("cannot find commpage entry");
-	entry->object.vm_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+	if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map)
+		panic("cannot find commpage entry");
+	VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE;
 
-	if (mach_make_memory_entry( kernel_map,		// target map
+	if ((kr = mach_make_memory_entry( kernel_map,		// target map
 				    &size,		// size 
 				    kernel_addr,	// offset (address in kernel map)
-				    VM_PROT_ALL,	// map it RWX
+				    uperm,	// protections as specified
 				    &handle,		// this is the object handle we get
-				    NULL ))		// parent_entry (what is this?)
-		panic("cannot make entry for commpage");
+				    NULL )))		// parent_entry (what is this?)
+		panic("cannot make entry for commpage %d", kr);
 
-	if (vm_map_64(	submap,				// target map (shared submap)
+	if ((kr = vm_map_64(	submap,				// target map (shared submap)
 			&zero,				// address (map into 1st page in submap)
 			area_used,			// size
 			0,				// mask
@@ -151,12 +174,18 @@ commpage_allocate(
 			handle,				// port is the memory entry we just made
 			0,                              // offset (map 1st page in memory entry)
 			FALSE,                          // copy
-			VM_PROT_READ|VM_PROT_EXECUTE,   // cur_protection (R-only in user map)
-			VM_PROT_READ|VM_PROT_EXECUTE,   // max_protection
-			VM_INHERIT_SHARE ))             // inheritance
-		panic("cannot map commpage");
+			uperm,   // cur_protection (as requested by caller)
+			uperm,   // max_protection
+		        VM_INHERIT_SHARE )))             // inheritance
+		panic("cannot map commpage %d", kr);
 
 	ipc_port_release(handle);
+	/* Make the kernel mapping non-executable. This cannot be done
+	 * when the map entry is created, because mach_make_memory_entry
+	 * cannot currently handle disjoint permissions.
+	 */
+	kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE);
+	assert (kr == KERN_SUCCESS);
 
 	return (void*)(intptr_t)kernel_addr;                     // return address in kernel map
 }
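
For readers new to the Mach VM idiom used above (allocate read-write in one
map, name the pages with a memory entry, then map that entry elsewhere under
different protections), here is a minimal user-space analog built on the
public counterparts of the same calls. It is a sketch only: it shares a page
with the calling task itself rather than with a shared submap.

    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        mach_vm_address_t src = 0, alias = 0;
        memory_object_size_t size = (memory_object_size_t)getpagesize();
        mach_port_t handle = MACH_PORT_NULL;
        kern_return_t kr;

        /* 1. allocate a page RW (commpage_allocate does this in kernel_map) */
        kr = mach_vm_allocate(mach_task_self(), &src, size, VM_FLAGS_ANYWHERE);
        if (kr != KERN_SUCCESS) return 1;
        strcpy((char *)(uintptr_t)src, "hello, commpage");

        /* 2. wrap it in a memory entry restricted to the desired protection */
        kr = mach_make_memory_entry_64(mach_task_self(), &size, src,
                                       VM_PROT_READ, &handle, MACH_PORT_NULL);
        if (kr != KERN_SUCCESS) return 1;

        /* 3. map the same object again, read-only, the way vm_map_64 above
         *    maps the commpage into the shared submap */
        kr = mach_vm_map(mach_task_self(), &alias, size, 0, VM_FLAGS_ANYWHERE,
                         handle, 0, FALSE, VM_PROT_READ, VM_PROT_READ,
                         VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) return 1;

        printf("alias of 0x%llx at 0x%llx: %s\n", src, alias,
               (const char *)(uintptr_t)alias);
        return 0;
    }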
@@ -193,7 +222,7 @@ commpage_cpus( void )
 static void
 commpage_init_cpu_capabilities( void )
 {
-	int bits;
+	uint64_t bits;
 	int cpus;
 	ml_cpu_info_t cpu_info;
 
@@ -201,6 +230,9 @@ commpage_init_cpu_capabilities( void )
 	ml_cpu_get_info(&cpu_info);
 	
 	switch (cpu_info.vector_unit) {
+		case 9:
+			bits |= kHasAVX1_0;
+			/* fall thru */
 		case 8:
 			bits |= kHasSSE4_2;
 			/* fall thru */
@@ -239,26 +271,96 @@ commpage_init_cpu_capabilities( void )
 	}
 	cpus = commpage_cpus();			// how many CPUs do we have
 
-	if (cpus == 1)
-		bits |= kUP;
-
 	bits |= (cpus << kNumCPUsShift);
 
 	bits |= kFastThreadLocalStorage;	// we use %gs for TLS
 
-	if (cpu_mode_is64bit())			// k64Bit means processor is 64-bit capable
-		bits |= k64Bit;
+#define setif(_bits, _bit, _condition) \
+	if (_condition) _bits |= _bit
+
+	setif(bits, kUP,         cpus == 1);
+	setif(bits, k64Bit,      cpu_mode_is64bit());
+	setif(bits, kSlow,       tscFreq <= SLOW_TSC_THRESHOLD);
+
+	setif(bits, kHasAES,     cpuid_features() &
+					CPUID_FEATURE_AES);
+	setif(bits, kHasF16C,    cpuid_features() &
+					CPUID_FEATURE_F16C);
+	setif(bits, kHasRDRAND,  cpuid_features() &
+					CPUID_FEATURE_RDRAND);
+	setif(bits, kHasFMA,     cpuid_features() &
+					CPUID_FEATURE_FMA);
+
+	setif(bits, kHasBMI1,    cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_BMI1);
+	setif(bits, kHasBMI2,    cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_BMI2);
+	setif(bits, kHasRTM,     cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_RTM);
+	setif(bits, kHasHLE,     cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_HLE);
+	setif(bits, kHasAVX2_0,  cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_AVX2);
+	setif(bits, kHasRDSEED,  cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_RDSEED);
+	setif(bits, kHasADX,     cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_ADX);
+	
+	setif(bits, kHasMPX,     cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_MPX);
+	setif(bits, kHasSGX,     cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_SGX);
+	uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
+	setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
+				 (cpuid_leaf7_features() &
+					CPUID_LEAF7_FEATURE_ERMS));
+	
+	_cpu_capabilities = bits;		// set kernel version for use by drivers etc
+}
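
Most of these capability bits are also exported to user space as hw.optional
sysctls, which gives a convenient way to cross-check the vector without
touching the commpage. A small sketch; the exact set of hw.optional names
varies by release, so treat the list as illustrative:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    /* returns 1/0 for present/absent, -1 if the kernel does not publish it */
    static int hw_optional(const char *name)
    {
        int val = 0;
        size_t len = sizeof(val);
        if (sysctlbyname(name, &val, &len, NULL, 0) != 0)
            return -1;
        return val;
    }

    int main(void)
    {
        const char *feats[] = { "hw.optional.sse4_2", "hw.optional.aes",
                                "hw.optional.avx1_0", "hw.optional.avx2_0" };
        for (unsigned i = 0; i < sizeof(feats) / sizeof(feats[0]); i++)
            printf("%-20s %d\n", feats[i], hw_optional(feats[i]));
        return 0;
    }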
+
+/* initialize the approx_time_supported flag and set the approx time to 0.
+ * Called during initial commpage population.
+ */
+static void
+commpage_mach_approximate_time_init(void)
+{
+	char *cp = commPagePtr32;
+	uint8_t supported;
 
-	if (tscFreq <= SLOW_TSC_THRESHOLD)	/* is TSC too slow for _commpage_nanotime?  */
-		bits |= kSlow;
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+	supported = 1;
+#else
+	supported = 0;
+#endif
+	if ( cp ) {
+		cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_BASE_ADDRESS);
+		*(boolean_t *)cp = supported;
+	}
+	
+	cp = commPagePtr64;
+	if ( cp ) {
+		cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_START_ADDRESS);
+		*(boolean_t *)cp = supported;
+	}
+	commpage_update_mach_approximate_time(0);
+}
 
-	if (cpuid_features() & CPUID_FEATURE_AES)
-		bits |= kHasAES;
+static void
+commpage_mach_continuous_time_init(void)
+{
+	commpage_update_mach_continuous_time(0);
+}
 
-	_cpu_capabilities = bits;		// set kernel version for use by drivers etc
+static void
+commpage_boottime_init(void)
+{
+	clock_sec_t secs;
+	clock_usec_t microsecs;
+	clock_get_boottime_microtime(&secs, &microsecs);
+	commpage_update_boottime(secs * USEC_PER_SEC + microsecs);
 }
 
-int
+uint64_t
 _get_cpu_capabilities(void)
 {
 	return _cpu_capabilities;
@@ -275,78 +377,23 @@ commpage_stuff(
     void	*dest = commpage_addr_of(address);
     
     if (address < next)
-    	panic("commpage overlap at address 0x%p, 0x%x < 0x%x", dest, address, next);
+       panic("commpage overlap at address 0x%p, 0x%x < 0x%x", dest, address, next);
     
     bcopy(source,dest,length);
     
     next = address + length;
 }
 
-static void
-commpage_stuff_swap(
-	commpage_address_t	address,
-	void	*source,
-	int	length,
-	int	legacy )
-{
-	if ( legacy ) {
-		void *dest = commpage_addr_of(address);
-		dest = (void *)((uintptr_t) dest + _COMM_PAGE_SIGS_OFFSET);
-		switch (length) {
-			case 2:
-				OSWriteSwapInt16(dest, 0, *(uint16_t *)source);
-				break;
-			case 4:
-				OSWriteSwapInt32(dest, 0, *(uint32_t *)source);
-				break;
-			case 8:
-				OSWriteSwapInt64(dest, 0, *(uint64_t *)source);
-				break;
-		}
-	}
-}
-
-static void
-commpage_stuff2(
-	commpage_address_t	address,
-	void	*source,
-	int	length,
-	int	legacy )
-{
-	commpage_stuff_swap(address, source, length, legacy);
-	commpage_stuff(address, source, length);
-}
-
 /* Copy a routine into comm page if it matches running machine.
  */
 static void
 commpage_stuff_routine(
-    commpage_descriptor	*rd	)
+    commpage_descriptor *rd     )
 {
-    uint32_t		must,cant;
-    
-    if (rd->commpage_address != cur_routine) {
-        if ((cur_routine!=0) && (matched==0))
-            panic("commpage no match for last, next address %08x", rd->commpage_address);
-        cur_routine = rd->commpage_address;
-        matched = 0;
-    }
-    
-    must = _cpu_capabilities & rd->musthave;
-    cant = _cpu_capabilities & rd->canthave;
-    
-    if ((must == rd->musthave) && (cant == 0)) {
-        if (matched)
-            panic("commpage multiple matches for address %08x", rd->commpage_address);
-        matched = 1;
-        
-        commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length);
-	}
+	commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length);
 }
 
 /* Fill in the 32- or 64-bit commpage.  Called once for each.
- * The 32-bit ("legacy") commpage has a bunch of stuff added to it
- * for translated processes, some of which is byte-swapped.
  */
 
 static void
@@ -355,22 +402,19 @@ commpage_populate_one(
 	char **		kernAddressPtr,	// &commPagePtr32 or &commPagePtr64
 	size_t		area_used,	// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
 	commpage_address_t base_offset,	// will become commPageBaseOffset
-	commpage_descriptor** commpage_routines, // list of routine ptrs for this commpage
-	boolean_t	legacy,		// true if 32-bit commpage
 	commpage_time_data** time_data,	// &time_data32 or &time_data64
-	const char*	signature )	// "commpage 32-bit" or "commpage 64-bit"
+	const char*	signature,	// "commpage 32-bit" or "commpage 64-bit"
+	vm_prot_t	uperm)
 {
-   	short   c2;
-	int	c4;
-	static double   two52 = 1048576.0 * 1048576.0 * 4096.0; // 2**52
-	static double   ten6 = 1000000.0;                       // 10**6
-	commpage_descriptor **rd;
+	uint8_t		c1;
+	uint16_t	c2;
+	int		c4;
+	uint64_t	c8;
+	uint32_t	cfamily;
 	short   version = _COMM_PAGE_THIS_VERSION;
-	int		swapcaps;
 
 	next = 0;
-	cur_routine = 0;
-	commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used );
+	commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used, uperm );
 	*kernAddressPtr = commPagePtr;				// save address either in commPagePtr32 or 64
 	commPageBaseOffset = base_offset;
 
@@ -378,57 +422,39 @@ commpage_populate_one(
 
 	/* Stuff in the constants.  We move things into the comm page in strictly
 	* ascending order, so we can check for overlap and panic if so.
+	* Note: the 32-bit cpu_capabilities vector is retained in addition to
+	* the expanded 64-bit vector.
 	*/
-	commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)strlen(signature));
-	commpage_stuff2(_COMM_PAGE_VERSION,&version,sizeof(short),legacy);
-	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int));
-
-	/* excuse our magic constants, we cannot include ppc/cpu_capabilities.h */
-	/* always set kCache32 and kDcbaAvailable */
-	swapcaps =  0x44;
-	if ( _cpu_capabilities & kUP )
-		swapcaps |= (kUP + (1 << kNumCPUsShift));
-	else
-		swapcaps |= 2 << kNumCPUsShift;	/* limit #cpus to 2 */
-	if ( ! noVMX )		/* if rosetta will be emulating altivec... */
-		swapcaps |= 0x101;	/* ...then set kHasAltivec and kDataStreamsAvailable too */
-	commpage_stuff_swap(_COMM_PAGE_CPU_CAPABILITIES, &swapcaps, sizeof(int), legacy);
-	c2 = 32;
-	commpage_stuff_swap(_COMM_PAGE_CACHE_LINESIZE,&c2,2,legacy);
-
-	if (_cpu_capabilities & kCache32)
-		c2 = 32;
-	else if (_cpu_capabilities & kCache64)
+	commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature)));
+	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64,&_cpu_capabilities,sizeof(_cpu_capabilities));
+	commpage_stuff(_COMM_PAGE_VERSION,&version,sizeof(short));
+	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(uint32_t));
+
+	c2 = 32;  // default
+	if (_cpu_capabilities & kCache64)
 		c2 = 64;
 	else if (_cpu_capabilities & kCache128)
 		c2 = 128;
 	commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);
-	
+
 	c4 = MP_SPIN_TRIES;
 	commpage_stuff(_COMM_PAGE_SPIN_COUNT,&c4,4);
 
-	if ( legacy ) {
-		commpage_stuff2(_COMM_PAGE_2_TO_52,&two52,8,legacy);
-		commpage_stuff2(_COMM_PAGE_10_TO_6,&ten6,8,legacy);
-	}
+	/* machine_info valid after ml_get_max_cpus() */
+	c1 = machine_info.physical_cpu_max;
+	commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS,&c1,1);
+	c1 = machine_info.logical_cpu_max;
+	commpage_stuff(_COMM_PAGE_LOGICAL_CPUS,&c1,1);
 
-	for( rd = commpage_routines; *rd != NULL ; rd++ )
-		commpage_stuff_routine(*rd);
+	c8 = ml_cpu_cache_size(0);
+	commpage_stuff(_COMM_PAGE_MEMORY_SIZE, &c8, 8);
 
-	if (!matched)
-		panic("commpage no match on last routine");
+	cfamily = cpuid_info()->cpuid_cpufamily;
+	commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4);
 
 	if (next > _COMM_PAGE_END)
 		panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr);
 
-	if ( legacy ) {
-		next = 0;
-		for( rd = ba_descriptors; *rd != NULL ; rd++ )
-			commpage_stuff_routine(*rd);
-
-		next = 0;
-		commpage_stuff_routine(&sigdata_descriptor);
-	}	
 }
 
 
@@ -448,10 +474,9 @@ commpage_populate( void )
 				&commPagePtr32,
 				_COMM_PAGE32_AREA_USED,
 				_COMM_PAGE32_BASE_ADDRESS,
-				commpage_32_routines, 
-				TRUE,			/* legacy (32-bit) commpage */
 				&time_data32,
-				"commpage 32-bit");
+				"commpage 32-bit",
+				VM_PROT_READ);
 #ifndef __LP64__
 	pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS, 
 			   _COMM_PAGE32_AREA_USED/INTEL_PGBYTES);
@@ -463,23 +488,80 @@ commpage_populate( void )
 					&commPagePtr64,
 					_COMM_PAGE64_AREA_USED,
					_COMM_PAGE32_START_ADDRESS, /* commpage addresses are relative to 32-bit commpage placement */
-					commpage_64_routines, 
-					FALSE,		/* not a legacy commpage */
 					&time_data64,
-					"commpage 64-bit");
+					"commpage 64-bit",
+					VM_PROT_READ);
 #ifndef __LP64__
 		pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS, 
 				   _COMM_PAGE64_AREA_USED/INTEL_PGBYTES);
 #endif
 	}
 
+	simple_lock_init(&commpage_active_cpus_lock, 0);
+
+	commpage_update_active_cpus();
+	commpage_mach_approximate_time_init();
+	commpage_mach_continuous_time_init();
+	commpage_boottime_init();
 	rtc_nanotime_init_commpage();
+	commpage_update_kdebug_state();
+#if CONFIG_ATM
+	commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
+#endif
 }
 
+/* Fill in the common routines during kernel initialization. 
+ * This is called before user-mode code is running.
+ */
+void commpage_text_populate( void ){
+	commpage_descriptor **rd;
+	
+	next = 0;
+	commPagePtr = (char *) commpage_allocate(commpage_text32_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
+	commPageTextPtr32 = commPagePtr;
+	
+	char *cptr = commPagePtr;
+	int i=0;
+	for(; i< _COMM_PAGE_TEXT_AREA_USED; i++){
+		cptr[i]=0xCC;
+	}
+	
+	commPageBaseOffset = _COMM_PAGE_TEXT_START;
+	for (rd = commpage_32_routines; *rd != NULL; rd++) {
+		commpage_stuff_routine(*rd);
+	}
+
+#ifndef __LP64__
+	pmap_commpage32_init((vm_offset_t) commPageTextPtr32, _COMM_PAGE_TEXT_START, 
+			   _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
+#endif	
+
+	if (_cpu_capabilities & k64Bit) {
+		next = 0;
+		commPagePtr = (char *) commpage_allocate(commpage_text64_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
+		commPageTextPtr64 = commPagePtr;
+
+		cptr=commPagePtr;
+		for(i=0; i<_COMM_PAGE_TEXT_AREA_USED; i++){
+			cptr[i]=0xCC;
+		}
+
+		for (rd = commpage_64_routines; *rd !=NULL; rd++) {
+			commpage_stuff_routine(*rd);
+		}
 
-/* Update commpage nanotime information.  Note that we interleave
- * setting the 32- and 64-bit commpages, in order to keep nanotime more
- * nearly in sync between the two environments.
+#ifndef __LP64__
+	pmap_commpage64_init((vm_offset_t) commPageTextPtr64, _COMM_PAGE_TEXT_START, 
+			   _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
+#endif	
+	}
+
+	if (next > _COMM_PAGE_TEXT_END) 
+		panic("commpage text overflow: next=0x%08x, commPagePtr=%p", next, commPagePtr); 
+
+}
+
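
Two details in commpage_text_populate are worth spelling out. The text pages
are pre-filled with 0xCC, the x86 int3 breakpoint opcode, so a stray jump
into unpopulated commpage text traps immediately rather than executing
whatever happens to be there. The routines are then copied in strictly
ascending address order, which is what lets commpage_stuff catch collisions
with a simple "address < next" check. A freestanding sketch of the same
poison-then-populate pattern (the descriptor fields mirror the ones used
above; everything else is illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct {
        uint32_t commpage_address;   /* target offset of this routine */
        void    *code_address;       /* source copy of the code */
        uint32_t code_length;
    } descriptor;

    static void populate_text(uint8_t *page, size_t page_len,
                              descriptor **routines)
    {
        uint32_t next = 0;               /* next free offset, ascending only */

        memset(page, 0xCC, page_len);    /* 0xCC = int3: stray jumps trap */
        for (descriptor **rd = routines; *rd != NULL; rd++) {
            assert((*rd)->commpage_address >= next);       /* no overlap */
            assert((*rd)->commpage_address + (*rd)->code_length <= page_len);
            memcpy(page + (*rd)->commpage_address,
                   (*rd)->code_address, (*rd)->code_length);
            next = (*rd)->commpage_address + (*rd)->code_length;
        }
    }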
+/* Update commpage nanotime information.
  *
  * This routine must be serialized by some external means, ie a lock.
  */
@@ -503,7 +585,7 @@ commpage_set_nanotime(
 		panic("nanotime trouble 1");	/* possibly not serialized */
 	if ( ns_base < p32->nt_ns_base )
 		panic("nanotime trouble 2");
-	if ((shift != 32) && ((_cpu_capabilities & kSlow)==0) )
+	if ((shift != 0) && ((_cpu_capabilities & kSlow)==0) )
 		panic("nanotime trouble 3");
 		
 	next_gen = ++generation;
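
The external lock covers writers only; user-space readers synchronize with
this data lock-free through the generation counter manipulated above. The
writer drops the generation to zero, updates the fields, then publishes a
new non-zero generation; a reader retries whenever the generation is zero or
changes underneath it. A sketch of that reader protocol (the field names
follow the commpage_time_data usage here, but the layout and the plain
64-bit arithmetic are simplifications; the real routines use a wider
multiply):

    #include <stdint.h>

    typedef struct {
        volatile uint64_t nt_tsc_base;
        volatile uint32_t nt_scale;
        volatile uint32_t nt_shift;
        volatile uint64_t nt_ns_base;
        volatile uint32_t nt_generation;   /* 0 means update in progress */
    } time_data;

    /* Convert a TSC reading to nanoseconds, retrying across updates. */
    static uint64_t tsc_to_ns(const time_data *p, uint64_t (*rdtsc)(void))
    {
        uint32_t gen;
        uint64_t ns;

        do {
            gen = p->nt_generation;
            ns  = (((rdtsc() - p->nt_tsc_base) * p->nt_scale) >> 32)
                  + p->nt_ns_base;
        } while (gen == 0 || gen != p->nt_generation);
        return ns;
    }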
@@ -587,14 +669,14 @@ commpage_set_memory_pressure(
 	cp = commPagePtr32;
 	if ( cp ) {
 		cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_BASE_ADDRESS);
-		ip = (uint32_t*) cp;
+		ip = (uint32_t*) (void *) cp;
 		*ip = (uint32_t) pressure;
 	}
 	
 	cp = commPagePtr64;
 	if ( cp ) {
 		cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_START_ADDRESS);
-		ip = (uint32_t*) cp;
+		ip = (uint32_t*) (void *) cp;
 		*ip = (uint32_t) pressure;
 	}
 
@@ -616,26 +698,183 @@ commpage_set_spin_count(
 	cp = commPagePtr32;
 	if ( cp ) {
 		cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_BASE_ADDRESS);
-		ip = (uint32_t*) cp;
+		ip = (uint32_t*) (void *) cp;
 		*ip = (uint32_t) count;
 	}
 	
 	cp = commPagePtr64;
 	if ( cp ) {
 		cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_START_ADDRESS);
-		ip = (uint32_t*) cp;
+		ip = (uint32_t*) (void *) cp;
 		*ip = (uint32_t) count;
 	}
 
 }
 
+/* Updated every time a logical CPU goes offline/online */
+void
+commpage_update_active_cpus(void)
+{
+	char	    *cp;
+	volatile uint8_t    *ip;
+	
+	/* At least 32-bit commpage must be initialized */
+	if (!commPagePtr32)
+		return;
+
+	simple_lock(&commpage_active_cpus_lock);
+
+	cp = commPagePtr32;
+	cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_BASE_ADDRESS);
+	ip = (volatile uint8_t*) cp;
+	*ip = (uint8_t) processor_avail_count;
+	
+	cp = commPagePtr64;
+	if ( cp ) {
+		cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_START_ADDRESS);
+		ip = (volatile uint8_t*) cp;
+		*ip = (uint8_t) processor_avail_count;
+	}
+
+	simple_unlock(&commpage_active_cpus_lock);
+}
+
+/*
+ * Update the commpage with current kdebug state. This currently has bits for
+ * global trace state, and typefilter enablement. It is likely additional state
+ * will be tracked in the future.
+ *
+ * INVARIANT: This value will always be 0 if global tracing is disabled. This
+ * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
+ */
+void
+commpage_update_kdebug_state(void)
+{
+	volatile uint32_t *saved_data_ptr;
+	char *cp;
+
+	cp = commPagePtr32;
+	if (cp) {
+		cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_BASE_ADDRESS);
+		saved_data_ptr = (volatile uint32_t *)cp;
+		*saved_data_ptr = kdebug_commpage_state();
+	}
+
+	cp = commPagePtr64;
+	if (cp) {
+		cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_START_ADDRESS);
+		saved_data_ptr = (volatile uint32_t *)cp;
+		*saved_data_ptr = kdebug_commpage_state();
+	}
+}
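+
The invariant above is what makes the user-space fast path a single load and
branch. A sketch of such a guard, written against whatever address the caller
holds for the mapped _COMM_PAGE_KDEBUG_ENABLE word (passed in as a parameter
here to avoid hard-coding a commpage address):

    #include <stdint.h>

    /* Emit a trace event only when the commpage says tracing is on.
     * kdebug_enable_word points at the user mapping of
     * _COMM_PAGE_KDEBUG_ENABLE; emit() stands in for the real slow path. */
    static inline void
    trace_event_if_enabled(const volatile uint32_t *kdebug_enable_word,
                           void (*emit)(uint32_t state))
    {
        uint32_t state = *kdebug_enable_word;  /* 0 whenever tracing is off */
        if (state)
            emit(state);
    }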
+
+/* Ditto for atm_diagnostic_config */
+void
+commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
+{
+	volatile uint32_t *saved_data_ptr;
+	char *cp;
+
+	cp = commPagePtr32;
+	if (cp) {
+		cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_BASE_ADDRESS);
+		saved_data_ptr = (volatile uint32_t *)cp;
+		*saved_data_ptr = diagnostic_config;
+	}
+
+	cp = commPagePtr64;
+	if ( cp ) {
+		cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_START_ADDRESS);
+		saved_data_ptr = (volatile uint32_t *)cp;
+		*saved_data_ptr = diagnostic_config;
+	}
+}
+
+/*
+ * update the commpage data for last known value of mach_absolute_time()
+ */
+
+void
+commpage_update_mach_approximate_time(uint64_t abstime)
+{
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+	uint64_t saved_data;
+	char *cp;
+	
+	cp = commPagePtr32;
+	if ( cp ) {
+		cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS);
+		saved_data = *(uint64_t *)cp;
+		if (saved_data < abstime) {
+			/* ignoring the success/fail return value assuming that
+			 * if the value has been updated since we last read it,
+			 * "someone" has a newer timestamp than us and ours is
+			 * now invalid. */
+			OSCompareAndSwap64(saved_data, abstime, (uint64_t *)cp);
+		}
+	}
+	cp = commPagePtr64;
+	if ( cp ) {
+		cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS);
+		saved_data = *(uint64_t *)cp;
+		if (saved_data < abstime) {
+			/* ignoring the success/fail return value assuming that
+			 * if the value has been updated since we last read it,
+			 * "someone" has a newer timestamp than us and ours is
+			 * now invalid. */
+			OSCompareAndSwap64(saved_data, abstime, (uint64_t *)cp);
+		}
+	}
+#else
+#pragma unused (abstime)
+#endif
+}
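+
The single compare-and-swap attempt, rather than a retry loop, is safe
precisely because this cell is advance-only: per the comment above, a lost
CAS means another writer observed a newer timestamp, so the loser's value is
treated as stale and dropped. A self-contained C11 sketch of the same policy:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t approx_time;   /* advance-only timestamp cell */

    void publish_if_newer(uint64_t abstime)
    {
        uint64_t seen = atomic_load(&approx_time);

        if (seen < abstime) {
            /* One attempt only: if this CAS fails, a concurrent writer has
             * already stored something newer than `seen`, and our value is
             * assumed stale (the same assumption the kernel code makes). */
            atomic_compare_exchange_strong(&approx_time, &seen, abstime);
        }
    }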
+
+void
+commpage_update_mach_continuous_time(uint64_t sleeptime)
+{
+	char *cp;
+	cp = commPagePtr32;
+	if (cp) {
+		cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS);
+		*(uint64_t *)cp = sleeptime;
+	}
+	
+	cp = commPagePtr64;
+	if (cp) {
+		cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS);
+		*(uint64_t *)cp = sleeptime;
+	}
+}
+
+void
+commpage_update_boottime(uint64_t boottime)
+{
+	char *cp;
+	cp = commPagePtr32;
+	if (cp) {
+		cp += (_COMM_PAGE_BOOTTIME_USEC - _COMM_PAGE32_START_ADDRESS);
+		*(uint64_t *)cp = boottime;
+	}
+
+	cp = commPagePtr64;
+	if (cp) {
+		cp += (_COMM_PAGE_BOOTTIME_USEC - _COMM_PAGE32_START_ADDRESS);
+		*(uint64_t *)cp = boottime;
+	}
+}
+
+
+extern user32_addr_t commpage_text32_location;
+extern user64_addr_t commpage_text64_location;
 
 /* Check to see if a given address is in the Preemption Free Zone (PFZ) */
 
 uint32_t
 commpage_is_in_pfz32(uint32_t addr32)
 {
-	if ( (addr32 >= _COMM_PAGE_PFZ_START) && (addr32 < _COMM_PAGE_PFZ_END)) {
+	if ( (addr32 >= (commpage_text32_location + _COMM_TEXT_PFZ_START_OFFSET)) 
+		&& (addr32 < (commpage_text32_location+_COMM_TEXT_PFZ_END_OFFSET))) {
 		return 1;
 	}
 	else
@@ -645,8 +884,8 @@ commpage_is_in_pfz32(uint32_t addr32)
 uint32_t
 commpage_is_in_pfz64(addr64_t addr64)
 {
-	if ( (addr64 >= _COMM_PAGE_32_TO_64(_COMM_PAGE_PFZ_START))
-	     && (addr64 <  _COMM_PAGE_32_TO_64(_COMM_PAGE_PFZ_END))) {
+	if ( (addr64 >= (commpage_text64_location + _COMM_TEXT_PFZ_START_OFFSET))
+	     && (addr64 <  (commpage_text64_location + _COMM_TEXT_PFZ_END_OFFSET))) {
 		return 1;
 	}
 	else