#include <kern/page_decrypt.h>
#include <kern/processor.h>
+#include <sys/kdebug.h>
+
+#if CONFIG_ATM
+#include <atm/atm_internal.h>
+#endif
+
/* the lists of commpage routines are in commpage_asm.s */
extern commpage_descriptor* commpage_32_routines[];
extern commpage_descriptor* commpage_64_routines[];
extern vm_map_t commpage32_map; // the shared submap, set up in vm init
extern vm_map_t commpage64_map; // the shared submap, set up in vm init
+extern vm_map_t commpage_text32_map; // the shared submap, set up in vm init
+extern vm_map_t commpage_text64_map; // the shared submap, set up in vm init
+
char *commPagePtr32 = NULL; // virtual addr in kernel map of 32-bit commpage
char *commPagePtr64 = NULL; // ...and of 64-bit commpage
-uint32_t _cpu_capabilities = 0; // define the capability vector
+char *commPageTextPtr32 = NULL; // virtual addr in kernel map of 32-bit commpage text
+char *commPageTextPtr64 = NULL; // ...and of 64-bit commpage text
-int noVMX = 0; /* if true, do not set kHasAltivec in ppc _cpu_capabilities */
+uint64_t _cpu_capabilities = 0; // define the capability vector
typedef uint32_t commpage_address_t;
-static commpage_address_t next; // next available address in comm page
-static commpage_address_t cur_routine; // comm page address of "current" routine
-static boolean_t matched; // true if we've found a match for "current" routine
+static commpage_address_t next; // next available address in comm page
static char *commPagePtr; // virtual addr in kernel map of commpage we are working on
static commpage_address_t commPageBaseOffset; // subtract from 32-bit runtime address to get offset in virtual commpage in kernel map
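+// Example: if the 32-bit commpage is based at 0xffff0000 (the traditional
+// _COMM_PAGE32_BASE_ADDRESS; value shown for illustration only), runtime
+// address 0xffff0020 corresponds to offset 0x20 in the kernel-map copy.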
static void*
commpage_allocate(
vm_map_t submap, // commpage32_map or commpage64_map
- size_t area_used ) // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
+ size_t area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
+ vm_prot_t uperm)
{
vm_offset_t kernel_addr = 0; // address of commpage in kernel map
vm_offset_t zero = 0;
vm_size_t size = area_used; // size actually populated
vm_map_entry_t entry;
ipc_port_t handle;
+ kern_return_t kr;
if (submap == NULL)
panic("commpage submap is null");
- if (vm_map(kernel_map,&kernel_addr,area_used,0,VM_FLAGS_ANYWHERE,NULL,0,FALSE,VM_PROT_ALL,VM_PROT_ALL,VM_INHERIT_NONE))
- panic("cannot allocate commpage");
-
- if (vm_map_wire(kernel_map,kernel_addr,kernel_addr+area_used,VM_PROT_DEFAULT,FALSE))
- panic("cannot wire commpage");
+ if ((kr = vm_map(kernel_map,
+ &kernel_addr,
+ area_used,
+ 0,
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
+ NULL,
+ 0,
+ FALSE,
+ VM_PROT_ALL,
+ VM_PROT_ALL,
+ VM_INHERIT_NONE)))
+ panic("cannot allocate commpage %d", kr);
+
+ if ((kr = vm_map_wire(kernel_map,
+ kernel_addr,
+ kernel_addr+area_used,
+ VM_PROT_DEFAULT|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
+ FALSE)))
+ panic("cannot wire commpage: %d", kr);
/*
* Now that the object is created and wired into the kernel map, mark it so that no delay
*
* JMM - What we really need is a way to create it like this in the first place.
*/
- if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr), &entry) || entry->is_sub_map)
- panic("cannot find commpage entry");
- entry->object.vm_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ if (!(kr = vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry)) || entry->is_sub_map)
+ panic("cannot find commpage entry %d", kr);
+ VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE;
- if (mach_make_memory_entry( kernel_map, // target map
+ if ((kr = mach_make_memory_entry( kernel_map, // target map
&size, // size
kernel_addr, // offset (address in kernel map)
- VM_PROT_ALL, // map it RWX
+ uperm, // protections as specified
&handle, // this is the object handle we get
- NULL )) // parent_entry (what is this?)
- panic("cannot make entry for commpage");
+ NULL ))) // parent_entry (what is this?)
+ panic("cannot make entry for commpage %d", kr);
- if (vm_map_64( submap, // target map (shared submap)
+ if ((kr = vm_map_64( submap, // target map (shared submap)
&zero, // address (map into 1st page in submap)
area_used, // size
0, // mask
handle, // port is the memory entry we just made
0, // offset (map 1st page in memory entry)
FALSE, // copy
- VM_PROT_READ|VM_PROT_EXECUTE, // cur_protection (R-only in user map)
- VM_PROT_READ|VM_PROT_EXECUTE, // max_protection
- VM_INHERIT_SHARE )) // inheritance
- panic("cannot map commpage");
+ uperm, // cur_protection (as specified by caller)
+ uperm, // max_protection
+ VM_INHERIT_SHARE ))) // inheritance
+ panic("cannot map commpage %d", kr);
ipc_port_release(handle);
-
- // Initialize the text section of the commpage with INT3
- char *commpage_ptr = (char*)(intptr_t)kernel_addr;
- vm_size_t i;
- for( i = _COMM_PAGE_TEXT_START - _COMM_PAGE_START_ADDRESS; i < size; i++ )
- // This is the hex for the X86 opcode INT3
- commpage_ptr[i] = 0xCC;
+ /* Make the kernel mapping non-executable. This cannot be done
+ * at the time of map entry creation as mach_make_memory_entry
+ * cannot handle disjoint permissions at this time.
+ */
+ kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE);
+ assert (kr == KERN_SUCCESS);
return (void*)(intptr_t)kernel_addr; // return address in kernel map
}
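+/* Note: the kernel keeps its own mapping read/write (per the vm_protect
+ * above) while user mappings get the caller's uperm: VM_PROT_READ for the
+ * data pages, VM_PROT_READ|VM_PROT_EXECUTE for the text pages below. */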
static void
commpage_init_cpu_capabilities( void )
{
- uint32_t bits;
+ uint64_t bits;
int cpus;
ml_cpu_info_t cpu_info;
}
cpus = commpage_cpus(); // how many CPUs do we have
- if (cpus == 1)
- bits |= kUP;
-
bits |= (cpus << kNumCPUsShift);
bits |= kFastThreadLocalStorage; // we use %gs for TLS
- if (cpu_mode_is64bit()) // k64Bit means processor is 64-bit capable
- bits |= k64Bit;
+#define setif(_bits, _bit, _condition) \
+ if (_condition) _bits |= _bit
+
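+/* setif() is purely textual: setif(bits, kUP, cpus == 1) expands to
+ * "if (cpus == 1) bits |= kUP;". */
+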
+ setif(bits, kUP, cpus == 1);
+ setif(bits, k64Bit, cpu_mode_is64bit());
+ setif(bits, kSlow, tscFreq <= SLOW_TSC_THRESHOLD);
+
+ setif(bits, kHasAES, cpuid_features() &
+ CPUID_FEATURE_AES);
+ setif(bits, kHasF16C, cpuid_features() &
+ CPUID_FEATURE_F16C);
+ setif(bits, kHasRDRAND, cpuid_features() &
+ CPUID_FEATURE_RDRAND);
+ setif(bits, kHasFMA, cpuid_features() &
+ CPUID_FEATURE_FMA);
+
+ setif(bits, kHasBMI1, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_BMI1);
+ setif(bits, kHasBMI2, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_BMI2);
+ setif(bits, kHasRTM, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_RTM);
+ setif(bits, kHasHLE, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_HLE);
+ setif(bits, kHasAVX2_0, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_AVX2);
+ setif(bits, kHasRDSEED, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_RDSEED);
+ setif(bits, kHasADX, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_ADX);
+
+ setif(bits, kHasMPX, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_MPX);
+ setif(bits, kHasSGX, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_SGX);
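+
+ /* IA32_MISC_ENABLE bit 0 is the fast-strings enable bit; kHasENFSTRG
+ * additionally requires the ERMS (Enhanced REP MOVSB/STOSB) leaf-7
+ * CPUID feature. */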
+ uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
+ setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
+ (cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_ERMS));
+
+ _cpu_capabilities = bits; // set kernel version for use by drivers etc
+}
+
+/* initialize the approx_time_supported flag and set the approx time to 0.
+ * Called during initial commpage population.
+ */
+static void
+commpage_mach_approximate_time_init(void)
+{
+ char *cp = commPagePtr32;
+ uint8_t supported;
- if (tscFreq <= SLOW_TSC_THRESHOLD) /* is TSC too slow for _commpage_nanotime? */
- bits |= kSlow;
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+ supported = 1;
+#else
+ supported = 0;
+#endif
+ if ( cp ) {
+ cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_BASE_ADDRESS);
+ *(boolean_t *)cp = supported;
+ }
+
+ cp = commPagePtr64;
+ if ( cp ) {
+ cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_START_ADDRESS);
+ *(boolean_t *)cp = supported;
+ }
+ commpage_update_mach_approximate_time(0);
+}
- bits |= (cpuid_features() & CPUID_FEATURE_AES) ? kHasAES : 0;
+static void
+commpage_mach_continuous_time_init(void)
+{
+ commpage_update_mach_continuous_time(0);
+}
- _cpu_capabilities = bits; // set kernel version for use by drivers etc
+static void
+commpage_boottime_init(void)
+{
+ clock_sec_t secs;
+ clock_usec_t microsecs;
+ clock_get_boottime_microtime(&secs, &microsecs);
+ commpage_update_boottime(secs * USEC_PER_SEC + microsecs);
}
-int
+uint64_t
_get_cpu_capabilities(void)
{
return _cpu_capabilities;
*/
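+/* With the musthave/canthave capability matching and its cur_routine/matched
+ * bookkeeping removed, every descriptor in the list is now copied into the
+ * commpage unconditionally. */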
static void
commpage_stuff_routine(
- commpage_descriptor *rd )
+ commpage_descriptor *rd )
{
- uint32_t must,cant;
-
- if (rd->commpage_address != cur_routine) {
- if ((cur_routine!=0) && (matched==0))
- panic("commpage no match for last, next address %08x", rd->commpage_address);
- cur_routine = rd->commpage_address;
- matched = 0;
- }
-
- must = _cpu_capabilities & rd->musthave;
- cant = _cpu_capabilities & rd->canthave;
-
- if ((must == rd->musthave) && (cant == 0)) {
- if (matched)
- panic("commpage multiple matches for address %08x", rd->commpage_address);
- matched = 1;
-
- commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length);
- }
+ commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length);
}
/* Fill in the 32- or 64-bit commpage. Called once for each.
char ** kernAddressPtr, // &commPagePtr32 or &commPagePtr64
size_t area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
commpage_address_t base_offset, // will become commPageBaseOffset
- commpage_descriptor** commpage_routines, // list of routine ptrs for this commpage
commpage_time_data** time_data, // &time_data32 or &time_data64
- const char* signature ) // "commpage 32-bit" or "commpage 64-bit"
+ const char* signature, // "commpage 32-bit" or "commpage 64-bit"
+ vm_prot_t uperm)
{
- uint8_t c1;
- short c2;
- int c4;
- uint64_t c8;
+ uint8_t c1;
+ uint16_t c2;
+ int c4;
+ uint64_t c8;
uint32_t cfamily;
- commpage_descriptor **rd;
short version = _COMM_PAGE_THIS_VERSION;
next = 0;
- cur_routine = 0;
- commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used );
+ commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used, uperm );
*kernAddressPtr = commPagePtr; // save address either in commPagePtr32 or 64
commPageBaseOffset = base_offset;
/* Stuff in the constants. We move things into the comm page in strictly
* ascending order, so we can check for overlap and panic if so.
+ * Note: the 32-bit cpu_capabilities vector is retained in addition to
+ * the expanded 64-bit vector.
*/
- commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)strlen(signature));
+ commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature)));
+ commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64,&_cpu_capabilities,sizeof(_cpu_capabilities));
commpage_stuff(_COMM_PAGE_VERSION,&version,sizeof(short));
- commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(int));
+ commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(uint32_t));
c2 = 32; // default
if (_cpu_capabilities & kCache64)
c2 = 64;
else if (_cpu_capabilities & kCache128)
c2 = 128;
commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);
-
+
c4 = MP_SPIN_TRIES;
commpage_stuff(_COMM_PAGE_SPIN_COUNT,&c4,4);
cfamily = cpuid_info()->cpuid_cpufamily;
commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4);
- for( rd = commpage_routines; *rd != NULL ; rd++ )
- commpage_stuff_routine(*rd);
-
- if (!matched)
- panic("commpage no match on last routine");
-
if (next > _COMM_PAGE_END)
panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr);
&commPagePtr32,
_COMM_PAGE32_AREA_USED,
_COMM_PAGE32_BASE_ADDRESS,
- commpage_32_routines,
&time_data32,
- "commpage 32-bit");
+ "commpage 32-bit",
+ VM_PROT_READ);
#ifndef __LP64__
pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS,
_COMM_PAGE32_AREA_USED/INTEL_PGBYTES);
&commPagePtr64,
_COMM_PAGE64_AREA_USED,
_COMM_PAGE32_START_ADDRESS, /* commpage addresses are relative to 32-bit commpage placement */
- commpage_64_routines,
&time_data64,
- "commpage 64-bit");
+ "commpage 64-bit",
+ VM_PROT_READ);
#ifndef __LP64__
pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS,
_COMM_PAGE64_AREA_USED/INTEL_PGBYTES);
simple_lock_init(&commpage_active_cpus_lock, 0);
commpage_update_active_cpus();
+ commpage_mach_approximate_time_init();
+ commpage_mach_continuous_time_init();
+ commpage_boottime_init();
rtc_nanotime_init_commpage();
+ commpage_update_kdebug_state();
+#if CONFIG_ATM
+ commpage_update_atm_diagnostic_config(atm_get_diagnostic_config());
+#endif
}
+/* Fill in the common routines during kernel initialization.
+ * This is called before user-mode code is running.
+ */
+void
+commpage_text_populate( void )
+{
+ commpage_descriptor **rd;
+
+ next = 0;
+ commPagePtr = (char *) commpage_allocate(commpage_text32_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
+ commPageTextPtr32 = commPagePtr;
+
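+ /* Fill the page with 0xCC, the x86 INT3 breakpoint opcode (as the old
+ * inline initialization did), so a stray jump into unpopulated commpage
+ * text traps rather than executing arbitrary bytes. */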
+ char *cptr = commPagePtr;
+ int i = 0;
+ for (; i < _COMM_PAGE_TEXT_AREA_USED; i++) {
+ cptr[i] = 0xCC;
+ }
+
+ commPageBaseOffset = _COMM_PAGE_TEXT_START;
+ for (rd = commpage_32_routines; *rd != NULL; rd++) {
+ commpage_stuff_routine(*rd);
+ }
+
+#ifndef __LP64__
+ pmap_commpage32_init((vm_offset_t) commPageTextPtr32, _COMM_PAGE_TEXT_START,
+ _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
+#endif
+
+ if (_cpu_capabilities & k64Bit) {
+ next = 0;
+ commPagePtr = (char *) commpage_allocate(commpage_text64_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE);
+ commPageTextPtr64 = commPagePtr;
+
+ cptr = commPagePtr;
+ for (i = 0; i < _COMM_PAGE_TEXT_AREA_USED; i++) {
+ cptr[i] = 0xCC;
+ }
+
+ for (rd = commpage_64_routines; *rd != NULL; rd++) {
+ commpage_stuff_routine(*rd);
+ }
+
+#ifndef __LP64__
+ pmap_commpage64_init((vm_offset_t) commPageTextPtr64, _COMM_PAGE_TEXT_START,
+ _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES);
+#endif
+ }
+
+ if (next > _COMM_PAGE_TEXT_END)
+ panic("commpage text overflow: next=0x%08x, commPagePtr=%p", next, commPagePtr);
-/* Update commpage nanotime information. Note that we interleave
- * setting the 32- and 64-bit commpages, in order to keep nanotime more
- * nearly in sync between the two environments.
+}
+
+/* Update commpage nanotime information.
*
* This routine must be serialized by some external means, ie a lock.
*/
panic("nanotime trouble 1"); /* possibly not serialized */
if ( ns_base < p32->nt_ns_base )
panic("nanotime trouble 2");
- if ((shift != 32) && ((_cpu_capabilities & kSlow)==0) )
+ if ((shift != 0) && ((_cpu_capabilities & kSlow)==0) )
panic("nanotime trouble 3");
next_gen = ++generation;
cp = commPagePtr32;
if ( cp ) {
cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_BASE_ADDRESS);
- ip = (uint32_t*) cp;
+ ip = (uint32_t*) (void *) cp;
*ip = (uint32_t) pressure;
}
cp = commPagePtr64;
if ( cp ) {
cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_START_ADDRESS);
- ip = (uint32_t*) cp;
+ ip = (uint32_t*) (void *) cp;
*ip = (uint32_t) pressure;
}
cp = commPagePtr32;
if ( cp ) {
cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_BASE_ADDRESS);
- ip = (uint32_t*) cp;
+ ip = (uint32_t*) (void *) cp;
*ip = (uint32_t) count;
}
cp = commPagePtr64;
if ( cp ) {
cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_START_ADDRESS);
- ip = (uint32_t*) cp;
+ ip = (uint32_t*) (void *) cp;
*ip = (uint32_t) count;
}
simple_unlock(&commpage_active_cpus_lock);
}
+/*
+ * Update the commpage with current kdebug state. This currently has bits for
+ * global trace state, and typefilter enablement. It is likely additional state
+ * will be tracked in the future.
+ *
+ * INVARIANT: This value will always be 0 if global tracing is disabled. This
+ * allows simple guard tests of "if (*_COMM_PAGE_KDEBUG_ENABLE) { ... }"
+ */
+void
+commpage_update_kdebug_state(void)
+{
+ volatile uint32_t *saved_data_ptr;
+ char *cp;
+
+ cp = commPagePtr32;
+ if (cp) {
+ cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_BASE_ADDRESS);
+ saved_data_ptr = (volatile uint32_t *)cp;
+ *saved_data_ptr = kdebug_commpage_state();
+ }
+
+ cp = commPagePtr64;
+ if (cp) {
+ cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_START_ADDRESS);
+ saved_data_ptr = (volatile uint32_t *)cp;
+ *saved_data_ptr = kdebug_commpage_state();
+ }
+}
+
+/* Ditto for atm_diagnostic_config */
+void
+commpage_update_atm_diagnostic_config(uint32_t diagnostic_config)
+{
+ volatile uint32_t *saved_data_ptr;
+ char *cp;
+
+ cp = commPagePtr32;
+ if (cp) {
+ cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_BASE_ADDRESS);
+ saved_data_ptr = (volatile uint32_t *)cp;
+ *saved_data_ptr = diagnostic_config;
+ }
+
+ cp = commPagePtr64;
+ if (cp) {
+ cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_START_ADDRESS);
+ saved_data_ptr = (volatile uint32_t *)cp;
+ *saved_data_ptr = diagnostic_config;
+ }
+}
+
+/*
+ * update the commpage data for last known value of mach_absolute_time()
+ */
+
+void
+commpage_update_mach_approximate_time(uint64_t abstime)
+{
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+ uint64_t saved_data;
+ char *cp;
+
+ cp = commPagePtr32;
+ if ( cp ) {
+ cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS);
+ saved_data = *(uint64_t *)cp;
+ if (saved_data < abstime) {
+ /* ignoring the success/fail return value assuming that
+ * if the value has been updated since we last read it,
+ * "someone" has a newer timestamp than us and ours is
+ * now invalid. */
+ OSCompareAndSwap64(saved_data, abstime, (uint64_t *)cp);
+ }
+ }
+ cp = commPagePtr64;
+ if ( cp ) {
+ cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS);
+ saved_data = *(uint64_t *)cp;
+ if (saved_data < abstime) {
+ /* ignoring the success/fail return value assuming that
+ * if the value has been updated since we last read it,
+ * "someone" has a newer timestamp than us and ours is
+ * now invalid. */
+ OSCompareAndSwap64(saved_data, abstime, (uint64_t *)cp);
+ }
+ }
+#else
+#pragma unused (abstime)
+#endif
+}
+
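+/* Note: both the 32- and 64-bit updates below are offset from
+ * _COMM_PAGE32_START_ADDRESS because commpage symbol addresses are defined
+ * relative to the 32-bit placement (for the 32-bit page the START and BASE
+ * constants are defined to be equal). */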
+void
+commpage_update_mach_continuous_time(uint64_t sleeptime)
+{
+ char *cp;
+ cp = commPagePtr32;
+ if (cp) {
+ cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS);
+ *(uint64_t *)cp = sleeptime;
+ }
+
+ cp = commPagePtr64;
+ if (cp) {
+ cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS);
+ *(uint64_t *)cp = sleeptime;
+ }
+}
+
+void
+commpage_update_boottime(uint64_t boottime)
+{
+ char *cp;
+ cp = commPagePtr32;
+ if (cp) {
+ cp += (_COMM_PAGE_BOOTTIME_USEC - _COMM_PAGE32_START_ADDRESS);
+ *(uint64_t *)cp = boottime;
+ }
+
+ cp = commPagePtr64;
+ if (cp) {
+ cp += (_COMM_PAGE_BOOTTIME_USEC - _COMM_PAGE32_START_ADDRESS);
+ *(uint64_t *)cp = boottime;
+ }
+}
+
+
+extern user32_addr_t commpage_text32_location;
+extern user64_addr_t commpage_text64_location;
/* Check to see if a given address is in the Preemption Free Zone (PFZ) */
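+/* The PFZ now lives in the dynamically placed commpage text page, so the
+ * checks below are made relative to the recorded
+ * commpage_text{32,64}_location rather than against fixed
+ * _COMM_PAGE_PFZ_* addresses. */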
uint32_t
commpage_is_in_pfz32(uint32_t addr32)
{
- if ( (addr32 >= _COMM_PAGE_PFZ_START) && (addr32 < _COMM_PAGE_PFZ_END)) {
+ if ( (addr32 >= (commpage_text32_location + _COMM_TEXT_PFZ_START_OFFSET))
+ && (addr32 < (commpage_text32_location+_COMM_TEXT_PFZ_END_OFFSET))) {
return 1;
}
else
uint32_t
commpage_is_in_pfz64(addr64_t addr64)
{
- if ( (addr64 >= _COMM_PAGE_32_TO_64(_COMM_PAGE_PFZ_START))
- && (addr64 < _COMM_PAGE_32_TO_64(_COMM_PAGE_PFZ_END))) {
+ if ( (addr64 >= (commpage_text64_location + _COMM_TEXT_PFZ_START_OFFSET))
+ && (addr64 < (commpage_text64_location + _COMM_TEXT_PFZ_END_OFFSET))) {
return 1;
}
else