git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/arm/machine_routines.c (xnu-7195.101.1)
diff --git a/osfmk/arm/machine_routines.c b/osfmk/arm/machine_routines.c
index 94fc76bf42efa3756f655b47c3eb57b3d2eb1aad..072ed38c2dc542910c86be635dd26537434a8280 100644
@@ -33,6 +33,7 @@
 #include <arm/io_map_entries.h>
 #include <arm/cpu_data.h>
 #include <arm/cpu_data_internal.h>
+#include <arm/machine_routines.h>
 #include <arm/misc_protos.h>
 #include <arm/rtclock.h>
 #include <arm/caches_internal.h>
 #include <machine/atomic.h>
 #include <vm/pmap.h>
 #include <vm/vm_page.h>
+#include <vm/vm_map.h>
 #include <sys/kdebug.h>
 #include <kern/coalition.h>
 #include <pexpert/device_tree.h>
+#include <arm/cpuid_internal.h>
+#include <arm/cpu_capabilities.h>
 
 #include <IOKit/IOPlatformExpert.h>
 
 #if KPC
 #include <kern/kpc.h>
 #endif
 
-static int max_cpus_initialized = 0;
-#define MAX_CPUS_SET    0x1
-#define MAX_CPUS_WAIT   0x2
-
-static unsigned int avail_cpus = 0;
+/* arm32 only supports a highly simplified topology, fixed at 1 cluster */
+static ml_topology_cpu_t topology_cpu_array[MAX_CPUS];
+static ml_topology_cluster_t topology_cluster = {
+       .cluster_id = 0,
+       .cluster_type = CLUSTER_TYPE_SMP,
+       .first_cpu_id = 0,
+};
+static ml_topology_info_t topology_info = {
+       .version = CPU_TOPOLOGY_VERSION,
+       .num_clusters = 1,
+       .max_cluster_id = 0,
+       .cpus = topology_cpu_array,
+       .clusters = &topology_cluster,
+       .boot_cpu = &topology_cpu_array[0],
+       .boot_cluster = &topology_cluster,
+};
 
 uint32_t LockTimeOut;
 uint32_t LockTimeOutUsec;
+uint64_t TLockTimeOut;
 uint64_t MutexSpin;
-boolean_t is_clock_configured = FALSE;
-
-extern int mach_assert;
-extern volatile uint32_t debug_enabled;
-
-void machine_conf(void);
+extern uint32_t lockdown_done;
+uint64_t low_MutexSpin;
+int64_t  high_MutexSpin;
 
 void
 machine_startup(__unused boot_args * args)
 {
-       int boot_arg;
-
-       PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));
-
-       if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
-               default_preemption_rate = boot_arg;
-       }
-       if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
-               default_bg_preemption_rate = boot_arg;
-       }
-
        machine_conf();
 
        /*
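For illustration, a minimal sketch (not part of the patch) of walking the fixed one-cluster topology this file now exposes through ml_get_topology_info(); the ml_topology_* types are assumed to come from <arm/machine_routines.h>:

    #include <arm/machine_routines.h>  /* ml_topology_info_t, ml_get_topology_info() */

    /* On arm32, cpu_id == phys_id and every CPU sits in cluster 0. */
    static void
    dump_cpu_topology(void)
    {
            const ml_topology_info_t *info = ml_get_topology_info();
            for (unsigned int i = 0; i < info->num_cpus; i++) {
                    const ml_topology_cpu_t *cpu = &info->cpus[i];
                    kprintf("cpu %u: phys_id %u, cluster %u\n",
                        cpu->cpu_id, cpu->phys_id, info->boot_cluster->cluster_id);
            }
    }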
@@ -97,33 +99,17 @@ machine_startup(__unused boot_args * args)
 
 char           *
 machine_boot_info(
-                 __unused char *buf,
-                 __unused vm_size_t size)
-{
-       return (PE_boot_args());
-}
-
-void
-machine_conf(void)
+       __unused char *buf,
+       __unused vm_size_t size)
 {
-       machine_info.memory_size = mem_size;
+       return PE_boot_args();
 }
 
 void
-machine_init(void)
-{
-       debug_log_init();
-       clock_config();
-       is_clock_configured = TRUE;
-       if (debug_enabled)
-               pmap_map_globals();
-}
-
-void 
 slave_machine_init(__unused void *param)
 {
-       cpu_machine_init();     /* Initialize the processor */
-       clock_init();           /* Init the clock */
+       cpu_machine_init();     /* Initialize the processor */
+       clock_init();           /* Init the clock */
 }
 
 /*
@@ -132,51 +118,11 @@ slave_machine_init(__unused void *param)
  */
 thread_t
 machine_processor_shutdown(
-                          __unused thread_t thread,
-                          void (*doshutdown) (processor_t),
-                          processor_t processor)
+       __unused thread_t thread,
+       void (*doshutdown)(processor_t),
+       processor_t processor)
 {
-       return (Shutdown_context(doshutdown, processor));
-}
-
-/*
- *     Routine:        ml_init_max_cpus
- *     Function:
- */
-void
-ml_init_max_cpus(unsigned int max_cpus)
-{
-       boolean_t       current_state;
-
-       current_state = ml_set_interrupts_enabled(FALSE);
-       if (max_cpus_initialized != MAX_CPUS_SET) {
-               machine_info.max_cpus = max_cpus;
-               machine_info.physical_cpu_max = max_cpus;
-               machine_info.logical_cpu_max = max_cpus;
-               if (max_cpus_initialized == MAX_CPUS_WAIT)
-                       thread_wakeup((event_t) & max_cpus_initialized);
-               max_cpus_initialized = MAX_CPUS_SET;
-       }
-       (void) ml_set_interrupts_enabled(current_state);
-}
-
-/*
- *     Routine:        ml_get_max_cpus
- *     Function:
- */
-unsigned int
-ml_get_max_cpus(void)
-{
-       boolean_t       current_state;
-
-       current_state = ml_set_interrupts_enabled(FALSE);
-       if (max_cpus_initialized != MAX_CPUS_SET) {
-               max_cpus_initialized = MAX_CPUS_WAIT;
-               assert_wait((event_t) & max_cpus_initialized, THREAD_UNINT);
-               (void) thread_block(THREAD_CONTINUE_NULL);
-       }
-       (void) ml_set_interrupts_enabled(current_state);
-       return (machine_info.max_cpus);
+       return Shutdown_context(doshutdown, processor);
 }
 
 /*
@@ -188,24 +134,47 @@ ml_init_lock_timeout(void)
 {
        uint64_t        abstime;
        uint64_t        mtxspin;
-       uint64_t        default_timeout_ns = NSEC_PER_SEC>>2;
+       uint64_t        default_timeout_ns = NSEC_PER_SEC >> 2;
        uint32_t        slto;
 
-       if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
+       if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
                default_timeout_ns = slto * NSEC_PER_USEC;
+       }
 
        nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
-       LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC);
+       LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
        LockTimeOut = (uint32_t)abstime;
+       TLockTimeOut = LockTimeOut;
 
-       if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
-               if (mtxspin > USEC_PER_SEC>>4)
-                       mtxspin =  USEC_PER_SEC>>4;
-                       nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
+       if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
+               if (mtxspin > USEC_PER_SEC >> 4) {
+                       mtxspin =  USEC_PER_SEC >> 4;
+               }
+               nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
        } else {
-               nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
+               nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
        }
        MutexSpin = abstime;
+       low_MutexSpin = MutexSpin;
+       /*
+        * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
+        * real_ncpus is not set at this time
+        *
+        * NOTE: active spinning is disabled in arm. It can be activated
+        * by setting high_MutexSpin through the sysctl.
+        */
+       high_MutexSpin = low_MutexSpin;
+}
+
+/*
+ * This is called when all of the ml_processor_info_t structures have been
+ * initialized and all the processors have been started through processor_start().
+ *
+ * Required by the scheduler subsystem.
+ */
+void
+ml_cpu_init_completed(void)
+{
 }
 
 /*
@@ -215,8 +184,8 @@ ml_init_lock_timeout(void)
 void
 ml_cpu_up(void)
 {
-       hw_atomic_add(&machine_info.physical_cpu, 1);
-       hw_atomic_add(&machine_info.logical_cpu, 1);
+       os_atomic_inc(&machine_info.physical_cpu, relaxed);
+       os_atomic_inc(&machine_info.logical_cpu, relaxed);
 }
 
 /*
@@ -226,11 +195,11 @@ ml_cpu_up(void)
 void
 ml_cpu_down(void)
 {
-       cpu_data_t      *cpu_data_ptr;
+       cpu_data_t      *cpu_data_ptr;
+
+       os_atomic_dec(&machine_info.physical_cpu, relaxed);
+       os_atomic_dec(&machine_info.logical_cpu, relaxed);
 
-       hw_atomic_sub(&machine_info.physical_cpu, 1);
-       hw_atomic_sub(&machine_info.logical_cpu, 1);
-       
        /*
         * If we want to deal with outstanding IPIs, we need to
         * do relatively early in the processor_doshutdown path,
@@ -277,50 +246,44 @@ ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
 unsigned int
 ml_get_machine_mem(void)
 {
-       return (machine_info.memory_size);
+       return machine_info.memory_size;
 }
 
 /* Return max offset */
 vm_map_offset_t
 ml_get_max_offset(
-       boolean_t       is64,
+       boolean_t       is64,
        unsigned int option)
 {
-       unsigned int    pmap_max_offset_option = 0;
+       unsigned int    pmap_max_offset_option = 0;
 
        switch (option) {
        case MACHINE_MAX_OFFSET_DEFAULT:
                pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
-                break;
-        case MACHINE_MAX_OFFSET_MIN:
+               break;
+       case MACHINE_MAX_OFFSET_MIN:
                pmap_max_offset_option =  ARM_PMAP_MAX_OFFSET_MIN;
-                break;
-        case MACHINE_MAX_OFFSET_MAX:
+               break;
+       case MACHINE_MAX_OFFSET_MAX:
                pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
-                break;
-        case MACHINE_MAX_OFFSET_DEVICE:
+               break;
+       case MACHINE_MAX_OFFSET_DEVICE:
                pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
-                break;
-        default:
+               break;
+       default:
                panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
                panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
-                break;
-        }
+               break;
+       }
        return pmap_max_offset(is64, pmap_max_offset_option);
 }
 
-boolean_t
-ml_wants_panic_trap_to_debugger(void)
-{
-       return FALSE;
-}
-
 void
 ml_panic_trap_to_debugger(__unused const char *panic_format_str,
-                          __unused va_list *panic_args,
-                          __unused unsigned int reason,
-                          __unused void *ctx,
-                          __unused uint64_t panic_options_mask,
-                          __unused unsigned long panic_caller)
+    __unused va_list *panic_args,
+    __unused unsigned int reason,
+    __unused void *ctx,
+    __unused uint64_t panic_options_mask,
+    __unused unsigned long panic_caller)
 {
        return;
 }
@@ -336,7 +299,9 @@ halt_all_cpus(boolean_t reboot)
                printf("CPU halted\n");
                PEHaltRestart(kPEHaltCPU);
        }
                printf("CPU halted\n");
                PEHaltRestart(kPEHaltCPU);
        }
-       while (1);
+       while (1) {
+               ;
+       }
 }
 
 __attribute__((noreturn))
@@ -352,7 +317,7 @@ halt_cpu(void)
  */
 void
 machine_signal_idle(
-                   processor_t processor)
+       processor_t processor)
 {
        cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
@@ -360,7 +325,7 @@ machine_signal_idle(
 
 void
 machine_signal_idle_deferred(
-                            processor_t processor)
+       processor_t processor)
 {
        cpu_signal_deferred(processor_to_cpu_datap(processor));
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
@@ -368,7 +333,7 @@ machine_signal_idle_deferred(
 
 void
 machine_signal_idle_cancel(
-                          processor_t processor)
+       processor_t processor)
 {
        cpu_signal_cancel(processor_to_cpu_datap(processor));
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
@@ -378,13 +343,13 @@ machine_signal_idle_cancel(
  *     Routine:        ml_install_interrupt_handler
  *     Function:       Initialize Interrupt Handler
  */
-void 
+void
 ml_install_interrupt_handler(
-                            void *nub,
-                            int source,
-                            void *target,
-                            IOInterruptHandler handler,
-                            void *refCon)
+       void *nub,
+       int source,
+       void *target,
+       IOInterruptHandler handler,
+       void *refCon)
 {
        cpu_data_t     *cpu_data_ptr;
        boolean_t       current_state;
@@ -398,17 +363,14 @@ ml_install_interrupt_handler(
        cpu_data_ptr->interrupt_handler = handler;
        cpu_data_ptr->interrupt_refCon = refCon;
 
-       cpu_data_ptr->interrupts_enabled = TRUE;
        (void) ml_set_interrupts_enabled(current_state);
-
-       initialize_screen(NULL, kPEAcquireScreen);
 }
 
 /*
  *     Routine:        ml_init_interrupt
  *     Function:       Initialize Interrupts
  */
-void 
+void
 ml_init_interrupt(void)
 {
 }
@@ -417,11 +379,12 @@ ml_init_interrupt(void)
  *     Routine:        ml_init_timebase
  *     Function:       register and setup Timebase, Decremeter services
  */
-void ml_init_timebase(
-       void            *args,
-       tbd_ops_t       tbd_funcs,
-       vm_offset_t     int_address,
-       vm_offset_t     int_value)
+void
+ml_init_timebase(
+       void            *args,
+       tbd_ops_t       tbd_funcs,
+       vm_offset_t     int_address,
+       vm_offset_t     int_value)
 {
        cpu_data_t     *cpu_data_ptr;
 
@@ -435,12 +398,6 @@ void ml_init_timebase(
        }
 }
 
-void
-fiq_context_bootstrap(boolean_t enable_fiq)
-{
-       fiq_context_init(enable_fiq);
-}
-
 void
 ml_parse_cpu_topology(void)
 {
@@ -449,43 +406,73 @@ ml_parse_cpu_topology(void)
        uint32_t cpu_boot_arg;
        int err;
 
-       err = DTLookupEntry(NULL, "/cpus", &entry);
+       err = SecureDTLookupEntry(NULL, "/cpus", &entry);
        assert(err == kSuccess);
 
-       err = DTInitEntryIterator(entry, &iter);
+       err = SecureDTInitEntryIterator(entry, &iter);
        assert(err == kSuccess);
 
-       while (kSuccess == DTIterateEntries(&iter, &child)) {
+       cpu_boot_arg = MAX_CPUS;
+       PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));
 
 
+       ml_topology_cluster_t *cluster = &topology_info.clusters[0];
+       unsigned int cpu_id = 0;
+       while (kSuccess == SecureDTIterateEntries(&iter, &child)) {
 #if MACH_ASSERT
                unsigned int propSize;
-               void *prop = NULL;
-               if (avail_cpus == 0) {
-                       if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
-                               panic("unable to retrieve state for cpu %u", avail_cpus);
+               void const *prop = NULL;
+               if (cpu_id == 0) {
+                       if (kSuccess != SecureDTGetProperty(child, "state", &prop, &propSize)) {
+                               panic("unable to retrieve state for cpu %u", cpu_id);
+                       }
 
-                       if (strncmp((char*)prop, "running", propSize) != 0)
+                       if (strncmp((char const *)prop, "running", propSize) != 0) {
                                panic("cpu 0 has not been marked as running!");
                                panic("cpu 0 has not been marked as running!");
+                       }
                }
-               assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
-               assert(avail_cpus == *((uint32_t*)prop));
+               assert(kSuccess == SecureDTGetProperty(child, "reg", &prop, &propSize));
+               assert(cpu_id == *((uint32_t const *)prop));
 #endif
-               ++avail_cpus;
-       }
+               if (cpu_id >= cpu_boot_arg) {
+                       break;
+               }
+
+               ml_topology_cpu_t *cpu = &topology_info.cpus[cpu_id];
+
+               cpu->cpu_id = cpu_id;
+               cpu->phys_id = cpu_id;
+               cpu->cluster_type = cluster->cluster_type;
+
+               cluster->num_cpus++;
+               cluster->cpu_mask |= 1ULL << cpu_id;
+
+               topology_info.num_cpus++;
+               topology_info.max_cpu_id = cpu_id;
 
-       cpu_boot_arg = avail_cpus;
-       if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
-           (avail_cpus > cpu_boot_arg))
-               avail_cpus = cpu_boot_arg;
+               cpu_id++;
+       }
 
-       if (avail_cpus == 0)
+       if (cpu_id == 0) {
                panic("No cpus found!");
                panic("No cpus found!");
+       }
+}
+
+const ml_topology_info_t *
+ml_get_topology_info(void)
+{
+       return &topology_info;
 }
 
 unsigned int
 ml_get_cpu_count(void)
 {
-       return avail_cpus;
+       return topology_info.num_cpus;
+}
+
+unsigned int
+ml_get_cluster_count(void)
+{
+       return topology_info.num_clusters;
 }
 
 int
@@ -503,25 +490,47 @@ ml_get_boot_cluster(void)
 int
 ml_get_cpu_number(uint32_t phys_id)
 {
+       if (phys_id > (uint32_t)ml_get_max_cpu_number()) {
+               return -1;
+       }
+
        return (int)phys_id;
 }
 
+int
+ml_get_cluster_number(__unused uint32_t phys_id)
+{
+       return 0;
+}
+
 int
 ml_get_max_cpu_number(void)
 {
-       return avail_cpus - 1;
+       return topology_info.num_cpus - 1;
+}
+
+int
+ml_get_max_cluster_number(void)
+{
+       return topology_info.max_cluster_id;
+}
+
+unsigned int
+ml_get_first_cpu_id(unsigned int cluster_id)
+{
+       return topology_info.clusters[cluster_id].first_cpu_id;
 }
 
 kern_return_t
 }
 
 kern_return_t
-ml_processor_register(
-                      ml_processor_info_t * in_processor_info,
-                      processor_t * processor_out,
-                      ipi_handler_t * ipi_handler)
+ml_processor_register(ml_processor_info_t *in_processor_info,
+    processor_t * processor_out, ipi_handler_t *ipi_handler_out,
+    perfmon_interrupt_handler_func *pmi_handler_out)
 {
        cpu_data_t *this_cpu_datap;
        boolean_t  is_boot_cpu;
 
 {
        cpu_data_t *this_cpu_datap;
        boolean_t  is_boot_cpu;
 
-       if (in_processor_info->phys_id >= MAX_CPUS) {
+       const unsigned int max_cpu_id = ml_get_max_cpu_number();
+       if (in_processor_info->phys_id > max_cpu_id) {
                /*
                 * The physical CPU ID indicates that we have more CPUs than
                 * this xnu build support.  This probably means we have an
@@ -531,13 +540,14 @@ ml_processor_register(
                 * is simply a convenient way to catch bugs in the pexpert
                 * headers.
                 */
-               panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS);
+               panic("phys_id %u is too large for max_cpu_id (%u)", in_processor_info->phys_id, max_cpu_id);
        }
 
        /* Fail the registration if the number of CPUs has been limited by boot-arg. */
-       if ((in_processor_info->phys_id >= avail_cpus) ||
-           (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) 
+       if ((in_processor_info->phys_id >= topology_info.num_cpus) ||
+           (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
                return KERN_FAILURE;
+       }
 
        if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
                is_boot_cpu = FALSE;
@@ -551,30 +561,33 @@ ml_processor_register(
        this_cpu_datap->cpu_id = in_processor_info->cpu_id;
 
        this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
-       if (this_cpu_datap->cpu_console_buf == (void *)(NULL))
+       if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
                goto processor_register_error;
+       }
 
        if (!is_boot_cpu) {
-               if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS)
+               if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
                        goto processor_register_error;
+               }
        }
 
-       this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
-       this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
+       this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle;
+       this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t) in_processor_info->platform_cache_dispatch;
        nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
        this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);
 
-       this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
+       this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer;
        this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;
 
-       this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
+       this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler;
        this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
        this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
        this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;
 
+       processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap);
        if (!is_boot_cpu) {
-               processor_init((struct processor *)this_cpu_datap->cpu_processor,
-                              this_cpu_datap->cpu_number, processor_pset(master_processor));
+               processor_init(processor, this_cpu_datap->cpu_number,
+                   processor_pset(master_processor));
 
                if (this_cpu_datap->cpu_l2_access_penalty) {
                        /*
@@ -583,23 +596,26 @@ ml_processor_register(
                         * scheduler, so that threads use the cores with better L2
                         * preferentially.
                         */
-                       processor_set_primary(this_cpu_datap->cpu_processor,
-                                             master_processor);
+                       processor_set_primary(processor, master_processor);
                }
        }
 
-       *processor_out = this_cpu_datap->cpu_processor;
-       *ipi_handler = cpu_signal_handler;
-       if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL)
+       *processor_out = processor;
+       *ipi_handler_out = cpu_signal_handler;
+       *pmi_handler_out = NULL;
+       if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
                *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
+       }
 
 #if KPC
-       if (kpc_register_cpu(this_cpu_datap) != TRUE)
+       if (kpc_register_cpu(this_cpu_datap) != TRUE) {
                goto processor_register_error;
+       }
 #endif
 
-       if (!is_boot_cpu)
-               early_random_cpu_init(this_cpu_datap->cpu_number);
+       if (!is_boot_cpu) {
+               random_cpu_init(this_cpu_datap->cpu_number);
+       }
 
        return KERN_SUCCESS;
 
@@ -607,15 +623,16 @@ processor_register_error:
 #if KPC
        kpc_unregister_cpu(this_cpu_datap);
 #endif
-       if (!is_boot_cpu)
+       if (!is_boot_cpu) {
                cpu_data_free(this_cpu_datap);
+       }
        return KERN_FAILURE;
 }
 
 void
 ml_init_arm_debug_interface(
-                           void * in_cpu_datap,
-                           vm_offset_t virt_address)
+       void * in_cpu_datap,
+       vm_offset_t virt_address)
 {
        ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
        do_debugid();
@@ -627,7 +644,7 @@ ml_init_arm_debug_interface(
  */
 void
 init_ast_check(
-              __unused processor_t processor)
+       __unused processor_t processor)
 {
 }
 
@@ -637,7 +654,7 @@ init_ast_check(
  */
 void
 cause_ast_check(
-                processor_t processor)
+       processor_t processor)
 {
        if (current_processor() != processor) {
                cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
@@ -647,7 +664,9 @@ cause_ast_check(
 
 extern uint32_t cpu_idle_count;
 
-void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
+void
+ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
+{
        *icp = ml_at_interrupt_context();
        *pidlep = (cpu_idle_count == real_ncpus);
 }
@@ -656,59 +675,93 @@ void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
  *     Routine:        ml_cause_interrupt
  *     Function:       Generate a fake interrupt
  */
-void 
+void
 ml_cause_interrupt(void)
 {
-       return;                 /* BS_XXX */
+       return;                 /* BS_XXX */
 }
 
 /* Map memory map IO space */
 vm_offset_t
 ml_io_map(
-         vm_offset_t phys_addr,
-         vm_size_t size)
+       vm_offset_t phys_addr,
+       vm_size_t size)
 {
-       return (io_map(phys_addr, size, VM_WIMG_IO));
+       return io_map(phys_addr, size, VM_WIMG_IO);
+}
+
+/* Map memory map IO space (with protections specified) */
+vm_offset_t
+ml_io_map_with_prot(
+       vm_offset_t phys_addr,
+       vm_size_t size,
+       vm_prot_t prot)
+{
+       return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
 }
 
 vm_offset_t
 ml_io_map_wcomb(
-          vm_offset_t phys_addr,
-          vm_size_t size)
+       vm_offset_t phys_addr,
+       vm_size_t size)
+{
+       return io_map(phys_addr, size, VM_WIMG_WCOMB);
+}
+
+void
+ml_io_unmap(vm_offset_t addr, vm_size_t sz)
 {
-        return (io_map(phys_addr, size, VM_WIMG_WCOMB));
+       pmap_remove(kernel_pmap, addr, addr + sz);
+       kmem_free(kernel_map, addr, sz);
 }
 
 /* boot memory allocation */
-vm_offset_t 
+vm_offset_t
 ml_static_malloc(
-                __unused vm_size_t size)
+       __unused vm_size_t size)
 {
-       return ((vm_offset_t) NULL);
+       return (vm_offset_t) NULL;
 }
 
 vm_map_address_t
 ml_map_high_window(
-       vm_offset_t     phys_addr,
-       vm_size_t       len)
+       vm_offset_t     phys_addr,
+       vm_size_t       len)
 {
        return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
 }
 
 vm_offset_t
 ml_static_ptovirt(
-                 vm_offset_t paddr)
+       vm_offset_t paddr)
 {
        return phystokv(paddr);
 }
 
 vm_offset_t
 ml_static_vtop(
-                 vm_offset_t vaddr)
+       vm_offset_t vaddr)
+{
+       assertf(((vm_address_t)(vaddr) - gVirtBase) < gPhysSize, "%s: illegal vaddr: %p", __func__, (void*)vaddr);
+       return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
+}
+
+/*
+ * Return the maximum contiguous KVA range that can be accessed from this
+ * physical address.  For arm64, we employ a segmented physical aperture
+ * relocation table which can limit the available range for a given PA to
+ * something less than the extent of physical memory.  But here, we still
+ * have a flat physical aperture, so no such requirement exists.
+ */
+vm_map_address_t
+phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
 {
-       if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize) 
-               panic("ml_static_ptovirt(): illegal vaddr: %p\n", (void*)vaddr);
-       return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
+       vm_size_t len = gPhysSize - (pa - gPhysBase);
+       if (*max_len > len) {
+               *max_len = len;
+       }
+       assertf((pa - gPhysBase) < gPhysSize, "%s: illegal PA: 0x%lx", __func__, (unsigned long)pa);
+       return pa - gPhysBase + gVirtBase;
 }
 
 vm_offset_t
@@ -718,6 +771,18 @@ ml_static_slide(
        return VM_KERNEL_SLIDE(vaddr);
 }
 
+kern_return_t
+ml_static_verify_page_protections(
+       uint64_t base, uint64_t size, vm_prot_t prot)
+{
+       /* XXX Implement Me */
+       (void)base;
+       (void)size;
+       (void)prot;
+       return KERN_FAILURE;
+}
+
+
 vm_offset_t
 ml_static_unslide(
        vm_offset_t vaddr)
@@ -737,14 +802,18 @@ ml_static_protect(
        ppnum_t       ppn;
        kern_return_t result = KERN_SUCCESS;
 
-       if (vaddr < VM_MIN_KERNEL_ADDRESS)
+       if (vaddr < VM_MIN_KERNEL_ADDRESS) {
                return KERN_FAILURE;
+       }
 
        assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */
 
        if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
                panic("ml_static_protect(): WX request on %p", (void *) vaddr);
        }
+       if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) {
+               panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr);
+       }
 
        /* Set up the protection bits, and block bits so we can validate block mappings. */
        if (new_prot & VM_PROT_WRITE) {
@@ -761,8 +830,8 @@ ml_static_protect(
        }
 
        for (vaddr_cur = vaddr;
-            vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
-            vaddr_cur += ARM_PGBYTES) {
+           vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
+           vaddr_cur += ARM_PGBYTES) {
                ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
                if (ppn != (vm_offset_t) NULL) {
                        tt_entry_t     *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
@@ -787,14 +856,12 @@ ml_static_protect(
 
                        ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
                        *pte_p = ptmp;
-#ifndef  __ARM_L1_PTW__
-                       FlushPoC_DcacheRegion((vm_offset_t) pte_p, sizeof(*pte_p));
-#endif
                }
        }
 
-       if (vaddr_cur > vaddr)
+       if (vaddr_cur > vaddr) {
                flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
+       }
 
        return result;
 }
@@ -805,22 +872,24 @@ ml_static_protect(
  */
 void
 ml_static_mfree(
-               vm_offset_t vaddr,
-               vm_size_t size)
+       vm_offset_t vaddr,
+       vm_size_t size)
 {
        vm_offset_t     vaddr_cur;
        ppnum_t         ppn;
        uint32_t freed_pages = 0;
+       uint32_t freed_kernelcache_pages = 0;
 
        /* It is acceptable (if bad) to fail to free. */
-       if (vaddr < VM_MIN_KERNEL_ADDRESS)
+       if (vaddr < VM_MIN_KERNEL_ADDRESS) {
                return;
+       }
 
-       assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
+       assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
 
        for (vaddr_cur = vaddr;
-            vaddr_cur < trunc_page_32(vaddr + size);
-            vaddr_cur += PAGE_SIZE) {
+           vaddr_cur < trunc_page_32(vaddr + size);
+           vaddr_cur += PAGE_SIZE) {
                ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
                if (ppn != (vm_offset_t) NULL) {
                        /*
@@ -832,22 +901,19 @@ ml_static_mfree(
                        if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
                                panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
                        }
-#if 0
-                       /*
-                        * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
-                        * relies on the persistence of these mappings for all time.
-                        */
-                       // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
-#endif
                        vm_page_create(ppn, (ppn + 1));
                        freed_pages++;
+                       if (vaddr_cur >= segLOWEST && vaddr_cur < end_kern) {
+                               freed_kernelcache_pages++;
+                       }
                }
        }
        vm_page_lockspin_queues();
        vm_page_wire_count -= freed_pages;
        vm_page_wire_count_initial -= freed_pages;
+       vm_page_kernelcache_count -= freed_kernelcache_pages;
        vm_page_unlock_queues();
-#if    DEBUG
+#if     DEBUG
        kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
 #endif
 }
        kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
 #endif
 }
@@ -867,25 +933,30 @@ ml_vtophys(vm_offset_t vaddr)
  * assumed to be wired; e.g., no attempt is made to guarantee that the
  * translations obtained remain valid for the duration of the copy process.
  */
-vm_size_t
+vm_size_t
 ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
 {
        addr64_t        cur_phys_dst, cur_phys_src;
        uint32_t        count, nbytes = 0;
 
        while (size > 0) {
-               if (!(cur_phys_src = kvtophys(virtsrc)))
+               if (!(cur_phys_src = kvtophys(virtsrc))) {
                        break;
-               if (!(cur_phys_dst = kvtophys(virtdst)))
+               }
+               if (!(cur_phys_dst = kvtophys(virtdst))) {
                        break;
+               }
                if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
-                   !pmap_valid_address(trunc_page_64(cur_phys_src)))
+                   !pmap_valid_address(trunc_page_64(cur_phys_src))) {
                        break;
+               }
                count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
-               if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
+               if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
                        count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
-               if (count > size)
+               }
+               if (count > size) {
                        count = size;
+               }
 
                bcopy_phys(cur_phys_src, cur_phys_dst, count);
 
@@ -908,20 +979,24 @@ ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
  *                     FALSE otherwise.
  */
 
-boolean_t ml_validate_nofault(
+boolean_t
+ml_validate_nofault(
        vm_offset_t virtsrc, vm_size_t size)
 {
        addr64_t cur_phys_src;
        uint32_t count;
 
        while (size > 0) {
-               if (!(cur_phys_src = kvtophys(virtsrc)))
+               if (!(cur_phys_src = kvtophys(virtsrc))) {
                        return FALSE;
-               if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
+               }
+               if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
                        return FALSE;
+               }
                count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
-               if (count > size)
+               if (count > size) {
                        count = (uint32_t)size;
+               }
 
                virtsrc += count;
                size -= count;
@@ -946,11 +1021,11 @@ active_rt_threads(__unused boolean_t active)
 }
 
 void
-thread_tell_urgency(__unused int urgency,
-                    __unused uint64_t rt_period,
-                   __unused uint64_t rt_deadline,
-                   __unused uint64_t sched_latency,
-                   __unused thread_t nthread)
+thread_tell_urgency(__unused thread_urgency_t urgency,
+    __unused uint64_t rt_period,
+    __unused uint64_t rt_deadline,
+    __unused uint64_t sched_latency,
+    __unused thread_t nthread)
 {
 }
 
@@ -962,15 +1037,17 @@ machine_run_count(__unused uint32_t count)
 processor_t
 machine_choose_processor(__unused processor_set_t pset, processor_t processor)
 {
-       return (processor);
+       return processor;
 }
 
-boolean_t machine_timeout_suspended(void) {
+boolean_t
+machine_timeout_suspended(void)
+{
        return FALSE;
 }
 
-kern_return_t 
-ml_interrupt_prewarm(__unused uint64_t deadline) 
+kern_return_t
+ml_interrupt_prewarm(__unused uint64_t deadline)
 {
        return KERN_FAILURE;
 }
@@ -1009,40 +1086,48 @@ ml_delay_should_spin(uint64_t interval)
        }
 }
 
-void ml_delay_on_yield(void) {}
+void
+ml_delay_on_yield(void)
+{
+}
 
-boolean_t ml_thread_is64bit(thread_t thread)
+boolean_t
+ml_thread_is64bit(thread_t thread)
 {
-       return (thread_is_64bit_addr(thread));
+       return thread_is_64bit_addr(thread);
 }
 
-void ml_timer_evaluate(void) {
+void
+ml_timer_evaluate(void)
+{
 }
 
 boolean_t
-ml_timer_forced_evaluation(void) {
+ml_timer_forced_evaluation(void)
+{
        return FALSE;
 }
 
 uint64_t
-ml_energy_stat(__unused thread_t t) {
+ml_energy_stat(__unused thread_t t)
+{
        return 0;
 }
 
 
 void
-ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
-#if CONFIG_EMBEDDED
+ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
+{
        /*
         * For now: update the resource coalition stats of the
         * current thread's coalition
         */
        task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
-#endif
 }
 
 uint64_t
-ml_gpu_stat(__unused thread_t t) {
+ml_gpu_stat(__unused thread_t t)
+{
        return 0;
 }
 
@@ -1051,9 +1136,11 @@ static void
 timer_state_event(boolean_t switch_to_kernel)
 {
        thread_t thread = current_thread();
-       if (!thread->precise_user_kernel_time) return;
+       if (!thread->precise_user_kernel_time) {
+               return;
+       }
 
-       processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
+       processor_t pd = current_processor();
        uint64_t now = ml_get_timebase();
 
        timer_stop(pd->current_state, now);
@@ -1078,19 +1165,28 @@ timer_state_event_kernel_to_user(void)
 }
 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
 
+uint32_t
+get_arm_cpu_version(void)
+{
+       uint32_t value = machine_read_midr();
+
+       /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
+       return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
+}
+
 boolean_t
 user_cont_hwclock_allowed(void)
 {
        return FALSE;
 }
 
-boolean_t
-user_timebase_allowed(void)
+uint8_t
+user_timebase_type(void)
 {
 #if __ARM_TIME__
-       return TRUE;
+       return USER_TIMEBASE_SPEC;
 #else
-       return FALSE;
+       return USER_TIMEBASE_NONE;
 #endif
 }
 
@@ -1098,7 +1194,7 @@ user_timebase_allowed(void)
  * The following are required for parts of the kernel
  * that cannot resolve these functions as inlines:
  */
-extern thread_t current_act(void);
+extern thread_t current_act(void) __attribute__((const));
 thread_t
 current_act(void)
 {
@@ -1106,7 +1202,7 @@ current_act(void)
 }
 
 #undef current_thread
-extern thread_t current_thread(void);
+extern thread_t current_thread(void) __attribute__((const));
 thread_t
 current_thread(void)
 {
@@ -1117,27 +1213,60 @@ current_thread(void)
 uintptr_t
 arm_user_protect_begin(thread_t thread)
 {
-    uintptr_t  ttbr0, asid = 0;                //  kernel asid
+       uintptr_t   ttbr0, asid = 0;            //  kernel asid
 
 
-    ttbr0 = __builtin_arm_mrc(15,0,2,0,0);             // Get TTBR0
-    if (ttbr0 != thread->machine.kptw_ttb) {
-        __builtin_arm_mcr(15,0,thread->machine.kptw_ttb,2,0,0);        // Set TTBR0
-        __builtin_arm_mcr(15,0,asid,13,0,1);   // Set CONTEXTIDR
-        __builtin_arm_isb(ISB_SY);
-    }
-    return ttbr0;
+       ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0);      // Get TTBR0
+       if (ttbr0 != thread->machine.kptw_ttb) {
+               __builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0
+               __builtin_arm_mcr(15, 0, asid, 13, 0, 1); // Set CONTEXTIDR
+               __builtin_arm_isb(ISB_SY);
+       }
+       return ttbr0;
 }
 
 void
 arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
 {
 }
 
 void
 arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
 {
-    if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
-        if (disable_interrupts)
-            __asm__ volatile ("cpsid if" ::: "memory");        // Disable FIQ/IRQ
-        __builtin_arm_mcr(15,0,thread->machine.uptw_ttb,2,0,0);        // Set TTBR0
-        __builtin_arm_mcr(15,0,thread->machine.asid,13,0,1);   // Set CONTEXTIDR with thread asid
-        __builtin_arm_dsb(DSB_ISH);
-        __builtin_arm_isb(ISB_SY);
-    }
+       if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
+               if (disable_interrupts) {
+                       __asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ
+               }
+               __builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0
+               __builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1); // Set CONTEXTIDR with thread asid
+               __builtin_arm_dsb(DSB_ISH);
+               __builtin_arm_isb(ISB_SY);
+       }
 }
 #endif // __ARM_USER_PROTECT__
 }
 #endif // __ARM_USER_PROTECT__
+
+void
+machine_lockdown(void)
+{
+       arm_vm_prot_finalize(PE_state.bootArgs);
+       lockdown_done = 1;
+}
+
+void
+ml_lockdown_init(void)
+{
+}
+
+void
+ml_hibernate_active_pre(void)
+{
+}
+
+void
+ml_hibernate_active_post(void)
+{
+}
+
+size_t
+ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions)
+{
+#pragma unused(vm_is64bit)
+       assert(regions != NULL);
+
+       *regions = NULL;
+       return 0;
+}