+/* Mutable state used while populating a commpage.  `next` acts as a bump
+ * pointer into the page; `cur_routine`/`matched` track the routine currently
+ * being placed (NOTE(review): presumably one variant per routine is selected
+ * based on CPU capabilities — confirm against the population code below).
+ */
+static char *next; // next available byte in comm page
+static int cur_routine; // comm page address of "current" routine
+static int matched; // true if we've found a match for "current" routine
+static char *commPagePtr; // virtual address in kernel of commpage we are working on
+
+/* Descriptors for all candidate commpage routines, defined elsewhere in the
+ * project (one commpage_descriptor per implementation variant).  Naming
+ * appears to encode the variant: _32/_64 word size, _mp/_up multi- vs
+ * uni-processor, and CPU-model suffixes (_g3/_g4/_g5/_970) — TODO confirm
+ * against the descriptor definitions.
+ */
+extern commpage_descriptor compare_and_swap32_on32;
+extern commpage_descriptor compare_and_swap32_on64;
+extern commpage_descriptor compare_and_swap64;
+extern commpage_descriptor atomic_enqueue32;
+extern commpage_descriptor atomic_enqueue64;
+extern commpage_descriptor atomic_dequeue32_on32;
+extern commpage_descriptor atomic_dequeue32_on64;
+extern commpage_descriptor atomic_dequeue64;
+extern commpage_descriptor memory_barrier_up;
+extern commpage_descriptor memory_barrier_mp32;
+extern commpage_descriptor memory_barrier_mp64;
+extern commpage_descriptor atomic_add32;
+extern commpage_descriptor atomic_add64;
+extern commpage_descriptor mach_absolute_time_32;
+extern commpage_descriptor mach_absolute_time_64;
+extern commpage_descriptor mach_absolute_time_lp64;
+extern commpage_descriptor spinlock_32_try_mp;
+extern commpage_descriptor spinlock_32_try_up;
+extern commpage_descriptor spinlock_64_try_mp;
+extern commpage_descriptor spinlock_64_try_up;
+extern commpage_descriptor spinlock_32_lock_mp;
+extern commpage_descriptor spinlock_32_lock_up;
+extern commpage_descriptor spinlock_64_lock_mp;
+extern commpage_descriptor spinlock_64_lock_up;
+extern commpage_descriptor spinlock_32_unlock_mp;
+extern commpage_descriptor spinlock_32_unlock_up;
+extern commpage_descriptor spinlock_64_unlock_mp;
+extern commpage_descriptor spinlock_64_unlock_up;
+extern commpage_descriptor pthread_getspecific_sprg3_32;
+extern commpage_descriptor pthread_getspecific_sprg3_64;
+extern commpage_descriptor pthread_getspecific_uftrap;
+extern commpage_descriptor gettimeofday_32;
+extern commpage_descriptor gettimeofday_g5_32;
+extern commpage_descriptor gettimeofday_g5_64;
+extern commpage_descriptor commpage_flush_dcache;
+extern commpage_descriptor commpage_flush_icache;
+extern commpage_descriptor pthread_self_sprg3;
+extern commpage_descriptor pthread_self_uftrap;
+extern commpage_descriptor spinlock_relinquish;
+extern commpage_descriptor bzero_32;
+extern commpage_descriptor bzero_128;
+extern commpage_descriptor bcopy_g3;
+extern commpage_descriptor bcopy_g4;
+extern commpage_descriptor bcopy_970;
+extern commpage_descriptor bcopy_64;
+extern commpage_descriptor compare_and_swap32_on32b;
+extern commpage_descriptor compare_and_swap32_on64b;
+extern commpage_descriptor compare_and_swap64b;
+extern commpage_descriptor memset_64;
+extern commpage_descriptor memset_g3;
+extern commpage_descriptor memset_g4;
+extern commpage_descriptor memset_g5;
+extern commpage_descriptor bigcopy_970;
+
+/* The list of all possible commpage routines. WARNING: the check for overlap
+ * assumes that these routines are in strictly ascending order, sorted by address
+ * in the commpage. We panic if not.
+ */
+/* NOTE(review): the ordering invariant above means entries must NOT be
+ * regrouped or alphabetized — order here mirrors the routines' fixed
+ * addresses within the commpage.
+ */
+static commpage_descriptor *routines[] = {
+ &compare_and_swap32_on32,
+ &compare_and_swap32_on64,
+ &compare_and_swap64,
+ &atomic_enqueue32,
+ &atomic_enqueue64,
+ &atomic_dequeue32_on32,
+ &atomic_dequeue32_on64,
+ &atomic_dequeue64,
+ &memory_barrier_up,
+ &memory_barrier_mp32,
+ &memory_barrier_mp64,
+ &atomic_add32,
+ &atomic_add64,
+ &mach_absolute_time_32,
+ &mach_absolute_time_64,
+ &mach_absolute_time_lp64,
+ &spinlock_32_try_mp,
+ &spinlock_32_try_up,
+ &spinlock_64_try_mp,
+ &spinlock_64_try_up,
+ &spinlock_32_lock_mp,
+ &spinlock_32_lock_up,
+ &spinlock_64_lock_mp,
+ &spinlock_64_lock_up,
+ &spinlock_32_unlock_mp,
+ &spinlock_32_unlock_up,
+ &spinlock_64_unlock_mp,
+ &spinlock_64_unlock_up,
+ &pthread_getspecific_sprg3_32,
+ &pthread_getspecific_sprg3_64,
+ &pthread_getspecific_uftrap,
+ &gettimeofday_32,
+ &gettimeofday_g5_32,
+ &gettimeofday_g5_64,
+ &commpage_flush_dcache,
+ &commpage_flush_icache,
+ &pthread_self_sprg3,
+ &pthread_self_uftrap,
+ &spinlock_relinquish,
+ &bzero_32,
+ &bzero_128,
+ &bcopy_g3,
+ &bcopy_g4,
+ &bcopy_970,
+ &bcopy_64,
+ &compare_and_swap32_on32b,
+ &compare_and_swap32_on64b,
+ &compare_and_swap64b,
+ &memset_64,
+ &memset_g3,
+ &memset_g4,
+ &memset_g5,
+ &bigcopy_970,
+ NULL }; // sentinel: terminates iteration over the table
+
+
+/* Allocate the commpages and add to one of the shared submaps created by vm.
+ * Called once each for the 32 and 64-bit submaps.