+/*
+ * void MapUserMemoryWindowInit(void)
+ *
+ * Initialize anything we need to in order to map user address space slices into
+ * the kernel. Primarily used for copy in/out.
+ *
+ * Currently we only support one 512MB slot for this purpose. Two special
+ * mappings are defined for it: the special pmap nest and the linkage mapping.
+ *
+ * The special pmap nest (which is allocated in this function) is used as a placeholder
+ * in the kernel's pmap search list. It is 512MB long and covers the address range
+ * starting at lgUMWvaddr. It points to no actual memory; when the fault handler
+ * hits in it, it knows to look in the per_proc and start using the linkage
+ * mapping contained therein.
+ *
+ * The linkage mapping is used to glue the user address space slice into the
+ * kernel. It contains the relocation information used to transform the faulting
+ * kernel address into the user address space (an illustrative sketch of this
+ * relocation follows the function below). It also provides the link to the
+ * user's pmap. This is pointed to by the per_proc and is switched in and out
+ * whenever there is a context switch.
+ *
+ */
+
+void MapUserMemoryWindowInit(void) {
+
+ addr64_t colladdr;
+ int nlists;
+ mapping_t *mp;
+
+ nlists = mapSetLists(kernel_pmap); /* Set number of lists this will be on */
+
+ mp = mapping_alloc(nlists); /* Get a spare mapping block */
+
+ mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | mpBSu | nlists; /* Make this a permanent nested pmap with a 32MB basic size unit */
+ /* Set the flags. Make sure busy count is 1 */
+ mp->mpSpace = kernel_pmap->space; /* Set the address space/pmap lookup ID */
+ mp->u.mpBSize = 15; /* Set the size to 2 segments in 32MB chunks - 1 */
+ mp->mpPte = 0; /* Means nothing */
+ mp->mpPAddr = 0; /* Means nothing */
+ mp->mpVAddr = lowGlo.lgUMWvaddr; /* Set the address range we cover */
+ mp->mpNestReloc = 0; /* Means nothing */
+
+ colladdr = hw_add_map(kernel_pmap, mp); /* Go add the mapping to the pmap */
+
+ if(colladdr) { /* Did it collide? */
+ panic("MapUserMemoryWindowInit: MapUserMemoryWindow range already mapped\n");
+ }
+
+ return;
+}
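+
+/*
+ * Illustrative sketch only (not part of the kernel code path): one way to picture
+ * the address translation that the linkage mapping's relocation encodes. The helper
+ * name umw_kernel_to_user and its parameters are assumptions made for this example,
+ * not kernel symbols; the real translation is performed by the fault handler from
+ * the relocation carried in the linkage mapping.
+ */
+#if 0	/* illustration only, never compiled */
+static addr64_t umw_kernel_to_user(
+	addr64_t kva,			/* Faulting kernel address inside the 512MB window */
+	addr64_t umwBase,		/* Kernel base of the window, i.e. lgUMWvaddr */
+	addr64_t userSegBase) {		/* User segment base that the window is glued to */
+
+	return (kva - umwBase) + userSegBase;	/* Rebase the window offset onto the user segment */
+}
+#endif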
+
+/*
+ * addr64_t MapUserMemoryWindow(vm_map_t map, addr64_t va)
+ *
+ * map = the vm_map that we are mapping into the kernel
+ * va = start of the address range we are mapping
+ * Note that we do not test validity; we choose to trust our fellows...
+ *
+ * Maps a 512M slice of a user address space into a predefined kernel range
+ * on a per-thread basis. We map only the first 256M segment, allowing the
+ * second 256M segment to fault in as needed. This allows our clients to access
+ * an arbitrarily aligned operand up to 256M in size.
+ *
+ * In the future, the restriction of a predefined range may be loosened.
+ *
+ * Builds the proper linkage map to map the user range. We round the address
+ * down to the previous segment boundary and calculate the relocation to the
+ * kernel slot (a worked example follows this comment block).
+ *
+ * We always make a segment table entry here if we need to. This is mainly
+ * because of copyin/out; if we don't, there will be multiple segment faults
+ * for each system call. I have seen upwards of 30,000 per second.
+ *
+ * We do check, however, to see if the slice is already mapped and, if so,
+ * we just exit. This is done for performance reasons. It was found that
+ * there was a considerable boost in copyin/out performance if we did not
+ * invalidate the segment at ReleaseUserAddressSpace time, so we dumped the
+ * restriction that you had to bracket MapUserMemoryWindow. There is a yet
+ * further boost if we do not need to map it each time. The theory behind
+ * this is that copies are often to or from the same segment and are done
+ * multiple times within the same system call. To take advantage of that,
+ * we check umwSpace and umwRelo to see if we've already got it.
+ *
+ * We also need to half-invalidate the slice when we context switch or go
+ * back to user state. A half-invalidate does not clear the actual mapping,
+ * but it does force the MapUserMemoryWindow function to reload the segment
+ * register/SLBE. If this is not done, we can end up with some pretty severe
+ * performance penalties. If we map a slice and the cached space/relocation is
+ * the same, we won't reload the segment registers. However, since we ran someone else,
+ * our SR is cleared and we will take a fault. This is reasonable if we block
+ * while copying (e.g., we took a page fault), but it is not reasonable when we
+ * just start. For this reason, we half-invalidate to make sure that the SR is
+ * explicitly reloaded.
+ *
+ * Note that we do not go to the trouble of making a pmap segment cache
+ * entry for these guys because they are very short term -- 99.99% of the time
+ * they will be unmapped before the next context switch.
+ *
+ */
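+
+/*
+ * Worked example (illustrative only, with made-up addresses): a user va of
+ * 0x0000000012345678 rounds down to the segment base 0x0000000010000000
+ * (va & 0xFFFFFFFFF0000000ULL). If the kernel window were based at
+ * 0x00000000E0000000, the relocation would be the difference between the two
+ * bases, and a later kernel fault at 0x00000000E2345678 would resolve back to
+ * user address 0x0000000012345678.
+ */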
+
+addr64_t MapUserMemoryWindow(
+ vm_map_t map,
+ addr64_t va) {
+
+ addr64_t baddrs, reladd;
+ thread_t thread;
+ mapping_t *mp;
+
+ baddrs = va & 0xFFFFFFFFF0000000ULL; /* Isolate the segment */
+ thread = current_thread(); /* Remember our activation */