+ * This function will return KERN_INVALID_ADDRESS if an optimal address
+ * cannot be found. That is not necessarily a fatal error; the caller may
+ * still be able to do a non-optimal assignment.
+ */
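+
+/*
+ * Illustrative caller sketch (hypothetical; kernel_map, pa, and size are
+ * assumed names for the example only, not part of this change):
+ *
+ *	vm_offset_t va, bnd;
+ *
+ *	if(vm_map_block(kernel_map, &va, &bnd, pa, size,
+ *	    VM_PROT_READ | VM_PROT_WRITE) != KERN_SUCCESS) {
+ *		(fall back to an ordinary page-at-a-time mapping)
+ *	}
+ */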
+
+kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa,
+ vm_size_t size, vm_prot_t prot) {
+
+ vm_map_entry_t entry, next, tmp_entry, new_entry;
+ vm_offset_t start, end, algnpa, endadr, strtadr, curradr;
+ vm_offset_t boundary;
+
+ unsigned int maxsize, minsize, leading, trailing;
+
+ assert(page_aligned(pa));
+ assert(page_aligned(size));
+
+ if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); /* Dude, like we need a target map */
+
+ minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
+ maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
+
+ boundary = 0x80000000 >> cntlzw(size); /* Get optimal boundary */
+ if(boundary > maxsize) boundary = maxsize; /* Pin this at maximum supported hardware size */
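+ /* Worked example (illustrative values): with blokValid = 0x00018000 the
+    lowest set bit gives minsize = 0x00008000 and the highest set bit gives
+    maxsize = 0x00010000; a request of size 0x00024000 would yield
+    boundary = 0x00020000, which is then pinned down to 0x00010000 here */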
+
+ vm_map_lock(map); /* No touchee no mapee */
+
+ for(; boundary > minsize; boundary >>= 1) { /* Try all optimizations until we find one */
+ if(!(boundary & blokValid)) continue; /* Skip unavailable block sizes */
+ algnpa = (pa + boundary - 1) & -boundary; /* Round physical up */
+ leading = algnpa - pa; /* Get leading size */
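+ /* Worked example (illustrative values): with pa = 0x12345000 and
+    boundary = 0x00010000, algnpa rounds up to 0x12350000 and
+    leading = 0x0000B000 */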
+
+ curradr = 0; /* Start low */
+
+ while(1) { /* Try all possible values for this opt level */
+
+ curradr = curradr + boundary; /* Get the next optimal address */
+ strtadr = curradr - leading; /* Calculate start of optimal range */
+ endadr = strtadr + size; /* And now the end */
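+ /* Backing strtadr off by 'leading' keeps the boundary-aligned physical
+    address algnpa lined up with the boundary-aligned virtual address
+    curradr, i.e. virtual and physical agree modulo boundary, which is
+    what lets a block mapping cover the aligned part of the range */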
+
+ if((curradr < boundary) || /* Did address wrap here? */
+ (strtadr > curradr) || /* How about this way? */
+ (endadr < strtadr)) break; /* We wrapped, try next lower optimization... */
+
+ if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */
+ if(endadr > map->max_offset) break; /* No room right now... */
+
+ if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */
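+ /* Note: vm_map_lookup_entry() returns TRUE when strtadr falls inside an
+    existing entry; on FALSE it leaves 'entry' at the entry just before the
+    hole, so its successor bounds the candidate range checked below */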
+
+ next = entry->vme_next; /* Get the next entry */
+ if((next == vm_map_to_entry(map)) || /* Are we the last entry? */
+ (next->vme_start >= endadr)) { /* or do we end before the next entry? */
+
+ new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */
+ VM_OBJECT_NULL,
+ 0, /* Offset into object of 0 */
+ FALSE, /* No copy needed */
+ FALSE, /* Not shared */
+ FALSE, /* Not in transition */
+ prot, /* Set the protection to requested */
+ prot, /* We can't change protection */
+ VM_BEHAVIOR_DEFAULT, /* Use default behavior, but makes no never mind,
+ 'cause we don't page in this area */
+ VM_INHERIT_DEFAULT, /* Default inheritance */
+ 0); /* Nothing is wired */
+
+ vm_map_unlock(map); /* Let the world see it all */
+ *va = strtadr; /* Tell everyone */
+ *bnd = boundary; /* Say what boundary we are aligned to */
+ return(KERN_SUCCESS); /* Leave, all is right with the world... */
+ }
+ }
+ }
+
+ vm_map_unlock(map); /* Couldn't find a slot */
+ return(KERN_INVALID_ADDRESS);
+}
+
+/*
+ * Copies data from a physical page to a virtual page. This is used to
+ * move data from the kernel to user state.
+ *
+ * Note that it is invalid to have a source that spans a page boundary.
+ * This can block.
+ * We don't check protection.
+ * And we don't handle a block-mapped sink address either.