+kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
+                                 mach_vm_size_t __unused length, unsigned int __unused options)
+{
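+    // Re-enter an already-mapped VA range into the map's pmap with new
+    // cache (WIMG) attributes. The parameters are marked __unused because
+    // the __ppc__ build compiles an empty body that never references them.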
+#if __ppc__
+    // Can't remap block mappings, but PPC doesn't speculatively read from
+    // write-combined (WC) memory, so no cache-mode change is needed here.
+#else
+
+    mach_vm_size_t off;
+    vm_prot_t      prot;
+    unsigned int   flags;
+    pmap_t         pmap = map->pmap;
+
+    prot = (options & kIOMapReadOnly)
+            ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
+
+    // Map the requested kIOMap* cache option to a pmap WIMG cache attribute.
+    switch (options & kIOMapCacheMask)
+    {
+        // an explicit cache mode is required; "default" is rejected
+        case kIOMapDefaultCache:
+        default:
+            return (KERN_INVALID_ARGUMENT);
+
+        case kIOMapInhibitCache:
+            flags = VM_WIMG_IO;
+            break;
+
+        case kIOMapWriteThruCache:
+            flags = VM_WIMG_WTHRU;
+            break;
+
+        case kIOMapWriteCombineCache:
+            flags = VM_WIMG_WCOMB;
+            break;
+
+        case kIOMapCopybackCache:
+            flags = VM_WIMG_COPYBACK;
+            break;
+    }
+
+    // Re-enter each resident page's physical address in the target map
+    // with the new protection and cache attributes.
+    for (off = 0; off < length; off += page_size)
+    {
+        // pmap_find_phys() returns the physical page currently mapped at
+        // this virtual address, or 0 if nothing is mapped there.
+        ppnum_t ppnum = pmap_find_phys(pmap, va + off);
+        if (ppnum)
+            pmap_enter(pmap, va + off, ppnum, prot, flags, TRUE);
+    }
+
+#endif
+
+    return (KERN_SUCCESS);
+}
+
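+// Scan the pmap's physical memory region table and return the highest
+// physical page number present in the system.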
+ppnum_t IOGetLastPageNumber(void)
+{
+    ppnum_t      lastPage, highest = 0;
+    unsigned int idx;
+
+    // Each architecture supplies its own loop header over its pmap region
+    // table; the comparison in the loop body is shared.
+#if __ppc__
+    for (idx = 0; idx < pmap_mem_regions_count; idx++)
+    {
+        lastPage = pmap_mem_regions[idx].mrEnd;
+#elif __i386__ || __x86_64__
+    for (idx = 0; idx < pmap_memory_region_count; idx++)
+    {
+        // 'end' is one page past the region, so the last page is end - 1
+        lastPage = pmap_memory_regions[idx].end - 1;
+#else
+#error arch
+#endif
+        if (lastPage > highest)
+            highest = lastPage;
+    }
+    return (highest);
+}
+
+