+kern_return_t
+IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
+ mach_vm_size_t length, unsigned int options)
+{
+ mach_vm_size_t off;
+ vm_prot_t prot;
+ unsigned int flags;
+ pmap_t pmap = map->pmap;
+ pmap_flush_context pmap_flush_context_storage;
+ boolean_t delayed_pmap_flush = FALSE;
+
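+ // honor kIOMapReadOnly; otherwise allow both read and write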
+ prot = (options & kIOMapReadOnly)
+ ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
+
+ // what cache mode do we need?
+ switch (options & kIOMapCacheMask) {
+ case kIOMapDefaultCache:
+ default:
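+ // the default cache mode names no explicit attribute to apply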
+ return KERN_INVALID_ARGUMENT;
+
+ case kIOMapInhibitCache:
+ flags = VM_WIMG_IO;
+ break;
+
+ case kIOMapWriteThruCache:
+ flags = VM_WIMG_WTHRU;
+ break;
+
+ case kIOMapWriteCombineCache:
+ flags = VM_WIMG_WCOMB;
+ break;
+
+ case kIOMapCopybackCache:
+ flags = VM_WIMG_COPYBACK;
+ break;
+
+ case kIOMapCopybackInnerCache:
+ flags = VM_WIMG_INNERWBACK;
+ break;
+
+ case kIOMapPostedWrite:
+ flags = VM_WIMG_POSTED;
+ break;
+
+ case kIOMapRealTimeCache:
+ flags = VM_WIMG_RT;
+ break;
+ }
+
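+ // defer TLB/cache flushes so they can be issued as one batch after all pages are updated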
+ pmap_flush_context_init(&pmap_flush_context_storage);
+ delayed_pmap_flush = FALSE;
+
+ // enter each page's physical address in the target map
+ for (off = 0; off < length; off += page_size) {
+ ppnum_t ppnum = pmap_find_phys(pmap, va + off);
+ if (ppnum) {
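+ // re-enter the existing translation with the new cache attributes; PMAP_OPTIONS_NOFLUSH defers the flush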
+ pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
+ PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
+ delayed_pmap_flush = TRUE;
+ }
+ }
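+ // issue the deferred flushes now that every resident page has been re-entered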
+ if (delayed_pmap_flush == TRUE) {
+ pmap_flush(&pmap_flush_context_storage);
+ }
+
+ return KERN_SUCCESS;
+}
+
+ppnum_t
+IOGetLastPageNumber(void)
+{
+#if __i386__ || __x86_64__
+ ppnum_t lastPage, highest = 0;
+ unsigned int idx;
+
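+ // scan all physical memory regions for the highest page number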
+ for (idx = 0; idx < pmap_memory_region_count; idx++) {
+ lastPage = pmap_memory_regions[idx].end - 1;
+ if (lastPage > highest) {
+ highest = lastPage;
+ }
+ }
+ return highest;
+#elif __arm__ || __arm64__
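+ // not tracked this way on ARM; report 0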
+ return 0;
+#else
+#error unknown arch
+#endif
+}
+
+