- pmap_map_block(pmap, va, pa, length, prot, memattr, 0); /* Set up a block mapped area */
-
-#else
-// enter each page's physical address in the target map
- for (off = 0; off < length; off += page_size) { /* Loop for the whole length */
- pmap_enter(pmap, va + off, pa + off, prot, TRUE); /* Map it in */
+ pmap_set_cache_attributes(pagenum, flags);
+
+ vm_map_set_cache_attr(map, (vm_map_offset_t)va);
+
+
+ // Set up a block mapped area
+ return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
+}
+
+/*
+ * IOUnmapPages - tear down any pmap mappings covering [va, va + length)
+ * in the given VM map.
+ *
+ * The range is widened to whole pages before removal: trunc_page_64
+ * rounds the start down and round_page_64 rounds the end up, so a
+ * partially-covered page is unmapped in full.  Always returns
+ * KERN_SUCCESS.
+ */
+kern_return_t
+IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
+{
+	pmap_t pmap = map->pmap;	/* physical map backing this VM map */
+
+	pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
+
+	return KERN_SUCCESS;
+}
+
+kern_return_t
+IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
+ mach_vm_size_t __unused length, unsigned int __unused options)
+{
+ mach_vm_size_t off;
+ vm_prot_t prot;
+ unsigned int flags;
+ pmap_t pmap = map->pmap;
+ pmap_flush_context pmap_flush_context_storage;
+ boolean_t delayed_pmap_flush = FALSE;
+
+ prot = (options & kIOMapReadOnly)
+ ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
+
+ switch (options & kIOMapCacheMask) {
+ // what cache mode do we need?
+ case kIOMapDefaultCache:
+ default:
+ return KERN_INVALID_ARGUMENT;
+
+ case kIOMapInhibitCache:
+ flags = VM_WIMG_IO;
+ break;
+
+ case kIOMapWriteThruCache:
+ flags = VM_WIMG_WTHRU;
+ break;
+
+ case kIOMapWriteCombineCache:
+ flags = VM_WIMG_WCOMB;
+ break;
+
+ case kIOMapCopybackCache:
+ flags = VM_WIMG_COPYBACK;
+ break;
+
+ case kIOMapCopybackInnerCache:
+ flags = VM_WIMG_INNERWBACK;
+ break;
+
+ case kIOMapPostedWrite:
+ flags = VM_WIMG_POSTED;
+ break;
+
+ case kIOMapRealTimeCache:
+ flags = VM_WIMG_RT;
+ break;
+ }
+
+ pmap_flush_context_init(&pmap_flush_context_storage);
+ delayed_pmap_flush = FALSE;
+
+ // enter each page's physical address in the target map
+ for (off = 0; off < length; off += page_size) {
+ ppnum_t ppnum = pmap_find_phys(pmap, va + off);
+ if (ppnum) {
+ pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
+ PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
+ delayed_pmap_flush = TRUE;
+ }
+ }
+ if (delayed_pmap_flush == TRUE) {
+ pmap_flush(&pmap_flush_context_storage);