vm_address_t * address,
vm_size_t * size );
+
+extern ppnum_t IOGetLastPageNumber(void);
+
/*
* Lookup a device by its port.
* Doesn't consume the naked send right; produces a device reference.
/* need to create a pmap function to generalize */
unsigned int IODefaultCacheBits(addr64_t pa)
{
- unsigned int flags;
-#ifndef i386
- struct phys_entry * pp;
-
- // Find physical address
- if ((pp = pmap_find_physentry(pa >> 12))) {
- // Use physical attributes as default
- // NOTE: DEVICE_PAGER_FLAGS are made to line up
- flags = VM_MEM_COHERENT; /* We only support coherent memory */
- if(pp->ppLink & ppG) flags |= VM_MEM_GUARDED; /* Add in guarded if it is */
- if(pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE; /* Add in cache inhibited if so */
- } else
- // If no physical, just hard code attributes
- flags = VM_WIMG_IO;
-#else
- extern pmap_paddr_t avail_end;
- if (pa < avail_end)
- flags = VM_WIMG_COPYBACK;
- else
- flags = VM_WIMG_IO;
-#endif
-
- return flags;
+ return(pmap_cache_attributes(pa >> PAGE_SHIFT));
}
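
For orientation, a hypothetical caller sketch (not part of the patch): it assumes that the kIOMapDefaultCache case of IOMapPages, which is not shown in these hunks, falls through to IODefaultCacheBits(), so a caller can request the default mode and let the pmap layer pick the WIMG attributes. The helper name is illustrative.

/* Sketch only: map a single page and let the pmap layer choose the
 * cache attributes via IODefaultCacheBits()/pmap_cache_attributes(). */
static kern_return_t
map_one_page_default_cache(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa)
{
    return IOMapPages(map, va, pa, page_size, kIOMapDefaultCache);
}
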
-kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
- vm_size_t length, unsigned int options)
+kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
+ mach_vm_size_t length, unsigned int options)
{
- vm_size_t off;
vm_prot_t prot;
unsigned int flags;
pmap_t pmap = map->pmap;
flags = VM_WIMG_WTHRU;
break;
- case kIOWriteCombineCache:
+ case kIOMapWriteCombineCache:
flags = VM_WIMG_WCOMB;
break;
flags = VM_WIMG_COPYBACK;
break;
}
-#if __ppc__
// Set up a block mapped area
- pmap_map_block(pmap, (addr64_t)va, (ppnum_t)(pa >> 12), (uint32_t)(length >> 12), prot, flags, 0);
+ pmap_map_block(pmap, va, (ppnum_t)atop_64(pa), (uint32_t) atop_64(round_page_64(length)), prot, flags, 0);
+
+ return( KERN_SUCCESS );
+}
+
+kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
+{
+ pmap_t pmap = map->pmap;
+
+ pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
+
+ return( KERN_SUCCESS );
+}
+
+kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
+ mach_vm_size_t length, unsigned int options)
+{
+ mach_vm_size_t off;
+ vm_prot_t prot;
+ unsigned int flags;
+ pmap_t pmap = map->pmap;
+
+ prot = (options & kIOMapReadOnly)
+ ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
+
+ switch (options & kIOMapCacheMask)
+ {
+ /* What cache mode do we need? */
+ case kIOMapDefaultCache:
+ default:
+ return (KERN_INVALID_ARGUMENT);
+
+ case kIOMapInhibitCache:
+ flags = VM_WIMG_IO;
+ break;
+
+ case kIOMapWriteThruCache:
+ flags = VM_WIMG_WTHRU;
+ break;
+ case kIOMapWriteCombineCache:
+ flags = VM_WIMG_WCOMB;
+ break;
+
+ case kIOMapCopybackCache:
+ flags = VM_WIMG_COPYBACK;
+ break;
+ }
+#if __ppc__
+ // can't remap block mappings, but ppc doesn't speculatively read from WC
#else
-// enter each page's physical address in the target map
+ // enter each page's physical address in the target map
for (off = 0; off < length; off += page_size)
- pmap_enter(pmap, va + off, (pa + off) >> 12, prot, flags, TRUE);
+ {
+ ppnum_t ppnum = pmap_find_phys(pmap, va + off);
+ if (ppnum)
+ pmap_enter(pmap, va + off, ppnum, prot, flags, TRUE);
+ }
#endif
- return( KERN_SUCCESS );
+ return (KERN_SUCCESS);
}
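
A hedged usage sketch for the new IOProtectCacheMode() (the helper name is illustrative, not from the patch): since kIOMapDefaultCache is rejected with KERN_INVALID_ARGUMENT above, callers must name an explicit cache mode; here an existing kernel-map range is re-flagged as write-combined while staying read/write.

/* Sketch only: re-flag an already-mapped kernel range as write-combined.
 * Leaving kIOMapReadOnly out of the options keeps the range read/write. */
static kern_return_t
set_range_write_combined(mach_vm_address_t va, mach_vm_size_t length)
{
    return IOProtectCacheMode(kernel_map, va, length, kIOMapWriteCombineCache);
}
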
-kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length)
+ppnum_t IOGetLastPageNumber(void)
{
- pmap_t pmap = map->pmap;
+ ppnum_t lastPage, highest = 0;
- pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
-
- return( KERN_SUCCESS );
+#if __ppc__
+ int idx;
+ for (idx = 0; idx < pmap_mem_regions_count; idx++)
+ {
+ lastPage = pmap_mem_regions[idx].mrEnd;
+#elif __i386__
+ unsigned int idx;
+ for (idx = 0; idx < pmap_memory_region_count; idx++)
+ {
+ lastPage = pmap_memory_regions[idx].end - 1;
+#else
+#error arch
+#endif
+ if (lastPage > highest)
+ highest = lastPage;
+ }
+ return (highest);
}
+
void IOGetTime( mach_timespec_t * clock_time);
void IOGetTime( mach_timespec_t * clock_time)
{
clock_get_system_nanotime(&clock_time->tv_sec, &clock_time->tv_nsec);
}
+
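
Finally, a hedged sketch of how the IOGetLastPageNumber() routine added above might be consumed; the helper and the "+ 1" page-to-byte conversion are illustrative, and the result is only an upper bound since physical memory can be discontiguous.

/* Sketch only: compute one byte past the highest physical RAM page,
 * e.g. as a coarse "does this address fall below the top of RAM?" bound. */
static boolean_t
phys_addr_below_top_of_ram(addr64_t pa)
{
    uint64_t top = ((uint64_t) IOGetLastPageNumber() + 1) << PAGE_SHIFT;
    return (pa < top);
}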