mach_vm_address_t alignment,
mach_vm_address_t physicalMask)
{
- kern_return_t kr;
task_t mapTask = NULL;
vm_map_t vmmap = NULL;
mach_vm_address_t highestMask = 0;
bool mapped = false;
bool needZero;
- if (!capacity)
- return false;
+ if (!capacity) return false;
_options = options;
_capacity = capacity;
if (!(kIOMemoryMapperNone & options))
{
IOMapper::checkForSystemMapper();
mapped = (0 != IOMapper::gSystem);
}
- needZero = mapped;
+ needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
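+ // (presumably: mapper-backed or cross-task-shared buffers must start out zeroed so stale page contents can't leak)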
if (physicalMask && (alignment <= 1))
{
alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
highestMask = (physicalMask | alignment);
alignment++;
if (alignment < page_size) alignment = page_size;
}
- // set flags for entry + object create
- vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
-
- // set memory entry cache mode
- switch (options & kIOMapCacheMask)
- {
- case kIOMapInhibitCache:
- SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
- break;
-
- case kIOMapWriteThruCache:
- SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
- break;
-
- case kIOMapWriteCombineCache:
- SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
- break;
-
- case kIOMapCopybackCache:
- SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
- break;
-
- case kIOMapCopybackInnerCache:
- SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
- break;
-
- case kIOMapDefaultCache:
- default:
- SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
- break;
- }
-
+ // set memory entry cache mode, pageable, purgeable
+ iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
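+ // i.e. extract the cache-mode field at kIOMapCacheShift from the map options and re-encode it at kIOMemoryBufferCacheShift in the IOMD options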
if (options & kIOMemoryPageable)
{
iomdOptions |= kIOMemoryBufferPageable;
-
- // must create the entry before any pages are allocated
-
- // set flags for entry + object create
- memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
-
- if (options & kIOMemoryPurgeable)
- memEntryCacheMode |= MAP_MEM_PURGABLE;
+ if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
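+ // kIOMemoryBufferPurgeable should let the VM system reclaim these pages once the client marks the buffer volatile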
}
else
{
- memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
vmmap = kernel_map;
// Buffers shouldn't auto-prepare; they should be prepared explicitly
_buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment, contig);
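// physically constrained or contiguous requests go through IOKernelAllocateWithPhysicalRestrict, which honors highestMask and the requested alignment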
}
else if (needZero
- && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
+ && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
{
_internalFlags |= kInternalFlagPageAllocated;
needZero = false;
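// the IOKit page allocator's backing pages are zero-filled on allocation, so the explicit zeroing pass can be skipped on this path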
}
if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
- ipc_port_t sharedMem;
vm_size_t size = round_page(capacity);
- kr = mach_make_memory_entry(vmmap,
- &size, (vm_offset_t)_buffer,
- memEntryCacheMode, &sharedMem,
- NULL );
-
- if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
- ipc_port_release_send( sharedMem );
- kr = kIOReturnVMError;
- }
- if( KERN_SUCCESS != kr)
- return( false );
-
- _memEntry = (void *) sharedMem;
+ // initWithOptions() will create the memory entry
+ iomdOptions |= kIOMemoryPersistent;
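+ // i.e. kIOMemoryPersistent replaces the explicit mach_make_memory_entry() call removed above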
if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
OSAddAtomicLong(size, &debug_iomallocpagable);
#endif
mapTask = inTask;
if (NULL == inTask)
inTask = kernel_task;
}
reserved->map = createMappingInTask(mapTask, 0,
- kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
+ kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
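+ // forwarding kIOMapPrefault lets callers have every page of the mapping faulted in up front rather than lazily on first touch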
if (!reserved->map)
{
_buffer = 0;