X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/39236c6e673c41db228275375ab7fdb0f837b292..a39ff7e25e19b3a8c3020042a3872ca9ec9659f1:/iokit/Kernel/IOBufferMemoryDescriptor.cpp

diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
index 1b1775916..b3ff13dda 100644
--- a/iokit/Kernel/IOBufferMemoryDescriptor.cpp
+++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
@@ -66,7 +66,8 @@ enum
 {
     kInternalFlagPhysical      = 0x00000001,
     kInternalFlagPageSized     = 0x00000002,
-    kInternalFlagPageAllocated = 0x00000004
+    kInternalFlagPageAllocated = 0x00000004,
+    kInternalFlagInit          = 0x00000008
 };
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -84,7 +85,7 @@ static uintptr_t IOBMDPageProc(iopa_t * a)
     int               options = 0; // KMA_LOMEM;
 
     kr = kernel_memory_allocate(kernel_map, &vmaddr,
-                                page_size, 0, options);
+                                page_size, 0, options, VM_KERN_MEMORY_IOKIT);
 
     if (KERN_SUCCESS != kr) vmaddr = 0;
     else                    bzero((void *) vmaddr, page_size);
@@ -113,7 +114,6 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                 mach_vm_address_t alignment,
                                 mach_vm_address_t physicalMask)
 {
-    kern_return_t         kr;
     task_t                mapTask = NULL;
     vm_map_t              vmmap = NULL;
     mach_vm_address_t     highestMask = 0;
@@ -122,8 +122,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
     bool                  mapped = false;
     bool                  needZero;
 
-    if (!capacity)
-        return false;
+    if (!capacity) return false;
 
     _options          = options;
     _capacity         = capacity;
@@ -147,7 +146,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
         IOMapper::checkForSystemMapper();
         mapped = (0 != IOMapper::gSystem);
     }
-    needZero = mapped;
+    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
 
     if (physicalMask && (alignment <= 1))
     {
@@ -169,6 +168,8 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
 
     _alignment = alignment;
 
+    if ((capacity + alignment) < _capacity) return (false);
+
     if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
         return false;
 
@@ -184,53 +185,15 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
         highestMask = 0;
     }
 
-    // set flags for entry + object create
-    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
-
-    // set memory entry cache mode
-    switch (options & kIOMapCacheMask)
-    {
-        case kIOMapInhibitCache:
-            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
-            break;
-
-        case kIOMapWriteThruCache:
-            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
-            break;
-
-        case kIOMapWriteCombineCache:
-            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
-            break;
-
-        case kIOMapCopybackCache:
-            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
-            break;
-
-        case kIOMapCopybackInnerCache:
-            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
-            break;
-
-        case kIOMapDefaultCache:
-        default:
-            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
-            break;
-    }
-
+    // set memory entry cache mode, pageable, purgeable
+    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
     if (options & kIOMemoryPageable)
     {
         iomdOptions |= kIOMemoryBufferPageable;
-
-        // must create the entry before any pages are allocated
-
-        // set flags for entry + object create
-        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
-
-        if (options & kIOMemoryPurgeable)
-            memEntryCacheMode |= MAP_MEM_PURGABLE;
+        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
     }
     else
     {
-        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
         vmmap = kernel_map;
 
         // Buffer shouldn't auto prepare they should be prepared explicitly
@@ -263,7 +226,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                                        capacity, highestMask, alignment, contig);
     }
     else if (needZero
-             && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
+             && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
     {
         _internalFlags |= kInternalFlagPageAllocated;
         needZero       = false;
@@ -272,7 +235,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
         {
             IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
 #if IOALLOCDEBUG
-            debug_iomalloc_size += capacity;
+            OSAddAtomic(capacity, &debug_iomalloc_size);
 #endif
         }
     }
@@ -292,26 +255,14 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
     }
 
     if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
-        ipc_port_t      sharedMem;
         vm_size_t       size = round_page(capacity);
 
-        kr = mach_make_memory_entry(vmmap,
-                                    &size, (vm_offset_t)_buffer,
-                                    memEntryCacheMode, &sharedMem,
-                                    NULL );
-
-        if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
-            ipc_port_release_send( sharedMem );
-            kr = kIOReturnVMError;
-        }
-        if( KERN_SUCCESS != kr)
-            return( false );
-
-        _memEntry = (void *) sharedMem;
+        // initWithOptions will create memory entry
+        iomdOptions |= kIOMemoryPersistent;
 
         if( options & kIOMemoryPageable) {
 #if IOALLOCDEBUG
-            debug_iomallocpageable_size += size;
+            OSAddAtomicLong(size, &debug_iomallocpageable_size);
 #endif
             mapTask = inTask;
             if (NULL == inTask)
@@ -339,6 +290,11 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                 inTask, iomdOptions, /* System mapper */ 0))
         return false;
 
+    _internalFlags |= kInternalFlagInit;
+#if IOTRACKING
+    if (!(options & kIOMemoryPageable)) trackingAccumSize(capacity);
+#endif /* IOTRACKING */
+
     // give any system mapper the allocation params
     if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                 &mapSpec, sizeof(mapSpec)))
@@ -352,7 +308,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
             return( false );
         }
         reserved->map = createMappingInTask(mapTask, 0,
-                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
+                            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
         if (!reserved->map)
         {
             _buffer = 0;
@@ -538,19 +494,26 @@ void IOBufferMemoryDescriptor::free()
         map->release();
     }
 
+    if ((options & kIOMemoryPageable)
+        || (kInternalFlagPageSized & internalFlags)) size = round_page(size);
+
+#if IOTRACKING
+    if (!(options & kIOMemoryPageable)
+        && buffer
+        && (kInternalFlagInit & _internalFlags)) trackingAccumSize(-size);
+#endif /* IOTRACKING */
+
     /* super::free may unwire - deallocate buffer afterwards */
     super::free();
 
     if (options & kIOMemoryPageable)
     {
 #if IOALLOCDEBUG
-        debug_iomallocpageable_size -= round_page(size);
+        OSAddAtomicLong(-size, &debug_iomallocpageable_size);
 #endif
     }
     else if (buffer)
     {
-        if (kInternalFlagPageSized & internalFlags) size = round_page(size);
-
         if (kInternalFlagPhysical & internalFlags)
         {
             IOKernelFreePhysical((mach_vm_address_t) buffer, size);
@@ -564,7 +527,7 @@ void IOBufferMemoryDescriptor::free()
                 kmem_free(kernel_map, page, page_size);
             }
 #if IOALLOCDEBUG
-            debug_iomalloc_size -= size;
+            OSAddAtomic(-size, &debug_iomalloc_size);
 #endif
             IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
         }
@@ -604,6 +567,7 @@ vm_size_t IOBufferMemoryDescriptor::getCapacity() const
 void IOBufferMemoryDescriptor::setLength(vm_size_t length)
 {
     assert(length <= _capacity);
+    if (length > _capacity) return;
 
     _length = length;
     _ranges.v64->length = length;
@@ -675,6 +639,9 @@ void * IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start,
                                            vm_size_t withLength)
 {
     IOVirtualAddress address;
+
+    if ((start + withLength) < start) return 0;
+
     if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
         address = (IOVirtualAddress) _buffer;
     else
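
The new guards above, "(capacity + alignment) < _capacity" in initWithPhysicalMask and "(start + withLength) < start" in getBytesNoCopy, both rely on unsigned wraparound: for unsigned integers, a sum that overflows comes out smaller than either operand, so the oversized request can be rejected before any allocation or range math uses it. A minimal standalone sketch of the same pattern, in plain C++ rather than XNU code and with hypothetical helper names chosen only for illustration:

    // Sketch of the unsigned-wraparound checks added in this diff (not XNU code).
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // Hypothetical stand-in for the capacity/alignment check in initWithPhysicalMask().
    static bool requestFits(uint64_t capacity, uint64_t alignment)
    {
        // If the sum wrapped past 2^64 it compares smaller than capacity.
        if ((capacity + alignment) < capacity) return false;
        return true;
    }

    // Hypothetical stand-in for the start/length check in getBytesNoCopy().
    static bool rangeValid(uint64_t start, uint64_t length)
    {
        return (start + length) >= start;   // false only when start + length wrapped
    }

    int main()
    {
        const uint64_t max = std::numeric_limits<uint64_t>::max();
        printf("%d %d\n", requestFits(4096, 64), requestFits(max - 16, 4096)); // 1 0
        printf("%d %d\n", rangeValid(128, 256), rangeValid(max - 8, 64));      // 1 0
        return 0;
    }

Built with any C++11 compiler, the in-range requests pass and the requests whose sums wrap are rejected, which is the behaviour the added kernel checks depend on.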
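The IOALLOCDEBUG accounting changes in the same diff replace plain "+=" and "-=" on the shared debug_iomalloc_size / debug_iomallocpageable_size counters with OSAddAtomic and OSAddAtomicLong, turning a non-atomic read-modify-write into an atomic add so concurrent allocations and frees cannot lose updates. A rough userspace analogue of that change, sketched with std::atomic instead of the libkern primitives:

    // Sketch only: std::atomic used as an analogue of OSAddAtomic/OSAddAtomicLong.
    #include <atomic>
    #include <cstdio>
    #include <thread>

    static std::atomic<long> debug_alloc_size{0};   // stand-in for debug_iomalloc_size

    static void churn(long size, int iterations)
    {
        for (int i = 0; i < iterations; i++)
        {
            debug_alloc_size.fetch_add(size);       // analogous to OSAddAtomic(size, ...)
            debug_alloc_size.fetch_add(-size);      // analogous to OSAddAtomic(-size, ...)
        }
    }

    int main()
    {
        std::thread a(churn, 4096, 100000), b(churn, 8192, 100000);
        a.join();
        b.join();
        printf("%ld\n", debug_alloc_size.load());   // always 0 with atomic adds
        return 0;
    }

With the atomic add the counter always returns to zero; with a plain long and "+=", the two threads' read-modify-write sequences can interleave and the total drifts, which is the failure the kernel change avoids.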