X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0b4c1975fb5e4eccf1012a35081f7e7799b81046..04b8595b18b1b41ac7a206e4b3d51a635f8413d7:/iokit/Kernel/IOBufferMemoryDescriptor.cpp

diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
index 004d2ec89..1c486fa0f 100644
--- a/iokit/Kernel/IOBufferMemoryDescriptor.cpp
+++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
@@ -35,9 +35,25 @@
 #include <IOKit/IOLib.h>
 #include <IOKit/IOMapper.h>
 #include <IOKit/IOBufferMemoryDescriptor.h>
+#include <libkern/OSDebug.h>
 
 #include "IOKitKernelInternal.h"
 
+#ifdef IOALLOCDEBUG
+#include <libkern/c++/OSCPPDebug.h>
+#endif
+#include <IOKit/IOStatisticsPrivate.h>
+
+#if IOKITSTATS
+#define IOStatisticsAlloc(type, size) \
+do { \
+    IOStatistics::countAlloc(type, size); \
+} while (0)
+#else
+#define IOStatisticsAlloc(type, size)
+#endif /* IOKITSTATS */
+
+
 __BEGIN_DECLS
 void ipc_port_release_send(ipc_port_t port);
 #include <vm/pmap.h>
@@ -48,8 +64,9 @@ __END_DECLS
 
 enum
 {
-    kInternalFlagPhysical  = 0x00000001,
-    kInternalFlagPageSized = 0x00000002
+    kInternalFlagPhysical      = 0x00000001,
+    kInternalFlagPageSized     = 0x00000002,
+    kInternalFlagPageAllocated = 0x00000004
 };
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -60,6 +77,23 @@ OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+static uintptr_t IOBMDPageProc(iopa_t * a)
+{
+    kern_return_t kr;
+    vm_address_t  vmaddr  = 0;
+    int           options = 0; // KMA_LOMEM;
+
+    kr = kernel_memory_allocate(kernel_map, &vmaddr,
+                                page_size, 0, options);
+
+    if (KERN_SUCCESS != kr) vmaddr = 0;
+    else                    bzero((void *) vmaddr, page_size);
+
+    return ((uintptr_t) vmaddr);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
 #ifndef __LP64__
 bool IOBufferMemoryDescriptor::initWithOptions(
     IOOptionBits options,
@@ -79,14 +113,15 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
     mach_vm_address_t alignment,
     mach_vm_address_t physicalMask)
 {
-    kern_return_t         kr;
-    task_t                mapTask = NULL;
-    vm_map_t              vmmap = NULL;
-    mach_vm_address_t     highestMask = 0;
-    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+    task_t                mapTask = NULL;
+    vm_map_t              vmmap = NULL;
+    mach_vm_address_t     highestMask = 0;
+    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+    IODMAMapSpecification mapSpec;
+    bool                  mapped = false;
+    bool                  needZero;
 
-    if (!capacity)
-        return false;
+    if (!capacity) return false;
 
     _options          = options;
     _capacity         = capacity;
@@ -99,19 +134,18 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
         return (false);
     _ranges.v64->address = 0;
     _ranges.v64->length  = 0;
+    // make sure super::free doesn't dealloc _ranges before super::init
+    _flags = kIOMemoryAsReference;
 
     // Grab IOMD bits from the Buffer MD options
     iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);
 
-#if 0
-    // workarounds-
-    if ((options & kIOMemoryPhysicallyContiguous) || ((capacity == 0x1000) && (inTask == kernel_task))
-     && !physicalMask)
+    if (!(kIOMemoryMapperNone & options))
     {
-        highestMask = physicalMask = 0xFFFFF000;
+        IOMapper::checkForSystemMapper();
+        mapped = (0 != IOMapper::gSystem);
     }
-    //-
-#endif
+    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
 
     if (physicalMask && (alignment <= 1))
     {
@@ -136,49 +170,27 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
     if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
         return false;
 
-    // set flags for entry + object create
-    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
-
-    // set memory entry cache mode
-    switch (options & kIOMapCacheMask)
+    bzero(&mapSpec, sizeof(mapSpec));
+    mapSpec.alignment      = _alignment;
+    mapSpec.numAddressBits = 64;
+    if (highestMask && mapped)
     {
-    case kIOMapInhibitCache:
-        SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
-        break;
-
-    case kIOMapWriteThruCache:
-        SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
-        break;
-
-    case kIOMapWriteCombineCache:
-        SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
-        break;
-
-    case kIOMapCopybackCache:
-        SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
-        break;
-
-    case kIOMapDefaultCache:
-    default:
-        SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
-        break;
+        if (highestMask <= 0xFFFFFFFF)
+            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
+        else
+            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
+        highestMask = 0;
     }
+    // set memory entry cache mode, pageable, purgeable
+    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
 
     if (options & kIOMemoryPageable)
     {
         iomdOptions |= kIOMemoryBufferPageable;
-
-        // must create the entry before any pages are allocated
-
-        // set flags for entry + object create
-        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
-
-        if (options & kIOMemoryPurgeable)
-            memEntryCacheMode |= MAP_MEM_PURGABLE;
+        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
     }
     else
     {
-        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
         vmmap = kernel_map;
 
         // Buffer shouldn't auto prepare they should be prepared explicitly
@@ -187,7 +199,19 @@
 
     /* Allocate a wired-down buffer inside kernel space. */
 
-    if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
+    bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
+
+    if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
+    {
+        contig |= (!mapped);
+        contig |= (0 != (kIOMemoryMapperNone & options));
+#if 0
+        // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
+        contig |= true;
+#endif
+    }
+
+    if (contig || highestMask || (alignment > page_size))
     {
         _internalFlags |= kInternalFlagPhysical;
         if (highestMask)
@@ -195,8 +219,22 @@
             _internalFlags |= kInternalFlagPageSized;
             capacity = round_page(capacity);
         }
-        _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
-                                (0 != (options & kIOMemoryPhysicallyContiguous)));
+        _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+                                capacity, highestMask, alignment, contig);
     }
+    else if (needZero
+          && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
+    {
+        _internalFlags |= kInternalFlagPageAllocated;
+        needZero       = false;
+        _buffer        = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
+        if (_buffer)
+        {
+            IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
+#if IOALLOCDEBUG
+            debug_iomalloc_size += capacity;
+#endif
+        }
+    }
     else if (alignment > 1)
     {
@@ -206,30 +244,18 @@
     {
         _buffer = IOMalloc(capacity);
     }
-
     if (!_buffer)
     {
         return false;
     }
+    if (needZero) bzero(_buffer, capacity);
 
     if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
-        ipc_port_t      sharedMem;
         vm_size_t       size = round_page(capacity);
 
-        kr = mach_make_memory_entry(vmmap,
-                        &size, (vm_offset_t)_buffer,
-                        memEntryCacheMode, &sharedMem,
-                        NULL );
-
-        if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
-            ipc_port_release_send( sharedMem );
-            kr = kIOReturnVMError;
-        }
-        if( KERN_SUCCESS != kr)
-            return( false );
-
-        _memEntry = (void *) sharedMem;
+        // initWithOptions will create memory entry
+        iomdOptions |= kIOMemoryPersistent;
 
         if( options & kIOMemoryPageable) {
 #if IOALLOCDEBUG
             debug_iomallocpageable_size += size;
@@ -247,9 +273,10 @@
 
             while (startAddr < endAddr)
             {
-                *startAddr;
+                UInt8 dummyVar = *startAddr;
+                (void) dummyVar;
                 startAddr += page_size;
-            }
+             }
         }
     }
 
@@ -260,6 +287,11 @@
                                 inTask, iomdOptions, /* System mapper */ 0))
         return false;
 
+    // give any system mapper the allocation params
+    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
+                                                &mapSpec, sizeof(mapSpec)))
+        return false;
+
     if (mapTask)
     {
         if (!reserved) {
@@ -268,7 +300,7 @@
             return( false );
         }
         reserved->map = createMappingInTask(mapTask, 0,
-                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
+                            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
         if (!reserved->map)
         {
             _buffer = 0;
@@ -465,16 +497,33 @@
     }
     else if (buffer)
     {
-        if (internalFlags & kInternalFlagPhysical)
+        if (kInternalFlagPageSized & internalFlags) size = round_page(size);
+
+        if (kInternalFlagPhysical & internalFlags)
         {
-            if (kInternalFlagPageSized & internalFlags)
-                size = round_page(size);
             IOKernelFreePhysical((mach_vm_address_t) buffer, size);
-        }
+        }
+        else if (kInternalFlagPageAllocated & internalFlags)
+        {
+            uintptr_t page;
+            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
+            if (page)
+            {
+                kmem_free(kernel_map, page, page_size);
+            }
+#if IOALLOCDEBUG
+            debug_iomalloc_size -= size;
+#endif
+            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
+        }
         else if (alignment > 1)
+        {
             IOFreeAligned(buffer, size);
+        }
         else
+        {
             IOFree(buffer, size);
+        }
     }
     if (range && (kIOMemoryAsReference & flags))
         IODelete(range, IOAddressRange, 1);
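
The public entry point that funnels into the initWithPhysicalMask() path reworked above is IOBufferMemoryDescriptor::inTaskWithPhysicalMask(). The sketch below is a reader aid, not part of the diff: the IOKit calls and option flags are the real API, while the function name, capacity, and mask value are made-up examples.

// Hypothetical caller of the allocation path this diff changes.
#include <IOKit/IOBufferMemoryDescriptor.h>

static IOBufferMemoryDescriptor * allocateExampleBuffer(void)
{
    // Request a wired kernel buffer whose pages sit below 4 GB and are
    // 4 KB aligned; this exercises initWithPhysicalMask() above.
    IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
        kernel_task,
        kIODirectionInOut | kIOMemoryPhysicallyContiguous,
        65536,                          // capacity in bytes (example value)
        0x00000000FFFFF000ULL);         // physicalMask (example value)
    if (!buf)
        return NULL;

    if (kIOReturnSuccess != buf->prepare())     // wire the memory
    {
        buf->release();
        return NULL;
    }

    void * vaddr = buf->getBytesNoCopy();       // kernel virtual address
    bzero(vaddr, buf->getCapacity());           // don't assume zero-filled pages on every path
    return buf;                                 // caller must complete() and release()
}

Two behavioral points of the diff show up in such a request. First, kIOMemoryPhysicallyContiguous no longer forces a host-contiguous allocation by itself: when a system IOMapper is active, the mapper can supply contiguous bus addressing, and only kIOMemoryHostPhysicallyContiguous (or the absence of a mapper) sends the request through IOKernelAllocateWithPhysicalRestrict(). Second, with a mapper present the physical mask is folded into mapSpec.numAddressBits (32 for the 4 GB mask here) and handed over through the new kIOMDAddDMAMapSpec operation rather than constraining the raw page allocation.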