X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/fe8ab488e9161c46dd9885d58fc52996dc0249ff..a39ff7e25e19b3a8c3020042a3872ca9ec9659f1:/iokit/Kernel/IOBufferMemoryDescriptor.cpp?ds=inline

diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
index 1c486fa0f..b3ff13dda 100644
--- a/iokit/Kernel/IOBufferMemoryDescriptor.cpp
+++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
@@ -66,7 +66,8 @@ enum
 {
     kInternalFlagPhysical      = 0x00000001,
     kInternalFlagPageSized     = 0x00000002,
-    kInternalFlagPageAllocated = 0x00000004
+    kInternalFlagPageAllocated = 0x00000004,
+    kInternalFlagInit          = 0x00000008
 };
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -84,7 +85,7 @@ static uintptr_t IOBMDPageProc(iopa_t * a)
     int options = 0; // KMA_LOMEM;
 
     kr = kernel_memory_allocate(kernel_map, &vmaddr,
-                                page_size, 0, options);
+                                page_size, 0, options, VM_KERN_MEMORY_IOKIT);
 
     if (KERN_SUCCESS != kr) vmaddr = 0;
     else                    bzero((void *) vmaddr, page_size);
@@ -167,6 +168,8 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
 
     _alignment = alignment;
 
+    if ((capacity + alignment) < _capacity) return (false);
+
     if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
         return false;
 
@@ -232,7 +235,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
         {
             IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
 #if IOALLOCDEBUG
-            debug_iomalloc_size += capacity;
+            OSAddAtomic(capacity, &debug_iomalloc_size);
 #endif
         }
     }
@@ -259,7 +262,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
     if( options & kIOMemoryPageable)
     {
 #if IOALLOCDEBUG
-        debug_iomallocpageable_size += size;
+        OSAddAtomicLong(size, &debug_iomallocpageable_size);
 #endif
         mapTask = inTask;
         if (NULL == inTask)
@@ -287,6 +290,11 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                     inTask, iomdOptions, /* System mapper */ 0))
         return false;
 
+    _internalFlags |= kInternalFlagInit;
+#if IOTRACKING
+    if (!(options & kIOMemoryPageable)) trackingAccumSize(capacity);
+#endif /* IOTRACKING */
+
     // give any system mapper the allocation params
     if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                 &mapSpec, sizeof(mapSpec)))
@@ -486,19 +494,26 @@ void IOBufferMemoryDescriptor::free()
             map->release();
     }
 
+    if ((options & kIOMemoryPageable)
+        || (kInternalFlagPageSized & internalFlags)) size = round_page(size);
+
+#if IOTRACKING
+    if (!(options & kIOMemoryPageable)
+        && buffer
+        && (kInternalFlagInit & _internalFlags)) trackingAccumSize(-size);
+#endif /* IOTRACKING */
+
     /* super::free may unwire - deallocate buffer afterwards */
     super::free();
 
     if (options & kIOMemoryPageable)
     {
 #if IOALLOCDEBUG
-        debug_iomallocpageable_size -= round_page(size);
+        OSAddAtomicLong(-size, &debug_iomallocpageable_size);
 #endif
     }
     else if (buffer)
     {
-        if (kInternalFlagPageSized & internalFlags) size = round_page(size);
-
         if (kInternalFlagPhysical & internalFlags)
         {
             IOKernelFreePhysical((mach_vm_address_t) buffer, size);
@@ -512,7 +527,7 @@ void IOBufferMemoryDescriptor::free()
                 kmem_free(kernel_map, page, page_size);
         }
 #if IOALLOCDEBUG
-        debug_iomalloc_size -= size;
+        OSAddAtomic(-size, &debug_iomalloc_size);
 #endif
         IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
     }
@@ -552,6 +567,7 @@ vm_size_t IOBufferMemoryDescriptor::getCapacity() const
 void IOBufferMemoryDescriptor::setLength(vm_size_t length)
 {
     assert(length <= _capacity);
+    if (length > _capacity) return;
 
     _length = length;
     _ranges.v64->length = length;
@@ -623,6 +639,9 @@ void * IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start,
                                         vm_size_t withLength)
 {
     IOVirtualAddress address;
+
+    if ((start + withLength) < start) return 0;
+
     if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
         address = (IOVirtualAddress) _buffer;
     else