X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4452a7af2eac33dbad800bcc91f2399d62c18f53..ebb1b9f42b62218f29061826217bb0f71cd375a6:/iokit/Kernel/IOBufferMemoryDescriptor.cpp

diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
index 4821b81dc..563059600 100644
--- a/iokit/Kernel/IOBufferMemoryDescriptor.cpp
+++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
@@ -25,26 +25,32 @@
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
+
+#define _IOMEMORYDESCRIPTOR_INTERNAL_
+
 #include <IOKit/assert.h>
 #include <IOKit/system.h>
 
 #include <IOKit/IOLib.h>
 #include <IOKit/IOMapper.h>
 #include <IOKit/IOBufferMemoryDescriptor.h>
+#include <libkern/OSDebug.h>
 
 #include "IOKitKernelInternal.h"
-#include "IOCopyMapper.h"
 
 __BEGIN_DECLS
 void ipc_port_release_send(ipc_port_t port);
 #include <vm/pmap.h>
-vm_map_t IOPageableMapForAddress( vm_address_t address );
 __END_DECLS
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-volatile ppnum_t gIOHighestAllocatedPage;
+enum
+{
+    kInternalFlagPhysical  = 0x00000001,
+    kInternalFlagPageSized = 0x00000002
+};
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
@@ -52,50 +58,9 @@ volatile ppnum_t gIOHighestAllocatedPage;
 OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                                IOGeneralMemoryDescriptor);
 
-bool IOBufferMemoryDescriptor::initWithAddress(
-                                  void *      /* address       */ ,
-                                  IOByteCount /* withLength    */ ,
-                                  IODirection /* withDirection */ )
-{
-    return false;
-}
-
-bool IOBufferMemoryDescriptor::initWithAddress(
-                                  vm_address_t /* address       */ ,
-                                  IOByteCount  /* withLength    */ ,
-                                  IODirection  /* withDirection */ ,
-                                  task_t       /* withTask      */ )
-{
-    return false;
-}
-
-bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
-                                  IOPhysicalAddress /* address       */ ,
-                                  IOByteCount       /* withLength    */ ,
-                                  IODirection       /* withDirection */ )
-{
-    return false;
-}
-
-bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
-                                  IOPhysicalRange * /* ranges        */ ,
-                                  UInt32            /* withCount     */ ,
-                                  IODirection       /* withDirection */ ,
-                                  bool              /* asReference   */ )
-{
-    return false;
-}
-
-bool IOBufferMemoryDescriptor::initWithRanges(
-                                  IOVirtualRange *  /* ranges        */ ,
-                                  UInt32            /* withCount     */ ,
-                                  IODirection       /* withDirection */ ,
-                                  task_t            /* withTask      */ ,
-                                  bool              /* asReference   */ )
-{
-    return false;
-}
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+#ifndef __LP64__
 bool IOBufferMemoryDescriptor::initWithOptions(
                                IOOptionBits options,
                                vm_size_t    capacity,
@@ -105,6 +70,7 @@ bool IOBufferMemoryDescriptor::initWithOptions(
     mach_vm_address_t physicalMask = 0;
     return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
 }
+#endif /* !__LP64__ */
 
 bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                task_t            inTask,
@@ -114,82 +80,139 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                mach_vm_address_t physicalMask)
 {
     kern_return_t     kr;
-    addr64_t          lastIOAddr;
-    vm_map_t          vmmap = 0;
-    IOOptionBits      iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;
+    task_t            mapTask = NULL;
+    vm_map_t          vmmap = NULL;
+    mach_vm_address_t highestMask = 0;
+    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
 
     if (!capacity)
         return false;
 
-    _options       = options;
-    _capacity      = capacity;
-    _physAddrs     = 0;
-    _physSegCount  = 0;
-    _buffer        = 0;
+    _options          = options;
+    _capacity         = capacity;
+    _internalFlags    = 0;
+    _internalReserved = 0;
+    _buffer           = 0;
 
-    // Grab the direction and the Auto Prepare bits from the Buffer MD options
-    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);
+    _ranges.v64 = IONew(IOAddressRange, 1);
+    if (!_ranges.v64)
+        return (false);
+    _ranges.v64->address = 0;
+    _ranges.v64->length  = 0;
 
-    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
-        alignment = page_size;
-
-    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
-        return false;
+    // Grab IOMD bits from the Buffer MD options
+    iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);
 
     if (physicalMask && (alignment <= 1))
-        alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;
+    {
+        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
+        highestMask = (physicalMask | alignment);
+        alignment++;
+        if (alignment < page_size)
+            alignment = page_size;
+    }
+
+    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
+        alignment = page_size;
 
-    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
-        physicalMask = 0xFFFFFFFF;
+    if (alignment >= page_size)
+        capacity = round_page(capacity);
+
+    if (alignment > page_size)
+        options |= kIOMemoryPhysicallyContiguous;
 
     _alignment = alignment;
-    if (options & kIOMemoryPageable)
+
+    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
+        return false;
+
+    // set flags for entry + object create
+    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
+
+    // set memory entry cache mode
+    switch (options & kIOMapCacheMask)
     {
-        iomdOptions |= kIOMemoryBufferPageable;
+        case kIOMapInhibitCache:
+            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
+            break;
+
+        case kIOMapWriteThruCache:
+            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
+            break;
+
+        case kIOMapWriteCombineCache:
+            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
+            break;
+
+        case kIOMapCopybackCache:
+            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
+            break;
+
+        case kIOMapDefaultCache:
+        default:
+            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
+            break;
+    }
 
-        ipc_port_t sharedMem;
-        vm_size_t  size = round_page_32(capacity);
+    if (options & kIOMemoryPageable)
+    {
+        iomdOptions |= kIOMemoryBufferPageable;
 
-        // must create the entry before any pages are allocated
+        // must create the entry before any pages are allocated
 
         // set flags for entry + object create
-        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
-                                    | MAP_MEM_NAMED_CREATE;
+        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
 
-        if (options & kIOMemoryPurgeable)
-            memEntryCacheMode |= MAP_MEM_PURGABLE;
+        if (options & kIOMemoryPurgeable)
+            memEntryCacheMode |= MAP_MEM_PURGABLE;
+    }
+    else
+    {
+        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
+        vmmap = kernel_map;
 
-        // set memory entry cache mode
-        switch (options & kIOMapCacheMask)
+        // Buffers shouldn't auto prepare; they should be prepared explicitly.
+        // But it never was enforced, so what are you going to do?
+        iomdOptions |= kIOMemoryAutoPrepare;
+
+        /* Allocate a wired-down buffer inside kernel space. */
+
+        if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
         {
-            case kIOMapInhibitCache:
-                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
-                break;
-
-            case kIOMapWriteThruCache:
-                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
-                break;
-
-            case kIOMapWriteCombineCache:
-                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
-                break;
-
-            case kIOMapCopybackCache:
-                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
-                break;
-
-            case kIOMapDefaultCache:
-            default:
-                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
-                break;
+            _internalFlags |= kInternalFlagPhysical;
+            if (highestMask)
+            {
+                _internalFlags |= kInternalFlagPageSized;
+                capacity = round_page(capacity);
+            }
+            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
+                            (0 != (options & kIOMemoryPhysicallyContiguous)));
+        }
+        else if (alignment > 1)
+        {
+            _buffer = IOMallocAligned(capacity, alignment);
+        }
+        else
+        {
+            _buffer = IOMalloc(capacity);
         }
 
-        kr = mach_make_memory_entry( vmmap,
-                        &size, 0,
-                        memEntryCacheMode, &sharedMem,
-                        NULL );
+        if (!_buffer)
+        {
+            return false;
+        }
+    }
 
-        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
+    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
+        ipc_port_t sharedMem;
+        vm_size_t  size = round_page(capacity);
+
+        kr = mach_make_memory_entry(vmmap,
+                                    &size, (vm_offset_t)_buffer,
+                                    memEntryCacheMode, &sharedMem,
+                                    NULL );
+
+        if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
             ipc_port_release_send( sharedMem );
             kr = kIOReturnVMError;
         }
@@ -197,139 +220,61 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
             return( false );
 
         _memEntry = (void *) sharedMem;
+
+        if( options & kIOMemoryPageable) {
 #if IOALLOCDEBUG
-        debug_iomallocpageable_size += size;
+            debug_iomallocpageable_size += size;
 #endif
-        if (NULL == inTask)
-            inTask = kernel_task;
-        else if (inTask == kernel_task)
-        {
-            vmmap = kernel_map;
+            mapTask = inTask;
+            if (NULL == inTask)
+                inTask = kernel_task;
         }
-        else
+        else if (options & kIOMapCacheMask)
         {
-            if( !reserved) {
-                reserved = IONew( ExpansionData, 1 );
-                if( !reserved)
-                    return( false );
-            }
-            vmmap = get_task_map(inTask);
-            vm_map_reference(vmmap);
-            reserved->map = vmmap;
-        }
-    }
-    else
-    {
-        if (IOMapper::gSystem)
-            // assuming mapped space is 2G
-            lastIOAddr = (1UL << 31) - PAGE_SIZE;
-        else
-            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);
-
-        if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
-        {
-            mach_vm_address_t address;
-            iomdOptions &= ~kIOMemoryTypeVirtual;
-            iomdOptions |= kIOMemoryTypePhysical;
-
-            address = IOMallocPhysical(capacity, physicalMask);
-            _buffer = (void *) address;
-            if (!_buffer)
-                return false;
+            // Prefetch each page to put entries into the pmap
+            volatile UInt8 * startAddr = (UInt8 *)_buffer;
+            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;
 
-            if (inTask == kernel_task)
+            while (startAddr < endAddr)
             {
-                vmmap = kernel_map;
+                *startAddr;
+                startAddr += page_size;
             }
-            else if (NULL != inTask)
-            {
-                if( !reserved) {
-                    reserved = IONew( ExpansionData, 1 );
-                    if( !reserved)
-                        return( false );
-                }
-                vmmap = get_task_map(inTask);
-                vm_map_reference(vmmap);
-                reserved->map = vmmap;
-            }
-            inTask = 0;
-        }
-        else
-        {
-            // Buffer shouldn't auto prepare they should be prepared explicitly
-            // But it never was enforced so what are you going to do?
-            iomdOptions |= kIOMemoryAutoPrepare;
-
-            /* Allocate a wired-down buffer inside kernel space. */
-            if (options & kIOMemoryPhysicallyContiguous)
-                _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
-            else if (alignment > 1)
-                _buffer = IOMallocAligned(capacity, alignment);
-            else
-                _buffer = IOMalloc(capacity);
-            if (!_buffer)
-                return false;
         }
     }
 
-    _singleRange.v.address = (vm_address_t) _buffer;
-    _singleRange.v.length  = capacity;
+    _ranges.v64->address = (mach_vm_address_t) _buffer;
+    _ranges.v64->length  = _capacity;
 
-    if (!super::initWithOptions(&_singleRange.v, 1, 0,
-                                inTask, iomdOptions, /* System mapper */ 0))
+    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                 inTask, iomdOptions, /* System mapper */ 0))
         return false;
 
-    if (physicalMask && !IOMapper::gSystem)
+    if (mapTask)
     {
-        IOMDDMACharacteristics mdSummary;
-
-        bzero(&mdSummary, sizeof(mdSummary));
-        IOReturn rtn = dmaCommandOperation(
-                kIOMDGetCharacteristics,
-                &mdSummary, sizeof(mdSummary));
-        if (rtn)
-            return false;
-
-        if (mdSummary.fHighestPage)
-        {
-            ppnum_t highest;
-            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
-            {
-                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
-                                        (UInt32 *) &gIOHighestAllocatedPage))
-                    break;
-            }
-            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
+        if (!reserved) {
+            reserved = IONew( ExpansionData, 1 );
+            if( !reserved)
+                return( false );
         }
-        else
-            lastIOAddr = ptoa_64(gIOLastPage);
-
-        if (lastIOAddr != (lastIOAddr & physicalMask))
-        {
-            if (kIOMemoryTypePhysical != (_flags & kIOMemoryTypeMask))
-            {
-                // flag a retry
-                _physSegCount = 1;
-            }
-            return false;
-        }
-    }
-
-    if (vmmap)
-    {
-        kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, capacity);
-        if (KERN_SUCCESS != kr)
+        reserved->map = createMappingInTask(mapTask, 0,
+                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
+        if (!reserved->map)
         {
             _buffer = 0;
             return( false );
         }
-
-        if (kIOMemoryTypeVirtual & iomdOptions)
-            _singleRange.v.address = (vm_address_t) _buffer;
+        release();          // map took a retain on this
+        reserved->map->retain();
+        removeMapping(reserved->map);
+        mach_vm_address_t buffer = reserved->map->getAddress();
+        _buffer = (void *) buffer;
+        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
+            _ranges.v64->address = buffer;
     }
 
-    setLength(capacity);
-
+    setLength(_capacity);
+
     return true;
 }
 
@@ -341,19 +286,9 @@ IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
 {
     IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
 
-    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
-        bool retry = me->_physSegCount;
+    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
         me->release();
         me = 0;
-        if (retry)
-        {
-            me = new IOBufferMemoryDescriptor;
-            if (me && !me->initWithOptions(options, capacity, alignment, inTask))
-            {
-                me->release();
-                me = 0;
-            }
-        }
     }
     return me;
 }
@@ -368,36 +303,34 @@ IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 
     if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
     {
-        bool retry = me->_physSegCount;
         me->release();
         me = 0;
-        if (retry)
-        {
-            me = new IOBufferMemoryDescriptor;
-            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
-            {
-                me->release();
-                me = 0;
-            }
-        }
     }
     return me;
 }
 
+#ifndef __LP64__
 bool IOBufferMemoryDescriptor::initWithOptions(
                                IOOptionBits options,
                                vm_size_t    capacity,
                                vm_offset_t  alignment)
 {
-    return( initWithOptions(options, capacity, alignment, kernel_task) );
+    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
 }
+#endif /* !__LP64__ */
 
 IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                IOOptionBits options,
                                vm_size_t    capacity,
                                vm_offset_t  alignment)
 {
-    return(IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options, capacity, alignment));
+    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
+
+    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
+        me->release();
+        me = 0;
+    }
+    return me;
 }
 
@@ -418,6 +351,7 @@ IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity,
                inCapacity,
                inContiguous ? inCapacity : 1 ));
 }
+#ifndef __LP64__
 
 /*
  * initWithBytes:
@@ -429,10 +363,9 @@ bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             IODirection  inDirection,
                                             bool         inContiguous)
 {
-    if (!initWithOptions(
-               inDirection | kIOMemoryUnshared
-                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
-               inLength, inLength ))
+    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
+                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
+                              inLength, inLength, (mach_vm_address_t)0))
         return false;
 
     // start out with no data
@@ -443,6 +376,7 @@ bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
 
     return true;
 }
+#endif /* !__LP64__ */
 
 /*
  * withBytes:
@@ -458,21 +392,25 @@ IOBufferMemoryDescriptor::withBytes(const void * inBytes,
 {
     IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
 
-    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
+    if (me && !me->initWithPhysicalMask(
+               kernel_task, inDirection | kIOMemoryUnshared
+                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
+               inLength, inLength, 0 ))
     {
-        bool retry = me->_physSegCount;
         me->release();
         me = 0;
-        if (retry)
+    }
+
+    if (me)
+    {
+        // start out with no data
+        me->setLength(0);
+
+        if (!me->appendBytes(inBytes, inLength))
         {
-            me = new IOBufferMemoryDescriptor;
-            if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
-            {
-                me->release();
-                me = 0;
-            }
+            me->release();
+            me = 0;
         }
-    }
     return me;
 }
 
@@ -486,18 +424,24 @@ void IOBufferMemoryDescriptor::free()
 {
     // Cache all of the relevant information on the stack for use
    // after we call super::free()!
-    IOOptionBits     flags         = _flags;
+    IOOptionBits     flags         = _flags;
+    IOOptionBits     internalFlags = _internalFlags;
     IOOptionBits     options   = _options;
     vm_size_t        size      = _capacity;
     void *           buffer    = _buffer;
-    IOVirtualAddress source    = _singleRange.v.address;
-    vm_map_t         vmmap     = 0;
+    IOMemoryMap *    map       = 0;
+    IOAddressRange * range     = _ranges.v64;
     vm_offset_t      alignment = _alignment;
 
+    if (alignment >= page_size)
+        size = round_page(size);
+
     if (reserved)
     {
-        vmmap = reserved->map;
+        map = reserved->map;
         IODelete( reserved, ExpansionData, 1 );
+        if (map)
+            map->release();
     }
 
     /* super::free may unwire - deallocate buffer afterwards */
     super::free();
 
     if (options & kIOMemoryPageable)
     {
 #if IOALLOCDEBUG
-        if (!buffer || vmmap)
-            debug_iomallocpageable_size -= round_page_32(size);
+        debug_iomallocpageable_size -= round_page(size);
 #endif
-        if (buffer)
-        {
-            if (vmmap)
-                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
-            else
-                IOFreePageable(buffer, size);
-        }
     }
     else if (buffer)
     {
-        if (kIOMemoryTypePhysical == (flags & kIOMemoryTypeMask))
-        {
-            if (vmmap)
-                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
-            IOFreePhysical((mach_vm_address_t) source, size);
-        }
-        else if (options & kIOMemoryPhysicallyContiguous)
-            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
+        if (internalFlags & kInternalFlagPhysical)
+        {
+            if (kInternalFlagPageSized & internalFlags)
+                size = round_page(size);
+            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
+        }
         else if (alignment > 1)
             IOFreeAligned(buffer, size);
         else
             IOFree(buffer, size);
     }
-    if (vmmap)
-        vm_map_deallocate(vmmap);
+    if (range && (kIOMemoryAsReference & flags))
+        IODelete(range, IOAddressRange, 1);
 }
 
 /*
@@ -561,7 +495,7 @@ void IOBufferMemoryDescriptor::setLength(vm_size_t length)
     assert(length <= _capacity);
 
     _length = length;
-    _singleRange.v.length = length;
+    _ranges.v64->length = length;
 }
 
 /*
@@ -573,7 +507,10 @@ void IOBufferMemoryDescriptor::setLength(vm_size_t length)
 */
 void IOBufferMemoryDescriptor::setDirection(IODirection direction)
 {
-    _direction = direction;
+    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
+#ifndef __LP64__
+    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
+#endif /* !__LP64__ */
 }
 
 /*
@@ -593,10 +530,10 @@ IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
     offset = _length;
     _length += actualBytesToCopy;
-    _singleRange.v.length += actualBytesToCopy;
+    _ranges.v64->length += actualBytesToCopy;
 
     if (_task == kernel_task)
-        bcopy(/* from */ bytes, (void *)(_singleRange.v.address + offset),
+        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
               actualBytesToCopy);
     else
         writeBytes(offset, bytes, actualBytesToCopy);
@@ -611,10 +548,10 @@ IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
 */
 void * IOBufferMemoryDescriptor::getBytesNoCopy()
 {
-    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
+    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
         return _buffer;
     else
-        return (void *)_singleRange.v.address;
+        return (void *)_ranges.v64->address;
 }
 
@@ -627,18 +564,19 @@ void * IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
 {
     IOVirtualAddress address;
-    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
+    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
         address = (IOVirtualAddress) _buffer;
     else
-        address = _singleRange.v.address;
+        address = _ranges.v64->address;
 
     if (start < _length && (start + withLength) <= _length)
         return (void *)(address + start);
     return 0;
 }
 
-/* DEPRECATED */ void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
-/* DEPRECATED */                        IOByteCount * lengthOfSegment)
+#ifndef __LP64__
+void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+                        IOByteCount * lengthOfSegment)
 {
     void * bytes = getBytesNoCopy(offset, 0);
 
@@ -647,9 +585,15 @@ IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
 
     return bytes;
 }
+#endif /* !__LP64__ */
 
+#ifdef __LP64__
+OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
+OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
+#else /* !__LP64__ */
 OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
 OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
+#endif /* !__LP64__ */
 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
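
For context on what the rewritten path above is used for: after this change, initWithPhysicalMask() is the single real initializer, and inTaskWithOptions(), inTaskWithPhysicalMask(), withOptions() and withBytes() are thin wrappers over it. Below is a minimal, hedged usage sketch from a driver's point of view; it assumes a kernel-side (kext) context, allocateDmaBuffer() is a hypothetical helper name rather than anything defined in this file, and the 64 KB size and below-4 GB mask are illustrative values only.

#include <IOKit/IOBufferMemoryDescriptor.h>

static IOBufferMemoryDescriptor * allocateDmaBuffer(void)
{
    // Request wired, physically contiguous memory whose physical address
    // fits below 4 GB -- the kind of restriction a 32-bit DMA engine needs.
    // Per the code above, the mask both bounds the highest acceptable
    // physical address (highestMask) and determines the alignment.
    IOBufferMemoryDescriptor * buf =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
            kernel_task,                                      // back it with kernel memory
            kIODirectionInOut | kIOMemoryPhysicallyContiguous,
            64 * 1024,                                        // capacity (rounded to pages)
            0x00000000FFFFF000ULL);                           // page-aligned, below 4 GB
    if (!buf)
        return NULL;

    if (kIOReturnSuccess != buf->prepare())                   // wire for I/O
    {
        buf->release();
        return NULL;
    }

    bzero(buf->getBytesNoCopy(), buf->getCapacity());         // CPU-visible mapping
    return buf;                                               // caller: complete(), then release()
}

Because the kernel_task, non-pageable case now takes the IOKernelAllocateWithPhysicalRestrict() branch shown in the diff, a request like this fails cleanly (the factory returns NULL) when no memory satisfying the mask is available, rather than going through the retry dance that the removed _physSegCount logic used to implement.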