X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/e3027f41d0120b4278cca462f397b6619dcd9ac5..21362eb3e66fd2c787aee132bce100a44d71a99c:/iokit/Kernel/IOMemoryDescriptor.cpp

diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp
index dc0ad9b50..f8927e9c8 100644
--- a/iokit/Kernel/IOMemoryDescriptor.cpp
+++ b/iokit/Kernel/IOMemoryDescriptor.cpp
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
@@ -25,44 +31,283 @@ * HISTORY * */ +// 45678901234567890123456789012345678901234567890123456789012345678901234567890 +#include #include #include #include #include +#include +#include #include +#include "IOKitKernelInternal.h" + #include -#include +#include +#include +#include +#include + +#include __BEGIN_DECLS #include -void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, - vm_prot_t prot, boolean_t wired); +#include +#include +#include +#include + +#ifndef i386 +#include +#include +struct phys_entry *pmap_find_physentry(ppnum_t pa); +#endif + +extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); void ipc_port_release_send(ipc_port_t port); -vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset); + +/* Copy between a physical page and a virtual address in the given vm_map */ +kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which); + +memory_object_t +device_pager_setup( + memory_object_t pager, + int device_handle, + vm_size_t size, + int flags); +void +device_pager_deallocate( + memory_object_t); +kern_return_t +device_pager_populate_object( + memory_object_t pager, + vm_object_offset_t offset, + ppnum_t phys_addr, + vm_size_t size); +kern_return_t +memory_object_iopl_request( + ipc_port_t port, + memory_object_offset_t offset, + vm_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int *flags); + +unsigned int IOTranslateCacheBits(struct phys_entry *pp); + __END_DECLS +#define kIOMaximumMappedIOByteCount (512*1024*1024) + +static IOMapper * gIOSystemMapper; +static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount); + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -OSDefineMetaClass( IOMemoryDescriptor, OSObject ) -OSDefineAbstractStructors( IOMemoryDescriptor, OSObject ) +OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject ) #define super IOMemoryDescriptor OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor) -extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address ); +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static IORecursiveLock * gIOMemoryLock; + +#define LOCK IORecursiveLockLock( gIOMemoryLock) +#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock) +#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT) +#define WAKEUP \ + IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class _IOMemoryMap : public IOMemoryMap +{ + OSDeclareDefaultStructors(_IOMemoryMap) +public: + IOMemoryDescriptor * memory; + IOMemoryMap * superMap; + IOByteCount offset; + IOByteCount length; + IOVirtualAddress logical; + task_t addressTask; + vm_map_t addressMap; + IOOptionBits options; + upl_t redirUPL; + ipc_port_t redirEntry; + IOMemoryDescriptor * owner; + +protected: + virtual void taggedRelease(const void *tag = 0) const; + virtual void free(); + +public: + + // IOMemoryMap methods + virtual IOVirtualAddress getVirtualAddress(); + virtual IOByteCount getLength(); + virtual task_t getAddressTask(); + virtual IOMemoryDescriptor * getMemoryDescriptor(); + virtual IOOptionBits getMapOptions(); + + virtual IOReturn unmap(); + virtual void taskDied(); + + virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory, + IOOptionBits options, + IOByteCount offset = 0); + + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount 
offset, + IOByteCount * length); + + // for IOMemoryDescriptor use + _IOMemoryMap * copyCompatible( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress toAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ); + + bool initCompatible( + IOMemoryDescriptor * memory, + IOMemoryMap * superMap, + IOByteCount offset, + IOByteCount length ); + + bool initWithDescriptor( + IOMemoryDescriptor * memory, + task_t intoTask, + IOVirtualAddress toAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ); + + IOReturn redirect( + task_t intoTask, bool redirect ); +}; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address ) +// Some data structures and accessor macros used by the initWithOptions +// Function + +enum ioPLBlockFlags { + kIOPLOnDevice = 0x00000001, + kIOPLExternUPL = 0x00000002, +}; + +struct typePersMDData { - if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags)) - return( IOPageableMapForAddress( address ) ); + const IOGeneralMemoryDescriptor *fMD; + ipc_port_t fMemEntry; +}; + +struct ioPLBlock { + upl_t fIOPL; + vm_address_t fIOMDOffset; // The offset of this iopl in descriptor + vm_offset_t fPageInfo; // Pointer to page list or index into it + ppnum_t fMappedBase; // Page number of first page in this iopl + unsigned int fPageOffset; // Offset within first page of iopl + unsigned int fFlags; // Flags +}; + +struct ioGMDData { + IOMapper *fMapper; + unsigned int fPageCnt; + upl_page_info_t fPageList[]; + ioPLBlock fBlocks[]; +}; + +#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) +#define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt])) +#define getNumIOPL(osd, d) \ + (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) +#define getPageList(d) (&(d->fPageList[0])) +#define computeDataSize(p, u) \ + (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE ) + + +extern "C" { + +kern_return_t device_data_action( + int device_handle, + ipc_port_t device_pager, + vm_prot_t protection, + vm_object_offset_t offset, + vm_size_t size) +{ + struct ExpansionData { + void * devicePager; + unsigned int pagerContig:1; + unsigned int unused:31; + IOMemoryDescriptor * memory; + }; + kern_return_t kr; + ExpansionData * ref = (ExpansionData *) device_handle; + IOMemoryDescriptor * memDesc; + + LOCK; + memDesc = ref->memory; + if( memDesc) + { + memDesc->retain(); + kr = memDesc->handleFault( device_pager, 0, 0, + offset, size, kIOMapDefaultCache /*?*/); + memDesc->release(); + } else - return( get_task_map( task )); + kr = KERN_ABORTED; + UNLOCK; + + return( kr ); +} + +kern_return_t device_close( + int device_handle) +{ + struct ExpansionData { + void * devicePager; + unsigned int pagerContig:1; + unsigned int unused:31; + IOMemoryDescriptor * memory; + }; + ExpansionData * ref = (ExpansionData *) device_handle; + + IODelete( ref, ExpansionData, 1 ); + + return( kIOReturnSuccess ); +} +}; // end extern "C" + +// Note this inline function uses C++ reference arguments to return values +// This means that pointers are not passed and NULLs don't have to be +// checked for as a NULL reference is illegal. 
+static inline void +getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables + UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind) +{ + assert(kIOMemoryTypePhysical == type || kIOMemoryTypeUIO == type + || kIOMemoryTypeVirtual == type); + if (kIOMemoryTypeUIO == type) { + user_size_t us; + uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us; + } + else { + IOVirtualRange cur = r.v[ind]; + addr = cur.address; + len = cur.length; + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -76,13 +321,23 @@ inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_addres */ IOMemoryDescriptor * IOMemoryDescriptor::withAddress(void * address, - IOByteCount withLength, - IODirection withDirection) + IOByteCount length, + IODirection direction) +{ + return IOMemoryDescriptor:: + withAddress((vm_address_t) address, length, direction, kernel_task); +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withAddress(vm_address_t address, + IOByteCount length, + IODirection direction, + task_t task) { IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; if (that) { - if (that->initWithAddress(address, withLength, withDirection)) + if (that->initWithAddress(address, length, direction, task)) return that; that->release(); @@ -91,15 +346,32 @@ IOMemoryDescriptor::withAddress(void * address, } IOMemoryDescriptor * -IOMemoryDescriptor::withAddress(vm_address_t address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) +IOMemoryDescriptor::withPhysicalAddress( + IOPhysicalAddress address, + IOByteCount length, + IODirection direction ) +{ + IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; + if (self + && !self->initWithPhysicalAddress(address, length, direction)) { + self->release(); + return 0; + } + + return self; +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection direction, + task_t task, + bool asReference) { IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; if (that) { - if (that->initWithAddress(address, withLength, withDirection, withTask)) + if (that->initWithRanges(ranges, withCount, direction, task, asReference)) return that; that->release(); @@ -107,16 +379,6 @@ IOMemoryDescriptor::withAddress(vm_address_t address, return 0; } -IOMemoryDescriptor * -IOMemoryDescriptor::withPhysicalAddress( - IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ) -{ - return( IOMemoryDescriptor::withAddress( address, withLength, - withDirection, (task_t) 0 )); -} - /* * withRanges: @@ -127,33 +389,48 @@ IOMemoryDescriptor::withPhysicalAddress( * Passing the ranges as a reference will avoid an extra allocation. 
*/ IOMemoryDescriptor * -IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) +IOMemoryDescriptor::withOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits opts, + IOMapper * mapper) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) - { - if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference)) - return that; + IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; - that->release(); + if (self + && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) + { + self->release(); + return 0; } + + return self; +} + +// Can't leave abstract but this should never be used directly, +bool IOMemoryDescriptor::initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper) +{ + // @@@ gvdl: Should I panic? + panic("IOMD::initWithOptions called\n"); return 0; } IOMemoryDescriptor * IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, - IODirection withDirection, - bool asReference = false) + IODirection direction, + bool asReference) { IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; if (that) { - if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference)) + if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) return that; that->release(); @@ -165,15 +442,96 @@ IOMemoryDescriptor * IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, IOByteCount offset, IOByteCount length, - IODirection withDirection) + IODirection direction) { - IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor; + IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor; - if (that && !that->initSubRange(of, offset, length, withDirection)) { - that->release(); - that = 0; + if (self && !self->initSubRange(of, offset, length, direction)) { + self->release(); + self = 0; + } + return self; +} + +IOMemoryDescriptor * IOMemoryDescriptor:: + withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD) +{ + IOGeneralMemoryDescriptor *origGenMD = + OSDynamicCast(IOGeneralMemoryDescriptor, originalMD); + + if (origGenMD) + return IOGeneralMemoryDescriptor:: + withPersistentMemoryDescriptor(origGenMD); + else + return 0; +} + +IOMemoryDescriptor * IOGeneralMemoryDescriptor:: + withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD) +{ + ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry(); + + if (!sharedMem) + return 0; + + if (sharedMem == originalMD->_memEntry) { + originalMD->retain(); // Add a new reference to ourselves + ipc_port_release_send(sharedMem); // Remove extra send right + return originalMD; + } + + IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor; + typePersMDData initData = { originalMD, sharedMem }; + + if (self + && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) { + self->release(); + self = 0; + } + return self; +} + +void *IOGeneralMemoryDescriptor::createNamedEntry() +{ + kern_return_t error; + ipc_port_t sharedMem; + + IOOptionBits type = _flags & kIOMemoryTypeMask; + + user_addr_t range0Addr; + IOByteCount range0Len; + getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0); + range0Addr = trunc_page_64(range0Addr); + + vm_size_t size = ptoa_32(_pages); + vm_address_t kernelPage = (vm_address_t) range0Addr; + + vm_map_t theMap = ((_task == kernel_task) + && 
(kIOMemoryBufferPageable & _flags)) + ? IOPageableMapForAddress(kernelPage) + : get_task_map(_task); + + memory_object_size_t actualSize = size; + vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE; + if (_memEntry) + prot |= MAP_MEM_NAMED_REUSE; + + error = mach_make_memory_entry_64(theMap, + &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry); + + if (KERN_SUCCESS == error) { + if (actualSize == size) { + return sharedMem; + } else { +#if IOASSERT + IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n", + (UInt64)range0Addr, (UInt32)actualSize, size); +#endif + ipc_port_release_send( sharedMem ); + } } - return that; + + return MACH_PORT_NULL; } /* @@ -223,29 +581,120 @@ IOGeneralMemoryDescriptor::initWithPhysicalAddress( return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); } +bool +IOGeneralMemoryDescriptor::initWithPhysicalRanges( + IOPhysicalRange * ranges, + UInt32 count, + IODirection direction, + bool reference) +{ + IOOptionBits mdOpts = direction | kIOMemoryTypePhysical; + + if (reference) + mdOpts |= kIOMemoryAsReference; + + return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0); +} + +bool +IOGeneralMemoryDescriptor::initWithRanges( + IOVirtualRange * ranges, + UInt32 count, + IODirection direction, + task_t task, + bool reference) +{ + IOOptionBits mdOpts = direction; + + if (reference) + mdOpts |= kIOMemoryAsReference; + + if (task) { + mdOpts |= kIOMemoryTypeVirtual; + + // Auto-prepare if this is a kernel memory descriptor as very few + // clients bother to prepare() kernel memory. + // But it was not enforced so what are you going to do? + if (task == kernel_task) + mdOpts |= kIOMemoryAutoPrepare; + } + else + mdOpts |= kIOMemoryTypePhysical; + + return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0); +} + /* - * initWithRanges: + * initWithOptions: * - * Initialize an IOMemoryDescriptor. The buffer is made up of several - * virtual address ranges, from a given task + * IOMemoryDescriptor. The buffer is made up of several virtual address ranges, + * from a given task, several physical ranges, an UPL from the ubc + * system or a uio (may be 64bit) from the BSD subsystem. * * Passing the ranges as a reference will avoid an extra allocation. * - * An IOMemoryDescriptor can be re-used by calling initWithAddress or - * initWithRanges again on an existing instance -- note this behavior - * is not commonly supported in other I/O Kit classes, although it is - * supported here. + * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an + * existing instance -- note this behavior is not commonly supported in other + * I/O Kit classes, although it is supported here. */ + bool -IOGeneralMemoryDescriptor::initWithRanges( - IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) -{ - assert(ranges); - assert(withCount); +IOGeneralMemoryDescriptor::initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper) +{ + IOOptionBits type = options & kIOMemoryTypeMask; + + // Grab the original MD's configuation data to initialse the + // arguments to this function. + if (kIOMemoryTypePersistentMD == type) { + + typePersMDData *initData = (typePersMDData *) buffers; + const IOGeneralMemoryDescriptor *orig = initData->fMD; + ioGMDData *dataP = getDataP(orig->_memoryEntries); + + // Only accept persistent memory descriptors with valid dataP data. 
+ assert(orig->_rangesCount == 1); + if ( !(orig->_flags & kIOMemoryPersistent) || !dataP) + return false; + + _memEntry = initData->fMemEntry; // Grab the new named entry + options = orig->_flags | kIOMemoryAsReference; + _singleRange = orig->_singleRange; // Initialise our range + buffers = &_singleRange; + count = 1; + + // Now grab the original task and whatever mapper was previously used + task = orig->_task; + mapper = dataP->fMapper; + + // We are ready to go through the original initialisation now + } + + switch (type) { + case kIOMemoryTypeUIO: + case kIOMemoryTypeVirtual: + assert(task); + if (!task) + return false; + else + break; + + case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task + mapper = kIOMapperNone; + + case kIOMemoryTypeUPL: + assert(!task); + break; + default: + return false; /* bad argument */ + } + + assert(buffers); + assert(count); /* * We can check the _initialized instance variable before having ever set @@ -253,77 +702,170 @@ IOGeneralMemoryDescriptor::initWithRanges( * variables are zeroed on an object's allocation. */ - if (_initialized == false) - { - if (super::init() == false) return false; - _initialized = true; - } - else - { + if (_initialized) { /* * An existing memory descriptor is being retargeted to point to * somewhere else. Clean up our present state. */ - assert(_wireCount == 0); - while (_wireCount) complete(); if (_kernPtrAligned) unmapFromKernel(); if (_ranges.v && _rangesIsAllocated) IODelete(_ranges.v, IOVirtualRange, _rangesCount); + if (_memEntry) + { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; } + } + else { + if (!super::init()) + return false; + _initialized = true; } - /* - * Initialize the memory descriptor. - */ + // Grab the appropriate mapper + if (mapper == kIOMapperNone) + mapper = 0; // No Mapper + else if (!mapper) { + IOMapper::checkForSystemMapper(); + gIOSystemMapper = mapper = IOMapper::gSystem; + } + + // Remove the dynamic internal use flags from the initial setting + options &= ~(kIOMemoryPreparedReadOnly); + _flags = options; + _task = task; - _ranges.v = 0; - _rangesCount = withCount; - _rangesIsAllocated = asReference ? 
false : true; - _direction = withDirection; - _length = 0; - _task = withTask; + // DEPRECATED variable initialisation + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); _position = 0; - _positionAtIndex = 0; - _positionAtOffset = 0; _kernPtrAligned = 0; _cachedPhysicalAddress = 0; _cachedVirtualAddress = 0; - _flags = 0; - if (withTask && (withTask != kernel_task)) - _flags |= kIOMemoryRequiresWire; + if (kIOMemoryTypeUPL == type) { - if (asReference) - _ranges.v = ranges; - else - { - _ranges.v = IONew(IOVirtualRange, withCount); - if (_ranges.v == 0) return false; - bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange)); - } + ioGMDData *dataP; + unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1); - for (unsigned index = 0; index < _rangesCount; index++) - { - _length += _ranges.v[index].length; + if (!_memoryEntries) { + _memoryEntries = OSData::withCapacity(dataSize); + if (!_memoryEntries) + return false; + } + else if (!_memoryEntries->initWithCapacity(dataSize)) + return false; + + _memoryEntries->appendBytes(0, sizeof(ioGMDData)); + dataP = getDataP(_memoryEntries); + dataP->fMapper = mapper; + dataP->fPageCnt = 0; + + _wireCount++; // UPLs start out life wired + + _length = count; + _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset); + + ioPLBlock iopl; + upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers); + + iopl.fIOPL = (upl_t) buffers; + // Set the flag kIOPLOnDevice convieniently equal to 1 + iopl.fFlags = pageList->device | kIOPLExternUPL; + iopl.fIOMDOffset = 0; + if (!pageList->device) { + // Pre-compute the offset into the UPL's page list + pageList = &pageList[atop_32(offset)]; + offset &= PAGE_MASK; + if (mapper) { + iopl.fMappedBase = mapper->iovmAlloc(_pages); + mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages); + } + else + iopl.fMappedBase = 0; + } + else + iopl.fMappedBase = 0; + iopl.fPageInfo = (vm_address_t) pageList; + iopl.fPageOffset = offset; + + _memoryEntries->appendBytes(&iopl, sizeof(iopl)); + } + else { + // kIOMemoryTypeVirtual | kIOMemoryTypeUIO | kIOMemoryTypePhysical + + // Initialize the memory descriptor + if (options & kIOMemoryAsReference) { + _rangesIsAllocated = false; + + // Hack assignment to get the buffer arg into _ranges. + // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't + // work, C++ sigh. + // This also initialises the uio & physical ranges. + _ranges.v = (IOVirtualRange *) buffers; + } + else { + assert(kIOMemoryTypeUIO != type); + + _rangesIsAllocated = true; + _ranges.v = IONew(IOVirtualRange, count); + if (!_ranges.v) + return false; + bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange)); + } + + // Find starting address within the vector of ranges + Ranges vec = _ranges; + UInt32 length = 0; + UInt32 pages = 0; + for (unsigned ind = 0; ind < count; ind++) { + user_addr_t addr; + UInt32 len; + + // addr & len are returned by this function + getAddrLenForInd(addr, len, type, vec, ind); + pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr)); + len += length; + assert(len > length); // Check for 32 bit wrap around + length = len; + } + _length = length; + _pages = pages; + _rangesCount = count; + + // Auto-prepare memory at creation time. 
+ // Implied completion when descriptor is free-ed + if (kIOMemoryTypePhysical == type) + _wireCount++; // Physical MDs are, by definition, wired + else { /* kIOMemoryTypeVirtual | kIOMemoryTypeUIO */ + ioGMDData *dataP; + unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2); + + if (!_memoryEntries) { + _memoryEntries = OSData::withCapacity(dataSize); + if (!_memoryEntries) + return false; + } + else if (!_memoryEntries->initWithCapacity(dataSize)) + return false; + + _memoryEntries->appendBytes(0, sizeof(ioGMDData)); + dataP = getDataP(_memoryEntries); + dataP->fMapper = mapper; + dataP->fPageCnt = _pages; + + if ( (kIOMemoryPersistent & _flags) && !_memEntry) + _memEntry = createNamedEntry(); + + if ((_flags & kIOMemoryAutoPrepare) + && prepare() != kIOReturnSuccess) + return false; + } } return true; } -bool -IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, - UInt32 withCount, - IODirection withDirection, - bool asReference = false) -{ -#warning assuming virtual, physical addresses same size - return( initWithRanges( (IOVirtualRange *) ranges, - withCount, withDirection, (task_t) 0, asReference )); -} - /* * free * @@ -331,107 +873,51 @@ IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, */ void IOGeneralMemoryDescriptor::free() { + LOCK; + if( reserved) + reserved->memory = 0; + UNLOCK; + while (_wireCount) complete(); + if (_memoryEntries) + _memoryEntries->release(); + if (_kernPtrAligned) unmapFromKernel(); if (_ranges.v && _rangesIsAllocated) IODelete(_ranges.v, IOVirtualRange, _rangesCount); - if( _memEntry) + + if (reserved && reserved->devicePager) + device_pager_deallocate( (memory_object_t) reserved->devicePager ); + + // memEntry holds a ref on the device pager which owns reserved + // (ExpansionData) so no reserved access after this point + if (_memEntry) ipc_port_release_send( (ipc_port_t) _memEntry ); + super::free(); } -void IOGeneralMemoryDescriptor::unmapFromKernel() -{ - kern_return_t krtn; - vm_offset_t off; - // Pull the shared pages out of the task map - // Do we need to unwire it first? - for ( off = 0; off < _kernSize; off += page_size ) - { - pmap_change_wiring( - kernel_pmap, - _kernPtrAligned + off, - FALSE); - - pmap_remove( - kernel_pmap, - _kernPtrAligned + off, - _kernPtrAligned + off + page_size); - } - // Free the former shmem area in the task - krtn = vm_deallocate(kernel_map, - _kernPtrAligned, - _kernSize ); - assert(krtn == KERN_SUCCESS); - _kernPtrAligned = 0; -} +/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel() +/* DEPRECATED */ { + panic("IOGMD::unmapFromKernel deprecated"); +/* DEPRECATED */ } +/* DEPRECATED */ +/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) +/* DEPRECATED */ { + panic("IOGMD::mapIntoKernel deprecated"); +/* DEPRECATED */ } -void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) +/* + * getDirection: + * + * Get the direction of the transfer. + */ +IODirection IOMemoryDescriptor::getDirection() const { - kern_return_t krtn; - vm_offset_t off; - - if (_kernPtrAligned) - { - if (_kernPtrAtIndex == rangeIndex) return; - unmapFromKernel(); - assert(_kernPtrAligned == 0); - } - - vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); - - _kernSize = trunc_page(_ranges.v[rangeIndex].address + - _ranges.v[rangeIndex].length + - page_size - 1) - srcAlign; - - /* Find some memory of the same size in kernel task. We use vm_allocate() - to do this. 
vm_allocate inserts the found memory object in the - target task's map as a side effect. */ - krtn = vm_allocate( kernel_map, - &_kernPtrAligned, - _kernSize, - VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit - assert(krtn == KERN_SUCCESS); - if(krtn) return; - - /* For each page in the area allocated from the kernel map, - find the physical address of the page. - Enter the page in the target task's pmap, at the - appropriate target task virtual address. */ - for ( off = 0; off < _kernSize; off += page_size ) - { - vm_offset_t kern_phys_addr, phys_addr; - if( _task) - phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off ); - else - phys_addr = srcAlign + off; - assert(phys_addr); - if(phys_addr == 0) return; - - // Check original state. - kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off ); - // Set virtual page to point to the right physical one - pmap_enter( - kernel_pmap, - _kernPtrAligned + off, - phys_addr, - VM_PROT_READ|VM_PROT_WRITE, - TRUE); - } - _kernPtrAtIndex = rangeIndex; -} - -/* - * getDirection: - * - * Get the direction of the transfer. - */ -IODirection IOMemoryDescriptor::getDirection() const -{ - return _direction; -} + return _direction; +} /* * getLength: @@ -443,8 +929,7 @@ IOByteCount IOMemoryDescriptor::getLength() const return _length; } -void IOMemoryDescriptor::setTag( - IOOptionBits tag ) +void IOMemoryDescriptor::setTag( IOOptionBits tag ) { _tag = tag; } @@ -454,459 +939,736 @@ IOOptionBits IOMemoryDescriptor::getTag( void ) return( _tag); } -/* - * setPosition - * - * Set the logical start position inside the client buffer. - * - * It is convention that the position reflect the actual byte count that - * is successfully transferred into or out of the buffer, before the I/O - * request is "completed" (ie. sent back to its originator). - */ - -void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) +// @@@ gvdl: who is using this API? Seems like a wierd thing to implement. +IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset, + IOByteCount * length ) { - assert(position <= _length); + IOPhysicalAddress physAddr = 0; - if (position >= _length) - { - _position = _length; - _positionAtIndex = _rangesCount; /* careful: out-of-bounds */ - _positionAtOffset = 0; - return; + if( prepare() == kIOReturnSuccess) { + physAddr = getPhysicalSegment( offset, length ); + complete(); } - if (position < _position) - { - _positionAtOffset = position; - _positionAtIndex = 0; - } - else - { - _positionAtOffset += (position - _position); + return( physAddr ); +} + +IOByteCount IOMemoryDescriptor::readBytes + (IOByteCount offset, void *bytes, IOByteCount length) +{ + addr64_t dstAddr = (addr64_t) (UInt32) bytes; + IOByteCount remaining; + + // Assert that this entire I/O is withing the available range + assert(offset < _length); + assert(offset + length <= _length); + if (offset >= _length) { +IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl + return 0; } - _position = position; - while (_positionAtOffset >= _ranges.v[_positionAtIndex].length) - { - _positionAtOffset -= _ranges.v[_positionAtIndex].length; - _positionAtIndex++; + remaining = length = min(length, _length - offset); + while (remaining) { // (process another target segment?) 
+ addr64_t srcAddr64; + IOByteCount srcLen; + + srcAddr64 = getPhysicalSegment64(offset, &srcLen); + if (!srcAddr64) + break; + + // Clip segment length to remaining + if (srcLen > remaining) + srcLen = remaining; + + copypv(srcAddr64, dstAddr, srcLen, + cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); + + dstAddr += srcLen; + offset += srcLen; + remaining -= srcLen; } -} -/* - * readBytes: - * - * Copy data from the memory descriptor's buffer into the specified buffer, - * relative to the current position. The memory descriptor's position is - * advanced based on the number of bytes copied. - */ + assert(!remaining); + + return length - remaining; +} -IOByteCount IOGeneralMemoryDescriptor::readBytes(IOByteCount offset, - void * bytes, IOByteCount withLength) +IOByteCount IOMemoryDescriptor::writeBytes + (IOByteCount offset, const void *bytes, IOByteCount length) { - IOByteCount bytesLeft; - void * segment; - IOByteCount segmentLength; + addr64_t srcAddr = (addr64_t) (UInt32) bytes; + IOByteCount remaining; - if( offset != _position) - setPosition( offset ); + // Assert that this entire I/O is withing the available range + assert(offset < _length); + assert(offset + length <= _length); - withLength = min(withLength, _length - _position); - bytesLeft = withLength; + assert( !(kIOMemoryPreparedReadOnly & _flags) ); -#if 0 - while (bytesLeft && (_position < _length)) - { - /* Compute the relative length to the end of this virtual segment. */ - segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft); + if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) { +IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl + return 0; + } - /* Compute the relative address of this virtual segment. */ - segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset); + remaining = length = min(length, _length - offset); + while (remaining) { // (process another target segment?) + addr64_t dstAddr64; + IOByteCount dstLen; - if (KERN_SUCCESS != vm_map_read_user(getMapForTask(_task, segment), - /* from */ (vm_offset_t) segment, /* to */ (vm_offset_t) bytes, - /* size */ segmentLength)) - { - assert( false ); - bytesLeft = withLength; - break; - } - bytesLeft -= segmentLength; - offset += segmentLength; - setPosition(offset); - } -#else - while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength))) - { - segmentLength = min(segmentLength, bytesLeft); - bcopy(/* from */ segment, /* to */ bytes, /* size */ segmentLength); - bytesLeft -= segmentLength; - offset += segmentLength; - bytes = (void *) (((UInt32) bytes) + segmentLength); + dstAddr64 = getPhysicalSegment64(offset, &dstLen); + if (!dstAddr64) + break; + + // Clip segment length to remaining + if (dstLen > remaining) + dstLen = remaining; + + copypv(srcAddr, (addr64_t) dstAddr64, dstLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); + + srcAddr += dstLen; + offset += dstLen; + remaining -= dstLen; } -#endif - return withLength - bytesLeft; + assert(!remaining); + + return length - remaining; } -/* - * writeBytes: - * - * Copy data to the memory descriptor's buffer from the specified buffer, - * relative to the current position. The memory descriptor's position is - * advanced based on the number of bytes copied. 
- */ -IOByteCount IOGeneralMemoryDescriptor::writeBytes(IOByteCount offset, - const void* bytes,IOByteCount withLength) -{ - IOByteCount bytesLeft; - void * segment; - IOByteCount segmentLength; +// osfmk/device/iokit_rpc.c +extern "C" unsigned int IODefaultCacheBits(addr64_t pa); - if( offset != _position) - setPosition( offset ); +/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) +/* DEPRECATED */ { + panic("IOGMD::setPosition deprecated"); +/* DEPRECATED */ } - withLength = min(withLength, _length - _position); - bytesLeft = withLength; +IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment + (IOByteCount offset, IOByteCount *lengthOfSegment) +{ + IOPhysicalAddress address = 0; + IOPhysicalLength length = 0; -#if 0 - while (bytesLeft && (_position < _length)) +// assert(offset <= _length); + if (offset < _length) // (within bounds?) { - assert(_position <= _length); + if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { + unsigned int ind; - /* Compute the relative length to the end of this virtual segment. */ - segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft); + // Physical address based memory descriptor - /* Compute the relative address of this virtual segment. */ - segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset); + // Find offset within descriptor and make it relative + // to the current _range. + for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ ) + offset -= _ranges.p[ind].length; + + IOPhysicalRange cur = _ranges.p[ind]; + address = cur.address + offset; + length = cur.length - offset; - if (KERN_SUCCESS != vm_map_write_user(getMapForTask(_task, segment), - /* from */ (vm_offset_t) bytes, - /* to */ (vm_offset_t) segment, - /* size */ segmentLength)) - { - assert( false ); - bytesLeft = withLength; - break; - } - bytesLeft -= segmentLength; - offset += segmentLength; - setPosition(offset); - } -#else - while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength))) - { - segmentLength = min(segmentLength, bytesLeft); - bcopy(/* from */ bytes, /* to */ segment, /* size */ segmentLength); - // Flush cache in case we're copying code around, eg. handling a code page fault - IOFlushProcessorCache(kernel_task, (vm_offset_t) segment, segmentLength ); + // see how far we can coalesce ranges + for (++ind; ind < _rangesCount; ind++) { + cur = _ranges.p[ind]; - bytesLeft -= segmentLength; - offset += segmentLength; - bytes = (void *) (((UInt32) bytes) + segmentLength); + if (address + length != cur.address) + break; + + length += cur.length; + } + + // @@@ gvdl: should be assert(address); + // but can't as NVidia GeForce creates a bogus physical mem + assert(address + || /* nvidia */ (!_ranges.p[0].address && 1 == _rangesCount)); + assert(length); + } + else do { + // We need wiring & we are wired. 
+ assert(_wireCount); + + if (!_wireCount) + { + panic("IOGMD: not wired for getPhysicalSegment()"); + continue; + } + + assert(_memoryEntries); + + ioGMDData * dataP = getDataP(_memoryEntries); + const ioPLBlock *ioplList = getIOPLList(dataP); + UInt ind, numIOPLs = getNumIOPL(_memoryEntries, dataP); + upl_page_info_t *pageList = getPageList(dataP); + + assert(numIOPLs > 0); + + // Scan through iopl info blocks looking for block containing offset + for (ind = 1; ind < numIOPLs; ind++) { + if (offset < ioplList[ind].fIOMDOffset) + break; + } + + // Go back to actual range as search goes past it + ioPLBlock ioplInfo = ioplList[ind - 1]; + + if (ind < numIOPLs) + length = ioplList[ind].fIOMDOffset; + else + length = _length; + length -= offset; // Remainder within iopl + + // Subtract offset till this iopl in total list + offset -= ioplInfo.fIOMDOffset; + + // This is a mapped IOPL so we just need to compute an offset + // relative to the mapped base. + if (ioplInfo.fMappedBase) { + offset += (ioplInfo.fPageOffset & PAGE_MASK); + address = ptoa_32(ioplInfo.fMappedBase) + offset; + continue; + } + + // Currently the offset is rebased into the current iopl. + // Now add the iopl 1st page offset. + offset += ioplInfo.fPageOffset; + + // For external UPLs the fPageInfo field points directly to + // the upl's upl_page_info_t array. + if (ioplInfo.fFlags & kIOPLExternUPL) + pageList = (upl_page_info_t *) ioplInfo.fPageInfo; + else + pageList = &pageList[ioplInfo.fPageInfo]; + + // Check for direct device non-paged memory + if ( ioplInfo.fFlags & kIOPLOnDevice ) { + address = ptoa_32(pageList->phys_addr) + offset; + continue; + } + + // Now we need compute the index into the pageList + ind = atop_32(offset); + offset &= PAGE_MASK; + + IOPhysicalAddress pageAddr = pageList[ind].phys_addr; + address = ptoa_32(pageAddr) + offset; + + // Check for the remaining data in this upl being longer than the + // remainder on the current page. This should be checked for + // contiguous pages + if (length > PAGE_SIZE - offset) { + // See if the next page is contiguous. Stop looking when we hit + // the end of this upl, which is indicated by the + // contigLength >= length. + IOByteCount contigLength = PAGE_SIZE - offset; + + // Look for contiguous segment + while (contigLength < length + && ++pageAddr == pageList[++ind].phys_addr) { + contigLength += PAGE_SIZE; + } + if (length > contigLength) + length = contigLength; + } + + assert(address); + assert(length); + + } while (0); + + if (!address) + length = 0; } -#endif - return withLength - bytesLeft; + if (lengthOfSegment) + *lengthOfSegment = length; + + return address; } -/* - * getPhysicalSegment: - * - * Get the physical address of the buffer, relative to the current position. - * If the current position is at the end of the buffer, a zero is returned. 
- */ -IOPhysicalAddress -IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, - IOByteCount * lengthOfSegment) +addr64_t IOMemoryDescriptor::getPhysicalSegment64 + (IOByteCount offset, IOByteCount *lengthOfSegment) { - vm_address_t virtualAddress; - IOByteCount virtualLength; - pmap_t virtualPMap; - IOPhysicalAddress physicalAddress; - IOPhysicalLength physicalLength; + IOPhysicalAddress phys32; + IOByteCount length; + addr64_t phys64; - if( kIOMemoryRequiresWire & _flags) - assert( _wireCount ); + phys32 = getPhysicalSegment(offset, lengthOfSegment); + if (!phys32) + return 0; - if ((0 == _task) && (1 == _rangesCount)) + if (gIOSystemMapper) { - assert(offset <= _length); - if (offset >= _length) - { - physicalAddress = 0; - physicalLength = 0; - } - else - { - physicalLength = _length - offset; - physicalAddress = offset + _ranges.v[0].address; - } - - if (lengthOfSegment) - *lengthOfSegment = physicalLength; - return physicalAddress; + IOByteCount origLen; + + phys64 = gIOSystemMapper->mapAddr(phys32); + origLen = *lengthOfSegment; + length = page_size - (phys64 & (page_size - 1)); + while ((length < origLen) + && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length))) + length += page_size; + if (length > origLen) + length = origLen; + + *lengthOfSegment = length; } + else + phys64 = (addr64_t) phys32; - if( offset != _position) - setPosition( offset ); + return phys64; +} + +IOPhysicalAddress IOGeneralMemoryDescriptor:: +getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment) +{ + IOPhysicalAddress address = 0; + IOPhysicalLength length = 0; + IOOptionBits type = _flags & kIOMemoryTypeMask; - assert(_position <= _length); + assert(offset <= _length); - /* Fail gracefully if the position is at (or past) the end-of-buffer. */ - if (_position >= _length) + if ( type == kIOMemoryTypeUPL) + return super::getSourceSegment( offset, lengthOfSegment ); + else if ( offset < _length ) // (within bounds?) { - *lengthOfSegment = 0; - return 0; + unsigned rangesIndex = 0; + Ranges vec = _ranges; + user_addr_t addr; + + // Find starting address within the vector of ranges + for (;;) { + getAddrLenForInd(addr, length, type, vec, rangesIndex); + if (offset < length) + break; + offset -= length; // (make offset relative) + rangesIndex++; + } + + // Now that we have the starting range, + // lets find the last contiguous range + addr += offset; + length -= offset; + + for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) { + user_addr_t newAddr; + IOPhysicalLength newLen; + + getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); + if (addr + length != newAddr) + break; + length += newLen; + } + if (addr) + address = (IOPhysicalAddress) addr; // Truncate address to 32bit + else + length = 0; } - /* Prepare to compute the largest contiguous physical length possible. */ + if ( lengthOfSegment ) *lengthOfSegment = length; - virtualAddress = _ranges.v[_positionAtIndex].address + _positionAtOffset; - virtualLength = _ranges.v[_positionAtIndex].length - _positionAtOffset; - vm_address_t virtualPage = trunc_page(virtualAddress); - if( _task) - virtualPMap = get_task_pmap(_task); - else - virtualPMap = 0; + return address; +} - physicalAddress = (virtualAddress == _cachedVirtualAddress) ? - _cachedPhysicalAddress : /* optimization */ - virtualPMap ? 
- pmap_extract(virtualPMap, virtualAddress) : - virtualAddress; - physicalLength = trunc_page(physicalAddress) + page_size - physicalAddress; +/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ +/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, +/* DEPRECATED */ IOByteCount * lengthOfSegment) +/* DEPRECATED */ { + if (_task == kernel_task) + return (void *) getSourceSegment(offset, lengthOfSegment); + else + panic("IOGMD::getVirtualSegment deprecated"); - if (!physicalAddress && _task) - { - physicalAddress = - vm_map_get_phys_page(get_task_map(_task), virtualPage); - physicalAddress += virtualAddress - virtualPage; - } + return 0; +/* DEPRECATED */ } +/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ - if (physicalAddress == 0) /* memory must be wired in order to proceed */ - { - assert(physicalAddress); - *lengthOfSegment = 0; - return 0; - } - /* Compute the largest contiguous physical length possible, within range. */ - IOPhysicalAddress physicalPage = trunc_page(physicalAddress); - while (physicalLength < virtualLength) +IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) +{ + IOReturn err = kIOReturnSuccess; + vm_purgable_t control; + int state; + + do { - physicalPage += page_size; - virtualPage += page_size; - _cachedVirtualAddress = virtualPage; - _cachedPhysicalAddress = virtualPMap ? - pmap_extract(virtualPMap, virtualPage) : - virtualPage; - if (!_cachedPhysicalAddress && _task) - { - _cachedPhysicalAddress = - vm_map_get_phys_page(get_task_map(_task), virtualPage); - } + if (!_memEntry) + { + err = kIOReturnNotReady; + break; + } - if (_cachedPhysicalAddress != physicalPage) break; + control = VM_PURGABLE_SET_STATE; + switch (newState) + { + case kIOMemoryPurgeableKeepCurrent: + control = VM_PURGABLE_GET_STATE; + break; + + case kIOMemoryPurgeableNonVolatile: + state = VM_PURGABLE_NONVOLATILE; + break; + case kIOMemoryPurgeableVolatile: + state = VM_PURGABLE_VOLATILE; + break; + case kIOMemoryPurgeableEmpty: + state = VM_PURGABLE_EMPTY; + break; + default: + err = kIOReturnBadArgument; + break; + } - physicalLength += page_size; - } + if (kIOReturnSuccess != err) + break; - /* Clip contiguous physical length at the end of this range. */ - if (physicalLength > virtualLength) - physicalLength = virtualLength; + err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state); - if( lengthOfSegment) - *lengthOfSegment = physicalLength; + if (oldState) + { + if (kIOReturnSuccess == err) + { + switch (state) + { + case VM_PURGABLE_NONVOLATILE: + state = kIOMemoryPurgeableNonVolatile; + break; + case VM_PURGABLE_VOLATILE: + state = kIOMemoryPurgeableVolatile; + break; + case VM_PURGABLE_EMPTY: + state = kIOMemoryPurgeableEmpty; + break; + default: + state = kIOMemoryPurgeableNonVolatile; + err = kIOReturnNotReady; + break; + } + *oldState = state; + } + } + } + while (false); - return physicalAddress; + return (err); } +extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); +extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); -/* - * getVirtualSegment: - * - * Get the virtual address of the buffer, relative to the current position. - * If the memory wasn't mapped into the caller's address space, it will be - * mapped in now. If the current position is at the end of the buffer, a - * null is returned. 
- */ -void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, - IOByteCount * lengthOfSegment) +IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, + IOByteCount offset, IOByteCount length ) { - if( offset != _position) - setPosition( offset ); - - assert(_position <= _length); + IOByteCount remaining; + void (*func)(addr64_t pa, unsigned int count) = 0; - /* Fail gracefully if the position is at (or past) the end-of-buffer. */ - if (_position >= _length) + switch (options) { - *lengthOfSegment = 0; - return 0; + case kIOMemoryIncoherentIOFlush: + func = &dcache_incoherent_io_flush64; + break; + case kIOMemoryIncoherentIOStore: + func = &dcache_incoherent_io_store64; + break; } - /* Compute the relative length to the end of this virtual segment. */ - *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset; + if (!func) + return (kIOReturnUnsupported); - /* Compute the relative address of this virtual segment. */ - if (_task == kernel_task) - return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset); - else + remaining = length = min(length, getLength() - offset); + while (remaining) + // (process another target segment?) { - vm_offset_t off; + addr64_t dstAddr64; + IOByteCount dstLen; + + dstAddr64 = getPhysicalSegment64(offset, &dstLen); + if (!dstAddr64) + break; - mapIntoKernel(_positionAtIndex); + // Clip segment length to remaining + if (dstLen > remaining) + dstLen = remaining; - off = _ranges.v[_kernPtrAtIndex].address; - off -= trunc_page(off); + (*func)(dstAddr64, dstLen); - return (void *) (_kernPtrAligned + off + _positionAtOffset); + offset += dstLen; + remaining -= dstLen; } + + return (remaining ? kIOReturnUnderrun : kIOReturnSuccess); } -/* - * prepare - * - * Prepare the memory for an I/O transfer. This involves paging in - * the memory, if necessary, and wiring it down for the duration of - * the transfer. The complete() method completes the processing of - * the memory after the I/O transfer finishes. This method needn't - * called for non-pageable memory. - */ -IOReturn IOGeneralMemoryDescriptor::prepare( - IODirection forDirection = kIODirectionNone) +#ifdef __ppc__ +extern vm_offset_t static_memory_end; +#define io_kernel_static_end static_memory_end +#else +extern vm_offset_t first_avail; +#define io_kernel_static_end first_avail +#endif + +static kern_return_t +io_get_kernel_static_upl( + vm_map_t /* map */, + vm_address_t offset, + vm_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count) { - UInt rangeIndex = 0; + unsigned int pageCount, page; + ppnum_t phys; - if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) { - kern_return_t rc; + pageCount = atop_32(*upl_size); + if (pageCount > *count) + pageCount = *count; - if(forDirection == kIODirectionNone) - forDirection = _direction; + *upl = NULL; - vm_prot_t access = VM_PROT_DEFAULT; // Could be cleverer using direction + for (page = 0; page < pageCount; page++) + { + phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page)); + if (!phys) + break; + page_list[page].phys_addr = phys; + page_list[page].pageout = 0; + page_list[page].absent = 0; + page_list[page].dirty = 0; + page_list[page].precious = 0; + page_list[page].device = 0; + } - // - // Check user read/write access to the data buffer. - // + return ((page >= pageCount) ? 
kIOReturnSuccess : kIOReturnVMError); +} - for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) - { - vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address); - vm_size_t checkSize = round_page(_ranges.v[rangeIndex].length ); +IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) +{ + IOOptionBits type = _flags & kIOMemoryTypeMask; + IOReturn error = kIOReturnNoMemory; + ioGMDData *dataP; + ppnum_t mapBase = 0; + IOMapper *mapper; + ipc_port_t sharedMem = (ipc_port_t) _memEntry; - while (checkSize) - { - vm_region_basic_info_data_t regionInfo; - mach_msg_type_number_t regionInfoSize = sizeof(regionInfo); - vm_size_t regionSize; - - if ( (vm_region( - /* map */ getMapForTask(_task, checkBase), - /* address */ &checkBase, - /* size */ ®ionSize, - /* flavor */ VM_REGION_BASIC_INFO, - /* info */ (vm_region_info_t) ®ionInfo, - /* info size */ ®ionInfoSize, - /* object name */ 0 ) != KERN_SUCCESS ) || - ( (forDirection & kIODirectionIn ) && - !(regionInfo.protection & VM_PROT_WRITE) ) || - ( (forDirection & kIODirectionOut) && - !(regionInfo.protection & VM_PROT_READ ) ) ) - { - return kIOReturnVMError; - } + assert(!_wireCount); + assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type); - assert((regionSize & PAGE_MASK) == 0); + if (_pages >= gIOMaximumMappedIOPageCount) + return kIOReturnNoResources; - regionSize = min(regionSize, checkSize); - checkSize -= regionSize; - checkBase += regionSize; - } // (for each vm region) - } // (for each io range) + dataP = getDataP(_memoryEntries); + mapper = dataP->fMapper; + if (mapper && _pages) + mapBase = mapper->iovmAlloc(_pages); - for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) { + // Note that appendBytes(NULL) zeros the data up to the + // desired length. + _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t)); + dataP = 0; // May no longer be valid so lets not get tempted. - vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); - IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address + - _ranges.v[rangeIndex].length + - page_size - 1); + if (forDirection == kIODirectionNone) + forDirection = _direction; - vm_map_t taskVMMap = getMapForTask(_task, srcAlign); + int uplFlags; // This Mem Desc's default flags for upl creation + switch (forDirection) + { + case kIODirectionOut: + // Pages do not need to be marked as dirty on commit + uplFlags = UPL_COPYOUT_FROM; + _flags |= kIOMemoryPreparedReadOnly; + break; + + case kIODirectionIn: + default: + uplFlags = 0; // i.e. 
~UPL_COPYOUT_FROM + break; + } + uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; - rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE); - if (KERN_SUCCESS != rc) { - IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc); - goto abortExit; + // Find the appropriate vm_map for the given task + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) + curMap = 0; + else + { curMap = get_task_map(_task); } + + // Iterate over the vector of virtual ranges + Ranges vec = _ranges; + unsigned int pageIndex = 0; + IOByteCount mdOffset = 0; + for (UInt range = 0; range < _rangesCount; range++) { + ioPLBlock iopl; + user_addr_t startPage; + IOByteCount numBytes; + + // Get the startPage address and length of vec[range] + getAddrLenForInd(startPage, numBytes, type, vec, range); + iopl.fPageOffset = (short) startPage & PAGE_MASK; + numBytes += iopl.fPageOffset; + startPage = trunc_page_64(startPage); + + if (mapper) + iopl.fMappedBase = mapBase + pageIndex; + else + iopl.fMappedBase = 0; + + // Iterate over the current range, creating UPLs + while (numBytes) { + dataP = getDataP(_memoryEntries); + vm_address_t kernelStart = (vm_address_t) startPage; + vm_map_t theMap; + if (curMap) + theMap = curMap; + else if (!sharedMem) { + assert(_task == kernel_task); + theMap = IOPageableMapForAddress(kernelStart); + } + else + theMap = NULL; + + upl_page_info_array_t pageInfo = getPageList(dataP); + int ioplFlags = uplFlags; + upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; + + vm_size_t ioplSize = round_page_32(numBytes); + unsigned int numPageInfo = atop_32(ioplSize); + + if (theMap == kernel_map && kernelStart < io_kernel_static_end) { + error = io_get_kernel_static_upl(theMap, + kernelStart, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo); + } + else if (sharedMem) { + error = memory_object_iopl_request(sharedMem, + ptoa_32(pageIndex), + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags); + } + else { + assert(theMap); + error = vm_map_create_upl(theMap, + startPage, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags); } - // If this I/O is for a user land task then protect ourselves - // against COW and other vm_shenanigans - if (_task && _task != kernel_task) { - // setup a data object to hold the 'named' memory regions - // @@@ gvdl: If we fail to allocate an OSData we will just - // hope for the best for the time being. Lets not fail a - // prepare at this late stage in product release. 
- if (!_memoryEntries) - _memoryEntries = OSData::withCapacity(16); - if (_memoryEntries) { - vm_object_offset_t desiredSize = srcAlignEnd - srcAlign; - vm_object_offset_t entryStart = srcAlign; - ipc_port_t memHandle; - - do { - vm_object_offset_t actualSize = desiredSize; - - rc = mach_make_memory_entry_64 - (taskVMMap, &actualSize, entryStart, - forDirection, &memHandle, NULL); - if (KERN_SUCCESS != rc) { - IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc); - goto abortExit; - } - - _memoryEntries-> - appendBytes(&memHandle, sizeof(memHandle)); - desiredSize -= actualSize; - entryStart += actualSize; - } while (desiredSize); + assert(ioplSize); + if (error != KERN_SUCCESS) + goto abortExit; + + error = kIOReturnNoMemory; + + if (baseInfo->device) { + numPageInfo = 1; + iopl.fFlags = kIOPLOnDevice; + // Don't translate device memory at all + if (mapper && mapBase) { + mapper->iovmFree(mapBase, _pages); + mapBase = 0; + iopl.fMappedBase = 0; } + } + else { + iopl.fFlags = 0; + if (mapper) + mapper->iovmInsert(mapBase, pageIndex, + baseInfo, numPageInfo); + } + + iopl.fIOMDOffset = mdOffset; + iopl.fPageInfo = pageIndex; + + if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL) + { + upl_commit(iopl.fIOPL, 0, 0); + upl_deallocate(iopl.fIOPL); + iopl.fIOPL = 0; } + + if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { + // Clean up partial created and unsaved iopl + if (iopl.fIOPL) { + upl_abort(iopl.fIOPL, 0); + upl_deallocate(iopl.fIOPL); + } + goto abortExit; + } + + // Check for a multiple iopl's in one virtual range + pageIndex += numPageInfo; + mdOffset -= iopl.fPageOffset; + if (ioplSize < numBytes) { + numBytes -= ioplSize; + startPage += ioplSize; + mdOffset += ioplSize; + iopl.fPageOffset = 0; + if (mapper) + iopl.fMappedBase = mapBase + pageIndex; + } + else { + mdOffset += numBytes; + break; + } } } - _wireCount++; + return kIOReturnSuccess; abortExit: - UInt doneIndex; - - - for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) { - vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address); - IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address + - _ranges.v[doneIndex].length + - page_size - 1); + { + dataP = getDataP(_memoryEntries); + UInt done = getNumIOPL(_memoryEntries, dataP); + ioPLBlock *ioplList = getIOPLList(dataP); + + for (UInt range = 0; range < done; range++) + { + if (ioplList[range].fIOPL) { + upl_abort(ioplList[range].fIOPL, 0); + upl_deallocate(ioplList[range].fIOPL); + } + } + (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength() - vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign, - srcAlignEnd, FALSE); + if (mapper && mapBase) + mapper->iovmFree(mapBase, _pages); } - if (_memoryEntries) { - ipc_port_t *handles, *handlesEnd; + return error; +} + +/* + * prepare + * + * Prepare the memory for an I/O transfer. This involves paging in + * the memory, if necessary, and wiring it down for the duration of + * the transfer. The complete() method completes the processing of + * the memory after the I/O transfer finishes. This method needn't + * called for non-pageable memory. 
+ */ +IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) +{ + IOReturn error = kIOReturnSuccess; + IOOptionBits type = _flags & kIOMemoryTypeMask; - handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy(); - handlesEnd = (ipc_port_t *) - ((vm_address_t) handles + _memoryEntries->getLength()); - while (handles < handlesEnd) - ipc_port_release_send(*handles++); - _memoryEntries->release(); - _memoryEntries = 0; + if (!_wireCount + && (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) ) { + error = wireVirtual(forDirection); + if (error) + return error; } - return kIOReturnVMError; + _wireCount++; + + return kIOReturnSuccess; } /* @@ -918,52 +1680,40 @@ abortExit: * before and after an I/O transfer involving pageable memory. */ -IOReturn IOGeneralMemoryDescriptor::complete( - IODirection forDirection = kIODirectionNone) +IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */) { assert(_wireCount); - if(0 == _wireCount) + if (!_wireCount) return kIOReturnSuccess; _wireCount--; - if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) { - UInt rangeIndex; - kern_return_t rc; - - if(forDirection == kIODirectionNone) - forDirection = _direction; - - for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) { - - vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); - IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address + - _ranges.v[rangeIndex].length + - page_size - 1); + if (!_wireCount) { + IOOptionBits type = _flags & kIOMemoryTypeMask; - if(forDirection == kIODirectionIn) - pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd); - - rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign, - srcAlignEnd, FALSE); - if(rc != KERN_SUCCESS) - IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc); + if (kIOMemoryTypePhysical == type) { + /* kIOMemoryTypePhysical */ + // DO NOTHING } + else { + ioGMDData * dataP = getDataP(_memoryEntries); + ioPLBlock *ioplList = getIOPLList(dataP); + UInt count = getNumIOPL(_memoryEntries, dataP); + + if (dataP->fMapper && _pages && ioplList[0].fMappedBase) + dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages); + + // Only complete iopls that we created which are for TypeVirtual + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) { + for (UInt ind = 0; ind < count; ind++) + if (ioplList[ind].fIOPL) { + upl_commit(ioplList[ind].fIOPL, 0, 0); + upl_deallocate(ioplList[ind].fIOPL); + } + } - if (_memoryEntries) { - ipc_port_t *handles, *handlesEnd; - - handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy(); - handlesEnd = (ipc_port_t *) - ((vm_address_t) handles + _memoryEntries->getLength()); - while (handles < handlesEnd) - ipc_port_release_send(*handles++); - - _memoryEntries->release(); - _memoryEntries = 0; - } - - _cachedVirtualAddress = 0; + (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength() + } } return kIOReturnSuccess; } @@ -972,163 +1722,173 @@ IOReturn IOGeneralMemoryDescriptor::doMap( vm_map_t addressMap, IOVirtualAddress * atAddress, IOOptionBits options, - IOByteCount sourceOffset = 0, - IOByteCount length = 0 ) + IOByteCount sourceOffset, + IOByteCount length ) { kern_return_t kr; + ipc_port_t sharedMem = (ipc_port_t) _memEntry; + + IOOptionBits type = _flags & kIOMemoryTypeMask; + Ranges vec = _ranges; + + user_addr_t range0Addr = 0; + IOByteCount range0Len = 0; + + if (vec.v) + getAddrLenForInd(range0Addr, range0Len, type, vec, 0); // mapping source == dest? 
(could be much better) - if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere) - && (1 == _rangesCount) && (0 == sourceOffset) - && (length <= _ranges.v[0].length) ) { - *atAddress = _ranges.v[0].address; + if( _task + && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere) + && (1 == _rangesCount) && (0 == sourceOffset) + && range0Addr && (length <= range0Len) ) { + if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32) + return kIOReturnOverrun; // Doesn't fit in 32bit return field + else { + *atAddress = range0Addr; return( kIOReturnSuccess ); + } } - if( _task && _memEntry && (_flags & kIOMemoryRequiresWire)) { + if( 0 == sharedMem) { - do { + vm_size_t size = ptoa_32(_pages); - if( (1 != _rangesCount) - || (kIOMapDefaultCache != (options & kIOMapCacheMask)) ) { - kr = kIOReturnUnsupported; - continue; - } + if( _task) { +#ifndef i386 + memory_object_size_t actualSize = size; + kr = mach_make_memory_entry_64(get_task_map(_task), + &actualSize, range0Addr, + VM_PROT_READ | VM_PROT_WRITE, &sharedMem, + NULL ); - if( 0 == length) - length = getLength(); - if( (sourceOffset + length) > _ranges.v[0].length) { - kr = kIOReturnBadArgument; - continue; + if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) { +#if IOASSERT + IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n", + range0Addr, (UInt32) actualSize, size); +#endif + kr = kIOReturnVMError; + ipc_port_release_send( sharedMem ); } - ipc_port_t sharedMem = (ipc_port_t) _memEntry; - vm_prot_t prot = VM_PROT_READ - | ((options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE); + if( KERN_SUCCESS != kr) +#endif /* !i386 */ + sharedMem = MACH_PORT_NULL; - // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE - if( options & kIOMapAnywhere) - *atAddress = 0; + } else do { - if( 0 == sharedMem) - kr = kIOReturnVMError; - else - kr = KERN_SUCCESS; - - if( KERN_SUCCESS == kr) - kr = vm_map( addressMap, - atAddress, - length, 0 /* mask */, - (( options & kIOMapAnywhere ) ? 
VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT), - sharedMem, sourceOffset, - false, // copy - prot, // cur - prot, // max - VM_INHERIT_NONE); - - } while( false ); + memory_object_t pager; + unsigned int flags = 0; + addr64_t pa; + IOPhysicalLength segLen; - } else - kr = super::doMap( addressMap, atAddress, - options, sourceOffset, length ); - return( kr ); -} + pa = getPhysicalSegment64( sourceOffset, &segLen ); -IOReturn IOGeneralMemoryDescriptor::doUnmap( - vm_map_t addressMap, - IOVirtualAddress logical, - IOByteCount length ) -{ - // could be much better - if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount) - && (logical == _ranges.v[0].address) - && (length <= _ranges.v[0].length) ) - return( kIOReturnSuccess ); + if( !reserved) { + reserved = IONew( ExpansionData, 1 ); + if( !reserved) + continue; + } + reserved->pagerContig = (1 == _rangesCount); + reserved->memory = this; - return( super::doUnmap( addressMap, logical, length )); -} + /*What cache mode do we need*/ + switch(options & kIOMapCacheMask ) { -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + case kIOMapDefaultCache: + default: + flags = IODefaultCacheBits(pa); + break; + + case kIOMapInhibitCache: + flags = DEVICE_PAGER_CACHE_INHIB | + DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; + + case kIOMapWriteThruCache: + flags = DEVICE_PAGER_WRITE_THROUGH | + DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; -extern "C" { -// osfmk/device/iokit_rpc.c -extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa, - vm_size_t length, unsigned int mapFlags); -extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length); -}; + case kIOMapCopybackCache: + flags = DEVICE_PAGER_COHERENT; + break; -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + case kIOMapWriteCombineCache: + flags = DEVICE_PAGER_CACHE_INHIB | + DEVICE_PAGER_COHERENT; + break; + } -static IORecursiveLock * gIOMemoryLock; + flags |= reserved->pagerContig ? 
DEVICE_PAGER_CONTIGUOUS : 0; -#define LOCK IORecursiveLockLock( gIOMemoryLock) -#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock) + pager = device_pager_setup( (memory_object_t) 0, (int) reserved, + size, flags); + assert( pager ); -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + if( pager) { + kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/, + size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem ); -OSDefineMetaClass( IOMemoryMap, OSObject ) -OSDefineAbstractStructors( IOMemoryMap, OSObject ) + assert( KERN_SUCCESS == kr ); + if( KERN_SUCCESS != kr) { + device_pager_deallocate( pager ); + pager = MACH_PORT_NULL; + sharedMem = MACH_PORT_NULL; + } + } + if( pager && sharedMem) + reserved->devicePager = pager; + else { + IODelete( reserved, ExpansionData, 1 ); + reserved = 0; + } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + } while( false ); -class _IOMemoryMap : public IOMemoryMap -{ - OSDeclareDefaultStructors(_IOMemoryMap) + _memEntry = (void *) sharedMem; + } - IOMemoryDescriptor * memory; - IOMemoryMap * superMap; - IOByteCount offset; - IOByteCount length; - IOVirtualAddress logical; - task_t addressTask; - vm_map_t addressMap; - IOOptionBits options; -public: - virtual void free(); +#ifndef i386 + if( 0 == sharedMem) + kr = kIOReturnVMError; + else +#endif + kr = super::doMap( addressMap, atAddress, + options, sourceOffset, length ); - // IOMemoryMap methods - virtual IOVirtualAddress getVirtualAddress(); - virtual IOByteCount getLength(); - virtual task_t getAddressTask(); - virtual IOMemoryDescriptor * getMemoryDescriptor(); - virtual IOOptionBits getMapOptions(); + return( kr ); +} - virtual IOReturn unmap(); - virtual void taskDied(); +IOReturn IOGeneralMemoryDescriptor::doUnmap( + vm_map_t addressMap, + IOVirtualAddress logical, + IOByteCount length ) +{ + // could be much better + if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) { - virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, - IOByteCount * length); + IOOptionBits type = _flags & kIOMemoryTypeMask; + user_addr_t range0Addr; + IOByteCount range0Len; - // for IOMemoryDescriptor use - _IOMemoryMap * isCompatible( - IOMemoryDescriptor * owner, - task_t intoTask, - IOVirtualAddress toAddress, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ); + getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0); + if (logical == range0Addr && length <= range0Len) + return( kIOReturnSuccess ); + } - bool init( - IOMemoryDescriptor * memory, - IOMemoryMap * superMap, - IOByteCount offset, - IOByteCount length ); + return( super::doUnmap( addressMap, logical, length )); +} - bool init( - IOMemoryDescriptor * memory, - task_t intoTask, - IOVirtualAddress toAddress, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ); +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - IOReturn redirect( - task_t intoTask, bool redirect ); -}; +OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject ) + +/* inline function implementation */ +IOPhysicalAddress IOMemoryMap::getPhysicalAddress() + { return( getPhysicalSegment( 0, 0 )); } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #undef super #define super IOMemoryMap @@ -1137,7 +1897,7 @@ OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool _IOMemoryMap::init( +bool _IOMemoryMap::initCompatible( 
IOMemoryDescriptor * _memory, IOMemoryMap * _superMap, IOByteCount _offset, @@ -1167,7 +1927,7 @@ bool _IOMemoryMap::init( return( true ); } -bool _IOMemoryMap::init( +bool _IOMemoryMap::initWithDescriptor( IOMemoryDescriptor * _memory, task_t intoTask, IOVirtualAddress toAddress, @@ -1175,139 +1935,409 @@ bool _IOMemoryMap::init( IOByteCount _offset, IOByteCount _length ) { - bool ok; + bool ok; + bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options)); - if( (!_memory) || (!intoTask) || !super::init()) + if ((!_memory) || (!intoTask)) return( false); if( (_offset + _length) > _memory->getLength()) return( false); - addressMap = get_task_map(intoTask); - if( !addressMap) - return( false); - kernel_vm_map_reference(addressMap); + if (!redir) + { + if (!super::init()) + return(false); + addressMap = get_task_map(intoTask); + if( !addressMap) + return( false); + vm_map_reference(addressMap); + addressTask = intoTask; + logical = toAddress; + options = _options; + } + + _memory->retain(); + + offset = _offset; + if( _length) + length = _length; + else + length = _memory->getLength(); + + if( options & kIOMapStatic) + ok = true; + else + ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress, + _options, offset, length )); + if (ok || redir) + { + if (memory) + memory->release(); + memory = _memory; + logical = toAddress; + } + else + { + _memory->release(); + if (!redir) + { + logical = 0; + memory = 0; + vm_map_deallocate(addressMap); + addressMap = 0; + } + } + + return( ok ); +} + +/* LP64todo - these need to expand */ +struct IOMemoryDescriptorMapAllocRef +{ + ipc_port_t sharedMem; + vm_size_t size; + vm_offset_t mapped; + IOByteCount sourceOffset; + IOOptionBits options; +}; + +static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) +{ + IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref; + IOReturn err; + + do { + if( ref->sharedMem) { + vm_prot_t prot = VM_PROT_READ + | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE); + + // set memory entry cache + vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY; + switch (ref->options & kIOMapCacheMask) + { + case kIOMapInhibitCache: + SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode); + break; + + case kIOMapWriteThruCache: + SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode); + break; + + case kIOMapWriteCombineCache: + SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode); + break; + + case kIOMapCopybackCache: + SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode); + break; + + case kIOMapDefaultCache: + default: + SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode); + break; + } + + vm_size_t unused = 0; + + err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/, + memEntryCacheMode, NULL, ref->sharedMem ); + if (KERN_SUCCESS != err) + IOLog("MAP_MEM_ONLY failed %d\n", err); + + err = vm_map( map, + &ref->mapped, + ref->size, 0 /* mask */, + (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) + | VM_MAKE_TAG(VM_MEMORY_IOKIT), + ref->sharedMem, ref->sourceOffset, + false, // copy + prot, // cur + prot, // max + VM_INHERIT_NONE); + + if( KERN_SUCCESS != err) { + ref->mapped = 0; + continue; + } + + } else { + + err = vm_allocate( map, &ref->mapped, ref->size, + ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) + | VM_MAKE_TAG(VM_MEMORY_IOKIT) ); + + if( KERN_SUCCESS != err) { + ref->mapped = 0; + continue; + } + + // we have to make sure that these guys don't get copied if we fork. 
+ err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE); + assert( KERN_SUCCESS == err ); + } + + } while( false ); + + return( err ); +} + + +IOReturn IOMemoryDescriptor::doMap( + vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset, + IOByteCount length ) +{ + IOReturn err = kIOReturnSuccess; + memory_object_t pager; + vm_address_t logical; + IOByteCount pageOffset; + IOPhysicalAddress sourceAddr; + IOMemoryDescriptorMapAllocRef ref; + + ref.sharedMem = (ipc_port_t) _memEntry; + ref.sourceOffset = sourceOffset; + ref.options = options; + + do { + + if( 0 == length) + length = getLength(); + + sourceAddr = getSourceSegment( sourceOffset, NULL ); + pageOffset = sourceAddr - trunc_page_32( sourceAddr ); + + ref.size = round_page_32( length + pageOffset ); + + if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options)) + { + upl_t redirUPL2; + vm_size_t size; + int flags; + + _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress; + ref.mapped = mapping->getVirtualAddress(); + + if (!_memEntry) + { + err = kIOReturnNotReadable; + continue; + } + + size = length; + flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; + + if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2, + NULL, NULL, + &flags)) + redirUPL2 = NULL; + + err = upl_transpose(redirUPL2, mapping->redirUPL); + if (kIOReturnSuccess != err) + { + IOLog("upl_transpose(%x)\n", err); + err = kIOReturnSuccess; + } + + if (redirUPL2) + { + upl_commit(redirUPL2, NULL, 0); + upl_deallocate(redirUPL2); + redirUPL2 = 0; + } + { + // swap the memEntries since they now refer to different vm_objects + void * me = _memEntry; + _memEntry = mapping->memory->_memEntry; + mapping->memory->_memEntry = me; + } + } + else + { + + logical = *atAddress; + if( options & kIOMapAnywhere) + // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE + ref.mapped = 0; + else { + ref.mapped = trunc_page_32( logical ); + if( (logical - ref.mapped) != pageOffset) { + err = kIOReturnVMError; + continue; + } + } + + if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) + err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); + else + err = IOMemoryDescriptorMapAlloc( addressMap, &ref ); + } + + if( err != KERN_SUCCESS) + continue; + + if( reserved) + pager = (memory_object_t) reserved->devicePager; + else + pager = MACH_PORT_NULL; - _memory->retain(); - memory = _memory; + if( !ref.sharedMem || pager ) + err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options ); - offset = _offset; - if( _length) - length = _length; - else - length = _memory->getLength(); + } while( false ); - addressTask = intoTask; - logical = toAddress; - options = _options; + if( err != KERN_SUCCESS) { + if( ref.mapped) + doUnmap( addressMap, ref.mapped, ref.size ); + *atAddress = NULL; + } else + *atAddress = ref.mapped + pageOffset; - if( options & kIOMapStatic) - ok = true; - else - ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical, - options, offset, length )); - if( !ok) { - logical = 0; - memory->release(); - memory = 0; - vm_map_deallocate(addressMap); - addressMap = 0; - } - return( ok ); + return( err ); } -IOReturn IOMemoryDescriptor::doMap( +enum { + kIOMemoryRedirected = 0x00010000 +}; + +IOReturn IOMemoryDescriptor::handleFault( + void * _pager, vm_map_t addressMap, - IOVirtualAddress * atAddress, - IOOptionBits 
options, - IOByteCount sourceOffset = 0, - IOByteCount length = 0 ) + IOVirtualAddress address, + IOByteCount sourceOffset, + IOByteCount length, + IOOptionBits options ) { IOReturn err = kIOReturnSuccess; - vm_size_t ourSize; + memory_object_t pager = (memory_object_t) _pager; + vm_size_t size; vm_size_t bytes; - vm_offset_t mapped; - vm_address_t logical; + vm_size_t page; IOByteCount pageOffset; + IOByteCount pagerOffset; IOPhysicalLength segLen; - IOPhysicalAddress physAddr; - - if( 0 == length) - length = getLength(); - - physAddr = getPhysicalSegment( sourceOffset, &segLen ); - assert( physAddr ); - - pageOffset = physAddr - trunc_page( physAddr ); - ourSize = length + pageOffset; - physAddr -= pageOffset; + addr64_t physAddr; - logical = *atAddress; - if( 0 == (options & kIOMapAnywhere)) { - mapped = trunc_page( logical ); - if( (logical - mapped) != pageOffset) - err = kIOReturnVMError; - } - if( kIOReturnSuccess == err) - err = vm_allocate( addressMap, &mapped, ourSize, - ((options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT) ); + if( !addressMap) { - if( err) { + if( kIOMemoryRedirected & _flags) { #ifdef DEBUG - kprintf("IOMemoryDescriptor::doMap: vm_allocate() " - "returned %08x\n", err); + IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset); #endif - return( err); - } + do { + SLEEP; + } while( kIOMemoryRedirected & _flags ); + } - // we have to make sure that these guys don't get copied if we fork. - err = vm_inherit( addressMap, mapped, ourSize, VM_INHERIT_NONE); - if( err != KERN_SUCCESS) { - doUnmap( addressMap, mapped, ourSize); // back out - return( err); + return( kIOReturnSuccess ); } - logical = mapped; - *atAddress = mapped + pageOffset; + physAddr = getPhysicalSegment64( sourceOffset, &segLen ); + assert( physAddr ); + pageOffset = physAddr - trunc_page_64( physAddr ); + pagerOffset = sourceOffset; + + size = length + pageOffset; + physAddr -= pageOffset; segLen += pageOffset; - bytes = ourSize; + bytes = size; do { // in the middle of the loop only map whole pages if( segLen >= bytes) segLen = bytes; - else if( segLen != trunc_page( segLen)) + else if( segLen != trunc_page_32( segLen)) err = kIOReturnVMError; - if( physAddr != trunc_page( physAddr)) + if( physAddr != trunc_page_64( physAddr)) err = kIOReturnBadArgument; + if (kIOReturnSuccess != err) + break; #ifdef DEBUG if( kIOLogMapping & gIOKitDebug) - kprintf("_IOMemoryMap::map(%x) %08x->%08x:%08x\n", - addressMap, mapped + pageOffset, physAddr + pageOffset, + IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n", + addressMap, address + pageOffset, physAddr + pageOffset, segLen - pageOffset); #endif - if( kIOReturnSuccess == err) - err = IOMapPages( addressMap, mapped, physAddr, segLen, options ); + + + + +#ifdef i386 + /* i386 doesn't support faulting on device memory yet */ + if( addressMap && (kIOReturnSuccess == err)) + err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options ); + assert( KERN_SUCCESS == err ); if( err) break; +#endif + + if( pager) { + if( reserved && reserved->pagerContig) { + IOPhysicalLength allLen; + addr64_t allPhys; + allPhys = getPhysicalSegment64( 0, &allLen ); + assert( allPhys ); + err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) ); + + } else { + + for( page = 0; + (page < segLen) && (KERN_SUCCESS == err); + page += page_size) { + err = device_pager_populate_object(pager, pagerOffset, + (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size); + pagerOffset += 
page_size; + } + } + assert( KERN_SUCCESS == err ); + if( err) + break; + } +#ifndef i386 + /* *** ALERT *** */ + /* *** Temporary Workaround *** */ + + /* This call to vm_fault causes an early pmap level resolution */ + /* of the mappings created above. Need for this is in absolute */ + /* violation of the basic tenet that the pmap layer is a cache. */ + /* Further, it implies a serious I/O architectural violation on */ + /* the part of some user of the mapping. As of this writing, */ + /* the call to vm_fault is needed because the NVIDIA driver */ + /* makes a call to pmap_extract. The NVIDIA driver needs to be */ + /* fixed as soon as possible. The NVIDIA driver should not */ + /* need to query for this info as it should know from the doMap */ + /* call where the physical memory is mapped. When a query is */ + /* necessary to find a physical mapping, it should be done */ + /* through an iokit call which includes the mapped memory */ + /* handle. This is required for machine architecture independence.*/ + + if(!(kIOMemoryRedirected & _flags)) { + vm_fault(addressMap, + (vm_map_offset_t)address, + VM_PROT_READ|VM_PROT_WRITE, + FALSE, THREAD_UNINT, NULL, + (vm_map_offset_t)0); + } + + /* *** Temporary Workaround *** */ + /* *** ALERT *** */ +#endif sourceOffset += segLen - pageOffset; - mapped += segLen; + address += segLen; bytes -= segLen; pageOffset = 0; } while( bytes - && (physAddr = getPhysicalSegment( sourceOffset, &segLen ))); + && (physAddr = getPhysicalSegment64( sourceOffset, &segLen ))); if( bytes) err = kIOReturnBadArgument; - if( err) - doUnmap( addressMap, logical, ourSize ); - else - mapped = true; return( err ); } @@ -1325,76 +2355,98 @@ IOReturn IOMemoryDescriptor::doUnmap( addressMap, logical, length ); #endif - if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))) + if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) { + + if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) + addressMap = IOPageableMapForAddress( logical ); + err = vm_deallocate( addressMap, logical, length ); - else + + } else err = kIOReturnSuccess; return( err ); } -IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect ) +IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) { - IOReturn err; + IOReturn err = kIOReturnSuccess; _IOMemoryMap * mapping = 0; OSIterator * iter; LOCK; + if( doRedirect) + _flags |= kIOMemoryRedirected; + else + _flags &= ~kIOMemoryRedirected; + do { if( (iter = OSCollectionIterator::withCollection( _mappings))) { - while( (mapping = (_IOMemoryMap *) iter->getNextObject())) - mapping->redirect( safeTask, redirect ); + while( (mapping = (_IOMemoryMap *) iter->getNextObject())) + mapping->redirect( safeTask, doRedirect ); - iter->release(); - } + iter->release(); + } } while( false ); + if (!doRedirect) + { + WAKEUP; + } + UNLOCK; // temporary binary compatibility IOSubMemoryDescriptor * subMem; if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) - err = subMem->redirect( safeTask, redirect ); + err = subMem->redirect( safeTask, doRedirect ); else - err = kIOReturnSuccess; + err = kIOReturnSuccess; return( err ); } -IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect ) +IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) { -// temporary binary compatibility IOMemoryDescriptor::redirect( safeTask, redirect ); - return( _parent->redirect( safeTask, redirect )); + return( _parent->redirect( 
safeTask, doRedirect ));
}
-IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
+IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
IOReturn err = kIOReturnSuccess;
if( superMap) {
-// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
+// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
} else {
LOCK;
if( logical && addressMap
- && (get_task_map( safeTask) != addressMap)
- && (0 == (options & kIOMapStatic))) {
-
- IOUnmapPages( addressMap, logical, length );
- if( !redirect) {
+ && (!safeTask || (get_task_map(safeTask) != addressMap))
+ && (0 == (options & kIOMapStatic)))
+ {
+ IOUnmapPages( addressMap, logical, length );
+ if(!doRedirect && safeTask
+ && ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical))
+ {
err = vm_deallocate( addressMap, logical, length );
err = memory->doMap( addressMap, &logical,
- (options & ~kIOMapAnywhere) /*| kIOMapReserve*/ );
+ (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
+ offset, length );
} else
err = kIOReturnSuccess;
#ifdef DEBUG
- IOLog("IOMemoryMap::redirect(%d, %x) %x from %lx\n", redirect, err, logical, addressMap);
+ IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
#endif
}
UNLOCK;
}
+ if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
+ && safeTask
+ && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
+ memory->redirect(safeTask, doRedirect);
+
return( err );
}
@@ -1433,6 +2485,17 @@ void _IOMemoryMap::taskDied( void )
UNLOCK;
}
+// Overload the release mechanism. All mappings must be a member
+// of a memory descriptor's _mappings set. This means that we
+// always have 2 references on a mapping. When either of these references
+// is released we need to free ourselves.
+void _IOMemoryMap::taggedRelease(const void *tag) const +{ + LOCK; + super::taggedRelease(tag, 2); + UNLOCK; +} + void _IOMemoryMap::free() { unmap(); @@ -1444,9 +2507,21 @@ void _IOMemoryMap::free() memory->release(); } + if (owner && (owner != memory)) + { + LOCK; + owner->removeMapping(this); + UNLOCK; + } + if( superMap) superMap->release(); + if (redirUPL) { + upl_commit(redirUPL, NULL, 0); + upl_deallocate(redirUPL); + } + super::free(); } @@ -1478,7 +2553,7 @@ IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor() return( memory ); } -_IOMemoryMap * _IOMemoryMap::isCompatible( +_IOMemoryMap * _IOMemoryMap::copyCompatible( IOMemoryDescriptor * owner, task_t task, IOVirtualAddress toAddress, @@ -1488,9 +2563,14 @@ _IOMemoryMap * _IOMemoryMap::isCompatible( { _IOMemoryMap * mapping; - if( (!task) || (task != getAddressTask())) + if( (!task) || (!addressMap) || (addressMap != get_task_map(task))) + return( 0 ); + if( options & kIOMapUnique) return( 0 ); - if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly)) + if( (options ^ _options) & kIOMapReadOnly) + return( 0 ); + if( (kIOMapDefaultCache != (_options & kIOMapCacheMask)) + && ((options ^ _options) & kIOMapCacheMask)) return( 0 ); if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress)) @@ -1511,7 +2591,7 @@ _IOMemoryMap * _IOMemoryMap::isCompatible( } else { mapping = new _IOMemoryMap; if( mapping - && !mapping->init( owner, this, _offset, _length )) { + && !mapping->initCompatible( owner, this, _offset, _length )) { mapping->release(); mapping = 0; } @@ -1521,12 +2601,12 @@ _IOMemoryMap * _IOMemoryMap::isCompatible( } IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset, - IOPhysicalLength * length) + IOPhysicalLength * _length) { IOPhysicalAddress address; LOCK; - address = memory->getPhysicalSegment( offset + _offset, length ); + address = memory->getPhysicalSegment( offset + _offset, _length ); UNLOCK; return( address ); @@ -1543,6 +2623,9 @@ void IOMemoryDescriptor::initialize( void ) { if( 0 == gIOMemoryLock) gIOMemoryLock = IORecursiveLockAlloc(); + + IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey, + ptoa_64(gIOMaximumMappedIOPageCount), 64); } void IOMemoryDescriptor::free( void ) @@ -1556,30 +2639,30 @@ void IOMemoryDescriptor::free( void ) IOMemoryMap * IOMemoryDescriptor::setMapping( task_t intoTask, IOVirtualAddress mapAddress, - IOOptionBits options = 0 ) + IOOptionBits options ) { - _IOMemoryMap * map; + _IOMemoryMap * newMap; - map = new _IOMemoryMap; + newMap = new _IOMemoryMap; LOCK; - if( map - && !map->init( this, intoTask, mapAddress, + if( newMap + && !newMap->initWithDescriptor( this, intoTask, mapAddress, options | kIOMapStatic, 0, getLength() )) { - map->release(); - map = 0; + newMap->release(); + newMap = 0; } - addMapping( map); + addMapping( newMap); UNLOCK; - return( map); + return( newMap); } IOMemoryMap * IOMemoryDescriptor::map( - IOOptionBits options = 0 ) + IOOptionBits options ) { return( makeMapping( this, kernel_task, 0, @@ -1591,8 +2674,8 @@ IOMemoryMap * IOMemoryDescriptor::map( task_t intoTask, IOVirtualAddress toAddress, IOOptionBits options, - IOByteCount offset = 0, - IOByteCount length = 0 ) + IOByteCount offset, + IOByteCount length ) { if( 0 == length) length = getLength(); @@ -1600,6 +2683,69 @@ IOMemoryMap * IOMemoryDescriptor::map( return( makeMapping( this, intoTask, toAddress, options, offset, length )); } +IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, + IOOptionBits options, + 
IOByteCount offset) +{ + IOReturn err = kIOReturnSuccess; + IOMemoryDescriptor * physMem = 0; + + LOCK; + + if (logical && addressMap) do + { + if ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) + { + physMem = memory; + physMem->retain(); + } + + if (!redirUPL) + { + vm_size_t size = length; + int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; + if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL, + NULL, NULL, + &flags)) + redirUPL = 0; + + if (physMem) + { + IOUnmapPages( addressMap, logical, length ); + physMem->redirect(0, true); + } + } + + if (newBackingMemory) + { + if (newBackingMemory != memory) + { + if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this, + options | kIOMapUnique | kIOMapReference, + offset, length)) + err = kIOReturnError; + } + if (redirUPL) + { + upl_commit(redirUPL, NULL, 0); + upl_deallocate(redirUPL); + redirUPL = 0; + } + if (physMem) + physMem->redirect(0, false); + } + } + while (false); + + UNLOCK; + + if (physMem) + physMem->release(); + + return (err); +} + IOMemoryMap * IOMemoryDescriptor::makeMapping( IOMemoryDescriptor * owner, task_t intoTask, @@ -1608,50 +2754,127 @@ IOMemoryMap * IOMemoryDescriptor::makeMapping( IOByteCount offset, IOByteCount length ) { + IOMemoryDescriptor * mapDesc = 0; _IOMemoryMap * mapping = 0; OSIterator * iter; LOCK; - do { - // look for an existing mapping - if( (iter = OSCollectionIterator::withCollection( _mappings))) { + do + { + if (kIOMapUnique & options) + { + IOPhysicalAddress phys; + IOByteCount physLen; - while( (mapping = (_IOMemoryMap *) iter->getNextObject())) { + if (owner != this) + continue; - if( (mapping = mapping->isCompatible( - owner, intoTask, toAddress, - options | kIOMapReference, - offset, length ))) - break; - } - iter->release(); - if( mapping) - continue; - } + if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) + { + phys = getPhysicalSegment(offset, &physLen); + if (!phys || (physLen < length)) + continue; + + mapDesc = IOMemoryDescriptor::withPhysicalAddress( + phys, length, _direction); + if (!mapDesc) + continue; + offset = 0; + } + else + { + mapDesc = this; + mapDesc->retain(); + } + + if (kIOMapReference & options) + { + mapping = (_IOMemoryMap *) toAddress; + mapping->retain(); + +#if 1 + uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL ); + pageOffset1 -= trunc_page_32( pageOffset1 ); + + uint32_t pageOffset2 = mapping->getVirtualAddress(); + pageOffset2 -= trunc_page_32( pageOffset2 ); + + if (pageOffset1 != pageOffset2) + IOLog("::redirect can't map offset %x to addr %x\n", + pageOffset1, mapping->getVirtualAddress()); +#endif + + + if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options, + offset, length )) + { +#ifdef DEBUG + IOLog("Didn't redirect map %08lx : %08lx\n", offset, length ); +#endif + } + + if (mapping->owner) + mapping->owner->removeMapping(mapping); + continue; + } + } + else + { + // look for an existing mapping + if( (iter = OSCollectionIterator::withCollection( _mappings))) { + + while( (mapping = (_IOMemoryMap *) iter->getNextObject())) { + + if( (mapping = mapping->copyCompatible( + owner, intoTask, toAddress, + options | kIOMapReference, + offset, length ))) + break; + } + iter->release(); + } - if( mapping || (options & kIOMapReference)) - continue; + if (mapping) + mapping->retain(); + if( mapping || (options & kIOMapReference)) + continue; + + mapDesc = owner; + 
mapDesc->retain(); + } owner = this; mapping = new _IOMemoryMap; if( mapping - && !mapping->init( owner, intoTask, toAddress, options, + && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options, offset, length )) { - +#ifdef DEBUG IOLog("Didn't make map %08lx : %08lx\n", offset, length ); +#endif mapping->release(); mapping = 0; } + if (mapping) + mapping->retain(); + } while( false ); - owner->addMapping( mapping); + if (mapping) + { + mapping->owner = owner; + owner->addMapping( mapping); + mapping->release(); + } UNLOCK; + if (mapDesc) + mapDesc->release(); + return( mapping); } @@ -1661,19 +2884,16 @@ void IOMemoryDescriptor::addMapping( if( mapping) { if( 0 == _mappings) _mappings = OSSet::withCapacity(1); - if( _mappings && _mappings->setObject( mapping )) - mapping->release(); /* really */ + if( _mappings ) + _mappings->setObject( mapping ); } } void IOMemoryDescriptor::removeMapping( IOMemoryMap * mapping ) { - if( _mappings) { - mapping->retain(); - mapping->retain(); + if( _mappings) _mappings->removeObject( mapping); - } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1687,22 +2907,38 @@ OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor) bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent, IOByteCount offset, IOByteCount length, - IODirection withDirection ) + IODirection direction ) { - if( !super::init()) - return( false ); - if( !parent) return( false); if( (offset + length) > parent->getLength()) return( false); + /* + * We can check the _parent instance variable before having ever set it + * to an initial value because I/O Kit guarantees that all our instance + * variables are zeroed on an object's allocation. + */ + + if( !_parent) { + if( !super::init()) + return( false ); + } else { + /* + * An existing memory descriptor is being retargeted to + * point to somewhere else. Clean up our present state. 
+ */ + + _parent->release(); + _parent = 0; + } + parent->retain(); _parent = parent; _start = offset; _length = length; - _direction = withDirection; + _direction = direction; _tag = parent->getTag(); return( true ); @@ -1739,6 +2975,41 @@ IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, return( address ); } + +IOReturn IOSubMemoryDescriptor::doMap( + vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset, + IOByteCount length ) +{ + if( sourceOffset >= _length) + return( kIOReturnOverrun ); + return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length)); +} + +IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, + IOByteCount * length ) +{ + IOPhysicalAddress address; + IOByteCount actualLength; + + assert(offset <= _length); + + if( length) + *length = 0; + + if( offset >= _length) + return( 0 ); + + address = _parent->getSourceSegment( offset + _start, &actualLength ); + + if( address && length) + *length = min( _length - offset, actualLength ); + + return( address ); +} + void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset, IOByteCount * lengthOfSegment) { @@ -1746,7 +3017,7 @@ void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset, } IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset, - void * bytes, IOByteCount withLength) + void * bytes, IOByteCount length) { IOByteCount byteCount; @@ -1757,14 +3028,14 @@ IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset, LOCK; byteCount = _parent->readBytes( _start + offset, bytes, - min(withLength, _length - offset) ); + min(length, _length - offset) ); UNLOCK; return( byteCount ); } IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset, - const void* bytes, IOByteCount withLength) + const void* bytes, IOByteCount length) { IOByteCount byteCount; @@ -1775,14 +3046,44 @@ IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset, LOCK; byteCount = _parent->writeBytes( _start + offset, bytes, - min(withLength, _length - offset) ); + min(length, _length - offset) ); UNLOCK; return( byteCount ); } +IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) +{ + IOReturn err; + + LOCK; + err = _parent->setPurgeable( newState, oldState ); + UNLOCK; + + return( err ); +} + +IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options, + IOByteCount offset, IOByteCount length ) +{ + IOReturn err; + + assert(offset <= _length); + + if( offset >= _length) + return( kIOReturnOverrun ); + + LOCK; + err = _parent->performOperation( options, _start + offset, + min(length, _length - offset) ); + UNLOCK; + + return( err ); +} + IOReturn IOSubMemoryDescriptor::prepare( - IODirection forDirection = kIODirectionNone) + IODirection forDirection) { IOReturn err; @@ -1794,7 +3095,7 @@ IOReturn IOSubMemoryDescriptor::prepare( } IOReturn IOSubMemoryDescriptor::complete( - IODirection forDirection = kIODirectionNone) + IODirection forDirection) { IOReturn err; @@ -1813,14 +3114,21 @@ IOMemoryMap * IOSubMemoryDescriptor::makeMapping( IOByteCount offset, IOByteCount length ) { - IOMemoryMap * mapping; + IOMemoryMap * mapping = 0; - mapping = (IOMemoryMap *) _parent->makeMapping( + if (!(kIOMapUnique & options)) + mapping = (IOMemoryMap *) _parent->makeMapping( _parent, intoTask, toAddress - (_start + offset), options | kIOMapReference, _start + offset, length ); + if( !mapping) + mapping = (IOMemoryMap *) _parent->makeMapping( 
+ _parent, intoTask, + toAddress, + options, _start + offset, length ); + if( !mapping) mapping = super::makeMapping( owner, intoTask, toAddress, options, offset, length ); @@ -1832,17 +3140,17 @@ IOMemoryMap * IOSubMemoryDescriptor::makeMapping( bool IOSubMemoryDescriptor::initWithAddress(void * address, - IOByteCount withLength, - IODirection withDirection) + IOByteCount length, + IODirection direction) { return( false ); } bool IOSubMemoryDescriptor::initWithAddress(vm_address_t address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) + IOByteCount length, + IODirection direction, + task_t task) { return( false ); } @@ -1850,8 +3158,8 @@ IOSubMemoryDescriptor::initWithAddress(vm_address_t address, bool IOSubMemoryDescriptor::initWithPhysicalAddress( IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ) + IOByteCount length, + IODirection direction ) { return( false ); } @@ -1860,9 +3168,9 @@ bool IOSubMemoryDescriptor::initWithRanges( IOVirtualRange * ranges, UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) + IODirection direction, + task_t task, + bool asReference) { return( false ); } @@ -1870,19 +3178,164 @@ IOSubMemoryDescriptor::initWithRanges( bool IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, - IODirection withDirection, - bool asReference = false) + IODirection direction, + bool asReference) { return( false ); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 0); -OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1); -OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2); -OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3); -OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4); +bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const +{ + OSSymbol const *keys[2]; + OSObject *values[2]; + struct SerData { + user_addr_t address; + user_size_t length; + } *vcopy; + unsigned int index, nRanges; + bool result; + + IOOptionBits type = _flags & kIOMemoryTypeMask; + + if (s == NULL) return false; + if (s->previouslySerialized(this)) return true; + + // Pretend we are an array. + if (!s->addXMLStartTag(this, "array")) return false; + + nRanges = _rangesCount; + vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges); + if (vcopy == 0) return false; + + keys[0] = OSSymbol::withCString("address"); + keys[1] = OSSymbol::withCString("length"); + + result = false; + values[0] = values[1] = 0; + + // From this point on we can go to bail. + + // Copy the volatile data so we don't have to allocate memory + // while the lock is held. + LOCK; + if (nRanges == _rangesCount) { + Ranges vec = _ranges; + for (index = 0; index < nRanges; index++) { + user_addr_t addr; IOByteCount len; + getAddrLenForInd(addr, len, type, vec, index); + vcopy[index].address = addr; + vcopy[index].length = len; + } + } else { + // The descriptor changed out from under us. Give up. + UNLOCK; + result = false; + goto bail; + } + UNLOCK; + + for (index = 0; index < nRanges; index++) + { + user_addr_t addr = vcopy[index].address; + IOByteCount len = (IOByteCount) vcopy[index].length; + values[0] = + OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 
64 : 32); + if (values[0] == 0) { + result = false; + goto bail; + } + values[1] = OSNumber::withNumber(len, sizeof(len) * 8); + if (values[1] == 0) { + result = false; + goto bail; + } + OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2); + if (dict == 0) { + result = false; + goto bail; + } + values[0]->release(); + values[1]->release(); + values[0] = values[1] = 0; + + result = dict->serialize(s); + dict->release(); + if (!result) { + goto bail; + } + } + result = s->addXMLEndTag("array"); + + bail: + if (values[0]) + values[0]->release(); + if (values[1]) + values[1]->release(); + if (keys[0]) + keys[0]->release(); + if (keys[1]) + keys[1]->release(); + if (vcopy) + IOFree(vcopy, sizeof(IOVirtualRange) * nRanges); + return result; +} + +bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const +{ + if (!s) { + return (false); + } + if (s->previouslySerialized(this)) return true; + + // Pretend we are a dictionary. + // We must duplicate the functionality of OSDictionary here + // because otherwise object references will not work; + // they are based on the value of the object passed to + // previouslySerialized and addXMLStartTag. + + if (!s->addXMLStartTag(this, "dict")) return false; + + char const *keys[3] = {"offset", "length", "parent"}; + + OSObject *values[3]; + values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8); + if (values[0] == 0) + return false; + values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8); + if (values[1] == 0) { + values[0]->release(); + return false; + } + values[2] = _parent; + + bool result = true; + for (int i=0; i<3; i++) { + if (!s->addString("") || + !s->addString(keys[i]) || + !s->addXMLEndTag("key") || + !values[i]->serialize(s)) { + result = false; + break; + } + } + values[0]->release(); + values[1]->release(); + if (!result) { + return false; + } + + return s->addXMLEndTag("dict"); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7); @@ -1894,3 +3347,7 @@ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15); + +/* ex-inline function implementation */ +IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress() + { return( getPhysicalSegment( 0, 0 )); }
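
/*
 * Illustrative sketch, not part of IOMemoryDescriptor.cpp: a minimal
 * driver-side calling sequence for the prepare()/complete() wiring path and
 * the getPhysicalSegment() accessor shown above. The function and variable
 * names (ExampleWireAndWalk, userTask, userBuf, userLen) are hypothetical
 * placeholders, and error handling is abbreviated.
 */
#include <IOKit/IOMemoryDescriptor.h>

static IOReturn
ExampleWireAndWalk(task_t userTask, vm_address_t userBuf, IOByteCount userLen)
{
    // Describe a buffer that lives in another task's address space.
    // kIODirectionOutIn marks it both readable and writable for the transfer.
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(
                                  userBuf, userLen, kIODirectionOutIn, userTask);
    if (!md)
        return kIOReturnNoMemory;

    // prepare() pages in and wires the memory for the duration of the I/O
    // (the wireVirtual()/ioPLBlock path above handles pageable memory).
    IOReturn err = md->prepare();
    if (kIOReturnSuccess == err) {
        IOByteCount       segLen = 0;
        IOPhysicalAddress segPhys;

        // Walk the physical segments, e.g. to build a scatter/gather list.
        for (IOByteCount offset = 0; offset < md->getLength(); offset += segLen) {
            segPhys = md->getPhysicalSegment(offset, &segLen);
            if (!segPhys || !segLen)
                break;
            // ... program one DMA element with (segPhys, segLen) ...
        }

        // complete() undoes the wiring once the transfer has finished.
        md->complete();
    }

    md->release();
    return err;
}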