X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/cf7d32b81c573a0536dc4da4157f9c26f8d0bed3..04b8595b18b1b41ac7a206e4b3d51a635f8413d7:/iokit/Kernel/IOMemoryDescriptor.cpp diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp index b86f7f651..0c7744386 100644 --- a/iokit/Kernel/IOMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -31,7 +31,8 @@ * HISTORY * */ -// 45678901234567890123456789012345678901234567890123456789012345678901234567890 + + #include #include @@ -39,13 +40,17 @@ #include #include #include +#include #include +#ifndef __LP64__ +#include +#endif /* !__LP64__ */ + #include #include #include "IOKitKernelInternal.h" -#include "IOCopyMapper.h" #include #include @@ -67,26 +72,8 @@ __BEGIN_DECLS #include extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); -void ipc_port_release_send(ipc_port_t port); - -/* Copy between a physical page and a virtual address in the given vm_map */ -kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which); - -memory_object_t -device_pager_setup( - memory_object_t pager, - int device_handle, - vm_size_t size, - int flags); -void -device_pager_deallocate( - memory_object_t); -kern_return_t -device_pager_populate_object( - memory_object_t pager, - vm_object_offset_t offset, - ppnum_t phys_addr, - vm_size_t size); +extern void ipc_port_release_send(ipc_port_t port); + kern_return_t memory_object_iopl_request( ipc_port_t port, @@ -97,18 +84,16 @@ memory_object_iopl_request( unsigned int *page_list_count, int *flags); +// osfmk/device/iokit_rpc.c +unsigned int IODefaultCacheBits(addr64_t pa); unsigned int IOTranslateCacheBits(struct phys_entry *pp); __END_DECLS -#define kIOMaximumMappedIOByteCount (512*1024*1024) +#define kIOMapperWaitSystem ((IOMapper *) 1) static IOMapper * gIOSystemMapper = NULL; -IOCopyMapper * gIOCopyMapper = NULL; - -static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount); - ppnum_t gIOLastPage; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -135,68 +120,7 @@ static IORecursiveLock * gIOMemoryLock; #define DEBG(fmt, args...) 
{} #endif -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -class _IOMemoryMap : public IOMemoryMap -{ - OSDeclareDefaultStructors(_IOMemoryMap) -public: - IOMemoryDescriptor * fMemory; - IOMemoryMap * fSuperMap; - mach_vm_size_t fOffset; - mach_vm_address_t fAddress; - mach_vm_size_t fLength; - task_t fAddressTask; - vm_map_t fAddressMap; - IOOptionBits fOptions; - upl_t fRedirUPL; - ipc_port_t fRedirEntry; - IOMemoryDescriptor * fOwner; - -protected: - virtual void taggedRelease(const void *tag = 0) const; - virtual void free(); - -public: - - // IOMemoryMap methods - virtual IOVirtualAddress getVirtualAddress(); - virtual IOByteCount getLength(); - virtual task_t getAddressTask(); - virtual mach_vm_address_t getAddress(); - virtual mach_vm_size_t getSize(); - virtual IOMemoryDescriptor * getMemoryDescriptor(); - virtual IOOptionBits getMapOptions(); - - virtual IOReturn unmap(); - virtual void taskDied(); - - virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory, - IOOptionBits options, - IOByteCount offset = 0); - - virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory, - IOOptionBits options, - mach_vm_size_t offset = 0); - - virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, - IOByteCount * length); - - // for IOMemoryDescriptor use - _IOMemoryMap * copyCompatible( _IOMemoryMap * newMapping ); - - bool init( - task_t intoTask, - mach_vm_address_t toAddress, - IOOptionBits options, - mach_vm_size_t offset, - mach_vm_size_t length ); - - bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset); - - IOReturn redirect( - task_t intoTask, bool redirect ); -}; +#define IOMD_DEBUG_DMAACTIVE 1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -208,68 +132,70 @@ enum ioPLBlockFlags { kIOPLExternUPL = 0x00000002, }; -struct typePersMDData +struct IOMDPersistentInitData { - const IOGeneralMemoryDescriptor *fMD; - ipc_port_t fMemEntry; + const IOGeneralMemoryDescriptor * fMD; + IOMemoryReference * fMemRef; }; struct ioPLBlock { upl_t fIOPL; - vm_address_t fIOMDOffset; // The offset of this iopl in descriptor - vm_offset_t fPageInfo; // Pointer to page list or index into it - ppnum_t fMappedBase; // Page number of first page in this iopl - unsigned int fPageOffset; // Offset within first page of iopl - unsigned int fFlags; // Flags + vm_address_t fPageInfo; // Pointer to page list or index into it + uint32_t fIOMDOffset; // The offset of this iopl in descriptor + ppnum_t fMappedPage; // Page number of first page in this iopl + unsigned int fPageOffset; // Offset within first page of iopl + unsigned int fFlags; // Flags }; struct ioGMDData { - IOMapper *fMapper; + IOMapper * fMapper; + uint8_t fDMAMapNumAddressBits; + uint64_t fDMAMapAlignment; + addr64_t fMappedBase; + uint64_t fPreparationID; unsigned int fPageCnt; - upl_page_info_t fPageList[]; - ioPLBlock fBlocks[]; + unsigned char fDiscontig:1; + unsigned char fCompletionError:1; + unsigned char _resv:6; +#if __LP64__ + // align arrays to 8 bytes so following macros work + unsigned char fPad[3]; +#endif + upl_page_info_t fPageList[1]; /* variable length */ + ioPLBlock fBlocks[1]; /* variable length */ }; #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) -#define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt])) +#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt])) #define getNumIOPL(osd, d) \ (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) #define 
getPageList(d) (&(d->fPageList[0])) #define computeDataSize(p, u) \ - (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) - + (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE ) - +#define next_page(a) ( trunc_page(a) + PAGE_SIZE ) extern "C" { kern_return_t device_data_action( - int device_handle, + uintptr_t device_handle, ipc_port_t device_pager, vm_prot_t protection, vm_object_offset_t offset, vm_size_t size) { - struct ExpansionData { - void * devicePager; - unsigned int pagerContig:1; - unsigned int unused:31; - IOMemoryDescriptor * memory; - }; kern_return_t kr; - ExpansionData * ref = (ExpansionData *) device_handle; + IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; IOMemoryDescriptor * memDesc; LOCK; - memDesc = ref->memory; + memDesc = ref->dp.memory; if( memDesc) { memDesc->retain(); - kr = memDesc->handleFault( device_pager, 0, 0, - offset, size, kIOMapDefaultCache /*?*/); + kr = memDesc->handleFault(device_pager, offset, size); memDesc->release(); } else @@ -280,27 +206,23 @@ kern_return_t device_data_action( } kern_return_t device_close( - int device_handle) + uintptr_t device_handle) { - struct ExpansionData { - void * devicePager; - unsigned int pagerContig:1; - unsigned int unused:31; - IOMemoryDescriptor * memory; - }; - ExpansionData * ref = (ExpansionData *) device_handle; + IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; - IODelete( ref, ExpansionData, 1 ); + IODelete( ref, IOMemoryDescriptorReserved, 1 ); return( kIOReturnSuccess ); } }; // end extern "C" +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + // Note this inline function uses C++ reference arguments to return values // This means that pointers are not passed and NULLs don't have to be // checked for as a NULL reference is illegal. static inline void -getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables +getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind) { assert(kIOMemoryTypeUIO == type @@ -308,13 +230,16 @@ getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type); if (kIOMemoryTypeUIO == type) { user_size_t us; - uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us; + user_addr_t ad; + uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us; } +#ifndef __LP64__ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) { IOAddressRange cur = r.v64[ind]; addr = cur.address; len = cur.length; } +#endif /* !__LP64__ */ else { IOVirtualRange cur = r.v[ind]; addr = cur.address; @@ -324,37 +249,808 @@ getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * withAddress: - * - * Create a new IOMemoryDescriptor. The buffer is a virtual address - * relative to the specified task. If no task is supplied, the kernel - * task is implied. 
- */ +static IOReturn +purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state) +{ + IOReturn err = kIOReturnSuccess; + + *control = VM_PURGABLE_SET_STATE; + + enum { kIOMemoryPurgeableControlMask = 15 }; + + switch (kIOMemoryPurgeableControlMask & newState) + { + case kIOMemoryPurgeableKeepCurrent: + *control = VM_PURGABLE_GET_STATE; + break; + + case kIOMemoryPurgeableNonVolatile: + *state = VM_PURGABLE_NONVOLATILE; + break; + case kIOMemoryPurgeableVolatile: + *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask); + break; + case kIOMemoryPurgeableEmpty: + *state = VM_PURGABLE_EMPTY; + break; + default: + err = kIOReturnBadArgument; + break; + } + return (err); +} + +static IOReturn +purgeableStateBits(int * state) +{ + IOReturn err = kIOReturnSuccess; + + switch (VM_PURGABLE_STATE_MASK & *state) + { + case VM_PURGABLE_NONVOLATILE: + *state = kIOMemoryPurgeableNonVolatile; + break; + case VM_PURGABLE_VOLATILE: + *state = kIOMemoryPurgeableVolatile; + break; + case VM_PURGABLE_EMPTY: + *state = kIOMemoryPurgeableEmpty; + break; + default: + *state = kIOMemoryPurgeableNonVolatile; + err = kIOReturnNotReady; + break; + } + return (err); +} + + +static vm_prot_t +vmProtForCacheMode(IOOptionBits cacheMode) +{ + vm_prot_t prot = 0; + switch (cacheMode) + { + case kIOInhibitCache: + SET_MAP_MEM(MAP_MEM_IO, prot); + break; + + case kIOWriteThruCache: + SET_MAP_MEM(MAP_MEM_WTHRU, prot); + break; + + case kIOWriteCombineCache: + SET_MAP_MEM(MAP_MEM_WCOMB, prot); + break; + + case kIOCopybackCache: + SET_MAP_MEM(MAP_MEM_COPYBACK, prot); + break; + + case kIOCopybackInnerCache: + SET_MAP_MEM(MAP_MEM_INNERWBACK, prot); + break; + + case kIODefaultCache: + default: + SET_MAP_MEM(MAP_MEM_NOOP, prot); + break; + } + + return (prot); +} + +static unsigned int +pagerFlagsForCacheMode(IOOptionBits cacheMode) +{ + unsigned int pagerFlags = 0; + switch (cacheMode) + { + case kIOInhibitCache: + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; + + case kIOWriteThruCache: + pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; + + case kIOWriteCombineCache: + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT; + break; + + case kIOCopybackCache: + pagerFlags = DEVICE_PAGER_COHERENT; + break; + + case kIOCopybackInnerCache: + pagerFlags = DEVICE_PAGER_COHERENT; + break; + + case kIODefaultCache: + default: + pagerFlags = -1U; + break; + } + return (pagerFlags); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +struct IOMemoryEntry +{ + ipc_port_t entry; + int64_t offset; + uint64_t size; +}; + +struct IOMemoryReference +{ + volatile SInt32 refCount; + vm_prot_t prot; + uint32_t capacity; + uint32_t count; + IOMemoryEntry entries[0]; +}; + +enum +{ + kIOMemoryReferenceReuse = 0x00000001, + kIOMemoryReferenceWrite = 0x00000002, +}; + +SInt32 gIOMemoryReferenceCount; + +IOMemoryReference * +IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc) +{ + IOMemoryReference * ref; + size_t newSize, oldSize, copySize; + + newSize = (sizeof(IOMemoryReference) + - sizeof(ref->entries) + + capacity * sizeof(ref->entries[0])); + ref = (typeof(ref)) IOMalloc(newSize); + if (realloc) + { + oldSize = (sizeof(IOMemoryReference) + - sizeof(realloc->entries) + + realloc->capacity * sizeof(realloc->entries[0])); + copySize = oldSize; + if 
(copySize > newSize) copySize = newSize; + if (ref) bcopy(realloc, ref, copySize); + IOFree(realloc, oldSize); + } + else if (ref) + { + bzero(ref, sizeof(*ref)); + ref->refCount = 1; + OSIncrementAtomic(&gIOMemoryReferenceCount); + } + if (!ref) return (0); + ref->capacity = capacity; + return (ref); +} + +void +IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref) +{ + IOMemoryEntry * entries; + size_t size; + + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) + { + entries--; + ipc_port_release_send(entries->entry); + } + size = (sizeof(IOMemoryReference) + - sizeof(ref->entries) + + ref->capacity * sizeof(ref->entries[0])); + IOFree(ref, size); + + OSDecrementAtomic(&gIOMemoryReferenceCount); +} + +void +IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref) +{ + if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref); +} + + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceCreate( + IOOptionBits options, + IOMemoryReference ** reference) +{ + enum { kCapacity = 4, kCapacityInc = 4 }; + + kern_return_t err; + IOMemoryReference * ref; + IOMemoryEntry * entries; + IOMemoryEntry * cloneEntries; + vm_map_t map; + ipc_port_t entry, cloneEntry; + vm_prot_t prot; + memory_object_size_t actualSize; + uint32_t rangeIdx; + uint32_t count; + mach_vm_address_t entryAddr, endAddr, entrySize; + mach_vm_size_t srcAddr, srcLen; + mach_vm_size_t nextAddr, nextLen; + mach_vm_size_t offset, remain; + IOByteCount physLen; + IOOptionBits type = (_flags & kIOMemoryTypeMask); + IOOptionBits cacheMode; + unsigned int pagerFlags; + + ref = memoryReferenceAlloc(kCapacity, NULL); + if (!ref) return (kIOReturnNoMemory); + entries = &ref->entries[0]; + count = 0; + + offset = 0; + rangeIdx = 0; + if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + else + { + nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + nextLen = physLen; + // default cache mode for physical + if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) + { + IOOptionBits mode; + pagerFlags = IODefaultCacheBits(nextAddr); + if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) + { + if (DEVICE_PAGER_GUARDED & pagerFlags) + mode = kIOInhibitCache; + else + mode = kIOWriteCombineCache; + } + else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags) + mode = kIOWriteThruCache; + else + mode = kIOCopybackCache; + _flags |= (mode << kIOMemoryBufferCacheShift); + } + } + + // cache mode & vm_prot + prot = VM_PROT_READ; + cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); + prot |= vmProtForCacheMode(cacheMode); + // VM system requires write access to change cache mode + if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE; + if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE; + if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE; + + if ((kIOMemoryReferenceReuse & options) && _memRef) + { + cloneEntries = &_memRef->entries[0]; + prot |= MAP_MEM_NAMED_REUSE; + } + + if (_task) + { + // virtual ranges + + if (kIOMemoryBufferPageable & _flags) + { + // IOBufferMemoryDescriptor alloc - set flags for entry + object create + prot |= MAP_MEM_NAMED_CREATE; + if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE; + prot |= VM_PROT_WRITE; + map = NULL; + } + else map = get_task_map(_task); + + remain = _length; + while (remain) + { + srcAddr = nextAddr; + srcLen = nextLen; + nextAddr = 0; + nextLen = 0; + // coalesce addr range + for (++rangeIdx; 
rangeIdx < _rangesCount; rangeIdx++) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if ((srcAddr + srcLen) != nextAddr) break; + srcLen += nextLen; + } + entryAddr = trunc_page_64(srcAddr); + endAddr = round_page_64(srcAddr + srcLen); + do + { + entrySize = (endAddr - entryAddr); + if (!entrySize) break; + actualSize = entrySize; + + cloneEntry = MACH_PORT_NULL; + if (MAP_MEM_NAMED_REUSE & prot) + { + if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry; + else prot &= ~MAP_MEM_NAMED_REUSE; + } + + err = mach_make_memory_entry_64(map, + &actualSize, entryAddr, prot, &entry, cloneEntry); + + if (KERN_SUCCESS != err) break; + if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize"); + + if (count >= ref->capacity) + { + ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref); + entries = &ref->entries[count]; + } + entries->entry = entry; + entries->size = actualSize; + entries->offset = offset + (entryAddr - srcAddr); + entryAddr += actualSize; + if (MAP_MEM_NAMED_REUSE & prot) + { + if ((cloneEntries->entry == entries->entry) + && (cloneEntries->size == entries->size) + && (cloneEntries->offset == entries->offset)) cloneEntries++; + else prot &= ~MAP_MEM_NAMED_REUSE; + } + entries++; + count++; + } + while (true); + offset += srcLen; + remain -= srcLen; + } + } + else + { + // _task == 0, physical + memory_object_t pager; + vm_size_t size = ptoa_32(_pages); + + if (!getKernelReserved()) panic("getKernelReserved"); + + reserved->dp.pagerContig = (1 == _rangesCount); + reserved->dp.memory = this; + + pagerFlags = pagerFlagsForCacheMode(cacheMode); + if (-1U == pagerFlags) panic("phys is kIODefaultCache"); + if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS; + + pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved, + size, pagerFlags); + assert (pager); + if (!pager) err = kIOReturnVMError; + else + { + srcAddr = nextAddr; + entryAddr = trunc_page_64(srcAddr); + err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/, + size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry); + assert (KERN_SUCCESS == err); + if (KERN_SUCCESS != err) device_pager_deallocate(pager); + else + { + reserved->dp.devicePager = pager; + entries->entry = entry; + entries->size = size; + entries->offset = offset + (entryAddr - srcAddr); + entries++; + count++; + } + } + } + + ref->count = count; + ref->prot = prot; + + if (KERN_SUCCESS == err) + { + if (MAP_MEM_NAMED_REUSE & prot) + { + memoryReferenceFree(ref); + OSIncrementAtomic(&_memRef->refCount); + ref = _memRef; + } + } + else + { + memoryReferenceFree(ref); + ref = NULL; + } + + *reference = ref; + + return (err); +} + +struct IOMemoryDescriptorMapAllocRef +{ + vm_map_t map; + mach_vm_address_t mapped; + mach_vm_size_t size; + vm_prot_t prot; + IOOptionBits options; +}; + +static kern_return_t +IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) +{ + IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref; + IOReturn err; + vm_map_offset_t addr; + + addr = ref->mapped; + err = vm_map_enter_mem_object(map, &addr, ref->size, + (vm_map_offset_t) 0, + (((ref->options & kIOMapAnywhere) + ? 
VM_FLAGS_ANYWHERE + : VM_FLAGS_FIXED) + | VM_MAKE_TAG(VM_MEMORY_IOKIT) + | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + IPC_PORT_NULL, + (memory_object_offset_t) 0, + false, /* copy */ + ref->prot, + ref->prot, + VM_INHERIT_NONE); + if (KERN_SUCCESS == err) + { + ref->mapped = (mach_vm_address_t) addr; + ref->map = map; + } + + return( err ); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceMap( + IOMemoryReference * ref, + vm_map_t map, + mach_vm_size_t inoffset, + mach_vm_size_t size, + IOOptionBits options, + mach_vm_address_t * inaddr) +{ + IOReturn err; + int64_t offset = inoffset; + uint32_t rangeIdx, entryIdx; + vm_map_offset_t addr, mapAddr; + vm_map_offset_t pageOffset, entryOffset, remain, chunk; + + mach_vm_address_t srcAddr, nextAddr; + mach_vm_size_t srcLen, nextLen; + IOByteCount physLen; + IOMemoryEntry * entry; + vm_prot_t prot, memEntryCacheMode; + IOOptionBits type; + IOOptionBits cacheMode; + + /* + * For the kIOMapPrefault option. + */ + upl_page_info_t *pageList = NULL; + UInt currentPageIndex = 0; + + type = _flags & kIOMemoryTypeMask; + prot = VM_PROT_READ; + if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE; + prot &= ref->prot; + + cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift); + if (kIODefaultCache != cacheMode) + { + // VM system requires write access to update named entry cache mode + memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); + } + + if (_task) + { + // Find first range for offset + for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if (remain < nextLen) break; + remain -= nextLen; + } + } + else + { + rangeIdx = 0; + remain = 0; + nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + nextLen = size; + } + + assert(remain < nextLen); + if (remain >= nextLen) return (kIOReturnBadArgument); + + nextAddr += remain; + nextLen -= remain; + pageOffset = (page_mask & nextAddr); + addr = 0; + if (!(options & kIOMapAnywhere)) + { + addr = *inaddr; + if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned); + addr -= pageOffset; + } + + // find first entry for offset + for (entryIdx = 0; + (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset); + entryIdx++) {} + entryIdx--; + entry = &ref->entries[entryIdx]; + + // allocate VM + size = round_page_64(size + pageOffset); + { + IOMemoryDescriptorMapAllocRef ref; + ref.map = map; + ref.options = options; + ref.size = size; + ref.prot = prot; + if (options & kIOMapAnywhere) + // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE + ref.mapped = 0; + else + ref.mapped = addr; + + if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) + err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); + else + err = IOMemoryDescriptorMapAlloc(ref.map, &ref); + if (KERN_SUCCESS == err) + { + addr = ref.mapped; + map = ref.map; + } + } + + /* + * Prefaulting is only possible if we wired the memory earlier. Check the + * memory type, and the underlying data. + */ + if (options & kIOMapPrefault) { + /* + * The memory must have been wired by calling ::prepare(), otherwise + * we don't have the UPL. Without UPLs, pages cannot be pre-faulted + */ + assert(map != kernel_map); + assert(_wireCount != 0); + assert(_memoryEntries != NULL); + if ((map == kernel_map) || + (_wireCount == 0) || + (_memoryEntries == NULL)) + { + return kIOReturnBadArgument; + } + + // Get the page list. 
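The prefault path below locates the IOPL block covering the map offset, then converts the rebased byte offset into an index into that block's UPL page list. A rough userspace sketch of that lookup follows; the Block struct and the 4 KB kPageSize are illustrative stand-ins for ioPLBlock and PAGE_SIZE, not kernel definitions.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for ioPLBlock: fIOMDOffset is the block's byte offset
// within the descriptor, fPageOffset the offset within its first page.
struct Block { uint64_t fIOMDOffset; uint64_t fPageOffset; };

static const uint64_t kPageSize = 4096;   // assumption; the kernel uses PAGE_SIZE / atop_32()

// Index of the first page (within the found block's own page list) covering 'offset'.
// Mirrors the kernel scan: walk past the containing block, then back up one.
static uint64_t firstPageIndexForOffset(const std::vector<Block> &blocks, uint64_t offset)
{
    size_t idx = 0;
    while (idx < blocks.size() && offset >= blocks[idx].fIOMDOffset) idx++;
    idx--;                                    // assumes offset >= blocks[0].fIOMDOffset
    uint64_t offsetInIOPL = offset - blocks[idx].fIOMDOffset + blocks[idx].fPageOffset;
    return offsetInIOPL / kPageSize;          // atop_32() in the kernel
}

int main()
{
    std::vector<Block> blocks = { {0, 512}, {8192, 0} };     // two illustrative blocks
    assert(firstPageIndexForOffset(blocks, 4096) == 1);      // 4096 + 512 falls in page 1 of block 0
    assert(firstPageIndexForOffset(blocks, 8192) == 0);      // start of block 1's own page list
    return 0;
}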
+ ioGMDData* dataP = getDataP(_memoryEntries); + ioPLBlock const* ioplList = getIOPLList(dataP); + pageList = getPageList(dataP); + + // Get the number of IOPLs. + UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); + + /* + * Scan through the IOPL Info Blocks, looking for the first block containing + * the offset. The research will go past it, so we'll need to go back to the + * right range at the end. + */ + UInt ioplIndex = 0; + while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) + ioplIndex++; + ioplIndex--; + + // Retrieve the IOPL info block. + ioPLBlock ioplInfo = ioplList[ioplIndex]; + + /* + * For external UPLs, the fPageInfo points directly to the UPL's page_info_t + * array. + */ + if (ioplInfo.fFlags & kIOPLExternUPL) + pageList = (upl_page_info_t*) ioplInfo.fPageInfo; + else + pageList = &pageList[ioplInfo.fPageInfo]; + + // Rebase [offset] into the IOPL in order to looks for the first page index. + mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset; + + // Retrieve the index of the first page corresponding to the offset. + currentPageIndex = atop_32(offsetInIOPL); + } + + // enter mappings + remain = size; + mapAddr = addr; + addr += pageOffset; + while (remain && nextLen && (KERN_SUCCESS == err)) + { + srcAddr = nextAddr; + srcLen = nextLen; + nextAddr = 0; + nextLen = 0; + // coalesce addr range + for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if ((srcAddr + srcLen) != nextAddr) break; + srcLen += nextLen; + } + + while (srcLen && (KERN_SUCCESS == err)) + { + entryOffset = offset - entry->offset; + if ((page_mask & entryOffset) != pageOffset) + { + err = kIOReturnNotAligned; + break; + } + + if (kIODefaultCache != cacheMode) + { + vm_size_t unused = 0; + err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/, + memEntryCacheMode, NULL, entry->entry); + assert (KERN_SUCCESS == err); + } + + entryOffset -= pageOffset; + if (entryOffset >= entry->size) panic("entryOffset"); + chunk = entry->size - entryOffset; + if (chunk) + { + if (chunk > remain) chunk = remain; + + if (options & kIOMapPrefault) { + UInt nb_pages = round_page(chunk) / PAGE_SIZE; + err = vm_map_enter_mem_object_prefault(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE + | VM_MAKE_TAG(VM_MEMORY_IOKIT) + | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + entry->entry, + entryOffset, + prot, // cur + prot, // max + &pageList[currentPageIndex], + nb_pages); + + // Compute the next index in the page list. 
+ currentPageIndex += nb_pages; + assert(currentPageIndex <= _pages); + } else { + err = vm_map_enter_mem_object(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE + | VM_MAKE_TAG(VM_MEMORY_IOKIT) + | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + entry->entry, + entryOffset, + false, // copy + prot, // cur + prot, // max + VM_INHERIT_NONE); + } + + if (KERN_SUCCESS != err) break; + remain -= chunk; + if (!remain) break; + mapAddr += chunk; + offset += chunk - pageOffset; + } + pageOffset = 0; + entry++; + entryIdx++; + if (entryIdx >= ref->count) + { + err = kIOReturnOverrun; + break; + } + } + } + + if ((KERN_SUCCESS != err) && addr) + { + (void) mach_vm_deallocate(map, trunc_page_64(addr), size); + addr = 0; + } + *inaddr = addr; + + return (err); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts( + IOMemoryReference * ref, + IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount) +{ + IOReturn err; + IOMemoryEntry * entries; + unsigned int resident, dirty; + unsigned int totalResident, totalDirty; + + totalResident = totalDirty = 0; + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) + { + entries--; + err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty); + if (KERN_SUCCESS != err) break; + totalResident += resident; + totalDirty += dirty; + } + + if (residentPageCount) *residentPageCount = totalResident; + if (dirtyPageCount) *dirtyPageCount = totalDirty; + return (err); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable( + IOMemoryReference * ref, + IOOptionBits newState, + IOOptionBits * oldState) +{ + IOReturn err; + IOMemoryEntry * entries; + vm_purgable_t control; + int totalState, state; + + entries = ref->entries + ref->count; + totalState = kIOMemoryPurgeableNonVolatile; + while (entries > &ref->entries[0]) + { + entries--; + + err = purgeableControlBits(newState, &control, &state); + if (KERN_SUCCESS != err) break; + err = mach_memory_entry_purgable_control(entries->entry, control, &state); + if (KERN_SUCCESS != err) break; + err = purgeableStateBits(&state); + if (KERN_SUCCESS != err) break; + + if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty; + else if (kIOMemoryPurgeableEmpty == totalState) continue; + else if (kIOMemoryPurgeableVolatile == totalState) continue; + else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile; + else totalState = kIOMemoryPurgeableNonVolatile; + } + + if (oldState) *oldState = totalState; + return (err); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + IOMemoryDescriptor * IOMemoryDescriptor::withAddress(void * address, IOByteCount length, IODirection direction) { return IOMemoryDescriptor:: - withAddress((vm_address_t) address, length, direction, kernel_task); + withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task); } +#ifndef __LP64__ IOMemoryDescriptor * -IOMemoryDescriptor::withAddress(vm_address_t address, +IOMemoryDescriptor::withAddress(IOVirtualAddress address, IOByteCount length, IODirection direction, task_t task) { -#if TEST_V64 - if (task) - { - IOOptionBits options = (IOOptionBits) direction; - if (task == kernel_task) - options |= kIOMemoryAutoPrepare; - return (IOMemoryDescriptor::withAddressRange(address, length, options, task)); - } -#endif IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; if (that) { @@ -365,6 +1061,7 @@ 
IOMemoryDescriptor::withAddress(vm_address_t address, } return 0; } +#endif /* !__LP64__ */ IOMemoryDescriptor * IOMemoryDescriptor::withPhysicalAddress( @@ -372,19 +1069,10 @@ IOMemoryDescriptor::withPhysicalAddress( IOByteCount length, IODirection direction ) { -#if TEST_P64 - return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL)); -#endif - IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; - if (self - && !self->initWithPhysicalAddress(address, length, direction)) { - self->release(); - return 0; - } - - return self; + return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL)); } +#ifndef __LP64__ IOMemoryDescriptor * IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, UInt32 withCount, @@ -402,6 +1090,7 @@ IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, } return 0; } +#endif /* !__LP64__ */ IOMemoryDescriptor * IOMemoryDescriptor::withAddressRange(mach_vm_address_t address, @@ -438,7 +1127,7 @@ IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges, /* - * withRanges: + * withOptions: * * Create a new IOMemoryDescriptor. The buffer is made up of several * virtual address ranges, from a given task. @@ -465,7 +1154,6 @@ IOMemoryDescriptor::withOptions(void * buffers, return self; } -// Can't leave abstract but this should never be used directly, bool IOMemoryDescriptor::initWithOptions(void * buffers, UInt32 count, UInt32 offset, @@ -473,11 +1161,10 @@ bool IOMemoryDescriptor::initWithOptions(void * buffers, IOOptionBits options, IOMapper * mapper) { - // @@@ gvdl: Should I panic? - panic("IOMD::initWithOptions called\n"); - return 0; + return( false ); } +#ifndef __LP64__ IOMemoryDescriptor * IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, @@ -501,14 +1188,9 @@ IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, IOByteCount length, IODirection direction) { - IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor; - - if (self && !self->initSubRange(of, offset, length, direction)) { - self->release(); - self = 0; - } - return self; + return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe)); } +#endif /* !__LP64__ */ IOMemoryDescriptor * IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD) @@ -526,101 +1208,42 @@ IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalM IOMemoryDescriptor * IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD) { - ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry(); + IOMemoryReference * memRef; + + if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0); - if (!sharedMem) - return 0; - - if (sharedMem == originalMD->_memEntry) { + if (memRef == originalMD->_memRef) + { originalMD->retain(); // Add a new reference to ourselves - ipc_port_release_send(sharedMem); // Remove extra send right + originalMD->memoryReferenceRelease(memRef); return originalMD; } IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor; - typePersMDData initData = { originalMD, sharedMem }; - - if (self - && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) { - self->release(); - self = 0; - } - return self; -} - -void *IOGeneralMemoryDescriptor::createNamedEntry() -{ - kern_return_t error; - ipc_port_t sharedMem; - - IOOptionBits type = _flags & kIOMemoryTypeMask; - - user_addr_t range0Addr; - IOByteCount 
range0Len; - getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0); - range0Addr = trunc_page_64(range0Addr); - - vm_size_t size = ptoa_32(_pages); - vm_address_t kernelPage = (vm_address_t) range0Addr; - - vm_map_t theMap = ((_task == kernel_task) - && (kIOMemoryBufferPageable & _flags)) - ? IOPageableMapForAddress(kernelPage) - : get_task_map(_task); - - memory_object_size_t actualSize = size; - vm_prot_t prot = VM_PROT_READ; -#if CONFIG_EMBEDDED - if (kIODirectionOut != (kIODirectionOutIn & _flags)) -#endif - prot |= VM_PROT_WRITE; - - if (_memEntry) - prot |= MAP_MEM_NAMED_REUSE; - - error = mach_make_memory_entry_64(theMap, - &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry); - - if (KERN_SUCCESS == error) { - if (actualSize == size) { - return sharedMem; - } else { -#if IOASSERT - IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n", - (UInt64)range0Addr, (UInt32)actualSize, size); -#endif - ipc_port_release_send( sharedMem ); - } - } + IOMDPersistentInitData initData = { originalMD, memRef }; - return MACH_PORT_NULL; + if (self + && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) { + self->release(); + self = 0; + } + return self; } -/* - * initWithAddress: - * - * Initialize an IOMemoryDescriptor. The buffer is a virtual address - * relative to the specified task. If no task is supplied, the kernel - * task is implied. - * - * An IOMemoryDescriptor can be re-used by calling initWithAddress or - * initWithRanges again on an existing instance -- note this behavior - * is not commonly supported in other I/O Kit classes, although it is - * supported here. - */ +#ifndef __LP64__ bool IOGeneralMemoryDescriptor::initWithAddress(void * address, IOByteCount withLength, IODirection withDirection) { - _singleRange.v.address = (vm_address_t) address; + _singleRange.v.address = (vm_offset_t) address; _singleRange.v.length = withLength; return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true); } bool -IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address, +IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address, IOByteCount withLength, IODirection withDirection, task_t withTask) @@ -685,6 +1308,7 @@ IOGeneralMemoryDescriptor::initWithRanges( return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0); } +#endif /* !__LP64__ */ /* * initWithOptions: @@ -710,11 +1334,22 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, { IOOptionBits type = options & kIOMemoryTypeMask; +#ifndef __LP64__ + if (task + && (kIOMemoryTypeVirtual == type) + && vm_map_is_64bit(get_task_map(task)) + && ((IOVirtualRange *) buffers)->address) + { + OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()"); + return false; + } +#endif /* !__LP64__ */ + // Grab the original MD's configuation data to initialse the // arguments to this function. 
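The persistent-MD path here adopts an IOMemoryReference created earlier by withPersistentMemoryDescriptor(), which either reuses the original descriptor's _memRef (bumping its count) or hands the new descriptor a fresh reference. A rough sketch of the reference-count discipline this relies on follows; Ref and std::atomic are illustrative stand-ins for IOMemoryReference and OSIncrementAtomic/OSDecrementAtomic.

#include <atomic>
#include <cassert>

struct Ref {
    std::atomic<int> refCount{1};   // memoryReferenceAlloc() starts the count at 1
};

static void retainRef(Ref *r)  { r->refCount.fetch_add(1); }
// Returns true when the caller dropped the last reference and must free it,
// matching memoryReferenceRelease(): the previous count was 1.
static bool releaseRef(Ref *r) { return r->refCount.fetch_sub(1) == 1; }

int main()
{
    Ref *shared = new Ref;        // created for the original descriptor
    retainRef(shared);            // a persistent copy reuses the same reference
    assert(!releaseRef(shared));  // first release: still referenced elsewhere
    assert(releaseRef(shared));   // last release: now safe to free
    delete shared;
    return 0;
}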
if (kIOMemoryTypePersistentMD == type) { - typePersMDData *initData = (typePersMDData *) buffers; + IOMDPersistentInitData *initData = (typeof(initData)) buffers; const IOGeneralMemoryDescriptor *orig = initData->fMD; ioGMDData *dataP = getDataP(orig->_memoryEntries); @@ -723,11 +1358,11 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, if ( !(orig->_flags & kIOMemoryPersistent) || !dataP) return false; - _memEntry = initData->fMemEntry; // Grab the new named entry - options = orig->_flags | kIOMemoryAsReference; - _singleRange = orig->_singleRange; // Initialise our range - buffers = &_singleRange; - count = 1; + _memRef = initData->fMemRef; // Grab the new named entry + options = orig->_flags & ~kIOMemoryAsReference; + type = options & kIOMemoryTypeMask; + buffers = orig->_ranges.v; + count = orig->_rangesCount; // Now grab the original task and whatever mapper was previously used task = orig->_task; @@ -739,24 +1374,18 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, switch (type) { case kIOMemoryTypeUIO: case kIOMemoryTypeVirtual: +#ifndef __LP64__ case kIOMemoryTypeVirtual64: +#endif /* !__LP64__ */ assert(task); if (!task) return false; - - if (vm_map_is_64bit(get_task_map(task)) - && (kIOMemoryTypeVirtual == type) - && ((IOVirtualRange *) buffers)->address) - { - OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()"); - return false; - } break; case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task +#ifndef __LP64__ case kIOMemoryTypePhysical64: - mapper = kIOMapperNone; - +#endif /* !__LP64__ */ case kIOMemoryTypeUPL: assert(!task); break; @@ -784,20 +1413,29 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, while (_wireCount) complete(); } - if (_ranges.v && _rangesIsAllocated) + if (_ranges.v && !(kIOMemoryAsReference & _flags)) { if (kIOMemoryTypeUIO == type) uio_free((uio_t) _ranges.v); +#ifndef __LP64__ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) IODelete(_ranges.v64, IOAddressRange, _rangesCount); +#endif /* !__LP64__ */ else IODelete(_ranges.v, IOVirtualRange, _rangesCount); } - if (_memEntry) - { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; } - if (_mappings) - _mappings->flushCollection(); + options |= (kIOMemoryRedirected & _flags); + if (!(kIOMemoryRedirected & options)) + { + if (_memRef) + { + memoryReferenceRelease(_memRef); + _memRef = 0; + } + if (_mappings) + _mappings->flushCollection(); + } } else { if (!super::init()) @@ -806,20 +1444,28 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, } // Grab the appropriate mapper - if (mapper == kIOMapperNone) + if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone; + if (kIOMemoryMapperNone & options) mapper = 0; // No Mapper else if (mapper == kIOMapperSystem) { IOMapper::checkForSystemMapper(); gIOSystemMapper = mapper = IOMapper::gSystem; } + // Temp binary compatibility for kIOMemoryThreadSafe + if (kIOMemoryReserved6156215 & options) + { + options &= ~kIOMemoryReserved6156215; + options |= kIOMemoryThreadSafe; + } // Remove the dynamic internal use flags from the initial setting options &= ~(kIOMemoryPreparedReadOnly); _flags = options; _task = task; - // DEPRECATED variable initialisation +#ifndef __LP64__ _direction = (IODirection) (_flags & kIOMemoryDirectionMask); +#endif /* !__LP64__ */ __iomd_reservedA = 0; __iomd_reservedB = 0; @@ -841,17 +1487,8 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, ioGMDData 
*dataP; unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1); - if (!_memoryEntries) { - _memoryEntries = OSData::withCapacity(dataSize); - if (!_memoryEntries) - return false; - } - else if (!_memoryEntries->initWithCapacity(dataSize)) - return false; - - _memoryEntries->appendBytes(0, sizeof(ioGMDData)); + if (!initMemoryEntries(dataSize, mapper)) return (false); dataP = getDataP(_memoryEntries); - dataP->fMapper = mapper; dataP->fPageCnt = 0; // _wireCount++; // UPLs start out life wired @@ -860,31 +1497,26 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset); ioPLBlock iopl; - upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers); - iopl.fIOPL = (upl_t) buffers; - // Set the flag kIOPLOnDevice convieniently equal to 1 - iopl.fFlags = pageList->device | kIOPLExternUPL; - iopl.fIOMDOffset = 0; + upl_set_referenced(iopl.fIOPL, true); + upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL); + + if (upl_get_size(iopl.fIOPL) < (count + offset)) + panic("short external upl"); _highestPage = upl_get_highest_page(iopl.fIOPL); + // Set the flag kIOPLOnDevice convieniently equal to 1 + iopl.fFlags = pageList->device | kIOPLExternUPL; if (!pageList->device) { // Pre-compute the offset into the UPL's page list pageList = &pageList[atop_32(offset)]; offset &= PAGE_MASK; - if (mapper) { - iopl.fMappedBase = mapper->iovmAlloc(_pages); - mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages); - } - else - iopl.fMappedBase = 0; } - else - iopl.fMappedBase = 0; + iopl.fIOMDOffset = 0; + iopl.fMappedPage = 0; iopl.fPageInfo = (vm_address_t) pageList; iopl.fPageOffset = offset; - _memoryEntries->appendBytes(&iopl, sizeof(iopl)); } else { @@ -893,7 +1525,9 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, // Initialize the memory descriptor if (options & kIOMemoryAsReference) { +#ifndef __LP64__ _rangesIsAllocated = false; +#endif /* !__LP64__ */ // Hack assignment to get the buffer arg into _ranges. 
// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't @@ -902,25 +1536,51 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, _ranges.v = (IOVirtualRange *) buffers; } else { +#ifndef __LP64__ _rangesIsAllocated = true; - switch (_flags & kIOMemoryTypeMask) +#endif /* !__LP64__ */ + switch (type) { case kIOMemoryTypeUIO: _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers); break; +#ifndef __LP64__ case kIOMemoryTypeVirtual64: case kIOMemoryTypePhysical64: + if (count == 1 + && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL + ) { + if (kIOMemoryTypeVirtual64 == type) + type = kIOMemoryTypeVirtual; + else + type = kIOMemoryTypePhysical; + _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference; + _rangesIsAllocated = false; + _ranges.v = &_singleRange.v; + _singleRange.v.address = ((IOAddressRange *) buffers)->address; + _singleRange.v.length = ((IOAddressRange *) buffers)->length; + break; + } _ranges.v64 = IONew(IOAddressRange, count); if (!_ranges.v64) return false; bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange)); break; +#endif /* !__LP64__ */ case kIOMemoryTypeVirtual: case kIOMemoryTypePhysical: - _ranges.v = IONew(IOVirtualRange, count); - if (!_ranges.v) - return false; + if (count == 1) { + _flags |= kIOMemoryAsReference; +#ifndef __LP64__ + _rangesIsAllocated = false; +#endif /* !__LP64__ */ + _ranges.v = &_singleRange.v; + } else { + _ranges.v = IONew(IOVirtualRange, count); + if (!_ranges.v) + return false; + } bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange)); break; } @@ -931,8 +1591,8 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, UInt32 length = 0; UInt32 pages = 0; for (unsigned ind = 0; ind < count; ind++) { - user_addr_t addr; - UInt32 len; + mach_vm_address_t addr; + mach_vm_size_t len; // addr & len are returned by this function getAddrLenForInd(addr, len, type, vec, ind); @@ -960,21 +1620,16 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, ioGMDData *dataP; unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2); - if (!_memoryEntries) { - _memoryEntries = OSData::withCapacity(dataSize); - if (!_memoryEntries) - return false; - } - else if (!_memoryEntries->initWithCapacity(dataSize)) - return false; - - _memoryEntries->appendBytes(0, sizeof(ioGMDData)); + if (!initMemoryEntries(dataSize, mapper)) return false; dataP = getDataP(_memoryEntries); - dataP->fMapper = mapper; dataP->fPageCnt = _pages; - if ( (kIOMemoryPersistent & _flags) && !_memEntry) - _memEntry = createNamedEntry(); + if ( (kIOMemoryPersistent & _flags) && !_memRef) + { + IOReturn + err = memoryReferenceCreate(0, &_memRef); + if (kIOReturnSuccess != err) return false; + } if ((_flags & kIOMemoryAutoPrepare) && prepare() != kIOReturnSuccess) @@ -997,53 +1652,69 @@ void IOGeneralMemoryDescriptor::free() if( reserved) { LOCK; - reserved->memory = 0; + reserved->dp.memory = 0; UNLOCK; } - - if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) + { + ioGMDData * dataP; + if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + { + dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages); + dataP->fMappedBase = 0; + } + } + else { - while (_wireCount) - complete(); + while (_wireCount) complete(); } - if (_memoryEntries) - _memoryEntries->release(); - if (_ranges.v && _rangesIsAllocated) + if (_memoryEntries) _memoryEntries->release(); + + 
if (_ranges.v && !(kIOMemoryAsReference & _flags)) { if (kIOMemoryTypeUIO == type) uio_free((uio_t) _ranges.v); +#ifndef __LP64__ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) IODelete(_ranges.v64, IOAddressRange, _rangesCount); +#endif /* !__LP64__ */ else IODelete(_ranges.v, IOVirtualRange, _rangesCount); _ranges.v = NULL; } - if (reserved && reserved->devicePager) - device_pager_deallocate( (memory_object_t) reserved->devicePager ); - - // memEntry holds a ref on the device pager which owns reserved - // (ExpansionData) so no reserved access after this point - if (_memEntry) - ipc_port_release_send( (ipc_port_t) _memEntry ); + if (reserved) + { + if (reserved->dp.devicePager) + { + // memEntry holds a ref on the device pager which owns reserved + // (IOMemoryDescriptorReserved) so no reserved access after this point + device_pager_deallocate( (memory_object_t) reserved->dp.devicePager ); + } + else + IODelete(reserved, IOMemoryDescriptorReserved, 1); + reserved = NULL; + } - if (_prepareLock) - IOLockFree(_prepareLock); + if (_memRef) memoryReferenceRelease(_memRef); + if (_prepareLock) IOLockFree(_prepareLock); super::free(); } -/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel() -/* DEPRECATED */ { - panic("IOGMD::unmapFromKernel deprecated"); -/* DEPRECATED */ } -/* DEPRECATED */ -/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) -/* DEPRECATED */ { - panic("IOGMD::mapIntoKernel deprecated"); -/* DEPRECATED */ } +#ifndef __LP64__ +void IOGeneralMemoryDescriptor::unmapFromKernel() +{ + panic("IOGMD::unmapFromKernel deprecated"); +} + +void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) +{ + panic("IOGMD::mapIntoKernel deprecated"); +} +#endif /* !__LP64__ */ /* * getDirection: @@ -1052,7 +1723,11 @@ void IOGeneralMemoryDescriptor::free() */ IODirection IOMemoryDescriptor::getDirection() const { - return _direction; +#ifndef __LP64__ + if (_direction) + return _direction; +#endif /* !__LP64__ */ + return (IODirection) (_flags & kIOMemoryDirectionMask); } /* @@ -1075,6 +1750,7 @@ IOOptionBits IOMemoryDescriptor::getTag( void ) return( _tag); } +#ifndef __LP64__ // @@@ gvdl: who is using this API? Seems like a wierd thing to implement. IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length ) @@ -1088,26 +1764,30 @@ IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used } +#endif /* !__LP64__ */ IOByteCount IOMemoryDescriptor::readBytes (IOByteCount offset, void *bytes, IOByteCount length) { - addr64_t dstAddr = (addr64_t) (UInt32) bytes; + addr64_t dstAddr = CAST_DOWN(addr64_t, bytes); IOByteCount remaining; // Assert that this entire I/O is withing the available range - assert(offset < _length); + assert(offset <= _length); assert(offset + length <= _length); if (offset >= _length) { return 0; } + if (kIOMemoryThreadSafe & _flags) + LOCK; + remaining = length = min(length, _length - offset); while (remaining) { // (process another target segment?) 
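This loop in readBytes() walks the descriptor one physical segment at a time, clamping each copy to both the segment length and the bytes still outstanding. A rough userspace sketch of the same walk follows; the Segment list and memcpy are illustrative stand-ins for getPhysicalSegment() and copypv().

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative segment description; the kernel obtains these from getPhysicalSegment().
struct Segment { const uint8_t *base; size_t length; };

static size_t readBytesModel(const std::vector<Segment> &segs,
                             size_t offset, void *bytes, size_t length)
{
    uint8_t *dst = static_cast<uint8_t *>(bytes);
    size_t remaining = length;
    for (const Segment &s : segs) {
        if (offset >= s.length) { offset -= s.length; continue; }   // skip to the segment holding 'offset'
        size_t chunk = std::min(s.length - offset, remaining);      // clamp to this segment and to 'remaining'
        memcpy(dst, s.base + offset, chunk);                        // the kernel copies with copypv()
        dst += chunk; remaining -= chunk; offset = 0;
        if (!remaining) break;
    }
    return length - remaining;   // bytes actually copied, as readBytes() reports
}

int main()
{
    uint8_t a[4] = {1,2,3,4}, b[4] = {5,6,7,8}, out[6] = {};
    std::vector<Segment> segs = { {a, sizeof a}, {b, sizeof b} };
    return (readBytesModel(segs, 2, out, 6) == 6) ? 0 : 1;   // copies bytes 3,4,5,6,7,8
}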
addr64_t srcAddr64; IOByteCount srcLen; - srcAddr64 = getPhysicalSegment64(offset, &srcLen); + srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone); if (!srcAddr64) break; @@ -1123,19 +1803,23 @@ IOByteCount IOMemoryDescriptor::readBytes remaining -= srcLen; } + if (kIOMemoryThreadSafe & _flags) + UNLOCK; + assert(!remaining); return length - remaining; } IOByteCount IOMemoryDescriptor::writeBytes - (IOByteCount offset, const void *bytes, IOByteCount length) + (IOByteCount inoffset, const void *bytes, IOByteCount length) { - addr64_t srcAddr = (addr64_t) (UInt32) bytes; + addr64_t srcAddr = CAST_DOWN(addr64_t, bytes); IOByteCount remaining; + IOByteCount offset = inoffset; // Assert that this entire I/O is withing the available range - assert(offset < _length); + assert(offset <= _length); assert(offset + length <= _length); assert( !(kIOMemoryPreparedReadOnly & _flags) ); @@ -1144,12 +1828,15 @@ IOByteCount IOMemoryDescriptor::writeBytes return 0; } + if (kIOMemoryThreadSafe & _flags) + LOCK; + remaining = length = min(length, _length - offset); while (remaining) { // (process another target segment?) addr64_t dstAddr64; IOByteCount dstLen; - dstAddr64 = getPhysicalSegment64(offset, &dstLen); + dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); if (!dstAddr64) break; @@ -1157,29 +1844,167 @@ IOByteCount IOMemoryDescriptor::writeBytes if (dstLen > remaining) dstLen = remaining; - copypv(srcAddr, (addr64_t) dstAddr64, dstLen, - cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); - - srcAddr += dstLen; + if (!srcAddr) bzero_phys(dstAddr64, dstLen); + else + { + copypv(srcAddr, (addr64_t) dstAddr64, dstLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); + srcAddr += dstLen; + } offset += dstLen; remaining -= dstLen; } + if (kIOMemoryThreadSafe & _flags) + UNLOCK; + assert(!remaining); + if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length); + return length - remaining; } -// osfmk/device/iokit_rpc.c -extern "C" unsigned int IODefaultCacheBits(addr64_t pa); +#ifndef __LP64__ +void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) +{ + panic("IOGMD::setPosition deprecated"); +} +#endif /* !__LP64__ */ + +static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32); + +uint64_t +IOGeneralMemoryDescriptor::getPreparationID( void ) +{ + ioGMDData *dataP; + + if (!_wireCount) + return (kIOPreparationIDUnprepared); + + if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical) + || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) + { + IOMemoryDescriptor::setPreparationID(); + return (IOMemoryDescriptor::getPreparationID()); + } + + if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) + return (kIOPreparationIDUnprepared); + + if (kIOPreparationIDUnprepared == dataP->fPreparationID) + { + dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID); + } + return (dataP->fPreparationID); +} + +IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void ) +{ + if (!reserved) + { + reserved = IONew(IOMemoryDescriptorReserved, 1); + if (reserved) + bzero(reserved, sizeof(IOMemoryDescriptorReserved)); + } + return (reserved); +} + +void IOMemoryDescriptor::setPreparationID( void ) +{ + if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) + { +#if defined(__ppc__ ) + reserved->preparationID = gIOMDPreparationID++; +#else + reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID); +#endif + } +} -/* DEPRECATED 
*/ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) -/* DEPRECATED */ { - panic("IOGMD::setPosition deprecated"); -/* DEPRECATED */ } +uint64_t IOMemoryDescriptor::getPreparationID( void ) +{ + if (reserved) + return (reserved->preparationID); + else + return (kIOPreparationIDUnsupported); +} IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const { + IOReturn err = kIOReturnSuccess; + DMACommandOps params; + IOGeneralMemoryDescriptor * md = const_cast(this); + ioGMDData *dataP; + + params = (op & ~kIOMDDMACommandOperationMask & op); + op &= kIOMDDMACommandOperationMask; + + if (kIOMDDMAMap == op) + { + if (dataSize < sizeof(IOMDDMAMapArgs)) + return kIOReturnUnderrun; + + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; + + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); + + if (_memoryEntries && data->fMapper) + { + bool remap; + bool whole = ((data->fOffset == 0) && (data->fLength == _length)); + dataP = getDataP(_memoryEntries); + + if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits; + if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment; + + remap = (dataP->fDMAMapNumAddressBits < 64) + && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); + remap |= (dataP->fDMAMapAlignment > page_size); + remap |= (!whole); + if (remap || !dataP->fMappedBase) + { +// if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params); + err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount); + if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase) + { + dataP->fMappedBase = data->fAlloc; + data->fAllocCount = 0; // IOMD owns the alloc now + } + } + else + { + data->fAlloc = dataP->fMappedBase; + data->fAllocCount = 0; // IOMD owns the alloc + } + data->fMapContig = !dataP->fDiscontig; + } + + return (err); + } + + if (kIOMDAddDMAMapSpec == op) + { + if (dataSize < sizeof(IODMAMapSpecification)) + return kIOReturnUnderrun; + + IODMAMapSpecification * data = (IODMAMapSpecification *) vData; + + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); + + if (_memoryEntries) + { + dataP = getDataP(_memoryEntries); + if (data->numAddressBits < dataP->fDMAMapNumAddressBits) + dataP->fDMAMapNumAddressBits = data->numAddressBits; + if (data->alignment > dataP->fDMAMapAlignment) + dataP->fDMAMapAlignment = data->alignment; + } + return kIOReturnSuccess; + } + if (kIOMDGetCharacteristics == op) { if (dataSize < sizeof(IOMDDMACharacteristics)) @@ -1189,29 +2014,36 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * data->fLength = _length; data->fSGCount = _rangesCount; data->fPages = _pages; - data->fDirection = _direction; + data->fDirection = getDirection(); if (!_wireCount) data->fIsPrepared = false; else { data->fIsPrepared = true; data->fHighestPage = _highestPage; - if (_memoryEntries) { - ioGMDData *gmdData = getDataP(_memoryEntries); - ioPLBlock *ioplList = getIOPLList(gmdData); - UInt count = getNumIOPL(_memoryEntries, gmdData); - - data->fIsMapped = (gmdData->fMapper && _pages && (count > 0) - && ioplList[0].fMappedBase); + if (_memoryEntries) + { + dataP = getDataP(_memoryEntries); + ioPLBlock 
*ioplList = getIOPLList(dataP); + UInt count = getNumIOPL(_memoryEntries, dataP); if (count == 1) data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK; } - else - data->fIsMapped = false; } return kIOReturnSuccess; - } - else if (!(kIOMDWalkSegments & op)) + +#if IOMD_DEBUG_DMAACTIVE + } else if (kIOMDDMAActive == op) { + if (params) OSIncrementAtomic(&md->__iomd_reservedA); + else { + if (md->__iomd_reservedA) + OSDecrementAtomic(&md->__iomd_reservedA); + else + panic("kIOMDSetDMAInactive"); + } +#endif /* IOMD_DEBUG_DMAACTIVE */ + + } else if (kIOMDWalkSegments != op) return kIOReturnBadArgument; // Get the next segment @@ -1230,12 +2062,32 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * UInt offset = isP->fIO.fOffset; bool mapped = isP->fIO.fMapped; + if (IOMapper::gSystem && mapped + && (!(kIOMemoryHostOnly & _flags)) + && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase)) +// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase)) + { + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); + + dataP = getDataP(_memoryEntries); + if (dataP->fMapper) + { + IODMAMapSpecification mapSpec; + bzero(&mapSpec, sizeof(mapSpec)); + mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; + mapSpec.alignment = dataP->fDMAMapAlignment; + err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL); + if (kIOReturnSuccess != err) return (err); + } + } + if (offset >= _length) return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError; // Validate the previous offset UInt ind, off2Ind = isP->fOffset2Index; - if ((kIOMDFirstSegment != op) + if (!params && offset && (offset == isP->fNextOffset || off2Ind <= offset)) ind = isP->fIndex; @@ -1244,13 +2096,15 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * UInt length; UInt64 address; + + if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { // Physical address based memory descriptor const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0]; // Find the range after the one that contains the offset - UInt len; + mach_vm_size_t len; for (len = 0; off2Ind <= offset; ind++) { len = physP[ind].length; off2Ind += len; @@ -1260,18 +2114,27 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * length = off2Ind - offset; address = physP[ind - 1].address + len - length; - // see how far we can coalesce ranges - while (ind < _rangesCount && address + length == physP[ind].address) { - len = physP[ind].length; - length += len; - off2Ind += len; - ind++; + if (true && mapped && _memoryEntries + && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + { + address = dataP->fMappedBase + offset; + } + else + { + // see how far we can coalesce ranges + while (ind < _rangesCount && address + length == physP[ind].address) { + len = physP[ind].length; + length += len; + off2Ind += len; + ind++; + } } // correct contiguous check overshoot ind--; off2Ind -= len; } +#ifndef __LP64__ else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) { // Physical address based memory descriptor @@ -1288,25 +2151,33 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * length = off2Ind - offset; address = physP[ind - 1].address + len - length; - // see how far we can coalesce ranges - while (ind < _rangesCount && address + length == physP[ind].address) { - len = physP[ind].length; - length += len; - 
off2Ind += len; - ind++; + if (true && mapped && _memoryEntries + && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + { + address = dataP->fMappedBase + offset; + } + else + { + // see how far we can coalesce ranges + while (ind < _rangesCount && address + length == physP[ind].address) { + len = physP[ind].length; + length += len; + off2Ind += len; + ind++; + } } - // correct contiguous check overshoot ind--; off2Ind -= len; - } + } +#endif /* !__LP64__ */ else do { if (!_wireCount) panic("IOGMD: not wired for the IODMACommand"); assert(_memoryEntries); - ioGMDData * dataP = getDataP(_memoryEntries); + dataP = getDataP(_memoryEntries); const ioPLBlock *ioplList = getIOPLList(dataP); UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); upl_page_info_t *pageList = getPageList(dataP); @@ -1332,9 +2203,9 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * // If a mapped address is requested and this is a pre-mapped IOPL // then just need to compute an offset relative to the mapped base. - if (mapped && ioplInfo.fMappedBase) { + if (mapped && dataP->fMappedBase) { offset += (ioplInfo.fPageOffset & PAGE_MASK); - address = ptoa_64(ioplInfo.fMappedBase) + offset; + address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset; continue; // Done leave do/while(false) now } @@ -1361,6 +2232,10 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * // Compute the starting address of this segment IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr; + if (!pageAddr) { + panic("!pageList phys_addr"); + } + address = ptoa_64(pageAddr) + offset; // length is currently set to the length of the remainider of the iopl. @@ -1393,28 +2268,70 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * } addr64_t -IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) +IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) { - IOReturn ret; - IOByteCount length = 0; - addr64_t address = 0; + IOReturn ret; + mach_vm_address_t address = 0; + mach_vm_size_t length = 0; + IOMapper * mapper = gIOSystemMapper; + IOOptionBits type = _flags & kIOMemoryTypeMask; - if (gIOSystemMapper && (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))) - return (super::getPhysicalSegment64(offset, lengthOfSegment)); + if (lengthOfSegment) + *lengthOfSegment = 0; + + if (offset >= _length) + return 0; + + // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must + // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use + // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation + // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up + + if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) + { + unsigned rangesIndex = 0; + Ranges vec = _ranges; + mach_vm_address_t addr; + + // Find starting address within the vector of ranges + for (;;) { + getAddrLenForInd(addr, length, type, vec, rangesIndex); + if (offset < length) + break; + offset -= length; // (make offset relative) + rangesIndex++; + } + + // Now that we have the starting range, + // lets find the last contiguous range + addr += offset; + length -= offset; + + for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) { + mach_vm_address_t 
newAddr; + mach_vm_size_t newLen; - if (offset < _length) // (within bounds?) + getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); + if (addr + length != newAddr) + break; + length += newLen; + } + if (addr) + address = (IOPhysicalAddress) addr; // Truncate address to 32bit + } + else { IOMDDMAWalkSegmentState _state; - IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state; + IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state; state->fOffset = offset; state->fLength = _length - offset; - state->fMapped = false; + state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly); ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state)); if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) - DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n", + DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n", ret, this, state->fOffset, state->fIOVMAddr, state->fLength); if (kIOReturnSuccess == ret) @@ -1422,60 +2339,82 @@ IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount address = state->fIOVMAddr; length = state->fLength; } - if (!address) - length = 0; + + // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even + // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up + + if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) + { + if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) + { + addr64_t origAddr = address; + IOByteCount origLen = length; + + address = mapper->mapAddr(origAddr); + length = page_size - (address & (page_size - 1)); + while ((length < origLen) + && ((address + length) == mapper->mapAddr(origAddr + length))) + length += page_size; + if (length > origLen) + length = origLen; + } + } } + if (!address) + length = 0; + if (lengthOfSegment) *lengthOfSegment = length; return (address); } -IOPhysicalAddress -IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment) +#ifndef __LP64__ +addr64_t +IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) { - IOReturn ret; - IOByteCount length = 0; - addr64_t address = 0; - -// assert(offset <= _length); + addr64_t address = 0; - if (offset < _length) // (within bounds?) 
+ if (options & _kIOMemorySourceSegment) { - IOMDDMAWalkSegmentState _state; - IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state; + address = getSourceSegment(offset, lengthOfSegment); + } + else if (options & kIOMemoryMapperNone) + { + address = getPhysicalSegment64(offset, lengthOfSegment); + } + else + { + address = getPhysicalSegment(offset, lengthOfSegment); + } - state->fOffset = offset; - state->fLength = _length - offset; - state->fMapped = true; + return (address); +} + +addr64_t +IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) +{ + return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone)); +} - ret = dmaCommandOperation( - kIOMDFirstSegment, _state, sizeof(_state)); +IOPhysicalAddress +IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment) +{ + addr64_t address = 0; + IOByteCount length = 0; - if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) - DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n", - ret, this, state->fOffset, - state->fIOVMAddr, state->fLength); - if (kIOReturnSuccess == ret) - { - address = state->fIOVMAddr; - length = state->fLength; - } + address = getPhysicalSegment(offset, lengthOfSegment, 0); - if (!address) - length = 0; - } + if (lengthOfSegment) + length = *lengthOfSegment; if ((address + length) > 0x100000000ULL) { panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s", - address, length, (getMetaClass())->getClassName()); + address, (long) length, (getMetaClass())->getClassName()); } - if (lengthOfSegment) - *lengthOfSegment = length; - return ((IOPhysicalAddress) address); } @@ -1516,74 +2455,39 @@ IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *length } IOPhysicalAddress -IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment) +IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment) { - IOPhysicalAddress address = 0; - IOPhysicalLength length = 0; - IOOptionBits type = _flags & kIOMemoryTypeMask; - - assert(offset <= _length); - - if ( type == kIOMemoryTypeUPL) - return super::getSourceSegment( offset, lengthOfSegment ); - else if ( offset < _length ) // (within bounds?) 
- { - unsigned rangesIndex = 0; - Ranges vec = _ranges; - user_addr_t addr; - - // Find starting address within the vector of ranges - for (;;) { - getAddrLenForInd(addr, length, type, vec, rangesIndex); - if (offset < length) - break; - offset -= length; // (make offset relative) - rangesIndex++; - } - - // Now that we have the starting range, - // lets find the last contiguous range - addr += offset; - length -= offset; - - for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) { - user_addr_t newAddr; - IOPhysicalLength newLen; - - getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); - if (addr + length != newAddr) - break; - length += newLen; - } - if (addr) - address = (IOPhysicalAddress) addr; // Truncate address to 32bit - else - length = 0; - } - - if ( lengthOfSegment ) *lengthOfSegment = length; - - return address; + return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0)); } -/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ -/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, -/* DEPRECATED */ IOByteCount * lengthOfSegment) -/* DEPRECATED */ { - if (_task == kernel_task) - return (void *) getSourceSegment(offset, lengthOfSegment); - else - panic("IOGMD::getVirtualSegment deprecated"); - - return 0; -/* DEPRECATED */ } -/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ +IOPhysicalAddress +IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment) +{ + return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment)); +} +void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, + IOByteCount * lengthOfSegment) +{ + if (_task == kernel_task) + return (void *) getSourceSegment(offset, lengthOfSegment); + else + panic("IOGMD::getVirtualSegment deprecated"); + return 0; +} +#endif /* !__LP64__ */ IOReturn IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const { + IOMemoryDescriptor *md = const_cast(this); + DMACommandOps params; + IOReturn err; + + params = (op & ~kIOMDDMACommandOperationMask & op); + op &= kIOMDDMACommandOperationMask; + if (kIOMDGetCharacteristics == op) { if (dataSize < sizeof(IOMDDMACharacteristics)) return kIOReturnUnderrun; @@ -1591,12 +2495,10 @@ IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt data IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData; data->fLength = getLength(); data->fSGCount = 0; - data->fDirection = _direction; - if (IOMapper::gSystem) - data->fIsMapped = true; + data->fDirection = getDirection(); data->fIsPrepared = true; // Assume prepared - fails safe } - else if (kIOMDWalkSegments & op) { + else if (kIOMDWalkSegments == op) { if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) return kIOReturnUnderrun; @@ -1604,96 +2506,149 @@ IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt data IOByteCount offset = (IOByteCount) data->fOffset; IOPhysicalLength length; - IOMemoryDescriptor *ncmd = const_cast(this); if (data->fMapped && IOMapper::gSystem) - data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length); + data->fIOVMAddr = md->getPhysicalSegment(offset, &length); else - data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length); + data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone); data->fLength = length; } - else - return kIOReturnBadArgument; + else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported; + else if 
(kIOMDDMAMap == op) + { + if (dataSize < sizeof(IOMDDMAMapArgs)) + return kIOReturnUnderrun; + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; + + if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName()); + + data->fMapContig = true; + err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount); + return (err); + } + else return kIOReturnBadArgument; return kIOReturnSuccess; } -IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ) +IOReturn +IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) { IOReturn err = kIOReturnSuccess; + vm_purgable_t control; int state; - do + if (_memRef) { - if (!_memEntry) - { - err = kIOReturnNotReady; - break; - } + err = super::setPurgeable(newState, oldState); + } + else + { + if (kIOMemoryThreadSafe & _flags) + LOCK; + do + { + // Find the appropriate vm_map for the given task + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) + { + err = kIOReturnNotReady; + break; + } + else if (!_task) + { + err = kIOReturnUnsupported; + break; + } + else + curMap = get_task_map(_task); - control = VM_PURGABLE_SET_STATE; - switch (newState) - { - case kIOMemoryPurgeableKeepCurrent: - control = VM_PURGABLE_GET_STATE; - break; + // can only do one range + Ranges vec = _ranges; + IOOptionBits type = _flags & kIOMemoryTypeMask; + mach_vm_address_t addr; + mach_vm_size_t len; + getAddrLenForInd(addr, len, type, vec, 0); - case kIOMemoryPurgeableNonVolatile: - state = VM_PURGABLE_NONVOLATILE; - break; - case kIOMemoryPurgeableVolatile: - state = VM_PURGABLE_VOLATILE; - break; - case kIOMemoryPurgeableEmpty: - state = VM_PURGABLE_EMPTY; - break; - default: - err = kIOReturnBadArgument; - break; - } + err = purgeableControlBits(newState, &control, &state); + if (kIOReturnSuccess != err) + break; + err = mach_vm_purgable_control(curMap, addr, control, &state); + if (oldState) + { + if (kIOReturnSuccess == err) + { + err = purgeableStateBits(&state); + *oldState = state; + } + } + } + while (false); + if (kIOMemoryThreadSafe & _flags) + UNLOCK; + } - if (kIOReturnSuccess != err) - break; + return (err); +} - err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state); +IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) +{ + IOReturn err = kIOReturnNotReady; - if (oldState) - { - if (kIOReturnSuccess == err) - { - switch (state) - { - case VM_PURGABLE_NONVOLATILE: - state = kIOMemoryPurgeableNonVolatile; - break; - case VM_PURGABLE_VOLATILE: - state = kIOMemoryPurgeableVolatile; - break; - case VM_PURGABLE_EMPTY: - state = kIOMemoryPurgeableEmpty; - break; - default: - state = kIOMemoryPurgeableNonVolatile; - err = kIOReturnNotReady; - break; - } - *oldState = state; - } - } - } - while (false); + if (kIOMemoryThreadSafe & _flags) LOCK; + if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState); + if (kIOMemoryThreadSafe & _flags) UNLOCK; + + return (err); +} + +IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount ) +{ + IOReturn err = kIOReturnNotReady; + + if (kIOMemoryThreadSafe & _flags) LOCK; + if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount); + if (kIOMemoryThreadSafe & _flags) UNLOCK; return (err); } + extern "C" void 
dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); +static void SetEncryptOp(addr64_t pa, unsigned int count) +{ + ppnum_t page, end; + + page = atop_64(round_page_64(pa)); + end = atop_64(trunc_page_64(pa + count)); + for (; page < end; page++) + { + pmap_clear_noencrypt(page); + } +} + +static void ClearEncryptOp(addr64_t pa, unsigned int count) +{ + ppnum_t page, end; + + page = atop_64(round_page_64(pa)); + end = atop_64(trunc_page_64(pa + count)); + for (; page < end; page++) + { + pmap_set_noencrypt(page); + } +} + IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, IOByteCount offset, IOByteCount length ) { IOByteCount remaining; + unsigned int res; void (*func)(addr64_t pa, unsigned int count) = 0; switch (options) @@ -1704,11 +2659,22 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, case kIOMemoryIncoherentIOStore: func = &dcache_incoherent_io_store64; break; + + case kIOMemorySetEncrypted: + func = &SetEncryptOp; + break; + case kIOMemoryClearEncrypted: + func = &ClearEncryptOp; + break; } if (!func) return (kIOReturnUnsupported); + if (kIOMemoryThreadSafe & _flags) + LOCK; + + res = 0x0UL; remaining = length = min(length, getLength() - offset); while (remaining) // (process another target segment?) @@ -1716,7 +2682,7 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, addr64_t dstAddr64; IOByteCount dstLen; - dstAddr64 = getPhysicalSegment64(offset, &dstLen); + dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); if (!dstAddr64) break; @@ -1730,21 +2696,23 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, remaining -= dstLen; } + if (kIOMemoryThreadSafe & _flags) + UNLOCK; + return (remaining ? kIOReturnUnderrun : kIOReturnSuccess); } -#if defined(__ppc__) || defined(__arm__) -extern vm_offset_t static_memory_end; -#define io_kernel_static_end static_memory_end -#else +#if defined(__i386__) || defined(__x86_64__) extern vm_offset_t first_avail; #define io_kernel_static_end first_avail +#else +#error io_kernel_static_end is undefined for this architecture #endif static kern_return_t io_get_kernel_static_upl( vm_map_t /* map */, - vm_address_t offset, + uintptr_t offset, vm_size_t *upl_size, upl_t *upl, upl_page_info_array_t page_list, @@ -1773,7 +2741,7 @@ io_get_kernel_static_upl( page_list[page].precious = 0; page_list[page].device = 0; if (phys > highestPage) - highestPage = page; + highestPage = phys; } *highest_page = highestPage; @@ -1786,28 +2754,13 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) IOOptionBits type = _flags & kIOMemoryTypeMask; IOReturn error = kIOReturnCannotWire; ioGMDData *dataP; - ppnum_t mapBase = 0; - IOMapper *mapper; - ipc_port_t sharedMem = (ipc_port_t) _memEntry; + upl_page_info_array_t pageInfo; + ppnum_t mapBase; - assert(!_wireCount); assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type); - if (_pages >= gIOMaximumMappedIOPageCount) - return kIOReturnNoResources; - - dataP = getDataP(_memoryEntries); - mapper = dataP->fMapper; - if (mapper && _pages) - mapBase = mapper->iovmAlloc(_pages); - - // Note that appendBytes(NULL) zeros the data up to the - // desired length. - _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t)); - dataP = 0; // May no longer be valid so lets not get tempted. 
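A minimal driver-side sketch (not taken from this diff) of how the performOperation() path above is typically exercised; the descriptor name "buffer" and the out-bound direction are assumptions, and the call simply applies the cache routine (dcache_incoherent_io_store64 here) across each physical segment as in the loop above.

#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

static IOReturn FlushBeforeDeviceRead(IOMemoryDescriptor * buffer)
{
    // Wire the pages first; performOperation() walks physical segments.
    IOReturn ret = buffer->prepare(kIODirectionOut);
    if (kIOReturnSuccess != ret)
        return ret;

    // Store (clean) the CPU caches over the whole descriptor so a
    // non-coherent device sees the data the CPU just wrote.
    ret = buffer->performOperation(kIOMemoryIncoherentIOStore,
                                   0, buffer->getLength());

    // complete() must balance prepare() whether or not the flush succeeded.
    buffer->complete(kIODirectionOut);
    return ret;
}
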
- - if (forDirection == kIODirectionNone) - forDirection = _direction; + if ((kIODirectionOutIn & forDirection) == kIODirectionNone) + forDirection = (IODirection) (forDirection | getDirection()); int uplFlags; // This Mem Desc's default flags for upl creation switch (kIODirectionOutIn & forDirection) @@ -1815,7 +2768,6 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) case kIODirectionOut: // Pages do not need to be marked as dirty on commit uplFlags = UPL_COPYOUT_FROM; - _flags |= kIOMemoryPreparedReadOnly; break; case kIODirectionIn: @@ -1823,64 +2775,97 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM break; } - uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; -#ifdef UPL_NEED_32BIT_ADDR + if (_wireCount) + { + if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) + { + OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this)); + error = kIOReturnNotWritable; + } + else error = kIOReturnSuccess; + return (error); + } + + dataP = getDataP(_memoryEntries); + IOMapper *mapper; + mapper = dataP->fMapper; + dataP->fMappedBase = 0; + + uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; if (kIODirectionPrepareToPhys32 & forDirection) - uplFlags |= UPL_NEED_32BIT_ADDR; -#endif + { + if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR; + if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32; + } + if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT; + if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO; + if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY; + + mapBase = 0; + + // Note that appendBytes(NULL) zeros the data up to the desired length + // and the length parameter is an unsigned int + size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); + if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory); + if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory); + dataP = 0; // Find the appropriate vm_map for the given task vm_map_t curMap; - if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) - curMap = 0; - else - { curMap = get_task_map(_task); } + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0; + else curMap = get_task_map(_task); // Iterate over the vector of virtual ranges Ranges vec = _ranges; - unsigned int pageIndex = 0; - IOByteCount mdOffset = 0; - ppnum_t highestPage = 0; + unsigned int pageIndex = 0; + IOByteCount mdOffset = 0; + ppnum_t highestPage = 0; + + IOMemoryEntry * memRefEntry = 0; + if (_memRef) memRefEntry = &_memRef->entries[0]; + for (UInt range = 0; range < _rangesCount; range++) { ioPLBlock iopl; - user_addr_t startPage; - IOByteCount numBytes; + mach_vm_address_t startPage; + mach_vm_size_t numBytes; ppnum_t highPage = 0; // Get the startPage address and length of vec[range] getAddrLenForInd(startPage, numBytes, type, vec, range); - iopl.fPageOffset = (short) startPage & PAGE_MASK; + iopl.fPageOffset = startPage & PAGE_MASK; numBytes += iopl.fPageOffset; startPage = trunc_page_64(startPage); if (mapper) - iopl.fMappedBase = mapBase + pageIndex; + iopl.fMappedPage = mapBase + pageIndex; else - iopl.fMappedBase = 0; + iopl.fMappedPage = 0; // Iterate over the current range, creating UPLs while (numBytes) { - dataP = getDataP(_memoryEntries); vm_address_t kernelStart = (vm_address_t) startPage; vm_map_t theMap; - if (curMap) - theMap 
= curMap; - else if (!sharedMem) { + if (curMap) theMap = curMap; + else if (_memRef) + { + theMap = NULL; + } + else + { assert(_task == kernel_task); theMap = IOPageableMapForAddress(kernelStart); } - else - theMap = NULL; - upl_page_info_array_t pageInfo = getPageList(dataP); int ioplFlags = uplFlags; + dataP = getDataP(_memoryEntries); + pageInfo = getPageList(dataP); upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; - vm_size_t ioplSize = round_page_32(numBytes); + vm_size_t ioplSize = round_page(numBytes); unsigned int numPageInfo = atop_32(ioplSize); - if (theMap == kernel_map && kernelStart < io_kernel_static_end) { + if ((theMap == kernel_map) && (kernelStart < io_kernel_static_end)) { error = io_get_kernel_static_upl(theMap, kernelStart, &ioplSize, @@ -1889,20 +2874,29 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) &numPageInfo, &highPage); } - else if (sharedMem) { - error = memory_object_iopl_request(sharedMem, - ptoa_32(pageIndex), - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &ioplFlags); + else if (_memRef) { + memory_object_offset_t entryOffset; + + entryOffset = (mdOffset - iopl.fPageOffset - memRefEntry->offset); + if (entryOffset >= memRefEntry->size) { + memRefEntry++; + if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry"); + entryOffset = 0; + } + if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset); + error = memory_object_iopl_request(memRefEntry->entry, + entryOffset, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags); } else { assert(theMap); error = vm_map_create_upl(theMap, startPage, - &ioplSize, + (upl_size_t*)&ioplSize, &iopl.fIOPL, baseInfo, &numPageInfo, @@ -1922,30 +2916,26 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) if (baseInfo->device) { numPageInfo = 1; - iopl.fFlags = kIOPLOnDevice; - // Don't translate device memory at all - if (mapper && mapBase) { - mapper->iovmFree(mapBase, _pages); - mapBase = 0; - iopl.fMappedBase = 0; - } + iopl.fFlags = kIOPLOnDevice; } else { iopl.fFlags = 0; - if (mapper) - mapper->iovmInsert(mapBase, pageIndex, - baseInfo, numPageInfo); } iopl.fIOMDOffset = mdOffset; iopl.fPageInfo = pageIndex; + if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true; +#if 0 + // used to remove the upl for auto prepares here, for some errant code + // that freed memory before the descriptor pointing at it if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL) { upl_commit(iopl.fIOPL, 0, 0); upl_deallocate(iopl.fIOPL); iopl.fIOPL = 0; } +#endif if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { // Clean up partial created and unsaved iopl @@ -1955,6 +2945,7 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) } goto abortExit; } + dataP = 0; // Check for a multiple iopl's in one virtual range pageIndex += numPageInfo; @@ -1964,8 +2955,7 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) startPage += ioplSize; mdOffset += ioplSize; iopl.fPageOffset = 0; - if (mapper) - iopl.fMappedBase = mapBase + pageIndex; + if (mapper) iopl.fMappedPage = mapBase + pageIndex; } else { mdOffset += numBytes; @@ -1976,6 +2966,8 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) _highestPage = highestPage; + if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly; + return kIOReturnSuccess; abortExit: @@ -1991,18 +2983,181 @@ abortExit: 
upl_deallocate(ioplList[range].fIOPL); } } - (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength() - - if (mapper && mapBase) - mapper->iovmFree(mapBase, _pages); + (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() } if (error == KERN_FAILURE) error = kIOReturnCannotWire; + else if (error == KERN_MEMORY_ERROR) + error = kIOReturnNoResources; return error; } +bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper) +{ + ioGMDData * dataP; + unsigned dataSize = size; + + if (!_memoryEntries) { + _memoryEntries = OSData::withCapacity(dataSize); + if (!_memoryEntries) + return false; + } + else if (!_memoryEntries->initWithCapacity(dataSize)) + return false; + + _memoryEntries->appendBytes(0, computeDataSize(0, 0)); + dataP = getDataP(_memoryEntries); + + if (mapper == kIOMapperWaitSystem) { + IOMapper::checkForSystemMapper(); + mapper = IOMapper::gSystem; + } + dataP->fMapper = mapper; + dataP->fPageCnt = 0; + dataP->fMappedBase = 0; + dataP->fDMAMapNumAddressBits = 64; + dataP->fDMAMapAlignment = 0; + dataP->fPreparationID = kIOPreparationIDUnprepared; + dataP->fDiscontig = false; + dataP->fCompletionError = false; + + return (true); +} + +IOReturn IOMemoryDescriptor::dmaMap( + IOMapper * mapper, + const IODMAMapSpecification * mapSpec, + uint64_t offset, + uint64_t length, + uint64_t * address, + ppnum_t * mapPages) +{ + IOMDDMAWalkSegmentState walkState; + IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState; + IOOptionBits mdOp; + IOReturn ret; + IOPhysicalLength segLen; + addr64_t phys, align, pageOffset; + ppnum_t base, pageIndex, pageCount; + uint64_t index; + uint32_t mapOptions = 0; + + if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; + + walkArgs->fMapped = false; + mdOp = kIOMDFirstSegment; + pageCount = 0; + for (index = 0; index < length; ) + { + if (index && (page_mask & (index + pageOffset))) break; + + walkArgs->fOffset = offset + index; + ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState)); + mdOp = kIOMDWalkSegments; + if (ret != kIOReturnSuccess) break; + phys = walkArgs->fIOVMAddr; + segLen = walkArgs->fLength; + + align = (phys & page_mask); + if (!index) pageOffset = align; + else if (align) break; + pageCount += atop_64(round_page_64(align + segLen)); + index += segLen; + } + + if (index < length) return (kIOReturnVMError); + + base = mapper->iovmMapMemory(this, offset, pageCount, + mapOptions, NULL, mapSpec); + + if (!base) return (kIOReturnNoResources); + + mdOp = kIOMDFirstSegment; + for (pageIndex = 0, index = 0; index < length; ) + { + walkArgs->fOffset = offset + index; + ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState)); + mdOp = kIOMDWalkSegments; + if (ret != kIOReturnSuccess) break; + phys = walkArgs->fIOVMAddr; + segLen = walkArgs->fLength; + + ppnum_t page = atop_64(phys); + ppnum_t count = atop_64(round_page_64(phys + segLen)) - page; + while (count--) + { + mapper->iovmInsert(base, pageIndex, page); + page++; + pageIndex++; + } + index += segLen; + } + if (pageIndex != pageCount) panic("pageIndex"); + + *address = ptoa_64(base) + pageOffset; + if (mapPages) *mapPages = pageCount; + + return (kIOReturnSuccess); +} + +IOReturn IOGeneralMemoryDescriptor::dmaMap( + IOMapper * mapper, + const IODMAMapSpecification * mapSpec, + uint64_t offset, + uint64_t length, + uint64_t * address, + ppnum_t * mapPages) +{ + IOReturn err = kIOReturnSuccess; + ioGMDData * dataP; + IOOptionBits type = _flags 
& kIOMemoryTypeMask; + + *address = 0; + if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess); + + if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64) + || offset || (length != _length)) + { + err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages); + } + else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) + { + const ioPLBlock * ioplList = getIOPLList(dataP); + upl_page_info_t * pageList; + uint32_t mapOptions = 0; + ppnum_t base; + + IODMAMapSpecification mapSpec; + bzero(&mapSpec, sizeof(mapSpec)); + mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; + mapSpec.alignment = dataP->fDMAMapAlignment; + + // For external UPLs the fPageInfo field points directly to + // the upl's upl_page_info_t array. + if (ioplList->fFlags & kIOPLExternUPL) + { + pageList = (upl_page_info_t *) ioplList->fPageInfo; + mapOptions |= kIODMAMapPagingPath; + } + else + pageList = getPageList(dataP); + + if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; + + // Check for direct device non-paged memory + if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous; + + base = mapper->iovmMapMemory( + this, offset, _pages, mapOptions, &pageList[0], &mapSpec); + *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK); + if (mapPages) *mapPages = _pages; + } + + return (err); +} + /* * prepare * @@ -2012,6 +3167,7 @@ abortExit: * the memory after the I/O transfer finishes. This method needn't * called for non-pageable memory. */ + IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) { IOReturn error = kIOReturnSuccess; @@ -2023,13 +3179,21 @@ IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) if (_prepareLock) IOLockLock(_prepareLock); - if (!_wireCount - && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) { - error = wireVirtual(forDirection); + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { + error = wireVirtual(forDirection); } if (kIOReturnSuccess == error) - _wireCount++; + { + if (1 == ++_wireCount) + { + if (kIOMemoryClearEncrypt & _flags) + { + performOperation(kIOMemoryClearEncrypted, 0, _length); + } + } + } if (_prepareLock) IOLockUnlock(_prepareLock); @@ -2045,10 +3209,11 @@ IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) * issued; the prepare() and complete() must occur in pairs, before * before and after an I/O transfer involving pageable memory. 
*/ - -IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */) + +IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection) { IOOptionBits type = _flags & kIOMemoryTypeMask; + ioGMDData * dataP; if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) return kIOReturnSuccess; @@ -2058,28 +3223,64 @@ IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */) assert(_wireCount); + if ((kIODirectionCompleteWithError & forDirection) + && (dataP = getDataP(_memoryEntries))) + dataP->fCompletionError = true; + if (_wireCount) { + if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) + { + performOperation(kIOMemorySetEncrypted, 0, _length); + } + _wireCount--; - if (!_wireCount) + if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) { IOOptionBits type = _flags & kIOMemoryTypeMask; - ioGMDData * dataP = getDataP(_memoryEntries); + dataP = getDataP(_memoryEntries); ioPLBlock *ioplList = getIOPLList(dataP); - UInt count = getNumIOPL(_memoryEntries, dataP); - - if (dataP->fMapper && _pages && ioplList[0].fMappedBase) - dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages); + UInt ind, count = getNumIOPL(_memoryEntries, dataP); - // Only complete iopls that we created which are for TypeVirtual - if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { - for (UInt ind = 0; ind < count; ind++) - if (ioplList[ind].fIOPL) { - upl_commit(ioplList[ind].fIOPL, 0, 0); - upl_deallocate(ioplList[ind].fIOPL); + if (_wireCount) + { + // kIODirectionCompleteWithDataValid & forDirection + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { + for (ind = 0; ind < count; ind++) + { + if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL); } + } + } + else + { +#if IOMD_DEBUG_DMAACTIVE + if (__iomd_reservedA) panic("complete() while dma active"); +#endif /* IOMD_DEBUG_DMAACTIVE */ + + if (dataP->fMappedBase) { + dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages); + dataP->fMappedBase = 0; + } + // Only complete iopls that we created which are for TypeVirtual + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { + for (ind = 0; ind < count; ind++) + if (ioplList[ind].fIOPL) { + if (dataP->fCompletionError) + upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/); + else + upl_commit(ioplList[ind].fIOPL, 0, 0); + upl_deallocate(ioplList[ind].fIOPL); + } + } else if (kIOMemoryTypeUPL == type) { + upl_set_referenced(ioplList[0].fIOPL, false); + } + + (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() + + dataP->fPreparationID = kIOPreparationIDUnprepared; } - (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength() } } @@ -2097,29 +3298,37 @@ IOReturn IOGeneralMemoryDescriptor::doMap( IOByteCount __length ) { +#ifndef __LP64__ if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit"); +#endif /* !__LP64__ */ - _IOMemoryMap * mapping = (_IOMemoryMap *) *__address; + kern_return_t err; + + IOMemoryMap * mapping = (IOMemoryMap *) *__address; mach_vm_size_t offset = mapping->fOffset + __offset; mach_vm_size_t length = mapping->fLength; - kern_return_t kr; - ipc_port_t sharedMem = (ipc_port_t) _memEntry; - IOOptionBits type = _flags & kIOMemoryTypeMask; Ranges vec = _ranges; - user_addr_t range0Addr = 0; - IOByteCount range0Len = 0; + mach_vm_address_t range0Addr = 0; + mach_vm_size_t range0Len = 
0; + + if ((offset >= _length) || ((offset + length) > _length)) + return( kIOReturnBadArgument ); if (vec.v) getAddrLenForInd(range0Addr, range0Len, type, vec, 0); // mapping source == dest? (could be much better) - if( _task - && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere) - && (1 == _rangesCount) && (0 == offset) - && range0Addr && (length <= range0Len) ) + if (_task + && (mapping->fAddressTask == _task) + && (mapping->fAddressMap == get_task_map(_task)) + && (options & kIOMapAnywhere) + && (1 == _rangesCount) + && (0 == offset) + && range0Addr + && (length <= range0Len)) { mapping->fAddress = range0Addr; mapping->fOptions |= kIOMapStatic; @@ -2127,130 +3336,106 @@ IOReturn IOGeneralMemoryDescriptor::doMap( return( kIOReturnSuccess ); } - if( 0 == sharedMem) { - - vm_size_t size = ptoa_32(_pages); - - if( _task) { - - memory_object_size_t actualSize = size; - vm_prot_t prot = VM_PROT_READ; - if (!(kIOMapReadOnly & options)) - prot |= VM_PROT_WRITE; - else if (kIOMapDefaultCache != (options & kIOMapCacheMask)) - prot |= VM_PROT_WRITE; - - kr = mach_make_memory_entry_64(get_task_map(_task), - &actualSize, range0Addr, - prot, &sharedMem, - NULL ); - - if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) { -#if IOASSERT - IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n", - range0Addr, (UInt32) actualSize, size); + if (!_memRef) + { + IOOptionBits createOptions = 0; + if (!(kIOMapReadOnly & options)) + { + createOptions |= kIOMemoryReferenceWrite; +#if DEVELOPMENT || DEBUG + if (kIODirectionOut == (kIODirectionOutIn & _flags)) + { + OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction"); + } #endif - kr = kIOReturnVMError; - ipc_port_release_send( sharedMem ); - } - - if( KERN_SUCCESS != kr) - sharedMem = MACH_PORT_NULL; - - } else do { // _task == 0, must be physical - - memory_object_t pager; - unsigned int flags = 0; - addr64_t pa; - IOPhysicalLength segLen; - - pa = getPhysicalSegment64( offset, &segLen ); - - if( !reserved) { - reserved = IONew( ExpansionData, 1 ); - if( !reserved) - continue; - } - reserved->pagerContig = (1 == _rangesCount); - reserved->memory = this; - - /*What cache mode do we need*/ - switch(options & kIOMapCacheMask ) { - - case kIOMapDefaultCache: - default: - flags = IODefaultCacheBits(pa); - if (DEVICE_PAGER_CACHE_INHIB & flags) - { - if (DEVICE_PAGER_GUARDED & flags) - mapping->fOptions |= kIOMapInhibitCache; - else - mapping->fOptions |= kIOMapWriteCombineCache; - } - else if (DEVICE_PAGER_WRITE_THROUGH & flags) - mapping->fOptions |= kIOMapWriteThruCache; - else - mapping->fOptions |= kIOMapCopybackCache; - break; - - case kIOMapInhibitCache: - flags = DEVICE_PAGER_CACHE_INHIB | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; - - case kIOMapWriteThruCache: - flags = DEVICE_PAGER_WRITE_THROUGH | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; + } + err = memoryReferenceCreate(createOptions, &_memRef); + if (kIOReturnSuccess != err) return (err); + } - case kIOMapCopybackCache: - flags = DEVICE_PAGER_COHERENT; - break; + memory_object_t pager; + pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0); - case kIOMapWriteCombineCache: - flags = DEVICE_PAGER_CACHE_INHIB | - DEVICE_PAGER_COHERENT; - break; - } + // pagerContig ? 
DEVICE_PAGER_CONTIGUOUS : 0; + if (!_memRef || (1 != _memRef->count)) + { + err = kIOReturnNotReadable; + break; + } - pager = device_pager_setup( (memory_object_t) 0, (int) reserved, - size, flags); - assert( pager ); + size = round_page(mapping->fLength); + flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; - if( pager) { - kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/, - size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem ); + if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2, + NULL, NULL, + &flags)) + redirUPL2 = NULL; - assert( KERN_SUCCESS == kr ); - if( KERN_SUCCESS != kr) - { - device_pager_deallocate( pager ); - pager = MACH_PORT_NULL; - sharedMem = MACH_PORT_NULL; - } - } - if( pager && sharedMem) - reserved->devicePager = pager; - else { - IODelete( reserved, ExpansionData, 1 ); - reserved = 0; + for (lock_count = 0; + IORecursiveLockHaveLock(gIOMemoryLock); + lock_count++) { + UNLOCK; + } + err = upl_transpose(redirUPL2, mapping->fRedirUPL); + for (; + lock_count; + lock_count--) { + LOCK; } - } while( false ); + if (kIOReturnSuccess != err) + { + IOLog("upl_transpose(%x)\n", err); + err = kIOReturnSuccess; + } - _memEntry = (void *) sharedMem; + if (redirUPL2) + { + upl_commit(redirUPL2, NULL, 0); + upl_deallocate(redirUPL2); + redirUPL2 = 0; + } + { + // swap the memEntries since they now refer to different vm_objects + IOMemoryReference * me = _memRef; + _memRef = mapping->fMemory->_memRef; + mapping->fMemory->_memRef = me; + } + if (pager) + err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options ); + } + while (false); } - - IOReturn result; - if (0 == sharedMem) - result = kIOReturnVMError; + // upl_transpose> // else - result = super::doMap( __addressMap, __address, - options, __offset, __length ); + { + err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); + + if ((err == KERN_SUCCESS) && pager) + { + err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options); + if (err != KERN_SUCCESS) + { + doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0); + } + else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) + { + mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); + } + } + } - return( result ); + return (err); } IOReturn IOGeneralMemoryDescriptor::doUnmap( @@ -2263,21 +3448,27 @@ IOReturn IOGeneralMemoryDescriptor::doUnmap( /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject ) - -/* inline function implementation */ -IOPhysicalAddress IOMemoryMap::getPhysicalAddress() - { return( getPhysicalSegment( 0, 0 )); } +#undef super +#define super OSObject +OSDefineMetaClassAndStructors( IOMemoryMap, OSObject ) -#undef super -#define super IOMemoryMap +OSMetaClassDefineReservedUnused(IOMemoryMap, 0); +OSMetaClassDefineReservedUnused(IOMemoryMap, 1); +OSMetaClassDefineReservedUnused(IOMemoryMap, 2); +OSMetaClassDefineReservedUnused(IOMemoryMap, 3); +OSMetaClassDefineReservedUnused(IOMemoryMap, 4); +OSMetaClassDefineReservedUnused(IOMemoryMap, 5); +OSMetaClassDefineReservedUnused(IOMemoryMap, 6); +OSMetaClassDefineReservedUnused(IOMemoryMap, 7); -OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap) +/* ex-inline function implementation */ +IOPhysicalAddress IOMemoryMap::getPhysicalAddress() + { 
return( getPhysicalSegment( 0, 0 )); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool _IOMemoryMap::init( +bool IOMemoryMap::init( task_t intoTask, mach_vm_address_t toAddress, IOOptionBits _options, @@ -2304,7 +3495,7 @@ bool _IOMemoryMap::init( return (true); } -bool _IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset) +bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset) { if (!_memory) return(false); @@ -2328,129 +3519,6 @@ bool _IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_siz return( true ); } -struct IOMemoryDescriptorMapAllocRef -{ - ipc_port_t sharedMem; - mach_vm_address_t mapped; - mach_vm_size_t size; - mach_vm_size_t sourceOffset; - IOOptionBits options; -}; - -static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) -{ - IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref; - IOReturn err; - - do { - if( ref->sharedMem) - { - vm_prot_t prot = VM_PROT_READ - | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE); - - // VM system requires write access to change cache mode - if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask)) - prot |= VM_PROT_WRITE; - - // set memory entry cache - vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY; - switch (ref->options & kIOMapCacheMask) - { - case kIOMapInhibitCache: - SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode); - break; - - case kIOMapWriteThruCache: - SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode); - break; - - case kIOMapWriteCombineCache: - SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode); - break; - - case kIOMapCopybackCache: - SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode); - break; - - case kIOMapDefaultCache: - default: - SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode); - break; - } - - vm_size_t unused = 0; - - err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/, - memEntryCacheMode, NULL, ref->sharedMem ); - if (KERN_SUCCESS != err) - IOLog("MAP_MEM_ONLY failed %d\n", err); - - err = mach_vm_map( map, - &ref->mapped, - ref->size, 0 /* mask */, - (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT), - ref->sharedMem, ref->sourceOffset, - false, // copy - prot, // cur - prot, // max - VM_INHERIT_NONE); - - if( KERN_SUCCESS != err) { - ref->mapped = 0; - continue; - } - - } - else - { - err = mach_vm_allocate( map, &ref->mapped, ref->size, - ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT) ); - if( KERN_SUCCESS != err) { - ref->mapped = 0; - continue; - } - // we have to make sure that these guys don't get copied if we fork. 
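A minimal usage sketch, not part of the change itself, of the mapping path these hunks feed: "md" and "userTask" are assumed to be a live descriptor and the client task, and createMappingInTask() ends up in IOGeneralMemoryDescriptor::doMap() / memoryReferenceMap() above.

#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

static IOMemoryMap * MapIntoClient(IOMemoryDescriptor * md, task_t userTask)
{
    // Let the VM choose the address and give the client a read-only view.
    IOMemoryMap * map = md->createMappingInTask(userTask,
                                                0,
                                                kIOMapAnywhere | kIOMapReadOnly);
    if (!map)
        return NULL;

    // The map object retains the descriptor; release the map to unmap.
    IOLog("mapped at 0x%qx, length 0x%llx\n",
          map->getAddress(), (uint64_t) map->getLength());
    return map;
}
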
- err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE); - assert( KERN_SUCCESS == err ); - } - } - while( false ); - - return( err ); -} - -kern_return_t -IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable, - mach_vm_size_t offset, - mach_vm_address_t * address, mach_vm_size_t length) -{ - IOReturn err; - IOMemoryDescriptorMapAllocRef ref; - - ref.sharedMem = entry; - ref.sourceOffset = trunc_page_64(offset); - ref.options = options; - - ref.size = length; - - if (options & kIOMapAnywhere) - // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE - ref.mapped = 0; - else - ref.mapped = *address; - - if( ref.sharedMem && (map == kernel_map) && pageable) - err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); - else - err = IOMemoryDescriptorMapAlloc( map, &ref ); - - *address = ref.mapped; - return (err); -} - - IOReturn IOMemoryDescriptor::doMap( vm_map_t __addressMap, IOVirtualAddress * __address, @@ -2458,119 +3526,27 @@ IOReturn IOMemoryDescriptor::doMap( IOByteCount __offset, IOByteCount __length ) { - if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit"); - - _IOMemoryMap * mapping = (_IOMemoryMap *) *__address; - mach_vm_size_t offset = mapping->fOffset + __offset; - mach_vm_size_t length = mapping->fLength; - - IOReturn err = kIOReturnSuccess; - memory_object_t pager; - mach_vm_size_t pageOffset; - IOPhysicalAddress sourceAddr; + return (kIOReturnUnsupported); +} - do +IOReturn IOMemoryDescriptor::handleFault( + void * _pager, + mach_vm_size_t sourceOffset, + mach_vm_size_t length) +{ + if( kIOMemoryRedirected & _flags) { - sourceAddr = getSourceSegment( offset, NULL ); - pageOffset = sourceAddr - trunc_page_32( sourceAddr ); - - if( reserved) - pager = (memory_object_t) reserved->devicePager; - else - pager = MACH_PORT_NULL; - - if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options)) - { - upl_t redirUPL2; - vm_size_t size; - int flags; - - if (!_memEntry) - { - err = kIOReturnNotReadable; - continue; - } - - size = mapping->fLength + pageOffset; - flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL - | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; - - if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2, - NULL, NULL, - &flags)) - redirUPL2 = NULL; - - err = upl_transpose(redirUPL2, mapping->fRedirUPL); - if (kIOReturnSuccess != err) - { - IOLog("upl_transpose(%x)\n", err); - err = kIOReturnSuccess; - } - - if (redirUPL2) - { - upl_commit(redirUPL2, NULL, 0); - upl_deallocate(redirUPL2); - redirUPL2 = 0; - } - { - // swap the memEntries since they now refer to different vm_objects - void * me = _memEntry; - _memEntry = mapping->fMemory->_memEntry; - mapping->fMemory->_memEntry = me; - } - if (pager) - err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options ); - } - else - { - mach_vm_address_t address; - - if (!(options & kIOMapAnywhere)) - { - address = trunc_page_64(mapping->fAddress); - if( (mapping->fAddress - address) != pageOffset) - { - err = kIOReturnVMError; - continue; - } - } - - err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry, - options, (kIOMemoryBufferPageable & _flags), - offset, &address, round_page_64(length + pageOffset)); - if( err != KERN_SUCCESS) - continue; - - if (!_memEntry || pager) - { - err = handleFault( pager, mapping->fAddressMap, address, offset, length, options ); - if (err != KERN_SUCCESS) - 
doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 ); - } - -#ifdef DEBUG - if (kIOLogMapping & gIOKitDebug) - IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n", - err, this, sourceAddr, mapping, address, offset, length); +#if DEBUG + IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset); #endif - - if (err == KERN_SUCCESS) - mapping->fAddress = address + pageOffset; - else - mapping->fAddress = NULL; - } + do { + SLEEP; + } while( kIOMemoryRedirected & _flags ); } - while( false ); - - return (err); + return (kIOReturnSuccess); } -enum { - kIOMemoryRedirected = 0x00010000 -}; - -IOReturn IOMemoryDescriptor::handleFault( +IOReturn IOMemoryDescriptor::populateDevicePager( void * _pager, vm_map_t addressMap, mach_vm_address_t address, @@ -2588,22 +3564,7 @@ IOReturn IOMemoryDescriptor::handleFault( IOPhysicalLength segLen; addr64_t physAddr; - if( !addressMap) - { - if( kIOMemoryRedirected & _flags) - { -#ifdef DEBUG - IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset); -#endif - do { - SLEEP; - } while( kIOMemoryRedirected & _flags ); - } - - return( kIOReturnSuccess ); - } - - physAddr = getPhysicalSegment64( sourceOffset, &segLen ); + physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ); assert( physAddr ); pageOffset = physAddr - trunc_page_64( physAddr ); pagerOffset = sourceOffset; @@ -2616,74 +3577,53 @@ IOReturn IOMemoryDescriptor::handleFault( do { // in the middle of the loop only map whole pages - if( segLen >= bytes) - segLen = bytes; - else if( segLen != trunc_page_32( segLen)) - err = kIOReturnVMError; - if( physAddr != trunc_page_64( physAddr)) - err = kIOReturnBadArgument; - if (kIOReturnSuccess != err) - break; - -#ifdef DEBUG - if( kIOLogMapping & gIOKitDebug) - IOLog("_IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n", - addressMap, address + pageOffset, physAddr + pageOffset, - segLen - pageOffset); -#endif + if( segLen >= bytes) segLen = bytes; + else if (segLen != trunc_page(segLen)) err = kIOReturnVMError; + if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument; + if (kIOReturnSuccess != err) break; - if( pager) { - if( reserved && reserved->pagerContig) { - IOPhysicalLength allLen; - addr64_t allPhys; + if (reserved && reserved->dp.pagerContig) + { + IOPhysicalLength allLen; + addr64_t allPhys; - allPhys = getPhysicalSegment64( 0, &allLen ); - assert( allPhys ); - err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page_32(allLen) ); - } - else + allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone ); + assert( allPhys ); + err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) ); + } + else + { + for( page = 0; + (page < segLen) && (KERN_SUCCESS == err); + page += page_size) { - - for( page = 0; - (page < segLen) && (KERN_SUCCESS == err); - page += page_size) - { - err = device_pager_populate_object(pager, pagerOffset, - (ppnum_t)(atop_64(physAddr + page)), page_size); - pagerOffset += page_size; - } - } - assert( KERN_SUCCESS == err ); - if( err) - break; - } + err = device_pager_populate_object(pager, pagerOffset, + (ppnum_t)(atop_64(physAddr + page)), page_size); + pagerOffset += page_size; + } + } + assert (KERN_SUCCESS == err); + if (err) break; // This call to vm_fault causes an early pmap level resolution // of the mappings created above for kernel mappings, since // faulting in later can't take place from interrupt level. 
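A small sketch (assuming "md" has already been prepare()d) of the same segment walk populateDevicePager() performs above, asking for untranslated CPU-visible addresses with kIOMemoryMapperNone rather than mapper (bus) addresses.

#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

static void LogPhysicalSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;

    while (offset < md->getLength())
    {
        IOByteCount segLen   = 0;
        addr64_t    physAddr = md->getPhysicalSegment(offset, &segLen,
                                                      kIOMemoryMapperNone);
        if (!physAddr)
            break;

        IOLog("segment @ 0x%qx, length 0x%llx\n",
              physAddr, (uint64_t) segLen);
        offset += segLen;
    }
}
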
- /* *** ALERT *** */ - /* *** Temporary Workaround *** */ - if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) { - vm_fault(addressMap, - (vm_map_offset_t)address, - VM_PROT_READ|VM_PROT_WRITE, - FALSE, THREAD_UNINT, NULL, - (vm_map_offset_t)0); + vm_fault(addressMap, + (vm_map_offset_t)trunc_page_64(address), + VM_PROT_READ|VM_PROT_WRITE, + FALSE, THREAD_UNINT, NULL, + (vm_map_offset_t)0); } - /* *** Temporary Workaround *** */ - /* *** ALERT *** */ - sourceOffset += segLen - pageOffset; address += segLen; bytes -= segLen; pageOffset = 0; - } - while (bytes && (physAddr = getPhysicalSegment64( sourceOffset, &segLen ))); + while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ))); if (bytes) err = kIOReturnBadArgument; @@ -2707,15 +3647,15 @@ IOReturn IOMemoryDescriptor::doUnmap( } else { - addressMap = ((_IOMemoryMap *) __address)->fAddressMap; - address = ((_IOMemoryMap *) __address)->fAddress; - length = ((_IOMemoryMap *) __address)->fLength; + addressMap = ((IOMemoryMap *) __address)->fAddressMap; + address = ((IOMemoryMap *) __address)->fAddress; + length = ((IOMemoryMap *) __address)->fLength; } - if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) + if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) addressMap = IOPageableMapForAddress( address ); -#ifdef DEBUG +#if DEBUG if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n", addressMap, address, length ); @@ -2729,7 +3669,7 @@ IOReturn IOMemoryDescriptor::doUnmap( IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) { IOReturn err = kIOReturnSuccess; - _IOMemoryMap * mapping = 0; + IOMemoryMap * mapping = 0; OSIterator * iter; LOCK; @@ -2741,8 +3681,22 @@ IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) do { if( (iter = OSCollectionIterator::withCollection( _mappings))) { - while( (mapping = (_IOMemoryMap *) iter->getNextObject())) + + memory_object_t pager; + + if( reserved) + pager = (memory_object_t) reserved->dp.devicePager; + else + pager = MACH_PORT_NULL; + + while( (mapping = (IOMemoryMap *) iter->getNextObject())) + { mapping->redirect( safeTask, doRedirect ); + if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) + { + err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache ); + } + } iter->release(); } @@ -2755,27 +3709,24 @@ IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) UNLOCK; +#ifndef __LP64__ // temporary binary compatibility IOSubMemoryDescriptor * subMem; if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) err = subMem->redirect( safeTask, doRedirect ); else err = kIOReturnSuccess; +#endif /* !__LP64__ */ return( err ); } -IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) -{ - return( _parent->redirect( safeTask, doRedirect )); -} - -IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) +IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) { IOReturn err = kIOReturnSuccess; if( fSuperMap) { -// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect ); +// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect ); } else { LOCK; @@ -2791,18 +3742,8 @@ IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) && (0 == (fOptions & kIOMapStatic))) { IOUnmapPages( fAddressMap, fAddress, fLength ); - if(!doRedirect && 
safeTask - && (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) - || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))) - { - IOVirtualAddress iova = (IOVirtualAddress) this; - err = mach_vm_deallocate( fAddressMap, fAddress, fLength ); - err = fMemory->doMap( fAddressMap, &iova, - (fOptions & ~kIOMapAnywhere) | kIOMap64Bit/*| kIOMapReserve*/, - 0, 0 ); - } else - err = kIOReturnSuccess; -#ifdef DEBUG + err = kIOReturnSuccess; +#if DEBUG IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap); #endif } @@ -2826,7 +3767,7 @@ IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) return( err ); } -IOReturn _IOMemoryMap::unmap( void ) +IOReturn IOMemoryMap::unmap( void ) { IOReturn err; @@ -2853,9 +3794,11 @@ IOReturn _IOMemoryMap::unmap( void ) return( err ); } -void _IOMemoryMap::taskDied( void ) +void IOMemoryMap::taskDied( void ) { LOCK; + if (fUserClientUnmap) + unmap(); if( fAddressMap) { vm_map_deallocate(fAddressMap); fAddressMap = 0; @@ -2865,18 +3808,24 @@ void _IOMemoryMap::taskDied( void ) UNLOCK; } +IOReturn IOMemoryMap::userClientUnmap( void ) +{ + fUserClientUnmap = true; + return (kIOReturnSuccess); +} + // Overload the release mechanism. All mappings must be a member // of a memory descriptors _mappings set. This means that we // always have 2 references on a mapping. When either of these mappings // are released we need to free ourselves. -void _IOMemoryMap::taggedRelease(const void *tag) const +void IOMemoryMap::taggedRelease(const void *tag) const { LOCK; super::taggedRelease(tag, 2); UNLOCK; } -void _IOMemoryMap::free() +void IOMemoryMap::free() { unmap(); @@ -2906,35 +3855,41 @@ void _IOMemoryMap::free() super::free(); } -IOByteCount _IOMemoryMap::getLength() +IOByteCount IOMemoryMap::getLength() { return( fLength ); } -IOVirtualAddress _IOMemoryMap::getVirtualAddress() +IOVirtualAddress IOMemoryMap::getVirtualAddress() { +#ifndef __LP64__ if (fSuperMap) fSuperMap->getVirtualAddress(); - else if (fAddressMap && vm_map_is_64bit(fAddressMap)) + else if (fAddressMap + && vm_map_is_64bit(fAddressMap) + && (sizeof(IOVirtualAddress) < 8)) { OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress); } +#endif /* !__LP64__ */ return (fAddress); } -mach_vm_address_t _IOMemoryMap::getAddress() +#ifndef __LP64__ +mach_vm_address_t IOMemoryMap::getAddress() { return( fAddress); } -mach_vm_size_t _IOMemoryMap::getSize() +mach_vm_size_t IOMemoryMap::getSize() { return( fLength ); } +#endif /* !__LP64__ */ -task_t _IOMemoryMap::getAddressTask() +task_t IOMemoryMap::getAddressTask() { if( fSuperMap) return( fSuperMap->getAddressTask()); @@ -2942,18 +3897,18 @@ task_t _IOMemoryMap::getAddressTask() return( fAddressTask); } -IOOptionBits _IOMemoryMap::getMapOptions() +IOOptionBits IOMemoryMap::getMapOptions() { return( fOptions); } -IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor() +IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor() { return( fMemory ); } -_IOMemoryMap * _IOMemoryMap::copyCompatible( - _IOMemoryMap * newMapping ) +IOMemoryMap * IOMemoryMap::copyCompatible( + IOMemoryMap * newMapping ) { task_t task = newMapping->getAddressTask(); mach_vm_address_t toAddress = newMapping->fAddress; @@ -2983,26 +3938,55 @@ _IOMemoryMap * _IOMemoryMap::copyCompatible( retain(); if( (fLength == _length) && (!_offset)) { - newMapping->release(); newMapping = this; } else { newMapping->fSuperMap = this; - newMapping->fOffset = 
_offset; + newMapping->fOffset = fOffset + _offset; newMapping->fAddress = fAddress + _offset; } return( newMapping ); } +IOReturn IOMemoryMap::wireRange( + uint32_t options, + mach_vm_size_t offset, + mach_vm_size_t length) +{ + IOReturn kr; + mach_vm_address_t start = trunc_page_64(fAddress + offset); + mach_vm_address_t end = round_page_64(fAddress + offset + length); + + if (kIODirectionOutIn & options) + { + kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE); + } + else + { + kr = vm_map_unwire(fAddressMap, start, end, FALSE); + } + + return (kr); +} + + IOPhysicalAddress -_IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length) +#ifdef __LP64__ +IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options) +#else /* !__LP64__ */ +IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length) +#endif /* !__LP64__ */ { IOPhysicalAddress address; LOCK; +#ifdef __LP64__ + address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options ); +#else /* !__LP64__ */ address = fMemory->getPhysicalSegment( fOffset + _offset, _length ); +#endif /* !__LP64__ */ UNLOCK; return( address ); @@ -3020,21 +4004,6 @@ void IOMemoryDescriptor::initialize( void ) if( 0 == gIOMemoryLock) gIOMemoryLock = IORecursiveLockAlloc(); - IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey, - ptoa_64(gIOMaximumMappedIOPageCount), 64); - if (!gIOCopyMapper) - { - IOMapper * - mapper = new IOCopyMapper; - if (mapper) - { - if (mapper->init() && mapper->start(NULL)) - gIOCopyMapper = (IOCopyMapper *) mapper; - else - mapper->release(); - } - } - gIOLastPage = IOGetLastPageNumber(); } @@ -3064,6 +4033,7 @@ IOMemoryMap * IOMemoryDescriptor::map( 0, getLength() )); } +#ifndef __LP64__ IOMemoryMap * IOMemoryDescriptor::map( task_t intoTask, IOVirtualAddress atAddress, @@ -3080,6 +4050,7 @@ IOMemoryMap * IOMemoryDescriptor::map( return (createMappingInTask(intoTask, atAddress, options, offset, length)); } +#endif /* !__LP64__ */ IOMemoryMap * IOMemoryDescriptor::createMappingInTask( task_t intoTask, @@ -3088,13 +4059,13 @@ IOMemoryMap * IOMemoryDescriptor::createMappingInTask( mach_vm_size_t offset, mach_vm_size_t length) { - IOMemoryMap * result; - _IOMemoryMap * mapping; + IOMemoryMap * result; + IOMemoryMap * mapping; if (0 == length) length = getLength(); - mapping = new _IOMemoryMap; + mapping = new IOMemoryMap; if( mapping && !mapping->init( intoTask, atAddress, @@ -3108,23 +4079,25 @@ IOMemoryMap * IOMemoryDescriptor::createMappingInTask( else result = 0; -#ifdef DEBUG +#if DEBUG if (!result) - IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n", - this, atAddress, options, offset, length); + IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n", + this, atAddress, (uint32_t) options, offset, length); #endif return (result); } -IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, +#ifndef __LP64__ // there is only a 64 bit version for LP64 +IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, IOOptionBits options, IOByteCount offset) { return (redirect(newBackingMemory, options, (mach_vm_size_t)offset)); } +#endif -IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, +IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, IOOptionBits options, mach_vm_size_t offset) { @@ -3142,12 +4115,12 @@ IOReturn 
_IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, physMem->retain(); } - if (!fRedirUPL) + if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) { - vm_size_t size = fLength; + vm_size_t size = round_page(fLength); int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; - if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL, + if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL, NULL, NULL, &flags)) fRedirUPL = 0; @@ -3155,7 +4128,8 @@ IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, if (physMem) { IOUnmapPages( fAddressMap, fAddress, fLength ); - physMem->redirect(0, true); + if ((false)) + physMem->redirect(0, true); } } @@ -3175,7 +4149,7 @@ IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, upl_deallocate(fRedirUPL); fRedirUPL = 0; } - if (physMem) + if ((false) && physMem) physMem->redirect(0, false); } } @@ -3197,13 +4171,15 @@ IOMemoryMap * IOMemoryDescriptor::makeMapping( IOByteCount __offset, IOByteCount __length ) { +#ifndef __LP64__ if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit"); +#endif /* !__LP64__ */ IOMemoryDescriptor * mapDesc = 0; - _IOMemoryMap * result = 0; + IOMemoryMap * result = 0; OSIterator * iter; - _IOMemoryMap * mapping = (_IOMemoryMap *) __address; + IOMemoryMap * mapping = (IOMemoryMap *) __address; mach_vm_size_t offset = mapping->fOffset + __offset; mach_vm_size_t length = mapping->fLength; @@ -3223,7 +4199,7 @@ IOMemoryMap * IOMemoryDescriptor::makeMapping( if (kIOMapUnique & options) { - IOPhysicalAddress phys; + addr64_t phys; IOByteCount physLen; // if (owner != this) continue; @@ -3231,12 +4207,12 @@ IOMemoryMap * IOMemoryDescriptor::makeMapping( if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) { - phys = getPhysicalSegment(offset, &physLen); + phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); if (!phys || (physLen < length)) continue; - mapDesc = IOMemoryDescriptor::withPhysicalAddress( - phys, length, _direction); + mapDesc = IOMemoryDescriptor::withAddressRange( + phys, length, getDirection() | kIOMemoryMapperNone, NULL); if (!mapDesc) continue; offset = 0; @@ -3248,8 +4224,8 @@ IOMemoryMap * IOMemoryDescriptor::makeMapping( // look for a compatible existing mapping if( (iter = OSCollectionIterator::withCollection(_mappings))) { - _IOMemoryMap * lookMapping; - while ((lookMapping = (_IOMemoryMap *) iter->getNextObject())) + IOMemoryMap * lookMapping; + while ((lookMapping = (IOMemoryMap *) iter->getNextObject())) { if ((result = lookMapping->copyCompatible(mapping))) { @@ -3261,7 +4237,14 @@ IOMemoryMap * IOMemoryDescriptor::makeMapping( iter->release(); } if (result || (options & kIOMapReference)) + { + if (result != mapping) + { + mapping->release(); + mapping = NULL; + } continue; + } } if (!mapDesc) @@ -3312,305 +4295,11 @@ void IOMemoryDescriptor::removeMapping( _mappings->removeObject( mapping); } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#undef super -#define super IOMemoryDescriptor - -OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor) - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent, - IOByteCount offset, IOByteCount length, - IODirection direction ) -{ 
- if( !parent) - return( false); - - if( (offset + length) > parent->getLength()) - return( false); - - /* - * We can check the _parent instance variable before having ever set it - * to an initial value because I/O Kit guarantees that all our instance - * variables are zeroed on an object's allocation. - */ - - if( !_parent) { - if( !super::init()) - return( false ); - } else { - /* - * An existing memory descriptor is being retargeted to - * point to somewhere else. Clean up our present state. - */ - - _parent->release(); - _parent = 0; - } - - parent->retain(); - _parent = parent; - _start = offset; - _length = length; - _direction = direction; - _tag = parent->getTag(); - - return( true ); -} - -void IOSubMemoryDescriptor::free( void ) -{ - if( _parent) - _parent->release(); - - super::free(); -} - - -IOReturn -IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const -{ - IOReturn rtn; - - if (kIOMDGetCharacteristics == op) { - - rtn = _parent->dmaCommandOperation(op, vData, dataSize); - if (kIOReturnSuccess == rtn) { - IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData; - data->fLength = _length; - data->fSGCount = 0; // XXX gvdl: need to compute and pages - data->fPages = 0; - data->fPageAlign = 0; - } - - return rtn; - } - else if (kIOMDWalkSegments & op) { - if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) - return kIOReturnUnderrun; - - IOMDDMAWalkSegmentArgs *data = - reinterpret_cast(vData); - UInt offset = data->fOffset; - UInt remain = _length - offset; - if ((int) remain <= 0) - return (!remain)? kIOReturnOverrun : kIOReturnInternalError; - - data->fOffset = offset + _start; - rtn = _parent->dmaCommandOperation(op, vData, dataSize); - if (data->fLength > remain) - data->fLength = remain; - data->fOffset = offset; - - return rtn; - } - else - return kIOReturnBadArgument; -} - -addr64_t -IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length) -{ - addr64_t address; - IOByteCount actualLength; - - assert(offset <= _length); - - if( length) - *length = 0; - - if( offset >= _length) - return( 0 ); - - address = _parent->getPhysicalSegment64( offset + _start, &actualLength ); - - if( address && length) - *length = min( _length - offset, actualLength ); - - return( address ); -} - -IOPhysicalAddress -IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length ) -{ - IOPhysicalAddress address; - IOByteCount actualLength; - - assert(offset <= _length); - - if( length) - *length = 0; - - if( offset >= _length) - return( 0 ); - - address = _parent->getPhysicalSegment( offset + _start, &actualLength ); - - if( address && length) - *length = min( _length - offset, actualLength ); - - return( address ); -} - -IOPhysicalAddress -IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length ) -{ - IOPhysicalAddress address; - IOByteCount actualLength; - - assert(offset <= _length); - - if( length) - *length = 0; - - if( offset >= _length) - return( 0 ); - - address = _parent->getSourceSegment( offset + _start, &actualLength ); - - if( address && length) - *length = min( _length - offset, actualLength ); - - return( address ); -} - -void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset, - IOByteCount * lengthOfSegment) -{ - return( 0 ); -} - -IOReturn IOSubMemoryDescriptor::doMap( - vm_map_t addressMap, - IOVirtualAddress * atAddress, - IOOptionBits options, - IOByteCount sourceOffset, - IOByteCount length ) -{ - panic("IOSubMemoryDescriptor::doMap"); - 
return (IOMemoryDescriptor::doMap(addressMap, atAddress, options, sourceOffset, length)); -} - -IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset, - void * bytes, IOByteCount length) -{ - IOByteCount byteCount; - - assert(offset <= _length); - - if( offset >= _length) - return( 0 ); - - LOCK; - byteCount = _parent->readBytes( _start + offset, bytes, - min(length, _length - offset) ); - UNLOCK; - - return( byteCount ); -} - -IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset, - const void* bytes, IOByteCount length) -{ - IOByteCount byteCount; - - assert(offset <= _length); - - if( offset >= _length) - return( 0 ); - - LOCK; - byteCount = _parent->writeBytes( _start + offset, bytes, - min(length, _length - offset) ); - UNLOCK; - - return( byteCount ); -} - -IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ) -{ - IOReturn err; - - LOCK; - err = _parent->setPurgeable( newState, oldState ); - UNLOCK; - - return( err ); -} - -IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options, - IOByteCount offset, IOByteCount length ) -{ - IOReturn err; - - assert(offset <= _length); - - if( offset >= _length) - return( kIOReturnOverrun ); - - LOCK; - err = _parent->performOperation( options, _start + offset, - min(length, _length - offset) ); - UNLOCK; - - return( err ); -} - -IOReturn IOSubMemoryDescriptor::prepare( - IODirection forDirection) -{ - IOReturn err; - - LOCK; - err = _parent->prepare( forDirection); - UNLOCK; - - return( err ); -} - -IOReturn IOSubMemoryDescriptor::complete( - IODirection forDirection) -{ - IOReturn err; - - LOCK; - err = _parent->complete( forDirection); - UNLOCK; - - return( err ); -} - -IOMemoryMap * IOSubMemoryDescriptor::makeMapping( - IOMemoryDescriptor * owner, - task_t intoTask, - IOVirtualAddress address, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ) -{ - IOMemoryMap * mapping = 0; - - if (!(kIOMap64Bit & options)) - { - panic("IOSubMemoryDescriptor::makeMapping !64bit"); - } - - mapping = (IOMemoryMap *) _parent->makeMapping( - owner, - intoTask, - address, - options, _start + offset, length ); - - return( mapping ); -} - -/* ick */ - +#ifndef __LP64__ +// obsolete initializers +// - initWithOptions is the designated initializer bool -IOSubMemoryDescriptor::initWithAddress(void * address, +IOMemoryDescriptor::initWithAddress(void * address, IOByteCount length, IODirection direction) { @@ -3618,7 +4307,7 @@ IOSubMemoryDescriptor::initWithAddress(void * address, } bool -IOSubMemoryDescriptor::initWithAddress(vm_address_t address, +IOMemoryDescriptor::initWithAddress(IOVirtualAddress address, IOByteCount length, IODirection direction, task_t task) @@ -3627,7 +4316,7 @@ IOSubMemoryDescriptor::initWithAddress(vm_address_t address, } bool -IOSubMemoryDescriptor::initWithPhysicalAddress( +IOMemoryDescriptor::initWithPhysicalAddress( IOPhysicalAddress address, IOByteCount length, IODirection direction ) @@ -3636,7 +4325,7 @@ IOSubMemoryDescriptor::initWithPhysicalAddress( } bool -IOSubMemoryDescriptor::initWithRanges( +IOMemoryDescriptor::initWithRanges( IOVirtualRange * ranges, UInt32 withCount, IODirection direction, @@ -3647,7 +4336,7 @@ IOSubMemoryDescriptor::initWithRanges( } bool -IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, +IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, IODirection direction, bool asReference) @@ -3655,12 +4344,21 @@ IOSubMemoryDescriptor::initWithPhysicalRanges( 
IOPhysicalRange * ranges, return( false ); } +void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset, + IOByteCount * lengthOfSegment) +{ + return( 0 ); +} +#endif /* !__LP64__ */ + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const { OSSymbol const *keys[2]; OSObject *values[2]; + OSArray * array; + struct SerData { user_addr_t address; user_size_t length; @@ -3671,10 +4369,9 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const IOOptionBits type = _flags & kIOMemoryTypeMask; if (s == NULL) return false; - if (s->previouslySerialized(this)) return true; - // Pretend we are an array. - if (!s->addXMLStartTag(this, "array")) return false; + array = OSArray::withCapacity(4); + if (!array) return (false); nRanges = _rangesCount; vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges); @@ -3694,7 +4391,7 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const if (nRanges == _rangesCount) { Ranges vec = _ranges; for (index = 0; index < nRanges; index++) { - user_addr_t addr; IOByteCount len; + mach_vm_address_t addr; mach_vm_size_t len; getAddrLenForInd(addr, len, type, vec, index); vcopy[index].address = addr; vcopy[index].length = len; @@ -3711,8 +4408,7 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const { user_addr_t addr = vcopy[index].address; IOByteCount len = (IOByteCount) vcopy[index].length; - values[0] = - OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32); + values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8); if (values[0] == 0) { result = false; goto bail; @@ -3727,19 +4423,18 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const result = false; goto bail; } + array->setObject(dict); + dict->release(); values[0]->release(); values[1]->release(); values[0] = values[1] = 0; - - result = dict->serialize(s); - dict->release(); - if (!result) { - goto bail; - } } - result = s->addXMLEndTag("array"); + + result = array->serialize(s); bail: + if (array) + array->release(); if (values[0]) values[0]->release(); if (values[1]) @@ -3750,66 +4445,168 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const keys[1]->release(); if (vcopy) IOFree(vcopy, sizeof(SerData) * nRanges); + return result; } -bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#if DEVELOPMENT || DEBUG + +extern "C" void IOMemoryDescriptorTest(int x) { - if (!s) { - return (false); - } - if (s->previouslySerialized(this)) return true; + IOGeneralMemoryDescriptor * md; - // Pretend we are a dictionary. - // We must duplicate the functionality of OSDictionary here - // because otherwise object references will not work; - // they are based on the value of the object passed to - // previouslySerialized and addXMLStartTag. 
+ vm_offset_t data[2]; + vm_size_t bsize = 16*1024*1024; - if (!s->addXMLStartTag(this, "dict")) return false; + vm_size_t srcsize, srcoffset, mapoffset, size; + + kern_return_t kr; - char const *keys[3] = {"offset", "length", "parent"}; + kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE); + vm_inherit(kernel_map, data[0] + 1*4096, 4096, VM_INHERIT_NONE); + vm_inherit(kernel_map, data[0] + 16*4096, 4096, VM_INHERIT_NONE); - OSObject *values[3]; - values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8); - if (values[0] == 0) - return false; - values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8); - if (values[1] == 0) { - values[0]->release(); - return false; - } - values[2] = _parent; + kprintf("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]); - bool result = true; - for (int i=0; i<3; i++) { - if (!s->addString("") || - !s->addString(keys[i]) || - !s->addXMLEndTag("key") || - !values[i]->serialize(s)) { - result = false; - break; - } + uint32_t idx, offidx; + for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) + { + ((uint32_t*)data[0])[idx] = idx; } - values[0]->release(); - values[1]->release(); - if (!result) { - return false; + + for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 1) + 0x40c)) + { + for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 1) + 0x3fc)) + { + IOAddressRange ranges[3]; + uint32_t rangeCount = 1; + + bzero(&ranges[0], sizeof(ranges)); + ranges[0].address = data[0] + srcoffset; + ranges[0].length = srcsize; + + if (srcsize > 5*page_size) + { + ranges[0].length = 7634; + ranges[1].length = 9870; + ranges[2].length = srcsize - ranges[0].length - ranges[1].length; + ranges[1].address = ranges[0].address + ranges[0].length; + ranges[2].address = ranges[1].address + ranges[1].length; + rangeCount = 3; + } + else if ((srcsize > 2*page_size) && !(page_mask & srcoffset)) + { + ranges[0].length = 4096; + ranges[1].length = 4096; + ranges[2].length = srcsize - ranges[0].length - ranges[1].length; + ranges[0].address = data[0] + srcoffset + 4096; + ranges[1].address = data[0] + srcoffset; + ranges[2].address = ranges[0].address + ranges[0].length; + rangeCount = 3; + } + + md = OSDynamicCast(IOGeneralMemoryDescriptor, + IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task)); + assert(md); + + kprintf("IOMemoryReferenceCreate [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n", + (long) srcsize, (long) srcoffset, + (long long) ranges[0].address - data[0], (long long) ranges[0].length, + (long long) ranges[1].address - data[0], (long long) ranges[1].length, + (long long) ranges[2].address - data[0], (long long) ranges[2].length); + + if (kIOReturnSuccess == kr) + { + for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) + { + for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 1) + 0x20)) + { + IOMemoryMap * map; + mach_vm_address_t addr = 0; + uint32_t data; + + kprintf("createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size); + if (map) addr = map->getAddress(); + else kr = kIOReturnError; + + kprintf(">mapRef 0x%x %llx\n", kr, addr); + + if (kIOReturnSuccess != kr) break; + kr = md->prepare(); + if (kIOReturnSuccess != kr) + { + kprintf("prepare() fail 0x%x\n", kr); + break; + } + for (idx = 0; idx < size; idx += sizeof(uint32_t)) + { + offidx = (idx + mapoffset + srcoffset); + if ((srcsize <= 5*page_size) && (srcsize > 2*page_size) && !(page_mask & srcoffset)) + { + if (offidx < 
8192) offidx ^= 0x1000;
+                            }
+                            offidx /= sizeof(uint32_t);
+
+                            if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
+                            {
+                                kprintf("vm mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
+                                kr = kIOReturnBadMedia;
+                            }
+                            else
+                            {
+                                if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
+                                if (offidx != data)
+                                {
+                                    kprintf("phys mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
+                                    kr = kIOReturnBadMedia;
+                                }
+                            }
+                        }
+                        md->complete();
+                        map->release();
+                        kprintf("unmapRef %llx\n", addr);
+                    }
+                    if (kIOReturnSuccess != kr) break;
+                }
+            }
+            if (kIOReturnSuccess != kr) break;
+        }
+        if (kIOReturnSuccess != kr) break;
+    }
+    if (kIOReturnSuccess != kr) kprintf("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
+                                (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
+
+    vm_deallocate(kernel_map, data[0], bsize);
+//  vm_deallocate(kernel_map, data[1], size);
+}
+#endif /* DEVELOPMENT || DEBUG */
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
+#ifdef __LP64__
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
+#else /* !__LP64__ */
 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
-OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
-OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
+OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
+OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
+#endif /* !__LP64__ */
 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
@@ -3823,6 +4620,3 @@ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
 IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
         { return( getPhysicalSegment( 0, 0 )); }
-
-
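
A minimal usage sketch (not part of the change above): with the old map(task, atAddress, ...) overloads now confined to !__LP64__ builds, createMappingInTask() is the primary mapping entry point in this file. The helper name and error handling below are illustrative assumptions, not code from this diff.

    // Illustrative sketch only -- not part of the diff above.
    #include <IOKit/IOMemoryDescriptor.h>

    static IOReturn MapAndZero(IOMemoryDescriptor * md)    // hypothetical helper
    {
        // Map the whole descriptor (length 0 == getLength()) anywhere in the kernel task.
        IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, 0);
        if (!map)
            return kIOReturnVMError;

        // getAddress()/getLength() describe the kernel-virtual window onto the memory.
        bzero((void *) map->getAddress(), map->getLength());

        // The descriptor's _mappings set keeps its own reference (see taggedRelease above);
        // dropping the caller's reference is what eventually tears the mapping down.
        map->release();
        return kIOReturnSuccess;
    }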
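
The new IOMemoryMap::wireRange() defined above wires a page-aligned span of an existing mapping via vm_map_wire() when any kIODirectionOutIn bits are passed, and unwires it via vm_map_unwire() when the direction bits are zero. A hedged sketch of that pairing (WireWholeMap is a made-up name):

    // Illustrative sketch only -- not part of the diff above.
    static IOReturn WireWholeMap(IOMemoryMap * map)        // hypothetical helper
    {
        // Non-zero direction bits -> vm_map_wire() over the trunc_page/round_page span.
        IOReturn kr = map->wireRange(kIODirectionOutIn, 0, map->getLength());
        if (kIOReturnSuccess != kr)
            return kr;

        /* ... touch the wired pages here ... */

        // Zero direction bits -> vm_map_unwire() of the same span.
        return map->wireRange(0, 0, map->getLength());
    }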
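
Several call sites above replace getPhysicalSegment64() with the three-argument getPhysicalSegment(offset, &length, kIOMemoryMapperNone), i.e. they ask for untranslated CPU-physical addresses rather than system-mapper addresses. A short hedged sketch of walking segments with that option (the helper name and the IOLog formatting are illustrative):

    // Illustrative sketch only -- not part of the diff above.
    static void LogPhysicalSegments(IOMemoryDescriptor * md)   // hypothetical helper
    {
        IOByteCount offset = 0;
        while (offset < md->getLength())
        {
            IOByteCount segLen = 0;
            // kIOMemoryMapperNone bypasses any system IOMapper, as in the code above.
            addr64_t phys = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
            if (!phys || !segLen)
                break;
            IOLog("segment @ 0x%llx, length 0x%llx\n", phys, (uint64_t) segLen);
            offset += segLen;
        }
    }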