X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/bd504ef0e0b883cdd7917b73b3574eb9ce669905..813fb2f63a553c957e917ede5f119b021d6ce391:/iokit/Kernel/IOMemoryDescriptor.cpp diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp index 8d49aeebe..fa735f3e4 100644 --- a/iokit/Kernel/IOMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2007 Apple Inc. All rights reserved. + * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,12 +25,6 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - * - * HISTORY - * - */ #include @@ -43,9 +37,8 @@ #include #include -#ifndef __LP64__ #include -#endif /* !__LP64__ */ +#include #include #include @@ -57,6 +50,7 @@ #include #include #include +#include #include @@ -74,28 +68,16 @@ __BEGIN_DECLS extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); extern void ipc_port_release_send(ipc_port_t port); -kern_return_t -memory_object_iopl_request( - ipc_port_t port, - memory_object_offset_t offset, - vm_size_t *upl_size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - int *flags); - +// osfmk/device/iokit_rpc.c +unsigned int IODefaultCacheBits(addr64_t pa); unsigned int IOTranslateCacheBits(struct phys_entry *pp); __END_DECLS -#define kIOMaximumMappedIOByteCount (512*1024*1024) - #define kIOMapperWaitSystem ((IOMapper *) 1) static IOMapper * gIOSystemMapper = NULL; -static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount); - ppnum_t gIOLastPage; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -134,10 +116,10 @@ enum ioPLBlockFlags { kIOPLExternUPL = 0x00000002, }; -struct typePersMDData +struct IOMDPersistentInitData { - const IOGeneralMemoryDescriptor *fMD; - ipc_port_t fMemEntry; + const IOGeneralMemoryDescriptor * fMD; + IOMemoryReference * fMemRef; }; struct ioPLBlock { @@ -149,19 +131,35 @@ struct ioPLBlock { unsigned int fFlags; // Flags }; -struct ioGMDData { +enum { kMaxWireTags = 6 }; + +struct ioGMDData +{ IOMapper * fMapper; - uint8_t fDMAMapNumAddressBits; uint64_t fDMAMapAlignment; - addr64_t fMappedBase; - uint64_t fPreparationID; - unsigned int fPageCnt; + uint64_t fMappedBase; + uint64_t fMappedLength; + uint64_t fPreparationID; +#if IOTRACKING + IOTracking fWireTracking; + struct vm_tag_set fWireTags; + struct vm_tag_set_entry fWireTagsEntries[kMaxWireTags]; +#endif /* IOTRACKING */ + unsigned int fPageCnt; + uint8_t fDMAMapNumAddressBits; + vm_tag_t fAllocTag; + unsigned char fDiscontig:1; + unsigned char fCompletionError:1; + unsigned char _resv:6; + + /* variable length arrays */ + upl_page_info_t fPageList[1] #if __LP64__ - // align arrays to 8 bytes so following macros work - unsigned int fPad; + // align fPageList as for ioPLBlock + __attribute__((aligned(sizeof(upl_t)))) #endif - upl_page_info_t fPageList[1]; /* variable length */ - ioPLBlock fBlocks[1]; /* variable length */ + ; + ioPLBlock fBlocks[1]; }; #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) @@ -172,12 +170,10 @@ struct ioGMDData { #define computeDataSize(p, u) \ (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define next_page(a) ( trunc_page(a) + PAGE_SIZE ) - extern "C" { kern_return_t device_data_action( @@ -196,8 +192,7 @@ 
kern_return_t device_data_action( if( memDesc) { memDesc->retain(); - kr = memDesc->handleFault( device_pager, 0, 0, - offset, size, kIOMapDefaultCache /*?*/); + kr = memDesc->handleFault(device_pager, offset, size); memDesc->release(); } else @@ -218,11 +213,13 @@ kern_return_t device_close( } }; // end extern "C" +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + // Note this inline function uses C++ reference arguments to return values // This means that pointers are not passed and NULLs don't have to be // checked for as a NULL reference is illegal. static inline void -getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables +getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind) { assert(kIOMemoryTypeUIO == type @@ -230,7 +227,8 @@ getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type); if (kIOMemoryTypeUIO == type) { user_size_t us; - uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us; + user_addr_t ad; + uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us; } #ifndef __LP64__ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) { @@ -248,6 +246,820 @@ getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +static IOReturn +purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state) +{ + IOReturn err = kIOReturnSuccess; + + *control = VM_PURGABLE_SET_STATE; + + enum { kIOMemoryPurgeableControlMask = 15 }; + + switch (kIOMemoryPurgeableControlMask & newState) + { + case kIOMemoryPurgeableKeepCurrent: + *control = VM_PURGABLE_GET_STATE; + break; + + case kIOMemoryPurgeableNonVolatile: + *state = VM_PURGABLE_NONVOLATILE; + break; + case kIOMemoryPurgeableVolatile: + *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask); + break; + case kIOMemoryPurgeableEmpty: + *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask); + break; + default: + err = kIOReturnBadArgument; + break; + } + return (err); +} + +static IOReturn +purgeableStateBits(int * state) +{ + IOReturn err = kIOReturnSuccess; + + switch (VM_PURGABLE_STATE_MASK & *state) + { + case VM_PURGABLE_NONVOLATILE: + *state = kIOMemoryPurgeableNonVolatile; + break; + case VM_PURGABLE_VOLATILE: + *state = kIOMemoryPurgeableVolatile; + break; + case VM_PURGABLE_EMPTY: + *state = kIOMemoryPurgeableEmpty; + break; + default: + *state = kIOMemoryPurgeableNonVolatile; + err = kIOReturnNotReady; + break; + } + return (err); +} + + +static vm_prot_t +vmProtForCacheMode(IOOptionBits cacheMode) +{ + vm_prot_t prot = 0; + switch (cacheMode) + { + case kIOInhibitCache: + SET_MAP_MEM(MAP_MEM_IO, prot); + break; + + case kIOWriteThruCache: + SET_MAP_MEM(MAP_MEM_WTHRU, prot); + break; + + case kIOWriteCombineCache: + SET_MAP_MEM(MAP_MEM_WCOMB, prot); + break; + + case kIOCopybackCache: + SET_MAP_MEM(MAP_MEM_COPYBACK, prot); + break; + + case kIOCopybackInnerCache: + SET_MAP_MEM(MAP_MEM_INNERWBACK, prot); + break; + + case kIODefaultCache: + default: + SET_MAP_MEM(MAP_MEM_NOOP, prot); + break; + } + + return (prot); +} + +static unsigned int +pagerFlagsForCacheMode(IOOptionBits cacheMode) +{ + unsigned int pagerFlags = 0; + switch (cacheMode) + { + case kIOInhibitCache: + pagerFlags = DEVICE_PAGER_CACHE_INHIB | 
DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; + + case kIOWriteThruCache: + pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; + + case kIOWriteCombineCache: + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT; + break; + + case kIOCopybackCache: + pagerFlags = DEVICE_PAGER_COHERENT; + break; + + case kIOCopybackInnerCache: + pagerFlags = DEVICE_PAGER_COHERENT; + break; + + case kIODefaultCache: + default: + pagerFlags = -1U; + break; + } + return (pagerFlags); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +struct IOMemoryEntry +{ + ipc_port_t entry; + int64_t offset; + uint64_t size; +}; + +struct IOMemoryReference +{ + volatile SInt32 refCount; + vm_prot_t prot; + uint32_t capacity; + uint32_t count; + struct IOMemoryReference * mapRef; + IOMemoryEntry entries[0]; +}; + +enum +{ + kIOMemoryReferenceReuse = 0x00000001, + kIOMemoryReferenceWrite = 0x00000002, + kIOMemoryReferenceCOW = 0x00000004, +}; + +SInt32 gIOMemoryReferenceCount; + +IOMemoryReference * +IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc) +{ + IOMemoryReference * ref; + size_t newSize, oldSize, copySize; + + newSize = (sizeof(IOMemoryReference) + - sizeof(ref->entries) + + capacity * sizeof(ref->entries[0])); + ref = (typeof(ref)) IOMalloc(newSize); + if (realloc) + { + oldSize = (sizeof(IOMemoryReference) + - sizeof(realloc->entries) + + realloc->capacity * sizeof(realloc->entries[0])); + copySize = oldSize; + if (copySize > newSize) copySize = newSize; + if (ref) bcopy(realloc, ref, copySize); + IOFree(realloc, oldSize); + } + else if (ref) + { + bzero(ref, sizeof(*ref)); + ref->refCount = 1; + OSIncrementAtomic(&gIOMemoryReferenceCount); + } + if (!ref) return (0); + ref->capacity = capacity; + return (ref); +} + +void +IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref) +{ + IOMemoryEntry * entries; + size_t size; + + if (ref->mapRef) + { + memoryReferenceFree(ref->mapRef); + ref->mapRef = 0; + } + + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) + { + entries--; + ipc_port_release_send(entries->entry); + } + size = (sizeof(IOMemoryReference) + - sizeof(ref->entries) + + ref->capacity * sizeof(ref->entries[0])); + IOFree(ref, size); + + OSDecrementAtomic(&gIOMemoryReferenceCount); +} + +void +IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref) +{ + if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref); +} + + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceCreate( + IOOptionBits options, + IOMemoryReference ** reference) +{ + enum { kCapacity = 4, kCapacityInc = 4 }; + + kern_return_t err; + IOMemoryReference * ref; + IOMemoryEntry * entries; + IOMemoryEntry * cloneEntries; + vm_map_t map; + ipc_port_t entry, cloneEntry; + vm_prot_t prot; + memory_object_size_t actualSize; + uint32_t rangeIdx; + uint32_t count; + mach_vm_address_t entryAddr, endAddr, entrySize; + mach_vm_size_t srcAddr, srcLen; + mach_vm_size_t nextAddr, nextLen; + mach_vm_size_t offset, remain; + IOByteCount physLen; + IOOptionBits type = (_flags & kIOMemoryTypeMask); + IOOptionBits cacheMode; + unsigned int pagerFlags; + vm_tag_t tag; + + ref = memoryReferenceAlloc(kCapacity, NULL); + if (!ref) return (kIOReturnNoMemory); + + tag = getVMTag(kernel_map); + entries = &ref->entries[0]; + count = 0; + err = KERN_SUCCESS; + + offset = 0; + 
rangeIdx = 0; + if (_task) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + } + else + { + nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + nextLen = physLen; + + // default cache mode for physical + if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) + { + IOOptionBits mode; + pagerFlags = IODefaultCacheBits(nextAddr); + if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) + { + if (DEVICE_PAGER_GUARDED & pagerFlags) + mode = kIOInhibitCache; + else + mode = kIOWriteCombineCache; + } + else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags) + mode = kIOWriteThruCache; + else + mode = kIOCopybackCache; + _flags |= (mode << kIOMemoryBufferCacheShift); + } + } + + // cache mode & vm_prot + prot = VM_PROT_READ; + cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); + prot |= vmProtForCacheMode(cacheMode); + // VM system requires write access to change cache mode + if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE; + if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE; + if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE; + if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY; + + if ((kIOMemoryReferenceReuse & options) && _memRef) + { + cloneEntries = &_memRef->entries[0]; + prot |= MAP_MEM_NAMED_REUSE; + } + + if (_task) + { + // virtual ranges + + if (kIOMemoryBufferPageable & _flags) + { + // IOBufferMemoryDescriptor alloc - set flags for entry + object create + prot |= MAP_MEM_NAMED_CREATE; + if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE; + if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED; + + prot |= VM_PROT_WRITE; + map = NULL; + } + else map = get_task_map(_task); + + remain = _length; + while (remain) + { + srcAddr = nextAddr; + srcLen = nextLen; + nextAddr = 0; + nextLen = 0; + // coalesce addr range + for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if ((srcAddr + srcLen) != nextAddr) break; + srcLen += nextLen; + } + entryAddr = trunc_page_64(srcAddr); + endAddr = round_page_64(srcAddr + srcLen); + do + { + entrySize = (endAddr - entryAddr); + if (!entrySize) break; + actualSize = entrySize; + + cloneEntry = MACH_PORT_NULL; + if (MAP_MEM_NAMED_REUSE & prot) + { + if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry; + else prot &= ~MAP_MEM_NAMED_REUSE; + } + + err = mach_make_memory_entry_64(map, + &actualSize, entryAddr, prot, &entry, cloneEntry); + + if (KERN_SUCCESS != err) break; + if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize"); + + if (count >= ref->capacity) + { + ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref); + entries = &ref->entries[count]; + } + entries->entry = entry; + entries->size = actualSize; + entries->offset = offset + (entryAddr - srcAddr); + entryAddr += actualSize; + if (MAP_MEM_NAMED_REUSE & prot) + { + if ((cloneEntries->entry == entries->entry) + && (cloneEntries->size == entries->size) + && (cloneEntries->offset == entries->offset)) cloneEntries++; + else prot &= ~MAP_MEM_NAMED_REUSE; + } + entries++; + count++; + } + while (true); + offset += srcLen; + remain -= srcLen; + } + } + else + { + // _task == 0, physical or kIOMemoryTypeUPL + memory_object_t pager; + vm_size_t size = ptoa_32(_pages); + + if (!getKernelReserved()) panic("getKernelReserved"); + + reserved->dp.pagerContig = (1 == _rangesCount); + reserved->dp.memory = this; + + 
pagerFlags = pagerFlagsForCacheMode(cacheMode); + if (-1U == pagerFlags) panic("phys is kIODefaultCache"); + if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS; + + pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved, + size, pagerFlags); + assert (pager); + if (!pager) err = kIOReturnVMError; + else + { + srcAddr = nextAddr; + entryAddr = trunc_page_64(srcAddr); + err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/, + size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry); + assert (KERN_SUCCESS == err); + if (KERN_SUCCESS != err) device_pager_deallocate(pager); + else + { + reserved->dp.devicePager = pager; + entries->entry = entry; + entries->size = size; + entries->offset = offset + (entryAddr - srcAddr); + entries++; + count++; + } + } + } + + ref->count = count; + ref->prot = prot; + + if (_task && (KERN_SUCCESS == err) + && (kIOMemoryMapCopyOnWrite & _flags) + && !(kIOMemoryReferenceCOW & options)) + { + err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef); + } + + if (KERN_SUCCESS == err) + { + if (MAP_MEM_NAMED_REUSE & prot) + { + memoryReferenceFree(ref); + OSIncrementAtomic(&_memRef->refCount); + ref = _memRef; + } + } + else + { + memoryReferenceFree(ref); + ref = NULL; + } + + *reference = ref; + + return (err); +} + +kern_return_t +IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) +{ + IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref; + IOReturn err; + vm_map_offset_t addr; + + addr = ref->mapped; + + err = vm_map_enter_mem_object(map, &addr, ref->size, + (vm_map_offset_t) 0, + (((ref->options & kIOMapAnywhere) + ? VM_FLAGS_ANYWHERE + : VM_FLAGS_FIXED) + | VM_MAKE_TAG(ref->tag)), + IPC_PORT_NULL, + (memory_object_offset_t) 0, + false, /* copy */ + ref->prot, + ref->prot, + VM_INHERIT_NONE); + if (KERN_SUCCESS == err) + { + ref->mapped = (mach_vm_address_t) addr; + ref->map = map; + } + + return( err ); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceMap( + IOMemoryReference * ref, + vm_map_t map, + mach_vm_size_t inoffset, + mach_vm_size_t size, + IOOptionBits options, + mach_vm_address_t * inaddr) +{ + IOReturn err; + int64_t offset = inoffset; + uint32_t rangeIdx, entryIdx; + vm_map_offset_t addr, mapAddr; + vm_map_offset_t pageOffset, entryOffset, remain, chunk; + + mach_vm_address_t nextAddr; + mach_vm_size_t nextLen; + IOByteCount physLen; + IOMemoryEntry * entry; + vm_prot_t prot, memEntryCacheMode; + IOOptionBits type; + IOOptionBits cacheMode; + vm_tag_t tag; + // for the kIOMapPrefault option. 
+ upl_page_info_t * pageList = NULL; + UInt currentPageIndex = 0; + bool didAlloc; + + if (ref->mapRef) + { + err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); + return (err); + } + + type = _flags & kIOMemoryTypeMask; + + prot = VM_PROT_READ; + if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE; + prot &= ref->prot; + + cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift); + if (kIODefaultCache != cacheMode) + { + // VM system requires write access to update named entry cache mode + memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); + } + + tag = getVMTag(map); + + if (_task) + { + // Find first range for offset + if (!_rangesCount) return (kIOReturnBadArgument); + for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if (remain < nextLen) break; + remain -= nextLen; + } + } + else + { + rangeIdx = 0; + remain = 0; + nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + nextLen = size; + } + + assert(remain < nextLen); + if (remain >= nextLen) return (kIOReturnBadArgument); + + nextAddr += remain; + nextLen -= remain; + pageOffset = (page_mask & nextAddr); + addr = 0; + didAlloc = false; + + if (!(options & kIOMapAnywhere)) + { + addr = *inaddr; + if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned); + addr -= pageOffset; + } + + // find first entry for offset + for (entryIdx = 0; + (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset); + entryIdx++) {} + entryIdx--; + entry = &ref->entries[entryIdx]; + + // allocate VM + size = round_page_64(size + pageOffset); + if (kIOMapOverwrite & options) + { + if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) + { + map = IOPageableMapForAddress(addr); + } + err = KERN_SUCCESS; + } + else + { + IOMemoryDescriptorMapAllocRef ref; + ref.map = map; + ref.tag = tag; + ref.options = options; + ref.size = size; + ref.prot = prot; + if (options & kIOMapAnywhere) + // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE + ref.mapped = 0; + else + ref.mapped = addr; + if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) + err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); + else + err = IOMemoryDescriptorMapAlloc(ref.map, &ref); + if (KERN_SUCCESS == err) + { + addr = ref.mapped; + map = ref.map; + didAlloc = true; + } + } + + /* + * Prefaulting is only possible if we wired the memory earlier. Check the + * memory type, and the underlying data. + */ + if (options & kIOMapPrefault) + { + /* + * The memory must have been wired by calling ::prepare(), otherwise + * we don't have the UPL. Without UPLs, pages cannot be pre-faulted + */ + assert(map != kernel_map); + assert(_wireCount != 0); + assert(_memoryEntries != NULL); + if ((map == kernel_map) || + (_wireCount == 0) || + (_memoryEntries == NULL)) + { + return kIOReturnBadArgument; + } + + // Get the page list. + ioGMDData* dataP = getDataP(_memoryEntries); + ioPLBlock const* ioplList = getIOPLList(dataP); + pageList = getPageList(dataP); + + // Get the number of IOPLs. + UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); + + /* + * Scan through the IOPL Info Blocks, looking for the first block containing + * the offset. The research will go past it, so we'll need to go back to the + * right range at the end. 
+ */ + UInt ioplIndex = 0; + while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) + ioplIndex++; + ioplIndex--; + + // Retrieve the IOPL info block. + ioPLBlock ioplInfo = ioplList[ioplIndex]; + + /* + * For external UPLs, the fPageInfo points directly to the UPL's page_info_t + * array. + */ + if (ioplInfo.fFlags & kIOPLExternUPL) + pageList = (upl_page_info_t*) ioplInfo.fPageInfo; + else + pageList = &pageList[ioplInfo.fPageInfo]; + + // Rebase [offset] into the IOPL in order to looks for the first page index. + mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset; + + // Retrieve the index of the first page corresponding to the offset. + currentPageIndex = atop_32(offsetInIOPL); + } + + // enter mappings + remain = size; + mapAddr = addr; + addr += pageOffset; + + while (remain && (KERN_SUCCESS == err)) + { + entryOffset = offset - entry->offset; + if ((page_mask & entryOffset) != pageOffset) + { + err = kIOReturnNotAligned; + break; + } + + if (kIODefaultCache != cacheMode) + { + vm_size_t unused = 0; + err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/, + memEntryCacheMode, NULL, entry->entry); + assert (KERN_SUCCESS == err); + } + + entryOffset -= pageOffset; + if (entryOffset >= entry->size) panic("entryOffset"); + chunk = entry->size - entryOffset; + if (chunk) + { + if (chunk > remain) chunk = remain; + if (options & kIOMapPrefault) + { + UInt nb_pages = round_page(chunk) / PAGE_SIZE; + err = vm_map_enter_mem_object_prefault(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE + | VM_MAKE_TAG(tag) + | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + entry->entry, + entryOffset, + prot, // cur + prot, // max + &pageList[currentPageIndex], + nb_pages); + + // Compute the next index in the page list. 
+ currentPageIndex += nb_pages; + assert(currentPageIndex <= _pages); + } + else + { + err = vm_map_enter_mem_object(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE + | VM_MAKE_TAG(tag) + | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + entry->entry, + entryOffset, + false, // copy + prot, // cur + prot, // max + VM_INHERIT_NONE); + } + if (KERN_SUCCESS != err) break; + remain -= chunk; + if (!remain) break; + mapAddr += chunk; + offset += chunk - pageOffset; + } + pageOffset = 0; + entry++; + entryIdx++; + if (entryIdx >= ref->count) + { + err = kIOReturnOverrun; + break; + } + } + + if ((KERN_SUCCESS != err) && didAlloc) + { + (void) mach_vm_deallocate(map, trunc_page_64(addr), size); + addr = 0; + } + *inaddr = addr; + + return (err); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts( + IOMemoryReference * ref, + IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount) +{ + IOReturn err; + IOMemoryEntry * entries; + unsigned int resident, dirty; + unsigned int totalResident, totalDirty; + + totalResident = totalDirty = 0; + err = kIOReturnSuccess; + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) + { + entries--; + err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty); + if (KERN_SUCCESS != err) break; + totalResident += resident; + totalDirty += dirty; + } + + if (residentPageCount) *residentPageCount = totalResident; + if (dirtyPageCount) *dirtyPageCount = totalDirty; + return (err); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable( + IOMemoryReference * ref, + IOOptionBits newState, + IOOptionBits * oldState) +{ + IOReturn err; + IOMemoryEntry * entries; + vm_purgable_t control; + int totalState, state; + + totalState = kIOMemoryPurgeableNonVolatile; + err = kIOReturnSuccess; + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) + { + entries--; + + err = purgeableControlBits(newState, &control, &state); + if (KERN_SUCCESS != err) break; + err = mach_memory_entry_purgable_control(entries->entry, control, &state); + if (KERN_SUCCESS != err) break; + err = purgeableStateBits(&state); + if (KERN_SUCCESS != err) break; + + if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty; + else if (kIOMemoryPurgeableEmpty == totalState) continue; + else if (kIOMemoryPurgeableVolatile == totalState) continue; + else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile; + else totalState = kIOMemoryPurgeableNonVolatile; + } + + if (oldState) *oldState = totalState; + return (err); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + IOMemoryDescriptor * IOMemoryDescriptor::withAddress(void * address, IOByteCount length, @@ -401,7 +1213,7 @@ IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, IOByteCount length, IODirection direction) { - return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe)); + return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction)); } #endif /* !__LP64__ */ @@ -421,19 +1233,19 @@ IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalM IOMemoryDescriptor * IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD) { - ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry(); + IOMemoryReference * memRef; + + if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0); - 
if (!sharedMem) - return 0; - - if (sharedMem == originalMD->_memEntry) { + if (memRef == originalMD->_memRef) + { originalMD->retain(); // Add a new reference to ourselves - ipc_port_release_send(sharedMem); // Remove extra send right + originalMD->memoryReferenceRelease(memRef); return originalMD; } IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor; - typePersMDData initData = { originalMD, sharedMem }; + IOMDPersistentInitData initData = { originalMD, memRef }; if (self && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) { @@ -443,52 +1255,6 @@ IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescrip return self; } -void *IOGeneralMemoryDescriptor::createNamedEntry() -{ - kern_return_t error; - ipc_port_t sharedMem; - - IOOptionBits type = _flags & kIOMemoryTypeMask; - - user_addr_t range0Addr; - IOByteCount range0Len; - getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0); - range0Addr = trunc_page_64(range0Addr); - - vm_size_t size = ptoa_32(_pages); - vm_address_t kernelPage = (vm_address_t) range0Addr; - - vm_map_t theMap = ((_task == kernel_task) - && (kIOMemoryBufferPageable & _flags)) - ? IOPageableMapForAddress(kernelPage) - : get_task_map(_task); - - memory_object_size_t actualSize = size; - vm_prot_t prot = VM_PROT_READ; - if (kIODirectionOut != (kIODirectionOutIn & _flags)) - prot |= VM_PROT_WRITE; - - if (_memEntry) - prot |= MAP_MEM_NAMED_REUSE; - - error = mach_make_memory_entry_64(theMap, - &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry); - - if (KERN_SUCCESS == error) { - if (actualSize == size) { - return sharedMem; - } else { -#if IOASSERT - IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n", - (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size); -#endif - ipc_port_release_send( sharedMem ); - } - } - - return MACH_PORT_NULL; -} - #ifndef __LP64__ bool IOGeneralMemoryDescriptor::initWithAddress(void * address, @@ -608,7 +1374,7 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, // arguments to this function. 
if (kIOMemoryTypePersistentMD == type) { - typePersMDData *initData = (typePersMDData *) buffers; + IOMDPersistentInitData *initData = (typeof(initData)) buffers; const IOGeneralMemoryDescriptor *orig = initData->fMD; ioGMDData *dataP = getDataP(orig->_memoryEntries); @@ -617,7 +1383,7 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, if ( !(orig->_flags & kIOMemoryPersistent) || !dataP) return false; - _memEntry = initData->fMemEntry; // Grab the new named entry + _memRef = initData->fMemRef; // Grab the new named entry options = orig->_flags & ~kIOMemoryAsReference; type = options & kIOMemoryTypeMask; buffers = orig->_ranges.v; @@ -684,13 +1450,17 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, IODelete(_ranges.v, IOVirtualRange, _rangesCount); } - if (_memEntry) + options |= (kIOMemoryRedirected & _flags); + if (!(kIOMemoryRedirected & options)) { - ipc_port_release_send((ipc_port_t) _memEntry); - _memEntry = 0; + if (_memRef) + { + memoryReferenceRelease(_memRef); + _memRef = 0; + } + if (_mappings) + _mappings->flushCollection(); } - if (_mappings) - _mappings->flushCollection(); } else { if (!super::init()) @@ -707,12 +1477,6 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, gIOSystemMapper = mapper = IOMapper::gSystem; } - // Temp binary compatibility for kIOMemoryThreadSafe - if (kIOMemoryReserved6156215 & options) - { - options &= ~kIOMemoryReserved6156215; - options |= kIOMemoryThreadSafe; - } // Remove the dynamic internal use flags from the initial setting options &= ~(kIOMemoryPreparedReadOnly); _flags = options; @@ -843,19 +1607,18 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, // Find starting address within the vector of ranges Ranges vec = _ranges; - UInt32 length = 0; - UInt32 pages = 0; - for (unsigned ind = 0; ind < count; ind++) { - user_addr_t addr; - IOPhysicalLength len; + mach_vm_size_t totalLength = 0; + unsigned int ind, pages = 0; + for (ind = 0; ind < count; ind++) { + mach_vm_address_t addr; + mach_vm_address_t endAddr; + mach_vm_size_t len; // addr & len are returned by this function getAddrLenForInd(addr, len, type, vec, ind); - pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr)); - len += length; - assert(len >= length); // Check for 32 bit wrap around - length = len; - + if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break; + if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break; + if (os_add_overflow(totalLength, len, &totalLength)) break; if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { ppnum_t highPage = atop_64(addr + len - 1); @@ -863,7 +1626,10 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, _highestPage = highPage; } } - _length = length; + if ((ind < count) + || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */ + + _length = totalLength; _pages = pages; _rangesCount = count; @@ -873,14 +1639,21 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, _wireCount++; // Physical MDs are, by definition, wired else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */ ioGMDData *dataP; - unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2); + unsigned dataSize; + + if (_pages > atop_64(max_mem)) return false; + dataSize = computeDataSize(_pages, /* upls */ count * 2); if (!initMemoryEntries(dataSize, mapper)) return false; dataP = getDataP(_memoryEntries); dataP->fPageCnt = _pages; - if ( (kIOMemoryPersistent & _flags) && !_memEntry) - _memEntry = 
createNamedEntry(); + if ( (kIOMemoryPersistent & _flags) && !_memRef) + { + IOReturn + err = memoryReferenceCreate(0, &_memRef); + if (kIOReturnSuccess != err) return false; + } if ((_flags & kIOMemoryAutoPrepare) && prepare() != kIOReturnSuccess) @@ -911,7 +1684,7 @@ void IOGeneralMemoryDescriptor::free() ioGMDData * dataP; if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) { - dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages); + dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength); dataP->fMappedBase = 0; } } @@ -949,11 +1722,8 @@ void IOGeneralMemoryDescriptor::free() reserved = NULL; } - if (_memEntry) - ipc_port_release_send( (ipc_port_t) _memEntry ); - - if (_prepareLock) - IOLockFree(_prepareLock); + if (_memRef) memoryReferenceRelease(_memRef); + if (_prepareLock) IOLockFree(_prepareLock); super::free(); } @@ -1005,6 +1775,9 @@ IOOptionBits IOMemoryDescriptor::getTag( void ) } #ifndef __LP64__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + // @@@ gvdl: who is using this API? Seems like a wierd thing to implement. IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length ) @@ -1018,6 +1791,9 @@ IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used } + +#pragma clang diagnostic pop + #endif /* !__LP64__ */ IOByteCount IOMemoryDescriptor::readBytes @@ -1027,9 +1803,10 @@ IOByteCount IOMemoryDescriptor::readBytes IOByteCount remaining; // Assert that this entire I/O is withing the available range - assert(offset < _length); + assert(offset <= _length); assert(offset + length <= _length); - if (offset >= _length) { + if ((offset >= _length) + || ((offset + length) > _length)) { return 0; } @@ -1066,18 +1843,21 @@ IOByteCount IOMemoryDescriptor::readBytes } IOByteCount IOMemoryDescriptor::writeBytes - (IOByteCount offset, const void *bytes, IOByteCount length) + (IOByteCount inoffset, const void *bytes, IOByteCount length) { addr64_t srcAddr = CAST_DOWN(addr64_t, bytes); IOByteCount remaining; + IOByteCount offset = inoffset; // Assert that this entire I/O is withing the available range - assert(offset < _length); + assert(offset <= _length); assert(offset + length <= _length); assert( !(kIOMemoryPreparedReadOnly & _flags) ); - if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) { + if ( (kIOMemoryPreparedReadOnly & _flags) + || (offset >= _length) + || ((offset + length) > _length)) { return 0; } @@ -1097,10 +1877,13 @@ IOByteCount IOMemoryDescriptor::writeBytes if (dstLen > remaining) dstLen = remaining; - copypv(srcAddr, (addr64_t) dstAddr64, dstLen, - cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); - - srcAddr += dstLen; + if (!srcAddr) bzero_phys(dstAddr64, dstLen); + else + { + copypv(srcAddr, (addr64_t) dstAddr64, dstLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); + srcAddr += dstLen; + } offset += dstLen; remaining -= dstLen; } @@ -1110,12 +1893,11 @@ IOByteCount IOMemoryDescriptor::writeBytes assert(!remaining); + if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length); + return length - remaining; } -// osfmk/device/iokit_rpc.c -extern "C" unsigned int IODefaultCacheBits(addr64_t pa); - #ifndef __LP64__ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) { @@ -1165,11 +1947,7 @@ void IOMemoryDescriptor::setPreparationID( void ) { 
if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) { -#if defined(__ppc__ ) - reserved->preparationID = gIOMDPreparationID++; -#else reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID); -#endif } } @@ -1181,6 +1959,26 @@ uint64_t IOMemoryDescriptor::getPreparationID( void ) return (kIOPreparationIDUnsupported); } +void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag) +{ + if (!getKernelReserved()) return; + reserved->kernelTag = kernelTag; + reserved->userTag = userTag; +} + +vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map) +{ + if (!reserved + || (VM_KERN_MEMORY_NONE == reserved->kernelTag) + || (VM_KERN_MEMORY_NONE == reserved->userTag)) + { + return (IOMemoryTag(map)); + } + + if (vm_kernel_map_is_kernel(map)) return (reserved->kernelTag); + return (reserved->userTag); +} + IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const { IOReturn err = kIOReturnSuccess; @@ -1203,35 +2001,37 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * if (_memoryEntries && data->fMapper) { - bool remap = false; - bool whole = ((data->fOffset == 0) && (data->fLength == _length)); + bool remap, keepMap; dataP = getDataP(_memoryEntries); - if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) - { - dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits; - remap = ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); - } - if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) - { - dataP->fDMAMapAlignment = data->fMapSpec.alignment; - remap |= (dataP->fDMAMapAlignment > page_size); - } - remap |= (!whole); + + if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits; + if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment; + + keepMap = (data->fMapper == gIOSystemMapper); + keepMap &= ((data->fOffset == 0) && (data->fLength == _length)); + + remap = (!keepMap); + remap |= (dataP->fDMAMapNumAddressBits < 64) + && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); + remap |= (dataP->fDMAMapAlignment > page_size); + if (remap || !dataP->fMappedBase) { // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params); - err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount); - if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase) + err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase) { - dataP->fMappedBase = data->fAlloc; - data->fAllocCount = 0; // IOMD owns the alloc now + dataP->fMappedBase = data->fAlloc; + dataP->fMappedLength = data->fAllocLength; + data->fAllocLength = 0; // IOMD owns the alloc now } } else { data->fAlloc = dataP->fMappedBase; - data->fAllocCount = 0; // IOMD owns the alloc + data->fAllocLength = 0; // give out IOMD map } + data->fMapContig = !dataP->fDiscontig; } return (err); @@ -1285,17 +2085,6 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * return kIOReturnSuccess; -#if IOMD_DEBUG_DMAACTIVE - } else if (kIOMDDMAActive == op) { - if (params) OSIncrementAtomic(&md->__iomd_reservedA); - else { - if (md->__iomd_reservedA) - 
OSDecrementAtomic(&md->__iomd_reservedA); - else - panic("kIOMDSetDMAInactive"); - } -#endif /* IOMD_DEBUG_DMAACTIVE */ - } else if (kIOMDWalkSegments != op) return kIOReturnBadArgument; @@ -1330,7 +2119,7 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * bzero(&mapSpec, sizeof(mapSpec)); mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; mapSpec.alignment = dataP->fDMAMapAlignment; - err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL); + err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength); if (kIOReturnSuccess != err) return (err); } } @@ -1523,11 +2312,11 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * addr64_t IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) { - IOReturn ret; - addr64_t address = 0; - IOByteCount length = 0; - IOMapper * mapper = gIOSystemMapper; - IOOptionBits type = _flags & kIOMemoryTypeMask; + IOReturn ret; + mach_vm_address_t address = 0; + mach_vm_size_t length = 0; + IOMapper * mapper = gIOSystemMapper; + IOOptionBits type = _flags & kIOMemoryTypeMask; if (lengthOfSegment) *lengthOfSegment = 0; @@ -1544,7 +2333,7 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l { unsigned rangesIndex = 0; Ranges vec = _ranges; - user_addr_t addr; + mach_vm_address_t addr; // Find starting address within the vector of ranges for (;;) { @@ -1561,8 +2350,8 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l length -= offset; for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) { - user_addr_t newAddr; - IOPhysicalLength newLen; + mach_vm_address_t newAddr; + mach_vm_size_t newLen; getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); if (addr + length != newAddr) @@ -1603,10 +2392,10 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l addr64_t origAddr = address; IOByteCount origLen = length; - address = mapper->mapAddr(origAddr); + address = mapper->mapToPhysicalAddress(origAddr); length = page_size - (address & (page_size - 1)); while ((length < origLen) - && ((address + length) == mapper->mapAddr(origAddr + length))) + && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) length += page_size; if (length > origLen) length = origLen; @@ -1624,6 +2413,9 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l } #ifndef __LP64__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + addr64_t IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) { @@ -1644,6 +2436,7 @@ IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOf return (address); } +#pragma clang diagnostic pop addr64_t IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) @@ -1690,11 +2483,11 @@ IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *length { IOByteCount origLen; - phys64 = mapper->mapAddr(phys32); + phys64 = mapper->mapToPhysicalAddress(phys32); origLen = *lengthOfSegment; length = page_size - (phys64 & (page_size - 1)); while ((length < origLen) - && ((phys64 + length) == mapper->mapAddr(phys32 + length))) + && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) length += page_size; if (length > origLen) length = origLen; 
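
For context, a minimal sketch (not part of this patch) of how a driver typically consumes the API these hunks rework: walking the physically contiguous runs that getPhysicalSegment() returns after the descriptor has been wired. The helper name and the choice of kIOMemoryMapperNone are illustrative assumptions, not code from this change.

#include <IOKit/IOMemoryDescriptor.h>

// Illustrative only: iterate each physically contiguous run of a wired descriptor.
static IOReturn
walkPhysicalSegments(IOMemoryDescriptor * md)
{
    IOReturn ret = md->prepare(kIODirectionOutIn);      // wire the pages first
    if (kIOReturnSuccess != ret) return (ret);

    IOByteCount offset = 0;
    while (offset < md->getLength())
    {
        IOByteCount segLen = 0;
        // kIOMemoryMapperNone asks for the CPU physical address rather than a
        // system-mapper (IOMMU/DART) translated address.
        addr64_t segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
        if (!segAddr || !segLen) break;
        // ... hand (segAddr, segLen) to the DMA engine here ...
        offset += segLen;
    }

    md->complete(kIODirectionOutIn);                     // balance prepare()
    return (kIOReturnSuccess);
}
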
@@ -1719,6 +2512,9 @@ IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *len return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment)); } +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, IOByteCount * lengthOfSegment) { @@ -1729,6 +2525,7 @@ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, return 0; } +#pragma clang diagnostic pop #endif /* !__LP64__ */ IOReturn @@ -1758,80 +2555,29 @@ IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt data IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData; IOByteCount offset = (IOByteCount) data->fOffset; - IOPhysicalLength length; - if (data->fMapped && IOMapper::gSystem) - data->fIOVMAddr = md->getPhysicalSegment(offset, &length); - else - data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone); - data->fLength = length; - } - else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported; - else if (kIOMDDMAMap == op) - { - if (dataSize < sizeof(IOMDDMAMapArgs)) - return kIOReturnUnderrun; - IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; - - if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName()); - - err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount); - return (err); - } - else return kIOReturnBadArgument; - - return kIOReturnSuccess; -} - -static IOReturn -purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state) -{ - IOReturn err = kIOReturnSuccess; - - *control = VM_PURGABLE_SET_STATE; - switch (newState) - { - case kIOMemoryPurgeableKeepCurrent: - *control = VM_PURGABLE_GET_STATE; - break; - - case kIOMemoryPurgeableNonVolatile: - *state = VM_PURGABLE_NONVOLATILE; - break; - case kIOMemoryPurgeableVolatile: - *state = VM_PURGABLE_VOLATILE; - break; - case kIOMemoryPurgeableEmpty: - *state = VM_PURGABLE_EMPTY; - break; - default: - err = kIOReturnBadArgument; - break; + IOPhysicalLength length; + if (data->fMapped && IOMapper::gSystem) + data->fIOVMAddr = md->getPhysicalSegment(offset, &length); + else + data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone); + data->fLength = length; } - return (err); -} + else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported; + else if (kIOMDDMAMap == op) + { + if (dataSize < sizeof(IOMDDMAMapArgs)) + return kIOReturnUnderrun; + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; -static IOReturn -purgeableStateBits(int * state) -{ - IOReturn err = kIOReturnSuccess; + if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName()); - switch (*state) - { - case VM_PURGABLE_NONVOLATILE: - *state = kIOMemoryPurgeableNonVolatile; - break; - case VM_PURGABLE_VOLATILE: - *state = kIOMemoryPurgeableVolatile; - break; - case VM_PURGABLE_EMPTY: - *state = kIOMemoryPurgeableEmpty; - break; - default: - *state = kIOMemoryPurgeableNonVolatile; - err = kIOReturnNotReady; - break; + data->fMapContig = true; + err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + return (err); } - return (err); + else return kIOReturnBadArgument; + + return kIOReturnSuccess; } IOReturn @@ -1839,10 +2585,11 @@ IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, IOOptionBits * 
oldState ) { IOReturn err = kIOReturnSuccess; + vm_purgable_t control; int state; - if (_memEntry) + if (_memRef) { err = super::setPurgeable(newState, oldState); } @@ -1859,14 +2606,19 @@ IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, err = kIOReturnNotReady; break; } + else if (!_task) + { + err = kIOReturnUnsupported; + break; + } else curMap = get_task_map(_task); // can only do one range Ranges vec = _ranges; IOOptionBits type = _flags & kIOMemoryTypeMask; - user_addr_t addr; - IOByteCount len; + mach_vm_address_t addr; + mach_vm_size_t len; getAddrLenForInd(addr, len, type, vec, 0); err = purgeableControlBits(newState, &control, &state); @@ -1886,46 +2638,47 @@ IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, if (kIOMemoryThreadSafe & _flags) UNLOCK; } + return (err); } IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState, IOOptionBits * oldState ) { - IOReturn err = kIOReturnSuccess; - vm_purgable_t control; - int state; + IOReturn err = kIOReturnNotReady; - if (kIOMemoryThreadSafe & _flags) - LOCK; + if (kIOMemoryThreadSafe & _flags) LOCK; + if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState); + if (kIOMemoryThreadSafe & _flags) UNLOCK; - do + return (err); +} + +IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount ) +{ + IOReturn err = kIOReturnNotReady; + + if (kIOMemoryThreadSafe & _flags) LOCK; + if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount); + else { - if (!_memEntry) - { - err = kIOReturnNotReady; - break; - } - err = purgeableControlBits(newState, &control, &state); - if (kIOReturnSuccess != err) - break; - err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state); - if (oldState) - { - if (kIOReturnSuccess == err) - { - err = purgeableStateBits(&state); - *oldState = state; - } + IOMultiMemoryDescriptor * mmd; + IOSubMemoryDescriptor * smd; + if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) + { + err = smd->getPageCounts(residentPageCount, dirtyPageCount); + } + else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) + { + err = mmd->getPageCounts(residentPageCount, dirtyPageCount); } } - while (false); - - if (kIOMemoryThreadSafe & _flags) - UNLOCK; + if (kIOMemoryThreadSafe & _flags) UNLOCK; return (err); } + extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); @@ -2012,9 +2765,15 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, return (remaining ? 
kIOReturnUnderrun : kIOReturnSuccess); } +/* + * + */ + #if defined(__i386__) || defined(__x86_64__) -extern vm_offset_t first_avail; -#define io_kernel_static_end first_avail + +#define io_kernel_static_start vm_kernel_stext +#define io_kernel_static_end vm_kernel_etext + #else #error io_kernel_static_end is undefined for this architecture #endif @@ -2023,7 +2782,7 @@ static kern_return_t io_get_kernel_static_upl( vm_map_t /* map */, uintptr_t offset, - vm_size_t *upl_size, + upl_size_t *upl_size, upl_t *upl, upl_page_info_array_t page_list, unsigned int *count, @@ -2045,7 +2804,7 @@ io_get_kernel_static_upl( if (!phys) break; page_list[page].phys_addr = phys; - page_list[page].pageout = 0; + page_list[page].free_when_done = 0; page_list[page].absent = 0; page_list[page].dirty = 0; page_list[page].precious = 0; @@ -2059,36 +2818,62 @@ io_get_kernel_static_upl( return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError); } +/* + * + */ +#if IOTRACKING +static void +IOMemoryDescriptorUpdateWireOwner(ioGMDData * dataP, OSData * memoryEntries, vm_tag_t tag) +{ + ioPLBlock *ioplList; + UInt ind, count; + vm_tag_t prior; + + count = getNumIOPL(memoryEntries, dataP); + if (!count) return; + ioplList = getIOPLList(dataP); + + if (VM_KERN_MEMORY_NONE == tag) tag = dataP->fAllocTag; + assert(VM_KERN_MEMORY_NONE != tag); + + for (ind = 0; ind < count; ind++) + { + if (!ioplList[ind].fIOPL) continue; + prior = iopl_set_tag(ioplList[ind].fIOPL, tag); + if (VM_KERN_MEMORY_NONE == dataP->fAllocTag) dataP->fAllocTag = prior; +#if 0 + if (tag != prior) + { + char name[2][48]; + vm_tag_get_kext(prior, &name[0][0], sizeof(name[0])); + vm_tag_get_kext(tag, &name[1][0], sizeof(name[1])); + IOLog("switched %48s to %48s\n", name[0], name[1]); + } +#endif + } +} +#endif /* IOTRACKING */ + + IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) { IOOptionBits type = _flags & kIOMemoryTypeMask; - IOReturn error = kIOReturnCannotWire; + IOReturn error = kIOReturnSuccess; ioGMDData *dataP; upl_page_info_array_t pageInfo; - ppnum_t mapBase = 0; - ipc_port_t sharedMem = (ipc_port_t) _memEntry; + ppnum_t mapBase; - assert(!_wireCount); assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type); - if (_pages > gIOMaximumMappedIOPageCount) - return kIOReturnNoResources; + if ((kIODirectionOutIn & forDirection) == kIODirectionNone) + forDirection = (IODirection) (forDirection | getDirection()); - dataP = getDataP(_memoryEntries); - IOMapper *mapper; - mapper = dataP->fMapper; - dataP->fMappedBase = 0; - - if (forDirection == kIODirectionNone) - forDirection = getDirection(); - - int uplFlags; // This Mem Desc's default flags for upl creation + upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation switch (kIODirectionOutIn & forDirection) { case kIODirectionOut: // Pages do not need to be marked as dirty on commit uplFlags = UPL_COPYOUT_FROM; - _flags |= kIOMemoryPreparedReadOnly; break; case kIODirectionIn: @@ -2096,163 +2881,217 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) uplFlags = 0; // i.e. 
~UPL_COPYOUT_FROM break; } - uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; + dataP = getDataP(_memoryEntries); -#ifdef UPL_NEED_32BIT_ADDR - if (kIODirectionPrepareToPhys32 & forDirection) + if (kIODirectionDMACommand & forDirection) assert(_wireCount); + + if (_wireCount) { - if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR; - if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32; + if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) + { + OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this)); + error = kIOReturnNotWritable; + } } -#endif - - // Note that appendBytes(NULL) zeros the data up to the desired length. - _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t)); - dataP = 0; - - // Find the appropriate vm_map for the given task - vm_map_t curMap; - if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) - curMap = 0; else - { curMap = get_task_map(_task); } - - // Iterate over the vector of virtual ranges - Ranges vec = _ranges; - unsigned int pageIndex = 0; - IOByteCount mdOffset = 0; - ppnum_t highestPage = 0; + { + IOMapper *mapper; + mapper = dataP->fMapper; + dataP->fMappedBase = 0; - for (UInt range = 0; range < _rangesCount; range++) { - ioPLBlock iopl; - user_addr_t startPage; - IOByteCount numBytes; - ppnum_t highPage = 0; - - // Get the startPage address and length of vec[range] - getAddrLenForInd(startPage, numBytes, type, vec, range); - iopl.fPageOffset = startPage & PAGE_MASK; - numBytes += iopl.fPageOffset; - startPage = trunc_page_64(startPage); - - if (mapper) - iopl.fMappedPage = mapBase + pageIndex; - else - iopl.fMappedPage = 0; - - // Iterate over the current range, creating UPLs - while (numBytes) { - vm_address_t kernelStart = (vm_address_t) startPage; - vm_map_t theMap; - if (curMap) - theMap = curMap; - else if (!sharedMem) { - assert(_task == kernel_task); - theMap = IOPageableMapForAddress(kernelStart); - } - else - theMap = NULL; + uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; + uplFlags |= UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map)); - int ioplFlags = uplFlags; - dataP = getDataP(_memoryEntries); - pageInfo = getPageList(dataP); - upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; - - vm_size_t ioplSize = round_page(numBytes); - unsigned int numPageInfo = atop_32(ioplSize); - - if (theMap == kernel_map && kernelStart < io_kernel_static_end) { - error = io_get_kernel_static_upl(theMap, - kernelStart, - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &highPage); - } - else if (sharedMem) { - error = memory_object_iopl_request(sharedMem, - ptoa_32(pageIndex), - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &ioplFlags); - } - else { - assert(theMap); - error = vm_map_create_upl(theMap, - startPage, - (upl_size_t*)&ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &ioplFlags); - } + if (kIODirectionPrepareToPhys32 & forDirection) + { + if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR; + if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32; + } + if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT; + if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO; + if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY; + + mapBase = 0; + + // Note that appendBytes(NULL) zeros the data up to the desired length + // and the length parameter is an unsigned int + size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); + if (uplPageSize 
> ((unsigned int)uplPageSize)) return (kIOReturnNoMemory); + if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory); + dataP = 0; + + // Find the appropriate vm_map for the given task + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0; + else curMap = get_task_map(_task); + + // Iterate over the vector of virtual ranges + Ranges vec = _ranges; + unsigned int pageIndex = 0; + IOByteCount mdOffset = 0; + ppnum_t highestPage = 0; + + IOMemoryEntry * memRefEntry = 0; + if (_memRef) memRefEntry = &_memRef->entries[0]; + + for (UInt range = 0; range < _rangesCount; range++) { + ioPLBlock iopl; + mach_vm_address_t startPage; + mach_vm_size_t numBytes; + ppnum_t highPage = 0; + + // Get the startPage address and length of vec[range] + getAddrLenForInd(startPage, numBytes, type, vec, range); + iopl.fPageOffset = startPage & PAGE_MASK; + numBytes += iopl.fPageOffset; + startPage = trunc_page_64(startPage); + + if (mapper) + iopl.fMappedPage = mapBase + pageIndex; + else + iopl.fMappedPage = 0; + + // Iterate over the current range, creating UPLs + while (numBytes) { + vm_address_t kernelStart = (vm_address_t) startPage; + vm_map_t theMap; + if (curMap) theMap = curMap; + else if (_memRef) + { + theMap = NULL; + } + else + { + assert(_task == kernel_task); + theMap = IOPageableMapForAddress(kernelStart); + } - assert(ioplSize); - if (error != KERN_SUCCESS) - goto abortExit; + // ioplFlags is an in/out parameter + upl_control_flags_t ioplFlags = uplFlags; + dataP = getDataP(_memoryEntries); + pageInfo = getPageList(dataP); + upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; + + mach_vm_size_t _ioplSize = round_page(numBytes); + upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES; + unsigned int numPageInfo = atop_32(ioplSize); + + if ((theMap == kernel_map) + && (kernelStart >= io_kernel_static_start) + && (kernelStart < io_kernel_static_end)) { + error = io_get_kernel_static_upl(theMap, + kernelStart, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &highPage); + } + else if (_memRef) { + memory_object_offset_t entryOffset; + + entryOffset = mdOffset; + entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset); + if (entryOffset >= memRefEntry->size) { + memRefEntry++; + if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry"); + entryOffset = 0; + } + if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset); + error = memory_object_iopl_request(memRefEntry->entry, + entryOffset, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags); + } + else { + assert(theMap); + error = vm_map_create_upl(theMap, + startPage, + (upl_size_t*)&ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags); + } - if (iopl.fIOPL) - highPage = upl_get_highest_page(iopl.fIOPL); - if (highPage > highestPage) - highestPage = highPage; + if (error != KERN_SUCCESS) goto abortExit; - error = kIOReturnCannotWire; + assert(ioplSize); - if (baseInfo->device) { - numPageInfo = 1; - iopl.fFlags = kIOPLOnDevice; - } - else { - iopl.fFlags = 0; - } + if (iopl.fIOPL) + highPage = upl_get_highest_page(iopl.fIOPL); + if (highPage > highestPage) + highestPage = highPage; - iopl.fIOMDOffset = mdOffset; - iopl.fPageInfo = pageIndex; + if (baseInfo->device) { + numPageInfo = 1; + iopl.fFlags = kIOPLOnDevice; + } + else { + iopl.fFlags = 0; + } -#if 0 - // used to remove the upl for auto prepares here, for some errant code - // that freed 
memory before the descriptor pointing at it - if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL) - { - upl_commit(iopl.fIOPL, 0, 0); - upl_deallocate(iopl.fIOPL); - iopl.fIOPL = 0; - } -#endif + iopl.fIOMDOffset = mdOffset; + iopl.fPageInfo = pageIndex; + if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true; - if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { - // Clean up partial created and unsaved iopl - if (iopl.fIOPL) { - upl_abort(iopl.fIOPL, 0); - upl_deallocate(iopl.fIOPL); + if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { + // Clean up partial created and unsaved iopl + if (iopl.fIOPL) { + upl_abort(iopl.fIOPL, 0); + upl_deallocate(iopl.fIOPL); + } + goto abortExit; + } + dataP = 0; + + // Check for a multiple iopl's in one virtual range + pageIndex += numPageInfo; + mdOffset -= iopl.fPageOffset; + if (ioplSize < numBytes) { + numBytes -= ioplSize; + startPage += ioplSize; + mdOffset += ioplSize; + iopl.fPageOffset = 0; + if (mapper) iopl.fMappedPage = mapBase + pageIndex; + } + else { + mdOffset += numBytes; + break; } - goto abortExit; - } - dataP = 0; - - // Check for a multiple iopl's in one virtual range - pageIndex += numPageInfo; - mdOffset -= iopl.fPageOffset; - if (ioplSize < numBytes) { - numBytes -= ioplSize; - startPage += ioplSize; - mdOffset += ioplSize; - iopl.fPageOffset = 0; - if (mapper) iopl.fMappedPage = mapBase + pageIndex; - } - else { - mdOffset += numBytes; - break; } } + + _highestPage = highestPage; + + if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly; } - _highestPage = highestPage; +#if IOTRACKING + if (kIOReturnSuccess == error) + { + vm_tag_t tag; + + dataP = getDataP(_memoryEntries); + if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift; + else tag = IOMemoryTag(kernel_map); + + if (!_wireCount) vm_tag_set_init(&dataP->fWireTags, kMaxWireTags); + vm_tag_set_enter(&dataP->fWireTags, kMaxWireTags, tag); - return kIOReturnSuccess; + IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag); + if (!_wireCount) + { + //if (!(_flags & kIOMemoryAutoPrepare)) + IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false); + } + } +#endif /* IOTRACKING */ + + return (error); abortExit: { @@ -2272,6 +3111,8 @@ abortExit: if (error == KERN_FAILURE) error = kIOReturnCannotWire; + else if (error == KERN_MEMORY_ERROR) + error = kIOReturnNoResources; return error; } @@ -2302,112 +3143,60 @@ bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper dataP->fDMAMapNumAddressBits = 64; dataP->fDMAMapAlignment = 0; dataP->fPreparationID = kIOPreparationIDUnprepared; + dataP->fDiscontig = false; + dataP->fCompletionError = false; return (true); } IOReturn IOMemoryDescriptor::dmaMap( IOMapper * mapper, + IODMACommand * command, const IODMAMapSpecification * mapSpec, uint64_t offset, uint64_t length, - uint64_t * address, - ppnum_t * mapPages) + uint64_t * mapAddress, + uint64_t * mapLength) { - IOMDDMAWalkSegmentState walkState; - IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState; - IOOptionBits mdOp; - IOReturn ret; - IOPhysicalLength segLen; - addr64_t phys, align, pageOffset; - ppnum_t base, pageIndex, pageCount; - uint64_t index; - uint32_t mapOptions = 0; + IOReturn ret; + uint32_t mapOptions; + mapOptions = 0; + mapOptions |= kIODMAMapReadAccess; if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; - walkArgs->fMapped = 
false; - mdOp = kIOMDFirstSegment; - pageCount = 0; - for (index = 0; index < length; ) - { - if (index && (page_mask & (index + pageOffset))) break; - - walkArgs->fOffset = offset + index; - ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState)); - mdOp = kIOMDWalkSegments; - if (ret != kIOReturnSuccess) break; - phys = walkArgs->fIOVMAddr; - segLen = walkArgs->fLength; - - align = (phys & page_mask); - if (!index) pageOffset = align; - else if (align) break; - pageCount += atop_64(round_page_64(align + segLen)); - index += segLen; - } - - if (index < length) return (kIOReturnVMError); - - base = mapper->iovmMapMemory(this, offset, pageCount, - mapOptions, NULL, mapSpec); - - if (!base) return (kIOReturnNoResources); - - mdOp = kIOMDFirstSegment; - for (pageIndex = 0, index = 0; index < length; ) - { - walkArgs->fOffset = offset + index; - ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState)); - mdOp = kIOMDWalkSegments; - if (ret != kIOReturnSuccess) break; - phys = walkArgs->fIOVMAddr; - segLen = walkArgs->fLength; - - ppnum_t page = atop_64(phys); - ppnum_t count = atop_64(round_page_64(phys + segLen)) - page; - while (count--) - { - mapper->iovmInsert(base, pageIndex, page); - page++; - pageIndex++; - } - index += segLen; - } - if (pageIndex != pageCount) panic("pageIndex"); - - *address = ptoa_64(base) + pageOffset; - if (mapPages) *mapPages = pageCount; + ret = mapper->iovmMapMemory(this, offset, length, mapOptions, + mapSpec, command, NULL, mapAddress, mapLength); - return (kIOReturnSuccess); + return (ret); } IOReturn IOGeneralMemoryDescriptor::dmaMap( IOMapper * mapper, + IODMACommand * command, const IODMAMapSpecification * mapSpec, uint64_t offset, uint64_t length, - uint64_t * address, - ppnum_t * mapPages) + uint64_t * mapAddress, + uint64_t * mapLength) { IOReturn err = kIOReturnSuccess; ioGMDData * dataP; IOOptionBits type = _flags & kIOMemoryTypeMask; - *address = 0; + *mapAddress = 0; if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess); if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64) || offset || (length != _length)) { - err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages); + err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength); } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) { const ioPLBlock * ioplList = getIOPLList(dataP); upl_page_info_t * pageList; uint32_t mapOptions = 0; - ppnum_t base; IODMAMapSpecification mapSpec; bzero(&mapSpec, sizeof(mapSpec)); @@ -2421,18 +3210,27 @@ IOReturn IOGeneralMemoryDescriptor::dmaMap( pageList = (upl_page_info_t *) ioplList->fPageInfo; mapOptions |= kIODMAMapPagingPath; } - else - pageList = getPageList(dataP); + else pageList = getPageList(dataP); - if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; + if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) + { + mapOptions |= kIODMAMapPageListFullyOccupied; + } + + mapOptions |= kIODMAMapReadAccess; + if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; // Check for direct device non-paged memory if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous; - base = mapper->iovmMapMemory( - this, offset, _pages, mapOptions, &pageList[0], &mapSpec); - *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK); - if (mapPages) *mapPages = _pages; + IODMAMapPageList dmaPageList = + { + .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask), + .pageListCount = 
_pages, + .pageList = &pageList[0] + }; + err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec, + command, &dmaPageList, mapAddress, mapLength); } return (err); @@ -2450,33 +3248,37 @@ IOReturn IOGeneralMemoryDescriptor::dmaMap( IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) { - IOReturn error = kIOReturnSuccess; + IOReturn error = kIOReturnSuccess; IOOptionBits type = _flags & kIOMemoryTypeMask; if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) return kIOReturnSuccess; - if (_prepareLock) - IOLockLock(_prepareLock); + if (_prepareLock) IOLockLock(_prepareLock); - if (!_wireCount - && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) { + if (kIODirectionDMACommand & forDirection) + { +#if IOMD_DEBUG_DMAACTIVE + OSIncrementAtomic(&__iomd_reservedA); +#endif /* IOMD_DEBUG_DMAACTIVE */ + } + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { error = wireVirtual(forDirection); } - if (kIOReturnSuccess == error) + if ((kIOReturnSuccess == error) && !(kIODirectionDMACommand & forDirection)) { - if (1 == ++_wireCount) - { - if (kIOMemoryClearEncrypt & _flags) - { - performOperation(kIOMemoryClearEncrypted, 0, _length); - } - } + if (1 == ++_wireCount) + { + if (kIOMemoryClearEncrypt & _flags) + { + performOperation(kIOMemoryClearEncrypted, 0, _length); + } + } } - if (_prepareLock) - IOLockUnlock(_prepareLock); + if (_prepareLock) IOLockUnlock(_prepareLock); return error; } @@ -2490,60 +3292,106 @@ IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) * before and after an I/O transfer involving pageable memory. */ -IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */) +IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection) { IOOptionBits type = _flags & kIOMemoryTypeMask; + ioGMDData * dataP; if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) return kIOReturnSuccess; - if (_prepareLock) - IOLockLock(_prepareLock); + if (_prepareLock) IOLockLock(_prepareLock); + do + { + assert(_wireCount); + if (!_wireCount) break; + dataP = getDataP(_memoryEntries); + if (!dataP) break; - assert(_wireCount); +#if IOMD_DEBUG_DMAACTIVE + if (kIODirectionDMACommand & forDirection) + { + if (__iomd_reservedA) OSDecrementAtomic(&__iomd_reservedA); + else panic("kIOMDSetDMAInactive"); + } +#endif /* IOMD_DEBUG_DMAACTIVE */ +#if IOTRACKING + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { + vm_tag_t tag; + + if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift; + else tag = IOMemoryTag(kernel_map); + vm_tag_set_remove(&dataP->fWireTags, kMaxWireTags, tag, &tag); + IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag); + } + if (kIODirectionDMACommand & forDirection) break; +#endif /* IOTRACKING */ + + if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true; - if (_wireCount) - { if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) { performOperation(kIOMemorySetEncrypted, 0, _length); } - _wireCount--; - if (!_wireCount) - { - IOOptionBits type = _flags & kIOMemoryTypeMask; - ioGMDData * dataP = getDataP(_memoryEntries); - ioPLBlock *ioplList = getIOPLList(dataP); - UInt count = getNumIOPL(_memoryEntries, dataP); + _wireCount--; + if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) + { + ioPLBlock *ioplList 
= getIOPLList(dataP); + UInt ind, count = getNumIOPL(_memoryEntries, dataP); + if (_wireCount) + { + // kIODirectionCompleteWithDataValid & forDirection + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { + for (ind = 0; ind < count; ind++) + { + if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL); + } + } + } + else + { #if IOMD_DEBUG_DMAACTIVE - if (__iomd_reservedA) panic("complete() while dma active"); + if (__iomd_reservedA) panic("complete() while dma active"); #endif /* IOMD_DEBUG_DMAACTIVE */ - if (dataP->fMappedBase) { - dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages); - dataP->fMappedBase = 0; - } - // Only complete iopls that we created which are for TypeVirtual - if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { - for (UInt ind = 0; ind < count; ind++) - if (ioplList[ind].fIOPL) { - upl_commit(ioplList[ind].fIOPL, 0, 0); - upl_deallocate(ioplList[ind].fIOPL); - } - } else if (kIOMemoryTypeUPL == type) { - upl_set_referenced(ioplList[0].fIOPL, false); - } + if (dataP->fMappedBase) { + dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength); + dataP->fMappedBase = 0; + } + // Only complete iopls that we created which are for TypeVirtual + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { +#if IOTRACKING + //if (!(_flags & kIOMemoryAutoPrepare)) + { + IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages)); + } +#endif /* IOTRACKING */ + for (ind = 0; ind < count; ind++) + if (ioplList[ind].fIOPL) { + if (dataP->fCompletionError) + upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/); + else + upl_commit(ioplList[ind].fIOPL, 0, 0); + upl_deallocate(ioplList[ind].fIOPL); + } + } else if (kIOMemoryTypeUPL == type) { + upl_set_referenced(ioplList[0].fIOPL, false); + } - (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() + (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() - dataP->fPreparationID = kIOPreparationIDUnprepared; - } + dataP->fPreparationID = kIOPreparationIDUnprepared; + dataP->fAllocTag = VM_KERN_MEMORY_NONE; + } + } } + while (false); - if (_prepareLock) - IOLockUnlock(_prepareLock); + if (_prepareLock) IOLockUnlock(_prepareLock); return kIOReturnSuccess; } @@ -2554,24 +3402,22 @@ IOReturn IOGeneralMemoryDescriptor::doMap( IOOptionBits options, IOByteCount __offset, IOByteCount __length ) - { #ifndef __LP64__ if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit"); #endif /* !__LP64__ */ + kern_return_t err; + IOMemoryMap * mapping = (IOMemoryMap *) *__address; mach_vm_size_t offset = mapping->fOffset + __offset; mach_vm_size_t length = mapping->fLength; - kern_return_t kr = kIOReturnVMError; - ipc_port_t sharedMem = (ipc_port_t) _memEntry; - IOOptionBits type = _flags & kIOMemoryTypeMask; Ranges vec = _ranges; - user_addr_t range0Addr = 0; - IOByteCount range0Len = 0; + mach_vm_address_t range0Addr = 0; + mach_vm_size_t range0Len = 0; if ((offset >= _length) || ((offset + length) > _length)) return( kIOReturnBadArgument ); @@ -2580,10 +3426,14 @@ IOReturn IOGeneralMemoryDescriptor::doMap( getAddrLenForInd(range0Addr, range0Len, type, vec, 0); // mapping source == dest? 
(could be much better) - if( _task - && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere) - && (1 == _rangesCount) && (0 == offset) - && range0Addr && (length <= range0Len) ) + if (_task + && (mapping->fAddressTask == _task) + && (mapping->fAddressMap == get_task_map(_task)) + && (options & kIOMapAnywhere) + && (1 == _rangesCount) + && (0 == offset) + && range0Addr + && (length <= range0Len)) { mapping->fAddress = range0Addr; mapping->fOptions |= kIOMapStatic; @@ -2591,216 +3441,131 @@ IOReturn IOGeneralMemoryDescriptor::doMap( return( kIOReturnSuccess ); } - if( 0 == sharedMem) { - - vm_size_t size = ptoa_32(_pages); + if (!_memRef) + { + IOOptionBits createOptions = 0; + if (!(kIOMapReadOnly & options)) + { + createOptions |= kIOMemoryReferenceWrite; +#if DEVELOPMENT || DEBUG + if (kIODirectionOut == (kIODirectionOutIn & _flags)) + { + OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction"); + } +#endif + } + err = memoryReferenceCreate(createOptions, &_memRef); + if (kIOReturnSuccess != err) return (err); + } - if( _task) { + memory_object_t pager; + pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0); - memory_object_size_t actualSize = size; - vm_prot_t prot = VM_PROT_READ; - if (!(kIOMapReadOnly & options)) - prot |= VM_PROT_WRITE; - else if (kIOMapDefaultCache != (options & kIOMapCacheMask)) - prot |= VM_PROT_WRITE; + // count)) { -#if IOASSERT - IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n", - _rangesCount, (UInt64)actualSize, (UInt64)size); -#endif - kr = kIOReturnVMError; - if (sharedMem) - { - ipc_port_release_send(sharedMem); - sharedMem = MACH_PORT_NULL; - } - - mach_vm_address_t address, segDestAddr; - mach_vm_size_t mapLength; - unsigned rangesIndex; - IOOptionBits type = _flags & kIOMemoryTypeMask; - user_addr_t srcAddr; - IOPhysicalLength segLen = 0; - - // Find starting address within the vector of ranges - for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) { - getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex); - if (offset < segLen) - break; - offset -= segLen; // (make offset relative) - } - - mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK); - address = trunc_page_64(mapping->fAddress); - - if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset)) - { - vm_map_t map = mapping->fAddressMap; - kr = IOMemoryDescriptorMapCopy(&map, - options, - offset, &address, round_page_64(length + pageOffset)); - if (kr == KERN_SUCCESS) - { - segDestAddr = address; - segLen -= offset; - srcAddr += offset; - mapLength = length; - - while (true) - { - vm_prot_t cur_prot, max_prot; - - if (segLen > length) segLen = length; - kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK, - VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, - get_task_map(_task), trunc_page_64(srcAddr), - FALSE /* copy */, - &cur_prot, - &max_prot, - VM_INHERIT_NONE); - if (KERN_SUCCESS == kr) - { - if ((!(VM_PROT_READ & cur_prot)) - || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot))) - { - kr = KERN_PROTECTION_FAILURE; - } - } - if (KERN_SUCCESS != kr) - break; - segDestAddr += segLen; - mapLength -= segLen; - if (!mapLength) - break; - rangesIndex++; - if (rangesIndex >= _rangesCount) - { - kr = kIOReturnBadArgument; - break; - } - getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex); - if (srcAddr & PAGE_MASK) - { - kr = kIOReturnBadArgument; - break; - } - if (segLen > mapLength) - segLen = 
mapLength; - } - if (KERN_SUCCESS != kr) - { - mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset)); - } - } + err = kIOReturnNotReadable; + break; + } - if (KERN_SUCCESS == kr) - mapping->fAddress = address + pageOffset; - else - mapping->fAddress = NULL; - } - } - while (false); - } - else do - { // _task == 0, must be physical - - memory_object_t pager; - unsigned int flags = 0; - addr64_t pa; - IOPhysicalLength segLen; - - pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone ); - - if( !getKernelReserved()) - continue; - reserved->dp.pagerContig = (1 == _rangesCount); - reserved->dp.memory = this; - - /*What cache mode do we need*/ - switch(options & kIOMapCacheMask ) { - - case kIOMapDefaultCache: - default: - flags = IODefaultCacheBits(pa); - if (DEVICE_PAGER_CACHE_INHIB & flags) - { - if (DEVICE_PAGER_GUARDED & flags) - mapping->fOptions |= kIOMapInhibitCache; - else - mapping->fOptions |= kIOMapWriteCombineCache; - } - else if (DEVICE_PAGER_WRITE_THROUGH & flags) - mapping->fOptions |= kIOMapWriteThruCache; - else - mapping->fOptions |= kIOMapCopybackCache; - break; - - case kIOMapInhibitCache: - flags = DEVICE_PAGER_CACHE_INHIB | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; - - case kIOMapWriteThruCache: - flags = DEVICE_PAGER_WRITE_THROUGH | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; + size = round_page(mapping->fLength); + flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS + | UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map)); - case kIOMapCopybackCache: - flags = DEVICE_PAGER_COHERENT; - break; + if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2, + NULL, NULL, + &flags)) + redirUPL2 = NULL; - case kIOMapWriteCombineCache: - flags = DEVICE_PAGER_CACHE_INHIB | - DEVICE_PAGER_COHERENT; - break; - } + for (lock_count = 0; + IORecursiveLockHaveLock(gIOMemoryLock); + lock_count++) { + UNLOCK; + } + err = upl_transpose(redirUPL2, mapping->fRedirUPL); + for (; + lock_count; + lock_count--) { + LOCK; + } + + if (kIOReturnSuccess != err) + { + IOLog("upl_transpose(%x)\n", err); + err = kIOReturnSuccess; + } - flags |= reserved->dp.pagerContig ? 
DEVICE_PAGER_CONTIGUOUS : 0; + if (redirUPL2) + { + upl_commit(redirUPL2, NULL, 0); + upl_deallocate(redirUPL2); + redirUPL2 = 0; + } + { + // swap the memEntries since they now refer to different vm_objects + IOMemoryReference * me = _memRef; + _memRef = mapping->fMemory->_memRef; + mapping->fMemory->_memRef = me; + } + if (pager) + err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options ); + } + while (false); + } + // upl_transpose> // + else + { + err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); +#if IOTRACKING + if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) + { + // only dram maps in the default on developement case + IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength); + } +#endif /* IOTRACKING */ + if ((err == KERN_SUCCESS) && pager) + { + err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options); - pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved, - size, flags); - assert( pager ); + if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0); + else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) + { + mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); + } + } + } - if( pager) { - kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/, - size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem ); + return (err); +} - assert( KERN_SUCCESS == kr ); - if( KERN_SUCCESS != kr) - { - device_pager_deallocate( pager ); - pager = MACH_PORT_NULL; - sharedMem = MACH_PORT_NULL; - } - } - if( pager && sharedMem) - reserved->dp.devicePager = pager; +#if IOTRACKING +IOReturn +IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task, + mach_vm_address_t * address, mach_vm_size_t * size) +{ +#define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field)) - } while( false ); + IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking)); - _memEntry = (void *) sharedMem; - } + if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady); - IOReturn result; - if (0 == sharedMem) - result = kr; - else - result = super::doMap( __addressMap, __address, - options, __offset, __length ); + *task = map->fAddressTask; + *address = map->fAddress; + *size = map->fLength; - return( result ); + return (kIOReturnSuccess); } +#endif /* IOTRACKING */ IOReturn IOGeneralMemoryDescriptor::doUnmap( vm_map_t addressMap, @@ -2883,168 +3648,6 @@ bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size return( true ); } -struct IOMemoryDescriptorMapAllocRef -{ - ipc_port_t sharedMem; - vm_map_t map; - mach_vm_address_t mapped; - mach_vm_size_t size; - mach_vm_size_t sourceOffset; - IOOptionBits options; -}; - -static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) -{ - IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref; - IOReturn err; - - do { - if( ref->sharedMem) - { - vm_prot_t prot = VM_PROT_READ - | ((ref->options & kIOMapReadOnly) ? 
0 : VM_PROT_WRITE); - - // VM system requires write access to change cache mode - if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask)) - prot |= VM_PROT_WRITE; - - // set memory entry cache - vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY; - switch (ref->options & kIOMapCacheMask) - { - case kIOMapInhibitCache: - SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode); - break; - - case kIOMapWriteThruCache: - SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode); - break; - - case kIOMapWriteCombineCache: - SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode); - break; - - case kIOMapCopybackCache: - SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode); - break; - - case kIOMapCopybackInnerCache: - SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode); - break; - - case kIOMapDefaultCache: - default: - SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode); - break; - } - - vm_size_t unused = 0; - - err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/, - memEntryCacheMode, NULL, ref->sharedMem ); - if (KERN_SUCCESS != err) - IOLog("MAP_MEM_ONLY failed %d\n", err); - - err = mach_vm_map( map, - &ref->mapped, - ref->size, 0 /* mask */, - (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT), - ref->sharedMem, ref->sourceOffset, - false, // copy - prot, // cur - prot, // max - VM_INHERIT_NONE); - - if( KERN_SUCCESS != err) { - ref->mapped = 0; - continue; - } - ref->map = map; - } - else - { - err = mach_vm_allocate(map, &ref->mapped, ref->size, - ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT) ); - if( KERN_SUCCESS != err) { - ref->mapped = 0; - continue; - } - ref->map = map; - // we have to make sure that these guys don't get copied if we fork. - err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE); - assert( KERN_SUCCESS == err ); - } - } - while( false ); - - return( err ); -} - -kern_return_t -IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable, - mach_vm_size_t offset, - mach_vm_address_t * address, mach_vm_size_t length) -{ - IOReturn err; - IOMemoryDescriptorMapAllocRef ref; - - ref.map = *map; - ref.sharedMem = entry; - ref.sourceOffset = trunc_page_64(offset); - ref.options = options; - ref.size = length; - - if (options & kIOMapAnywhere) - // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE - ref.mapped = 0; - else - ref.mapped = *address; - - if( ref.sharedMem && (ref.map == kernel_map) && pageable) - err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); - else - err = IOMemoryDescriptorMapAlloc( ref.map, &ref ); - - *address = ref.mapped; - *map = ref.map; - - return (err); -} - -kern_return_t -IOMemoryDescriptorMapCopy(vm_map_t * map, - IOOptionBits options, - mach_vm_size_t offset, - mach_vm_address_t * address, mach_vm_size_t length) -{ - IOReturn err; - IOMemoryDescriptorMapAllocRef ref; - - ref.map = *map; - ref.sharedMem = NULL; - ref.sourceOffset = trunc_page_64(offset); - ref.options = options; - ref.size = length; - - if (options & kIOMapAnywhere) - // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE - ref.mapped = 0; - else - ref.mapped = *address; - - if (ref.map == kernel_map) - err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref); - else - err = IOMemoryDescriptorMapAlloc(ref.map, &ref); - - *address = ref.mapped; - *map = ref.map; - - return (err); -} - IOReturn IOMemoryDescriptor::doMap( vm_map_t __addressMap, IOVirtualAddress * __address, @@ 
-3052,130 +3655,27 @@ IOReturn IOMemoryDescriptor::doMap( IOByteCount __offset, IOByteCount __length ) { -#ifndef __LP64__ - if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit"); -#endif /* !__LP64__ */ - - IOMemoryMap * mapping = (IOMemoryMap *) *__address; - mach_vm_size_t offset = mapping->fOffset + __offset; - mach_vm_size_t length = mapping->fLength; - - IOReturn err = kIOReturnSuccess; - memory_object_t pager; - mach_vm_size_t pageOffset; - IOPhysicalAddress sourceAddr; - unsigned int lock_count; + return (kIOReturnUnsupported); +} - do +IOReturn IOMemoryDescriptor::handleFault( + void * _pager, + mach_vm_size_t sourceOffset, + mach_vm_size_t length) +{ + if( kIOMemoryRedirected & _flags) { - sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment ); - pageOffset = sourceAddr - trunc_page( sourceAddr ); - - if( reserved) - pager = (memory_object_t) reserved->dp.devicePager; - else - pager = MACH_PORT_NULL; - - if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options)) - { - upl_t redirUPL2; - vm_size_t size; - int flags; - - if (!_memEntry) - { - err = kIOReturnNotReadable; - continue; - } - - size = round_page(mapping->fLength + pageOffset); - flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL - | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; - - if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2, - NULL, NULL, - &flags)) - redirUPL2 = NULL; - - for (lock_count = 0; - IORecursiveLockHaveLock(gIOMemoryLock); - lock_count++) { - UNLOCK; - } - err = upl_transpose(redirUPL2, mapping->fRedirUPL); - for (; - lock_count; - lock_count--) { - LOCK; - } - - if (kIOReturnSuccess != err) - { - IOLog("upl_transpose(%x)\n", err); - err = kIOReturnSuccess; - } - - if (redirUPL2) - { - upl_commit(redirUPL2, NULL, 0); - upl_deallocate(redirUPL2); - redirUPL2 = 0; - } - { - // swap the memEntries since they now refer to different vm_objects - void * me = _memEntry; - _memEntry = mapping->fMemory->_memEntry; - mapping->fMemory->_memEntry = me; - } - if (pager) - err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options ); - } - else - { - mach_vm_address_t address; - - if (!(options & kIOMapAnywhere)) - { - address = trunc_page_64(mapping->fAddress); - if( (mapping->fAddress - address) != pageOffset) - { - err = kIOReturnVMError; - continue; - } - } - - vm_map_t map = mapping->fAddressMap; - err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry, - options, (kIOMemoryBufferPageable & _flags), - offset, &address, round_page_64(length + pageOffset)); - if( err != KERN_SUCCESS) - continue; - - if (!_memEntry || pager) - { - err = handleFault( pager, mapping->fAddressMap, address, offset, length, options ); - if (err != KERN_SUCCESS) - doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 ); - } - #if DEBUG - if (kIOLogMapping & gIOKitDebug) - IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n", - err, this, (uint64_t)sourceAddr, mapping, address, offset, length); + IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset); #endif - - if (err == KERN_SUCCESS) - mapping->fAddress = address + pageOffset; - else - mapping->fAddress = NULL; - } + do { + SLEEP; + } while( kIOMemoryRedirected & _flags ); } - while( false ); - - return (err); + return (kIOReturnSuccess); } -IOReturn IOMemoryDescriptor::handleFault( +IOReturn IOMemoryDescriptor::populateDevicePager( void * _pager, vm_map_t addressMap, mach_vm_address_t address, @@ -3190,22 
+3690,16 @@ IOReturn IOMemoryDescriptor::handleFault( mach_vm_size_t page; mach_vm_size_t pageOffset; mach_vm_size_t pagerOffset; - IOPhysicalLength segLen; + IOPhysicalLength segLen, chunk; addr64_t physAddr; + IOOptionBits type; - if( !addressMap) - { - if( kIOMemoryRedirected & _flags) - { -#if DEBUG - IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset); -#endif - do { - SLEEP; - } while( kIOMemoryRedirected & _flags ); - } + type = _flags & kIOMemoryTypeMask; - return( kIOReturnSuccess ); + if (reserved->dp.pagerContig) + { + sourceOffset = 0; + pagerOffset = 0; } physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ); @@ -3221,72 +3715,49 @@ IOReturn IOMemoryDescriptor::handleFault( do { // in the middle of the loop only map whole pages - if( segLen >= bytes) - segLen = bytes; - else if( segLen != trunc_page( segLen)) - err = kIOReturnVMError; - if( physAddr != trunc_page_64( physAddr)) - err = kIOReturnBadArgument; - if (kIOReturnSuccess != err) - break; - -#if DEBUG - if( kIOLogMapping & gIOKitDebug) - IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n", - addressMap, address + pageOffset, physAddr + pageOffset, - segLen - pageOffset); -#endif - + if( segLen >= bytes) segLen = bytes; + else if (segLen != trunc_page(segLen)) err = kIOReturnVMError; + if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument; - if( pager) { - if( reserved && reserved->dp.pagerContig) { - IOPhysicalLength allLen; - addr64_t allPhys; + if (kIOReturnSuccess != err) break; - allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone ); - assert( allPhys ); - err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) ); - } - else - { +#if DEBUG || DEVELOPMENT + if ((kIOMemoryTypeUPL != type) + && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) + { + OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen); + } +#endif /* DEBUG || DEVELOPMENT */ - for( page = 0; - (page < segLen) && (KERN_SUCCESS == err); - page += page_size) - { - err = device_pager_populate_object(pager, pagerOffset, - (ppnum_t)(atop_64(physAddr + page)), page_size); - pagerOffset += page_size; - } - } - assert( KERN_SUCCESS == err ); - if( err) - break; + chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size); + for (page = 0; + (page < segLen) && (KERN_SUCCESS == err); + page += chunk) + { + err = device_pager_populate_object(pager, pagerOffset, + (ppnum_t)(atop_64(physAddr + page)), chunk); + pagerOffset += chunk; } + assert (KERN_SUCCESS == err); + if (err) break; + // This call to vm_fault causes an early pmap level resolution // of the mappings created above for kernel mappings, since // faulting in later can't take place from interrupt level. 
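The populate loop in this hunk now advances by a single computed step: one round_page(segLen) sized request when the device pager is contiguous (reserved->dp.pagerContig), otherwise one page-sized request at a time. A standalone sketch of that chunk arithmetic follows; the page size constant, the helper names, and the main() driver are assumptions of the sketch, not XNU code.

#include <cstdint>
#include <cstdio>

static const uint64_t kPageSize = 4096;   // assumed page size for this sketch

// Round a byte count up to a whole number of pages.
static uint64_t roundPage(uint64_t x) { return (x + kPageSize - 1) & ~(kPageSize - 1); }

// Count how many populate requests one physical segment would need:
// a contiguous pager takes the whole (rounded) segment in one request,
// a non-contiguous pager takes one request per page.
static unsigned populateRequests(uint64_t segLen, bool pagerContig)
{
    uint64_t chunk = pagerContig ? roundPage(segLen) : kPageSize;
    unsigned requests = 0;
    for (uint64_t page = 0; page < segLen; page += chunk) requests++;
    return requests;
}

int main()
{
    uint64_t segLen = 3 * kPageSize + 512;
    printf("contiguous pager: %u request(s), per-page pager: %u request(s)\n",
           populateRequests(segLen, true), populateRequests(segLen, false));
    return 0;
}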
- /* *** ALERT *** */ - /* *** Temporary Workaround *** */ - if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) { - vm_fault(addressMap, - (vm_map_offset_t)address, - VM_PROT_READ|VM_PROT_WRITE, - FALSE, THREAD_UNINT, NULL, - (vm_map_offset_t)0); + vm_fault(addressMap, + (vm_map_offset_t)trunc_page_64(address), + VM_PROT_READ|VM_PROT_WRITE, + FALSE, THREAD_UNINT, NULL, + (vm_map_offset_t)0); } - /* *** Temporary Workaround *** */ - /* *** ALERT *** */ - sourceOffset += segLen - pageOffset; address += segLen; bytes -= segLen; pageOffset = 0; - } while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ))); @@ -3302,32 +3773,32 @@ IOReturn IOMemoryDescriptor::doUnmap( IOByteCount __length ) { IOReturn err; + IOMemoryMap * mapping; mach_vm_address_t address; mach_vm_size_t length; - if (__length) - { - address = __address; - length = __length; - } - else - { - addressMap = ((IOMemoryMap *) __address)->fAddressMap; - address = ((IOMemoryMap *) __address)->fAddress; - length = ((IOMemoryMap *) __address)->fLength; - } + if (__length) panic("doUnmap"); - if ((addressMap == kernel_map) - && ((kIOMemoryBufferPageable & _flags) || !_memEntry)) - addressMap = IOPageableMapForAddress( address ); + mapping = (IOMemoryMap *) __address; + addressMap = mapping->fAddressMap; + address = mapping->fAddress; + length = mapping->fLength; + if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS; + else + { + if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) + addressMap = IOPageableMapForAddress( address ); #if DEBUG - if( kIOLogMapping & gIOKitDebug) - IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n", - addressMap, address, length ); + if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n", + addressMap, address, length ); #endif + err = mach_vm_deallocate( addressMap, address, length ); + } - err = mach_vm_deallocate( addressMap, address, length ); +#if IOTRACKING + IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking); +#endif /* IOTRACKING */ return (err); } @@ -3347,8 +3818,22 @@ IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) do { if( (iter = OSCollectionIterator::withCollection( _mappings))) { + + memory_object_t pager; + + if( reserved) + pager = (memory_object_t) reserved->dp.devicePager; + else + pager = MACH_PORT_NULL; + while( (mapping = (IOMemoryMap *) iter->getNextObject())) + { mapping->redirect( safeTask, doRedirect ); + if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) + { + err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache ); + } + } iter->release(); } @@ -3426,7 +3911,7 @@ IOReturn IOMemoryMap::unmap( void ) LOCK; if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory - && (0 == (fOptions & kIOMapStatic))) { + && (0 == (kIOMapStatic & fOptions))) { err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0); @@ -3449,8 +3934,11 @@ IOReturn IOMemoryMap::unmap( void ) void IOMemoryMap::taskDied( void ) { LOCK; - if (fUserClientUnmap) - unmap(); + if (fUserClientUnmap) unmap(); +#if IOTRACKING + else IOTrackingRemoveUser(gIOMapTracking, &fTracking); +#endif /* IOTRACKING */ + if( fAddressMap) { vm_map_deallocate(fAddressMap); fAddressMap = 0; @@ -3610,10 +4098,13 @@ IOReturn IOMemoryMap::wireRange( IOReturn kr; mach_vm_address_t start = trunc_page_64(fAddress + offset); mach_vm_address_t end = round_page_64(fAddress + offset + 
length); - - if (kIODirectionOutIn & options) + vm_prot_t prot; + + prot = (kIODirectionOutIn & options); + if (prot) { - kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE); + prot |= VM_PROT_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map)); + kr = vm_map_wire(fAddressMap, start, end, prot, FALSE); } else { @@ -3656,19 +4147,18 @@ void IOMemoryDescriptor::initialize( void ) if( 0 == gIOMemoryLock) gIOMemoryLock = IORecursiveLockAlloc(); - IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey, - ptoa_64(gIOMaximumMappedIOPageCount), 64); gIOLastPage = IOGetLastPageNumber(); - - gIOPageAllocLock = IOSimpleLockAlloc(); - queue_init(&gIOPageAllocList); } void IOMemoryDescriptor::free( void ) { - if( _mappings) - _mappings->release(); + if( _mappings) _mappings->release(); + if (reserved) + { + IODelete(reserved, IOMemoryDescriptorReserved, 1); + reserved = NULL; + } super::free(); } @@ -3772,12 +4262,13 @@ IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, physMem->retain(); } - if (!fRedirUPL) + if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) { - vm_size_t size = round_page(fLength); - int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL - | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; - if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL, + upl_size_t size = round_page(fLength); + upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS + | UPL_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map)); + if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL, NULL, NULL, &flags)) fRedirUPL = 0; @@ -3785,7 +4276,7 @@ IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, if (physMem) { IOUnmapPages( fAddressMap, fAddress, fLength ); - if (false) + if ((false)) physMem->redirect(0, true); } } @@ -3806,7 +4297,7 @@ IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, upl_deallocate(fRedirUPL); fRedirUPL = 0; } - if (false && physMem) + if ((false) && physMem) physMem->redirect(0, false); } } @@ -4014,6 +4505,8 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const { OSSymbol const *keys[2]; OSObject *values[2]; + OSArray * array; + struct SerData { user_addr_t address; user_size_t length; @@ -4024,10 +4517,9 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const IOOptionBits type = _flags & kIOMemoryTypeMask; if (s == NULL) return false; - if (s->previouslySerialized(this)) return true; - // Pretend we are an array. 
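The serialize() rework that continues below drops the hand-written addXMLStartTag/addXMLEndTag bracketing: each range becomes an OSDictionary collected into an OSArray, and the array serializes itself. A minimal sketch of that libkern pattern follows; the helper name, the plain uint64_t parameters, and the key strings are assumptions of the sketch, not the descriptor's actual keys.

#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSNumber.h>
#include <libkern/c++/OSSerialize.h>

// Sketch: serialize (address, length) pairs as an array of dictionaries.
static bool serializeRanges(OSSerialize * s, const uint64_t * addrs,
                            const uint64_t * lens, unsigned int count)
{
    OSArray * array = OSArray::withCapacity(count);
    if (!array) return false;

    bool ok = true;
    for (unsigned int i = 0; ok && (i < count); i++)
    {
        OSDictionary * dict = OSDictionary::withCapacity(2);
        OSNumber * addr = OSNumber::withNumber(addrs[i], 8 * sizeof(addrs[i]));
        OSNumber * len  = OSNumber::withNumber(lens[i],  8 * sizeof(lens[i]));
        ok = (dict && addr && len);
        if (ok)
        {
            dict->setObject("address", addr);   // key names are placeholders
            dict->setObject("length",  len);
            array->setObject(dict);
        }
        if (addr) addr->release();
        if (len)  len->release();
        if (dict) dict->release();
    }
    if (ok) ok = array->serialize(s);           // the array emits the container
    array->release();
    return ok;
}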
-    if (!s->addXMLStartTag(this, "array")) return false;
+    array = OSArray::withCapacity(4);
+    if (!array)  return (false);
 
     nRanges = _rangesCount;
     vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
@@ -4047,7 +4539,7 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
     if (nRanges == _rangesCount) {
         Ranges vec = _ranges;
         for (index = 0; index < nRanges; index++) {
-            user_addr_t addr; IOByteCount len;
+            mach_vm_address_t addr; mach_vm_size_t len;
             getAddrLenForInd(addr, len, type, vec, index);
             vcopy[index].address = addr;
             vcopy[index].length = len;
@@ -4064,8 +4556,7 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
     {
         user_addr_t addr = vcopy[index].address;
         IOByteCount len = (IOByteCount) vcopy[index].length;
-        values[0] =
-            OSNumber::withNumber(addr, sizeof(addr) * 8);
+        values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
         if (values[0] == 0) {
             result = false;
             goto bail;
@@ -4080,19 +4571,18 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
             result = false;
             goto bail;
         }
+        array->setObject(dict);
+        dict->release();
         values[0]->release();
         values[1]->release();
         values[0] = values[1] = 0;
-
-        result = dict->serialize(s);
-        dict->release();
-        if (!result) {
-            goto bail;
-        }
     }
-    result = s->addXMLEndTag("array");
+
+    result = array->serialize(s);
 
  bail:
+    if (array)
+        array->release();
     if (values[0])
         values[0]->release();
     if (values[1])
@@ -4103,6 +4593,7 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
         keys[1]->release();
     if (vcopy)
         IOFree(vcopy, sizeof(SerData) * nRanges);
+
     return result;
 }
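Most of the churn above is in the prepare()/complete() path (wiring, wire-tag accounting, and the completion-error aware choice between upl_commit and upl_abort), but the client-side contract is unchanged: every successful prepare() must be balanced by a complete() before the descriptor is released. A minimal usage sketch under that assumption; the task, buffer address, length, and error handling are illustrative only.

#include <IOKit/IOMemoryDescriptor.h>

// Sketch: wire a client buffer around a DMA transfer, then unwire it.
static IOReturn doTransfer(task_t task, mach_vm_address_t buf, mach_vm_size_t len)
{
    IOMemoryDescriptor * md =
        IOMemoryDescriptor::withAddressRange(buf, len, kIODirectionInOut, task);
    if (!md) return kIOReturnNoMemory;

    IOReturn ret = md->prepare();     // wires the pages (wireVirtual path above)
    if (kIOReturnSuccess == ret)
    {
        // ... run the hardware or an IODMACommand against md ...
        md->complete();               // balances the successful prepare()
    }
    md->release();
    return ret;
}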