X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/a1c7dba18ef36983396c282fe85292db066e39db..d9a64523371fa019c4575bb400cbbc3a50ac9903:/iokit/Kernel/IOMemoryDescriptor.cpp?ds=sidebyside diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp index 0c7744386..3c1c4674b 100644 --- a/iokit/Kernel/IOMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2007 Apple Inc. All rights reserved. + * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,12 +25,6 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - * - * HISTORY - * - */ #include @@ -43,12 +37,12 @@ #include #include -#ifndef __LP64__ #include -#endif /* !__LP64__ */ +#include #include #include +#include #include "IOKitKernelInternal.h" @@ -57,6 +51,7 @@ #include #include #include +#include #include @@ -74,20 +69,6 @@ __BEGIN_DECLS extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); extern void ipc_port_release_send(ipc_port_t port); -kern_return_t -memory_object_iopl_request( - ipc_port_t port, - memory_object_offset_t offset, - vm_size_t *upl_size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - int *flags); - -// osfmk/device/iokit_rpc.c -unsigned int IODefaultCacheBits(addr64_t pa); -unsigned int IOTranslateCacheBits(struct phys_entry *pp); - __END_DECLS #define kIOMapperWaitSystem ((IOMapper *) 1) @@ -120,8 +101,6 @@ static IORecursiveLock * gIOMemoryLock; #define DEBG(fmt, args...) {} #endif -#define IOMD_DEBUG_DMAACTIVE 1 - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ // Some data structures and accessor macros used by the initWithOptions @@ -147,22 +126,34 @@ struct ioPLBlock { unsigned int fFlags; // Flags }; -struct ioGMDData { +enum { kMaxWireTags = 6 }; + +struct ioGMDData +{ IOMapper * fMapper; - uint8_t fDMAMapNumAddressBits; uint64_t fDMAMapAlignment; - addr64_t fMappedBase; - uint64_t fPreparationID; - unsigned int fPageCnt; - unsigned char fDiscontig:1; - unsigned char fCompletionError:1; - unsigned char _resv:6; + uint64_t fMappedBase; + uint64_t fMappedLength; + uint64_t fPreparationID; +#if IOTRACKING + IOTracking fWireTracking; +#endif /* IOTRACKING */ + unsigned int fPageCnt; + uint8_t fDMAMapNumAddressBits; + unsigned char fDiscontig:1; + unsigned char fCompletionError:1; + unsigned char fMappedBaseValid:1; + unsigned char _resv:3; + unsigned char fDMAAccess:2; + + /* variable length arrays */ + upl_page_info_t fPageList[1] #if __LP64__ - // align arrays to 8 bytes so following macros work - unsigned char fPad[3]; + // align fPageList as for ioPLBlock + __attribute__((aligned(sizeof(upl_t)))) #endif - upl_page_info_t fPageList[1]; /* variable length */ - ioPLBlock fBlocks[1]; /* variable length */ + ; + ioPLBlock fBlocks[1]; }; #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) @@ -173,6 +164,8 @@ struct ioGMDData { #define computeDataSize(p, u) \ (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) +enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote }; + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define next_page(a) ( trunc_page(a) + PAGE_SIZE ) @@ -271,12 +264,20 @@ purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask); 
break; case kIOMemoryPurgeableEmpty: - *state = VM_PURGABLE_EMPTY; + *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask); break; default: err = kIOReturnBadArgument; break; } + + if (*control == VM_PURGABLE_SET_STATE) { + // let VM know this call is from the kernel and is allowed to alter + // the volatility of the memory entry even if it was created with + // MAP_MEM_PURGABLE_KERNEL_ONLY + *control = VM_PURGABLE_SET_STATE_FROM_KERNEL; + } + return (err); } @@ -331,6 +332,10 @@ vmProtForCacheMode(IOOptionBits cacheMode) SET_MAP_MEM(MAP_MEM_INNERWBACK, prot); break; + case kIOPostedWrite: + SET_MAP_MEM(MAP_MEM_POSTED, prot); + break; + case kIODefaultCache: default: SET_MAP_MEM(MAP_MEM_NOOP, prot); @@ -366,6 +371,10 @@ pagerFlagsForCacheMode(IOOptionBits cacheMode) pagerFlags = DEVICE_PAGER_COHERENT; break; + case kIOPostedWrite: + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK; + break; + case kIODefaultCache: default: pagerFlags = -1U; @@ -386,17 +395,19 @@ struct IOMemoryEntry struct IOMemoryReference { - volatile SInt32 refCount; - vm_prot_t prot; - uint32_t capacity; - uint32_t count; - IOMemoryEntry entries[0]; + volatile SInt32 refCount; + vm_prot_t prot; + uint32_t capacity; + uint32_t count; + struct IOMemoryReference * mapRef; + IOMemoryEntry entries[0]; }; enum { kIOMemoryReferenceReuse = 0x00000001, kIOMemoryReferenceWrite = 0x00000002, + kIOMemoryReferenceCOW = 0x00000004, }; SInt32 gIOMemoryReferenceCount; @@ -438,6 +449,12 @@ IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref) IOMemoryEntry * entries; size_t size; + if (ref->mapRef) + { + memoryReferenceFree(ref->mapRef); + ref->mapRef = 0; + } + entries = ref->entries + ref->count; while (entries > &ref->entries[0]) { @@ -484,19 +501,27 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( IOOptionBits type = (_flags & kIOMemoryTypeMask); IOOptionBits cacheMode; unsigned int pagerFlags; + vm_tag_t tag; ref = memoryReferenceAlloc(kCapacity, NULL); if (!ref) return (kIOReturnNoMemory); + + tag = getVMTag(kernel_map); entries = &ref->entries[0]; count = 0; + err = KERN_SUCCESS; offset = 0; rangeIdx = 0; - if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if (_task) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + } else { nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); nextLen = physLen; + // default cache mode for physical if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) { @@ -504,7 +529,9 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( pagerFlags = IODefaultCacheBits(nextAddr); if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) { - if (DEVICE_PAGER_GUARDED & pagerFlags) + if (DEVICE_PAGER_EARLY_ACK & pagerFlags) + mode = kIOPostedWrite; + else if (DEVICE_PAGER_GUARDED & pagerFlags) mode = kIOInhibitCache; else mode = kIOWriteCombineCache; @@ -525,6 +552,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE; if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE; if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE; + if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY; if ((kIOMemoryReferenceReuse & options) && _memRef) { @@ -540,7 +568,16 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( { // IOBufferMemoryDescriptor alloc - set flags for entry + object create prot |= MAP_MEM_NAMED_CREATE; - if (kIOMemoryBufferPurgeable & 
_flags) prot |= MAP_MEM_PURGABLE; + if (kIOMemoryBufferPurgeable & _flags) + { + prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY); + if (VM_KERN_MEMORY_SKYWALK == tag) + { + prot |= MAP_MEM_LEDGER_TAG_NETWORK; + } + } + if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED; + prot |= VM_PROT_WRITE; map = NULL; } @@ -575,7 +612,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( else prot &= ~MAP_MEM_NAMED_REUSE; } - err = mach_make_memory_entry_64(map, + err = mach_make_memory_entry_internal(map, &actualSize, entryAddr, prot, &entry, cloneEntry); if (KERN_SUCCESS != err) break; @@ -607,7 +644,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( } else { - // _task == 0, physical + // _task == 0, physical or kIOMemoryTypeUPL memory_object_t pager; vm_size_t size = ptoa_32(_pages); @@ -647,6 +684,13 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( ref->count = count; ref->prot = prot; + if (_task && (KERN_SUCCESS == err) + && (kIOMemoryMapCopyOnWrite & _flags) + && !(kIOMemoryReferenceCOW & options)) + { + err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef); + } + if (KERN_SUCCESS == err) { if (MAP_MEM_NAMED_REUSE & prot) @@ -667,16 +711,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( return (err); } -struct IOMemoryDescriptorMapAllocRef -{ - vm_map_t map; - mach_vm_address_t mapped; - mach_vm_size_t size; - vm_prot_t prot; - IOOptionBits options; -}; - -static kern_return_t +kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) { IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref; @@ -684,13 +719,14 @@ IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) vm_map_offset_t addr; addr = ref->mapped; + err = vm_map_enter_mem_object(map, &addr, ref->size, (vm_map_offset_t) 0, (((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE - : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT) - | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + : VM_FLAGS_FIXED)), + VM_MAP_KERNEL_FLAGS_NONE, + ref->tag, IPC_PORT_NULL, (memory_object_offset_t) 0, false, /* copy */ @@ -721,21 +757,27 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( vm_map_offset_t addr, mapAddr; vm_map_offset_t pageOffset, entryOffset, remain, chunk; - mach_vm_address_t srcAddr, nextAddr; - mach_vm_size_t srcLen, nextLen; + mach_vm_address_t nextAddr; + mach_vm_size_t nextLen; IOByteCount physLen; IOMemoryEntry * entry; vm_prot_t prot, memEntryCacheMode; IOOptionBits type; IOOptionBits cacheMode; + vm_tag_t tag; + // for the kIOMapPrefault option. + upl_page_info_t * pageList = NULL; + UInt currentPageIndex = 0; + bool didAlloc; - /* - * For the kIOMapPrefault option. 
- */ - upl_page_info_t *pageList = NULL; - UInt currentPageIndex = 0; + if (ref->mapRef) + { + err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); + return (err); + } type = _flags & kIOMemoryTypeMask; + prot = VM_PROT_READ; if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE; prot &= ref->prot; @@ -747,9 +789,12 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); } + tag = getVMTag(map); + if (_task) { // Find first range for offset + if (!_rangesCount) return (kIOReturnBadArgument); for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) { getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); @@ -771,7 +816,9 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( nextAddr += remain; nextLen -= remain; pageOffset = (page_mask & nextAddr); - addr = 0; + addr = 0; + didAlloc = false; + if (!(options & kIOMapAnywhere)) { addr = *inaddr; @@ -788,9 +835,19 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( // allocate VM size = round_page_64(size + pageOffset); + if (kIOMapOverwrite & options) + { + if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) + { + map = IOPageableMapForAddress(addr); + } + err = KERN_SUCCESS; + } + else { IOMemoryDescriptorMapAllocRef ref; ref.map = map; + ref.tag = tag; ref.options = options; ref.size = size; ref.prot = prot; @@ -799,37 +856,45 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( ref.mapped = 0; else ref.mapped = addr; - if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); else err = IOMemoryDescriptorMapAlloc(ref.map, &ref); if (KERN_SUCCESS == err) { - addr = ref.mapped; - map = ref.map; + addr = ref.mapped; + map = ref.map; + didAlloc = true; } } + /* + * If the memory is associated with a device pager but doesn't have a UPL, + * it will be immediately faulted in through the pager via populateDevicePager(). + * kIOMapPrefault is redundant in that case, so don't try to use it for UPL + * operations. + */ + if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) + options &= ~kIOMapPrefault; + /* * Prefaulting is only possible if we wired the memory earlier. Check the * memory type, and the underlying data. */ - if (options & kIOMapPrefault) { + if (options & kIOMapPrefault) + { /* * The memory must have been wired by calling ::prepare(), otherwise * we don't have the UPL. Without UPLs, pages cannot be pre-faulted */ - assert(map != kernel_map); assert(_wireCount != 0); assert(_memoryEntries != NULL); - if ((map == kernel_map) || - (_wireCount == 0) || + if ((_wireCount == 0) || (_memoryEntries == NULL)) { return kIOReturnBadArgument; } - + // Get the page list. 
ioGMDData* dataP = getDataP(_memoryEntries); ioPLBlock const* ioplList = getIOPLList(dataP); @@ -871,22 +936,9 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( remain = size; mapAddr = addr; addr += pageOffset; - while (remain && nextLen && (KERN_SUCCESS == err)) - { - srcAddr = nextAddr; - srcLen = nextLen; - nextAddr = 0; - nextLen = 0; - // coalesce addr range - for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) - { - getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); - if ((srcAddr + srcLen) != nextAddr) break; - srcLen += nextLen; - } - while (srcLen && (KERN_SUCCESS == err)) - { + while (remain && (KERN_SUCCESS == err)) + { entryOffset = offset - entry->offset; if ((page_mask & entryOffset) != pageOffset) { @@ -907,17 +959,23 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( chunk = entry->size - entryOffset; if (chunk) { - if (chunk > remain) chunk = remain; + vm_map_kernel_flags_t vmk_flags; - if (options & kIOMapPrefault) { + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */ + + if (chunk > remain) chunk = remain; + if (options & kIOMapPrefault) + { UInt nb_pages = round_page(chunk) / PAGE_SIZE; + err = vm_map_enter_mem_object_prefault(map, &mapAddr, chunk, 0 /* mask */, - (VM_FLAGS_FIXED - | VM_FLAGS_OVERWRITE - | VM_MAKE_TAG(VM_MEMORY_IOKIT) - | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE), + vmk_flags, + tag, entry->entry, entryOffset, prot, // cur @@ -928,14 +986,16 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( // Compute the next index in the page list. currentPageIndex += nb_pages; assert(currentPageIndex <= _pages); - } else { + } + else + { err = vm_map_enter_mem_object(map, &mapAddr, chunk, 0 /* mask */, (VM_FLAGS_FIXED - | VM_FLAGS_OVERWRITE - | VM_MAKE_TAG(VM_MEMORY_IOKIT) - | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + | VM_FLAGS_OVERWRITE), + vmk_flags, + tag, entry->entry, entryOffset, false, // copy @@ -943,7 +1003,6 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( prot, // max VM_INHERIT_NONE); } - if (KERN_SUCCESS != err) break; remain -= chunk; if (!remain) break; @@ -959,9 +1018,8 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( break; } } - } - if ((KERN_SUCCESS != err) && addr) + if ((KERN_SUCCESS != err) && didAlloc) { (void) mach_vm_deallocate(map, trunc_page_64(addr), size); addr = 0; @@ -983,6 +1041,7 @@ IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts( unsigned int totalResident, totalDirty; totalResident = totalDirty = 0; + err = kIOReturnSuccess; entries = ref->entries + ref->count; while (entries > &ref->entries[0]) { @@ -1009,15 +1068,16 @@ IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable( vm_purgable_t control; int totalState, state; - entries = ref->entries + ref->count; totalState = kIOMemoryPurgeableNonVolatile; + err = kIOReturnSuccess; + entries = ref->entries + ref->count; while (entries > &ref->entries[0]) { entries--; err = purgeableControlBits(newState, &control, &state); if (KERN_SUCCESS != err) break; - err = mach_memory_entry_purgable_control(entries->entry, control, &state); + err = memory_entry_purgeable_control_internal(entries->entry, control, &state); if (KERN_SUCCESS != err) break; err = purgeableStateBits(&state); if (KERN_SUCCESS != err) break; @@ -1188,7 +1248,7 @@ IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, IOByteCount length, IODirection direction) { - return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe)); + return 
(IOSubMemoryDescriptor::withSubRange(of, offset, length, direction)); } #endif /* !__LP64__ */ @@ -1444,7 +1504,7 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, } // Grab the appropriate mapper - if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone; + if (kIOMemoryHostOrRemote & options) options |= kIOMemoryMapperNone; if (kIOMemoryMapperNone & options) mapper = 0; // No Mapper else if (mapper == kIOMapperSystem) { @@ -1452,12 +1512,6 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, gIOSystemMapper = mapper = IOMapper::gSystem; } - // Temp binary compatibility for kIOMemoryThreadSafe - if (kIOMemoryReserved6156215 & options) - { - options &= ~kIOMemoryReserved6156215; - options |= kIOMemoryThreadSafe; - } // Remove the dynamic internal use flags from the initial setting options &= ~(kIOMemoryPreparedReadOnly); _flags = options; @@ -1467,6 +1521,7 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, _direction = (IODirection) (_flags & kIOMemoryDirectionMask); #endif /* !__LP64__ */ + _dmaReferences = 0; __iomd_reservedA = 0; __iomd_reservedB = 0; _highestPage = 0; @@ -1490,7 +1545,20 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, if (!initMemoryEntries(dataSize, mapper)) return (false); dataP = getDataP(_memoryEntries); dataP->fPageCnt = 0; - + switch (kIOMemoryDirectionMask & options) + { + case kIODirectionOut: + dataP->fDMAAccess = kIODMAMapReadAccess; + break; + case kIODirectionIn: + dataP->fDMAAccess = kIODMAMapWriteAccess; + break; + case kIODirectionNone: + case kIODirectionOutIn: + default: + panic("bad dir for upl 0x%x\n", (int) options); + break; + } // _wireCount++; // UPLs start out life wired _length = count; @@ -1549,7 +1617,9 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, case kIOMemoryTypeVirtual64: case kIOMemoryTypePhysical64: if (count == 1 +#ifndef __arm__ && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL +#endif ) { if (kIOMemoryTypeVirtual64 == type) type = kIOMemoryTypeVirtual; @@ -1585,22 +1655,22 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, break; } } + _rangesCount = count; // Find starting address within the vector of ranges Ranges vec = _ranges; - UInt32 length = 0; - UInt32 pages = 0; - for (unsigned ind = 0; ind < count; ind++) { + mach_vm_size_t totalLength = 0; + unsigned int ind, pages = 0; + for (ind = 0; ind < count; ind++) { mach_vm_address_t addr; - mach_vm_size_t len; + mach_vm_address_t endAddr; + mach_vm_size_t len; // addr & len are returned by this function getAddrLenForInd(addr, len, type, vec, ind); - pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr)); - len += length; - assert(len >= length); // Check for 32 bit wrap around - length = len; - + if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break; + if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break; + if (os_add_overflow(totalLength, len, &totalLength)) break; if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { ppnum_t highPage = atop_64(addr + len - 1); @@ -1608,22 +1678,36 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, _highestPage = highPage; } } - _length = length; + if ((ind < count) + || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */ + + _length = totalLength; _pages = pages; - _rangesCount = count; // Auto-prepare memory at creation time. 
// Implied completion when descriptor is free-ed + + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) _wireCount++; // Physical MDs are, by definition, wired else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */ ioGMDData *dataP; - unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2); + unsigned dataSize; + if (_pages > atop_64(max_mem)) return false; + + dataSize = computeDataSize(_pages, /* upls */ count * 2); if (!initMemoryEntries(dataSize, mapper)) return false; dataP = getDataP(_memoryEntries); dataP->fPageCnt = _pages; + if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags)) + && (VM_KERN_MEMORY_NONE == _kernelTag)) + { + _kernelTag = IOMemoryTag(kernel_map); + if (_kernelTag == gIOSurfaceTag) _userTag = VM_MEMORY_IOSURFACE; + } + if ( (kIOMemoryPersistent & _flags) && !_memRef) { IOReturn @@ -1658,10 +1742,10 @@ void IOGeneralMemoryDescriptor::free() if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { ioGMDData * dataP; - if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) { - dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages); - dataP->fMappedBase = 0; + dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength); + dataP->fMappedBaseValid = dataP->fMappedBase = 0; } } else @@ -1750,7 +1834,15 @@ IOOptionBits IOMemoryDescriptor::getTag( void ) return( _tag); } +uint64_t IOMemoryDescriptor::getFlags(void) +{ + return (_flags); +} + #ifndef __LP64__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + // @@@ gvdl: who is using this API? Seems like a wierd thing to implement. 
IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length ) @@ -1764,6 +1856,9 @@ IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used } + +#pragma clang diagnostic pop + #endif /* !__LP64__ */ IOByteCount IOMemoryDescriptor::readBytes @@ -1775,10 +1870,14 @@ IOByteCount IOMemoryDescriptor::readBytes // Assert that this entire I/O is withing the available range assert(offset <= _length); assert(offset + length <= _length); - if (offset >= _length) { + if ((offset >= _length) + || ((offset + length) > _length)) { return 0; } + assert (!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) return (0); + if (kIOMemoryThreadSafe & _flags) LOCK; @@ -1824,10 +1923,15 @@ IOByteCount IOMemoryDescriptor::writeBytes assert( !(kIOMemoryPreparedReadOnly & _flags) ); - if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) { + if ( (kIOMemoryPreparedReadOnly & _flags) + || (offset >= _length) + || ((offset + length) > _length)) { return 0; } + assert (!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) return (0); + if (kIOMemoryThreadSafe & _flags) LOCK; @@ -1860,7 +1964,11 @@ IOByteCount IOMemoryDescriptor::writeBytes assert(!remaining); +#if defined(__x86_64__) + // copypv does not cppvFsnk on intel +#else if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length); +#endif return length - remaining; } @@ -1914,11 +2022,7 @@ void IOMemoryDescriptor::setPreparationID( void ) { if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) { -#if defined(__ppc__ ) - reserved->preparationID = gIOMDPreparationID++; -#else reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID); -#endif } } @@ -1930,6 +2034,25 @@ uint64_t IOMemoryDescriptor::getPreparationID( void ) return (kIOPreparationIDUnsupported); } +void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag) +{ + _kernelTag = kernelTag; + _userTag = userTag; +} + +vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map) +{ + if (vm_kernel_map_is_kernel(map)) + { + if (VM_KERN_MEMORY_NONE != _kernelTag) return (_kernelTag); + } + else + { + if (VM_KERN_MEMORY_NONE != _userTag) return (_userTag); + } + return (IOMemoryTag(map)); +} + IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const { IOReturn err = kIOReturnSuccess; @@ -1952,37 +2075,56 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * if (_memoryEntries && data->fMapper) { - bool remap; - bool whole = ((data->fOffset == 0) && (data->fLength == _length)); + bool remap, keepMap; dataP = getDataP(_memoryEntries); if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits; if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment; - remap = (dataP->fDMAMapNumAddressBits < 64) - && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); + keepMap = (data->fMapper == gIOSystemMapper); + keepMap &= ((data->fOffset == 0) && (data->fLength == _length)); + + if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockLock(_prepareLock); + + remap = (!keepMap); + remap |= (dataP->fDMAMapNumAddressBits < 64) + && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); remap |= (dataP->fDMAMapAlignment > page_size); - remap |= (!whole); - 
if (remap || !dataP->fMappedBase) + + if (remap || !dataP->fMappedBaseValid) { -// if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params); - err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount); - if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase) +// if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params); + err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) { - dataP->fMappedBase = data->fAlloc; - data->fAllocCount = 0; // IOMD owns the alloc now + dataP->fMappedBase = data->fAlloc; + dataP->fMappedBaseValid = true; + dataP->fMappedLength = data->fAllocLength; + data->fAllocLength = 0; // IOMD owns the alloc now } } else { data->fAlloc = dataP->fMappedBase; - data->fAllocCount = 0; // IOMD owns the alloc + data->fAllocLength = 0; // give out IOMD map + md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength); } data->fMapContig = !dataP->fDiscontig; - } + if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockUnlock(_prepareLock); + } return (err); } + if (kIOMDDMAUnmap == op) + { + if (dataSize < sizeof(IOMDDMAMapArgs)) + return kIOReturnUnderrun; + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; + + err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength); + + return kIOReturnSuccess; + } if (kIOMDAddDMAMapSpec == op) { @@ -2030,20 +2172,24 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * } } - return kIOReturnSuccess; - -#if IOMD_DEBUG_DMAACTIVE - } else if (kIOMDDMAActive == op) { - if (params) OSIncrementAtomic(&md->__iomd_reservedA); - else { - if (md->__iomd_reservedA) - OSDecrementAtomic(&md->__iomd_reservedA); - else - panic("kIOMDSetDMAInactive"); - } -#endif /* IOMD_DEBUG_DMAACTIVE */ + return kIOReturnSuccess; + } - } else if (kIOMDWalkSegments != op) + else if (kIOMDDMAActive == op) + { + if (params) + { + int16_t prior; + prior = OSAddAtomic16(1, &md->_dmaReferences); + if (!prior) md->_mapName = NULL; + } + else + { + if (md->_dmaReferences) OSAddAtomic16(-1, &md->_dmaReferences); + else panic("_dmaReferences underflow"); + } + } + else if (kIOMDWalkSegments != op) return kIOReturnBadArgument; // Get the next segment @@ -2060,12 +2206,15 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * isP = (InternalState *) vData; UInt offset = isP->fIO.fOffset; - bool mapped = isP->fIO.fMapped; + uint8_t mapped = isP->fIO.fMapped; + uint64_t mappedBase; + + if (mapped && (kIOMemoryRemote & _flags)) return (kIOReturnNotAttached); if (IOMapper::gSystem && mapped && (!(kIOMemoryHostOnly & _flags)) - && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase)) -// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase)) + && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) +// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid)) { if (!_memoryEntries && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); @@ -2077,11 +2226,26 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * bzero(&mapSpec, sizeof(mapSpec)); mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; mapSpec.alignment = 
dataP->fDMAMapAlignment; - err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL); + err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength); if (kIOReturnSuccess != err) return (err); + dataP->fMappedBaseValid = true; } } + if (kIOMDDMAWalkMappedLocal == mapped) mappedBase = isP->fIO.fMappedBase; + else if (mapped) + { + if (IOMapper::gSystem + && (!(kIOMemoryHostOnly & _flags)) + && _memoryEntries + && (dataP = getDataP(_memoryEntries)) + && dataP->fMappedBaseValid) + { + mappedBase = dataP->fMappedBase; + } + else mapped = 0; + } + if (offset >= _length) return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError; @@ -2097,7 +2261,6 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * UInt length; UInt64 address; - if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { // Physical address based memory descriptor @@ -2114,10 +2277,9 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * length = off2Ind - offset; address = physP[ind - 1].address + len - length; - if (true && mapped && _memoryEntries - && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + if (true && mapped) { - address = dataP->fMappedBase + offset; + address = mappedBase + offset; } else { @@ -2151,10 +2313,9 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * length = off2Ind - offset; address = physP[ind - 1].address + len - length; - if (true && mapped && _memoryEntries - && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + if (true && mapped) { - address = dataP->fMappedBase + offset; + address = mappedBase + offset; } else { @@ -2203,9 +2364,9 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void * // If a mapped address is requested and this is a pre-mapped IOPL // then just need to compute an offset relative to the mapped base. 
- if (mapped && dataP->fMappedBase) { + if (mapped) { offset += (ioplInfo.fPageOffset & PAGE_MASK); - address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset; + address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset; continue; // Done leave do/while(false) now } @@ -2326,7 +2487,7 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l state->fOffset = offset; state->fLength = _length - offset; - state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly); + state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote); ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state)); @@ -2350,10 +2511,10 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l addr64_t origAddr = address; IOByteCount origLen = length; - address = mapper->mapAddr(origAddr); + address = mapper->mapToPhysicalAddress(origAddr); length = page_size - (address & (page_size - 1)); while ((length < origLen) - && ((address + length) == mapper->mapAddr(origAddr + length))) + && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) length += page_size; if (length > origLen) length = origLen; @@ -2371,6 +2532,9 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l } #ifndef __LP64__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + addr64_t IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) { @@ -2391,6 +2555,7 @@ IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOf return (address); } +#pragma clang diagnostic pop addr64_t IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) @@ -2437,11 +2602,11 @@ IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *length { IOByteCount origLen; - phys64 = mapper->mapAddr(phys32); + phys64 = mapper->mapToPhysicalAddress(phys32); origLen = *lengthOfSegment; length = page_size - (phys64 & (page_size - 1)); while ((length < origLen) - && ((phys64 + length) == mapper->mapAddr(phys32 + length))) + && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) length += page_size; if (length > origLen) length = origLen; @@ -2466,6 +2631,9 @@ IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *len return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment)); } +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, IOByteCount * lengthOfSegment) { @@ -2476,6 +2644,7 @@ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, return 0; } +#pragma clang diagnostic pop #endif /* !__LP64__ */ IOReturn @@ -2522,9 +2691,20 @@ IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt data if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName()); data->fMapContig = true; - err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount); + err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + return (err); } + else if (kIOMDDMAUnmap == op) + { + if (dataSize < sizeof(IOMDDMAMapArgs)) + return 
kIOReturnUnderrun; + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; + + err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength); + + return (kIOReturnSuccess); + } else return kIOReturnBadArgument; return kIOReturnSuccess; @@ -2539,6 +2719,9 @@ IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, vm_purgable_t control; int state; + assert (!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); + if (_memRef) { err = super::setPurgeable(newState, oldState); @@ -2562,7 +2745,14 @@ IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, break; } else + { curMap = get_task_map(_task); + if (NULL == curMap) + { + err = KERN_INVALID_ARGUMENT; + break; + } + } // can only do one range Ranges vec = _ranges; @@ -2574,7 +2764,7 @@ IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, err = purgeableControlBits(newState, &control, &state); if (kIOReturnSuccess != err) break; - err = mach_vm_purgable_control(curMap, addr, control, &state); + err = vm_map_purgable_control(curMap, addr, control, &state); if (oldState) { if (kIOReturnSuccess == err) @@ -2607,18 +2797,39 @@ IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState, IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount, IOByteCount * dirtyPageCount ) { - IOReturn err = kIOReturnNotReady; + IOReturn err = kIOReturnNotReady; + + assert (!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); if (kIOMemoryThreadSafe & _flags) LOCK; if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount); + else + { + IOMultiMemoryDescriptor * mmd; + IOSubMemoryDescriptor * smd; + if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) + { + err = smd->getPageCounts(residentPageCount, dirtyPageCount); + } + else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) + { + err = mmd->getPageCounts(residentPageCount, dirtyPageCount); + } + } if (kIOMemoryThreadSafe & _flags) UNLOCK; return (err); } +#if defined(__arm__) || defined(__arm64__) +extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res); +extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res); +#else /* defined(__arm__) || defined(__arm64__) */ extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); +#endif /* defined(__arm__) || defined(__arm64__) */ static void SetEncryptOp(addr64_t pa, unsigned int count) { @@ -2650,15 +2861,41 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, IOByteCount remaining; unsigned int res; void (*func)(addr64_t pa, unsigned int count) = 0; +#if defined(__arm__) || defined(__arm64__) + void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0; +#endif + + assert (!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); switch (options) { case kIOMemoryIncoherentIOFlush: +#if defined(__arm__) || defined(__arm64__) + func_ext = &dcache_incoherent_io_flush64; +#if __ARM_COHERENT_IO__ + func_ext(0, 0, 0, &res); + return kIOReturnSuccess; +#else /* __ARM_COHERENT_IO__ */ + break; +#endif /* __ARM_COHERENT_IO__ */ +#else /* defined(__arm__) || defined(__arm64__) */ func = &dcache_incoherent_io_flush64; break; 
+#endif /* defined(__arm__) || defined(__arm64__) */ case kIOMemoryIncoherentIOStore: +#if defined(__arm__) || defined(__arm64__) + func_ext = &dcache_incoherent_io_store64; +#if __ARM_COHERENT_IO__ + func_ext(0, 0, 0, &res); + return kIOReturnSuccess; +#else /* __ARM_COHERENT_IO__ */ + break; +#endif /* __ARM_COHERENT_IO__ */ +#else /* defined(__arm__) || defined(__arm64__) */ func = &dcache_incoherent_io_store64; break; +#endif /* defined(__arm__) || defined(__arm64__) */ case kIOMemorySetEncrypted: func = &SetEncryptOp; @@ -2668,8 +2905,13 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, break; } +#if defined(__arm__) || defined(__arm64__) + if ((func == 0) && (func_ext == 0)) + return (kIOReturnUnsupported); +#else /* defined(__arm__) || defined(__arm64__) */ if (!func) return (kIOReturnUnsupported); +#endif /* defined(__arm__) || defined(__arm64__) */ if (kIOMemoryThreadSafe & _flags) LOCK; @@ -2690,7 +2932,19 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, if (dstLen > remaining) dstLen = remaining; +#if defined(__arm__) || defined(__arm64__) + if (func) + (*func)(dstAddr64, dstLen); + if (func_ext) { + (*func_ext)(dstAddr64, dstLen, remaining, &res); + if (res != 0x0UL) { + remaining = 0; + break; + } + } +#else /* defined(__arm__) || defined(__arm64__) */ (*func)(dstAddr64, dstLen); +#endif /* defined(__arm__) || defined(__arm64__) */ offset += dstLen; remaining -= dstLen; @@ -2702,9 +2956,27 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, return (remaining ? kIOReturnUnderrun : kIOReturnSuccess); } +/* + * + */ + #if defined(__i386__) || defined(__x86_64__) -extern vm_offset_t first_avail; -#define io_kernel_static_end first_avail + +#define io_kernel_static_start vm_kernel_stext +#define io_kernel_static_end vm_kernel_etext + +#elif defined(__arm__) || defined(__arm64__) + +extern vm_offset_t static_memory_end; + +#if defined(__arm64__) +#define io_kernel_static_start vm_kext_base +#else /* defined(__arm64__) */ +#define io_kernel_static_start vm_kernel_stext +#endif /* defined(__arm64__) */ + +#define io_kernel_static_end static_memory_end + #else #error io_kernel_static_end is undefined for this architecture #endif @@ -2713,7 +2985,7 @@ static kern_return_t io_get_kernel_static_upl( vm_map_t /* map */, uintptr_t offset, - vm_size_t *upl_size, + upl_size_t *upl_size, upl_t *upl, upl_page_info_array_t page_list, unsigned int *count, @@ -2735,7 +3007,7 @@ io_get_kernel_static_upl( if (!phys) break; page_list[page].phys_addr = phys; - page_list[page].pageout = 0; + page_list[page].free_when_done = 0; page_list[page].absent = 0; page_list[page].dirty = 0; page_list[page].precious = 0; @@ -2752,28 +3024,36 @@ io_get_kernel_static_upl( IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) { IOOptionBits type = _flags & kIOMemoryTypeMask; - IOReturn error = kIOReturnCannotWire; + IOReturn error = kIOReturnSuccess; ioGMDData *dataP; upl_page_info_array_t pageInfo; ppnum_t mapBase; + vm_tag_t tag = VM_KERN_MEMORY_NONE; assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type); if ((kIODirectionOutIn & forDirection) == kIODirectionNone) forDirection = (IODirection) (forDirection | getDirection()); - int uplFlags; // This Mem Desc's default flags for upl creation + dataP = getDataP(_memoryEntries); + upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation switch (kIODirectionOutIn & forDirection) { - case kIODirectionOut: - // Pages do 
not need to be marked as dirty on commit - uplFlags = UPL_COPYOUT_FROM; - break; + case kIODirectionOut: + // Pages do not need to be marked as dirty on commit + uplFlags = UPL_COPYOUT_FROM; + dataP->fDMAAccess = kIODMAMapReadAccess; + break; - case kIODirectionIn: - default: - uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM - break; + case kIODirectionIn: + dataP->fDMAAccess = kIODMAMapWriteAccess; + uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM + break; + + default: + dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess; + uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM + break; } if (_wireCount) @@ -2783,192 +3063,201 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this)); error = kIOReturnNotWritable; } - else error = kIOReturnSuccess; - return (error); } - - dataP = getDataP(_memoryEntries); - IOMapper *mapper; - mapper = dataP->fMapper; - dataP->fMappedBase = 0; - - uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; - if (kIODirectionPrepareToPhys32 & forDirection) + else { - if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR; - if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32; - } - if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT; - if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO; - if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY; - - mapBase = 0; - - // Note that appendBytes(NULL) zeros the data up to the desired length - // and the length parameter is an unsigned int - size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); - if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory); - if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory); - dataP = 0; + IOMapper *mapper; - // Find the appropriate vm_map for the given task - vm_map_t curMap; - if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0; - else curMap = get_task_map(_task); + mapper = dataP->fMapper; + dataP->fMappedBaseValid = dataP->fMappedBase = 0; - // Iterate over the vector of virtual ranges - Ranges vec = _ranges; - unsigned int pageIndex = 0; - IOByteCount mdOffset = 0; - ppnum_t highestPage = 0; - - IOMemoryEntry * memRefEntry = 0; - if (_memRef) memRefEntry = &_memRef->entries[0]; + uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; + tag = _kernelTag; + if (VM_KERN_MEMORY_NONE == tag) tag = IOMemoryTag(kernel_map); - for (UInt range = 0; range < _rangesCount; range++) { - ioPLBlock iopl; - mach_vm_address_t startPage; - mach_vm_size_t numBytes; - ppnum_t highPage = 0; - - // Get the startPage address and length of vec[range] - getAddrLenForInd(startPage, numBytes, type, vec, range); - iopl.fPageOffset = startPage & PAGE_MASK; - numBytes += iopl.fPageOffset; - startPage = trunc_page_64(startPage); - - if (mapper) - iopl.fMappedPage = mapBase + pageIndex; - else - iopl.fMappedPage = 0; - - // Iterate over the current range, creating UPLs - while (numBytes) { - vm_address_t kernelStart = (vm_address_t) startPage; - vm_map_t theMap; - if (curMap) theMap = curMap; - else if (_memRef) - { - theMap = NULL; - } - else - { - assert(_task == kernel_task); - theMap = IOPageableMapForAddress(kernelStart); - } - - int ioplFlags = uplFlags; - dataP = getDataP(_memoryEntries); - pageInfo = getPageList(dataP); - upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; - - vm_size_t ioplSize = round_page(numBytes); - unsigned int numPageInfo = 
atop_32(ioplSize); - - if ((theMap == kernel_map) && (kernelStart < io_kernel_static_end)) { - error = io_get_kernel_static_upl(theMap, - kernelStart, - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &highPage); - } - else if (_memRef) { - memory_object_offset_t entryOffset; - - entryOffset = (mdOffset - iopl.fPageOffset - memRefEntry->offset); - if (entryOffset >= memRefEntry->size) { - memRefEntry++; - if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry"); - entryOffset = 0; - } - if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset); - error = memory_object_iopl_request(memRefEntry->entry, - entryOffset, - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &ioplFlags); - } - else { - assert(theMap); - error = vm_map_create_upl(theMap, - startPage, - (upl_size_t*)&ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &ioplFlags); - } - - assert(ioplSize); - if (error != KERN_SUCCESS) - goto abortExit; + if (kIODirectionPrepareToPhys32 & forDirection) + { + if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR; + if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32; + } + if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT; + if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO; + if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY; + + mapBase = 0; + + // Note that appendBytes(NULL) zeros the data up to the desired length + // and the length parameter is an unsigned int + size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); + if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory); + if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory); + dataP = 0; + + // Find the appropriate vm_map for the given task + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0; + else curMap = get_task_map(_task); + + // Iterate over the vector of virtual ranges + Ranges vec = _ranges; + unsigned int pageIndex = 0; + IOByteCount mdOffset = 0; + ppnum_t highestPage = 0; + + IOMemoryEntry * memRefEntry = 0; + if (_memRef) memRefEntry = &_memRef->entries[0]; + + for (UInt range = 0; range < _rangesCount; range++) { + ioPLBlock iopl; + mach_vm_address_t startPage, startPageOffset; + mach_vm_size_t numBytes; + ppnum_t highPage = 0; + + // Get the startPage address and length of vec[range] + getAddrLenForInd(startPage, numBytes, type, vec, range); + startPageOffset = startPage & PAGE_MASK; + iopl.fPageOffset = startPageOffset; + numBytes += startPageOffset; + startPage = trunc_page_64(startPage); + + if (mapper) + iopl.fMappedPage = mapBase + pageIndex; + else + iopl.fMappedPage = 0; + + // Iterate over the current range, creating UPLs + while (numBytes) { + vm_address_t kernelStart = (vm_address_t) startPage; + vm_map_t theMap; + if (curMap) theMap = curMap; + else if (_memRef) + { + theMap = NULL; + } + else + { + assert(_task == kernel_task); + theMap = IOPageableMapForAddress(kernelStart); + } - if (iopl.fIOPL) - highPage = upl_get_highest_page(iopl.fIOPL); - if (highPage > highestPage) - highestPage = highPage; + // ioplFlags is an in/out parameter + upl_control_flags_t ioplFlags = uplFlags; + dataP = getDataP(_memoryEntries); + pageInfo = getPageList(dataP); + upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; + + mach_vm_size_t _ioplSize = round_page(numBytes); + upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? 
_ioplSize : MAX_UPL_SIZE_BYTES; + unsigned int numPageInfo = atop_32(ioplSize); + + if ((theMap == kernel_map) + && (kernelStart >= io_kernel_static_start) + && (kernelStart < io_kernel_static_end)) { + error = io_get_kernel_static_upl(theMap, + kernelStart, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &highPage); + } + else if (_memRef) { + memory_object_offset_t entryOffset; + + entryOffset = mdOffset; + entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset); + if (entryOffset >= memRefEntry->size) { + memRefEntry++; + if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry"); + entryOffset = 0; + } + if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset); + error = memory_object_iopl_request(memRefEntry->entry, + entryOffset, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags, + tag); + } + else { + assert(theMap); + error = vm_map_create_upl(theMap, + startPage, + (upl_size_t*)&ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags, + tag); + } - error = kIOReturnCannotWire; + if (error != KERN_SUCCESS) goto abortExit; - if (baseInfo->device) { - numPageInfo = 1; - iopl.fFlags = kIOPLOnDevice; - } - else { - iopl.fFlags = 0; - } + assert(ioplSize); - iopl.fIOMDOffset = mdOffset; - iopl.fPageInfo = pageIndex; - if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true; + if (iopl.fIOPL) + highPage = upl_get_highest_page(iopl.fIOPL); + if (highPage > highestPage) + highestPage = highPage; -#if 0 - // used to remove the upl for auto prepares here, for some errant code - // that freed memory before the descriptor pointing at it - if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL) - { - upl_commit(iopl.fIOPL, 0, 0); - upl_deallocate(iopl.fIOPL); - iopl.fIOPL = 0; - } -#endif + if (baseInfo->device) { + numPageInfo = 1; + iopl.fFlags = kIOPLOnDevice; + } + else { + iopl.fFlags = 0; + } - if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { - // Clean up partial created and unsaved iopl - if (iopl.fIOPL) { - upl_abort(iopl.fIOPL, 0); - upl_deallocate(iopl.fIOPL); + iopl.fIOMDOffset = mdOffset; + iopl.fPageInfo = pageIndex; + if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) dataP->fDiscontig = true; + + if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { + // Clean up partial created and unsaved iopl + if (iopl.fIOPL) { + upl_abort(iopl.fIOPL, 0); + upl_deallocate(iopl.fIOPL); + } + goto abortExit; + } + dataP = 0; + + // Check for a multiple iopl's in one virtual range + pageIndex += numPageInfo; + mdOffset -= iopl.fPageOffset; + if (ioplSize < numBytes) { + numBytes -= ioplSize; + startPage += ioplSize; + mdOffset += ioplSize; + iopl.fPageOffset = 0; + if (mapper) iopl.fMappedPage = mapBase + pageIndex; + } + else { + mdOffset += numBytes; + break; } - goto abortExit; - } - dataP = 0; - - // Check for a multiple iopl's in one virtual range - pageIndex += numPageInfo; - mdOffset -= iopl.fPageOffset; - if (ioplSize < numBytes) { - numBytes -= ioplSize; - startPage += ioplSize; - mdOffset += ioplSize; - iopl.fPageOffset = 0; - if (mapper) iopl.fMappedPage = mapBase + pageIndex; - } - else { - mdOffset += numBytes; - break; } } - } - _highestPage = highestPage; + _highestPage = highestPage; - if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly; + if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly; + } - return kIOReturnSuccess; +#if IOTRACKING + if (!(_flags & kIOMemoryAutoPrepare) 
&& (kIOReturnSuccess == error)) + { + dataP = getDataP(_memoryEntries); + if (!dataP->fWireTracking.link.next) + { + IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag); + } + } +#endif /* IOTRACKING */ + + return (error); abortExit: { @@ -3022,112 +3311,127 @@ bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper dataP->fPreparationID = kIOPreparationIDUnprepared; dataP->fDiscontig = false; dataP->fCompletionError = false; + dataP->fMappedBaseValid = false; return (true); } IOReturn IOMemoryDescriptor::dmaMap( IOMapper * mapper, + IODMACommand * command, const IODMAMapSpecification * mapSpec, uint64_t offset, uint64_t length, - uint64_t * address, - ppnum_t * mapPages) + uint64_t * mapAddress, + uint64_t * mapLength) { - IOMDDMAWalkSegmentState walkState; - IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState; - IOOptionBits mdOp; - IOReturn ret; - IOPhysicalLength segLen; - addr64_t phys, align, pageOffset; - ppnum_t base, pageIndex, pageCount; - uint64_t index; - uint32_t mapOptions = 0; + IOReturn err; + uint32_t mapOptions; + mapOptions = 0; + mapOptions |= kIODMAMapReadAccess; if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; - walkArgs->fMapped = false; - mdOp = kIOMDFirstSegment; - pageCount = 0; - for (index = 0; index < length; ) - { - if (index && (page_mask & (index + pageOffset))) break; - - walkArgs->fOffset = offset + index; - ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState)); - mdOp = kIOMDWalkSegments; - if (ret != kIOReturnSuccess) break; - phys = walkArgs->fIOVMAddr; - segLen = walkArgs->fLength; + err = mapper->iovmMapMemory(this, offset, length, mapOptions, + mapSpec, command, NULL, mapAddress, mapLength); - align = (phys & page_mask); - if (!index) pageOffset = align; - else if (align) break; - pageCount += atop_64(round_page_64(align + segLen)); - index += segLen; - } + if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength); - if (index < length) return (kIOReturnVMError); + return (err); +} - base = mapper->iovmMapMemory(this, offset, pageCount, - mapOptions, NULL, mapSpec); +void IOMemoryDescriptor::dmaMapRecord( + IOMapper * mapper, + IODMACommand * command, + uint64_t mapLength) +{ + kern_allocation_name_t alloc; + int16_t prior; - if (!base) return (kIOReturnNoResources); + if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) + { + kern_allocation_update_size(mapper->fAllocName, mapLength); + } - mdOp = kIOMDFirstSegment; - for (pageIndex = 0, index = 0; index < length; ) + if (!command) return; + prior = OSAddAtomic16(1, &_dmaReferences); + if (!prior) { - walkArgs->fOffset = offset + index; - ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState)); - mdOp = kIOMDWalkSegments; - if (ret != kIOReturnSuccess) break; - phys = walkArgs->fIOVMAddr; - segLen = walkArgs->fLength; - - ppnum_t page = atop_64(phys); - ppnum_t count = atop_64(round_page_64(phys + segLen)) - page; - while (count--) + if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) { - mapper->iovmInsert(base, pageIndex, page); - page++; - pageIndex++; + _mapName = alloc; + mapLength = _length; + kern_allocation_update_subtotal(alloc, _kernelTag, mapLength); } - index += segLen; + else _mapName = NULL; } - if (pageIndex != pageCount) panic("pageIndex"); +} - *address = ptoa_64(base) + pageOffset; - if (mapPages) *mapPages = pageCount; +IOReturn IOMemoryDescriptor::dmaUnmap( + IOMapper * mapper, + IODMACommand * command, + uint64_t offset, + 
uint64_t mapAddress, + uint64_t mapLength) +{ + IOReturn ret; + kern_allocation_name_t alloc; + kern_allocation_name_t mapName; + int16_t prior; + + mapName = 0; + prior = 0; + if (command) + { + mapName = _mapName; + if (_dmaReferences) prior = OSAddAtomic16(-1, &_dmaReferences); + else panic("_dmaReferences underflow"); + } - return (kIOReturnSuccess); + if (!mapLength) return (kIOReturnSuccess); + + ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength); + + if ((alloc = mapper->fAllocName)) + { + kern_allocation_update_size(alloc, -mapLength); + if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) + { + mapLength = _length; + kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength); + } + } + + return (ret); } IOReturn IOGeneralMemoryDescriptor::dmaMap( IOMapper * mapper, + IODMACommand * command, const IODMAMapSpecification * mapSpec, uint64_t offset, uint64_t length, - uint64_t * address, - ppnum_t * mapPages) + uint64_t * mapAddress, + uint64_t * mapLength) { IOReturn err = kIOReturnSuccess; ioGMDData * dataP; IOOptionBits type = _flags & kIOMemoryTypeMask; - *address = 0; + *mapAddress = 0; if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess); + if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64) || offset || (length != _length)) { - err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages); + err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength); } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) { const ioPLBlock * ioplList = getIOPLList(dataP); upl_page_info_t * pageList; uint32_t mapOptions = 0; - ppnum_t base; IODMAMapSpecification mapSpec; bzero(&mapSpec, sizeof(mapSpec)); @@ -3141,18 +3445,29 @@ IOReturn IOGeneralMemoryDescriptor::dmaMap( pageList = (upl_page_info_t *) ioplList->fPageInfo; mapOptions |= kIODMAMapPagingPath; } - else - pageList = getPageList(dataP); + else pageList = getPageList(dataP); - if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; + if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) + { + mapOptions |= kIODMAMapPageListFullyOccupied; + } + + assert(dataP->fDMAAccess); + mapOptions |= dataP->fDMAAccess; // Check for direct device non-paged memory if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous; - base = mapper->iovmMapMemory( - this, offset, _pages, mapOptions, &pageList[0], &mapSpec); - *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK); - if (mapPages) *mapPages = _pages; + IODMAMapPageList dmaPageList = + { + .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask), + .pageListCount = _pages, + .pageList = &pageList[0] + }; + err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec, + command, &dmaPageList, mapAddress, mapLength); + + if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength); } return (err); @@ -3170,33 +3485,34 @@ IOReturn IOGeneralMemoryDescriptor::dmaMap( IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) { - IOReturn error = kIOReturnSuccess; + IOReturn error = kIOReturnSuccess; IOOptionBits type = _flags & kIOMemoryTypeMask; if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) return kIOReturnSuccess; - if (_prepareLock) - IOLockLock(_prepareLock); + assert (!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); + + if (_prepareLock) 
IOLockLock(_prepareLock); if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { - error = wireVirtual(forDirection); + error = wireVirtual(forDirection); } if (kIOReturnSuccess == error) { - if (1 == ++_wireCount) - { - if (kIOMemoryClearEncrypt & _flags) - { - performOperation(kIOMemoryClearEncrypted, 0, _length); - } - } + if (1 == ++_wireCount) + { + if (kIOMemoryClearEncrypt & _flags) + { + performOperation(kIOMemoryClearEncrypted, 0, _length); + } + } } - if (_prepareLock) - IOLockUnlock(_prepareLock); + if (_prepareLock) IOLockUnlock(_prepareLock); return error; } @@ -3213,79 +3529,84 @@ IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection) { IOOptionBits type = _flags & kIOMemoryTypeMask; - ioGMDData * dataP; + ioGMDData * dataP; if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) return kIOReturnSuccess; - if (_prepareLock) - IOLockLock(_prepareLock); + assert (!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); - assert(_wireCount); + if (_prepareLock) IOLockLock(_prepareLock); + do + { + assert(_wireCount); + if (!_wireCount) break; + dataP = getDataP(_memoryEntries); + if (!dataP) break; - if ((kIODirectionCompleteWithError & forDirection) - && (dataP = getDataP(_memoryEntries))) - dataP->fCompletionError = true; + if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true; - if (_wireCount) - { if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) { performOperation(kIOMemorySetEncrypted, 0, _length); } - _wireCount--; - if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) - { - IOOptionBits type = _flags & kIOMemoryTypeMask; - dataP = getDataP(_memoryEntries); - ioPLBlock *ioplList = getIOPLList(dataP); - UInt ind, count = getNumIOPL(_memoryEntries, dataP); + _wireCount--; + if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) + { + ioPLBlock *ioplList = getIOPLList(dataP); + UInt ind, count = getNumIOPL(_memoryEntries, dataP); - if (_wireCount) - { - // kIODirectionCompleteWithDataValid & forDirection - if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) - { - for (ind = 0; ind < count; ind++) - { - if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL); - } - } - } - else - { -#if IOMD_DEBUG_DMAACTIVE - if (__iomd_reservedA) panic("complete() while dma active"); -#endif /* IOMD_DEBUG_DMAACTIVE */ + if (_wireCount) + { + // kIODirectionCompleteWithDataValid & forDirection + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { + vm_tag_t tag; + tag = getVMTag(kernel_map); + for (ind = 0; ind < count; ind++) + { + if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL, tag); + } + } + } + else + { + if (_dmaReferences) panic("complete() while dma active"); - if (dataP->fMappedBase) { - dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages); - dataP->fMappedBase = 0; - } - // Only complete iopls that we created which are for TypeVirtual - if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { - for (ind = 0; ind < count; ind++) - if (ioplList[ind].fIOPL) { - if (dataP->fCompletionError) - upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/); - else - upl_commit(ioplList[ind].fIOPL, 0, 0); - upl_deallocate(ioplList[ind].fIOPL); - } - } else if (kIOMemoryTypeUPL == type) 
{ - upl_set_referenced(ioplList[0].fIOPL, false); - } + if (dataP->fMappedBaseValid) { + dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength); + dataP->fMappedBaseValid = dataP->fMappedBase = 0; + } +#if IOTRACKING + if (dataP->fWireTracking.link.next) IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages)); +#endif /* IOTRACKING */ + // Only complete iopls that we created which are for TypeVirtual + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { + for (ind = 0; ind < count; ind++) + if (ioplList[ind].fIOPL) { + if (dataP->fCompletionError) + upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/); + else + upl_commit(ioplList[ind].fIOPL, 0, 0); + upl_deallocate(ioplList[ind].fIOPL); + } + } else if (kIOMemoryTypeUPL == type) { + upl_set_referenced(ioplList[0].fIOPL, false); + } - (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() + (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() - dataP->fPreparationID = kIOPreparationIDUnprepared; - } - } + dataP->fPreparationID = kIOPreparationIDUnprepared; + _flags &= ~kIOMemoryPreparedReadOnly; + } + } } + while (false); - if (_prepareLock) - IOLockUnlock(_prepareLock); + if (_prepareLock) IOLockUnlock(_prepareLock); return kIOReturnSuccess; } @@ -3296,7 +3617,6 @@ IOReturn IOGeneralMemoryDescriptor::doMap( IOOptionBits options, IOByteCount __offset, IOByteCount __length ) - { #ifndef __LP64__ if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit"); @@ -3317,6 +3637,9 @@ IOReturn IOGeneralMemoryDescriptor::doMap( if ((offset >= _length) || ((offset + length) > _length)) return( kIOReturnBadArgument ); + assert (!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) return (0); + if (vec.v) getAddrLenForInd(range0Addr, range0Len, type, vec, 0); @@ -3325,6 +3648,7 @@ IOReturn IOGeneralMemoryDescriptor::doMap( && (mapping->fAddressTask == _task) && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere) + && (!(kIOMapUnique & options)) && (1 == _rangesCount) && (0 == offset) && range0Addr @@ -3361,10 +3685,10 @@ IOReturn IOGeneralMemoryDescriptor::doMap( { do { - upl_t redirUPL2; - vm_size_t size; - int flags; - unsigned int lock_count; + upl_t redirUPL2; + upl_size_t size; + upl_control_flags_t flags; + unsigned int lock_count; if (!_memRef || (1 != _memRef->count)) { @@ -3378,7 +3702,7 @@ IOReturn IOGeneralMemoryDescriptor::doMap( if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2, NULL, NULL, - &flags)) + &flags, getVMTag(kernel_map))) redirUPL2 = NULL; for (lock_count = 0; @@ -3419,15 +3743,19 @@ IOReturn IOGeneralMemoryDescriptor::doMap( // upl_transpose> // else { - err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); - + err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); +#if IOTRACKING + if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) + { + // only dram maps in the default on developement case + IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength); + } +#endif /* IOTRACKING */ if ((err == KERN_SUCCESS) && pager) { err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options); - if (err != KERN_SUCCESS) - { - doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0); - } + + if (err != KERN_SUCCESS) 
doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0); else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) { mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); @@ -3438,6 +3766,25 @@ IOReturn IOGeneralMemoryDescriptor::doMap( return (err); } +#if IOTRACKING +IOReturn +IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task, + mach_vm_address_t * address, mach_vm_size_t * size) +{ +#define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field)) + + IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking)); + + if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady); + + *task = map->fAddressTask; + *address = map->fAddress; + *size = map->fLength; + + return (kIOReturnSuccess); +} +#endif /* IOTRACKING */ + IOReturn IOGeneralMemoryDescriptor::doUnmap( vm_map_t addressMap, IOVirtualAddress __address, @@ -3561,8 +3908,17 @@ IOReturn IOMemoryDescriptor::populateDevicePager( mach_vm_size_t page; mach_vm_size_t pageOffset; mach_vm_size_t pagerOffset; - IOPhysicalLength segLen; + IOPhysicalLength segLen, chunk; addr64_t physAddr; + IOOptionBits type; + + type = _flags & kIOMemoryTypeMask; + + if (reserved->dp.pagerContig) + { + sourceOffset = 0; + pagerOffset = 0; + } physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ); assert( physAddr ); @@ -3583,26 +3939,24 @@ IOReturn IOMemoryDescriptor::populateDevicePager( if (kIOReturnSuccess != err) break; - if (reserved && reserved->dp.pagerContig) +#if DEBUG || DEVELOPMENT + if ((kIOMemoryTypeUPL != type) + && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) { - IOPhysicalLength allLen; - addr64_t allPhys; + OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen); + } +#endif /* DEBUG || DEVELOPMENT */ + + chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size); + for (page = 0; + (page < segLen) && (KERN_SUCCESS == err); + page += chunk) + { + err = device_pager_populate_object(pager, pagerOffset, + (ppnum_t)(atop_64(physAddr + page)), chunk); + pagerOffset += chunk; + } - allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone ); - assert( allPhys ); - err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) ); - } - else - { - for( page = 0; - (page < segLen) && (KERN_SUCCESS == err); - page += page_size) - { - err = device_pager_populate_object(pager, pagerOffset, - (ppnum_t)(atop_64(physAddr + page)), page_size); - pagerOffset += page_size; - } - } assert (KERN_SUCCESS == err); if (err) break; @@ -3611,11 +3965,14 @@ IOReturn IOMemoryDescriptor::populateDevicePager( // faulting in later can't take place from interrupt level. if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) { - vm_fault(addressMap, - (vm_map_offset_t)trunc_page_64(address), - VM_PROT_READ|VM_PROT_WRITE, - FALSE, THREAD_UNINT, NULL, - (vm_map_offset_t)0); + err = vm_fault(addressMap, + (vm_map_offset_t)trunc_page_64(address), + options & kIOMapReadOnly ? 
VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, + (vm_map_offset_t)0); + + if (KERN_SUCCESS != err) break; } sourceOffset += segLen - pageOffset; @@ -3637,31 +3994,32 @@ IOReturn IOMemoryDescriptor::doUnmap( IOByteCount __length ) { IOReturn err; + IOMemoryMap * mapping; mach_vm_address_t address; mach_vm_size_t length; - if (__length) - { - address = __address; - length = __length; - } - else - { - addressMap = ((IOMemoryMap *) __address)->fAddressMap; - address = ((IOMemoryMap *) __address)->fAddress; - length = ((IOMemoryMap *) __address)->fLength; - } + if (__length) panic("doUnmap"); - if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) - addressMap = IOPageableMapForAddress( address ); + mapping = (IOMemoryMap *) __address; + addressMap = mapping->fAddressMap; + address = mapping->fAddress; + length = mapping->fLength; + if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS; + else + { + if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) + addressMap = IOPageableMapForAddress( address ); #if DEBUG - if( kIOLogMapping & gIOKitDebug) - IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n", - addressMap, address, length ); + if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n", + addressMap, address, length ); #endif + err = mach_vm_deallocate( addressMap, address, length ); + } - err = mach_vm_deallocate( addressMap, address, length ); +#if IOTRACKING + IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking); +#endif /* IOTRACKING */ return (err); } @@ -3774,7 +4132,7 @@ IOReturn IOMemoryMap::unmap( void ) LOCK; if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory - && (0 == (fOptions & kIOMapStatic))) { + && (0 == (kIOMapStatic & fOptions))) { err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0); @@ -3797,8 +4155,11 @@ IOReturn IOMemoryMap::unmap( void ) void IOMemoryMap::taskDied( void ) { LOCK; - if (fUserClientUnmap) - unmap(); + if (fUserClientUnmap) unmap(); +#if IOTRACKING + else IOTrackingRemoveUser(gIOMapTracking, &fTracking); +#endif /* IOTRACKING */ + if( fAddressMap) { vm_map_deallocate(fAddressMap); fAddressMap = 0; @@ -3958,10 +4319,12 @@ IOReturn IOMemoryMap::wireRange( IOReturn kr; mach_vm_address_t start = trunc_page_64(fAddress + offset); mach_vm_address_t end = round_page_64(fAddress + offset + length); - - if (kIODirectionOutIn & options) + vm_prot_t prot; + + prot = (kIODirectionOutIn & options); + if (prot) { - kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE); + kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE); } else { @@ -4009,9 +4372,13 @@ void IOMemoryDescriptor::initialize( void ) void IOMemoryDescriptor::free( void ) { - if( _mappings) - _mappings->release(); + if( _mappings) _mappings->release(); + if (reserved) + { + IODelete(reserved, IOMemoryDescriptorReserved, 1); + reserved = NULL; + } super::free(); } @@ -4117,12 +4484,12 @@ IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) { - vm_size_t size = round_page(fLength); - int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL - | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; + upl_size_t size = round_page(fLength); + upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; if (KERN_SUCCESS != 
memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL, NULL, NULL, - &flags)) + &flags, fMemory->getVMTag(kernel_map))) fRedirUPL = 0; if (physMem) @@ -4175,9 +4542,8 @@ IOMemoryMap * IOMemoryDescriptor::makeMapping( if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit"); #endif /* !__LP64__ */ - IOMemoryDescriptor * mapDesc = 0; - IOMemoryMap * result = 0; - OSIterator * iter; + IOMemoryDescriptor * mapDesc = 0; + __block IOMemoryMap * result = 0; IOMemoryMap * mapping = (IOMemoryMap *) __address; mach_vm_size_t offset = mapping->fOffset + __offset; @@ -4222,20 +4588,17 @@ IOMemoryMap * IOMemoryDescriptor::makeMapping( else { // look for a compatible existing mapping - if( (iter = OSCollectionIterator::withCollection(_mappings))) + if (_mappings) _mappings->iterateObjects(^(OSObject * object) { - IOMemoryMap * lookMapping; - while ((lookMapping = (IOMemoryMap *) iter->getNextObject())) + IOMemoryMap * lookMapping = (IOMemoryMap *) object; + if ((result = lookMapping->copyCompatible(mapping))) { - if ((result = lookMapping->copyCompatible(mapping))) - { - addMapping(result); - result->setMemoryDescriptor(this, offset); - break; - } + addMapping(result); + result->setMemoryDescriptor(this, offset); + return (true); } - iter->release(); - } + return (false); + }); if (result || (options & kIOMapReference)) { if (result != mapping) @@ -4355,16 +4718,17 @@ void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset, bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const { - OSSymbol const *keys[2]; - OSObject *values[2]; + OSSymbol const *keys[2] = {0}; + OSObject *values[2] = {0}; OSArray * array; + vm_size_t vcopy_size; struct SerData { user_addr_t address; user_size_t length; - } *vcopy; + } *vcopy = NULL; unsigned int index, nRanges; - bool result; + bool result = false; IOOptionBits type = _flags & kIOMemoryTypeMask; @@ -4374,17 +4738,19 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const if (!array) return (false); nRanges = _rangesCount; - vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges); - if (vcopy == 0) return false; + if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) { + result = false; + goto bail; + } + vcopy = (SerData *) IOMalloc(vcopy_size); + if (vcopy == 0) { + result = false; + goto bail; + } keys[0] = OSSymbol::withCString("address"); keys[1] = OSSymbol::withCString("length"); - result = false; - values[0] = values[1] = 0; - - // From this point on we can go to bail. - // Copy the volatile data so we don't have to allocate memory // while the lock is held. 
LOCK; @@ -4444,151 +4810,13 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const if (keys[1]) keys[1]->release(); if (vcopy) - IOFree(vcopy, sizeof(SerData) * nRanges); + IOFree(vcopy, vcopy_size); return result; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#if DEVELOPMENT || DEBUG - -extern "C" void IOMemoryDescriptorTest(int x) -{ - IOGeneralMemoryDescriptor * md; - - vm_offset_t data[2]; - vm_size_t bsize = 16*1024*1024; - - vm_size_t srcsize, srcoffset, mapoffset, size; - - kern_return_t kr; - - kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE); - vm_inherit(kernel_map, data[0] + 1*4096, 4096, VM_INHERIT_NONE); - vm_inherit(kernel_map, data[0] + 16*4096, 4096, VM_INHERIT_NONE); - - kprintf("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]); - - uint32_t idx, offidx; - for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) - { - ((uint32_t*)data[0])[idx] = idx; - } - - for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 1) + 0x40c)) - { - for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 1) + 0x3fc)) - { - IOAddressRange ranges[3]; - uint32_t rangeCount = 1; - - bzero(&ranges[0], sizeof(ranges)); - ranges[0].address = data[0] + srcoffset; - ranges[0].length = srcsize; - - if (srcsize > 5*page_size) - { - ranges[0].length = 7634; - ranges[1].length = 9870; - ranges[2].length = srcsize - ranges[0].length - ranges[1].length; - ranges[1].address = ranges[0].address + ranges[0].length; - ranges[2].address = ranges[1].address + ranges[1].length; - rangeCount = 3; - } - else if ((srcsize > 2*page_size) && !(page_mask & srcoffset)) - { - ranges[0].length = 4096; - ranges[1].length = 4096; - ranges[2].length = srcsize - ranges[0].length - ranges[1].length; - ranges[0].address = data[0] + srcoffset + 4096; - ranges[1].address = data[0] + srcoffset; - ranges[2].address = ranges[0].address + ranges[0].length; - rangeCount = 3; - } - - md = OSDynamicCast(IOGeneralMemoryDescriptor, - IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task)); - assert(md); - - kprintf("IOMemoryReferenceCreate [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n", - (long) srcsize, (long) srcoffset, - (long long) ranges[0].address - data[0], (long long) ranges[0].length, - (long long) ranges[1].address - data[0], (long long) ranges[1].length, - (long long) ranges[2].address - data[0], (long long) ranges[2].length); - - if (kIOReturnSuccess == kr) - { - for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) - { - for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 1) + 0x20)) - { - IOMemoryMap * map; - mach_vm_address_t addr = 0; - uint32_t data; - - kprintf("createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size); - if (map) addr = map->getAddress(); - else kr = kIOReturnError; - - kprintf(">mapRef 0x%x %llx\n", kr, addr); - - if (kIOReturnSuccess != kr) break; - kr = md->prepare(); - if (kIOReturnSuccess != kr) - { - kprintf("prepare() fail 0x%x\n", kr); - break; - } - for (idx = 0; idx < size; idx += sizeof(uint32_t)) - { - offidx = (idx + mapoffset + srcoffset); - if ((srcsize <= 5*page_size) && (srcsize > 2*page_size) && !(page_mask & srcoffset)) - { - if (offidx < 8192) offidx ^= 0x1000; - } - offidx /= sizeof(uint32_t); - - if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)]) - { - kprintf("vm mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset); - kr = kIOReturnBadMedia; - 
} - else - { - if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0; - if (offidx != data) - { - kprintf("phys mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset); - kr = kIOReturnBadMedia; - } - } - } - md->complete(); - map->release(); - kprintf("unmapRef %llx\n", addr); - } - if (kIOReturnSuccess != kr) break; - } - } - if (kIOReturnSuccess != kr) break; - } - if (kIOReturnSuccess != kr) break; - } - - if (kIOReturnSuccess != kr) kprintf("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n", - (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset); - - vm_deallocate(kernel_map, data[0], bsize); -// vm_deallocate(kernel_map, data[1], size); -} - -#endif /* DEVELOPMENT || DEBUG */ - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0); #ifdef __LP64__ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
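
The hunks above rework how an IOGeneralMemoryDescriptor is wired and DMA-mapped: prepare() takes the wireVirtual() path and bumps _wireCount, the IOMapper is driven through dmaMap()/dmaMapRecord(), and complete() undoes the work via dmaUnmap() plus the IOPL commit/abort loop. Drivers never call those internals directly; they reach them through the public IOMemoryDescriptor / IODMACommand interfaces. The sketch below is a minimal, hedged illustration of that typical call sequence and is not part of this change; the function name DoOneDMA and the 64-bit / 1 MB specification values are assumptions chosen for the example.

#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IODMACommand.h>

static IOReturn DoOneDMA(task_t task, mach_vm_address_t addr, mach_vm_size_t len)
{
    IOReturn ret;

    // Describe the client buffer. kIODirectionOutIn keeps the pages writable,
    // which is what leads the dmaMap() path above to request write access.
    IOMemoryDescriptor * md =
        IOMemoryDescriptor::withAddressRange(addr, len, kIODirectionOutIn, task);
    if (!md) return kIOReturnNoMemory;

    // prepare() wires the pages (wireVirtual() in the diff) and must be
    // balanced by complete() below.
    ret = md->prepare();
    if (kIOReturnSuccess != ret) { md->release(); return ret; }

    // A 64-bit, host-endian DMA command; with the default auto-prepare,
    // setMemoryDescriptor() is roughly what drives IOMemoryDescriptor::dmaMap()
    // through the system mapper.
    IODMACommand * dma = IODMACommand::withSpecification(
        IODMACommand::OutputHost64,     // segment output function
        64,                             // device address bits
        0x100000);                      // max segment size (assumed 1 MB)
    if (!dma) ret = kIOReturnNoMemory;
    else
    {
        ret = dma->setMemoryDescriptor(md);     // auto-prepares the command
        if (kIOReturnSuccess == ret)
        {
            UInt64 offset = 0;
            while (offset < len)
            {
                IODMACommand::Segment64 seg;
                UInt32                  numSeg = 1;

                // Walk the IOVM segments produced by the mapper.
                ret = dma->gen64IOVMSegments(&offset, &seg, &numSeg);
                if ((kIOReturnSuccess != ret) || !numSeg) break;

                // Program seg.fIOVMAddr / seg.fLength into the device here.
            }
            dma->clearMemoryDescriptor();       // completes the command, undoing the map
        }
        dma->release();
    }

    // complete() drops _wireCount and, on the last reference, takes the
    // dmaUnmap() / upl_commit() path shown in the complete() hunk above.
    md->complete();
    md->release();
    return ret;
}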