X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d7e50217d7adf6e52786a38bcaa4cd698cb9a79e..813fb2f63a553c957e917ede5f119b021d6ce391:/iokit/Kernel/IOMemoryDescriptor.cpp diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp index 12ace713b..fa735f3e4 100644 --- a/iokit/Kernel/IOMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -1,16 +1,19 @@ /* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER @@ -20,15 +23,10 @@ * Please see the License for the specific language governing rights and * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
- * - * HISTORY - * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -// 45678901234567890123456789012345678901234567890123456789012345678901234567890 + + #include #include @@ -36,68 +34,51 @@ #include #include #include +#include +#include + +#include +#include #include +#include + +#include "IOKitKernelInternal.h" #include #include #include #include #include -#include +#include + +#include __BEGIN_DECLS #include +#include #include #include -#ifndef i386 -struct phys_entry *pmap_find_physentry(ppnum_t pa); -#endif -void ipc_port_release_send(ipc_port_t port); - -/* Copy between a physical page and a virtual address in the given vm_map */ -kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which); - -memory_object_t -device_pager_setup( - memory_object_t pager, - int device_handle, - vm_size_t size, - int flags); -void -device_pager_deallocate( - memory_object_t); -kern_return_t -device_pager_populate_object( - memory_object_t pager, - vm_object_offset_t offset, - ppnum_t phys_addr, - vm_size_t size); +#include +#include +#include +#include -/* - * Page fault handling based on vm_map (or entries therein) - */ -extern kern_return_t vm_fault( - vm_map_t map, - vm_offset_t vaddr, - vm_prot_t fault_type, - boolean_t change_wiring, - int interruptible, - pmap_t caller_pmap, - vm_offset_t caller_pmap_addr); +extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); +extern void ipc_port_release_send(ipc_port_t port); +// osfmk/device/iokit_rpc.c +unsigned int IODefaultCacheBits(addr64_t pa); unsigned int IOTranslateCacheBits(struct phys_entry *pp); -vm_map_t IOPageableMapForAddress( vm_address_t address ); +__END_DECLS -typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref); +#define kIOMapperWaitSystem ((IOMapper *) 1) -kern_return_t IOIteratePageableMaps(vm_size_t size, - IOIteratePageableMapsCallback callback, void * ref); -__END_DECLS +static IOMapper * gIOSystemMapper = NULL; -static IOMapper * gIOSystemMapper; +ppnum_t gIOLastPage; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -117,35 +98,103 @@ static IORecursiveLock * gIOMemoryLock; #define WAKEUP \ IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false) +#if 0 +#define DEBG(fmt, args...) { kprintf(fmt, ## args); } +#else +#define DEBG(fmt, args...) 
{} +#endif + +#define IOMD_DEBUG_DMAACTIVE 1 + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE ) +// Some data structures and accessor macros used by the initWithOptions +// Function + +enum ioPLBlockFlags { + kIOPLOnDevice = 0x00000001, + kIOPLExternUPL = 0x00000002, +}; + +struct IOMDPersistentInitData +{ + const IOGeneralMemoryDescriptor * fMD; + IOMemoryReference * fMemRef; +}; + +struct ioPLBlock { + upl_t fIOPL; + vm_address_t fPageInfo; // Pointer to page list or index into it + uint32_t fIOMDOffset; // The offset of this iopl in descriptor + ppnum_t fMappedPage; // Page number of first page in this iopl + unsigned int fPageOffset; // Offset within first page of iopl + unsigned int fFlags; // Flags +}; + +enum { kMaxWireTags = 6 }; + +struct ioGMDData +{ + IOMapper * fMapper; + uint64_t fDMAMapAlignment; + uint64_t fMappedBase; + uint64_t fMappedLength; + uint64_t fPreparationID; +#if IOTRACKING + IOTracking fWireTracking; + struct vm_tag_set fWireTags; + struct vm_tag_set_entry fWireTagsEntries[kMaxWireTags]; +#endif /* IOTRACKING */ + unsigned int fPageCnt; + uint8_t fDMAMapNumAddressBits; + vm_tag_t fAllocTag; + unsigned char fDiscontig:1; + unsigned char fCompletionError:1; + unsigned char _resv:6; + + /* variable length arrays */ + upl_page_info_t fPageList[1] +#if __LP64__ + // align fPageList as for ioPLBlock + __attribute__((aligned(sizeof(upl_t)))) +#endif + ; + ioPLBlock fBlocks[1]; +}; + +#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) +#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt])) +#define getNumIOPL(osd, d) \ + (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) +#define getPageList(d) (&(d->fPageList[0])) +#define computeDataSize(p, u) \ + (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +#define next_page(a) ( trunc_page(a) + PAGE_SIZE ) extern "C" { kern_return_t device_data_action( - int device_handle, + uintptr_t device_handle, ipc_port_t device_pager, vm_prot_t protection, vm_object_offset_t offset, vm_size_t size) { - struct ExpansionData { - void * devicePager; - unsigned int pagerContig:1; - unsigned int unused:31; - IOMemoryDescriptor * memory; - }; kern_return_t kr; - ExpansionData * ref = (ExpansionData *) device_handle; + IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; IOMemoryDescriptor * memDesc; LOCK; - memDesc = ref->memory; + memDesc = ref->dp.memory; if( memDesc) - kr = memDesc->handleFault( device_pager, 0, 0, - offset, size, kIOMapDefaultCache /*?*/); + { + memDesc->retain(); + kr = memDesc->handleFault(device_pager, offset, size); + memDesc->release(); + } else kr = KERN_ABORTED; UNLOCK; @@ -154,919 +203,2677 @@ kern_return_t device_data_action( } kern_return_t device_close( - int device_handle) + uintptr_t device_handle) { - struct ExpansionData { - void * devicePager; - unsigned int pagerContig:1; - unsigned int unused:31; - IOMemoryDescriptor * memory; - }; - ExpansionData * ref = (ExpansionData *) device_handle; + IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; - IODelete( ref, ExpansionData, 1 ); + IODelete( ref, IOMemoryDescriptorReserved, 1 ); return( kIOReturnSuccess ); } - -} +}; // end extern "C" /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * withAddress: - * - * 
Create a new IOMemoryDescriptor. The buffer is a virtual address - * relative to the specified task. If no task is supplied, the kernel - * task is implied. - */ -IOMemoryDescriptor * -IOMemoryDescriptor::withAddress(void * address, - IOByteCount length, - IODirection direction) +// Note this inline function uses C++ reference arguments to return values +// This means that pointers are not passed and NULLs don't have to be +// checked for as a NULL reference is illegal. +static inline void +getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables + UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind) { - return IOMemoryDescriptor:: - withAddress((vm_address_t) address, length, direction, kernel_task); + assert(kIOMemoryTypeUIO == type + || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type + || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type); + if (kIOMemoryTypeUIO == type) { + user_size_t us; + user_addr_t ad; + uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us; + } +#ifndef __LP64__ + else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) { + IOAddressRange cur = r.v64[ind]; + addr = cur.address; + len = cur.length; + } +#endif /* !__LP64__ */ + else { + IOVirtualRange cur = r.v[ind]; + addr = cur.address; + len = cur.length; + } } -IOMemoryDescriptor * -IOMemoryDescriptor::withAddress(vm_address_t address, - IOByteCount length, - IODirection direction, - task_t task) +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static IOReturn +purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) + IOReturn err = kIOReturnSuccess; + + *control = VM_PURGABLE_SET_STATE; + + enum { kIOMemoryPurgeableControlMask = 15 }; + + switch (kIOMemoryPurgeableControlMask & newState) { - if (that->initWithAddress(address, length, direction, task)) - return that; + case kIOMemoryPurgeableKeepCurrent: + *control = VM_PURGABLE_GET_STATE; + break; - that->release(); + case kIOMemoryPurgeableNonVolatile: + *state = VM_PURGABLE_NONVOLATILE; + break; + case kIOMemoryPurgeableVolatile: + *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask); + break; + case kIOMemoryPurgeableEmpty: + *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask); + break; + default: + err = kIOReturnBadArgument; + break; } - return 0; + return (err); } -IOMemoryDescriptor * -IOMemoryDescriptor::withPhysicalAddress( - IOPhysicalAddress address, - IOByteCount length, - IODirection direction ) +static IOReturn +purgeableStateBits(int * state) { - IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; - if (self - && !self->initWithPhysicalAddress(address, length, direction)) { - self->release(); - return 0; - } + IOReturn err = kIOReturnSuccess; - return self; + switch (VM_PURGABLE_STATE_MASK & *state) + { + case VM_PURGABLE_NONVOLATILE: + *state = kIOMemoryPurgeableNonVolatile; + break; + case VM_PURGABLE_VOLATILE: + *state = kIOMemoryPurgeableVolatile; + break; + case VM_PURGABLE_EMPTY: + *state = kIOMemoryPurgeableEmpty; + break; + default: + *state = kIOMemoryPurgeableNonVolatile; + err = kIOReturnNotReady; + break; + } + return (err); } -IOMemoryDescriptor * -IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, - UInt32 withCount, - IODirection direction, - task_t task, - bool asReference) + +static vm_prot_t +vmProtForCacheMode(IOOptionBits cacheMode) 
{ - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) + vm_prot_t prot = 0; + switch (cacheMode) { - if (that->initWithRanges(ranges, withCount, direction, task, asReference)) - return that; + case kIOInhibitCache: + SET_MAP_MEM(MAP_MEM_IO, prot); + break; - that->release(); - } - return 0; -} + case kIOWriteThruCache: + SET_MAP_MEM(MAP_MEM_WTHRU, prot); + break; + case kIOWriteCombineCache: + SET_MAP_MEM(MAP_MEM_WCOMB, prot); + break; -/* - * withRanges: - * - * Create a new IOMemoryDescriptor. The buffer is made up of several - * virtual address ranges, from a given task. - * - * Passing the ranges as a reference will avoid an extra allocation. - */ -IOMemoryDescriptor * -IOMemoryDescriptor::withOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits opts, - IOMapper * mapper) -{ - IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; + case kIOCopybackCache: + SET_MAP_MEM(MAP_MEM_COPYBACK, prot); + break; - if (self - && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) - { - self->release(); - return 0; - } + case kIOCopybackInnerCache: + SET_MAP_MEM(MAP_MEM_INNERWBACK, prot); + break; - return self; -} + case kIODefaultCache: + default: + SET_MAP_MEM(MAP_MEM_NOOP, prot); + break; + } -// Can't leave abstract but this should never be used directly, -bool IOMemoryDescriptor::initWithOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits options, - IOMapper * mapper) -{ - // @@@ gvdl: Should I panic? - panic("IOMD::initWithOptions called\n"); - return 0; + return (prot); } -IOMemoryDescriptor * -IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, - UInt32 withCount, - IODirection direction, - bool asReference) +static unsigned int +pagerFlagsForCacheMode(IOOptionBits cacheMode) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) + unsigned int pagerFlags = 0; + switch (cacheMode) { - if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) - return that; + case kIOInhibitCache: + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; - that->release(); - } - return 0; -} + case kIOWriteThruCache: + pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; -IOMemoryDescriptor * -IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, - IOByteCount offset, - IOByteCount length, - IODirection direction) -{ - IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor; + case kIOWriteCombineCache: + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT; + break; - if (self && !self->initSubRange(of, offset, length, direction)) { - self->release(); - self = 0; + case kIOCopybackCache: + pagerFlags = DEVICE_PAGER_COHERENT; + break; + + case kIOCopybackInnerCache: + pagerFlags = DEVICE_PAGER_COHERENT; + break; + + case kIODefaultCache: + default: + pagerFlags = -1U; + break; } - return self; + return (pagerFlags); } -/* - * initWithAddress: - * - * Initialize an IOMemoryDescriptor. The buffer is a virtual address - * relative to the specified task. If no task is supplied, the kernel - * task is implied. - * - * An IOMemoryDescriptor can be re-used by calling initWithAddress or - * initWithRanges again on an existing instance -- note this behavior - * is not commonly supported in other I/O Kit classes, although it is - * supported here. 
- */ -bool -IOGeneralMemoryDescriptor::initWithAddress(void * address, - IOByteCount withLength, - IODirection withDirection) +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +struct IOMemoryEntry { - _singleRange.v.address = (vm_address_t) address; - _singleRange.v.length = withLength; + ipc_port_t entry; + int64_t offset; + uint64_t size; +}; - return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true); -} +struct IOMemoryReference +{ + volatile SInt32 refCount; + vm_prot_t prot; + uint32_t capacity; + uint32_t count; + struct IOMemoryReference * mapRef; + IOMemoryEntry entries[0]; +}; -bool -IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) +enum { - _singleRange.v.address = address; - _singleRange.v.length = withLength; + kIOMemoryReferenceReuse = 0x00000001, + kIOMemoryReferenceWrite = 0x00000002, + kIOMemoryReferenceCOW = 0x00000004, +}; - return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true); -} +SInt32 gIOMemoryReferenceCount; -bool -IOGeneralMemoryDescriptor::initWithPhysicalAddress( - IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ) +IOMemoryReference * +IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc) { - _singleRange.p.address = address; - _singleRange.p.length = withLength; - - return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); + IOMemoryReference * ref; + size_t newSize, oldSize, copySize; + + newSize = (sizeof(IOMemoryReference) + - sizeof(ref->entries) + + capacity * sizeof(ref->entries[0])); + ref = (typeof(ref)) IOMalloc(newSize); + if (realloc) + { + oldSize = (sizeof(IOMemoryReference) + - sizeof(realloc->entries) + + realloc->capacity * sizeof(realloc->entries[0])); + copySize = oldSize; + if (copySize > newSize) copySize = newSize; + if (ref) bcopy(realloc, ref, copySize); + IOFree(realloc, oldSize); + } + else if (ref) + { + bzero(ref, sizeof(*ref)); + ref->refCount = 1; + OSIncrementAtomic(&gIOMemoryReferenceCount); + } + if (!ref) return (0); + ref->capacity = capacity; + return (ref); } -bool -IOGeneralMemoryDescriptor::initWithPhysicalRanges( - IOPhysicalRange * ranges, - UInt32 count, - IODirection direction, - bool reference) +void +IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref) { - IOOptionBits mdOpts = direction | kIOMemoryTypePhysical; + IOMemoryEntry * entries; + size_t size; - if (reference) - mdOpts |= kIOMemoryAsReference; + if (ref->mapRef) + { + memoryReferenceFree(ref->mapRef); + ref->mapRef = 0; + } - return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0); + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) + { + entries--; + ipc_port_release_send(entries->entry); + } + size = (sizeof(IOMemoryReference) + - sizeof(ref->entries) + + ref->capacity * sizeof(ref->entries[0])); + IOFree(ref, size); + + OSDecrementAtomic(&gIOMemoryReferenceCount); } -bool -IOGeneralMemoryDescriptor::initWithRanges( - IOVirtualRange * ranges, - UInt32 count, - IODirection direction, - task_t task, - bool reference) +void +IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref) { - IOOptionBits mdOpts = direction; + if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref); +} - if (reference) - mdOpts |= kIOMemoryAsReference; - if (task) { - mdOpts |= 
kIOMemoryTypeVirtual; - if (task == kernel_task) - mdOpts |= kIOMemoryAutoPrepare; +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceCreate( + IOOptionBits options, + IOMemoryReference ** reference) +{ + enum { kCapacity = 4, kCapacityInc = 4 }; + + kern_return_t err; + IOMemoryReference * ref; + IOMemoryEntry * entries; + IOMemoryEntry * cloneEntries; + vm_map_t map; + ipc_port_t entry, cloneEntry; + vm_prot_t prot; + memory_object_size_t actualSize; + uint32_t rangeIdx; + uint32_t count; + mach_vm_address_t entryAddr, endAddr, entrySize; + mach_vm_size_t srcAddr, srcLen; + mach_vm_size_t nextAddr, nextLen; + mach_vm_size_t offset, remain; + IOByteCount physLen; + IOOptionBits type = (_flags & kIOMemoryTypeMask); + IOOptionBits cacheMode; + unsigned int pagerFlags; + vm_tag_t tag; + + ref = memoryReferenceAlloc(kCapacity, NULL); + if (!ref) return (kIOReturnNoMemory); + + tag = getVMTag(kernel_map); + entries = &ref->entries[0]; + count = 0; + err = KERN_SUCCESS; + + offset = 0; + rangeIdx = 0; + if (_task) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); } else - mdOpts |= kIOMemoryTypePhysical; + { + nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + nextLen = physLen; + + // default cache mode for physical + if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) + { + IOOptionBits mode; + pagerFlags = IODefaultCacheBits(nextAddr); + if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) + { + if (DEVICE_PAGER_GUARDED & pagerFlags) + mode = kIOInhibitCache; + else + mode = kIOWriteCombineCache; + } + else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags) + mode = kIOWriteThruCache; + else + mode = kIOCopybackCache; + _flags |= (mode << kIOMemoryBufferCacheShift); + } + } + + // cache mode & vm_prot + prot = VM_PROT_READ; + cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); + prot |= vmProtForCacheMode(cacheMode); + // VM system requires write access to change cache mode + if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE; + if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE; + if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE; + if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY; + + if ((kIOMemoryReferenceReuse & options) && _memRef) + { + cloneEntries = &_memRef->entries[0]; + prot |= MAP_MEM_NAMED_REUSE; + } + + if (_task) + { + // virtual ranges + + if (kIOMemoryBufferPageable & _flags) + { + // IOBufferMemoryDescriptor alloc - set flags for entry + object create + prot |= MAP_MEM_NAMED_CREATE; + if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE; + if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED; + + prot |= VM_PROT_WRITE; + map = NULL; + } + else map = get_task_map(_task); + + remain = _length; + while (remain) + { + srcAddr = nextAddr; + srcLen = nextLen; + nextAddr = 0; + nextLen = 0; + // coalesce addr range + for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if ((srcAddr + srcLen) != nextAddr) break; + srcLen += nextLen; + } + entryAddr = trunc_page_64(srcAddr); + endAddr = round_page_64(srcAddr + srcLen); + do + { + entrySize = (endAddr - entryAddr); + if (!entrySize) break; + actualSize = entrySize; + + cloneEntry = MACH_PORT_NULL; + if (MAP_MEM_NAMED_REUSE & prot) + { + if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry; + else prot &= ~MAP_MEM_NAMED_REUSE; + } + + err = 
mach_make_memory_entry_64(map, + &actualSize, entryAddr, prot, &entry, cloneEntry); + + if (KERN_SUCCESS != err) break; + if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize"); + + if (count >= ref->capacity) + { + ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref); + entries = &ref->entries[count]; + } + entries->entry = entry; + entries->size = actualSize; + entries->offset = offset + (entryAddr - srcAddr); + entryAddr += actualSize; + if (MAP_MEM_NAMED_REUSE & prot) + { + if ((cloneEntries->entry == entries->entry) + && (cloneEntries->size == entries->size) + && (cloneEntries->offset == entries->offset)) cloneEntries++; + else prot &= ~MAP_MEM_NAMED_REUSE; + } + entries++; + count++; + } + while (true); + offset += srcLen; + remain -= srcLen; + } + } + else + { + // _task == 0, physical or kIOMemoryTypeUPL + memory_object_t pager; + vm_size_t size = ptoa_32(_pages); + + if (!getKernelReserved()) panic("getKernelReserved"); + + reserved->dp.pagerContig = (1 == _rangesCount); + reserved->dp.memory = this; + + pagerFlags = pagerFlagsForCacheMode(cacheMode); + if (-1U == pagerFlags) panic("phys is kIODefaultCache"); + if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS; - // @@@ gvdl: Need to remove this - // Auto-prepare if this is a kernel memory descriptor as very few - // clients bother to prepare() kernel memory. - // But it has been enforced so what are you going to do? + pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved, + size, pagerFlags); + assert (pager); + if (!pager) err = kIOReturnVMError; + else + { + srcAddr = nextAddr; + entryAddr = trunc_page_64(srcAddr); + err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/, + size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry); + assert (KERN_SUCCESS == err); + if (KERN_SUCCESS != err) device_pager_deallocate(pager); + else + { + reserved->dp.devicePager = pager; + entries->entry = entry; + entries->size = size; + entries->offset = offset + (entryAddr - srcAddr); + entries++; + count++; + } + } + } - return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0); + ref->count = count; + ref->prot = prot; + + if (_task && (KERN_SUCCESS == err) + && (kIOMemoryMapCopyOnWrite & _flags) + && !(kIOMemoryReferenceCOW & options)) + { + err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef); + } + + if (KERN_SUCCESS == err) + { + if (MAP_MEM_NAMED_REUSE & prot) + { + memoryReferenceFree(ref); + OSIncrementAtomic(&_memRef->refCount); + ref = _memRef; + } + } + else + { + memoryReferenceFree(ref); + ref = NULL; + } + + *reference = ref; + + return (err); +} + +kern_return_t +IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) +{ + IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref; + IOReturn err; + vm_map_offset_t addr; + + addr = ref->mapped; + + err = vm_map_enter_mem_object(map, &addr, ref->size, + (vm_map_offset_t) 0, + (((ref->options & kIOMapAnywhere) + ? 
VM_FLAGS_ANYWHERE + : VM_FLAGS_FIXED) + | VM_MAKE_TAG(ref->tag)), + IPC_PORT_NULL, + (memory_object_offset_t) 0, + false, /* copy */ + ref->prot, + ref->prot, + VM_INHERIT_NONE); + if (KERN_SUCCESS == err) + { + ref->mapped = (mach_vm_address_t) addr; + ref->map = map; + } + + return( err ); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceMap( + IOMemoryReference * ref, + vm_map_t map, + mach_vm_size_t inoffset, + mach_vm_size_t size, + IOOptionBits options, + mach_vm_address_t * inaddr) +{ + IOReturn err; + int64_t offset = inoffset; + uint32_t rangeIdx, entryIdx; + vm_map_offset_t addr, mapAddr; + vm_map_offset_t pageOffset, entryOffset, remain, chunk; + + mach_vm_address_t nextAddr; + mach_vm_size_t nextLen; + IOByteCount physLen; + IOMemoryEntry * entry; + vm_prot_t prot, memEntryCacheMode; + IOOptionBits type; + IOOptionBits cacheMode; + vm_tag_t tag; + // for the kIOMapPrefault option. + upl_page_info_t * pageList = NULL; + UInt currentPageIndex = 0; + bool didAlloc; + + if (ref->mapRef) + { + err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); + return (err); + } + + type = _flags & kIOMemoryTypeMask; + + prot = VM_PROT_READ; + if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE; + prot &= ref->prot; + + cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift); + if (kIODefaultCache != cacheMode) + { + // VM system requires write access to update named entry cache mode + memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); + } + + tag = getVMTag(map); + + if (_task) + { + // Find first range for offset + if (!_rangesCount) return (kIOReturnBadArgument); + for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if (remain < nextLen) break; + remain -= nextLen; + } + } + else + { + rangeIdx = 0; + remain = 0; + nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + nextLen = size; + } + + assert(remain < nextLen); + if (remain >= nextLen) return (kIOReturnBadArgument); + + nextAddr += remain; + nextLen -= remain; + pageOffset = (page_mask & nextAddr); + addr = 0; + didAlloc = false; + + if (!(options & kIOMapAnywhere)) + { + addr = *inaddr; + if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned); + addr -= pageOffset; + } + + // find first entry for offset + for (entryIdx = 0; + (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset); + entryIdx++) {} + entryIdx--; + entry = &ref->entries[entryIdx]; + + // allocate VM + size = round_page_64(size + pageOffset); + if (kIOMapOverwrite & options) + { + if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) + { + map = IOPageableMapForAddress(addr); + } + err = KERN_SUCCESS; + } + else + { + IOMemoryDescriptorMapAllocRef ref; + ref.map = map; + ref.tag = tag; + ref.options = options; + ref.size = size; + ref.prot = prot; + if (options & kIOMapAnywhere) + // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE + ref.mapped = 0; + else + ref.mapped = addr; + if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) + err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); + else + err = IOMemoryDescriptorMapAlloc(ref.map, &ref); + if (KERN_SUCCESS == err) + { + addr = ref.mapped; + map = ref.map; + didAlloc = true; + } + } + + /* + * Prefaulting is only possible if we wired the memory earlier. Check the + * memory type, and the underlying data. 
+ */ + if (options & kIOMapPrefault) + { + /* + * The memory must have been wired by calling ::prepare(), otherwise + * we don't have the UPL. Without UPLs, pages cannot be pre-faulted + */ + assert(map != kernel_map); + assert(_wireCount != 0); + assert(_memoryEntries != NULL); + if ((map == kernel_map) || + (_wireCount == 0) || + (_memoryEntries == NULL)) + { + return kIOReturnBadArgument; + } + + // Get the page list. + ioGMDData* dataP = getDataP(_memoryEntries); + ioPLBlock const* ioplList = getIOPLList(dataP); + pageList = getPageList(dataP); + + // Get the number of IOPLs. + UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); + + /* + * Scan through the IOPL Info Blocks, looking for the first block containing + * the offset. The research will go past it, so we'll need to go back to the + * right range at the end. + */ + UInt ioplIndex = 0; + while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) + ioplIndex++; + ioplIndex--; + + // Retrieve the IOPL info block. + ioPLBlock ioplInfo = ioplList[ioplIndex]; + + /* + * For external UPLs, the fPageInfo points directly to the UPL's page_info_t + * array. + */ + if (ioplInfo.fFlags & kIOPLExternUPL) + pageList = (upl_page_info_t*) ioplInfo.fPageInfo; + else + pageList = &pageList[ioplInfo.fPageInfo]; + + // Rebase [offset] into the IOPL in order to looks for the first page index. + mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset; + + // Retrieve the index of the first page corresponding to the offset. + currentPageIndex = atop_32(offsetInIOPL); + } + + // enter mappings + remain = size; + mapAddr = addr; + addr += pageOffset; + + while (remain && (KERN_SUCCESS == err)) + { + entryOffset = offset - entry->offset; + if ((page_mask & entryOffset) != pageOffset) + { + err = kIOReturnNotAligned; + break; + } + + if (kIODefaultCache != cacheMode) + { + vm_size_t unused = 0; + err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/, + memEntryCacheMode, NULL, entry->entry); + assert (KERN_SUCCESS == err); + } + + entryOffset -= pageOffset; + if (entryOffset >= entry->size) panic("entryOffset"); + chunk = entry->size - entryOffset; + if (chunk) + { + if (chunk > remain) chunk = remain; + if (options & kIOMapPrefault) + { + UInt nb_pages = round_page(chunk) / PAGE_SIZE; + err = vm_map_enter_mem_object_prefault(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE + | VM_MAKE_TAG(tag) + | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + entry->entry, + entryOffset, + prot, // cur + prot, // max + &pageList[currentPageIndex], + nb_pages); + + // Compute the next index in the page list. 
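+                    // (the prefault call above entered nb_pages pages starting at
+                    //  &pageList[currentPageIndex], so step the cursor past them
+                    //  before mapping the next chunk of this memory entry)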
+ currentPageIndex += nb_pages; + assert(currentPageIndex <= _pages); + } + else + { + err = vm_map_enter_mem_object(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE + | VM_MAKE_TAG(tag) + | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */ + entry->entry, + entryOffset, + false, // copy + prot, // cur + prot, // max + VM_INHERIT_NONE); + } + if (KERN_SUCCESS != err) break; + remain -= chunk; + if (!remain) break; + mapAddr += chunk; + offset += chunk - pageOffset; + } + pageOffset = 0; + entry++; + entryIdx++; + if (entryIdx >= ref->count) + { + err = kIOReturnOverrun; + break; + } + } + + if ((KERN_SUCCESS != err) && didAlloc) + { + (void) mach_vm_deallocate(map, trunc_page_64(addr), size); + addr = 0; + } + *inaddr = addr; + + return (err); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts( + IOMemoryReference * ref, + IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount) +{ + IOReturn err; + IOMemoryEntry * entries; + unsigned int resident, dirty; + unsigned int totalResident, totalDirty; + + totalResident = totalDirty = 0; + err = kIOReturnSuccess; + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) + { + entries--; + err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty); + if (KERN_SUCCESS != err) break; + totalResident += resident; + totalDirty += dirty; + } + + if (residentPageCount) *residentPageCount = totalResident; + if (dirtyPageCount) *dirtyPageCount = totalDirty; + return (err); +} + +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable( + IOMemoryReference * ref, + IOOptionBits newState, + IOOptionBits * oldState) +{ + IOReturn err; + IOMemoryEntry * entries; + vm_purgable_t control; + int totalState, state; + + totalState = kIOMemoryPurgeableNonVolatile; + err = kIOReturnSuccess; + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) + { + entries--; + + err = purgeableControlBits(newState, &control, &state); + if (KERN_SUCCESS != err) break; + err = mach_memory_entry_purgable_control(entries->entry, control, &state); + if (KERN_SUCCESS != err) break; + err = purgeableStateBits(&state); + if (KERN_SUCCESS != err) break; + + if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty; + else if (kIOMemoryPurgeableEmpty == totalState) continue; + else if (kIOMemoryPurgeableVolatile == totalState) continue; + else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile; + else totalState = kIOMemoryPurgeableNonVolatile; + } + + if (oldState) *oldState = totalState; + return (err); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOMemoryDescriptor * +IOMemoryDescriptor::withAddress(void * address, + IOByteCount length, + IODirection direction) +{ + return IOMemoryDescriptor:: + withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task); +} + +#ifndef __LP64__ +IOMemoryDescriptor * +IOMemoryDescriptor::withAddress(IOVirtualAddress address, + IOByteCount length, + IODirection direction, + task_t task) +{ + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) + { + if (that->initWithAddress(address, length, direction, task)) + return that; + + that->release(); + } + return 0; +} +#endif /* !__LP64__ */ + +IOMemoryDescriptor * +IOMemoryDescriptor::withPhysicalAddress( + IOPhysicalAddress address, + IOByteCount length, + IODirection direction ) +{ + return (IOMemoryDescriptor::withAddressRange(address, 
length, direction, TASK_NULL)); +} + +#ifndef __LP64__ +IOMemoryDescriptor * +IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection direction, + task_t task, + bool asReference) +{ + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) + { + if (that->initWithRanges(ranges, withCount, direction, task, asReference)) + return that; + + that->release(); + } + return 0; +} +#endif /* !__LP64__ */ + +IOMemoryDescriptor * +IOMemoryDescriptor::withAddressRange(mach_vm_address_t address, + mach_vm_size_t length, + IOOptionBits options, + task_t task) +{ + IOAddressRange range = { address, length }; + return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task)); +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges, + UInt32 rangeCount, + IOOptionBits options, + task_t task) +{ + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) + { + if (task) + options |= kIOMemoryTypeVirtual64; + else + options |= kIOMemoryTypePhysical64; + + if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0)) + return that; + + that->release(); + } + + return 0; } + /* - * initWithOptions: + * withOptions: * - * IOMemoryDescriptor. The buffer is made up of several virtual address ranges, - * from a given task or several physical ranges or finally an UPL from the ubc - * system. + * Create a new IOMemoryDescriptor. The buffer is made up of several + * virtual address ranges, from a given task. * * Passing the ranges as a reference will avoid an extra allocation. - * - * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an - * existing instance -- note this behavior is not commonly supported in other - * I/O Kit classes, although it is supported here. 
*/ +IOMemoryDescriptor * +IOMemoryDescriptor::withOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits opts, + IOMapper * mapper) +{ + IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; -enum ioPLBlockFlags { - kIOPLOnDevice = 0x00000001, - kIOPLExternUPL = 0x00000002, -}; + if (self + && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) + { + self->release(); + return 0; + } -struct ioPLBlock { - upl_t fIOPL; - vm_address_t fIOMDOffset; // The offset of this iopl in descriptor - vm_offset_t fPageInfo; // Pointer to page list or index into it - ppnum_t fMappedBase; // Page number of first page in this iopl - unsigned int fPageOffset; // Offset within first page of iopl - unsigned int fFlags; // Flags -}; + return self; +} + +bool IOMemoryDescriptor::initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper) +{ + return( false ); +} + +#ifndef __LP64__ +IOMemoryDescriptor * +IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, + UInt32 withCount, + IODirection direction, + bool asReference) +{ + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) + { + if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) + return that; + + that->release(); + } + return 0; +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, + IOByteCount offset, + IOByteCount length, + IODirection direction) +{ + return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction)); +} +#endif /* !__LP64__ */ + +IOMemoryDescriptor * +IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD) +{ + IOGeneralMemoryDescriptor *origGenMD = + OSDynamicCast(IOGeneralMemoryDescriptor, originalMD); + + if (origGenMD) + return IOGeneralMemoryDescriptor:: + withPersistentMemoryDescriptor(origGenMD); + else + return 0; +} + +IOMemoryDescriptor * +IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD) +{ + IOMemoryReference * memRef; + + if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0); + + if (memRef == originalMD->_memRef) + { + originalMD->retain(); // Add a new reference to ourselves + originalMD->memoryReferenceRelease(memRef); + return originalMD; + } + + IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor; + IOMDPersistentInitData initData = { originalMD, memRef }; + + if (self + && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) { + self->release(); + self = 0; + } + return self; +} + +#ifndef __LP64__ +bool +IOGeneralMemoryDescriptor::initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection) +{ + _singleRange.v.address = (vm_offset_t) address; + _singleRange.v.length = withLength; + + return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true); +} + +bool +IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask) +{ + _singleRange.v.address = address; + _singleRange.v.length = withLength; + + return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true); +} + +bool +IOGeneralMemoryDescriptor::initWithPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ) +{ + _singleRange.p.address = address; + _singleRange.p.length = withLength; + + return 
initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); +} + +bool +IOGeneralMemoryDescriptor::initWithPhysicalRanges( + IOPhysicalRange * ranges, + UInt32 count, + IODirection direction, + bool reference) +{ + IOOptionBits mdOpts = direction | kIOMemoryTypePhysical; + + if (reference) + mdOpts |= kIOMemoryAsReference; + + return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0); +} + +bool +IOGeneralMemoryDescriptor::initWithRanges( + IOVirtualRange * ranges, + UInt32 count, + IODirection direction, + task_t task, + bool reference) +{ + IOOptionBits mdOpts = direction; + + if (reference) + mdOpts |= kIOMemoryAsReference; + + if (task) { + mdOpts |= kIOMemoryTypeVirtual; + + // Auto-prepare if this is a kernel memory descriptor as very few + // clients bother to prepare() kernel memory. + // But it was not enforced so what are you going to do? + if (task == kernel_task) + mdOpts |= kIOMemoryAutoPrepare; + } + else + mdOpts |= kIOMemoryTypePhysical; + + return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0); +} +#endif /* !__LP64__ */ + +/* + * initWithOptions: + * + * IOMemoryDescriptor. The buffer is made up of several virtual address ranges, + * from a given task, several physical ranges, an UPL from the ubc + * system or a uio (may be 64bit) from the BSD subsystem. + * + * Passing the ranges as a reference will avoid an extra allocation. + * + * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an + * existing instance -- note this behavior is not commonly supported in other + * I/O Kit classes, although it is supported here. + */ + +bool +IOGeneralMemoryDescriptor::initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper) +{ + IOOptionBits type = options & kIOMemoryTypeMask; + +#ifndef __LP64__ + if (task + && (kIOMemoryTypeVirtual == type) + && vm_map_is_64bit(get_task_map(task)) + && ((IOVirtualRange *) buffers)->address) + { + OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()"); + return false; + } +#endif /* !__LP64__ */ + + // Grab the original MD's configuation data to initialse the + // arguments to this function. + if (kIOMemoryTypePersistentMD == type) { + + IOMDPersistentInitData *initData = (typeof(initData)) buffers; + const IOGeneralMemoryDescriptor *orig = initData->fMD; + ioGMDData *dataP = getDataP(orig->_memoryEntries); + + // Only accept persistent memory descriptors with valid dataP data. 
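+	// (the clone then re-runs the normal initialisation below with the original
+	//  descriptor's ranges, task and mapper, sharing the named entry handed over
+	//  in fMemRef)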
+ assert(orig->_rangesCount == 1); + if ( !(orig->_flags & kIOMemoryPersistent) || !dataP) + return false; + + _memRef = initData->fMemRef; // Grab the new named entry + options = orig->_flags & ~kIOMemoryAsReference; + type = options & kIOMemoryTypeMask; + buffers = orig->_ranges.v; + count = orig->_rangesCount; + + // Now grab the original task and whatever mapper was previously used + task = orig->_task; + mapper = dataP->fMapper; + + // We are ready to go through the original initialisation now + } + + switch (type) { + case kIOMemoryTypeUIO: + case kIOMemoryTypeVirtual: +#ifndef __LP64__ + case kIOMemoryTypeVirtual64: +#endif /* !__LP64__ */ + assert(task); + if (!task) + return false; + break; + + case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task +#ifndef __LP64__ + case kIOMemoryTypePhysical64: +#endif /* !__LP64__ */ + case kIOMemoryTypeUPL: + assert(!task); + break; + default: + return false; /* bad argument */ + } + + assert(buffers); + assert(count); + + /* + * We can check the _initialized instance variable before having ever set + * it to an initial value because I/O Kit guarantees that all our instance + * variables are zeroed on an object's allocation. + */ + + if (_initialized) { + /* + * An existing memory descriptor is being retargeted to point to + * somewhere else. Clean up our present state. + */ + IOOptionBits type = _flags & kIOMemoryTypeMask; + if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) + { + while (_wireCount) + complete(); + } + if (_ranges.v && !(kIOMemoryAsReference & _flags)) + { + if (kIOMemoryTypeUIO == type) + uio_free((uio_t) _ranges.v); +#ifndef __LP64__ + else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) + IODelete(_ranges.v64, IOAddressRange, _rangesCount); +#endif /* !__LP64__ */ + else + IODelete(_ranges.v, IOVirtualRange, _rangesCount); + } + + options |= (kIOMemoryRedirected & _flags); + if (!(kIOMemoryRedirected & options)) + { + if (_memRef) + { + memoryReferenceRelease(_memRef); + _memRef = 0; + } + if (_mappings) + _mappings->flushCollection(); + } + } + else { + if (!super::init()) + return false; + _initialized = true; + } + + // Grab the appropriate mapper + if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone; + if (kIOMemoryMapperNone & options) + mapper = 0; // No Mapper + else if (mapper == kIOMapperSystem) { + IOMapper::checkForSystemMapper(); + gIOSystemMapper = mapper = IOMapper::gSystem; + } + + // Remove the dynamic internal use flags from the initial setting + options &= ~(kIOMemoryPreparedReadOnly); + _flags = options; + _task = task; + +#ifndef __LP64__ + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); +#endif /* !__LP64__ */ + + __iomd_reservedA = 0; + __iomd_reservedB = 0; + _highestPage = 0; + + if (kIOMemoryThreadSafe & options) + { + if (!_prepareLock) + _prepareLock = IOLockAlloc(); + } + else if (_prepareLock) + { + IOLockFree(_prepareLock); + _prepareLock = NULL; + } + + if (kIOMemoryTypeUPL == type) { + + ioGMDData *dataP; + unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1); + + if (!initMemoryEntries(dataSize, mapper)) return (false); + dataP = getDataP(_memoryEntries); + dataP->fPageCnt = 0; + + // _wireCount++; // UPLs start out life wired + + _length = count; + _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset); + + ioPLBlock iopl; + iopl.fIOPL = (upl_t) buffers; + upl_set_referenced(iopl.fIOPL, true); + upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL); + + if 
(upl_get_size(iopl.fIOPL) < (count + offset)) + panic("short external upl"); + + _highestPage = upl_get_highest_page(iopl.fIOPL); + + // Set the flag kIOPLOnDevice convieniently equal to 1 + iopl.fFlags = pageList->device | kIOPLExternUPL; + if (!pageList->device) { + // Pre-compute the offset into the UPL's page list + pageList = &pageList[atop_32(offset)]; + offset &= PAGE_MASK; + } + iopl.fIOMDOffset = 0; + iopl.fMappedPage = 0; + iopl.fPageInfo = (vm_address_t) pageList; + iopl.fPageOffset = offset; + _memoryEntries->appendBytes(&iopl, sizeof(iopl)); + } + else { + // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO + // kIOMemoryTypePhysical | kIOMemoryTypePhysical64 + + // Initialize the memory descriptor + if (options & kIOMemoryAsReference) { +#ifndef __LP64__ + _rangesIsAllocated = false; +#endif /* !__LP64__ */ + + // Hack assignment to get the buffer arg into _ranges. + // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't + // work, C++ sigh. + // This also initialises the uio & physical ranges. + _ranges.v = (IOVirtualRange *) buffers; + } + else { +#ifndef __LP64__ + _rangesIsAllocated = true; +#endif /* !__LP64__ */ + switch (type) + { + case kIOMemoryTypeUIO: + _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers); + break; + +#ifndef __LP64__ + case kIOMemoryTypeVirtual64: + case kIOMemoryTypePhysical64: + if (count == 1 + && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL + ) { + if (kIOMemoryTypeVirtual64 == type) + type = kIOMemoryTypeVirtual; + else + type = kIOMemoryTypePhysical; + _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference; + _rangesIsAllocated = false; + _ranges.v = &_singleRange.v; + _singleRange.v.address = ((IOAddressRange *) buffers)->address; + _singleRange.v.length = ((IOAddressRange *) buffers)->length; + break; + } + _ranges.v64 = IONew(IOAddressRange, count); + if (!_ranges.v64) + return false; + bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange)); + break; +#endif /* !__LP64__ */ + case kIOMemoryTypeVirtual: + case kIOMemoryTypePhysical: + if (count == 1) { + _flags |= kIOMemoryAsReference; +#ifndef __LP64__ + _rangesIsAllocated = false; +#endif /* !__LP64__ */ + _ranges.v = &_singleRange.v; + } else { + _ranges.v = IONew(IOVirtualRange, count); + if (!_ranges.v) + return false; + } + bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange)); + break; + } + } + + // Find starting address within the vector of ranges + Ranges vec = _ranges; + mach_vm_size_t totalLength = 0; + unsigned int ind, pages = 0; + for (ind = 0; ind < count; ind++) { + mach_vm_address_t addr; + mach_vm_address_t endAddr; + mach_vm_size_t len; + + // addr & len are returned by this function + getAddrLenForInd(addr, len, type, vec, ind); + if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break; + if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break; + if (os_add_overflow(totalLength, len, &totalLength)) break; + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) + { + ppnum_t highPage = atop_64(addr + len - 1); + if (highPage > _highestPage) + _highestPage = highPage; + } + } + if ((ind < count) + || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */ + + _length = totalLength; + _pages = pages; + _rangesCount = count; + + // Auto-prepare memory at creation time. 
+ // Implied completion when descriptor is free-ed + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) + _wireCount++; // Physical MDs are, by definition, wired + else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */ + ioGMDData *dataP; + unsigned dataSize; + + if (_pages > atop_64(max_mem)) return false; + + dataSize = computeDataSize(_pages, /* upls */ count * 2); + if (!initMemoryEntries(dataSize, mapper)) return false; + dataP = getDataP(_memoryEntries); + dataP->fPageCnt = _pages; + + if ( (kIOMemoryPersistent & _flags) && !_memRef) + { + IOReturn + err = memoryReferenceCreate(0, &_memRef); + if (kIOReturnSuccess != err) return false; + } + + if ((_flags & kIOMemoryAutoPrepare) + && prepare() != kIOReturnSuccess) + return false; + } + } + + return true; +} + +/* + * free + * + * Free resources. + */ +void IOGeneralMemoryDescriptor::free() +{ + IOOptionBits type = _flags & kIOMemoryTypeMask; + + if( reserved) + { + LOCK; + reserved->dp.memory = 0; + UNLOCK; + } + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) + { + ioGMDData * dataP; + if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + { + dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength); + dataP->fMappedBase = 0; + } + } + else + { + while (_wireCount) complete(); + } + + if (_memoryEntries) _memoryEntries->release(); + + if (_ranges.v && !(kIOMemoryAsReference & _flags)) + { + if (kIOMemoryTypeUIO == type) + uio_free((uio_t) _ranges.v); +#ifndef __LP64__ + else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) + IODelete(_ranges.v64, IOAddressRange, _rangesCount); +#endif /* !__LP64__ */ + else + IODelete(_ranges.v, IOVirtualRange, _rangesCount); + + _ranges.v = NULL; + } + + if (reserved) + { + if (reserved->dp.devicePager) + { + // memEntry holds a ref on the device pager which owns reserved + // (IOMemoryDescriptorReserved) so no reserved access after this point + device_pager_deallocate( (memory_object_t) reserved->dp.devicePager ); + } + else + IODelete(reserved, IOMemoryDescriptorReserved, 1); + reserved = NULL; + } + + if (_memRef) memoryReferenceRelease(_memRef); + if (_prepareLock) IOLockFree(_prepareLock); + + super::free(); +} + +#ifndef __LP64__ +void IOGeneralMemoryDescriptor::unmapFromKernel() +{ + panic("IOGMD::unmapFromKernel deprecated"); +} + +void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) +{ + panic("IOGMD::mapIntoKernel deprecated"); +} +#endif /* !__LP64__ */ + +/* + * getDirection: + * + * Get the direction of the transfer. + */ +IODirection IOMemoryDescriptor::getDirection() const +{ +#ifndef __LP64__ + if (_direction) + return _direction; +#endif /* !__LP64__ */ + return (IODirection) (_flags & kIOMemoryDirectionMask); +} + +/* + * getLength: + * + * Get the length of the transfer (over all ranges). + */ +IOByteCount IOMemoryDescriptor::getLength() const +{ + return _length; +} + +void IOMemoryDescriptor::setTag( IOOptionBits tag ) +{ + _tag = tag; +} + +IOOptionBits IOMemoryDescriptor::getTag( void ) +{ + return( _tag); +} + +#ifndef __LP64__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + +// @@@ gvdl: who is using this API? Seems like a wierd thing to implement. 
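+// Deprecated, 32-bit-only path: prepare() the descriptor, resolve the physical
+// segment, complete(), and hand back the address truncated to IOPhysicalAddress
+// (only the page offset is expected to be used by callers).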
+IOPhysicalAddress +IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length ) +{ + addr64_t physAddr = 0; + + if( prepare() == kIOReturnSuccess) { + physAddr = getPhysicalSegment64( offset, length ); + complete(); + } + + return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used +} + +#pragma clang diagnostic pop + +#endif /* !__LP64__ */ + +IOByteCount IOMemoryDescriptor::readBytes + (IOByteCount offset, void *bytes, IOByteCount length) +{ + addr64_t dstAddr = CAST_DOWN(addr64_t, bytes); + IOByteCount remaining; + + // Assert that this entire I/O is withing the available range + assert(offset <= _length); + assert(offset + length <= _length); + if ((offset >= _length) + || ((offset + length) > _length)) { + return 0; + } + + if (kIOMemoryThreadSafe & _flags) + LOCK; + + remaining = length = min(length, _length - offset); + while (remaining) { // (process another target segment?) + addr64_t srcAddr64; + IOByteCount srcLen; + + srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone); + if (!srcAddr64) + break; + + // Clip segment length to remaining + if (srcLen > remaining) + srcLen = remaining; + + copypv(srcAddr64, dstAddr, srcLen, + cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); + + dstAddr += srcLen; + offset += srcLen; + remaining -= srcLen; + } + + if (kIOMemoryThreadSafe & _flags) + UNLOCK; + + assert(!remaining); + + return length - remaining; +} + +IOByteCount IOMemoryDescriptor::writeBytes + (IOByteCount inoffset, const void *bytes, IOByteCount length) +{ + addr64_t srcAddr = CAST_DOWN(addr64_t, bytes); + IOByteCount remaining; + IOByteCount offset = inoffset; + + // Assert that this entire I/O is withing the available range + assert(offset <= _length); + assert(offset + length <= _length); + + assert( !(kIOMemoryPreparedReadOnly & _flags) ); + + if ( (kIOMemoryPreparedReadOnly & _flags) + || (offset >= _length) + || ((offset + length) > _length)) { + return 0; + } + + if (kIOMemoryThreadSafe & _flags) + LOCK; + + remaining = length = min(length, _length - offset); + while (remaining) { // (process another target segment?) 
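+	// Each pass resolves one physical segment of this descriptor, clips it to
+	// the bytes still to be written, then either zero-fills it (NULL source
+	// buffer) or copies into it from the kernel virtual source with copypv().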
+ addr64_t dstAddr64; + IOByteCount dstLen; + + dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); + if (!dstAddr64) + break; + + // Clip segment length to remaining + if (dstLen > remaining) + dstLen = remaining; + + if (!srcAddr) bzero_phys(dstAddr64, dstLen); + else + { + copypv(srcAddr, (addr64_t) dstAddr64, dstLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); + srcAddr += dstLen; + } + offset += dstLen; + remaining -= dstLen; + } + + if (kIOMemoryThreadSafe & _flags) + UNLOCK; + + assert(!remaining); + + if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length); + + return length - remaining; +} + +#ifndef __LP64__ +void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) +{ + panic("IOGMD::setPosition deprecated"); +} +#endif /* !__LP64__ */ + +static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32); + +uint64_t +IOGeneralMemoryDescriptor::getPreparationID( void ) +{ + ioGMDData *dataP; + + if (!_wireCount) + return (kIOPreparationIDUnprepared); + + if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical) + || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) + { + IOMemoryDescriptor::setPreparationID(); + return (IOMemoryDescriptor::getPreparationID()); + } + + if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) + return (kIOPreparationIDUnprepared); + + if (kIOPreparationIDUnprepared == dataP->fPreparationID) + { + dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID); + } + return (dataP->fPreparationID); +} + +IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void ) +{ + if (!reserved) + { + reserved = IONew(IOMemoryDescriptorReserved, 1); + if (reserved) + bzero(reserved, sizeof(IOMemoryDescriptorReserved)); + } + return (reserved); +} + +void IOMemoryDescriptor::setPreparationID( void ) +{ + if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) + { + reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID); + } +} + +uint64_t IOMemoryDescriptor::getPreparationID( void ) +{ + if (reserved) + return (reserved->preparationID); + else + return (kIOPreparationIDUnsupported); +} + +void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag) +{ + if (!getKernelReserved()) return; + reserved->kernelTag = kernelTag; + reserved->userTag = userTag; +} + +vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map) +{ + if (!reserved + || (VM_KERN_MEMORY_NONE == reserved->kernelTag) + || (VM_KERN_MEMORY_NONE == reserved->userTag)) + { + return (IOMemoryTag(map)); + } + + if (vm_kernel_map_is_kernel(map)) return (reserved->kernelTag); + return (reserved->userTag); +} + +IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const +{ + IOReturn err = kIOReturnSuccess; + DMACommandOps params; + IOGeneralMemoryDescriptor * md = const_cast(this); + ioGMDData *dataP; + + params = (op & ~kIOMDDMACommandOperationMask & op); + op &= kIOMDDMACommandOperationMask; + + if (kIOMDDMAMap == op) + { + if (dataSize < sizeof(IOMDDMAMapArgs)) + return kIOReturnUnderrun; + + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; + + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); + + if (_memoryEntries && data->fMapper) + { + bool remap, keepMap; + dataP = getDataP(_memoryEntries); + + if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits; 
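+	    // keep the most restrictive DMA constraints seen so far: the narrowest
+	    // address width (above) and the largest alignment (below)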
+ if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment; + + keepMap = (data->fMapper == gIOSystemMapper); + keepMap &= ((data->fOffset == 0) && (data->fLength == _length)); + + remap = (!keepMap); + remap |= (dataP->fDMAMapNumAddressBits < 64) + && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); + remap |= (dataP->fDMAMapAlignment > page_size); + + if (remap || !dataP->fMappedBase) + { +// if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params); + err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase) + { + dataP->fMappedBase = data->fAlloc; + dataP->fMappedLength = data->fAllocLength; + data->fAllocLength = 0; // IOMD owns the alloc now + } + } + else + { + data->fAlloc = dataP->fMappedBase; + data->fAllocLength = 0; // give out IOMD map + } + data->fMapContig = !dataP->fDiscontig; + } + + return (err); + } + + if (kIOMDAddDMAMapSpec == op) + { + if (dataSize < sizeof(IODMAMapSpecification)) + return kIOReturnUnderrun; + + IODMAMapSpecification * data = (IODMAMapSpecification *) vData; + + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); + + if (_memoryEntries) + { + dataP = getDataP(_memoryEntries); + if (data->numAddressBits < dataP->fDMAMapNumAddressBits) + dataP->fDMAMapNumAddressBits = data->numAddressBits; + if (data->alignment > dataP->fDMAMapAlignment) + dataP->fDMAMapAlignment = data->alignment; + } + return kIOReturnSuccess; + } + + if (kIOMDGetCharacteristics == op) { + + if (dataSize < sizeof(IOMDDMACharacteristics)) + return kIOReturnUnderrun; + + IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData; + data->fLength = _length; + data->fSGCount = _rangesCount; + data->fPages = _pages; + data->fDirection = getDirection(); + if (!_wireCount) + data->fIsPrepared = false; + else { + data->fIsPrepared = true; + data->fHighestPage = _highestPage; + if (_memoryEntries) + { + dataP = getDataP(_memoryEntries); + ioPLBlock *ioplList = getIOPLList(dataP); + UInt count = getNumIOPL(_memoryEntries, dataP); + if (count == 1) + data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK; + } + } + + return kIOReturnSuccess; + + } else if (kIOMDWalkSegments != op) + return kIOReturnBadArgument; + + // Get the next segment + struct InternalState { + IOMDDMAWalkSegmentArgs fIO; + UInt fOffset2Index; + UInt fIndex; + UInt fNextOffset; + } *isP; + + // Find the next segment + if (dataSize < sizeof(*isP)) + return kIOReturnUnderrun; + + isP = (InternalState *) vData; + UInt offset = isP->fIO.fOffset; + bool mapped = isP->fIO.fMapped; + + if (IOMapper::gSystem && mapped + && (!(kIOMemoryHostOnly & _flags)) + && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase)) +// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase)) + { + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); + + dataP = getDataP(_memoryEntries); + if (dataP->fMapper) + { + IODMAMapSpecification mapSpec; + bzero(&mapSpec, sizeof(mapSpec)); + mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; + mapSpec.alignment = dataP->fDMAMapAlignment; + err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength); + if (kIOReturnSuccess != err) 
return (err); + } + } + + if (offset >= _length) + return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError; + + // Validate the previous offset + UInt ind, off2Ind = isP->fOffset2Index; + if (!params + && offset + && (offset == isP->fNextOffset || off2Ind <= offset)) + ind = isP->fIndex; + else + ind = off2Ind = 0; // Start from beginning + + UInt length; + UInt64 address; + + + if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { + + // Physical address based memory descriptor + const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0]; + + // Find the range after the one that contains the offset + mach_vm_size_t len; + for (len = 0; off2Ind <= offset; ind++) { + len = physP[ind].length; + off2Ind += len; + } + + // Calculate length within range and starting address + length = off2Ind - offset; + address = physP[ind - 1].address + len - length; + + if (true && mapped && _memoryEntries + && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + { + address = dataP->fMappedBase + offset; + } + else + { + // see how far we can coalesce ranges + while (ind < _rangesCount && address + length == physP[ind].address) { + len = physP[ind].length; + length += len; + off2Ind += len; + ind++; + } + } + + // correct contiguous check overshoot + ind--; + off2Ind -= len; + } +#ifndef __LP64__ + else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) { + + // Physical address based memory descriptor + const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0]; + + // Find the range after the one that contains the offset + mach_vm_size_t len; + for (len = 0; off2Ind <= offset; ind++) { + len = physP[ind].length; + off2Ind += len; + } + + // Calculate length within range and starting address + length = off2Ind - offset; + address = physP[ind - 1].address + len - length; + + if (true && mapped && _memoryEntries + && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase) + { + address = dataP->fMappedBase + offset; + } + else + { + // see how far we can coalesce ranges + while (ind < _rangesCount && address + length == physP[ind].address) { + len = physP[ind].length; + length += len; + off2Ind += len; + ind++; + } + } + // correct contiguous check overshoot + ind--; + off2Ind -= len; + } +#endif /* !__LP64__ */ + else do { + if (!_wireCount) + panic("IOGMD: not wired for the IODMACommand"); + + assert(_memoryEntries); + + dataP = getDataP(_memoryEntries); + const ioPLBlock *ioplList = getIOPLList(dataP); + UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); + upl_page_info_t *pageList = getPageList(dataP); + + assert(numIOPLs > 0); + + // Scan through iopl info blocks looking for block containing offset + while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) + ind++; + + // Go back to actual range as search goes past it + ioPLBlock ioplInfo = ioplList[ind - 1]; + off2Ind = ioplInfo.fIOMDOffset; + + if (ind < numIOPLs) + length = ioplList[ind].fIOMDOffset; + else + length = _length; + length -= offset; // Remainder within iopl + + // Subtract offset till this iopl in total list + offset -= off2Ind; + + // If a mapped address is requested and this is a pre-mapped IOPL + // then just need to compute an offset relative to the mapped base. + if (mapped && dataP->fMappedBase) { + offset += (ioplInfo.fPageOffset & PAGE_MASK); + address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset; + continue; // Done leave do/while(false) now + } + + // The offset is rebased into the current iopl. + // Now add the iopl 1st page offset. 
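The page-list walk in this function treats consecutive upl_page_info_t entries as one physical extent only while each phys_addr (a page number) is exactly one greater than its predecessor. A small helper in the same spirit, offered only as a sketch; physicallyContiguousBytes is not a function in this file:

static IOByteCount
physicallyContiguousBytes(const upl_page_info_t * pages, unsigned int pageCount,
                          IOByteCount firstPageOffset)
{
    if (!pageCount) return (0);

    // First page contributes whatever lies beyond the starting offset.
    IOByteCount run = PAGE_SIZE - firstPageOffset;

    for (unsigned int i = 1; i < pageCount; i++)
    {
        if (pages[i].phys_addr != (pages[i - 1].phys_addr + 1)) break;
        run += PAGE_SIZE;
    }
    return (run);
}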
+ offset += ioplInfo.fPageOffset; + + // For external UPLs the fPageInfo field points directly to + // the upl's upl_page_info_t array. + if (ioplInfo.fFlags & kIOPLExternUPL) + pageList = (upl_page_info_t *) ioplInfo.fPageInfo; + else + pageList = &pageList[ioplInfo.fPageInfo]; + + // Check for direct device non-paged memory + if ( ioplInfo.fFlags & kIOPLOnDevice ) { + address = ptoa_64(pageList->phys_addr) + offset; + continue; // Done leave do/while(false) now + } + + // Now we need compute the index into the pageList + UInt pageInd = atop_32(offset); + offset &= PAGE_MASK; + + // Compute the starting address of this segment + IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr; + if (!pageAddr) { + panic("!pageList phys_addr"); + } + + address = ptoa_64(pageAddr) + offset; + + // length is currently set to the length of the remainider of the iopl. + // We need to check that the remainder of the iopl is contiguous. + // This is indicated by pageList[ind].phys_addr being sequential. + IOByteCount contigLength = PAGE_SIZE - offset; + while (contigLength < length + && ++pageAddr == pageList[++pageInd].phys_addr) + { + contigLength += PAGE_SIZE; + } -struct ioGMDData { - IOMapper *fMapper; - unsigned int fPageCnt; - upl_page_info_t fPageList[0]; // @@@ gvdl need to get rid of this - // should be able to use upl directly - ioPLBlock fBlocks[0]; -}; + if (contigLength < length) + length = contigLength; + -#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) -#define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt])) -#define getNumIOPL(d,len) \ - ((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) -#define getPageList(d) (&(d->fPageList[0])) -#define computeDataSize(p, u) \ - (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) + assert(address); + assert(length); -bool -IOGeneralMemoryDescriptor::initWithOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits options, - IOMapper * mapper) -{ + } while (false); - switch (options & kIOMemoryTypeMask) { - case kIOMemoryTypeVirtual: - assert(task); - if (!task) - return false; - else - break; + // Update return values and state + isP->fIO.fIOVMAddr = address; + isP->fIO.fLength = length; + isP->fIndex = ind; + isP->fOffset2Index = off2Ind; + isP->fNextOffset = isP->fIO.fOffset + length; - case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task - mapper = kIOMapperNone; - case kIOMemoryTypeUPL: - assert(!task); - break; - default: -panic("IOGMD::iWO(): bad type"); // @@@ gvdl: for testing - return false; /* bad argument */ - } + return kIOReturnSuccess; +} - assert(buffers); - assert(count); +addr64_t +IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) +{ + IOReturn ret; + mach_vm_address_t address = 0; + mach_vm_size_t length = 0; + IOMapper * mapper = gIOSystemMapper; + IOOptionBits type = _flags & kIOMemoryTypeMask; - /* - * We can check the _initialized instance variable before having ever set - * it to an initial value because I/O Kit guarantees that all our instance - * variables are zeroed on an object's allocation. - */ + if (lengthOfSegment) + *lengthOfSegment = 0; - if (_initialized) { - /* - * An existing memory descriptor is being retargeted to point to - * somewhere else. Clean up our present state. 
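The kIOMDDMAMap and kIOMDWalkSegments operations handled above are normally driven by IODMACommand rather than called directly by drivers. A minimal sketch of that supported path; the 32-bit and 4096-byte constraints are illustrative, md is an existing IOMemoryDescriptor, and setMemoryDescriptor()/clearMemoryDescriptor() take care of prepare()/complete():

IODMACommand * dma = IODMACommand::withSpecification(
        kIODMACommandOutputHost64,     // segment output function
        32,                            // numAddressBits the device can drive
        0,                             // maxSegmentSize: unlimited
        IODMACommand::kMapped,         // use the system mapper when one is present
        0,                             // maxTransferSize: unlimited
        4096);                         // alignment

if (dma && (kIOReturnSuccess == dma->setMemoryDescriptor(md)))
{
    UInt64                  offset = 0;
    IODMACommand::Segment64 seg;
    UInt32                  numSeg = 1;

    while ((kIOReturnSuccess == dma->gen64IOVMSegments(&offset, &seg, &numSeg)) && numSeg)
    {
        // seg.fIOVMAddr / seg.fLength describe one mapper-visible DMA segment
        numSeg = 1;
    }
    dma->clearMemoryDescriptor();
}
if (dma) dma->release();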
- */ + if (offset >= _length) + return 0; - while (_wireCount) - complete(); - if (_kernPtrAligned) - unmapFromKernel(); - if (_ranges.v && _rangesIsAllocated) - IODelete(_ranges.v, IOVirtualRange, _rangesCount); - } - else { - if (!super::init()) - return false; - _initialized = true; - } + // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must + // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use + // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation + // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up - // Grab the appropriate mapper - if (mapper == kIOMapperNone) - mapper = 0; // No Mapper - else if (!mapper) { - IOMapper::checkForSystemMapper(); - gIOSystemMapper = mapper = IOMapper::gSystem; + if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) + { + unsigned rangesIndex = 0; + Ranges vec = _ranges; + mach_vm_address_t addr; + + // Find starting address within the vector of ranges + for (;;) { + getAddrLenForInd(addr, length, type, vec, rangesIndex); + if (offset < length) + break; + offset -= length; // (make offset relative) + rangesIndex++; + } + + // Now that we have the starting range, + // lets find the last contiguous range + addr += offset; + length -= offset; + + for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) { + mach_vm_address_t newAddr; + mach_vm_size_t newLen; + + getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); + if (addr + length != newAddr) + break; + length += newLen; + } + if (addr) + address = (IOPhysicalAddress) addr; // Truncate address to 32bit } + else + { + IOMDDMAWalkSegmentState _state; + IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state; - _flags = options; - _task = task; + state->fOffset = offset; + state->fLength = _length - offset; + state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly); - // DEPRECATED variable initialisation - _direction = (IODirection) (_flags & kIOMemoryDirectionMask); - _position = 0; - _kernPtrAligned = 0; - _cachedPhysicalAddress = 0; - _cachedVirtualAddress = 0; + ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state)); - if ( (options & kIOMemoryTypeMask) == kIOMemoryTypeUPL) { + if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) + DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n", + ret, this, state->fOffset, + state->fIOVMAddr, state->fLength); + if (kIOReturnSuccess == ret) + { + address = state->fIOVMAddr; + length = state->fLength; + } - ioGMDData *dataP; - unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1); + // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even + // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up - if (!_memoryEntries) { - _memoryEntries = OSData::withCapacity(dataSize); - if (!_memoryEntries) - return false; - } - else if (!_memoryEntries->initWithCapacity(dataSize)) - return false; + if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) + { + if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) + { + addr64_t origAddr = address; + IOByteCount origLen = length; + + address = mapper->mapToPhysicalAddress(origAddr); + length = page_size - (address & (page_size - 1)); + while 
((length < origLen) + && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) + length += page_size; + if (length > origLen) + length = origLen; + } + } + } - _memoryEntries->appendBytes(0, sizeof(ioGMDData)); - dataP = getDataP(_memoryEntries); - dataP->fMapper = mapper; - dataP->fPageCnt = 0; + if (!address) + length = 0; - _wireCount++; // UPLs start out life wired + if (lengthOfSegment) + *lengthOfSegment = length; - _length = count; - _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset); + return (address); +} - ioPLBlock iopl; - upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers); +#ifndef __LP64__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" - iopl.fIOPL = (upl_t) buffers; - // Set the flag kIOPLOnDevice convieniently equal to 1 - iopl.fFlags = pageList->device | kIOPLExternUPL; - iopl.fIOMDOffset = 0; - if (!pageList->device) { - // @@@ gvdl: Ask JoeS are the pages contiguious with the list? - // or there a chance that we may be inserting 0 phys_addrs? - // Pre-compute the offset into the UPL's page list - pageList = &pageList[atop_32(offset)]; - offset &= PAGE_MASK; - if (mapper) { - iopl.fMappedBase = mapper->iovmAlloc(_pages); - mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages); - } - else - iopl.fMappedBase = 0; - } - else - iopl.fMappedBase = 0; - iopl.fPageInfo = (vm_address_t) pageList; - iopl.fPageOffset = offset; +addr64_t +IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) +{ + addr64_t address = 0; - _memoryEntries->appendBytes(&iopl, sizeof(iopl)); + if (options & _kIOMemorySourceSegment) + { + address = getSourceSegment(offset, lengthOfSegment); + } + else if (options & kIOMemoryMapperNone) + { + address = getPhysicalSegment64(offset, lengthOfSegment); + } + else + { + address = getPhysicalSegment(offset, lengthOfSegment); } - else { /* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */ - IOVirtualRange *ranges = (IOVirtualRange *) buffers; - - /* - * Initialize the memory descriptor. - */ - _length = 0; - _pages = 0; - for (unsigned ind = 0; ind < count; ind++) { - IOVirtualRange cur = ranges[ind]; - - _length += cur.length; - _pages += atop_32(cur.address + cur.length + PAGE_MASK) - - atop_32(cur.address); - } + return (address); +} +#pragma clang diagnostic pop - _ranges.v = 0; - _rangesIsAllocated = !(options & kIOMemoryAsReference); - _rangesCount = count; +addr64_t +IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) +{ + return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone)); +} - if (options & kIOMemoryAsReference) - _ranges.v = ranges; - else { - _ranges.v = IONew(IOVirtualRange, count); - if (!_ranges.v) - return false; - bcopy(/* from */ ranges, _ranges.v, - count * sizeof(IOVirtualRange)); - } +IOPhysicalAddress +IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment) +{ + addr64_t address = 0; + IOByteCount length = 0; - // Auto-prepare memory at creation time. 
- // Implied completion when descriptor is free-ed - if ( (options & kIOMemoryTypeMask) == kIOMemoryTypePhysical) - _wireCount++; // Physical MDs are start out wired - else { /* kIOMemoryTypeVirtual */ - ioGMDData *dataP; - unsigned int dataSize = - computeDataSize(_pages, /* upls */ _rangesCount * 2); + address = getPhysicalSegment(offset, lengthOfSegment, 0); - if (!_memoryEntries) { - _memoryEntries = OSData::withCapacity(dataSize); - if (!_memoryEntries) - return false; - } - else if (!_memoryEntries->initWithCapacity(dataSize)) - return false; - - _memoryEntries->appendBytes(0, sizeof(ioGMDData)); - dataP = getDataP(_memoryEntries); - dataP->fMapper = mapper; - dataP->fPageCnt = _pages; + if (lengthOfSegment) + length = *lengthOfSegment; - if ((_flags & kIOMemoryAutoPrepare) - && prepare() != kIOReturnSuccess) - return false; - } + if ((address + length) > 0x100000000ULL) + { + panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s", + address, (long) length, (getMetaClass())->getClassName()); } - return true; + return ((IOPhysicalAddress) address); } -/* - * free - * - * Free resources. - */ -void IOGeneralMemoryDescriptor::free() +addr64_t +IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) { - LOCK; - if( reserved) - reserved->memory = 0; - UNLOCK; - - while (_wireCount) - complete(); - if (_memoryEntries) - _memoryEntries->release(); + IOPhysicalAddress phys32; + IOByteCount length; + addr64_t phys64; + IOMapper * mapper = 0; - if (_kernPtrAligned) - unmapFromKernel(); - if (_ranges.v && _rangesIsAllocated) - IODelete(_ranges.v, IOVirtualRange, _rangesCount); + phys32 = getPhysicalSegment(offset, lengthOfSegment); + if (!phys32) + return 0; - if (reserved && reserved->devicePager) - device_pager_deallocate( (memory_object_t) reserved->devicePager ); + if (gIOSystemMapper) + mapper = gIOSystemMapper; - // memEntry holds a ref on the device pager which owns reserved - // (ExpansionData) so no reserved access after this point - if (_memEntry) - ipc_port_release_send( (ipc_port_t) _memEntry ); + if (mapper) + { + IOByteCount origLen; - super::free(); -} + phys64 = mapper->mapToPhysicalAddress(phys32); + origLen = *lengthOfSegment; + length = page_size - (phys64 & (page_size - 1)); + while ((length < origLen) + && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) + length += page_size; + if (length > origLen) + length = origLen; -/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel() -/* DEPRECATED */ { - panic("IOGMD::unmapFromKernel deprecated"); -/* DEPRECATED */ } -/* DEPRECATED */ -/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) -/* DEPRECATED */ { - panic("IOGMD::mapIntoKernel deprecated"); -/* DEPRECATED */ } + *lengthOfSegment = length; + } + else + phys64 = (addr64_t) phys32; -/* - * getDirection: - * - * Get the direction of the transfer. - */ -IODirection IOMemoryDescriptor::getDirection() const -{ - return _direction; + return phys64; } -/* - * getLength: - * - * Get the length of the transfer (over all ranges). 
- */
-IOByteCount IOMemoryDescriptor::getLength() const
+IOPhysicalAddress
+IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
 {
- return _length;
+ return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
 }
-void IOMemoryDescriptor::setTag( IOOptionBits tag )
+IOPhysicalAddress
+IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
 {
- _tag = tag;
+ return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
 }
-IOOptionBits IOMemoryDescriptor::getTag( void )
-{
- return( _tag);
-}
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
-// @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
-IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
- IOByteCount * length )
+void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+ IOByteCount * lengthOfSegment)
 {
- IOPhysicalAddress physAddr = 0;
-
- if( prepare() == kIOReturnSuccess) {
- physAddr = getPhysicalSegment( offset, length );
- complete();
- }
+ if (_task == kernel_task)
+ return (void *) getSourceSegment(offset, lengthOfSegment);
+ else
+ panic("IOGMD::getVirtualSegment deprecated");
- return( physAddr );
+ return 0;
 }
+#pragma clang diagnostic pop
+#endif /* !__LP64__ */
-IOByteCount IOMemoryDescriptor::readBytes
- (IOByteCount offset, void *bytes, IOByteCount length)
+IOReturn
+IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
 {
- addr64_t dstAddr = (addr64_t) (UInt32) bytes;
- IOByteCount remaining;
-
- // Assert that this entire I/O is withing the available range
- assert(offset < _length);
- assert(offset + length <= _length);
- if (offset >= _length) {
-IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
- return 0;
+ IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
+ DMACommandOps params;
+ IOReturn err;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics))
+ return kIOReturnUnderrun;
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = getLength();
+ data->fSGCount = 0;
+ data->fDirection = getDirection();
+ data->fIsPrepared = true; // Assume prepared - fails safe
 }
+ else if (kIOMDWalkSegments == op) {
+ if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
+ return kIOReturnUnderrun;
- remaining = length = min(length, _length - offset);
- while (remaining) { // (process another target segment?)
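All of these wrappers end up in getPhysicalSegment(offset, &length, options); a client that wants raw physical extents walks the descriptor the same way. A minimal sketch, assuming md is an IOMemoryDescriptor that has already been prepared:

IOByteCount offset = 0;
while (offset < md->getLength())
{
    IOByteCount segLen  = 0;
    addr64_t    segPhys = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);

    if (!segPhys || !segLen) break;
    // hand segPhys / segLen to the device's scatter/gather list here
    offset += segLen;
}

In practice IODMACommand is preferred over a hand-rolled walk, since it also honours mapper and address-width constraints.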
- addr64_t srcAddr64; - IOByteCount srcLen; - - srcAddr64 = getPhysicalSegment64(offset, &srcLen); - if (!srcAddr64) - break; + IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData; + IOByteCount offset = (IOByteCount) data->fOffset; - // Clip segment length to remaining - if (srcLen > remaining) - srcLen = remaining; + IOPhysicalLength length; + if (data->fMapped && IOMapper::gSystem) + data->fIOVMAddr = md->getPhysicalSegment(offset, &length); + else + data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone); + data->fLength = length; + } + else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported; + else if (kIOMDDMAMap == op) + { + if (dataSize < sizeof(IOMDDMAMapArgs)) + return kIOReturnUnderrun; + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; - copypv(srcAddr64, dstAddr, srcLen, - cppvPsrc | cppvFsnk | cppvKmap); + if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName()); - dstAddr += srcLen; - offset += srcLen; - remaining -= srcLen; + data->fMapContig = true; + err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + return (err); } + else return kIOReturnBadArgument; - assert(!remaining); - - return length - remaining; + return kIOReturnSuccess; } -IOByteCount IOMemoryDescriptor::writeBytes - (IOByteCount offset, const void *bytes, IOByteCount length) +IOReturn +IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) { - addr64_t srcAddr = (addr64_t) (UInt32) bytes; - IOByteCount remaining; - - // Assert that this entire I/O is withing the available range - assert(offset < _length); - assert(offset + length <= _length); + IOReturn err = kIOReturnSuccess; - assert( !(kIOMemoryPreparedReadOnly & _flags) ); + vm_purgable_t control; + int state; - if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) { -IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl - return 0; + if (_memRef) + { + err = super::setPurgeable(newState, oldState); + } + else + { + if (kIOMemoryThreadSafe & _flags) + LOCK; + do + { + // Find the appropriate vm_map for the given task + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) + { + err = kIOReturnNotReady; + break; + } + else if (!_task) + { + err = kIOReturnUnsupported; + break; + } + else + curMap = get_task_map(_task); + + // can only do one range + Ranges vec = _ranges; + IOOptionBits type = _flags & kIOMemoryTypeMask; + mach_vm_address_t addr; + mach_vm_size_t len; + getAddrLenForInd(addr, len, type, vec, 0); + + err = purgeableControlBits(newState, &control, &state); + if (kIOReturnSuccess != err) + break; + err = mach_vm_purgable_control(curMap, addr, control, &state); + if (oldState) + { + if (kIOReturnSuccess == err) + { + err = purgeableStateBits(&state); + *oldState = state; + } + } + } + while (false); + if (kIOMemoryThreadSafe & _flags) + UNLOCK; } - remaining = length = min(length, _length - offset); - while (remaining) { // (process another target segment?) 
- addr64_t dstAddr64; - IOByteCount dstLen; + return (err); +} - dstAddr64 = getPhysicalSegment64(offset, &dstLen); - if (!dstAddr64) - break; +IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) +{ + IOReturn err = kIOReturnNotReady; - // Clip segment length to remaining - if (dstLen > remaining) - dstLen = remaining; + if (kIOMemoryThreadSafe & _flags) LOCK; + if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState); + if (kIOMemoryThreadSafe & _flags) UNLOCK; - copypv(srcAddr, (addr64_t) dstAddr64, dstLen, - cppvPsnk | cppvFsnk | cppvNoModSnk | cppvKmap); + return (err); +} + +IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount ) +{ + IOReturn err = kIOReturnNotReady; - srcAddr += dstLen; - offset += dstLen; - remaining -= dstLen; + if (kIOMemoryThreadSafe & _flags) LOCK; + if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount); + else + { + IOMultiMemoryDescriptor * mmd; + IOSubMemoryDescriptor * smd; + if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) + { + err = smd->getPageCounts(residentPageCount, dirtyPageCount); + } + else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) + { + err = mmd->getPageCounts(residentPageCount, dirtyPageCount); + } } + if (kIOMemoryThreadSafe & _flags) UNLOCK; - assert(!remaining); - - return length - remaining; + return (err); } + -// osfmk/device/iokit_rpc.c -extern "C" unsigned int IODefaultCacheBits(addr64_t pa); - -/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) -/* DEPRECATED */ { - panic("IOGMD::setPosition deprecated"); -/* DEPRECATED */ } +extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); +extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); -IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment - (IOByteCount offset, IOByteCount *lengthOfSegment) +static void SetEncryptOp(addr64_t pa, unsigned int count) { - IOPhysicalAddress address = 0; - IOPhysicalLength length = 0; + ppnum_t page, end; -// assert(offset <= _length); - if (offset < _length) // (within bounds?) + page = atop_64(round_page_64(pa)); + end = atop_64(trunc_page_64(pa + count)); + for (; page < end; page++) { - if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { - unsigned int ind; - - // Physical address based memory descriptor - - // Find offset within descriptor and make it relative - // to the current _range. - for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ ) - offset -= _ranges.p[ind].length; - - IOPhysicalRange cur = _ranges.p[ind]; - address = cur.address + offset; - length = cur.length - offset; + pmap_clear_noencrypt(page); + } +} - // see how far we can coalesce ranges - for (++ind; ind < _rangesCount; ind++) { - cur = _ranges.p[ind]; - - if (address + length != cur.address) - break; - - length += cur.length; - } +static void ClearEncryptOp(addr64_t pa, unsigned int count) +{ + ppnum_t page, end; - // @@@ gvdl: should assert(address); - // but can't as NVidia GeForce creates a bogus physical mem - { - assert(address || /*nvidia*/(!_ranges.p[0].address && 1 == _rangesCount)); - } - assert(length); - } - else do { - // We need wiring & we are wired. 
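setPurgeable() and getPageCounts() above are the client-visible faces of this machinery; the usual pattern is to mark a cache-like buffer volatile while it is idle and to check whether the VM emptied it before reusing it. A sketch, assuming a buffer created with the kIOMemoryPageable and kIOMemoryPurgeable options (size and names are illustrative):

IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithOptions(
        kernel_task, kIODirectionInOut | kIOMemoryPageable | kIOMemoryPurgeable,
        128 * 1024, page_size);

if (buf)
{
    IOOptionBits oldState;
    IOByteCount  resident, dirty;

    // Idle: allow the VM to reclaim the pages under memory pressure.
    buf->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);

    // Before reuse: pin the contents again and learn whether they survived.
    buf->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
    if (kIOMemoryPurgeableEmpty == oldState)
    {
        // contents were reclaimed; regenerate them before use
    }

    buf->getPageCounts(&resident, &dirty);
    buf->release();
}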
- assert(_wireCount); + page = atop_64(round_page_64(pa)); + end = atop_64(trunc_page_64(pa + count)); + for (; page < end; page++) + { + pmap_set_noencrypt(page); + } +} - if (!_wireCount) - { - panic("IOGMD: not wired for getPhysicalSegment()"); - continue; - } +IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, + IOByteCount offset, IOByteCount length ) +{ + IOByteCount remaining; + unsigned int res; + void (*func)(addr64_t pa, unsigned int count) = 0; - assert(_memoryEntries); + switch (options) + { + case kIOMemoryIncoherentIOFlush: + func = &dcache_incoherent_io_flush64; + break; + case kIOMemoryIncoherentIOStore: + func = &dcache_incoherent_io_store64; + break; - ioGMDData * dataP = getDataP(_memoryEntries); - const ioPLBlock *ioplList = getIOPLList(dataP); - UInt ind, numIOPLs = getNumIOPL(dataP, _memoryEntries->getLength()); - upl_page_info_t *pageList = getPageList(dataP); + case kIOMemorySetEncrypted: + func = &SetEncryptOp; + break; + case kIOMemoryClearEncrypted: + func = &ClearEncryptOp; + break; + } - assert(numIOPLs > 0); + if (!func) + return (kIOReturnUnsupported); - // Scan through iopl info blocks looking for block containing offset - for (ind = 1; ind < numIOPLs; ind++) { - if (offset < ioplList[ind].fIOMDOffset) - break; - } + if (kIOMemoryThreadSafe & _flags) + LOCK; - // Go back to actual range as search goes past it - ioPLBlock ioplInfo = ioplList[ind - 1]; + res = 0x0UL; + remaining = length = min(length, getLength() - offset); + while (remaining) + // (process another target segment?) + { + addr64_t dstAddr64; + IOByteCount dstLen; - if (ind < numIOPLs) - length = ioplList[ind].fIOMDOffset; - else - length = _length; - length -= offset; // Remainder within iopl - - // Subtract offset till this iopl in total list - offset -= ioplInfo.fIOMDOffset; - - // This is a mapped IOPL so we just need to compute an offset - // relative to the mapped base. - if (ioplInfo.fMappedBase) { - offset += (ioplInfo.fPageOffset & PAGE_MASK); - address = ptoa_32(ioplInfo.fMappedBase) + offset; - continue; - } + dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); + if (!dstAddr64) + break; - // Currently the offset is rebased into the current iopl. - // Now add the iopl 1st page offset. - offset += ioplInfo.fPageOffset; + // Clip segment length to remaining + if (dstLen > remaining) + dstLen = remaining; - // For external UPLs the fPageInfo field points directly to - // the upl's upl_page_info_t array. - if (ioplInfo.fFlags & kIOPLExternUPL) - pageList = (upl_page_info_t *) ioplInfo.fPageInfo; - else - pageList = &pageList[ioplInfo.fPageInfo]; + (*func)(dstAddr64, dstLen); - // Check for direct device non-paged memory - if ( ioplInfo.fFlags & kIOPLOnDevice ) { - address = ptoa_32(pageList->phys_addr) + offset; - continue; - } + offset += dstLen; + remaining -= dstLen; + } - // Now we need compute the index into the pageList - ind = atop_32(offset); - offset &= PAGE_MASK; + if (kIOMemoryThreadSafe & _flags) + UNLOCK; - IOPhysicalAddress pageAddr = pageList[ind].phys_addr; - address = ptoa_32(pageAddr) + offset; - - // Check for the remaining data in this upl being longer than the - // remainder on the current page. This should be checked for - // contiguous pages - if (length > PAGE_SIZE - offset) { - // See if the next page is contiguous. Stop looking when we hit - // the end of this upl, which is indicated by the - // contigLength >= length. 
- IOByteCount contigLength = PAGE_SIZE - offset; - - // Look for contiguous segment - while (contigLength < length - && ++pageAddr == pageList[++ind].phys_addr) { - contigLength += PAGE_SIZE; - } - if (length > contigLength) - length = contigLength; - } - - assert(address); - assert(length); + return (remaining ? kIOReturnUnderrun : kIOReturnSuccess); +} - } while (0); +/* + * + */ - if (!address) - length = 0; - } +#if defined(__i386__) || defined(__x86_64__) - if (lengthOfSegment) - *lengthOfSegment = length; +#define io_kernel_static_start vm_kernel_stext +#define io_kernel_static_end vm_kernel_etext - return address; -} +#else +#error io_kernel_static_end is undefined for this architecture +#endif -addr64_t IOMemoryDescriptor::getPhysicalSegment64 - (IOByteCount offset, IOByteCount *lengthOfSegment) +static kern_return_t +io_get_kernel_static_upl( + vm_map_t /* map */, + uintptr_t offset, + upl_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count, + ppnum_t *highest_page) { - IOPhysicalAddress phys32; - IOByteCount length; - addr64_t phys64; - - phys32 = getPhysicalSegment(offset, lengthOfSegment); - if (!phys32) - return 0; + unsigned int pageCount, page; + ppnum_t phys; + ppnum_t highestPage = 0; - if (gIOSystemMapper) - { - IOByteCount origLen; + pageCount = atop_32(*upl_size); + if (pageCount > *count) + pageCount = *count; - phys64 = gIOSystemMapper->mapAddr(phys32); - origLen = *lengthOfSegment; - length = page_size - (phys64 & (page_size - 1)); - while ((length < origLen) - && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length))) - length += page_size; - if (length > origLen) - length = origLen; + *upl = NULL; - *lengthOfSegment = length; + for (page = 0; page < pageCount; page++) + { + phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page)); + if (!phys) + break; + page_list[page].phys_addr = phys; + page_list[page].free_when_done = 0; + page_list[page].absent = 0; + page_list[page].dirty = 0; + page_list[page].precious = 0; + page_list[page].device = 0; + if (phys > highestPage) + highestPage = phys; } - else - phys64 = (addr64_t) phys32; - return phys64; + *highest_page = highestPage; + + return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError); } -IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment - (IOByteCount offset, IOByteCount *lengthOfSegment) +/* + * + */ +#if IOTRACKING +static void +IOMemoryDescriptorUpdateWireOwner(ioGMDData * dataP, OSData * memoryEntries, vm_tag_t tag) { - IOPhysicalAddress address = 0; - IOPhysicalLength length = 0; + ioPLBlock *ioplList; + UInt ind, count; + vm_tag_t prior; - assert(offset <= _length); + count = getNumIOPL(memoryEntries, dataP); + if (!count) return; + ioplList = getIOPLList(dataP); - if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeUPL) - return super::getSourceSegment( offset, lengthOfSegment ); + if (VM_KERN_MEMORY_NONE == tag) tag = dataP->fAllocTag; + assert(VM_KERN_MEMORY_NONE != tag); - if ( offset < _length ) // (within bounds?) 
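performOperation() is the cache-maintenance entry point this file itself uses (for example from the zero-fill path in writeBytes); drivers call it the same way around DMA that bypasses the CPU cache. A short sketch, assuming a prepared descriptor md:

// Device has just written into the buffer: flush/invalidate before the CPU reads it.
md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());

// CPU has just filled the buffer: write dirty lines back before the device reads it.
md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());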
+ for (ind = 0; ind < count; ind++) { - unsigned rangesIndex = 0; - - for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ ) - { - offset -= _ranges.v[rangesIndex].length; // (make offset relative) - } - - address = _ranges.v[rangesIndex].address + offset; - length = _ranges.v[rangesIndex].length - offset; - - for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) + if (!ioplList[ind].fIOPL) continue; + prior = iopl_set_tag(ioplList[ind].fIOPL, tag); + if (VM_KERN_MEMORY_NONE == dataP->fAllocTag) dataP->fAllocTag = prior; +#if 0 + if (tag != prior) { - if ( address + length != _ranges.v[rangesIndex].address ) break; - - length += _ranges.v[rangesIndex].length; // (coalesce ranges) + char name[2][48]; + vm_tag_get_kext(prior, &name[0][0], sizeof(name[0])); + vm_tag_get_kext(tag, &name[1][0], sizeof(name[1])); + IOLog("switched %48s to %48s\n", name[0], name[1]); } - - assert(address); - if ( address == 0 ) length = 0; +#endif } - - if ( lengthOfSegment ) *lengthOfSegment = length; - - return address; } +#endif /* IOTRACKING */ -/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ -/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, -/* DEPRECATED */ IOByteCount * lengthOfSegment) -/* DEPRECATED */ { - if (_task == kernel_task) - return (void *) getSourceSegment(offset, lengthOfSegment); - else - panic("IOGMD::getVirtualSegment deprecated"); - - return 0; -/* DEPRECATED */ } -/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) { - IOReturn error = kIOReturnNoMemory; + IOOptionBits type = _flags & kIOMemoryTypeMask; + IOReturn error = kIOReturnSuccess; ioGMDData *dataP; - ppnum_t mapBase = 0; - IOMapper *mapper; + upl_page_info_array_t pageInfo; + ppnum_t mapBase; - assert(!_wireCount); + assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type); - dataP = getDataP(_memoryEntries); - mapper = dataP->fMapper; - if (mapper && _pages) - mapBase = mapper->iovmAlloc(_pages); - - // Note that appendBytes(NULL) zeros the data up to the - // desired length. - _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t)); - dataP = 0; // May no longer be valid so lets not get tempted. + if ((kIODirectionOutIn & forDirection) == kIODirectionNone) + forDirection = (IODirection) (forDirection | getDirection()); - if (forDirection == kIODirectionNone) - forDirection = _direction; - - int uplFlags; // This Mem Desc's default flags for upl creation - switch (forDirection) + upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation + switch (kIODirectionOutIn & forDirection) { case kIODirectionOut: // Pages do not need to be marked as dirty on commit uplFlags = UPL_COPYOUT_FROM; - _flags |= kIOMemoryPreparedReadOnly; break; case kIODirectionIn: @@ -1074,130 +2881,361 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM break; } - uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; + dataP = getDataP(_memoryEntries); - // - // Check user read/write access to the data buffer. 
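wireVirtual() is reached through prepare(), and the direction passed there selects the UPL flags above: kIODirectionOut takes the UPL_COPYOUT_FROM path, so the pages can be treated as read-only for the transfer. A minimal lifecycle sketch, assuming a user buffer described by the illustrative userAddr / userLen / userTask:

IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
        userAddr, userLen, kIODirectionOut, userTask);    // device will only read this memory

if (md)
{
    if (kIOReturnSuccess == md->prepare(kIODirectionOut))    // wires the pages via wireVirtual()
    {
        // generate DMA addresses here (getPhysicalSegment() or IODMACommand)
        md->complete(kIODirectionOut);                        // unwire; pages were never dirtied
    }
    md->release();
}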
- // - unsigned int pageIndex = 0; - IOByteCount mdOffset = 0; - vm_map_t curMap; - if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) - curMap = 0; + if (kIODirectionDMACommand & forDirection) assert(_wireCount); + + if (_wireCount) + { + if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) + { + OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this)); + error = kIOReturnNotWritable; + } + } else - { curMap = get_task_map(_task); } + { + IOMapper *mapper; + mapper = dataP->fMapper; + dataP->fMappedBase = 0; - for (UInt range = 0; range < _rangesCount; range++) { - ioPLBlock iopl; - IOVirtualRange curRange = _ranges.v[range]; - vm_address_t startPage; - IOByteCount numBytes; - - startPage = trunc_page_32(curRange.address); - iopl.fPageOffset = (short) curRange.address & PAGE_MASK; - if (mapper) - iopl.fMappedBase = mapBase + pageIndex; - else - iopl.fMappedBase = 0; - numBytes = iopl.fPageOffset + curRange.length; + uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; + uplFlags |= UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map)); - while (numBytes) { - dataP = getDataP(_memoryEntries); - vm_map_t theMap = - (curMap)? curMap - : IOPageableMapForAddress(startPage); - upl_page_info_array_t pageInfo = getPageList(dataP); - int ioplFlags = uplFlags; - upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; - - vm_size_t ioplSize = round_page_32(numBytes); - unsigned int numPageInfo = atop_32(ioplSize); - error = vm_map_get_upl(theMap, - startPage, - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &ioplFlags, - false); - assert(ioplSize); - if (error != KERN_SUCCESS) - goto abortExit; - - error = kIOReturnNoMemory; - - if (baseInfo->device) { - numPageInfo = 1; - iopl.fFlags = kIOPLOnDevice; - // Don't translate device memory at all - if (mapper && mapBase) { - mapper->iovmFree(mapBase, _pages); - mapBase = 0; - iopl.fMappedBase = 0; - } - } - else { - iopl.fFlags = 0; - if (mapper) - mapper->iovmInsert(mapBase, pageIndex, - baseInfo, numPageInfo); - } + if (kIODirectionPrepareToPhys32 & forDirection) + { + if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR; + if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32; + } + if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT; + if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO; + if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY; + + mapBase = 0; + + // Note that appendBytes(NULL) zeros the data up to the desired length + // and the length parameter is an unsigned int + size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); + if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory); + if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory); + dataP = 0; + + // Find the appropriate vm_map for the given task + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0; + else curMap = get_task_map(_task); + + // Iterate over the vector of virtual ranges + Ranges vec = _ranges; + unsigned int pageIndex = 0; + IOByteCount mdOffset = 0; + ppnum_t highestPage = 0; + + IOMemoryEntry * memRefEntry = 0; + if (_memRef) memRefEntry = &_memRef->entries[0]; + + for (UInt range = 0; range < _rangesCount; range++) { + ioPLBlock iopl; + mach_vm_address_t startPage; + mach_vm_size_t numBytes; + ppnum_t highPage = 0; + + // Get the startPage address and length of vec[range] + getAddrLenForInd(startPage, numBytes, 
type, vec, range); + iopl.fPageOffset = startPage & PAGE_MASK; + numBytes += iopl.fPageOffset; + startPage = trunc_page_64(startPage); + + if (mapper) + iopl.fMappedPage = mapBase + pageIndex; + else + iopl.fMappedPage = 0; + + // Iterate over the current range, creating UPLs + while (numBytes) { + vm_address_t kernelStart = (vm_address_t) startPage; + vm_map_t theMap; + if (curMap) theMap = curMap; + else if (_memRef) + { + theMap = NULL; + } + else + { + assert(_task == kernel_task); + theMap = IOPageableMapForAddress(kernelStart); + } + + // ioplFlags is an in/out parameter + upl_control_flags_t ioplFlags = uplFlags; + dataP = getDataP(_memoryEntries); + pageInfo = getPageList(dataP); + upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; + + mach_vm_size_t _ioplSize = round_page(numBytes); + upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES; + unsigned int numPageInfo = atop_32(ioplSize); + + if ((theMap == kernel_map) + && (kernelStart >= io_kernel_static_start) + && (kernelStart < io_kernel_static_end)) { + error = io_get_kernel_static_upl(theMap, + kernelStart, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &highPage); + } + else if (_memRef) { + memory_object_offset_t entryOffset; + + entryOffset = mdOffset; + entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset); + if (entryOffset >= memRefEntry->size) { + memRefEntry++; + if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry"); + entryOffset = 0; + } + if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset); + error = memory_object_iopl_request(memRefEntry->entry, + entryOffset, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags); + } + else { + assert(theMap); + error = vm_map_create_upl(theMap, + startPage, + (upl_size_t*)&ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags); + } - iopl.fIOMDOffset = mdOffset; - iopl.fPageInfo = pageIndex; + if (error != KERN_SUCCESS) goto abortExit; - if (_flags & kIOMemoryAutoPrepare) - { - kernel_upl_commit(iopl.fIOPL, 0, 0); - iopl.fIOPL = 0; - } + assert(ioplSize); - if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { - // Clean up partial created and unsaved iopl - if (iopl.fIOPL) - kernel_upl_abort(iopl.fIOPL, 0); - goto abortExit; - } + if (iopl.fIOPL) + highPage = upl_get_highest_page(iopl.fIOPL); + if (highPage > highestPage) + highestPage = highPage; - // Check for a multiple iopl's in one virtual range - pageIndex += numPageInfo; - mdOffset -= iopl.fPageOffset; - if (ioplSize < numBytes) { - numBytes -= ioplSize; - startPage += ioplSize; - mdOffset += ioplSize; - iopl.fPageOffset = 0; - if (mapper) - iopl.fMappedBase = mapBase + pageIndex; - } - else { - mdOffset += numBytes; - break; + if (baseInfo->device) { + numPageInfo = 1; + iopl.fFlags = kIOPLOnDevice; + } + else { + iopl.fFlags = 0; + } + + iopl.fIOMDOffset = mdOffset; + iopl.fPageInfo = pageIndex; + if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true; + + if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { + // Clean up partial created and unsaved iopl + if (iopl.fIOPL) { + upl_abort(iopl.fIOPL, 0); + upl_deallocate(iopl.fIOPL); + } + goto abortExit; + } + dataP = 0; + + // Check for a multiple iopl's in one virtual range + pageIndex += numPageInfo; + mdOffset -= iopl.fPageOffset; + if (ioplSize < numBytes) { + numBytes -= ioplSize; + startPage += ioplSize; + mdOffset += ioplSize; + iopl.fPageOffset = 0; + if (mapper) 
iopl.fMappedPage = mapBase + pageIndex; + } + else { + mdOffset += numBytes; + break; + } } } + + _highestPage = highestPage; + + if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly; } - return kIOReturnSuccess; +#if IOTRACKING + if (kIOReturnSuccess == error) + { + vm_tag_t tag; + + dataP = getDataP(_memoryEntries); + if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift; + else tag = IOMemoryTag(kernel_map); + + if (!_wireCount) vm_tag_set_init(&dataP->fWireTags, kMaxWireTags); + vm_tag_set_enter(&dataP->fWireTags, kMaxWireTags, tag); + + IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag); + if (!_wireCount) + { + //if (!(_flags & kIOMemoryAutoPrepare)) + IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false); + } + } +#endif /* IOTRACKING */ + + return (error); abortExit: { dataP = getDataP(_memoryEntries); - UInt done = getNumIOPL(dataP, _memoryEntries->getLength()); + UInt done = getNumIOPL(_memoryEntries, dataP); ioPLBlock *ioplList = getIOPLList(dataP); for (UInt range = 0; range < done; range++) { - if (ioplList[range].fIOPL) - kernel_upl_abort(ioplList[range].fIOPL, 0); + if (ioplList[range].fIOPL) { + upl_abort(ioplList[range].fIOPL, 0); + upl_deallocate(ioplList[range].fIOPL); + } } - - if (mapper && mapBase) - mapper->iovmFree(mapBase, _pages); + (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() } + if (error == KERN_FAILURE) + error = kIOReturnCannotWire; + else if (error == KERN_MEMORY_ERROR) + error = kIOReturnNoResources; + return error; } +bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper) +{ + ioGMDData * dataP; + unsigned dataSize = size; + + if (!_memoryEntries) { + _memoryEntries = OSData::withCapacity(dataSize); + if (!_memoryEntries) + return false; + } + else if (!_memoryEntries->initWithCapacity(dataSize)) + return false; + + _memoryEntries->appendBytes(0, computeDataSize(0, 0)); + dataP = getDataP(_memoryEntries); + + if (mapper == kIOMapperWaitSystem) { + IOMapper::checkForSystemMapper(); + mapper = IOMapper::gSystem; + } + dataP->fMapper = mapper; + dataP->fPageCnt = 0; + dataP->fMappedBase = 0; + dataP->fDMAMapNumAddressBits = 64; + dataP->fDMAMapAlignment = 0; + dataP->fPreparationID = kIOPreparationIDUnprepared; + dataP->fDiscontig = false; + dataP->fCompletionError = false; + + return (true); +} + +IOReturn IOMemoryDescriptor::dmaMap( + IOMapper * mapper, + IODMACommand * command, + const IODMAMapSpecification * mapSpec, + uint64_t offset, + uint64_t length, + uint64_t * mapAddress, + uint64_t * mapLength) +{ + IOReturn ret; + uint32_t mapOptions; + + mapOptions = 0; + mapOptions |= kIODMAMapReadAccess; + if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; + + ret = mapper->iovmMapMemory(this, offset, length, mapOptions, + mapSpec, command, NULL, mapAddress, mapLength); + + return (ret); +} + +IOReturn IOGeneralMemoryDescriptor::dmaMap( + IOMapper * mapper, + IODMACommand * command, + const IODMAMapSpecification * mapSpec, + uint64_t offset, + uint64_t length, + uint64_t * mapAddress, + uint64_t * mapLength) +{ + IOReturn err = kIOReturnSuccess; + ioGMDData * dataP; + IOOptionBits type = _flags & kIOMemoryTypeMask; + + *mapAddress = 0; + if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess); + + if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64) + || offset || (length != _length)) + { + err = super::dmaMap(mapper, command, 
mapSpec, offset, length, mapAddress, mapLength); + } + else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) + { + const ioPLBlock * ioplList = getIOPLList(dataP); + upl_page_info_t * pageList; + uint32_t mapOptions = 0; + + IODMAMapSpecification mapSpec; + bzero(&mapSpec, sizeof(mapSpec)); + mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; + mapSpec.alignment = dataP->fDMAMapAlignment; + + // For external UPLs the fPageInfo field points directly to + // the upl's upl_page_info_t array. + if (ioplList->fFlags & kIOPLExternUPL) + { + pageList = (upl_page_info_t *) ioplList->fPageInfo; + mapOptions |= kIODMAMapPagingPath; + } + else pageList = getPageList(dataP); + + if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) + { + mapOptions |= kIODMAMapPageListFullyOccupied; + } + + mapOptions |= kIODMAMapReadAccess; + if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; + + // Check for direct device non-paged memory + if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous; + + IODMAMapPageList dmaPageList = + { + .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask), + .pageListCount = _pages, + .pageList = &pageList[0] + }; + err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec, + command, &dmaPageList, mapAddress, mapLength); + } + + return (err); +} + /* * prepare * @@ -1207,19 +3245,42 @@ abortExit: * the memory after the I/O transfer finishes. This method needn't * called for non-pageable memory. */ + IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) { - IOReturn error = kIOReturnSuccess; + IOReturn error = kIOReturnSuccess; + IOOptionBits type = _flags & kIOMemoryTypeMask; + + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) + return kIOReturnSuccess; + + if (_prepareLock) IOLockLock(_prepareLock); - if (!_wireCount && (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) { + if (kIODirectionDMACommand & forDirection) + { +#if IOMD_DEBUG_DMAACTIVE + OSIncrementAtomic(&__iomd_reservedA); +#endif /* IOMD_DEBUG_DMAACTIVE */ + } + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { error = wireVirtual(forDirection); - if (error) - return error; } - _wireCount++; + if ((kIOReturnSuccess == error) && !(kIODirectionDMACommand & forDirection)) + { + if (1 == ++_wireCount) + { + if (kIOMemoryClearEncrypt & _flags) + { + performOperation(kIOMemoryClearEncrypted, 0, _length); + } + } + } + + if (_prepareLock) IOLockUnlock(_prepareLock); - return kIOReturnSuccess; + return error; } /* @@ -1230,883 +3291,845 @@ IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) * issued; the prepare() and complete() must occur in pairs, before * before and after an I/O transfer involving pageable memory. 
*/ - -IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */) -{ - assert(_wireCount); - - if (!_wireCount) - return kIOReturnSuccess; - - _wireCount--; - if (!_wireCount) { - if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { - /* kIOMemoryTypePhysical */ - // DO NOTHING - } - else { - ioGMDData * dataP = getDataP(_memoryEntries); - ioPLBlock *ioplList = getIOPLList(dataP); - UInt count = getNumIOPL(dataP, _memoryEntries->getLength()); - - if (dataP->fMapper && _pages && ioplList[0].fMappedBase) - dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages); - // Only complete iopls that we created which are for TypeVirtual - if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) { - for (UInt ind = 0; ind < count; ind++) - if (ioplList[ind].fIOPL) - kernel_upl_commit(ioplList[ind].fIOPL, 0, 0); - } - - (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength() - } - } - return kIOReturnSuccess; -} - -IOReturn IOGeneralMemoryDescriptor::doMap( - vm_map_t addressMap, - IOVirtualAddress * atAddress, - IOOptionBits options, - IOByteCount sourceOffset, - IOByteCount length ) +IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection) { - kern_return_t kr; - ipc_port_t sharedMem = (ipc_port_t) _memEntry; - - // mapping source == dest? (could be much better) - if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere) - && (1 == _rangesCount) && (0 == sourceOffset) - && (length <= _ranges.v[0].length) ) { - *atAddress = _ranges.v[0].address; - return( kIOReturnSuccess ); - } - - if( 0 == sharedMem) { - - vm_size_t size = _pages << PAGE_SHIFT; - - if( _task) { -#ifndef i386 - vm_size_t actualSize = size; - kr = mach_make_memory_entry( get_task_map(_task), - &actualSize, _ranges.v[0].address, - VM_PROT_READ | VM_PROT_WRITE, &sharedMem, - NULL ); - - if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) { -#if IOASSERT - IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n", - _ranges.v[0].address, (UInt32)actualSize, size); -#endif - kr = kIOReturnVMError; - ipc_port_release_send( sharedMem ); - } - - if( KERN_SUCCESS != kr) -#endif /* i386 */ - sharedMem = MACH_PORT_NULL; - - } else do { - - memory_object_t pager; - unsigned int flags = 0; - addr64_t pa; - IOPhysicalLength segLen; - - pa = getPhysicalSegment64( sourceOffset, &segLen ); + IOOptionBits type = _flags & kIOMemoryTypeMask; + ioGMDData * dataP; - if( !reserved) { - reserved = IONew( ExpansionData, 1 ); - if( !reserved) - continue; - } - reserved->pagerContig = (1 == _rangesCount); - reserved->memory = this; - - /*What cache mode do we need*/ - switch(options & kIOMapCacheMask ) { + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) + return kIOReturnSuccess; - case kIOMapDefaultCache: - default: - flags = IODefaultCacheBits(pa); - break; - - case kIOMapInhibitCache: - flags = DEVICE_PAGER_CACHE_INHIB | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; - - case kIOMapWriteThruCache: - flags = DEVICE_PAGER_WRITE_THROUGH | - DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; + if (_prepareLock) IOLockLock(_prepareLock); + do + { + assert(_wireCount); + if (!_wireCount) break; + dataP = getDataP(_memoryEntries); + if (!dataP) break; - case kIOMapCopybackCache: - flags = DEVICE_PAGER_COHERENT; - break; +#if IOMD_DEBUG_DMAACTIVE + if (kIODirectionDMACommand & forDirection) + { + if (__iomd_reservedA) OSDecrementAtomic(&__iomd_reservedA); + else panic("kIOMDSetDMAInactive"); + } +#endif /* 
IOMD_DEBUG_DMAACTIVE */ +#if IOTRACKING + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { + vm_tag_t tag; - case kIOMapWriteCombineCache: - flags = DEVICE_PAGER_CACHE_INHIB | - DEVICE_PAGER_COHERENT; - break; - } + if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift; + else tag = IOMemoryTag(kernel_map); + vm_tag_set_remove(&dataP->fWireTags, kMaxWireTags, tag, &tag); + IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag); + } + if (kIODirectionDMACommand & forDirection) break; +#endif /* IOTRACKING */ - flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0; + if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true; - pager = device_pager_setup( (memory_object_t) 0, (int) reserved, - size, flags); - assert( pager ); + if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) + { + performOperation(kIOMemorySetEncrypted, 0, _length); + } - if( pager) { - kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/, - size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem ); + _wireCount--; + if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) + { + ioPLBlock *ioplList = getIOPLList(dataP); + UInt ind, count = getNumIOPL(_memoryEntries, dataP); - assert( KERN_SUCCESS == kr ); - if( KERN_SUCCESS != kr) { - device_pager_deallocate( pager ); - pager = MACH_PORT_NULL; - sharedMem = MACH_PORT_NULL; + if (_wireCount) + { + // kIODirectionCompleteWithDataValid & forDirection + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) + { + for (ind = 0; ind < count; ind++) + { + if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL); + } } } - if( pager && sharedMem) - reserved->devicePager = pager; - else { - IODelete( reserved, ExpansionData, 1 ); - reserved = 0; - } + else + { +#if IOMD_DEBUG_DMAACTIVE + if (__iomd_reservedA) panic("complete() while dma active"); +#endif /* IOMD_DEBUG_DMAACTIVE */ - } while( false ); + if (dataP->fMappedBase) { + dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength); + dataP->fMappedBase = 0; + } + // Only complete iopls that we created which are for TypeVirtual + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { +#if IOTRACKING + //if (!(_flags & kIOMemoryAutoPrepare)) + { + IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages)); + } +#endif /* IOTRACKING */ + for (ind = 0; ind < count; ind++) + if (ioplList[ind].fIOPL) { + if (dataP->fCompletionError) + upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/); + else + upl_commit(ioplList[ind].fIOPL, 0, 0); + upl_deallocate(ioplList[ind].fIOPL); + } + } else if (kIOMemoryTypeUPL == type) { + upl_set_referenced(ioplList[0].fIOPL, false); + } - _memEntry = (void *) sharedMem; + (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() + + dataP->fPreparationID = kIOPreparationIDUnprepared; + dataP->fAllocTag = VM_KERN_MEMORY_NONE; + } + } } + while (false); -#ifndef i386 - if( 0 == sharedMem) - kr = kIOReturnVMError; - else -#endif - kr = super::doMap( addressMap, atAddress, - options, sourceOffset, length ); + if (_prepareLock) IOLockUnlock(_prepareLock); - return( kr ); + return kIOReturnSuccess; } -IOReturn IOGeneralMemoryDescriptor::doUnmap( - vm_map_t addressMap, - IOVirtualAddress logical, - IOByteCount length ) +IOReturn 
IOGeneralMemoryDescriptor::doMap( + vm_map_t __addressMap, + IOVirtualAddress * __address, + IOOptionBits options, + IOByteCount __offset, + IOByteCount __length ) { - // could be much better - if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount) - && (logical == _ranges.v[0].address) - && (length <= _ranges.v[0].length) ) - return( kIOReturnSuccess ); - - return( super::doUnmap( addressMap, logical, length )); -} +#ifndef __LP64__ + if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit"); +#endif /* !__LP64__ */ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + kern_return_t err; -extern "C" { -// osfmk/device/iokit_rpc.c -extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa, - vm_size_t length, unsigned int mapFlags); -extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length); -}; + IOMemoryMap * mapping = (IOMemoryMap *) *__address; + mach_vm_size_t offset = mapping->fOffset + __offset; + mach_vm_size_t length = mapping->fLength; -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + IOOptionBits type = _flags & kIOMemoryTypeMask; + Ranges vec = _ranges; -OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject ) + mach_vm_address_t range0Addr = 0; + mach_vm_size_t range0Len = 0; -/* inline function implementation */ -IOPhysicalAddress IOMemoryMap::getPhysicalAddress() - { return( getPhysicalSegment( 0, 0 )); } + if ((offset >= _length) || ((offset + length) > _length)) + return( kIOReturnBadArgument ); -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + if (vec.v) + getAddrLenForInd(range0Addr, range0Len, type, vec, 0); -class _IOMemoryMap : public IOMemoryMap -{ - OSDeclareDefaultStructors(_IOMemoryMap) - - IOMemoryDescriptor * memory; - IOMemoryMap * superMap; - IOByteCount offset; - IOByteCount length; - IOVirtualAddress logical; - task_t addressTask; - vm_map_t addressMap; - IOOptionBits options; - -protected: - virtual void taggedRelease(const void *tag = 0) const; - virtual void free(); - -public: - - // IOMemoryMap methods - virtual IOVirtualAddress getVirtualAddress(); - virtual IOByteCount getLength(); - virtual task_t getAddressTask(); - virtual IOMemoryDescriptor * getMemoryDescriptor(); - virtual IOOptionBits getMapOptions(); - - virtual IOReturn unmap(); - virtual void taskDied(); - - virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, - IOByteCount * length); - - // for IOMemoryDescriptor use - _IOMemoryMap * copyCompatible( - IOMemoryDescriptor * owner, - task_t intoTask, - IOVirtualAddress toAddress, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ); - - bool initCompatible( - IOMemoryDescriptor * memory, - IOMemoryMap * superMap, - IOByteCount offset, - IOByteCount length ); - - bool initWithDescriptor( - IOMemoryDescriptor * memory, - task_t intoTask, - IOVirtualAddress toAddress, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ); + // mapping source == dest? 
(could be much better) + if (_task + && (mapping->fAddressTask == _task) + && (mapping->fAddressMap == get_task_map(_task)) + && (options & kIOMapAnywhere) + && (1 == _rangesCount) + && (0 == offset) + && range0Addr + && (length <= range0Len)) + { + mapping->fAddress = range0Addr; + mapping->fOptions |= kIOMapStatic; - IOReturn redirect( - task_t intoTask, bool redirect ); -}; + return( kIOReturnSuccess ); + } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + if (!_memRef) + { + IOOptionBits createOptions = 0; + if (!(kIOMapReadOnly & options)) + { + createOptions |= kIOMemoryReferenceWrite; +#if DEVELOPMENT || DEBUG + if (kIODirectionOut == (kIODirectionOutIn & _flags)) + { + OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction"); + } +#endif + } + err = memoryReferenceCreate(createOptions, &_memRef); + if (kIOReturnSuccess != err) return (err); + } -#undef super -#define super IOMemoryMap + memory_object_t pager; + pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0); -OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap) + // count)) + { + err = kIOReturnNotReadable; + break; + } -bool _IOMemoryMap::initCompatible( - IOMemoryDescriptor * _memory, - IOMemoryMap * _superMap, - IOByteCount _offset, - IOByteCount _length ) -{ + size = round_page(mapping->fLength); + flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS + | UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map)); - if( !super::init()) - return( false); + if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2, + NULL, NULL, + &flags)) + redirUPL2 = NULL; - if( (_offset + _length) > _superMap->getLength()) - return( false); + for (lock_count = 0; + IORecursiveLockHaveLock(gIOMemoryLock); + lock_count++) { + UNLOCK; + } + err = upl_transpose(redirUPL2, mapping->fRedirUPL); + for (; + lock_count; + lock_count--) { + LOCK; + } - _memory->retain(); - memory = _memory; - _superMap->retain(); - superMap = _superMap; + if (kIOReturnSuccess != err) + { + IOLog("upl_transpose(%x)\n", err); + err = kIOReturnSuccess; + } - offset = _offset; - if( _length) - length = _length; + if (redirUPL2) + { + upl_commit(redirUPL2, NULL, 0); + upl_deallocate(redirUPL2); + redirUPL2 = 0; + } + { + // swap the memEntries since they now refer to different vm_objects + IOMemoryReference * me = _memRef; + _memRef = mapping->fMemory->_memRef; + mapping->fMemory->_memRef = me; + } + if (pager) + err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options ); + } + while (false); + } + // upl_transpose> // else - length = _memory->getLength(); + { + err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); +#if IOTRACKING + if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) + { + // only dram maps in the default on developement case + IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength); + } +#endif /* IOTRACKING */ + if ((err == KERN_SUCCESS) && pager) + { + err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options); - options = superMap->getMapOptions(); - logical = superMap->getVirtualAddress() + offset; + if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0); + else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) + { + mapping->fOptions |= ((_flags & 
kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); + } + } + } - return( true ); + return (err); } -bool _IOMemoryMap::initWithDescriptor( - IOMemoryDescriptor * _memory, - task_t intoTask, - IOVirtualAddress toAddress, - IOOptionBits _options, - IOByteCount _offset, - IOByteCount _length ) +#if IOTRACKING +IOReturn +IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task, + mach_vm_address_t * address, mach_vm_size_t * size) { - bool ok; - - if( (!_memory) || (!intoTask) || !super::init()) - return( false); - - if( (_offset + _length) > _memory->getLength()) - return( false); - - addressMap = get_task_map(intoTask); - if( !addressMap) - return( false); - vm_map_reference(addressMap); +#define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field)) - _memory->retain(); - memory = _memory; + IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking)); - offset = _offset; - if( _length) - length = _length; - else - length = _memory->getLength(); + if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady); - addressTask = intoTask; - logical = toAddress; - options = _options; + *task = map->fAddressTask; + *address = map->fAddress; + *size = map->fLength; - if( options & kIOMapStatic) - ok = true; - else - ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical, - options, offset, length )); - if( !ok) { - logical = 0; - memory->release(); - memory = 0; - vm_map_deallocate(addressMap); - addressMap = 0; - } - return( ok ); + return (kIOReturnSuccess); } +#endif /* IOTRACKING */ -struct IOMemoryDescriptorMapAllocRef -{ - ipc_port_t sharedMem; - vm_size_t size; - vm_offset_t mapped; - IOByteCount sourceOffset; - IOOptionBits options; -}; - -static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) +IOReturn IOGeneralMemoryDescriptor::doUnmap( + vm_map_t addressMap, + IOVirtualAddress __address, + IOByteCount __length ) { - IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref; - IOReturn err; - - do { - if( ref->sharedMem) { - vm_prot_t prot = VM_PROT_READ - | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE); - - // set memory entry cache - vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY; - switch (ref->options & kIOMapCacheMask) - { - case kIOMapInhibitCache: - SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode); - break; - - case kIOMapWriteThruCache: - SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode); - break; - - case kIOMapWriteCombineCache: - SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode); - break; - - case kIOMapCopybackCache: - SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode); - break; - - case kIOMapDefaultCache: - default: - SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode); - break; - } - - vm_size_t unused = 0; - - err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/, - memEntryCacheMode, NULL, ref->sharedMem ); - if (KERN_SUCCESS != err) - IOLog("MAP_MEM_ONLY failed %d\n", err); - - err = vm_map( map, - &ref->mapped, - ref->size, 0 /* mask */, - (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT), - ref->sharedMem, ref->sourceOffset, - false, // copy - prot, // cur - prot, // max - VM_INHERIT_NONE); - - if( KERN_SUCCESS != err) { - ref->mapped = 0; - continue; - } - - } else { - - err = vm_allocate( map, &ref->mapped, ref->size, - ((ref->options & kIOMapAnywhere) ? 
VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) - | VM_MAKE_TAG(VM_MEMORY_IOKIT) ); - - if( KERN_SUCCESS != err) { - ref->mapped = 0; - continue; - } - - // we have to make sure that these guys don't get copied if we fork. - err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE); - assert( KERN_SUCCESS == err ); - } - - } while( false ); - - return( err ); + return (super::doUnmap(addressMap, __address, __length)); } +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOReturn IOMemoryDescriptor::doMap( - vm_map_t addressMap, - IOVirtualAddress * atAddress, - IOOptionBits options, - IOByteCount sourceOffset, - IOByteCount length ) -{ - IOReturn err = kIOReturnSuccess; - memory_object_t pager; - vm_address_t logical; - IOByteCount pageOffset; - IOPhysicalAddress sourceAddr; - IOMemoryDescriptorMapAllocRef ref; +#undef super +#define super OSObject - ref.sharedMem = (ipc_port_t) _memEntry; - ref.sourceOffset = sourceOffset; - ref.options = options; +OSDefineMetaClassAndStructors( IOMemoryMap, OSObject ) - do { +OSMetaClassDefineReservedUnused(IOMemoryMap, 0); +OSMetaClassDefineReservedUnused(IOMemoryMap, 1); +OSMetaClassDefineReservedUnused(IOMemoryMap, 2); +OSMetaClassDefineReservedUnused(IOMemoryMap, 3); +OSMetaClassDefineReservedUnused(IOMemoryMap, 4); +OSMetaClassDefineReservedUnused(IOMemoryMap, 5); +OSMetaClassDefineReservedUnused(IOMemoryMap, 6); +OSMetaClassDefineReservedUnused(IOMemoryMap, 7); - if( 0 == length) - length = getLength(); +/* ex-inline function implementation */ +IOPhysicalAddress IOMemoryMap::getPhysicalAddress() + { return( getPhysicalSegment( 0, 0 )); } - sourceAddr = getSourceSegment( sourceOffset, NULL ); - assert( sourceAddr ); - pageOffset = sourceAddr - trunc_page_32( sourceAddr ); +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - ref.size = round_page_32( length + pageOffset ); +bool IOMemoryMap::init( + task_t intoTask, + mach_vm_address_t toAddress, + IOOptionBits _options, + mach_vm_size_t _offset, + mach_vm_size_t _length ) +{ + if (!intoTask) + return( false); - logical = *atAddress; - if( options & kIOMapAnywhere) - // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE - ref.mapped = 0; - else { - ref.mapped = trunc_page_32( logical ); - if( (logical - ref.mapped) != pageOffset) { - err = kIOReturnVMError; - continue; - } - } + if (!super::init()) + return(false); - if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) - err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); - else - err = IOMemoryDescriptorMapAlloc( addressMap, &ref ); + fAddressMap = get_task_map(intoTask); + if (!fAddressMap) + return(false); + vm_map_reference(fAddressMap); - if( err != KERN_SUCCESS) - continue; + fAddressTask = intoTask; + fOptions = _options; + fLength = _length; + fOffset = _offset; + fAddress = toAddress; - if( reserved) - pager = (memory_object_t) reserved->devicePager; - else - pager = MACH_PORT_NULL; + return (true); +} - if( !ref.sharedMem || pager ) - err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options ); +bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset) +{ + if (!_memory) + return(false); - } while( false ); + if (!fSuperMap) + { + if( (_offset + fLength) > _memory->getLength()) + return( false); + fOffset = _offset; + } - if( err != KERN_SUCCESS) { - if( ref.mapped) - doUnmap( addressMap, ref.mapped, ref.size ); - *atAddress = NULL; - } else - *atAddress = ref.mapped 
+ pageOffset; + _memory->retain(); + if (fMemory) + { + if (fMemory != _memory) + fMemory->removeMapping(this); + fMemory->release(); + } + fMemory = _memory; - return( err ); + return( true ); } -enum { - kIOMemoryRedirected = 0x00010000 -}; +IOReturn IOMemoryDescriptor::doMap( + vm_map_t __addressMap, + IOVirtualAddress * __address, + IOOptionBits options, + IOByteCount __offset, + IOByteCount __length ) +{ + return (kIOReturnUnsupported); +} IOReturn IOMemoryDescriptor::handleFault( + void * _pager, + mach_vm_size_t sourceOffset, + mach_vm_size_t length) +{ + if( kIOMemoryRedirected & _flags) + { +#if DEBUG + IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset); +#endif + do { + SLEEP; + } while( kIOMemoryRedirected & _flags ); + } + return (kIOReturnSuccess); +} + +IOReturn IOMemoryDescriptor::populateDevicePager( void * _pager, vm_map_t addressMap, - IOVirtualAddress address, - IOByteCount sourceOffset, - IOByteCount length, + mach_vm_address_t address, + mach_vm_size_t sourceOffset, + mach_vm_size_t length, IOOptionBits options ) { IOReturn err = kIOReturnSuccess; memory_object_t pager = (memory_object_t) _pager; - vm_size_t size; - vm_size_t bytes; - vm_size_t page; - IOByteCount pageOffset; - IOPhysicalLength segLen; + mach_vm_size_t size; + mach_vm_size_t bytes; + mach_vm_size_t page; + mach_vm_size_t pageOffset; + mach_vm_size_t pagerOffset; + IOPhysicalLength segLen, chunk; addr64_t physAddr; + IOOptionBits type; - if( !addressMap) { - - if( kIOMemoryRedirected & _flags) { -#ifdef DEBUG - IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset); -#endif - do { - SLEEP; - } while( kIOMemoryRedirected & _flags ); - } + type = _flags & kIOMemoryTypeMask; - return( kIOReturnSuccess ); + if (reserved->dp.pagerContig) + { + sourceOffset = 0; + pagerOffset = 0; } - physAddr = getPhysicalSegment64( sourceOffset, &segLen ); + physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ); assert( physAddr ); pageOffset = physAddr - trunc_page_64( physAddr ); + pagerOffset = sourceOffset; size = length + pageOffset; physAddr -= pageOffset; segLen += pageOffset; bytes = size; - do { + do + { // in the middle of the loop only map whole pages - if( segLen >= bytes) - segLen = bytes; - else if( segLen != trunc_page_32( segLen)) - err = kIOReturnVMError; - if( physAddr != trunc_page_64( physAddr)) - err = kIOReturnBadArgument; - -#ifdef DEBUG - if( kIOLogMapping & gIOKitDebug) - IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n", - addressMap, address + pageOffset, physAddr + pageOffset, - segLen - pageOffset); -#endif - - + if( segLen >= bytes) segLen = bytes; + else if (segLen != trunc_page(segLen)) err = kIOReturnVMError; + if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument; + if (kIOReturnSuccess != err) break; +#if DEBUG || DEVELOPMENT + if ((kIOMemoryTypeUPL != type) + && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) + { + OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen); + } +#endif /* DEBUG || DEVELOPMENT */ -#ifdef i386 - /* i386 doesn't support faulting on device memory yet */ - if( addressMap && (kIOReturnSuccess == err)) - err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options ); - assert( KERN_SUCCESS == err ); - if( err) - break; -#endif - - if( pager) { - if( reserved && reserved->pagerContig) { - IOPhysicalLength allLen; - addr64_t allPhys; - - allPhys = getPhysicalSegment64( 0, &allLen ); - assert( allPhys ); - err = 
device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) ); + chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size); + for (page = 0; + (page < segLen) && (KERN_SUCCESS == err); + page += chunk) + { + err = device_pager_populate_object(pager, pagerOffset, + (ppnum_t)(atop_64(physAddr + page)), chunk); + pagerOffset += chunk; + } - } else { + assert (KERN_SUCCESS == err); + if (err) break; - for( page = 0; - (page < segLen) && (KERN_SUCCESS == err); - page += page_size) { - err = device_pager_populate_object(pager, sourceOffset + page, - (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size); - } - } - assert( KERN_SUCCESS == err ); - if( err) - break; - } -#ifndef i386 - /* *** ALERT *** */ - /* *** Temporary Workaround *** */ - - /* This call to vm_fault causes an early pmap level resolution */ - /* of the mappings created above. Need for this is in absolute */ - /* violation of the basic tenet that the pmap layer is a cache. */ - /* Further, it implies a serious I/O architectural violation on */ - /* the part of some user of the mapping. As of this writing, */ - /* the call to vm_fault is needed because the NVIDIA driver */ - /* makes a call to pmap_extract. The NVIDIA driver needs to be */ - /* fixed as soon as possible. The NVIDIA driver should not */ - /* need to query for this info as it should know from the doMap */ - /* call where the physical memory is mapped. When a query is */ - /* necessary to find a physical mapping, it should be done */ - /* through an iokit call which includes the mapped memory */ - /* handle. This is required for machine architecture independence.*/ - - if(!(kIOMemoryRedirected & _flags)) { - vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0); + // This call to vm_fault causes an early pmap level resolution + // of the mappings created above for kernel mappings, since + // faulting in later can't take place from interrupt level. 
+ if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) + { + vm_fault(addressMap, + (vm_map_offset_t)trunc_page_64(address), + VM_PROT_READ|VM_PROT_WRITE, + FALSE, THREAD_UNINT, NULL, + (vm_map_offset_t)0); } - /* *** Temporary Workaround *** */ - /* *** ALERT *** */ -#endif sourceOffset += segLen - pageOffset; address += segLen; bytes -= segLen; pageOffset = 0; + } + while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ))); - } while( bytes - && (physAddr = getPhysicalSegment64( sourceOffset, &segLen ))); - - if( bytes) + if (bytes) err = kIOReturnBadArgument; - return( err ); + return (err); } IOReturn IOMemoryDescriptor::doUnmap( vm_map_t addressMap, - IOVirtualAddress logical, - IOByteCount length ) + IOVirtualAddress __address, + IOByteCount __length ) { - IOReturn err; - -#ifdef DEBUG - if( kIOLogMapping & gIOKitDebug) - kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n", - addressMap, logical, length ); -#endif + IOReturn err; + IOMemoryMap * mapping; + mach_vm_address_t address; + mach_vm_size_t length; - if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) { + if (__length) panic("doUnmap"); - if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) - addressMap = IOPageableMapForAddress( logical ); + mapping = (IOMemoryMap *) __address; + addressMap = mapping->fAddressMap; + address = mapping->fAddress; + length = mapping->fLength; - err = vm_deallocate( addressMap, logical, length ); + if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS; + else + { + if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) + addressMap = IOPageableMapForAddress( address ); +#if DEBUG + if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n", + addressMap, address, length ); +#endif + err = mach_vm_deallocate( addressMap, address, length ); + } - } else - err = kIOReturnSuccess; +#if IOTRACKING + IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking); +#endif /* IOTRACKING */ - return( err ); + return (err); } -IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect ) +IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) { - IOReturn err; - _IOMemoryMap * mapping = 0; + IOReturn err = kIOReturnSuccess; + IOMemoryMap * mapping = 0; OSIterator * iter; LOCK; + if( doRedirect) + _flags |= kIOMemoryRedirected; + else + _flags &= ~kIOMemoryRedirected; + do { if( (iter = OSCollectionIterator::withCollection( _mappings))) { - while( (mapping = (_IOMemoryMap *) iter->getNextObject())) - mapping->redirect( safeTask, redirect ); - iter->release(); - } + memory_object_t pager; + + if( reserved) + pager = (memory_object_t) reserved->dp.devicePager; + else + pager = MACH_PORT_NULL; + + while( (mapping = (IOMemoryMap *) iter->getNextObject())) + { + mapping->redirect( safeTask, doRedirect ); + if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) + { + err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache ); + } + } + + iter->release(); + } } while( false ); - if( redirect) - _flags |= kIOMemoryRedirected; - else { - _flags &= ~kIOMemoryRedirected; + if (!doRedirect) + { WAKEUP; } UNLOCK; +#ifndef __LP64__ // temporary binary compatibility IOSubMemoryDescriptor * subMem; if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) - err = subMem->redirect( safeTask, redirect ); + err = subMem->redirect( 
safeTask, doRedirect ); else - err = kIOReturnSuccess; + err = kIOReturnSuccess; +#endif /* !__LP64__ */ return( err ); } -IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect ) -{ - return( _parent->redirect( safeTask, redirect )); -} - -IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect ) +IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) { IOReturn err = kIOReturnSuccess; - if( superMap) { -// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect ); + if( fSuperMap) { +// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect ); } else { LOCK; - if( logical && addressMap - && (get_task_map( safeTask) != addressMap) - && (0 == (options & kIOMapStatic))) { - - IOUnmapPages( addressMap, logical, length ); - if( !redirect) { - err = vm_deallocate( addressMap, logical, length ); - err = memory->doMap( addressMap, &logical, - (options & ~kIOMapAnywhere) /*| kIOMapReserve*/, - offset, length ); - } else - err = kIOReturnSuccess; -#ifdef DEBUG - IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap); + + do + { + if (!fAddress) + break; + if (!fAddressMap) + break; + + if ((!safeTask || (get_task_map(safeTask) != fAddressMap)) + && (0 == (fOptions & kIOMapStatic))) + { + IOUnmapPages( fAddressMap, fAddress, fLength ); + err = kIOReturnSuccess; +#if DEBUG + IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap); #endif - } - UNLOCK; + } + else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) + { + IOOptionBits newMode; + newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache); + IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode); + } + } + while (false); + UNLOCK; } + if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) + || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) + && safeTask + && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) + fMemory->redirect(safeTask, doRedirect); + return( err ); } -IOReturn _IOMemoryMap::unmap( void ) +IOReturn IOMemoryMap::unmap( void ) { IOReturn err; LOCK; - if( logical && addressMap && (0 == superMap) - && (0 == (options & kIOMapStatic))) { + if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory + && (0 == (kIOMapStatic & fOptions))) { - err = memory->doUnmap( addressMap, logical, length ); - vm_map_deallocate(addressMap); - addressMap = 0; + err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0); } else err = kIOReturnSuccess; - logical = 0; + if (fAddressMap) + { + vm_map_deallocate(fAddressMap); + fAddressMap = 0; + } + + fAddress = 0; UNLOCK; return( err ); } -void _IOMemoryMap::taskDied( void ) +void IOMemoryMap::taskDied( void ) { LOCK; - if( addressMap) { - vm_map_deallocate(addressMap); - addressMap = 0; + if (fUserClientUnmap) unmap(); +#if IOTRACKING + else IOTrackingRemoveUser(gIOMapTracking, &fTracking); +#endif /* IOTRACKING */ + + if( fAddressMap) { + vm_map_deallocate(fAddressMap); + fAddressMap = 0; } - addressTask = 0; - logical = 0; + fAddressTask = 0; + fAddress = 0; UNLOCK; } +IOReturn IOMemoryMap::userClientUnmap( void ) +{ + fUserClientUnmap = true; + return (kIOReturnSuccess); +} + // Overload the release mechanism. All mappings must be a member // of a memory descriptors _mappings set. This means that we // always have 2 references on a mapping. When either of these mappings // are released we need to free ourselves. 
-void _IOMemoryMap::taggedRelease(const void *tag) const +void IOMemoryMap::taggedRelease(const void *tag) const { LOCK; super::taggedRelease(tag, 2); UNLOCK; } -void _IOMemoryMap::free() +void IOMemoryMap::free() { unmap(); - if( memory) { + if (fMemory) + { + LOCK; + fMemory->removeMapping(this); + UNLOCK; + fMemory->release(); + } + + if (fOwner && (fOwner != fMemory)) + { LOCK; - memory->removeMapping( this); + fOwner->removeMapping(this); UNLOCK; - memory->release(); } - if( superMap) - superMap->release(); + if (fSuperMap) + fSuperMap->release(); + + if (fRedirUPL) { + upl_commit(fRedirUPL, NULL, 0); + upl_deallocate(fRedirUPL); + } + + super::free(); +} + +IOByteCount IOMemoryMap::getLength() +{ + return( fLength ); +} + +IOVirtualAddress IOMemoryMap::getVirtualAddress() +{ +#ifndef __LP64__ + if (fSuperMap) + fSuperMap->getVirtualAddress(); + else if (fAddressMap + && vm_map_is_64bit(fAddressMap) + && (sizeof(IOVirtualAddress) < 8)) + { + OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress); + } +#endif /* !__LP64__ */ - super::free(); + return (fAddress); } -IOByteCount _IOMemoryMap::getLength() +#ifndef __LP64__ +mach_vm_address_t IOMemoryMap::getAddress() { - return( length ); + return( fAddress); } -IOVirtualAddress _IOMemoryMap::getVirtualAddress() +mach_vm_size_t IOMemoryMap::getSize() { - return( logical); + return( fLength ); } +#endif /* !__LP64__ */ + -task_t _IOMemoryMap::getAddressTask() +task_t IOMemoryMap::getAddressTask() { - if( superMap) - return( superMap->getAddressTask()); + if( fSuperMap) + return( fSuperMap->getAddressTask()); else - return( addressTask); + return( fAddressTask); } -IOOptionBits _IOMemoryMap::getMapOptions() +IOOptionBits IOMemoryMap::getMapOptions() { - return( options); + return( fOptions); } -IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor() +IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor() { - return( memory ); + return( fMemory ); } -_IOMemoryMap * _IOMemoryMap::copyCompatible( - IOMemoryDescriptor * owner, - task_t task, - IOVirtualAddress toAddress, - IOOptionBits _options, - IOByteCount _offset, - IOByteCount _length ) +IOMemoryMap * IOMemoryMap::copyCompatible( + IOMemoryMap * newMapping ) { - _IOMemoryMap * mapping; + task_t task = newMapping->getAddressTask(); + mach_vm_address_t toAddress = newMapping->fAddress; + IOOptionBits _options = newMapping->fOptions; + mach_vm_size_t _offset = newMapping->fOffset; + mach_vm_size_t _length = newMapping->fLength; - if( (!task) || (task != getAddressTask())) + if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) return( 0 ); - if( (options ^ _options) & kIOMapReadOnly) + if( (fOptions ^ _options) & kIOMapReadOnly) return( 0 ); if( (kIOMapDefaultCache != (_options & kIOMapCacheMask)) - && ((options ^ _options) & kIOMapCacheMask)) + && ((fOptions ^ _options) & kIOMapCacheMask)) return( 0 ); - if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress)) + if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) return( 0 ); - if( _offset < offset) + if( _offset < fOffset) return( 0 ); - _offset -= offset; + _offset -= fOffset; - if( (_offset + _length) > length) + if( (_offset + _length) > fLength) return( 0 ); - if( (length == _length) && (!_offset)) { - retain(); - mapping = this; + retain(); + if( (fLength == _length) && (!_offset)) + { + newMapping = this; + } + else + { + newMapping->fSuperMap = this; + newMapping->fOffset = fOffset + _offset; + newMapping->fAddress = fAddress + _offset; 
+ } + + return( newMapping ); +} - } else { - mapping = new _IOMemoryMap; - if( mapping - && !mapping->initCompatible( owner, this, _offset, _length )) { - mapping->release(); - mapping = 0; - } +IOReturn IOMemoryMap::wireRange( + uint32_t options, + mach_vm_size_t offset, + mach_vm_size_t length) +{ + IOReturn kr; + mach_vm_address_t start = trunc_page_64(fAddress + offset); + mach_vm_address_t end = round_page_64(fAddress + offset + length); + vm_prot_t prot; + + prot = (kIODirectionOutIn & options); + if (prot) + { + prot |= VM_PROT_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map)); + kr = vm_map_wire(fAddressMap, start, end, prot, FALSE); + } + else + { + kr = vm_map_unwire(fAddressMap, start, end, FALSE); } - return( mapping ); + return (kr); } -IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset, - IOPhysicalLength * length) + +IOPhysicalAddress +#ifdef __LP64__ +IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options) +#else /* !__LP64__ */ +IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length) +#endif /* !__LP64__ */ { IOPhysicalAddress address; LOCK; - address = memory->getPhysicalSegment( offset + _offset, length ); +#ifdef __LP64__ + address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options ); +#else /* !__LP64__ */ + address = fMemory->getPhysicalSegment( fOffset + _offset, _length ); +#endif /* !__LP64__ */ UNLOCK; return( address ); @@ -2123,13 +4146,19 @@ void IOMemoryDescriptor::initialize( void ) { if( 0 == gIOMemoryLock) gIOMemoryLock = IORecursiveLockAlloc(); + + gIOLastPage = IOGetLastPageNumber(); } void IOMemoryDescriptor::free( void ) { - if( _mappings) - _mappings->release(); + if( _mappings) _mappings->release(); + if (reserved) + { + IODelete(reserved, IOMemoryDescriptorReserved, 1); + reserved = NULL; + } super::free(); } @@ -2138,322 +4167,287 @@ IOMemoryMap * IOMemoryDescriptor::setMapping( IOVirtualAddress mapAddress, IOOptionBits options ) { - _IOMemoryMap * map; - - map = new _IOMemoryMap; - - LOCK; - - if( map - && !map->initWithDescriptor( this, intoTask, mapAddress, - options | kIOMapStatic, 0, getLength() )) { - map->release(); - map = 0; - } - - addMapping( map); - - UNLOCK; - - return( map); + return (createMappingInTask( intoTask, mapAddress, + options | kIOMapStatic, + 0, getLength() )); } IOMemoryMap * IOMemoryDescriptor::map( IOOptionBits options ) { - - return( makeMapping( this, kernel_task, 0, - options | kIOMapAnywhere, - 0, getLength() )); + return (createMappingInTask( kernel_task, 0, + options | kIOMapAnywhere, + 0, getLength() )); } -IOMemoryMap * IOMemoryDescriptor::map( - task_t intoTask, - IOVirtualAddress toAddress, +#ifndef __LP64__ +IOMemoryMap * IOMemoryDescriptor::map( + task_t intoTask, + IOVirtualAddress atAddress, IOOptionBits options, IOByteCount offset, IOByteCount length ) { - if( 0 == length) - length = getLength(); + if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) + { + OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()"); + return (0); + } - return( makeMapping( this, intoTask, toAddress, options, offset, length )); + return (createMappingInTask(intoTask, atAddress, + options, offset, length)); } +#endif /* !__LP64__ */ -IOMemoryMap * IOMemoryDescriptor::makeMapping( - IOMemoryDescriptor * owner, +IOMemoryMap * IOMemoryDescriptor::createMappingInTask( task_t intoTask, - IOVirtualAddress toAddress, + mach_vm_address_t atAddress, IOOptionBits 
options, - IOByteCount offset, - IOByteCount length ) + mach_vm_size_t offset, + mach_vm_size_t length) { - _IOMemoryMap * mapping = 0; - OSIterator * iter; - - LOCK; - - do { - // look for an existing mapping - if( (iter = OSCollectionIterator::withCollection( _mappings))) { - - while( (mapping = (_IOMemoryMap *) iter->getNextObject())) { + IOMemoryMap * result; + IOMemoryMap * mapping; - if( (mapping = mapping->copyCompatible( - owner, intoTask, toAddress, - options | kIOMapReference, - offset, length ))) - break; - } - iter->release(); - if( mapping) - continue; - } + if (0 == length) + length = getLength(); + mapping = new IOMemoryMap; - if( mapping || (options & kIOMapReference)) - continue; + if( mapping + && !mapping->init( intoTask, atAddress, + options, offset, length )) { + mapping->release(); + mapping = 0; + } - owner = this; + if (mapping) + result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0); + else + result = 0; - mapping = new _IOMemoryMap; - if( mapping - && !mapping->initWithDescriptor( owner, intoTask, toAddress, options, - offset, length )) { -#ifdef DEBUG - IOLog("Didn't make map %08lx : %08lx\n", offset, length ); +#if DEBUG + if (!result) + IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n", + this, atAddress, (uint32_t) options, offset, length); #endif - mapping->release(); - mapping = 0; - } - - } while( false ); - owner->addMapping( mapping); - - UNLOCK; - - return( mapping); -} - -void IOMemoryDescriptor::addMapping( - IOMemoryMap * mapping ) -{ - if( mapping) { - if( 0 == _mappings) - _mappings = OSSet::withCapacity(1); - if( _mappings ) - _mappings->setObject( mapping ); - } + return (result); } -void IOMemoryDescriptor::removeMapping( - IOMemoryMap * mapping ) +#ifndef __LP64__ // there is only a 64 bit version for LP64 +IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, + IOOptionBits options, + IOByteCount offset) { - if( _mappings) - _mappings->removeObject( mapping); + return (redirect(newBackingMemory, options, (mach_vm_size_t)offset)); } +#endif -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#undef super -#define super IOMemoryDescriptor - -OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor) - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent, - IOByteCount offset, IOByteCount length, - IODirection direction ) +IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, + IOOptionBits options, + mach_vm_size_t offset) { - if( !parent) - return( false); + IOReturn err = kIOReturnSuccess; + IOMemoryDescriptor * physMem = 0; - if( (offset + length) > parent->getLength()) - return( false); + LOCK; - /* - * We can check the _parent instance variable before having ever set it - * to an initial value because I/O Kit guarantees that all our instance - * variables are zeroed on an object's allocation. - */ + if (fAddress && fAddressMap) do + { + if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) + || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) + { + physMem = fMemory; + physMem->retain(); + } - if( !_parent) { - if( !super::init()) - return( false ); - } else { - /* - * An existing memory descriptor is being retargeted to - * point to somewhere else. Clean up our present state. 
- */ + if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) + { + upl_size_t size = round_page(fLength); + upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS + | UPL_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map)); + if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL, + NULL, NULL, + &flags)) + fRedirUPL = 0; + + if (physMem) + { + IOUnmapPages( fAddressMap, fAddress, fLength ); + if ((false)) + physMem->redirect(0, true); + } + } - _parent->release(); - _parent = 0; + if (newBackingMemory) + { + if (newBackingMemory != fMemory) + { + fOffset = 0; + if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this, + options | kIOMapUnique | kIOMapReference | kIOMap64Bit, + offset, fLength)) + err = kIOReturnError; + } + if (fRedirUPL) + { + upl_commit(fRedirUPL, NULL, 0); + upl_deallocate(fRedirUPL); + fRedirUPL = 0; + } + if ((false) && physMem) + physMem->redirect(0, false); + } } + while (false); - parent->retain(); - _parent = parent; - _start = offset; - _length = length; - _direction = direction; - _tag = parent->getTag(); - - return( true ); -} - -void IOSubMemoryDescriptor::free( void ) -{ - if( _parent) - _parent->release(); - - super::free(); -} - - -IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, - IOByteCount * length ) -{ - IOPhysicalAddress address; - IOByteCount actualLength; - - assert(offset <= _length); - - if( length) - *length = 0; - - if( offset >= _length) - return( 0 ); - - address = _parent->getPhysicalSegment( offset + _start, &actualLength ); - - if( address && length) - *length = min( _length - offset, actualLength ); - - return( address ); -} - -IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, - IOByteCount * length ) -{ - IOPhysicalAddress address; - IOByteCount actualLength; - - assert(offset <= _length); - - if( length) - *length = 0; - - if( offset >= _length) - return( 0 ); - - address = _parent->getSourceSegment( offset + _start, &actualLength ); + UNLOCK; - if( address && length) - *length = min( _length - offset, actualLength ); + if (physMem) + physMem->release(); - return( address ); + return (err); } -void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset, - IOByteCount * lengthOfSegment) +IOMemoryMap * IOMemoryDescriptor::makeMapping( + IOMemoryDescriptor * owner, + task_t __intoTask, + IOVirtualAddress __address, + IOOptionBits options, + IOByteCount __offset, + IOByteCount __length ) { - return( 0 ); -} +#ifndef __LP64__ + if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit"); +#endif /* !__LP64__ */ -IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset, - void * bytes, IOByteCount length) -{ - IOByteCount byteCount; + IOMemoryDescriptor * mapDesc = 0; + IOMemoryMap * result = 0; + OSIterator * iter; - assert(offset <= _length); + IOMemoryMap * mapping = (IOMemoryMap *) __address; + mach_vm_size_t offset = mapping->fOffset + __offset; + mach_vm_size_t length = mapping->fLength; - if( offset >= _length) - return( 0 ); + mapping->fOffset = offset; LOCK; - byteCount = _parent->readBytes( _start + offset, bytes, - min(length, _length - offset) ); - UNLOCK; - - return( byteCount ); -} - -IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset, - const void* bytes, IOByteCount length) -{ - IOByteCount byteCount; - assert(offset <= _length); + do + { + if (kIOMapStatic & 
options) + { + result = mapping; + addMapping(mapping); + mapping->setMemoryDescriptor(this, 0); + continue; + } - if( offset >= _length) - return( 0 ); + if (kIOMapUnique & options) + { + addr64_t phys; + IOByteCount physLen; - LOCK; - byteCount = _parent->writeBytes( _start + offset, bytes, - min(length, _length - offset) ); - UNLOCK; +// if (owner != this) continue; - return( byteCount ); -} + if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) + || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) + { + phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + if (!phys || (physLen < length)) + continue; + + mapDesc = IOMemoryDescriptor::withAddressRange( + phys, length, getDirection() | kIOMemoryMapperNone, NULL); + if (!mapDesc) + continue; + offset = 0; + mapping->fOffset = offset; + } + } + else + { + // look for a compatible existing mapping + if( (iter = OSCollectionIterator::withCollection(_mappings))) + { + IOMemoryMap * lookMapping; + while ((lookMapping = (IOMemoryMap *) iter->getNextObject())) + { + if ((result = lookMapping->copyCompatible(mapping))) + { + addMapping(result); + result->setMemoryDescriptor(this, offset); + break; + } + } + iter->release(); + } + if (result || (options & kIOMapReference)) + { + if (result != mapping) + { + mapping->release(); + mapping = NULL; + } + continue; + } + } -IOReturn IOSubMemoryDescriptor::prepare( - IODirection forDirection) -{ - IOReturn err; + if (!mapDesc) + { + mapDesc = this; + mapDesc->retain(); + } + IOReturn + kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 ); + if (kIOReturnSuccess == kr) + { + result = mapping; + mapDesc->addMapping(result); + result->setMemoryDescriptor(mapDesc, offset); + } + else + { + mapping->release(); + mapping = NULL; + } + } + while( false ); - LOCK; - err = _parent->prepare( forDirection); UNLOCK; - return( err ); + if (mapDesc) + mapDesc->release(); + + return (result); } -IOReturn IOSubMemoryDescriptor::complete( - IODirection forDirection) +void IOMemoryDescriptor::addMapping( + IOMemoryMap * mapping ) { - IOReturn err; - - LOCK; - err = _parent->complete( forDirection); - UNLOCK; - - return( err ); + if( mapping) + { + if( 0 == _mappings) + _mappings = OSSet::withCapacity(1); + if( _mappings ) + _mappings->setObject( mapping ); + } } -IOMemoryMap * IOSubMemoryDescriptor::makeMapping( - IOMemoryDescriptor * owner, - task_t intoTask, - IOVirtualAddress toAddress, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ) +void IOMemoryDescriptor::removeMapping( + IOMemoryMap * mapping ) { - IOMemoryMap * mapping; - - mapping = (IOMemoryMap *) _parent->makeMapping( - _parent, intoTask, - toAddress - (_start + offset), - options | kIOMapReference, - _start + offset, length ); - - if( !mapping) - mapping = (IOMemoryMap *) _parent->makeMapping( - _parent, intoTask, - toAddress, - options, _start + offset, length ); - - if( !mapping) - mapping = super::makeMapping( owner, intoTask, toAddress, options, - offset, length ); - - return( mapping ); + if( _mappings) + _mappings->removeObject( mapping); } -/* ick */ - +#ifndef __LP64__ +// obsolete initializers +// - initWithOptions is the designated initializer bool -IOSubMemoryDescriptor::initWithAddress(void * address, +IOMemoryDescriptor::initWithAddress(void * address, IOByteCount length, IODirection direction) { @@ -2461,7 +4455,7 @@ IOSubMemoryDescriptor::initWithAddress(void * address, } bool -IOSubMemoryDescriptor::initWithAddress(vm_address_t address, 
+IOMemoryDescriptor::initWithAddress(IOVirtualAddress address, IOByteCount length, IODirection direction, task_t task) @@ -2470,7 +4464,7 @@ IOSubMemoryDescriptor::initWithAddress(vm_address_t address, } bool -IOSubMemoryDescriptor::initWithPhysicalAddress( +IOMemoryDescriptor::initWithPhysicalAddress( IOPhysicalAddress address, IOByteCount length, IODirection direction ) @@ -2479,7 +4473,7 @@ IOSubMemoryDescriptor::initWithPhysicalAddress( } bool -IOSubMemoryDescriptor::initWithRanges( +IOMemoryDescriptor::initWithRanges( IOVirtualRange * ranges, UInt32 withCount, IODirection direction, @@ -2490,7 +4484,7 @@ IOSubMemoryDescriptor::initWithRanges( } bool -IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, +IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, IODirection direction, bool asReference) @@ -2498,24 +4492,37 @@ IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, return( false ); } +void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset, + IOByteCount * lengthOfSegment) +{ + return( 0 ); +} +#endif /* !__LP64__ */ + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const { OSSymbol const *keys[2]; OSObject *values[2]; - IOVirtualRange *vcopy; + OSArray * array; + + struct SerData { + user_addr_t address; + user_size_t length; + } *vcopy; unsigned int index, nRanges; bool result; + IOOptionBits type = _flags & kIOMemoryTypeMask; + if (s == NULL) return false; - if (s->previouslySerialized(this)) return true; - // Pretend we are an array. - if (!s->addXMLStartTag(this, "array")) return false; + array = OSArray::withCapacity(4); + if (!array) return (false); nRanges = _rangesCount; - vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges); + vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges); if (vcopy == 0) return false; keys[0] = OSSymbol::withCString("address"); @@ -2530,8 +4537,12 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const // while the lock is held. LOCK; if (nRanges == _rangesCount) { + Ranges vec = _ranges; for (index = 0; index < nRanges; index++) { - vcopy[index] = _ranges.v[index]; + mach_vm_address_t addr; mach_vm_size_t len; + getAddrLenForInd(addr, len, type, vec, index); + vcopy[index].address = addr; + vcopy[index].length = len; } } else { // The descriptor changed out from under us. Give up. 
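As a rough sketch (not part of the diff; the helper name buildRangeArray and the use of uint64_t are illustrative assumptions), the reworked IOGeneralMemoryDescriptor::serialize() above now snapshots the ranges under the lock and serializes a single OSArray of per-range dictionaries keyed "address" and "length", instead of streaming per-range XML tags. The structure it ends up building is essentially:

    #include <stdint.h>
    #include <libkern/c++/OSArray.h>
    #include <libkern/c++/OSDictionary.h>
    #include <libkern/c++/OSNumber.h>

    // Illustrative only: build an OSArray of { "address", "length" } dictionaries,
    // one per memory range, mirroring what the new serialize() produces.
    static OSArray * buildRangeArray(const uint64_t * addrs, const uint64_t * lens, unsigned int count)
    {
        OSArray * array = OSArray::withCapacity(count);
        if (!array) return NULL;

        for (unsigned int i = 0; i < count; i++) {
            OSDictionary * dict = OSDictionary::withCapacity(2);
            OSNumber * a = OSNumber::withNumber(addrs[i], 8 * sizeof(addrs[i]));
            OSNumber * l = OSNumber::withNumber(lens[i],  8 * sizeof(lens[i]));

            if (dict && a && l
                && dict->setObject("address", a)
                && dict->setObject("length", l)) {
                array->setObject(dict);
            }
            if (a)    a->release();
            if (l)    l->release();
            if (dict) dict->release();
        }
        return array;   // caller serializes and releases the array
    }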
@@ -2543,12 +4554,14 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const for (index = 0; index < nRanges; index++) { - values[0] = OSNumber::withNumber(_ranges.v[index].address, sizeof(_ranges.v[index].address) * 8); + user_addr_t addr = vcopy[index].address; + IOByteCount len = (IOByteCount) vcopy[index].length; + values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8); if (values[0] == 0) { result = false; goto bail; } - values[1] = OSNumber::withNumber(_ranges.v[index].length, sizeof(_ranges.v[index].length) * 8); + values[1] = OSNumber::withNumber(len, sizeof(len) * 8); if (values[1] == 0) { result = false; goto bail; @@ -2558,19 +4571,18 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const result = false; goto bail; } + array->setObject(dict); + dict->release(); values[0]->release(); values[1]->release(); values[0] = values[1] = 0; - - result = dict->serialize(s); - dict->release(); - if (!result) { - goto bail; - } } - result = s->addXMLEndTag("array"); + + result = array->serialize(s); bail: + if (array) + array->release(); if (values[0]) values[0]->release(); if (values[1]) @@ -2580,67 +4592,31 @@ bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const if (keys[1]) keys[1]->release(); if (vcopy) - IOFree(vcopy, sizeof(IOVirtualRange) * nRanges); - return result; -} - -bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const -{ - if (!s) { - return (false); - } - if (s->previouslySerialized(this)) return true; - - // Pretend we are a dictionary. - // We must duplicate the functionality of OSDictionary here - // because otherwise object references will not work; - // they are based on the value of the object passed to - // previouslySerialized and addXMLStartTag. - - if (!s->addXMLStartTag(this, "dict")) return false; + IOFree(vcopy, sizeof(SerData) * nRanges); - char const *keys[3] = {"offset", "length", "parent"}; - - OSObject *values[3]; - values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8); - if (values[0] == 0) - return false; - values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8); - if (values[1] == 0) { - values[0]->release(); - return false; - } - values[2] = _parent; - - bool result = true; - for (int i=0; i<3; i++) { - if (!s->addString("") || - !s->addString(keys[i]) || - !s->addXMLEndTag("key") || - !values[i]->serialize(s)) { - result = false; - break; - } - } - values[0]->release(); - values[1]->release(); - if (!result) { - return false; - } - - return s->addXMLEndTag("dict"); + return result; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0); -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1); -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2); +#ifdef __LP64__ +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7); +#else /* !__LP64__ */ +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5); +OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6); 
+OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7); +#endif /* !__LP64__ */ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10); @@ -2651,5 +4627,6 @@ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15); /* ex-inline function implementation */ -IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress() +IOPhysicalAddress +IOMemoryDescriptor::getPhysicalAddress() { return( getPhysicalSegment( 0, 0 )); }
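As a rough usage illustration of the mapping path introduced above (assumptions: a driver-owned descriptor and the helper name mapIntoTask are illustrative, not from this change), createMappingInTask() is now the entry point; the old map(task, atAddress, ...) overload logs a backtrace and fails when called from a 64-bit task, and a length of zero maps the whole descriptor:

    #include <IOKit/IOMemoryDescriptor.h>

    // Illustrative sketch: wire a descriptor and map it anywhere in the given task.
    static IOMemoryMap * mapIntoTask(IOMemoryDescriptor * md, task_t task)
    {
        // Wire the memory before handing out a mapping.
        if (kIOReturnSuccess != md->prepare())
            return NULL;

        IOMemoryMap * map = md->createMappingInTask(task,
                                                    0,              // atAddress unused with kIOMapAnywhere
                                                    kIOMapAnywhere, // let the VM pick the address
                                                    0,              // offset into the descriptor
                                                    0);             // length == 0 maps the whole descriptor
        if (!map)
            md->complete();

        return map;   // caller releases the map, then calls complete()
    }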