+ IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
+
+ IODelete( ref, IOMemoryDescriptorReserved, 1 );
+
+ return( kIOReturnSuccess );
+}
+}; // end extern "C"
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Note: this inline function uses C++ reference arguments to return values.
+// This means that pointers are not passed, and NULL does not have to be
+// checked for, since a NULL reference is illegal.
+static inline void
+getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
+ UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
+{
+ assert(kIOMemoryTypeUIO == type
+ || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
+ || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
+ if (kIOMemoryTypeUIO == type) {
+ user_size_t us;
+ user_addr_t ad;
+ uio_getiov((uio_t) r.uio, ind, &ad, &us);
+ addr = ad;
+ len = us;
+ }
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
+ IOAddressRange cur = r.v64[ind];
+ addr = cur.address;
+ len = cur.length;
+ }
+#endif /* !__LP64__ */
+ else {
+ IOVirtualRange cur = r.v[ind];
+ addr = cur.address;
+ len = cur.length;
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
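+// Translate a kIOMemoryPurgeable* request into the vm_purgable_t control and
+// state values expected by the VM layer.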
+static IOReturn
+purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
+{
+ IOReturn err = kIOReturnSuccess;
+
+ *control = VM_PURGABLE_SET_STATE;
+
+ enum { kIOMemoryPurgeableControlMask = 15 };
+
+ switch (kIOMemoryPurgeableControlMask & newState)
+ {
+ case kIOMemoryPurgeableKeepCurrent:
+ *control = VM_PURGABLE_GET_STATE;
+ break;
+
+ case kIOMemoryPurgeableNonVolatile:
+ *state = VM_PURGABLE_NONVOLATILE;
+ break;
+ case kIOMemoryPurgeableVolatile:
+ *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
+ break;
+ case kIOMemoryPurgeableEmpty:
+ *state = VM_PURGABLE_EMPTY;
+ break;
+ default:
+ err = kIOReturnBadArgument;
+ break;
+ }
+ return (err);
+}
+
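+// Map a VM purgeable state back to the corresponding kIOMemoryPurgeable* value.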
+static IOReturn
+purgeableStateBits(int * state)
+{
+ IOReturn err = kIOReturnSuccess;
+
+ switch (VM_PURGABLE_STATE_MASK & *state)
+ {
+ case VM_PURGABLE_NONVOLATILE:
+ *state = kIOMemoryPurgeableNonVolatile;
+ break;
+ case VM_PURGABLE_VOLATILE:
+ *state = kIOMemoryPurgeableVolatile;
+ break;
+ case VM_PURGABLE_EMPTY:
+ *state = kIOMemoryPurgeableEmpty;
+ break;
+ default:
+ *state = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnNotReady;
+ break;
+ }
+ return (err);
+}
+
+
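+// Encode an IOKit cache mode into vm_prot_t bits via SET_MAP_MEM, for use when
+// creating memory entries.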
+static vm_prot_t
+vmProtForCacheMode(IOOptionBits cacheMode)
+{
+ vm_prot_t prot = 0;
+ switch (cacheMode)
+ {
+ case kIOInhibitCache:
+ SET_MAP_MEM(MAP_MEM_IO, prot);
+ break;
+
+ case kIOWriteThruCache:
+ SET_MAP_MEM(MAP_MEM_WTHRU, prot);
+ break;
+
+ case kIOWriteCombineCache:
+ SET_MAP_MEM(MAP_MEM_WCOMB, prot);
+ break;
+
+ case kIOCopybackCache:
+ SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
+ break;
+
+ case kIOCopybackInnerCache:
+ SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
+ break;
+
+ case kIODefaultCache:
+ default:
+ SET_MAP_MEM(MAP_MEM_NOOP, prot);
+ break;
+ }
+
+ return (prot);
+}
+
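+// Translate an IOKit cache mode into device pager flags; returns -1U for
+// kIODefaultCache, which callers must resolve before use.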
+static unsigned int
+pagerFlagsForCacheMode(IOOptionBits cacheMode)
+{
+ unsigned int pagerFlags = 0;
+ switch (cacheMode)
+ {
+ case kIOInhibitCache:
+ pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
+ break;
+
+ case kIOWriteThruCache:
+ pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
+ break;
+
+ case kIOWriteCombineCache:
+ pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
+ break;
+
+ case kIOCopybackCache:
+ pagerFlags = DEVICE_PAGER_COHERENT;
+ break;
+
+ case kIOCopybackInnerCache:
+ pagerFlags = DEVICE_PAGER_COHERENT;
+ break;
+
+ case kIODefaultCache:
+ default:
+ pagerFlags = -1U;
+ break;
+ }
+ return (pagerFlags);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
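+// An IOMemoryReference is a reference-counted, variable-length array of named
+// memory entries (send rights plus offset/size) covering the ranges of a
+// memory descriptor.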
+struct IOMemoryEntry
+{
+ ipc_port_t entry;
+ int64_t offset;
+ uint64_t size;
+};
+
+struct IOMemoryReference
+{
+ volatile SInt32 refCount;
+ vm_prot_t prot;
+ uint32_t capacity;
+ uint32_t count;
+ IOMemoryEntry entries[0];
+};
+
+enum
+{
+ kIOMemoryReferenceReuse = 0x00000001,
+ kIOMemoryReferenceWrite = 0x00000002,
+};
+
+SInt32 gIOMemoryReferenceCount;
+
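+// Allocate an IOMemoryReference with room for 'capacity' entries, or grow an
+// existing one ('realloc'), copying its contents and freeing the old block.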
+IOMemoryReference *
+IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
+{
+ IOMemoryReference * ref;
+ size_t newSize, oldSize, copySize;
+
+ newSize = (sizeof(IOMemoryReference)
+ - sizeof(ref->entries)
+ + capacity * sizeof(ref->entries[0]));
+ ref = (typeof(ref)) IOMalloc(newSize);
+ if (realloc)
+ {
+ oldSize = (sizeof(IOMemoryReference)
+ - sizeof(realloc->entries)
+ + realloc->capacity * sizeof(realloc->entries[0]));
+ copySize = oldSize;
+ if (copySize > newSize) copySize = newSize;
+ if (ref) bcopy(realloc, ref, copySize);
+ IOFree(realloc, oldSize);
+ }
+ else if (ref)
+ {
+ bzero(ref, sizeof(*ref));
+ ref->refCount = 1;
+ OSIncrementAtomic(&gIOMemoryReferenceCount);
+ }
+ if (!ref) return (0);
+ ref->capacity = capacity;
+ return (ref);
+}
+
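+// Release the send right held by each entry, then free the reference itself.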
+void
+IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
+{
+ IOMemoryEntry * entries;
+ size_t size;
+
+ entries = ref->entries + ref->count;
+ while (entries > &ref->entries[0])
+ {
+ entries--;
+ ipc_port_release_send(entries->entry);
+ }
+ size = (sizeof(IOMemoryReference)
+ - sizeof(ref->entries)
+ + ref->capacity * sizeof(ref->entries[0]));
+ IOFree(ref, size);
+
+ OSDecrementAtomic(&gIOMemoryReferenceCount);
+}
+
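+// Drop one reference; free the structure when the last reference goes away.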
+void
+IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
+{
+ if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
+}
+
+
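+// Build the list of named memory entries describing this descriptor: for
+// task-backed descriptors, coalesce adjacent ranges and create entries with
+// mach_make_memory_entry_64 (optionally reusing an existing _memRef); for
+// physical descriptors, set up a device pager and create a single entry.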
+IOReturn
+IOGeneralMemoryDescriptor::memoryReferenceCreate(
+ IOOptionBits options,
+ IOMemoryReference ** reference)
+{
+ enum { kCapacity = 4, kCapacityInc = 4 };
+
+ kern_return_t err;
+ IOMemoryReference * ref;
+ IOMemoryEntry * entries;
+ IOMemoryEntry * cloneEntries;
+ vm_map_t map;
+ ipc_port_t entry, cloneEntry;
+ vm_prot_t prot;
+ memory_object_size_t actualSize;
+ uint32_t rangeIdx;
+ uint32_t count;
+ mach_vm_address_t entryAddr, endAddr, entrySize;
+ mach_vm_size_t srcAddr, srcLen;
+ mach_vm_size_t nextAddr, nextLen;
+ mach_vm_size_t offset, remain;
+ IOByteCount physLen;
+ IOOptionBits type = (_flags & kIOMemoryTypeMask);
+ IOOptionBits cacheMode;
+ unsigned int pagerFlags;
+ vm_tag_t tag;
+
+ ref = memoryReferenceAlloc(kCapacity, NULL);
+ if (!ref) return (kIOReturnNoMemory);
+
+ tag = IOMemoryTag(kernel_map);
+ entries = &ref->entries[0];
+ count = 0;
+
+ offset = 0;
+ rangeIdx = 0;
+ if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ else
+ {
+ nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
+ nextLen = physLen;
+
+ // default cache mode for physical
+ if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
+ {
+ IOOptionBits mode;
+ pagerFlags = IODefaultCacheBits(nextAddr);
+ if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
+ {
+ if (DEVICE_PAGER_GUARDED & pagerFlags)
+ mode = kIOInhibitCache;
+ else
+ mode = kIOWriteCombineCache;
+ }
+ else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
+ mode = kIOWriteThruCache;
+ else
+ mode = kIOCopybackCache;
+ _flags |= (mode << kIOMemoryBufferCacheShift);
+ }
+ }
+
+ // cache mode & vm_prot
+ prot = VM_PROT_READ;
+ cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
+ prot |= vmProtForCacheMode(cacheMode);
+ // VM system requires write access to change cache mode
+ if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
+ if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
+ if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
+
+ if ((kIOMemoryReferenceReuse & options) && _memRef)
+ {
+ cloneEntries = &_memRef->entries[0];
+ prot |= MAP_MEM_NAMED_REUSE;
+ }
+
+ if (_task)
+ {
+ // virtual ranges
+
+ if (kIOMemoryBufferPageable & _flags)
+ {
+ // IOBufferMemoryDescriptor alloc - set flags for entry + object create
+ prot |= MAP_MEM_NAMED_CREATE;
+ if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
+ prot |= VM_PROT_WRITE;
+ map = NULL;
+ }
+ else map = get_task_map(_task);
+
+ remain = _length;
+ while (remain)
+ {
+ srcAddr = nextAddr;
+ srcLen = nextLen;
+ nextAddr = 0;
+ nextLen = 0;
+ // coalesce addr range
+ for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
+ {
+ getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ if ((srcAddr + srcLen) != nextAddr) break;
+ srcLen += nextLen;
+ }
+ entryAddr = trunc_page_64(srcAddr);
+ endAddr = round_page_64(srcAddr + srcLen);
+ do
+ {
+ entrySize = (endAddr - entryAddr);
+ if (!entrySize) break;
+ actualSize = entrySize;
+
+ cloneEntry = MACH_PORT_NULL;
+ if (MAP_MEM_NAMED_REUSE & prot)
+ {
+ if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
+ else prot &= ~MAP_MEM_NAMED_REUSE;
+ }
+
+ err = mach_make_memory_entry_64(map,
+ &actualSize, entryAddr, prot, &entry, cloneEntry);
+
+ if (KERN_SUCCESS != err) break;
+ if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
+
+ if (count >= ref->capacity)
+ {
+ ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
+ entries = &ref->entries[count];
+ }
+ entries->entry = entry;
+ entries->size = actualSize;
+ entries->offset = offset + (entryAddr - srcAddr);
+ entryAddr += actualSize;
+ if (MAP_MEM_NAMED_REUSE & prot)
+ {
+ if ((cloneEntries->entry == entries->entry)
+ && (cloneEntries->size == entries->size)
+ && (cloneEntries->offset == entries->offset)) cloneEntries++;
+ else prot &= ~MAP_MEM_NAMED_REUSE;
+ }
+ entries++;
+ count++;
+ }
+ while (true);
+ offset += srcLen;
+ remain -= srcLen;
+ }
+ }
+ else
+ {
+ // _task == 0, physical or kIOMemoryTypeUPL
+ memory_object_t pager;
+ vm_size_t size = ptoa_32(_pages);
+
+ if (!getKernelReserved()) panic("getKernelReserved");
+
+ reserved->dp.pagerContig = (1 == _rangesCount);
+ reserved->dp.memory = this;
+
+ pagerFlags = pagerFlagsForCacheMode(cacheMode);
+ if (-1U == pagerFlags) panic("phys is kIODefaultCache");
+ if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
+
+ pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
+ size, pagerFlags);
+ assert (pager);
+ if (!pager) err = kIOReturnVMError;
+ else
+ {
+ srcAddr = nextAddr;
+ entryAddr = trunc_page_64(srcAddr);
+ err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
+ size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
+ assert (KERN_SUCCESS == err);
+ if (KERN_SUCCESS != err) device_pager_deallocate(pager);
+ else
+ {
+ reserved->dp.devicePager = pager;
+ entries->entry = entry;
+ entries->size = size;
+ entries->offset = offset + (entryAddr - srcAddr);
+ entries++;
+ count++;
+ }
+ }
+ }
+
+ ref->count = count;
+ ref->prot = prot;
+
+ if (KERN_SUCCESS == err)
+ {
+ if (MAP_MEM_NAMED_REUSE & prot)
+ {
+ memoryReferenceFree(ref);
+ OSIncrementAtomic(&_memRef->refCount);
+ ref = _memRef;
+ }
+ }
+ else
+ {
+ memoryReferenceFree(ref);
+ ref = NULL;
+ }
+
+ *reference = ref;
+
+ return (err);
+}
+
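+// Carve out address space in 'map' for a mapping of ref->size bytes (no backing
+// object is entered here), honoring kIOMapAnywhere, and record the resulting
+// address and map in the ref.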
+kern_return_t
+IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
+{
+ IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
+ IOReturn err;
+ vm_map_offset_t addr;
+
+ addr = ref->mapped;
+
+ err = vm_map_enter_mem_object(map, &addr, ref->size,
+ (vm_map_offset_t) 0,
+ (((ref->options & kIOMapAnywhere)
+ ? VM_FLAGS_ANYWHERE
+ : VM_FLAGS_FIXED)
+ | VM_MAKE_TAG(ref->tag)),
+ IPC_PORT_NULL,
+ (memory_object_offset_t) 0,
+ false, /* copy */
+ ref->prot,
+ ref->prot,
+ VM_INHERIT_NONE);
+ if (KERN_SUCCESS == err)
+ {
+ ref->mapped = (mach_vm_address_t) addr;
+ ref->map = map;
+ }
+
+ return( err );
+}