/*
- * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
* HISTORY
*
*/
-// 45678901234567890123456789012345678901234567890123456789012345678901234567890
+
+
#include <sys/cdefs.h>
#include <IOKit/assert.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>
+#ifndef __LP64__
+#include <IOKit/IOSubMemoryDescriptor.h>
+#endif /* !__LP64__ */
+
#include <IOKit/IOKitDebug.h>
+#include <libkern/OSDebug.h>
#include "IOKitKernelInternal.h"
__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
-#include <vm/vm_shared_memory_server.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>
-#ifndef i386
#include <mach/vm_prot.h>
+#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
-struct phys_entry *pmap_find_physentry(ppnum_t pa);
-#endif
+#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);
memory_object_t
device_pager_setup(
memory_object_t pager,
- int device_handle,
+ uintptr_t device_handle,
vm_size_t size,
int flags);
void
#define kIOMaximumMappedIOByteCount (512*1024*1024)
-static IOMapper * gIOSystemMapper;
+static IOMapper * gIOSystemMapper = NULL;
+
static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
+ppnum_t gIOLastPage;
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
#define WAKEUP \
IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-class _IOMemoryMap : public IOMemoryMap
-{
- OSDeclareDefaultStructors(_IOMemoryMap)
-public:
- IOMemoryDescriptor * memory;
- IOMemoryMap * superMap;
- IOByteCount offset;
- IOByteCount length;
- IOVirtualAddress logical;
- task_t addressTask;
- vm_map_t addressMap;
- IOOptionBits options;
- upl_t redirUPL;
- ipc_port_t redirEntry;
- IOMemoryDescriptor * owner;
-
-protected:
- virtual void taggedRelease(const void *tag = 0) const;
- virtual void free();
-
-public:
-
- // IOMemoryMap methods
- virtual IOVirtualAddress getVirtualAddress();
- virtual IOByteCount getLength();
- virtual task_t getAddressTask();
- virtual IOMemoryDescriptor * getMemoryDescriptor();
- virtual IOOptionBits getMapOptions();
-
- virtual IOReturn unmap();
- virtual void taskDied();
-
- virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
- IOOptionBits options,
- IOByteCount offset = 0);
-
- virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
- IOByteCount * length);
-
- // for IOMemoryDescriptor use
- _IOMemoryMap * copyCompatible(
- IOMemoryDescriptor * owner,
- task_t intoTask,
- IOVirtualAddress toAddress,
- IOOptionBits options,
- IOByteCount offset,
- IOByteCount length );
-
- bool initCompatible(
- IOMemoryDescriptor * memory,
- IOMemoryMap * superMap,
- IOByteCount offset,
- IOByteCount length );
-
- bool initWithDescriptor(
- IOMemoryDescriptor * memory,
- task_t intoTask,
- IOVirtualAddress toAddress,
- IOOptionBits options,
- IOByteCount offset,
- IOByteCount length );
+#if 0
+#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
+#else
+#define DEBG(fmt, args...) {}
+#endif
- IOReturn redirect(
- task_t intoTask, bool redirect );
-};
+#define IOMD_DEBUG_DMAACTIVE 1
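+// With IOMD_DEBUG_DMAACTIVE set, dmaCommandOperation() counts active DMA
+// operations in __iomd_reservedA and complete() panics if any are still
+// outstanding when the descriptor is unwired.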
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct ioPLBlock {
upl_t fIOPL;
- vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
- vm_offset_t fPageInfo; // Pointer to page list or index into it
- ppnum_t fMappedBase; // Page number of first page in this iopl
- unsigned int fPageOffset; // Offset within first page of iopl
- unsigned int fFlags; // Flags
+ vm_address_t fPageInfo; // Pointer to page list or index into it
+ uint32_t fIOMDOffset; // The offset of this iopl in descriptor
+ ppnum_t fMappedBase; // Page number of first page in this iopl
+ unsigned int fPageOffset; // Offset within first page of iopl
+ unsigned int fFlags; // Flags
};
struct ioGMDData {
IOMapper *fMapper;
+ uint64_t fPreparationID;
unsigned int fPageCnt;
+#if __LP64__
+ // align arrays to 8 bytes so following macros work
+ unsigned int fPad;
+#endif
upl_page_info_t fPageList[];
ioPLBlock fBlocks[];
};
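+// fPageList and fBlocks share the trailing storage: the ioPLBlock records
+// are appended after the fPageCnt upl_page_info_t entries, so both arrays
+// are reached through the getDataP()/getIOPLList()/getNumIOPL() accessors
+// rather than through the zero-length array members directly.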
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
+#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
extern "C" {
kern_return_t device_data_action(
- int device_handle,
+ uintptr_t device_handle,
ipc_port_t device_pager,
vm_prot_t protection,
vm_object_offset_t offset,
}
kern_return_t device_close(
- int device_handle)
+ uintptr_t device_handle)
{
struct ExpansionData {
void * devicePager;
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
- assert(kIOMemoryTypePhysical == type || kIOMemoryTypeUIO == type
- || kIOMemoryTypeVirtual == type);
+ assert(kIOMemoryTypeUIO == type
+ || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
+ || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
if (kIOMemoryTypeUIO == type) {
user_size_t us;
uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
}
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
+ IOAddressRange cur = r.v64[ind];
+ addr = cur.address;
+ len = cur.length;
+ }
+#endif /* !__LP64__ */
else {
IOVirtualRange cur = r.v[ind];
addr = cur.address;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/*
- * withAddress:
- *
- * Create a new IOMemoryDescriptor. The buffer is a virtual address
- * relative to the specified task. If no task is supplied, the kernel
- * task is implied.
- */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void * address,
IOByteCount length,
IODirection direction)
{
return IOMemoryDescriptor::
- withAddress((vm_address_t) address, length, direction, kernel_task);
+ withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}
+#ifndef __LP64__
IOMemoryDescriptor *
-IOMemoryDescriptor::withAddress(vm_address_t address,
+IOMemoryDescriptor::withAddress(IOVirtualAddress address,
IOByteCount length,
IODirection direction,
task_t task)
}
return 0;
}
+#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
IOByteCount length,
IODirection direction )
{
+#ifdef __LP64__
+ return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
+#else /* !__LP64__ */
IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
if (self
&& !self->initWithPhysicalAddress(address, length, direction)) {
}
return self;
+#endif /* !__LP64__ */
}
+#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
UInt32 withCount,
}
return 0;
}
+#endif /* !__LP64__ */
+
+IOMemoryDescriptor *
+IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
+ mach_vm_size_t length,
+ IOOptionBits options,
+ task_t task)
+{
+ IOAddressRange range = { address, length };
+ return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
+}
+
+IOMemoryDescriptor *
+IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
+ UInt32 rangeCount,
+ IOOptionBits options,
+ task_t task)
+{
+ IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
+ if (that)
+ {
+ if (task)
+ options |= kIOMemoryTypeVirtual64;
+ else
+ options |= kIOMemoryTypePhysical64;
+
+ if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
+ return that;
+
+ that->release();
+ }
+
+ return 0;
+}
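+// Typical call (illustrative sketch; userVA, userLen and clientTask are
+// placeholder names):
+//   IOMemoryDescriptor * md =
+//       IOMemoryDescriptor::withAddressRange(userVA, userLen,
+//                                            kIODirectionOutIn, clientTask);
+// Passing a NULL task selects kIOMemoryTypePhysical64, i.e. the addresses
+// are treated as physical rather than virtual.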
/*
- * withRanges:
+ * withOptions:
*
* Create a new IOMemoryDescriptor. The buffer is made up of several
* virtual address ranges, from a given task.
return self;
}
-// Can't leave abstract but this should never be used directly,
bool IOMemoryDescriptor::initWithOptions(void * buffers,
UInt32 count,
UInt32 offset,
IOOptionBits options,
IOMapper * mapper)
{
- // @@@ gvdl: Should I panic?
- panic("IOMD::initWithOptions called\n");
- return 0;
+ return( false );
}
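+// Concrete subclasses (e.g. IOGeneralMemoryDescriptor) override
+// initWithOptions(); the abstract base simply reports failure.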
+#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
UInt32 withCount,
IOByteCount length,
IODirection direction)
{
- IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
-
- if (self && !self->initSubRange(of, offset, length, direction)) {
- self->release();
- self = 0;
- }
- return self;
+ return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
}
+#endif /* !__LP64__ */
-IOMemoryDescriptor * IOMemoryDescriptor::
- withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
+IOMemoryDescriptor *
+IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
IOGeneralMemoryDescriptor *origGenMD =
OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
return 0;
}
-IOMemoryDescriptor * IOGeneralMemoryDescriptor::
- withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
+IOMemoryDescriptor *
+IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
: get_task_map(_task);
memory_object_size_t actualSize = size;
- vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
+ vm_prot_t prot = VM_PROT_READ;
+#if CONFIG_EMBEDDED
+ if (kIODirectionOut != (kIODirectionOutIn & _flags))
+#endif
+ prot |= VM_PROT_WRITE;
+
if (_memEntry)
prot |= MAP_MEM_NAMED_REUSE;
return sharedMem;
} else {
#if IOASSERT
- IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
- (UInt64)range0Addr, (UInt32)actualSize, size);
+ IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
+ (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
#endif
ipc_port_release_send( sharedMem );
}
return MACH_PORT_NULL;
}
-/*
- * initWithAddress:
- *
- * Initialize an IOMemoryDescriptor. The buffer is a virtual address
- * relative to the specified task. If no task is supplied, the kernel
- * task is implied.
- *
- * An IOMemoryDescriptor can be re-used by calling initWithAddress or
- * initWithRanges again on an existing instance -- note this behavior
- * is not commonly supported in other I/O Kit classes, although it is
- * supported here.
- */
+#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void * address,
IOByteCount withLength,
IODirection withDirection)
{
- _singleRange.v.address = (vm_address_t) address;
+ _singleRange.v.address = (vm_offset_t) address;
_singleRange.v.length = withLength;
return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
-IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
+IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
IOByteCount withLength,
IODirection withDirection,
task_t withTask)
// Auto-prepare if this is a kernel memory descriptor as very few
// clients bother to prepare() kernel memory.
- // But it was not enforced so what are you going to do?
+ // But it was not enforced so what are you going to do?
if (task == kernel_task)
mdOpts |= kIOMemoryAutoPrepare;
}
return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
+#endif /* !__LP64__ */
/*
* initWithOptions:
switch (type) {
case kIOMemoryTypeUIO:
case kIOMemoryTypeVirtual:
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+#endif /* !__LP64__ */
assert(task);
if (!task)
return false;
- else
- break;
- case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
- mapper = kIOMapperNone;
+#ifndef __LP64__
+ if (vm_map_is_64bit(get_task_map(task))
+ && (kIOMemoryTypeVirtual == type)
+ && ((IOVirtualRange *) buffers)->address)
+ {
+ OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
+ return false;
+ }
+#endif /* !__LP64__ */
+ break;
+ case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
+#ifndef __LP64__
+ case kIOMemoryTypePhysical64:
+#endif /* !__LP64__ */
case kIOMemoryTypeUPL:
assert(!task);
break;
* An existing memory descriptor is being retargeted to point to
* somewhere else. Clean up our present state.
*/
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
+ {
+ while (_wireCount)
+ complete();
+ }
+ if (_ranges.v && !(kIOMemoryAsReference & _flags))
+ {
+ if (kIOMemoryTypeUIO == type)
+ uio_free((uio_t) _ranges.v);
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+#endif /* !__LP64__ */
+ else
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ }
- while (_wireCount)
- complete();
- if (_kernPtrAligned)
- unmapFromKernel();
- if (_ranges.v && _rangesIsAllocated)
- IODelete(_ranges.v, IOVirtualRange, _rangesCount);
if (_memEntry)
{ ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
+ if (_mappings)
+ _mappings->flushCollection();
}
else {
if (!super::init())
}
// Grab the appropriate mapper
- if (mapper == kIOMapperNone)
+ if (kIOMemoryMapperNone & options)
mapper = 0; // No Mapper
- else if (!mapper) {
+ else if (mapper == kIOMapperSystem) {
IOMapper::checkForSystemMapper();
gIOSystemMapper = mapper = IOMapper::gSystem;
}
+ // Temp binary compatibility for kIOMemoryThreadSafe
+ if (kIOMemoryReserved6156215 & options)
+ {
+ options &= ~kIOMemoryReserved6156215;
+ options |= kIOMemoryThreadSafe;
+ }
// Remove the dynamic internal use flags from the initial setting
options &= ~(kIOMemoryPreparedReadOnly);
_flags = options;
_task = task;
- // DEPRECATED variable initialisation
+#ifndef __LP64__
_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
- _position = 0;
- _kernPtrAligned = 0;
- _cachedPhysicalAddress = 0;
- _cachedVirtualAddress = 0;
+#endif /* !__LP64__ */
+ __iomd_reservedA = 0;
+ __iomd_reservedB = 0;
+ _highestPage = 0;
+
+ if (kIOMemoryThreadSafe & options)
+ {
+ if (!_prepareLock)
+ _prepareLock = IOLockAlloc();
+ }
+ else if (_prepareLock)
+ {
+ IOLockFree(_prepareLock);
+ _prepareLock = NULL;
+ }
+
if (kIOMemoryTypeUPL == type) {
ioGMDData *dataP;
dataP->fMapper = mapper;
dataP->fPageCnt = 0;
- _wireCount++; // UPLs start out life wired
+ // _wireCount++; // UPLs start out life wired
_length = count;
_pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
ioPLBlock iopl;
- upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
-
iopl.fIOPL = (upl_t) buffers;
+ upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
+
+ if (upl_get_size(iopl.fIOPL) < (count + offset))
+ panic("short external upl");
+
// Set the flag kIOPLOnDevice conveniently equal to 1
iopl.fFlags = pageList->device | kIOPLExternUPL;
iopl.fIOMDOffset = 0;
+
+ _highestPage = upl_get_highest_page(iopl.fIOPL);
+
if (!pageList->device) {
// Pre-compute the offset into the UPL's page list
pageList = &pageList[atop_32(offset)];
_memoryEntries->appendBytes(&iopl, sizeof(iopl));
}
else {
- // kIOMemoryTypeVirtual | kIOMemoryTypeUIO | kIOMemoryTypePhysical
+ // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
+ // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
// Initialize the memory descriptor
if (options & kIOMemoryAsReference) {
+#ifndef __LP64__
_rangesIsAllocated = false;
+#endif /* !__LP64__ */
// Hack assignment to get the buffer arg into _ranges.
// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
_ranges.v = (IOVirtualRange *) buffers;
}
else {
- assert(kIOMemoryTypeUIO != type);
-
+#ifndef __LP64__
_rangesIsAllocated = true;
- _ranges.v = IONew(IOVirtualRange, count);
- if (!_ranges.v)
- return false;
- bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
+#endif /* !__LP64__ */
+ switch (type)
+ {
+ case kIOMemoryTypeUIO:
+ _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
+ break;
+
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+ case kIOMemoryTypePhysical64:
+ if (count == 1
+ && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL) {
+ if (kIOMemoryTypeVirtual64 == type)
+ type = kIOMemoryTypeVirtual;
+ else
+ type = kIOMemoryTypePhysical;
+ _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
+ _rangesIsAllocated = false;
+ _ranges.v = &_singleRange.v;
+ _singleRange.v.address = ((IOAddressRange *) buffers)->address;
+ _singleRange.v.length = ((IOAddressRange *) buffers)->length;
+ break;
+ }
+ _ranges.v64 = IONew(IOAddressRange, count);
+ if (!_ranges.v64)
+ return false;
+ bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
+ break;
+#endif /* !__LP64__ */
+ case kIOMemoryTypeVirtual:
+ case kIOMemoryTypePhysical:
+ if (count == 1) {
+ _flags |= kIOMemoryAsReference;
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+ _ranges.v = &_singleRange.v;
+ } else {
+ _ranges.v = IONew(IOVirtualRange, count);
+ if (!_ranges.v)
+ return false;
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
+ break;
+ }
}
// Find starting address within the vector of ranges
UInt32 pages = 0;
for (unsigned ind = 0; ind < count; ind++) {
user_addr_t addr;
- UInt32 len;
+ IOPhysicalLength len;
// addr & len are returned by this function
getAddrLenForInd(addr, len, type, vec, ind);
pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
len += length;
- assert(len > length); // Check for 32 bit wrap around
+ assert(len >= length); // Check for 32 bit wrap around
length = len;
+
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
+ {
+ ppnum_t highPage = atop_64(addr + len - 1);
+ if (highPage > _highestPage)
+ _highestPage = highPage;
+ }
}
_length = length;
_pages = pages;
// Auto-prepare memory at creation time.
// Implied completion when descriptor is free-ed
- if (kIOMemoryTypePhysical == type)
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
_wireCount++; // Physical MDs are, by definition, wired
- else { /* kIOMemoryTypeVirtual | kIOMemoryTypeUIO */
+ else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
ioGMDData *dataP;
unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
*/
void IOGeneralMemoryDescriptor::free()
{
- LOCK;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
if( reserved)
+ {
+ LOCK;
reserved->memory = 0;
- UNLOCK;
+ UNLOCK;
+ }
- while (_wireCount)
- complete();
+ if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
+ {
+ while (_wireCount)
+ complete();
+ }
if (_memoryEntries)
_memoryEntries->release();
- if (_kernPtrAligned)
- unmapFromKernel();
- if (_ranges.v && _rangesIsAllocated)
- IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+ if (_ranges.v && !(kIOMemoryAsReference & _flags))
+ {
+ if (kIOMemoryTypeUIO == type)
+ uio_free((uio_t) _ranges.v);
+#ifndef __LP64__
+ else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
+ IODelete(_ranges.v64, IOAddressRange, _rangesCount);
+#endif /* !__LP64__ */
+ else
+ IODelete(_ranges.v, IOVirtualRange, _rangesCount);
+
+ _ranges.v = NULL;
+ }
if (reserved && reserved->devicePager)
device_pager_deallocate( (memory_object_t) reserved->devicePager );
if (_memEntry)
ipc_port_release_send( (ipc_port_t) _memEntry );
+ if (_prepareLock)
+ IOLockFree(_prepareLock);
+
super::free();
}
-/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
-/* DEPRECATED */ {
- panic("IOGMD::unmapFromKernel deprecated");
-/* DEPRECATED */ }
-/* DEPRECATED */
-/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
-/* DEPRECATED */ {
- panic("IOGMD::mapIntoKernel deprecated");
-/* DEPRECATED */ }
+#ifndef __LP64__
+void IOGeneralMemoryDescriptor::unmapFromKernel()
+{
+ panic("IOGMD::unmapFromKernel deprecated");
+}
+
+void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
+{
+ panic("IOGMD::mapIntoKernel deprecated");
+}
+#endif /* !__LP64__ */
/*
* getDirection:
*/
IODirection IOMemoryDescriptor::getDirection() const
{
- return _direction;
+#ifndef __LP64__
+ if (_direction)
+ return _direction;
+#endif /* !__LP64__ */
+ return (IODirection) (_flags & kIOMemoryDirectionMask);
}
/*
return( _tag);
}
+#ifndef __LP64__
// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
-IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
- IOByteCount * length )
+IOPhysicalAddress
+IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
- IOPhysicalAddress physAddr = 0;
+ addr64_t physAddr = 0;
if( prepare() == kIOReturnSuccess) {
- physAddr = getPhysicalSegment( offset, length );
+ physAddr = getPhysicalSegment64( offset, length );
complete();
}
- return( physAddr );
+ return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
+#endif /* !__LP64__ */
IOByteCount IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
- addr64_t dstAddr = (addr64_t) (UInt32) bytes;
+ addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
IOByteCount remaining;
// Assert that this entire I/O is within the available range
assert(offset < _length);
assert(offset + length <= _length);
if (offset >= _length) {
-IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
return 0;
}
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;
+
remaining = length = min(length, _length - offset);
while (remaining) { // (process another target segment?)
addr64_t srcAddr64;
IOByteCount srcLen;
- srcAddr64 = getPhysicalSegment64(offset, &srcLen);
+ srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
if (!srcAddr64)
break;
remaining -= srcLen;
}
+ if (kIOMemoryThreadSafe & _flags)
+ UNLOCK;
+
assert(!remaining);
return length - remaining;
IOByteCount IOMemoryDescriptor::writeBytes
(IOByteCount offset, const void *bytes, IOByteCount length)
{
- addr64_t srcAddr = (addr64_t) (UInt32) bytes;
+ addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
IOByteCount remaining;
// Assert that this entire I/O is within the available range
assert( !(kIOMemoryPreparedReadOnly & _flags) );
if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
-IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
return 0;
}
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;
+
remaining = length = min(length, _length - offset);
while (remaining) { // (process another target segment?)
addr64_t dstAddr64;
IOByteCount dstLen;
- dstAddr64 = getPhysicalSegment64(offset, &dstLen);
+ dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
if (!dstAddr64)
break;
remaining -= dstLen;
}
+ if (kIOMemoryThreadSafe & _flags)
+ UNLOCK;
+
assert(!remaining);
return length - remaining;
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
-/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
-/* DEPRECATED */ {
- panic("IOGMD::setPosition deprecated");
-/* DEPRECATED */ }
+#ifndef __LP64__
+void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
+{
+ panic("IOGMD::setPosition deprecated");
+}
+#endif /* !__LP64__ */
+
+static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
-IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
- (IOByteCount offset, IOByteCount *lengthOfSegment)
+uint64_t
+IOGeneralMemoryDescriptor::getPreparationID( void )
{
- IOPhysicalAddress address = 0;
- IOPhysicalLength length = 0;
+ ioGMDData *dataP;
+
+ if (!_wireCount)
+ return (kIOPreparationIDUnprepared);
-// assert(offset <= _length);
- if (offset < _length) // (within bounds?)
+ if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
+ return (kIOPreparationIDAlwaysPrepared);
+
+ if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
+ return (kIOPreparationIDUnprepared);
+
+ if (kIOPreparationIDUnprepared == dataP->fPreparationID)
{
- if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
- unsigned int ind;
+#if defined(__ppc__ )
+ dataP->fPreparationID = gIOMDPreparationID++;
+#else
+ dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
+#endif
+ }
+ return (dataP->fPreparationID);
+}
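+// Preparation IDs come from a global 64-bit counter, so every prepared
+// generation of a descriptor receives a distinct, never-reused value;
+// complete() resets fPreparationID to kIOPreparationIDUnprepared.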
- // Physical address based memory descriptor
+uint64_t
+IOMemoryDescriptor::getPreparationID( void )
+{
+ return (kIOPreparationIDUnsupported);
+}
- // Find offset within descriptor and make it relative
- // to the current _range.
- for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
- offset -= _ranges.p[ind].length;
-
- IOPhysicalRange cur = _ranges.p[ind];
- address = cur.address + offset;
- length = cur.length - offset;
-
- // see how far we can coalesce ranges
- for (++ind; ind < _rangesCount; ind++) {
- cur = _ranges.p[ind];
-
- if (address + length != cur.address)
- break;
-
- length += cur.length;
- }
+IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ if (kIOMDGetCharacteristics == op) {
- // @@@ gvdl: should be assert(address);
- // but can't as NVidia GeForce creates a bogus physical mem
- assert(address
- || /* nvidia */ (!_ranges.p[0].address && 1 == _rangesCount));
- assert(length);
- }
- else do {
- // We need wiring & we are wired.
- assert(_wireCount);
+ if (dataSize < sizeof(IOMDDMACharacteristics))
+ return kIOReturnUnderrun;
- if (!_wireCount)
- {
- panic("IOGMD: not wired for getPhysicalSegment()");
- continue;
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = _length;
+ data->fSGCount = _rangesCount;
+ data->fPages = _pages;
+ data->fDirection = getDirection();
+ if (!_wireCount)
+ data->fIsPrepared = false;
+ else {
+ data->fIsPrepared = true;
+ data->fHighestPage = _highestPage;
+ if (_memoryEntries) {
+ ioGMDData *gmdData = getDataP(_memoryEntries);
+ ioPLBlock *ioplList = getIOPLList(gmdData);
+ UInt count = getNumIOPL(_memoryEntries, gmdData);
+
+ data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
+ && ioplList[0].fMappedBase);
+ if (count == 1)
+ data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
}
+ else
+ data->fIsMapped = false;
+ }
- assert(_memoryEntries);
+ return kIOReturnSuccess;
- ioGMDData * dataP = getDataP(_memoryEntries);
- const ioPLBlock *ioplList = getIOPLList(dataP);
- UInt ind, numIOPLs = getNumIOPL(_memoryEntries, dataP);
- upl_page_info_t *pageList = getPageList(dataP);
+#if IOMD_DEBUG_DMAACTIVE
+ } else if (kIOMDSetDMAActive == op) {
+ IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
+ OSIncrementAtomic(&md->__iomd_reservedA);
+ } else if (kIOMDSetDMAInactive == op) {
+ IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
+ if (md->__iomd_reservedA)
+ OSDecrementAtomic(&md->__iomd_reservedA);
+ else
+ panic("kIOMDSetDMAInactive");
+#endif /* IOMD_DEBUG_DMAACTIVE */
+
+ } else if (!(kIOMDWalkSegments & op))
+ return kIOReturnBadArgument;
+
+ // Get the next segment
+ struct InternalState {
+ IOMDDMAWalkSegmentArgs fIO;
+ UInt fOffset2Index;
+ UInt fIndex;
+ UInt fNextOffset;
+ } *isP;
+
+ // Find the next segment
+ if (dataSize < sizeof(*isP))
+ return kIOReturnUnderrun;
+
+ isP = (InternalState *) vData;
+ UInt offset = isP->fIO.fOffset;
+ bool mapped = isP->fIO.fMapped;
+
+ if (offset >= _length)
+ return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
+
+ // Validate the previous offset
+ UInt ind, off2Ind = isP->fOffset2Index;
+ if ((kIOMDFirstSegment != op)
+ && offset
+ && (offset == isP->fNextOffset || off2Ind <= offset))
+ ind = isP->fIndex;
+ else
+ ind = off2Ind = 0; // Start from beginning
- assert(numIOPLs > 0);
+ UInt length;
+ UInt64 address;
+ if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
- // Scan through iopl info blocks looking for block containing offset
- for (ind = 1; ind < numIOPLs; ind++) {
- if (offset < ioplList[ind].fIOMDOffset)
- break;
- }
+ // Physical address based memory descriptor
+ const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
- // Go back to actual range as search goes past it
- ioPLBlock ioplInfo = ioplList[ind - 1];
+ // Find the range after the one that contains the offset
+ mach_vm_size_t len;
+ for (len = 0; off2Ind <= offset; ind++) {
+ len = physP[ind].length;
+ off2Ind += len;
+ }
- if (ind < numIOPLs)
- length = ioplList[ind].fIOMDOffset;
- else
- length = _length;
- length -= offset; // Remainder within iopl
+ // Calculate length within range and starting address
+ length = off2Ind - offset;
+ address = physP[ind - 1].address + len - length;
- // Subtract offset till this iopl in total list
- offset -= ioplInfo.fIOMDOffset;
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
- // This is a mapped IOPL so we just need to compute an offset
- // relative to the mapped base.
- if (ioplInfo.fMappedBase) {
- offset += (ioplInfo.fPageOffset & PAGE_MASK);
- address = ptoa_32(ioplInfo.fMappedBase) + offset;
- continue;
- }
+ // correct contiguous check overshoot
+ ind--;
+ off2Ind -= len;
+ }
+#ifndef __LP64__
+ else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
- // Currently the offset is rebased into the current iopl.
- // Now add the iopl 1st page offset.
- offset += ioplInfo.fPageOffset;
+ // Physical address based memory descriptor
+ const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
- // For external UPLs the fPageInfo field points directly to
- // the upl's upl_page_info_t array.
- if (ioplInfo.fFlags & kIOPLExternUPL)
- pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
- else
- pageList = &pageList[ioplInfo.fPageInfo];
+ // Find the range after the one that contains the offset
+ mach_vm_size_t len;
+ for (len = 0; off2Ind <= offset; ind++) {
+ len = physP[ind].length;
+ off2Ind += len;
+ }
- // Check for direct device non-paged memory
- if ( ioplInfo.fFlags & kIOPLOnDevice ) {
- address = ptoa_32(pageList->phys_addr) + offset;
- continue;
- }
+ // Calculate length within range and starting address
+ length = off2Ind - offset;
+ address = physP[ind - 1].address + len - length;
- // Now we need compute the index into the pageList
- ind = atop_32(offset);
- offset &= PAGE_MASK;
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
- IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
- address = ptoa_32(pageAddr) + offset;
-
- // Check for the remaining data in this upl being longer than the
- // remainder on the current page. This should be checked for
- // contiguous pages
- if (length > PAGE_SIZE - offset) {
- // See if the next page is contiguous. Stop looking when we hit
- // the end of this upl, which is indicated by the
- // contigLength >= length.
- IOByteCount contigLength = PAGE_SIZE - offset;
-
- // Look for contiguous segment
- while (contigLength < length
- && ++pageAddr == pageList[++ind].phys_addr) {
- contigLength += PAGE_SIZE;
- }
- if (length > contigLength)
- length = contigLength;
- }
-
- assert(address);
- assert(length);
+ // correct contiguous check overshoot
+ ind--;
+ off2Ind -= len;
+ }
+#endif /* !__LP64__ */
+ else do {
+ if (!_wireCount)
+ panic("IOGMD: not wired for the IODMACommand");
- } while (0);
+ assert(_memoryEntries);
- if (!address)
- length = 0;
- }
+ ioGMDData * dataP = getDataP(_memoryEntries);
+ const ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+ upl_page_info_t *pageList = getPageList(dataP);
- if (lengthOfSegment)
- *lengthOfSegment = length;
+ assert(numIOPLs > 0);
- return address;
-}
+ // Scan through iopl info blocks looking for block containing offset
+ while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
+ ind++;
-addr64_t IOMemoryDescriptor::getPhysicalSegment64
- (IOByteCount offset, IOByteCount *lengthOfSegment)
-{
- IOPhysicalAddress phys32;
- IOByteCount length;
- addr64_t phys64;
+ // Go back to actual range as search goes past it
+ ioPLBlock ioplInfo = ioplList[ind - 1];
+ off2Ind = ioplInfo.fIOMDOffset;
- phys32 = getPhysicalSegment(offset, lengthOfSegment);
- if (!phys32)
- return 0;
+ if (ind < numIOPLs)
+ length = ioplList[ind].fIOMDOffset;
+ else
+ length = _length;
+ length -= offset; // Remainder within iopl
+
+ // Subtract offset till this iopl in total list
+ offset -= off2Ind;
+
+ // If a mapped address is requested and this is a pre-mapped IOPL
+ // then just need to compute an offset relative to the mapped base.
+ if (mapped && ioplInfo.fMappedBase) {
+ offset += (ioplInfo.fPageOffset & PAGE_MASK);
+ address = ptoa_64(ioplInfo.fMappedBase) + offset;
+ continue; // Done; leave the do/while(false) now
+ }
- if (gIOSystemMapper)
- {
- IOByteCount origLen;
+ // The offset is rebased into the current iopl.
+ // Now add the iopl 1st page offset.
+ offset += ioplInfo.fPageOffset;
- phys64 = gIOSystemMapper->mapAddr(phys32);
- origLen = *lengthOfSegment;
- length = page_size - (phys64 & (page_size - 1));
- while ((length < origLen)
- && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
- length += page_size;
- if (length > origLen)
- length = origLen;
+ // For external UPLs the fPageInfo field points directly to
+ // the upl's upl_page_info_t array.
+ if (ioplInfo.fFlags & kIOPLExternUPL)
+ pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
+ else
+ pageList = &pageList[ioplInfo.fPageInfo];
- *lengthOfSegment = length;
+ // Check for direct device non-paged memory
+ if ( ioplInfo.fFlags & kIOPLOnDevice ) {
+ address = ptoa_64(pageList->phys_addr) + offset;
+ continue; // Done; leave the do/while(false) now
+ }
+
+ // Now we need compute the index into the pageList
+ UInt pageInd = atop_32(offset);
+ offset &= PAGE_MASK;
+
+ // Compute the starting address of this segment
+ IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
+ if (!pageAddr) {
+ panic("!pageList phys_addr");
}
- else
- phys64 = (addr64_t) phys32;
- return phys64;
+ address = ptoa_64(pageAddr) + offset;
+
+ // length is currently set to the length of the remainder of the iopl.
+ // We need to check that the remainder of the iopl is contiguous.
+ // This is indicated by pageList[pageInd].phys_addr being sequential.
+ IOByteCount contigLength = PAGE_SIZE - offset;
+ while (contigLength < length
+ && ++pageAddr == pageList[++pageInd].phys_addr)
+ {
+ contigLength += PAGE_SIZE;
+ }
+
+ if (contigLength < length)
+ length = contigLength;
+
+ assert(address);
+ assert(length);
+
+ } while (false);
+
+ // Update return values and state
+ isP->fIO.fIOVMAddr = address;
+ isP->fIO.fLength = length;
+ isP->fIndex = ind;
+ isP->fOffset2Index = off2Ind;
+ isP->fNextOffset = isP->fIO.fOffset + length;
+
+ return kIOReturnSuccess;
}
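+// Walk protocol: a caller issues kIOMDFirstSegment to start, then
+// kIOMDWalkSegments with fOffset advanced by the previous fLength; the
+// InternalState cursor (fIndex/fOffset2Index/fNextOffset) lets the common
+// sequential case resume without rescanning the ranges from the beginning.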
-IOPhysicalAddress IOGeneralMemoryDescriptor::
-getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+addr64_t
+IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
- IOPhysicalAddress address = 0;
- IOPhysicalLength length = 0;
- IOOptionBits type = _flags & kIOMemoryTypeMask;
+ IOReturn ret;
+ addr64_t address = 0;
+ IOByteCount length = 0;
+ IOMapper * mapper = gIOSystemMapper;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ if (lengthOfSegment)
+ *lengthOfSegment = 0;
+
+ if (offset >= _length)
+ return 0;
- assert(offset <= _length);
+ // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
+ // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
+ // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
+ // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
- if ( type == kIOMemoryTypeUPL)
- return super::getSourceSegment( offset, lengthOfSegment );
- else if ( offset < _length ) // (within bounds?)
+ if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
{
unsigned rangesIndex = 0;
Ranges vec = _ranges;
}
if (addr)
address = (IOPhysicalAddress) addr; // Truncate address to 32bit
- else
- length = 0;
}
+ else
+ {
+ IOMDDMAWalkSegmentState _state;
+ IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
- if ( lengthOfSegment ) *lengthOfSegment = length;
+ state->fOffset = offset;
+ state->fLength = _length - offset;
+ state->fMapped = (0 == (options & kIOMemoryMapperNone));
- return address;
-}
+ ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
+
+ if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
+ DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
+ ret, this, state->fOffset,
+ state->fIOVMAddr, state->fLength);
+ if (kIOReturnSuccess == ret)
+ {
+ address = state->fIOVMAddr;
+ length = state->fLength;
+ }
-/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
-/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
-/* DEPRECATED */ IOByteCount * lengthOfSegment)
-/* DEPRECATED */ {
- if (_task == kernel_task)
- return (void *) getSourceSegment(offset, lengthOfSegment);
- else
- panic("IOGMD::getVirtualSegment deprecated");
+ // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
+ // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
+
+ if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
+ {
+ if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
+ {
+ addr64_t origAddr = address;
+ IOByteCount origLen = length;
+
+ address = mapper->mapAddr(origAddr);
+ length = page_size - (address & (page_size - 1));
+ while ((length < origLen)
+ && ((address + length) == mapper->mapAddr(origAddr + length)))
+ length += page_size;
+ if (length > origLen)
+ length = origLen;
+ }
+#ifdef __LP64__
+ else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
+ {
+ panic("getPhysicalSegment not mapped for I/O");
+ }
+#endif /* __LP64__ */
+ }
+ }
- return 0;
-/* DEPRECATED */ }
-/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
+ if (!address)
+ length = 0;
+ if (lengthOfSegment)
+ *lengthOfSegment = length;
+ return (address);
+}
-IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
- IOOptionBits * oldState )
+#ifndef __LP64__
+addr64_t
+IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
- IOReturn err = kIOReturnSuccess;
- vm_purgable_t control;
- int state;
+ addr64_t address = 0;
- do
+ if (options & _kIOMemorySourceSegment)
{
- if (!_memEntry)
- {
- err = kIOReturnNotReady;
- break;
- }
+ address = getSourceSegment(offset, lengthOfSegment);
+ }
+ else if (options & kIOMemoryMapperNone)
+ {
+ address = getPhysicalSegment64(offset, lengthOfSegment);
+ }
+ else
+ {
+ address = getPhysicalSegment(offset, lengthOfSegment);
+ }
- control = VM_PURGABLE_SET_STATE;
- switch (newState)
- {
- case kIOMemoryPurgeableKeepCurrent:
- control = VM_PURGABLE_GET_STATE;
- break;
+ return (address);
+}
- case kIOMemoryPurgeableNonVolatile:
- state = VM_PURGABLE_NONVOLATILE;
- break;
- case kIOMemoryPurgeableVolatile:
- state = VM_PURGABLE_VOLATILE;
- break;
- case kIOMemoryPurgeableEmpty:
- state = VM_PURGABLE_EMPTY;
- break;
- default:
- err = kIOReturnBadArgument;
- break;
- }
+addr64_t
+IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
+}
- if (kIOReturnSuccess != err)
- break;
+IOPhysicalAddress
+IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ addr64_t address = 0;
+ IOByteCount length = 0;
- err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
+ address = getPhysicalSegment(offset, lengthOfSegment, 0);
+
+ if (lengthOfSegment)
+ length = *lengthOfSegment;
+
+ if ((address + length) > 0x100000000ULL)
+ {
+ panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
+ address, (long) length, (getMetaClass())->getClassName());
+ }
+
+ return ((IOPhysicalAddress) address);
+}
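+// The panic above keeps legacy 32-bit callers from silently truncating a
+// physical address beyond 4GB; 64-bit aware code should use the
+// three-argument getPhysicalSegment() (or getPhysicalSegment64()) instead.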
+
+addr64_t
+IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ IOPhysicalAddress phys32;
+ IOByteCount length;
+ addr64_t phys64;
+ IOMapper * mapper = 0;
+
+ phys32 = getPhysicalSegment(offset, lengthOfSegment);
+ if (!phys32)
+ return 0;
+
+ if (gIOSystemMapper)
+ mapper = gIOSystemMapper;
+
+ if (mapper)
+ {
+ IOByteCount origLen;
+
+ phys64 = mapper->mapAddr(phys32);
+ origLen = *lengthOfSegment;
+ length = page_size - (phys64 & (page_size - 1));
+ while ((length < origLen)
+ && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
+ length += page_size;
+ if (length > origLen)
+ length = origLen;
+
+ *lengthOfSegment = length;
+ }
+ else
+ phys64 = (addr64_t) phys32;
+
+ return phys64;
+}
+
+IOPhysicalAddress
+IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
+}
+
+IOPhysicalAddress
+IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
+{
+ return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
+}
+
+void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+ IOByteCount * lengthOfSegment)
+{
+ if (_task == kernel_task)
+ return (void *) getSourceSegment(offset, lengthOfSegment);
+ else
+ panic("IOGMD::getVirtualSegment deprecated");
+
+ return 0;
+}
+#endif /* !__LP64__ */
+
+IOReturn
+IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
+{
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics))
+ return kIOReturnUnderrun;
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength = getLength();
+ data->fSGCount = 0;
+ data->fDirection = getDirection();
+ if (IOMapper::gSystem)
+ data->fIsMapped = true;
+ data->fIsPrepared = true; // Assume prepared - fails safe
+ }
+ else if (kIOMDWalkSegments & op) {
+ if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
+ return kIOReturnUnderrun;
+
+ IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
+ IOByteCount offset = (IOByteCount) data->fOffset;
+
+ IOPhysicalLength length;
+ IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
+ if (data->fMapped && IOMapper::gSystem)
+ data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
+ else
+ data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
+ data->fLength = length;
+ }
+ else
+ return kIOReturnBadArgument;
+
+ return kIOReturnSuccess;
+}
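+// The base implementation services only kIOMDGetCharacteristics and
+// kIOMDWalkSegments, assumes the descriptor is prepared, and walks via
+// getPhysicalSegment(); IOGeneralMemoryDescriptor overrides this with the
+// iopl-aware version above.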
+
+static IOReturn
+purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
+{
+ IOReturn err = kIOReturnSuccess;
+
+ *control = VM_PURGABLE_SET_STATE;
+ switch (newState)
+ {
+ case kIOMemoryPurgeableKeepCurrent:
+ *control = VM_PURGABLE_GET_STATE;
+ break;
+
+ case kIOMemoryPurgeableNonVolatile:
+ *state = VM_PURGABLE_NONVOLATILE;
+ break;
+ case kIOMemoryPurgeableVolatile:
+ *state = VM_PURGABLE_VOLATILE;
+ break;
+ case kIOMemoryPurgeableEmpty:
+ *state = VM_PURGABLE_EMPTY;
+ break;
+ default:
+ err = kIOReturnBadArgument;
+ break;
+ }
+ return (err);
+}
+
+static IOReturn
+purgeableStateBits(int * state)
+{
+ IOReturn err = kIOReturnSuccess;
+
+ switch (*state)
+ {
+ case VM_PURGABLE_NONVOLATILE:
+ *state = kIOMemoryPurgeableNonVolatile;
+ break;
+ case VM_PURGABLE_VOLATILE:
+ *state = kIOMemoryPurgeableVolatile;
+ break;
+ case VM_PURGABLE_EMPTY:
+ *state = kIOMemoryPurgeableEmpty;
+ break;
+ default:
+ *state = kIOMemoryPurgeableNonVolatile;
+ err = kIOReturnNotReady;
+ break;
+ }
+ return (err);
+}
+
+IOReturn
+IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err = kIOReturnSuccess;
+ vm_purgable_t control;
+ int state;
+
+ if (_memEntry)
+ {
+ err = super::setPurgeable(newState, oldState);
+ }
+ else
+ {
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;
+ do
+ {
+ // Find the appropriate vm_map for the given task
+ vm_map_t curMap;
+ if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
+ {
+ err = kIOReturnNotReady;
+ break;
+ }
+ else
+ curMap = get_task_map(_task);
+
+ // can only do one range
+ Ranges vec = _ranges;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ user_addr_t addr;
+ IOByteCount len;
+ getAddrLenForInd(addr, len, type, vec, 0);
+
+ err = purgeableControlBits(newState, &control, &state);
+ if (kIOReturnSuccess != err)
+ break;
+ err = mach_vm_purgable_control(curMap, addr, control, &state);
+ if (oldState)
+ {
+ if (kIOReturnSuccess == err)
+ {
+ err = purgeableStateBits(&state);
+ *oldState = state;
+ }
+ }
+ }
+ while (false);
+ if (kIOMemoryThreadSafe & _flags)
+ UNLOCK;
+ }
+ return (err);
+}
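+// Example (sketch; 'md' is a placeholder descriptor):
+//   IOOptionBits oldState;
+//   IOReturn rc = md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
+// kIOMemoryPurgeableKeepCurrent only queries the state without changing it.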
+
+IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
+ IOOptionBits * oldState )
+{
+ IOReturn err = kIOReturnSuccess;
+ vm_purgable_t control;
+ int state;
+
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;
- if (oldState)
+ do
+ {
+ if (!_memEntry)
{
- if (kIOReturnSuccess == err)
- {
- switch (state)
- {
- case VM_PURGABLE_NONVOLATILE:
- state = kIOMemoryPurgeableNonVolatile;
- break;
- case VM_PURGABLE_VOLATILE:
- state = kIOMemoryPurgeableVolatile;
- break;
- case VM_PURGABLE_EMPTY:
- state = kIOMemoryPurgeableEmpty;
- break;
- default:
- state = kIOMemoryPurgeableNonVolatile;
- err = kIOReturnNotReady;
- break;
- }
- *oldState = state;
- }
+ err = kIOReturnNotReady;
+ break;
}
+ err = purgeableControlBits(newState, &control, &state);
+ if (kIOReturnSuccess != err)
+ break;
+ err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
+ if (oldState)
+ {
+ if (kIOReturnSuccess == err)
+ {
+ err = purgeableStateBits(&state);
+ *oldState = state;
+ }
+ }
}
while (false);
+ if (kIOMemoryThreadSafe & _flags)
+ UNLOCK;
+
return (err);
}
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
+static void SetEncryptOp(addr64_t pa, unsigned int count)
+{
+ ppnum_t page, end;
+
+ page = atop_64(round_page_64(pa));
+ end = atop_64(trunc_page_64(pa + count));
+ for (; page < end; page++)
+ {
+ pmap_clear_noencrypt(page);
+ }
+}
+
+static void ClearEncryptOp(addr64_t pa, unsigned int count)
+{
+ ppnum_t page, end;
+
+ page = atop_64(round_page_64(pa));
+ end = atop_64(trunc_page_64(pa + count));
+ for (; page < end; page++)
+ {
+ pmap_set_noencrypt(page);
+ }
+}
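+// Both helpers round inward (round_page_64 of the start, trunc_page_64 of
+// the end), so only pages wholly contained in [pa, pa + count) have their
+// no-encrypt pmap attribute changed.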
+
IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
IOByteCount offset, IOByteCount length )
{
case kIOMemoryIncoherentIOStore:
func = &dcache_incoherent_io_store64;
break;
+
+ case kIOMemorySetEncrypted:
+ func = &SetEncryptOp;
+ break;
+ case kIOMemoryClearEncrypted:
+ func = &ClearEncryptOp;
+ break;
}
if (!func)
return (kIOReturnUnsupported);
+ if (kIOMemoryThreadSafe & _flags)
+ LOCK;
+
remaining = length = min(length, getLength() - offset);
while (remaining)
// (process another target segment?)
addr64_t dstAddr64;
IOByteCount dstLen;
- dstAddr64 = getPhysicalSegment64(offset, &dstLen);
+ dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
if (!dstAddr64)
break;
remaining -= dstLen;
}
+ if (kIOMemoryThreadSafe & _flags)
+ UNLOCK;
+
return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
-#ifdef __ppc__
+#if defined(__ppc__) || defined(__arm__)
extern vm_offset_t static_memory_end;
#define io_kernel_static_end static_memory_end
#else
static kern_return_t
io_get_kernel_static_upl(
vm_map_t /* map */,
- vm_address_t offset,
+ uintptr_t offset,
vm_size_t *upl_size,
upl_t *upl,
upl_page_info_array_t page_list,
- unsigned int *count)
+ unsigned int *count,
+ ppnum_t *highest_page)
{
unsigned int pageCount, page;
ppnum_t phys;
+ ppnum_t highestPage = 0;
pageCount = atop_32(*upl_size);
if (pageCount > *count)
page_list[page].dirty = 0;
page_list[page].precious = 0;
page_list[page].device = 0;
+ if (phys > highestPage)
+ highestPage = phys;
}
+ *highest_page = highestPage;
+
return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
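+// Kernel-static memory has no backing VM object, so no real UPL can be
+// built; the page list is synthesized directly from the pmap, and the
+// highest physical page seen is reported for the descriptor's
+// _highestPage bookkeeping.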
IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
IOOptionBits type = _flags & kIOMemoryTypeMask;
- IOReturn error = kIOReturnNoMemory;
+ IOReturn error = kIOReturnCannotWire;
ioGMDData *dataP;
ppnum_t mapBase = 0;
IOMapper *mapper;
ipc_port_t sharedMem = (ipc_port_t) _memEntry;
assert(!_wireCount);
- assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type);
+ assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
if (_pages >= gIOMaximumMappedIOPageCount)
return kIOReturnNoResources;
dataP = 0; // May no longer be valid so lets not get tempted.
if (forDirection == kIODirectionNone)
- forDirection = _direction;
+ forDirection = getDirection();
int uplFlags; // This Mem Desc's default flags for upl creation
- switch (forDirection)
+ switch (kIODirectionOutIn & forDirection)
{
case kIODirectionOut:
// Pages do not need to be marked as dirty on commit
}
uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
+#ifdef UPL_NEED_32BIT_ADDR
+ if (kIODirectionPrepareToPhys32 & forDirection)
+ uplFlags |= UPL_NEED_32BIT_ADDR;
+#endif
+
// Find the appropriate vm_map for the given task
vm_map_t curMap;
if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
Ranges vec = _ranges;
unsigned int pageIndex = 0;
IOByteCount mdOffset = 0;
+ ppnum_t highestPage = 0;
for (UInt range = 0; range < _rangesCount; range++) {
ioPLBlock iopl;
user_addr_t startPage;
IOByteCount numBytes;
+ ppnum_t highPage = 0;
// Get the startPage address and length of vec[range]
getAddrLenForInd(startPage, numBytes, type, vec, range);
- iopl.fPageOffset = (short) startPage & PAGE_MASK;
+ iopl.fPageOffset = startPage & PAGE_MASK;
numBytes += iopl.fPageOffset;
startPage = trunc_page_64(startPage);
int ioplFlags = uplFlags;
upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
- vm_size_t ioplSize = round_page_32(numBytes);
+ vm_size_t ioplSize = round_page(numBytes);
unsigned int numPageInfo = atop_32(ioplSize);
if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
&ioplSize,
&iopl.fIOPL,
baseInfo,
- &numPageInfo);
+ &numPageInfo,
+ &highPage);
}
else if (sharedMem) {
error = memory_object_iopl_request(sharedMem,
assert(theMap);
error = vm_map_create_upl(theMap,
startPage,
- &ioplSize,
+ (upl_size_t*)&ioplSize,
&iopl.fIOPL,
baseInfo,
&numPageInfo,
if (error != KERN_SUCCESS)
goto abortExit;
- error = kIOReturnNoMemory;
+ if (iopl.fIOPL)
+ highPage = upl_get_highest_page(iopl.fIOPL);
+ if (highPage > highestPage)
+ highestPage = highPage;
+
+ error = kIOReturnCannotWire;
if (baseInfo->device) {
numPageInfo = 1;
}
else {
iopl.fFlags = 0;
- if (mapper)
+ if (mapper)
mapper->iovmInsert(mapBase, pageIndex,
baseInfo, numPageInfo);
}
}
}
+ _highestPage = highestPage;
+
return kIOReturnSuccess;
abortExit:
mapper->iovmFree(mapBase, _pages);
}
+ if (error == KERN_FAILURE)
+ error = kIOReturnCannotWire;
+
return error;
}
IOReturn error = kIOReturnSuccess;
IOOptionBits type = _flags & kIOMemoryTypeMask;
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
+ return kIOReturnSuccess;
+
+ if (_prepareLock)
+ IOLockLock(_prepareLock);
+
if (!_wireCount
- && (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) ) {
+ && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
error = wireVirtual(forDirection);
- if (error)
- return error;
}
- _wireCount++;
+ if (kIOReturnSuccess == error)
+ _wireCount++;
- return kIOReturnSuccess;
+ if (1 == _wireCount)
+ {
+ if (kIOMemoryClearEncrypt & _flags)
+ {
+ performOperation(kIOMemoryClearEncrypted, 0, _length);
+ }
+ }
+
+ if (_prepareLock)
+ IOLockUnlock(_prepareLock);
+
+ return error;
}
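+// Every successful prepare() must be balanced by a complete(); physical
+// descriptors short-circuit both, since such memory is wired by definition.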
/*
IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
- assert(_wireCount);
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
- if (!_wireCount)
- return kIOReturnSuccess;
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
+ return kIOReturnSuccess;
- _wireCount--;
- if (!_wireCount) {
- IOOptionBits type = _flags & kIOMemoryTypeMask;
+ if (_prepareLock)
+ IOLockLock(_prepareLock);
+
+ assert(_wireCount);
- if (kIOMemoryTypePhysical == type) {
- /* kIOMemoryTypePhysical */
- // DO NOTHING
+ if (_wireCount)
+ {
+ if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
+ {
+ performOperation(kIOMemorySetEncrypted, 0, _length);
}
- else {
- ioGMDData * dataP = getDataP(_memoryEntries);
- ioPLBlock *ioplList = getIOPLList(dataP);
+
+ _wireCount--;
+ if (!_wireCount)
+ {
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+ ioGMDData * dataP = getDataP(_memoryEntries);
+ ioPLBlock *ioplList = getIOPLList(dataP);
UInt count = getNumIOPL(_memoryEntries, dataP);
- if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
- dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
+#if IOMD_DEBUG_DMAACTIVE
+ if (__iomd_reservedA) panic("complete() while dma active");
+#endif /* IOMD_DEBUG_DMAACTIVE */
+
+ if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
+ dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
- // Only complete iopls that we created which are for TypeVirtual
- if (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) {
- for (UInt ind = 0; ind < count; ind++)
+ // Only complete iopls that we created which are for TypeVirtual
+ if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
+ for (UInt ind = 0; ind < count; ind++)
if (ioplList[ind].fIOPL) {
upl_commit(ioplList[ind].fIOPL, 0, 0);
upl_deallocate(ioplList[ind].fIOPL);
}
- }
+ }
+ (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
- (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
- }
+ dataP->fPreparationID = kIOPreparationIDUnprepared;
+ }
}
+
+ if (_prepareLock)
+ IOLockUnlock(_prepareLock);
+
return kIOReturnSuccess;
}
IOReturn IOGeneralMemoryDescriptor::doMap(
- vm_map_t addressMap,
- IOVirtualAddress * atAddress,
+ vm_map_t __addressMap,
+ IOVirtualAddress * __address,
IOOptionBits options,
- IOByteCount sourceOffset,
- IOByteCount length )
+ IOByteCount __offset,
+ IOByteCount __length )
+
{
- kern_return_t kr;
+#ifndef __LP64__
+ if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
+#endif /* !__LP64__ */
+
+ IOMemoryMap * mapping = (IOMemoryMap *) *__address;
+ mach_vm_size_t offset = mapping->fOffset + __offset;
+ mach_vm_size_t length = mapping->fLength;
+
+ kern_return_t kr = kIOReturnVMError;
ipc_port_t sharedMem = (ipc_port_t) _memEntry;
IOOptionBits type = _flags & kIOMemoryTypeMask;
// mapping source == dest? (could be much better)
if( _task
- && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
- && (1 == _rangesCount) && (0 == sourceOffset)
- && range0Addr && (length <= range0Len) ) {
- if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
- return kIOReturnOverrun; // Doesn't fit in 32bit return field
- else {
- *atAddress = range0Addr;
- return( kIOReturnSuccess );
- }
+ && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
+ && (1 == _rangesCount) && (0 == offset)
+ && range0Addr && (length <= range0Len) )
+ {
+ mapping->fAddress = range0Addr;
+ mapping->fOptions |= kIOMapStatic;
+
+ return( kIOReturnSuccess );
}
if( 0 == sharedMem) {
vm_size_t size = ptoa_32(_pages);
if( _task) {
-#ifndef i386
+
memory_object_size_t actualSize = size;
+ vm_prot_t prot = VM_PROT_READ;
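+	    // Ask for write access unless the mapping is read-only; even a
+	    // read-only mapping needs VM_PROT_WRITE on the entry when its
+	    // cache mode is being changed (the VM requires write access).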
+ if (!(kIOMapReadOnly & options))
+ prot |= VM_PROT_WRITE;
+ else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
+ prot |= VM_PROT_WRITE;
+
kr = mach_make_memory_entry_64(get_task_map(_task),
&actualSize, range0Addr,
- VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
+ prot, &sharedMem,
NULL );
- if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
+ if( (KERN_SUCCESS == kr) && (actualSize != round_page(size)))
+ {
+ // map will cross vm objects
#if IOASSERT
- IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
- range0Addr, (UInt32) actualSize, size);
+ IOLog("mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
+ range0Addr, (UInt64)actualSize, (UInt64)size);
#endif
kr = kIOReturnVMError;
ipc_port_release_send( sharedMem );
- }
-
- if( KERN_SUCCESS != kr)
-#endif /* !i386 */
sharedMem = MACH_PORT_NULL;
- } else do {
+ mach_vm_address_t address;
+ mach_vm_size_t pageOffset = (range0Addr & PAGE_MASK);
+
+ address = trunc_page_64(mapping->fAddress);
+ if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
+ {
+ kr = IOMemoryDescriptorMapCopy(mapping->fAddressMap,
+ get_task_map(_task), range0Addr,
+ options,
+ offset, &address, round_page_64(length + pageOffset));
+ if (kr == KERN_SUCCESS)
+ mapping->fAddress = address + pageOffset;
+ else
+                mapping->fAddress = 0;
+ }
+ }
+ }
+ else do
+ { // _task == 0, must be physical
memory_object_t pager;
unsigned int flags = 0;
addr64_t pa;
IOPhysicalLength segLen;
- pa = getPhysicalSegment64( sourceOffset, &segLen );
+ pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
if( !reserved) {
reserved = IONew( ExpansionData, 1 );
case kIOMapDefaultCache:
default:
flags = IODefaultCacheBits(pa);
+ if (DEVICE_PAGER_CACHE_INHIB & flags)
+ {
+ if (DEVICE_PAGER_GUARDED & flags)
+ mapping->fOptions |= kIOMapInhibitCache;
+ else
+ mapping->fOptions |= kIOMapWriteCombineCache;
+ }
+ else if (DEVICE_PAGER_WRITE_THROUGH & flags)
+ mapping->fOptions |= kIOMapWriteThruCache;
+ else
+ mapping->fOptions |= kIOMapCopybackCache;
break;
case kIOMapInhibitCache:
flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
- pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
+ pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
size, flags);
assert( pager );
size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
assert( KERN_SUCCESS == kr );
- if( KERN_SUCCESS != kr) {
+ if( KERN_SUCCESS != kr)
+ {
device_pager_deallocate( pager );
pager = MACH_PORT_NULL;
sharedMem = MACH_PORT_NULL;
_memEntry = (void *) sharedMem;
}
-
-#ifndef i386
- if( 0 == sharedMem)
- kr = kIOReturnVMError;
+ IOReturn result;
+ if (0 == sharedMem)
+ result = kr;
else
-#endif
- kr = super::doMap( addressMap, atAddress,
- options, sourceOffset, length );
+ result = super::doMap( __addressMap, __address,
+ options, __offset, __length );
- return( kr );
+ return( result );
}
IOReturn IOGeneralMemoryDescriptor::doUnmap(
vm_map_t addressMap,
- IOVirtualAddress logical,
- IOByteCount length )
+ IOVirtualAddress __address,
+ IOByteCount __length )
{
- // could be much better
- if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {
-
- IOOptionBits type = _flags & kIOMemoryTypeMask;
- user_addr_t range0Addr;
- IOByteCount range0Len;
-
- getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
- if (logical == range0Addr && length <= range0Len)
- return( kIOReturnSuccess );
- }
-
- return( super::doUnmap( addressMap, logical, length ));
+ return (super::doUnmap(addressMap, __address, __length));
}
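+// The old source==destination shortcut is no longer needed here: doMap()
+// now marks such mappings kIOMapStatic, and IOMemoryMap::unmap() skips
+// doUnmap() for static mappings.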
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
-
-/* inline function implementation */
-IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
- { return( getPhysicalSegment( 0, 0 )); }
+#undef super
+#define super OSObject
+OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
-#undef super
-#define super IOMemoryMap
+OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
+OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
+OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
+OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
+OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
+OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
+OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
+OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
-OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
+/* ex-inline function implementation */
+IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
+ { return( getPhysicalSegment( 0, 0 )); }
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-bool _IOMemoryMap::initCompatible(
- IOMemoryDescriptor * _memory,
- IOMemoryMap * _superMap,
- IOByteCount _offset,
- IOByteCount _length )
+bool IOMemoryMap::init(
+ task_t intoTask,
+ mach_vm_address_t toAddress,
+ IOOptionBits _options,
+ mach_vm_size_t _offset,
+ mach_vm_size_t _length )
{
-
- if( !super::init())
- return( false);
-
- if( (_offset + _length) > _superMap->getLength())
+ if (!intoTask)
return( false);
- _memory->retain();
- memory = _memory;
- _superMap->retain();
- superMap = _superMap;
+ if (!super::init())
+ return(false);
- offset = _offset;
- if( _length)
- length = _length;
- else
- length = _memory->getLength();
+ fAddressMap = get_task_map(intoTask);
+ if (!fAddressMap)
+ return(false);
+ vm_map_reference(fAddressMap);
- options = superMap->getMapOptions();
- logical = superMap->getVirtualAddress() + offset;
+ fAddressTask = intoTask;
+ fOptions = _options;
+ fLength = _length;
+ fOffset = _offset;
+ fAddress = toAddress;
- return( true );
+ return (true);
}
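+// Note: init() only records the target task and the requested address,
+// options, offset and length; the actual VM mapping is created later when
+// makeMapping() drives the owning descriptor's doMap().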
-bool _IOMemoryMap::initWithDescriptor(
- IOMemoryDescriptor * _memory,
- task_t intoTask,
- IOVirtualAddress toAddress,
- IOOptionBits _options,
- IOByteCount _offset,
- IOByteCount _length )
+bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
- bool ok;
- bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));
-
- if ((!_memory) || (!intoTask))
- return( false);
+ if (!_memory)
+ return(false);
- if( (_offset + _length) > _memory->getLength())
- return( false);
-
- if (!redir)
+ if (!fSuperMap)
{
- if (!super::init())
- return(false);
- addressMap = get_task_map(intoTask);
- if( !addressMap)
+ if( (_offset + fLength) > _memory->getLength())
return( false);
- vm_map_reference(addressMap);
- addressTask = intoTask;
- logical = toAddress;
- options = _options;
+ fOffset = _offset;
}
_memory->retain();
-
- offset = _offset;
- if( _length)
- length = _length;
- else
- length = _memory->getLength();
-
- if( options & kIOMapStatic)
- ok = true;
- else
- ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
- _options, offset, length ));
- if (ok || redir)
+ if (fMemory)
{
- if (memory)
- memory->release();
- memory = _memory;
- logical = toAddress;
- }
- else
- {
- _memory->release();
- if (!redir)
- {
- logical = 0;
- memory = 0;
- vm_map_deallocate(addressMap);
- addressMap = 0;
- }
+ if (fMemory != _memory)
+ fMemory->removeMapping(this);
+ fMemory->release();
}
+ fMemory = _memory;
- return( ok );
+ return( true );
}
-/* LP64todo - these need to expand */
struct IOMemoryDescriptorMapAllocRef
{
ipc_port_t sharedMem;
- vm_size_t size;
- vm_offset_t mapped;
- IOByteCount sourceOffset;
+ vm_map_t src_map;
+ mach_vm_offset_t src_address;
+ mach_vm_address_t mapped;
+ mach_vm_size_t size;
+ mach_vm_size_t sourceOffset;
IOOptionBits options;
};
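+// One request record for the allocation paths below: sharedMem names a mach
+// memory entry, src_map/src_address name a range to remap from another map,
+// and with neither set IOMemoryDescriptorMapAlloc falls through to a plain
+// mach_vm_allocate.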
IOReturn err;
do {
- if( ref->sharedMem) {
+ if( ref->sharedMem)
+ {
vm_prot_t prot = VM_PROT_READ
| ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
+ // VM system requires write access to change cache mode
+ if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
+ prot |= VM_PROT_WRITE;
+
// set memory entry cache
vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
switch (ref->options & kIOMapCacheMask)
if (KERN_SUCCESS != err)
IOLog("MAP_MEM_ONLY failed %d\n", err);
- err = vm_map( map,
+ err = mach_vm_map( map,
&ref->mapped,
ref->size, 0 /* mask */,
(( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
ref->mapped = 0;
continue;
}
-
- } else {
-
- err = vm_allocate( map, &ref->mapped, ref->size,
+ }
+ else if (ref->src_map)
+ {
+ vm_prot_t cur_prot, max_prot;
+ err = mach_vm_remap(map, &ref->mapped, ref->size, PAGE_MASK,
+ (ref->options & kIOMapAnywhere) ? TRUE : FALSE,
+ ref->src_map, ref->src_address,
+ FALSE /* copy */,
+ &cur_prot,
+ &max_prot,
+ VM_INHERIT_NONE);
+ if (KERN_SUCCESS == err)
+ {
+ if ((!(VM_PROT_READ & cur_prot))
+ || (!(kIOMapReadOnly & ref->options) && !(VM_PROT_WRITE & cur_prot)))
+ {
+ mach_vm_deallocate(map, ref->mapped, ref->size);
+ err = KERN_PROTECTION_FAILURE;
+ }
+ }
+ if (KERN_SUCCESS != err)
+ ref->mapped = 0;
+ }
+ else
+ {
+ err = mach_vm_allocate( map, &ref->mapped, ref->size,
((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
| VM_MAKE_TAG(VM_MEMORY_IOKIT) );
-
if( KERN_SUCCESS != err) {
ref->mapped = 0;
continue;
}
-
        // Make sure these allocations are not copied into a child on fork().
err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
assert( KERN_SUCCESS == err );
}
-
- } while( false );
+ }
+ while( false );
return( err );
}
+kern_return_t
+IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
+ mach_vm_size_t offset,
+ mach_vm_address_t * address, mach_vm_size_t length)
+{
+ IOReturn err;
+ IOMemoryDescriptorMapAllocRef ref;
+
+    ref.sharedMem    = entry;
+    ref.src_map      = NULL;
+ ref.sourceOffset = trunc_page_64(offset);
+ ref.options = options;
+ ref.size = length;
+
+ if (options & kIOMapAnywhere)
+ // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
+ ref.mapped = 0;
+ else
+ ref.mapped = *address;
+
+ if( ref.sharedMem && (map == kernel_map) && pageable)
+ err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
+ else
+ err = IOMemoryDescriptorMapAlloc( map, &ref );
+
+ *address = ref.mapped;
+ return (err);
+}
+
+kern_return_t
+IOMemoryDescriptorMapCopy(vm_map_t map,
+ vm_map_t src_map,
+ mach_vm_offset_t src_address,
+ IOOptionBits options,
+ mach_vm_size_t offset,
+ mach_vm_address_t * address, mach_vm_size_t length)
+{
+ IOReturn err;
+ IOMemoryDescriptorMapAllocRef ref;
+
+ ref.sharedMem = NULL;
+ ref.src_map = src_map;
+ ref.src_address = src_address;
+ ref.sourceOffset = trunc_page_64(offset);
+ ref.options = options;
+ ref.size = length;
+
+ if (options & kIOMapAnywhere)
+ // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
+ ref.mapped = 0;
+ else
+ ref.mapped = *address;
+
+ if (map == kernel_map)
+ err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
+ else
+ err = IOMemoryDescriptorMapAlloc(map, &ref);
+
+ *address = ref.mapped;
+ return (err);
+}
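+// The two wrappers above fill an IOMemoryDescriptorMapAllocRef and differ
+// only in how the source pages are named (memory entry port vs. src_map
+// range). Both send kernel_map requests through IOIteratePageableMaps when
+// appropriate and otherwise call IOMemoryDescriptorMapAlloc directly.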
IOReturn IOMemoryDescriptor::doMap(
- vm_map_t addressMap,
- IOVirtualAddress * atAddress,
+ vm_map_t __addressMap,
+ IOVirtualAddress * __address,
IOOptionBits options,
- IOByteCount sourceOffset,
- IOByteCount length )
+ IOByteCount __offset,
+ IOByteCount __length )
{
- IOReturn err = kIOReturnSuccess;
- memory_object_t pager;
- vm_address_t logical;
- IOByteCount pageOffset;
- IOPhysicalAddress sourceAddr;
- IOMemoryDescriptorMapAllocRef ref;
-
- ref.sharedMem = (ipc_port_t) _memEntry;
- ref.sourceOffset = sourceOffset;
- ref.options = options;
+#ifndef __LP64__
+ if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
+#endif /* !__LP64__ */
- do {
+ IOMemoryMap * mapping = (IOMemoryMap *) *__address;
+ mach_vm_size_t offset = mapping->fOffset + __offset;
+ mach_vm_size_t length = mapping->fLength;
- if( 0 == length)
- length = getLength();
+ IOReturn err = kIOReturnSuccess;
+ memory_object_t pager;
+ mach_vm_size_t pageOffset;
+ IOPhysicalAddress sourceAddr;
+ unsigned int lock_count;
- sourceAddr = getSourceSegment( sourceOffset, NULL );
- pageOffset = sourceAddr - trunc_page_32( sourceAddr );
+ do
+ {
+ sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
+ pageOffset = sourceAddr - trunc_page( sourceAddr );
- ref.size = round_page_32( length + pageOffset );
+ if( reserved)
+ pager = (memory_object_t) reserved->devicePager;
+ else
+ pager = MACH_PORT_NULL;
if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
{
- upl_t redirUPL2;
- vm_size_t size;
- int flags;
+ upl_t redirUPL2;
+ vm_size_t size;
+ int flags;
- _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
- ref.mapped = mapping->getVirtualAddress();
-
if (!_memEntry)
{
err = kIOReturnNotReadable;
continue;
}
- size = length;
+ size = round_page(mapping->fLength + pageOffset);
flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
| UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
&flags))
redirUPL2 = NULL;
- err = upl_transpose(redirUPL2, mapping->redirUPL);
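+           // Drop any recursive holds on gIOMemoryLock while upl_transpose()
+           // runs (it may block), then reacquire the same number of holds.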
+ for (lock_count = 0;
+ IORecursiveLockHaveLock(gIOMemoryLock);
+ lock_count++) {
+ UNLOCK;
+ }
+ err = upl_transpose(redirUPL2, mapping->fRedirUPL);
+ for (;
+ lock_count;
+ lock_count--) {
+ LOCK;
+ }
+
if (kIOReturnSuccess != err)
{
IOLog("upl_transpose(%x)\n", err);
{
// swap the memEntries since they now refer to different vm_objects
void * me = _memEntry;
- _memEntry = mapping->memory->_memEntry;
- mapping->memory->_memEntry = me;
+ _memEntry = mapping->fMemory->_memEntry;
+ mapping->fMemory->_memEntry = me;
}
+ if (pager)
+ err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
}
else
{
-
- logical = *atAddress;
- if( options & kIOMapAnywhere)
- // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
- ref.mapped = 0;
- else {
- ref.mapped = trunc_page_32( logical );
- if( (logical - ref.mapped) != pageOffset) {
+ mach_vm_address_t address;
+
+ if (!(options & kIOMapAnywhere))
+ {
+ address = trunc_page_64(mapping->fAddress);
+ if( (mapping->fAddress - address) != pageOffset)
+ {
err = kIOReturnVMError;
continue;
}
}
-
- if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
- err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
- else
- err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
- }
- if( err != KERN_SUCCESS)
- continue;
-
- if( reserved)
- pager = (memory_object_t) reserved->devicePager;
- else
- pager = MACH_PORT_NULL;
+ err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
+ options, (kIOMemoryBufferPageable & _flags),
+ offset, &address, round_page_64(length + pageOffset));
+ if( err != KERN_SUCCESS)
+ continue;
- if( !ref.sharedMem || pager )
- err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
+ if (!_memEntry || pager)
+ {
+ err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
+ if (err != KERN_SUCCESS)
+ doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
+ }
- } while( false );
+#if DEBUG
+ if (kIOLogMapping & gIOKitDebug)
+ IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
+ err, this, sourceAddr, mapping, address, offset, length);
+#endif
- if( err != KERN_SUCCESS) {
- if( ref.mapped)
- doUnmap( addressMap, ref.mapped, ref.size );
- *atAddress = NULL;
- } else
- *atAddress = ref.mapped + pageOffset;
+ if (err == KERN_SUCCESS)
+ mapping->fAddress = address + pageOffset;
+ else
+        mapping->fAddress = 0;
+ }
+ }
+ while( false );
- return( err );
+ return (err);
}
-enum {
- kIOMemoryRedirected = 0x00010000
-};
-
IOReturn IOMemoryDescriptor::handleFault(
void * _pager,
vm_map_t addressMap,
- IOVirtualAddress address,
- IOByteCount sourceOffset,
- IOByteCount length,
+ mach_vm_address_t address,
+ mach_vm_size_t sourceOffset,
+ mach_vm_size_t length,
IOOptionBits options )
{
IOReturn err = kIOReturnSuccess;
memory_object_t pager = (memory_object_t) _pager;
- vm_size_t size;
- vm_size_t bytes;
- vm_size_t page;
- IOByteCount pageOffset;
- IOByteCount pagerOffset;
+ mach_vm_size_t size;
+ mach_vm_size_t bytes;
+ mach_vm_size_t page;
+ mach_vm_size_t pageOffset;
+ mach_vm_size_t pagerOffset;
IOPhysicalLength segLen;
addr64_t physAddr;
- if( !addressMap) {
-
- if( kIOMemoryRedirected & _flags) {
-#ifdef DEBUG
- IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
+ if( !addressMap)
+ {
+ if( kIOMemoryRedirected & _flags)
+ {
+#if DEBUG
+ IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
do {
SLEEP;
return( kIOReturnSuccess );
}
- physAddr = getPhysicalSegment64( sourceOffset, &segLen );
+ physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
assert( physAddr );
pageOffset = physAddr - trunc_page_64( physAddr );
pagerOffset = sourceOffset;
segLen += pageOffset;
bytes = size;
- do {
+ do
+ {
// in the middle of the loop only map whole pages
if( segLen >= bytes)
segLen = bytes;
- else if( segLen != trunc_page_32( segLen))
+ else if( segLen != trunc_page( segLen))
err = kIOReturnVMError;
if( physAddr != trunc_page_64( physAddr))
err = kIOReturnBadArgument;
+ if (kIOReturnSuccess != err)
+ break;
-#ifdef DEBUG
+#if DEBUG
if( kIOLogMapping & gIOKitDebug)
- IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
+ IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
addressMap, address + pageOffset, physAddr + pageOffset,
segLen - pageOffset);
#endif
-
-
-
-#ifdef i386
- /* i386 doesn't support faulting on device memory yet */
- if( addressMap && (kIOReturnSuccess == err))
- err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
- assert( KERN_SUCCESS == err );
- if( err)
- break;
-#endif
-
if( pager) {
if( reserved && reserved->pagerContig) {
IOPhysicalLength allLen;
addr64_t allPhys;
- allPhys = getPhysicalSegment64( 0, &allLen );
+ allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
assert( allPhys );
- err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
-
- } else {
+ err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
+ }
+ else
+ {
- for( page = 0;
+ for( page = 0;
(page < segLen) && (KERN_SUCCESS == err);
- page += page_size) {
- err = device_pager_populate_object(pager, pagerOffset,
- (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
- pagerOffset += page_size;
+ page += page_size)
+ {
+ err = device_pager_populate_object(pager, pagerOffset,
+ (ppnum_t)(atop_64(physAddr + page)), page_size);
+ pagerOffset += page_size;
}
}
assert( KERN_SUCCESS == err );
if( err)
break;
}
-#ifndef i386
+
+ // This call to vm_fault causes an early pmap level resolution
+ // of the mappings created above for kernel mappings, since
+ // faulting in later can't take place from interrupt level.
/* *** ALERT *** */
/* *** Temporary Workaround *** */
- /* This call to vm_fault causes an early pmap level resolution */
- /* of the mappings created above. Need for this is in absolute */
- /* violation of the basic tenet that the pmap layer is a cache. */
- /* Further, it implies a serious I/O architectural violation on */
- /* the part of some user of the mapping. As of this writing, */
- /* the call to vm_fault is needed because the NVIDIA driver */
- /* makes a call to pmap_extract. The NVIDIA driver needs to be */
- /* fixed as soon as possible. The NVIDIA driver should not */
- /* need to query for this info as it should know from the doMap */
- /* call where the physical memory is mapped. When a query is */
- /* necessary to find a physical mapping, it should be done */
- /* through an iokit call which includes the mapped memory */
- /* handle. This is required for machine architecture independence.*/
-
- if(!(kIOMemoryRedirected & _flags)) {
+ if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
+ {
vm_fault(addressMap,
(vm_map_offset_t)address,
VM_PROT_READ|VM_PROT_WRITE,
/* *** Temporary Workaround *** */
/* *** ALERT *** */
-#endif
+
sourceOffset += segLen - pageOffset;
address += segLen;
bytes -= segLen;
pageOffset = 0;
- } while( bytes
- && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
+ }
+ while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
- if( bytes)
+ if (bytes)
err = kIOReturnBadArgument;
- return( err );
+ return (err);
}
IOReturn IOMemoryDescriptor::doUnmap(
vm_map_t addressMap,
- IOVirtualAddress logical,
- IOByteCount length )
+ IOVirtualAddress __address,
+ IOByteCount __length )
{
- IOReturn err;
+ IOReturn err;
+ mach_vm_address_t address;
+ mach_vm_size_t length;
-#ifdef DEBUG
- if( kIOLogMapping & gIOKitDebug)
- kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
- addressMap, logical, length );
-#endif
-
- if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
+ if (__length)
+ {
+ address = __address;
+ length = __length;
+ }
+ else
+ {
+ addressMap = ((IOMemoryMap *) __address)->fAddressMap;
+ address = ((IOMemoryMap *) __address)->fAddress;
+ length = ((IOMemoryMap *) __address)->fLength;
+ }
- if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
- addressMap = IOPageableMapForAddress( logical );
+ if ((addressMap == kernel_map)
+ && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
+ addressMap = IOPageableMapForAddress( address );
- err = vm_deallocate( addressMap, logical, length );
+#if DEBUG
+ if( kIOLogMapping & gIOKitDebug)
+ IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
+ addressMap, address, length );
+#endif
- } else
- err = kIOReturnSuccess;
+ err = mach_vm_deallocate( addressMap, address, length );
- return( err );
+ return (err);
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
IOReturn err = kIOReturnSuccess;
- _IOMemoryMap * mapping = 0;
+ IOMemoryMap * mapping = 0;
OSIterator * iter;
LOCK;
do {
if( (iter = OSCollectionIterator::withCollection( _mappings))) {
- while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
+ while( (mapping = (IOMemoryMap *) iter->getNextObject()))
mapping->redirect( safeTask, doRedirect );
iter->release();
UNLOCK;
+#ifndef __LP64__
// temporary binary compatibility
IOSubMemoryDescriptor * subMem;
if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
err = subMem->redirect( safeTask, doRedirect );
else
err = kIOReturnSuccess;
+#endif /* !__LP64__ */
return( err );
}
-IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
-{
- return( _parent->redirect( safeTask, doRedirect ));
-}
-
-IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
+IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
IOReturn err = kIOReturnSuccess;
- if( superMap) {
-// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
+ if( fSuperMap) {
+// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
} else {
LOCK;
- if( logical && addressMap
- && (!safeTask || (get_task_map(safeTask) != addressMap))
- && (0 == (options & kIOMapStatic)))
+
+ do
{
- IOUnmapPages( addressMap, logical, length );
- if(!doRedirect && safeTask
- && ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical))
- {
- err = vm_deallocate( addressMap, logical, length );
- err = memory->doMap( addressMap, &logical,
- (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
- offset, length );
- } else
- err = kIOReturnSuccess;
-#ifdef DEBUG
- IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
+ if (!fAddress)
+ break;
+ if (!fAddressMap)
+ break;
+
+ if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
+ && (0 == (fOptions & kIOMapStatic)))
+ {
+ IOUnmapPages( fAddressMap, fAddress, fLength );
+ err = kIOReturnSuccess;
+#if DEBUG
+ IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
- }
- UNLOCK;
+ }
+ else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
+ {
+ IOOptionBits newMode;
+ newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
+ IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
+ }
+ }
+ while (false);
+ UNLOCK;
}
- if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
+ if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
+ || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
&& safeTask
- && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
- memory->redirect(safeTask, doRedirect);
+ && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
+ fMemory->redirect(safeTask, doRedirect);
return( err );
}
-IOReturn _IOMemoryMap::unmap( void )
+IOReturn IOMemoryMap::unmap( void )
{
IOReturn err;
LOCK;
- if( logical && addressMap && (0 == superMap)
- && (0 == (options & kIOMapStatic))) {
+ if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
+ && (0 == (fOptions & kIOMapStatic))) {
- err = memory->doUnmap( addressMap, logical, length );
- vm_map_deallocate(addressMap);
- addressMap = 0;
+ err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
} else
err = kIOReturnSuccess;
- logical = 0;
+ if (fAddressMap)
+ {
+ vm_map_deallocate(fAddressMap);
+ fAddressMap = 0;
+ }
+
+ fAddress = 0;
UNLOCK;
return( err );
}
-void _IOMemoryMap::taskDied( void )
+void IOMemoryMap::taskDied( void )
{
LOCK;
- if( addressMap) {
- vm_map_deallocate(addressMap);
- addressMap = 0;
+ if (fUserClientUnmap)
+ unmap();
+ if( fAddressMap) {
+ vm_map_deallocate(fAddressMap);
+ fAddressMap = 0;
}
- addressTask = 0;
- logical = 0;
+ fAddressTask = 0;
+ fAddress = 0;
UNLOCK;
}
+IOReturn IOMemoryMap::userClientUnmap( void )
+{
+ fUserClientUnmap = true;
+ return (kIOReturnSuccess);
+}
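+// Flags the mapping so that taskDied() will also unmap() it, covering user
+// clients that terminate without explicitly releasing their mappings.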
+
// Overload the release mechanism.  All mappings must be members of their
// memory descriptor's _mappings set, which means there are always 2
// references on a mapping.  When either of those references is released
// we need to free ourselves.
-void _IOMemoryMap::taggedRelease(const void *tag) const
+void IOMemoryMap::taggedRelease(const void *tag) const
{
LOCK;
super::taggedRelease(tag, 2);
UNLOCK;
}
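+// With the bias of 2, the retains held through the _mappings sets do not by
+// themselves keep the object alive; the last outside release drops the count
+// to 2 and free() runs.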
-void _IOMemoryMap::free()
+void IOMemoryMap::free()
{
unmap();
- if( memory) {
+ if (fMemory)
+ {
LOCK;
- memory->removeMapping( this);
+ fMemory->removeMapping(this);
UNLOCK;
- memory->release();
+ fMemory->release();
}
- if (owner && (owner != memory))
+ if (fOwner && (fOwner != fMemory))
{
LOCK;
- owner->removeMapping(this);
+ fOwner->removeMapping(this);
UNLOCK;
}
- if( superMap)
- superMap->release();
+ if (fSuperMap)
+ fSuperMap->release();
- if (redirUPL) {
- upl_commit(redirUPL, NULL, 0);
- upl_deallocate(redirUPL);
+ if (fRedirUPL) {
+ upl_commit(fRedirUPL, NULL, 0);
+ upl_deallocate(fRedirUPL);
}
super::free();
}
-IOByteCount _IOMemoryMap::getLength()
+IOByteCount IOMemoryMap::getLength()
{
- return( length );
+ return( fLength );
}
-IOVirtualAddress _IOMemoryMap::getVirtualAddress()
+IOVirtualAddress IOMemoryMap::getVirtualAddress()
{
- return( logical);
+#ifndef __LP64__
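+    // The supermap's result is discarded below; the call appears to exist
+    // only so the parent mapping raises the same 64-bit warning.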
+ if (fSuperMap)
+ fSuperMap->getVirtualAddress();
+ else if (fAddressMap
+ && vm_map_is_64bit(fAddressMap)
+ && (sizeof(IOVirtualAddress) < 8))
+ {
+ OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
+ }
+#endif /* !__LP64__ */
+
+ return (fAddress);
}
-task_t _IOMemoryMap::getAddressTask()
+#ifndef __LP64__
+mach_vm_address_t IOMemoryMap::getAddress()
{
- if( superMap)
- return( superMap->getAddressTask());
+ return( fAddress);
+}
+
+mach_vm_size_t IOMemoryMap::getSize()
+{
+ return( fLength );
+}
+#endif /* !__LP64__ */
+
+
+task_t IOMemoryMap::getAddressTask()
+{
+ if( fSuperMap)
+ return( fSuperMap->getAddressTask());
else
- return( addressTask);
+ return( fAddressTask);
}
-IOOptionBits _IOMemoryMap::getMapOptions()
+IOOptionBits IOMemoryMap::getMapOptions()
{
- return( options);
+ return( fOptions);
}
-IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
+IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
{
- return( memory );
+ return( fMemory );
}
-_IOMemoryMap * _IOMemoryMap::copyCompatible(
- IOMemoryDescriptor * owner,
- task_t task,
- IOVirtualAddress toAddress,
- IOOptionBits _options,
- IOByteCount _offset,
- IOByteCount _length )
+IOMemoryMap * IOMemoryMap::copyCompatible(
+ IOMemoryMap * newMapping )
{
- _IOMemoryMap * mapping;
+ task_t task = newMapping->getAddressTask();
+ mach_vm_address_t toAddress = newMapping->fAddress;
+ IOOptionBits _options = newMapping->fOptions;
+ mach_vm_size_t _offset = newMapping->fOffset;
+ mach_vm_size_t _length = newMapping->fLength;
- if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
- return( 0 );
- if( options & kIOMapUnique)
+ if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
return( 0 );
- if( (options ^ _options) & kIOMapReadOnly)
+ if( (fOptions ^ _options) & kIOMapReadOnly)
return( 0 );
if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
- && ((options ^ _options) & kIOMapCacheMask))
+ && ((fOptions ^ _options) & kIOMapCacheMask))
return( 0 );
- if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
+ if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
return( 0 );
- if( _offset < offset)
+ if( _offset < fOffset)
return( 0 );
- _offset -= offset;
+ _offset -= fOffset;
- if( (_offset + _length) > length)
+ if( (_offset + _length) > fLength)
return( 0 );
- if( (length == _length) && (!_offset)) {
- retain();
- mapping = this;
-
- } else {
- mapping = new _IOMemoryMap;
- if( mapping
- && !mapping->initCompatible( owner, this, _offset, _length )) {
- mapping->release();
- mapping = 0;
- }
+ retain();
+ if( (fLength == _length) && (!_offset))
+ {
+ newMapping->release();
+ newMapping = this;
+ }
+ else
+ {
+ newMapping->fSuperMap = this;
+ newMapping->fOffset = _offset;
+ newMapping->fAddress = fAddress + _offset;
}
- return( mapping );
+ return( newMapping );
}
-IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
- IOPhysicalLength * _length)
+IOPhysicalAddress
+#ifdef __LP64__
+IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
+#else /* !__LP64__ */
+IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
+#endif /* !__LP64__ */
{
IOPhysicalAddress address;
LOCK;
- address = memory->getPhysicalSegment( offset + _offset, _length );
+#ifdef __LP64__
+ address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
+#else /* !__LP64__ */
+ address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
+#endif /* !__LP64__ */
UNLOCK;
return( address );
IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
ptoa_64(gIOMaximumMappedIOPageCount), 64);
+ gIOLastPage = IOGetLastPageNumber();
}
void IOMemoryDescriptor::free( void )
IOVirtualAddress mapAddress,
IOOptionBits options )
{
- _IOMemoryMap * newMap;
-
- newMap = new _IOMemoryMap;
-
- LOCK;
-
- if( newMap
- && !newMap->initWithDescriptor( this, intoTask, mapAddress,
- options | kIOMapStatic, 0, getLength() )) {
- newMap->release();
- newMap = 0;
- }
-
- addMapping( newMap);
-
- UNLOCK;
-
- return( newMap);
+ return (createMappingInTask( intoTask, mapAddress,
+ options | kIOMapStatic,
+ 0, getLength() ));
}
IOMemoryMap * IOMemoryDescriptor::map(
IOOptionBits options )
{
-
- return( makeMapping( this, kernel_task, 0,
- options | kIOMapAnywhere,
- 0, getLength() ));
+ return (createMappingInTask( kernel_task, 0,
+ options | kIOMapAnywhere,
+ 0, getLength() ));
}
-IOMemoryMap * IOMemoryDescriptor::map(
- task_t intoTask,
- IOVirtualAddress toAddress,
+#ifndef __LP64__
+IOMemoryMap * IOMemoryDescriptor::map(
+ task_t intoTask,
+ IOVirtualAddress atAddress,
IOOptionBits options,
IOByteCount offset,
IOByteCount length )
{
- if( 0 == length)
+ if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
+ {
+ OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
+ return (0);
+ }
+
+ return (createMappingInTask(intoTask, atAddress,
+ options, offset, length));
+}
+#endif /* !__LP64__ */
+
+IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
+ task_t intoTask,
+ mach_vm_address_t atAddress,
+ IOOptionBits options,
+ mach_vm_size_t offset,
+ mach_vm_size_t length)
+{
+ IOMemoryMap * result;
+ IOMemoryMap * mapping;
+
+ if (0 == length)
length = getLength();
- return( makeMapping( this, intoTask, toAddress, options, offset, length ));
+ mapping = new IOMemoryMap;
+
+ if( mapping
+ && !mapping->init( intoTask, atAddress,
+ options, offset, length )) {
+ mapping->release();
+ mapping = 0;
+ }
+
+ if (mapping)
+ result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
+ else
+ result = 0;
+
+#if DEBUG
+ if (!result)
+ IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
+ this, atAddress, options, offset, length);
+#endif
+
+ return (result);
}
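+/*
+ * Usage sketch (hypothetical caller, not part of this change), assuming an
+ * existing descriptor "md":
+ *
+ *     IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
+ *                                 kIOMapAnywhere | kIOMapReadOnly, 0, 0);
+ *     if (map) {
+ *         IOVirtualAddress va = map->getVirtualAddress();
+ *         // ... use [va, va + map->getLength()) ...
+ *         map->release();
+ *     }
+ */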
-IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
+#ifndef __LP64__ // there is only a 64 bit version for LP64
+IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
IOOptionBits options,
IOByteCount offset)
+{
+ return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
+}
+#endif
+
+IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
+ IOOptionBits options,
+ mach_vm_size_t offset)
{
IOReturn err = kIOReturnSuccess;
IOMemoryDescriptor * physMem = 0;
LOCK;
- if (logical && addressMap) do
+ if (fAddress && fAddressMap) do
{
- if ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
+ if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
+ || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
{
- physMem = memory;
+ physMem = fMemory;
physMem->retain();
}
- if (!redirUPL)
+ if (!fRedirUPL)
{
- vm_size_t size = length;
+ vm_size_t size = round_page(fLength);
int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
| UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
- if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
+ if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
NULL, NULL,
&flags))
- redirUPL = 0;
+ fRedirUPL = 0;
if (physMem)
{
- IOUnmapPages( addressMap, logical, length );
- physMem->redirect(0, true);
+ IOUnmapPages( fAddressMap, fAddress, fLength );
+ if (false)
+ physMem->redirect(0, true);
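+           // this redirect looks intentionally disabled (if (false)), as is
+           // the matching redirect(0, false) further down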
}
}
if (newBackingMemory)
{
- if (newBackingMemory != memory)
+ if (newBackingMemory != fMemory)
{
- if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
- options | kIOMapUnique | kIOMapReference,
- offset, length))
+ fOffset = 0;
+ if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
+ options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
+ offset, fLength))
err = kIOReturnError;
}
- if (redirUPL)
+ if (fRedirUPL)
{
- upl_commit(redirUPL, NULL, 0);
- upl_deallocate(redirUPL);
- redirUPL = 0;
+ upl_commit(fRedirUPL, NULL, 0);
+ upl_deallocate(fRedirUPL);
+ fRedirUPL = 0;
}
- if (physMem)
+ if (false && physMem)
physMem->redirect(0, false);
}
}
IOMemoryMap * IOMemoryDescriptor::makeMapping(
IOMemoryDescriptor * owner,
- task_t intoTask,
- IOVirtualAddress toAddress,
+ task_t __intoTask,
+ IOVirtualAddress __address,
IOOptionBits options,
- IOByteCount offset,
- IOByteCount length )
+ IOByteCount __offset,
+ IOByteCount __length )
{
+#ifndef __LP64__
+ if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
+#endif /* !__LP64__ */
+
IOMemoryDescriptor * mapDesc = 0;
- _IOMemoryMap * mapping = 0;
- OSIterator * iter;
+ IOMemoryMap * result = 0;
+ OSIterator * iter;
+
+ IOMemoryMap * mapping = (IOMemoryMap *) __address;
+ mach_vm_size_t offset = mapping->fOffset + __offset;
+ mach_vm_size_t length = mapping->fLength;
+
+ mapping->fOffset = offset;
LOCK;
do
{
+ if (kIOMapStatic & options)
+ {
+ result = mapping;
+ addMapping(mapping);
+ mapping->setMemoryDescriptor(this, 0);
+ continue;
+ }
+
if (kIOMapUnique & options)
{
IOPhysicalAddress phys;
IOByteCount physLen;
- if (owner != this)
- continue;
+// if (owner != this) continue;
- if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
+ if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
+ || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
{
- phys = getPhysicalSegment(offset, &physLen);
+ phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
if (!phys || (physLen < length))
continue;
- mapDesc = IOMemoryDescriptor::withPhysicalAddress(
- phys, length, _direction);
+ mapDesc = IOMemoryDescriptor::withAddressRange(
+ phys, length, getDirection() | kIOMemoryMapperNone, NULL);
if (!mapDesc)
continue;
offset = 0;
- }
- else
- {
- mapDesc = this;
- mapDesc->retain();
- }
-
- if (kIOMapReference & options)
- {
- mapping = (_IOMemoryMap *) toAddress;
- mapping->retain();
-
-#if 1
- uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
- pageOffset1 -= trunc_page_32( pageOffset1 );
-
- uint32_t pageOffset2 = mapping->getVirtualAddress();
- pageOffset2 -= trunc_page_32( pageOffset2 );
-
- if (pageOffset1 != pageOffset2)
- IOLog("::redirect can't map offset %x to addr %x\n",
- pageOffset1, mapping->getVirtualAddress());
-#endif
-
-
- if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
- offset, length ))
- {
-#ifdef DEBUG
- IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
-#endif
- }
-
- if (mapping->owner)
- mapping->owner->removeMapping(mapping);
- continue;
+ mapping->fOffset = offset;
}
}
else
{
- // look for an existing mapping
- if( (iter = OSCollectionIterator::withCollection( _mappings))) {
-
- while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
-
- if( (mapping = mapping->copyCompatible(
- owner, intoTask, toAddress,
- options | kIOMapReference,
- offset, length )))
+ // look for a compatible existing mapping
+ if( (iter = OSCollectionIterator::withCollection(_mappings)))
+ {
+ IOMemoryMap * lookMapping;
+ while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
+ {
+ if ((result = lookMapping->copyCompatible(mapping)))
+ {
+ addMapping(result);
+ result->setMemoryDescriptor(this, offset);
break;
+ }
}
iter->release();
}
-
-
- if (mapping)
- mapping->retain();
-
- if( mapping || (options & kIOMapReference))
+ if (result || (options & kIOMapReference))
continue;
+ }
- mapDesc = owner;
+ if (!mapDesc)
+ {
+ mapDesc = this;
mapDesc->retain();
}
- owner = this;
-
- mapping = new _IOMemoryMap;
- if( mapping
- && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
- offset, length )) {
-#ifdef DEBUG
- IOLog("Didn't make map %08lx : %08lx\n", offset, length );
-#endif
+    IOReturn kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
+ if (kIOReturnSuccess == kr)
+ {
+ result = mapping;
+ mapDesc->addMapping(result);
+ result->setMemoryDescriptor(mapDesc, offset);
+ }
+ else
+ {
mapping->release();
- mapping = 0;
+ mapping = NULL;
}
-
- if (mapping)
- mapping->retain();
-
- } while( false );
-
- if (mapping)
- {
- mapping->owner = owner;
- owner->addMapping( mapping);
- mapping->release();
}
+ while( false );
UNLOCK;
if (mapDesc)
mapDesc->release();
- return( mapping);
+ return (result);
}
void IOMemoryDescriptor::addMapping(
IOMemoryMap * mapping )
{
- if( mapping) {
+ if( mapping)
+ {
if( 0 == _mappings)
_mappings = OSSet::withCapacity(1);
if( _mappings )
_mappings->removeObject( mapping);
}
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#undef super
-#define super IOMemoryDescriptor
-
-OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
- IOByteCount offset, IOByteCount length,
- IODirection direction )
-{
- if( !parent)
- return( false);
-
- if( (offset + length) > parent->getLength())
- return( false);
-
- /*
- * We can check the _parent instance variable before having ever set it
- * to an initial value because I/O Kit guarantees that all our instance
- * variables are zeroed on an object's allocation.
- */
-
- if( !_parent) {
- if( !super::init())
- return( false );
- } else {
- /*
- * An existing memory descriptor is being retargeted to
- * point to somewhere else. Clean up our present state.
- */
-
- _parent->release();
- _parent = 0;
- }
-
- parent->retain();
- _parent = parent;
- _start = offset;
- _length = length;
- _direction = direction;
- _tag = parent->getTag();
-
- return( true );
-}
-
-void IOSubMemoryDescriptor::free( void )
-{
- if( _parent)
- _parent->release();
-
- super::free();
-}
-
-
-IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
- IOByteCount * length )
-{
- IOPhysicalAddress address;
- IOByteCount actualLength;
-
- assert(offset <= _length);
-
- if( length)
- *length = 0;
-
- if( offset >= _length)
- return( 0 );
-
- address = _parent->getPhysicalSegment( offset + _start, &actualLength );
-
- if( address && length)
- *length = min( _length - offset, actualLength );
-
- return( address );
-}
-
-
-IOReturn IOSubMemoryDescriptor::doMap(
- vm_map_t addressMap,
- IOVirtualAddress * atAddress,
- IOOptionBits options,
- IOByteCount sourceOffset,
- IOByteCount length )
-{
- if( sourceOffset >= _length)
- return( kIOReturnOverrun );
- return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
-}
-
-IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
- IOByteCount * length )
-{
- IOPhysicalAddress address;
- IOByteCount actualLength;
-
- assert(offset <= _length);
-
- if( length)
- *length = 0;
-
- if( offset >= _length)
- return( 0 );
-
- address = _parent->getSourceSegment( offset + _start, &actualLength );
-
- if( address && length)
- *length = min( _length - offset, actualLength );
-
- return( address );
-}
-
-void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
- IOByteCount * lengthOfSegment)
-{
- return( 0 );
-}
-
-IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
- void * bytes, IOByteCount length)
-{
- IOByteCount byteCount;
-
- assert(offset <= _length);
-
- if( offset >= _length)
- return( 0 );
-
- LOCK;
- byteCount = _parent->readBytes( _start + offset, bytes,
- min(length, _length - offset) );
- UNLOCK;
-
- return( byteCount );
-}
-
-IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
- const void* bytes, IOByteCount length)
-{
- IOByteCount byteCount;
-
- assert(offset <= _length);
-
- if( offset >= _length)
- return( 0 );
-
- LOCK;
- byteCount = _parent->writeBytes( _start + offset, bytes,
- min(length, _length - offset) );
- UNLOCK;
-
- return( byteCount );
-}
-
-IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
- IOOptionBits * oldState )
-{
- IOReturn err;
-
- LOCK;
- err = _parent->setPurgeable( newState, oldState );
- UNLOCK;
-
- return( err );
-}
-
-IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
- IOByteCount offset, IOByteCount length )
-{
- IOReturn err;
-
- assert(offset <= _length);
-
- if( offset >= _length)
- return( kIOReturnOverrun );
-
- LOCK;
- err = _parent->performOperation( options, _start + offset,
- min(length, _length - offset) );
- UNLOCK;
-
- return( err );
-}
-
-IOReturn IOSubMemoryDescriptor::prepare(
- IODirection forDirection)
-{
- IOReturn err;
-
- LOCK;
- err = _parent->prepare( forDirection);
- UNLOCK;
-
- return( err );
-}
-
-IOReturn IOSubMemoryDescriptor::complete(
- IODirection forDirection)
-{
- IOReturn err;
-
- LOCK;
- err = _parent->complete( forDirection);
- UNLOCK;
-
- return( err );
-}
-
-IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
- IOMemoryDescriptor * owner,
- task_t intoTask,
- IOVirtualAddress toAddress,
- IOOptionBits options,
- IOByteCount offset,
- IOByteCount length )
-{
- IOMemoryMap * mapping = 0;
-
- if (!(kIOMapUnique & options))
- mapping = (IOMemoryMap *) _parent->makeMapping(
- _parent, intoTask,
- toAddress - (_start + offset),
- options | kIOMapReference,
- _start + offset, length );
-
- if( !mapping)
- mapping = (IOMemoryMap *) _parent->makeMapping(
- _parent, intoTask,
- toAddress,
- options, _start + offset, length );
-
- if( !mapping)
- mapping = super::makeMapping( owner, intoTask, toAddress, options,
- offset, length );
-
- return( mapping );
-}
-
-/* ick */
-
+#ifndef __LP64__
+// obsolete initializers
+// - initWithOptions is the designated initializer
bool
-IOSubMemoryDescriptor::initWithAddress(void * address,
+IOMemoryDescriptor::initWithAddress(void * address,
IOByteCount length,
IODirection direction)
{
}
bool
-IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
+IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
IOByteCount length,
IODirection direction,
task_t task)
}
bool
-IOSubMemoryDescriptor::initWithPhysicalAddress(
+IOMemoryDescriptor::initWithPhysicalAddress(
IOPhysicalAddress address,
IOByteCount length,
IODirection direction )
}
bool
-IOSubMemoryDescriptor::initWithRanges(
+IOMemoryDescriptor::initWithRanges(
IOVirtualRange * ranges,
UInt32 withCount,
IODirection direction,
}
bool
-IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
+IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
UInt32 withCount,
IODirection direction,
bool asReference)
return( false );
}
+void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+ IOByteCount * lengthOfSegment)
+{
+ return( 0 );
+}
+#endif /* !__LP64__ */
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
if (keys[1])
keys[1]->release();
if (vcopy)
- IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
+ IOFree(vcopy, sizeof(SerData) * nRanges);
return result;
}
-bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
-{
- if (!s) {
- return (false);
- }
- if (s->previouslySerialized(this)) return true;
-
- // Pretend we are a dictionary.
- // We must duplicate the functionality of OSDictionary here
- // because otherwise object references will not work;
- // they are based on the value of the object passed to
- // previouslySerialized and addXMLStartTag.
-
- if (!s->addXMLStartTag(this, "dict")) return false;
-
- char const *keys[3] = {"offset", "length", "parent"};
-
- OSObject *values[3];
- values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
- if (values[0] == 0)
- return false;
- values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
- if (values[1] == 0) {
- values[0]->release();
- return false;
- }
- values[2] = _parent;
-
- bool result = true;
- for (int i=0; i<3; i++) {
- if (!s->addString("<key>") ||
- !s->addString(keys[i]) ||
- !s->addXMLEndTag("key") ||
- !values[i]->serialize(s)) {
- result = false;
- break;
- }
- }
- values[0]->release();
- values[1]->release();
- if (!result) {
- return false;
- }
-
- return s->addXMLEndTag("dict");
-}
-
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
+#ifdef __LP64__
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
+OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
+#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
-OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
-OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
-OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
+OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
+OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
+OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
+#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
-IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
+IOPhysicalAddress
+IOMemoryDescriptor::getPhysicalAddress()
{ return( getPhysicalSegment( 0, 0 )); }