#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
+#include <mach/mach_vm.h>
#include "IOKitKernelInternal.h"
-#include "IOCopyMapper.h"
+
+#ifdef IOALLOCDEBUG
+#include <libkern/c++/OSCPPDebug.h>
+#endif
+#include <IOKit/IOStatisticsPrivate.h>
+
+#if IOKITSTATS
+#define IOStatisticsAlloc(type, size) \
+do { \
+ IOStatistics::countAlloc(type, size); \
+} while (0)
+#else
+#define IOStatisticsAlloc(type, size)
+#endif /* IOKITSTATS */
+
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
__END_DECLS
enum
{
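+    // kInternalFlagPhysical:      buffer came from IOKernelAllocateWithPhysicalRestrict
+    // kInternalFlagPageSized:     capacity was rounded up to whole pages at allocation
+    // kInternalFlagPageAllocated: buffer is a sub-page chunk from the iopa allocator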
- kInternalFlagRealloc = 0x00000001,
+ kInternalFlagPhysical = 0x00000001,
+ kInternalFlagPageSized = 0x00000002,
+ kInternalFlagPageAllocated = 0x00000004
};
-volatile ppnum_t gIOHighestAllocatedPage;
-
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define super IOGeneralMemoryDescriptor
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
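+// Page supplier for the iopa sub-page allocator: wires down one kernel page
+// per call and returns it zero-filled, or 0 on failure.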
+static uintptr_t IOBMDPageProc(iopa_t * a)
+{
+ kern_return_t kr;
+ vm_address_t vmaddr = 0;
+ int options = 0; // KMA_LOMEM;
+
+ kr = kernel_memory_allocate(kernel_map, &vmaddr,
+ page_size, 0, options, VM_KERN_MEMORY_IOKIT);
+
+ if (KERN_SUCCESS != kr) vmaddr = 0;
+ else bzero((void *) vmaddr, page_size);
+
+ return ((uintptr_t) vmaddr);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options, vm_size_t capacity,
    vm_offset_t alignment, task_t inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t inTask, IOOptionBits options, mach_vm_size_t capacity,
    mach_vm_address_t alignment, mach_vm_address_t physicalMask)
{
- kern_return_t kr;
- task_t mapTask = NULL;
- vm_map_t vmmap = NULL;
- addr64_t lastIOAddr;
- mach_vm_address_t highestMask = 0;
- bool usePhys;
- IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
-
- if (!capacity)
- return false;
+ task_t mapTask = NULL;
+ vm_map_t vmmap = NULL;
+ mach_vm_address_t highestMask = 0;
+ IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+ IODMAMapSpecification mapSpec;
+ bool mapped = false;
+ bool needZero;
+
+ if (!capacity) return false;
_options = options;
_capacity = capacity;
    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
	return (false);
_ranges.v64->address = 0;
_ranges.v64->length = 0;
+ // make sure super::free doesn't dealloc _ranges before super::init
+ _flags = kIOMemoryAsReference;
// Grab IOMD bits from the Buffer MD options
iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
+ if (!(kIOMemoryMapperNone & options))
+ {
+ IOMapper::checkForSystemMapper();
+ mapped = (0 != IOMapper::gSystem);
+ }
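+    // Mapper-translated or shared buffers must not hand out stale kernel data,
+    // so they are zero-filled below.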
+ needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
+
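+    // A physical mask implies an alignment: its trailing zero bits give the
+    // largest power of two that every allocation must respect.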
if (physicalMask && (alignment <= 1))
{
alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
highestMask = (physicalMask | alignment);
alignment++;
+ if (alignment < page_size)
+ alignment = page_size;
}
- if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask)) && (alignment < page_size))
+ if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
alignment = page_size;
if (alignment >= page_size)
_alignment = alignment;
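+    // Reject requests where capacity + alignment wraps around.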
+ if ((capacity + alignment) < _capacity) return (false);
+
if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
return false;
- if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
- physicalMask = 0xFFFFFFFF;
-
- // set flags for entry + object create
- vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
-
- // set memory entry cache mode
- switch (options & kIOMapCacheMask)
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.alignment = _alignment;
+ mapSpec.numAddressBits = 64;
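+    // With a system mapper present, a physical mask becomes a DMA address-bit
+    // limit in mapSpec; the mask itself is then dropped so the backing
+    // allocation is not physically constrained.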
+ if (highestMask && mapped)
{
- case kIOMapInhibitCache:
- SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
- break;
-
- case kIOMapWriteThruCache:
- SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
- break;
-
- case kIOMapWriteCombineCache:
- SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
- break;
-
- case kIOMapCopybackCache:
- SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
- break;
-
- case kIOMapDefaultCache:
- default:
- SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
- break;
+ if (highestMask <= 0xFFFFFFFF)
+ mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
+ else
+ mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
+ highestMask = 0;
}
+ // set memory entry cache mode, pageable, purgeable
+ iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
if (options & kIOMemoryPageable)
{
iomdOptions |= kIOMemoryBufferPageable;
-
- // must create the entry before any pages are allocated
-
- // set flags for entry + object create
- memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
-
- if (options & kIOMemoryPurgeable)
- memEntryCacheMode |= MAP_MEM_PURGABLE;
+ if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
}
else
{
- memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
+ vmmap = kernel_map;
- if (IOMapper::gSystem)
- // assuming mapped space is 2G
- lastIOAddr = (1UL << 31) - PAGE_SIZE;
- else
- lastIOAddr = ptoa_64(gIOHighestAllocatedPage);
+ // Buffer shouldn't auto prepare they should be prepared explicitly
+ // But it never was enforced so what are you going to do?
+ iomdOptions |= kIOMemoryAutoPrepare;
+
+ /* Allocate a wired-down buffer inside kernel space. */
- usePhys = (highestMask && (lastIOAddr != (lastIOAddr & highestMask))
- && (alignment <= page_size));
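+    // Decide whether the backing store itself must be physically contiguous:
+    // either an explicit host-contiguous request, or a contiguous request with
+    // no system mapper available to stitch scattered pages together.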
+ bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
- if (!usePhys && (options & kIOMemoryPhysicallyContiguous))
+ if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
{
- _buffer = (void *) IOKernelAllocateContiguous(capacity, highestMask, alignment);
- usePhys = (NULL == _buffer);
+ contig |= (!mapped);
+ contig |= (0 != (kIOMemoryMapperNone & options));
+#if 0
+ // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
+ contig |= true;
+#endif
}
- if (usePhys)
- {
- mach_vm_address_t address;
- iomdOptions &= ~kIOMemoryTypeVirtual64;
- iomdOptions |= kIOMemoryTypePhysical64;
-
- address = IOMallocPhysical(capacity, highestMask);
- _buffer = (void *) address;
- if (!_buffer)
- return false;
- mapTask = inTask;
- inTask = 0;
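+    // Contiguity, a physical-address bound, or supra-page alignment all force
+    // the physically restricted allocator; mask-bounded allocations are
+    // page-rounded so free() can reconstruct the same size.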
+ if (contig || highestMask || (alignment > page_size))
+ {
+ _internalFlags |= kInternalFlagPhysical;
+ if (highestMask)
+ {
+ _internalFlags |= kInternalFlagPageSized;
+ capacity = round_page(capacity);
+ }
+ _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+ capacity, highestMask, alignment, contig);
}
- else
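+    // Small buffers that must be zeroed can share a page: the iopa sub-page
+    // allocator carves chunks out of pages supplied, pre-zeroed, by IOBMDPageProc.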
+ else if (needZero
+ && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
{
- vmmap = kernel_map;
-
- // Buffer shouldn't auto prepare they should be prepared explicitly
- // But it never was enforced so what are you going to do?
- iomdOptions |= kIOMemoryAutoPrepare;
-
- /* Allocate a wired-down buffer inside kernel space. */
- if (options & kIOMemoryPhysicallyContiguous)
- {
- // attempted allocate already
- }
- else if (alignment > 1)
+ _internalFlags |= kInternalFlagPageAllocated;
+ needZero = false;
+ _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
+ if (_buffer)
{
- _buffer = IOMallocAligned(capacity, alignment);
- }
- else
- {
- _buffer = IOMalloc(capacity);
+ IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
+#if IOALLOCDEBUG
+ OSAddAtomic(capacity, &debug_iomalloc_size);
+#endif
}
- if (!_buffer)
- return false;
}
+ else if (alignment > 1)
+ {
+ _buffer = IOMallocAligned(capacity, alignment);
+ }
+ else
+ {
+ _buffer = IOMalloc(capacity);
+ }
+ if (!_buffer)
+ {
+ return false;
+ }
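+	// iopa chunks come from pages already zeroed in IOBMDPageProc;
+	// every other path zeroes here when sharing or mapping requires it.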
+ if (needZero) bzero(_buffer, capacity);
}
- if( (kIOMemoryTypePhysical64 != (kIOMemoryTypeMask & iomdOptions))
- && (options & (kIOMemoryPageable | kIOMapCacheMask))) {
- ipc_port_t sharedMem;
+ if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
vm_size_t size = round_page(capacity);
- kr = mach_make_memory_entry(vmmap,
- &size, (vm_offset_t)_buffer,
- memEntryCacheMode, &sharedMem,
- NULL );
-
- if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
- ipc_port_release_send( sharedMem );
- kr = kIOReturnVMError;
- }
- if( KERN_SUCCESS != kr)
- return( false );
-
- _memEntry = (void *) sharedMem;
+ // initWithOptions will create memory entry
+ iomdOptions |= kIOMemoryPersistent;
if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
- debug_iomallocpageable_size += size;
+ OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
mapTask = inTask;
	    if (NULL == inTask)
		inTask = kernel_task;
	}
	else if (options & kIOMapCacheMask)
	{
	    // Prefetch each page to put entries into the pmap
	    volatile UInt8 * startAddr = (UInt8 *) _buffer;
	    volatile UInt8 * endAddr   = (UInt8 *) _buffer + capacity;
	    while (startAddr < endAddr)
{
- *startAddr;
+ UInt8 dummyVar = *startAddr;
+ (void) dummyVar;
startAddr += page_size;
- }
+ }
}
}
    if (!super::initWithOptions(&_ranges.v64, 1, 0,
				inTask, iomdOptions, /* System mapper */ 0))
return false;
- if (highestMask && !IOMapper::gSystem)
- {
- IOMDDMACharacteristics mdSummary;
-
- bzero(&mdSummary, sizeof(mdSummary));
- IOReturn rtn = dmaCommandOperation(
- kIOMDGetCharacteristics,
- &mdSummary, sizeof(mdSummary));
- if (rtn)
- return false;
-
- if (mdSummary.fHighestPage)
- {
- ppnum_t highest;
- while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
- {
- if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
- (UInt32 *) &gIOHighestAllocatedPage))
- break;
- }
- lastIOAddr = ptoa_64(mdSummary.fHighestPage);
- }
- else
- lastIOAddr = ptoa_64(gIOLastPage);
-
- if (lastIOAddr != (lastIOAddr & highestMask))
- {
- if (kIOMemoryTypePhysical64 != (_flags & kIOMemoryTypeMask))
- {
- // flag a retry
- _internalFlags |= kInternalFlagRealloc;
- }
- return false;
- }
- }
+ // give any system mapper the allocation params
+ if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
+ &mapSpec, sizeof(mapSpec)))
+ return false;
if (mapTask)
    {
	if (!reserved) {
	    reserved = IONew( ExpansionData, 1 );
	    if( !reserved)
		return( false );
	}
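+	// Pass kIOMapPrefault through so the new mapping can be faulted in up front.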
reserved->map = createMappingInTask(mapTask, 0,
- kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
+ kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
if (!reserved->map)
{
	    _buffer = 0;
	    return( false );
	}
IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
- bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
me->release();
me = 0;
- if (retry)
- {
- me = new IOBufferMemoryDescriptor;
- if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0))
- {
- me->release();
- me = 0;
- }
- }
}
return me;
}
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
{
- bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
me->release();
me = 0;
- if (retry)
- {
- me = new IOBufferMemoryDescriptor;
- if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
- {
- me->release();
- me = 0;
- }
- }
}
return me;
}
IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
- bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
me->release();
me = 0;
- if (retry)
- {
- me = new IOBufferMemoryDescriptor;
- if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0))
- {
- me->release();
- me = 0;
- }
- }
}
return me;
}
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
	kernel_task, inDirection | kIOMemoryUnshared
	| (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
inLength, inLength, 0 ))
{
- bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
me->release();
me = 0;
- if (retry)
- {
- me = new IOBufferMemoryDescriptor;
- if (me && !me->initWithPhysicalMask(
- kernel_task, inDirection | kIOMemoryUnshared
- | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
- inLength, inLength, 0 ))
- {
- me->release();
- me = 0;
- }
- }
-
}
if (me)
{
// Cache all of the relevant information on the stack for use
// after we call super::free()!
- IOOptionBits flags = _flags;
+ IOOptionBits flags = _flags;
+ IOOptionBits internalFlags = _internalFlags;
IOOptionBits options = _options;
vm_size_t size = _capacity;
void * buffer = _buffer;
IOMemoryMap * map = 0;
IOAddressRange * range = _ranges.v64;
- mach_vm_address_t source = range ? range->address : 0;
vm_offset_t alignment = _alignment;
    if (alignment >= page_size)
	size = round_page(size);
if (options & kIOMemoryPageable)
{
#if IOALLOCDEBUG
- debug_iomallocpageable_size -= round_page(size);
+ OSAddAtomicLong(-(round_page(size)), &debug_iomallocpageable_size);
#endif
}
else if (buffer)
{
- if (kIOMemoryTypePhysical64 == (flags & kIOMemoryTypeMask))
- IOFreePhysical(source, size);
- else if (options & kIOMemoryPhysicallyContiguous)
- IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
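+    // kInternalFlagPageSized means capacity was rounded up at allocation time;
+    // redo the rounding so the free matches the allocated size.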
+ if (kInternalFlagPageSized & internalFlags) size = round_page(size);
+
+ if (kInternalFlagPhysical & internalFlags)
+ {
+ IOKernelFreePhysical((mach_vm_address_t) buffer, size);
+ }
+ else if (kInternalFlagPageAllocated & internalFlags)
+ {
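+	// iopa_free hands back the backing page only once its last sub-page
+	// chunk is released; only then is the page returned to kernel_map.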
+ uintptr_t page;
+ page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
+ if (page)
+ {
+ kmem_free(kernel_map, page, page_size);
+ }
+#if IOALLOCDEBUG
+ OSAddAtomic(-size, &debug_iomalloc_size);
+#endif
+ IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
+ }
else if (alignment > 1)
+ {
IOFreeAligned(buffer, size);
+ }
else
+ {
IOFree(buffer, size);
+ }
}
if (range && (kIOMemoryAsReference & flags))
IODelete(range, IOAddressRange, 1);
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
assert(length <= _capacity);
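+    // Guard for release builds where assert() is compiled out.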
+ if (length > _capacity) return;
_length = length;
    _ranges.v64->length = length;
}