+ task_t mapTask = NULL;
+ vm_map_t vmmap = NULL;
+ mach_vm_address_t highestMask = 0;
+ IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+ IODMAMapSpecification mapSpec;
+ bool mapped = false;
+ bool withCopy = false;
+ bool needZero;
+
+ if (!capacity) {
+ return false;
+ }
+
+ _options = options;
+ _capacity = capacity;
+ _internalFlags = 0;
+ _internalReserved = 0;
+ _buffer = NULL;
+
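+ // The buffer is described by a single IOAddressRange. If the range was
+ // already populated by the caller, this is the withCopy path: wrap the
+ // existing pageable memory instead of allocating a new buffer.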
+ if (!_ranges.v64) {
+ _ranges.v64 = IONew(IOAddressRange, 1);
+ if (!_ranges.v64) {
+ return false;
+ }
+ _ranges.v64->address = 0;
+ _ranges.v64->length = 0;
+ } else {
+ if (!_ranges.v64->address) {
+ return false;
+ }
+ if (!(kIOMemoryPageable & options)) {
+ return false;
+ }
+ if (!inTask) {
+ return false;
+ }
+ _buffer = (void *) _ranges.v64->address;
+ withCopy = true;
+ }
+ // Make sure super::free() doesn't deallocate _ranges before super::initWithOptions() has run.
+ _flags = kIOMemoryAsReference;
+
+ // Carry the IOMemoryDescriptor option bits over from the buffer descriptor options.
+ iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
+
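+ // If a system mapper (e.g. an IOMMU) may translate this buffer, note that
+ // the allocation will be mapped through it.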
+ if (!(kIOMemoryMapperNone & options)) {
+ IOMapper::checkForSystemMapper();
+ mapped = (NULL != IOMapper::gSystem);
+ }
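+ // Memory that is mapper-translated or shared must be handed out zeroed.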
+ needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
+
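+ // Derive the alignment and the highest usable physical address from the
+ // low zero bits of physicalMask; e.g. 0x00000000FFFFF000 yields 4 KB
+ // alignment and a 32-bit highestMask.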
+ if (physicalMask && (alignment <= 1)) {
+ alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
+ highestMask = (physicalMask | alignment);
+ alignment++;
+ if (alignment < page_size) {
+ alignment = page_size;
+ }
+ }
+
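+ // Sharing, cache-mode, and clear-encrypt options apply to whole pages, so
+ // they require at least page alignment.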
+ if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
+ alignment = page_size;
+ }
+
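+ // Page (or larger) alignment implies whole-page capacity; alignment larger
+ // than a page is satisfied by the physically contiguous allocator.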
+ if (alignment >= page_size) {
+ capacity = round_page(capacity);
+ }
+
+ if (alignment > page_size) {
+ options |= kIOMemoryPhysicallyContiguous;
+ }
+
+ _alignment = alignment;
+
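+ // Guard against arithmetic overflow from rounding and alignment padding.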
+ if ((capacity + alignment) < _capacity) {
+ return false;
+ }
+
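+ // Only the kernel task may allocate wired (non-pageable) buffers.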
+ if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
+ return false;
+ }
+
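+ // Record the allocation constraints for any DMA mapper: the alignment and
+ // the number of physical address bits implied by highestMask. When a system
+ // mapper is present it enforces the address limit, so highestMask is cleared
+ // and no longer restricts the allocation itself.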
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.alignment = _alignment;
+ mapSpec.numAddressBits = 64;
+ if (highestMask && mapped) {
+ if (highestMask <= 0xFFFFFFFF) {
+ mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
+ } else {
+ mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
+ }
+ highestMask = 0;
+ }
+
+ // Set the memory entry cache mode and the pageable/purgeable attributes.
+ iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
+ if (options & kIOMemoryPageable) {
+ iomdOptions |= kIOMemoryBufferPageable;
+ if (options & kIOMemoryPurgeable) {
+ iomdOptions |= kIOMemoryBufferPurgeable;
+ }
+ } else {
+ vmmap = kernel_map;
+
+ // Buffers shouldn't auto-prepare; they should be prepared explicitly.
+ // That was never enforced, though, so keep auto-preparing for compatibility.
+ iomdOptions |= kIOMemoryAutoPrepare;
+
+ /* Allocate a wired-down buffer inside kernel space. */
+
+ bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
+
+ if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
+ contig |= (!mapped);
+ contig |= (0 != (kIOMemoryMapperNone & options));
+#if 0
+ // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
+ contig |= true;
+#endif
+ }
+
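+ // Pick an allocation path: physically restricted/contiguous memory, the
+ // IOKit page-chunk allocator for small zeroed requests, or plain
+ // IOMallocAligned()/IOMalloc().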
+ if (contig || highestMask || (alignment > page_size)) {
+ _internalFlags |= kInternalFlagPhysical;
+ if (highestMask) {
+ _internalFlags |= kInternalFlagPageSized;
+ capacity = round_page(capacity);
+ }
+ _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+ capacity, highestMask, alignment, contig);
+ } else if (needZero
+ && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) {
+ _internalFlags |= kInternalFlagPageAllocated;
+ needZero = false;
+ _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
+ if (_buffer) {
+ IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
+#if IOALLOCDEBUG
+ OSAddAtomic(capacity, &debug_iomalloc_size);
+#endif
+ }
+ } else if (alignment > 1) {
+ _buffer = IOMallocAligned(capacity, alignment);
+ } else {
+ _buffer = IOMalloc(capacity);
+ }
+ if (!_buffer) {
+ return false;
+ }
+ if (needZero) {
+ bzero(_buffer, capacity);
+ }
+ }
+
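+ // Post-allocation handling for pageable and cache-mode buffers: pageable
+ // buffers record the task to map into (mapTask); wired buffers with a
+ // cache mode are touched page by page to populate the pmap.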
+ if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
+ vm_size_t size = round_page(capacity);
+
+ // super::initWithOptions() below will create the memory entry.
+ if (!withCopy) {
+ iomdOptions |= kIOMemoryPersistent;
+ }
+
+ if (options & kIOMemoryPageable) {
+#if IOALLOCDEBUG
+ OSAddAtomicLong(size, &debug_iomallocpageable_size);
+#endif
+ if (!withCopy) {
+ mapTask = inTask;
+ }
+ if (NULL == inTask) {
+ inTask = kernel_task;
+ }
+ } else if (options & kIOMapCacheMask) {
+ // Prefetch each page to put entries into the pmap
+ volatile UInt8 * startAddr = (UInt8 *)_buffer;
+ volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity;
+
+ while (startAddr < endAddr) {
+ UInt8 dummyVar = *startAddr;
+ (void) dummyVar;
+ startAddr += page_size;
+ }
+ }
+ }
+
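+ // Describe the buffer through the descriptor's single address range and
+ // finish initialization in super::initWithOptions().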
+ _ranges.v64->address = (mach_vm_address_t) _buffer;
+ _ranges.v64->length = _capacity;
+
+ if (!super::initWithOptions(_ranges.v64, 1, 0,
+ inTask, iomdOptions, /* System mapper */ NULL)) {
+ return false;
+ }
+
+ _internalFlags |= kInternalFlagInit;
+#if IOTRACKING
+ if (!(options & kIOMemoryPageable)) {
+ trackingAccumSize(capacity);
+ }
+#endif /* IOTRACKING */
+
+ // give any system mapper the allocation params
+ if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
+ &mapSpec, sizeof(mapSpec))) {
+ return false;
+ }
+
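+ // For pageable buffers, create the mapping in the requesting task now and
+ // keep it in reserved->map; _buffer becomes the mapped address.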
+ if (mapTask) {
+ if (!reserved) {
+ reserved = IONew( ExpansionData, 1 );
+ if (!reserved) {
+ return false;
+ }
+ }
+ reserved->map = createMappingInTask(mapTask, 0,
+ kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
+ if (!reserved->map) {
+ _buffer = NULL;
+ return false;
+ }
+ release(); // map took a retain on this
+ reserved->map->retain();
+ removeMapping(reserved->map);
+ mach_vm_address_t buffer = reserved->map->getAddress();
+ _buffer = (void *) buffer;
+ if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
+ _ranges.v64->address = buffer;
+ }
+ }
+
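+ // Expose the full capacity as the initial length.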
+ setLength(_capacity);
+
+ return true;