+ // If a physically contiguous buffer was requested but 'contig' has not
+ // been decided yet, force contiguity when the buffer will not go through
+ // a mapper: either no mapping at all, or the mapper explicitly bypassed.
+ if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
+ {
+ contig |= (!mapped);
+ contig |= (0 != (kIOMemoryMapperNone & options));
+#if 0
+ // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
+ contig |= true;
+#endif
+ }
+
+ // Allocation strategy 1: physically restricted allocation.
+ // Taken when the buffer must be contiguous, has an address-mask
+ // constraint (highestMask), or needs more than page alignment.
+ if (contig || highestMask || (alignment > page_size))
+ {
+ _internalFlags |= kInternalFlagPhysical;
+ if (highestMask)
+ {
+ // With an address mask the capacity is rounded up to whole pages;
+ // kInternalFlagPageSized records this so the free path can undo it.
+ _internalFlags |= kInternalFlagPageSized;
+ capacity = round_page(capacity);
+ }
+ _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+ capacity, highestMask, alignment, contig);
+ }
+ // Allocation strategy 2: sub-page chunk from the BMD page allocator.
+ // Only for zero-fill requests small enough (capacity plus alignment
+ // slack) to fit within a page minus the allocator's chunk overhead.
+ else if (needZero
+ && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
+ {
+ _internalFlags |= kInternalFlagPageAllocated;
+ // needZero is cleared here — presumably iopa_alloc hands back
+ // already-zeroed memory; confirm against the allocator's contract.
+ needZero = false;
+ _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
+ if (_buffer)
+ {
+ // Account the allocation in IOStatistics (and the debug byte
+ // counter when IOALLOCDEBUG builds are enabled).
+ IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
+#if IOALLOCDEBUG
+ OSAddAtomic(capacity, &debug_iomalloc_size);
+#endif
+ }
+ }
+ // Allocation strategy 3: plain aligned kernel malloc for any remaining
+ // request with an alignment constraint. (Branch continues past the end
+ // of this hunk.)
+ else if (alignment > 1)
+ {
+ _buffer = IOMallocAligned(capacity, alignment);