+ // Set the memory entry cache mode and the pageable/purgeable options.
+ iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
+ if (options & kIOMemoryPageable) {
+ iomdOptions |= kIOMemoryBufferPageable;
+ if (options & kIOMemoryPurgeable) {
+ iomdOptions |= kIOMemoryBufferPurgeable;
+ }
+ } else {
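+ // Non-pageable buffers are wired down in the kernel map.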
+ vmmap = kernel_map;
+
+ // Buffers shouldn't auto-prepare; they should be prepared explicitly.
+ // But that was never enforced, so what are you going to do?
+ iomdOptions |= kIOMemoryAutoPrepare;
+
+ /* Allocate a wired-down buffer inside kernel space. */
+
+ bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
+
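+ // kIOMemoryPhysicallyContiguous only forces a host-contiguous
+ // allocation when there is no mapping, or no mapper (IOMMU)
+ // available to provide the device-visible contiguity.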
+ if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
+ contig |= (!mapped);
+ contig |= (0 != (kIOMemoryMapperNone & options));
+#if 0
+ // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
+ contig |= true;
+#endif
+ }
+
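+ // Contiguous, physically-masked, or larger-than-page-aligned
+ // requests all go through the restricted physical allocator.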
+ if (contig || highestMask || (alignment > page_size)) {
+ _internalFlags |= kInternalFlagPhysical;
+ if (highestMask) {
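+ // A physical mask forces page-granular sizing; kInternalFlagPageSized
+ // records the rounding, presumably so teardown releases the rounded capacity.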
+ _internalFlags |= kInternalFlagPageSized;
+ capacity = round_page(capacity);
+ }
+ _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+ capacity, highestMask, alignment, contig);
+ } else if (needZero
+ && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) {
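+ // Small zeroed requests that fit in a page (less the allocator's
+ // chunk overhead) are served from the IOBMD page sub-allocator.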
+ _internalFlags |= kInternalFlagPageAllocated;
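+ // Chunks from the page allocator are expected to arrive zero-filled,
+ // so the explicit bzero below can be skipped.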
+ needZero = false;
+ _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
+ if (_buffer) {
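+ // Account the successful chunk allocation in IOKit's statistics.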
+ IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
+#if IOALLOCDEBUG
+ OSAddAtomic(capacity, &debug_iomalloc_size);
+#endif
+ }
+ } else if (alignment > 1) {
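+ // Alignment stricter than a single byte uses the aligned kernel allocator.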
+ _buffer = IOMallocAligned(capacity, alignment);
+ } else {
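+ // Default case: a plain kernel heap allocation.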
+ _buffer = IOMalloc(capacity);
+ }
+ if (!_buffer) {
+ return false;
+ }
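+ // Zero the buffer unless the allocator already delivered zeroed memory.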
+ if (needZero) {
+ bzero(_buffer, capacity);
+ }
+ }