-        kr = mach_make_memory_entry( vmmap,
-                                     &size, 0,
-                                     memEntryCacheMode, &sharedMem,
-                                     NULL );
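+        /* Choose a backing allocator for the request: physically
+           constrained buffers (contiguous, address-masked, or aligned
+           beyond a page) must come from the physical allocator. */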
+        if (contig || highestMask || (alignment > page_size))
+        {
+            _internalFlags |= kInternalFlagPhysical;
+            if (highestMask)
+            {
+                _internalFlags |= kInternalFlagPageSized;
+                capacity = round_page(capacity);
+            }
+            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+                                   capacity, highestMask, alignment, contig);
+        }
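+        /* Small zero-fill requests that fit within a page are carved out
+           of shared pages by the IOBufferMemoryDescriptor page allocator;
+           needZero is cleared so the explicit bzero below is skipped. */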
+        else if (needZero
+                  && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
+        {
+            _internalFlags |= kInternalFlagPageAllocated;
+            needZero = false;
+            _buffer  = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
+            if (_buffer)
+            {
+                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
+#if IOALLOCDEBUG
+                debug_iomalloc_size += capacity;
+#endif
+            }
+        }
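+        /* Everything else falls back to plain aligned or unaligned
+           kernel malloc. */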
+        else if (alignment > 1)
+        {
+            _buffer = IOMallocAligned(capacity, alignment);
+        }
+        else
+        {
+            _buffer = IOMalloc(capacity);
+        }
+        if (!_buffer)
+        {
+            return false;
+        }
+        if (needZero) bzero(_buffer, capacity);
+    }
+
+    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
+        ipc_port_t sharedMem;
+        vm_size_t  size = round_page(capacity);
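For context, here is a minimal sketch of a kernel-side caller that would steer the new code into the IOKernelAllocateWithPhysicalRestrict branch above (both contig and highestMask non-zero). The entry point and option bits are standard IOKit API; the capacity and physical-mask values are illustrative assumptions, not taken from the patch:

    #include <IOKit/IOBufferMemoryDescriptor.h>

    // Request one physically contiguous page below 4 GB. A non-zero
    // physical mask becomes highestMask, and kIOMemoryPhysicallyContiguous
    // sets contig, so the physical allocator path is taken.
    IOBufferMemoryDescriptor * bmd =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
            kernel_task,
            kIODirectionInOut | kIOMemoryPhysicallyContiguous,
            4096,                           // capacity: one page (illustrative)
            0x00000000FFFFF000ULL);         // page-aligned addresses below 4 GB
    if (bmd)
    {
        void * vaddr = bmd->getBytesNoCopy();   // kernel virtual address
        // ... program DMA using the descriptor's physical segments ...
        bmd->release();
    }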