- kern_return_t kr;
- task_t mapTask = NULL;
- vm_map_t vmmap = NULL;
- mach_vm_address_t highestMask = 0;
- IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
-
- if (!capacity)
-     return false;
-
- _options = options;
- _capacity = capacity;
- _internalFlags = 0;
- _internalReserved = 0;
- _buffer = 0;
-
- _ranges.v64 = IONew(IOAddressRange, 1);
- if (!_ranges.v64)
-     return (false);
- _ranges.v64->address = 0;
- _ranges.v64->length = 0;
-
- // Grab IOMD bits from the Buffer MD options
- iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
-
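- // Derive an alignment and an upper address bound from physicalMask when
- // the caller did not request an explicit alignment.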
- if (physicalMask && (alignment <= 1))
- {
-     alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
-     highestMask = (physicalMask | alignment);
-     alignment++;
-     if (alignment < page_size)
-         alignment = page_size;
- }
-
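- // Sharing, explicit cache modes, and clear-encrypt all apply at page
- // granularity, so such buffers must be at least page aligned.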
- if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
-     alignment = page_size;
-
- if (alignment >= page_size)
-     capacity = round_page(capacity);
-
- if (alignment > page_size)
-     options |= kIOMemoryPhysicallyContiguous;
-
- _alignment = alignment;
-
- if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
-     return false;
-
- // set flags for entry + object create
- vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
-
- // set memory entry cache mode
- switch (options & kIOMapCacheMask)
- {
-     case kIOMapInhibitCache:
-         SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
-         break;
-
-     case kIOMapWriteThruCache:
-         SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
-         break;
-
-     case kIOMapWriteCombineCache:
-         SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
-         break;
-
-     case kIOMapCopybackCache:
-         SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
-         break;
-
-     case kIOMapDefaultCache:
-     default:
-         SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
-         break;
- }
-
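- // Pageable buffers are backed by a named VM entry created below;
- // everything else is allocated as wired memory out of kernel_map.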
- if (options & kIOMemoryPageable)
- {
-     iomdOptions |= kIOMemoryBufferPageable;
-
-     // must create the entry before any pages are allocated
-
-     // set flags for entry + object create
-     memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
-
-     if (options & kIOMemoryPurgeable)
-         memEntryCacheMode |= MAP_MEM_PURGABLE;
- }
- else
- {
-     memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
-     vmmap = kernel_map;
-
-     // Buffers shouldn't auto-prepare; they should be prepared explicitly.
-     // But that was never enforced, so what are you going to do?
-     iomdOptions |= kIOMemoryAutoPrepare;
-
-     /* Allocate a wired-down buffer inside kernel space. */
-
-     if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
-     {
-         _internalFlags |= kInternalFlagPhysical;
-         if (highestMask)
-         {
-             _internalFlags |= kInternalFlagPageSized;
-             capacity = round_page(capacity);
-         }
-         _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
-                       (0 != (options & kIOMemoryPhysicallyContiguous)));
+ task_t mapTask = NULL;
+ vm_map_t vmmap = NULL;
+ mach_vm_address_t highestMask = 0;
+ IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+ IODMAMapSpecification mapSpec;
+ bool mapped = false;
+ bool withCopy = false;
+ bool needZero;
+
+ if (!capacity) {
+     return false;
+ }
+
+ _options = options;
+ _capacity = capacity;
+ _internalFlags = 0;
+ _internalReserved = 0;
+ _buffer = NULL;
+
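+ // A preset _ranges means a caller-supplied buffer is being adopted rather
+ // than allocated; that path is only legal for pageable memory with a task.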
+ if (!_ranges.v64) {
+     _ranges.v64 = IONew(IOAddressRange, 1);
+     if (!_ranges.v64) {
+         return false;
+     }
+     _ranges.v64->address = 0;
+     _ranges.v64->length = 0;
+ } else {
+     if (!_ranges.v64->address) {
+         return false;
+     }
+     if (!(kIOMemoryPageable & options)) {
+         return false;
+     }
+     if (!inTask) {
+         return false;
+     }
+     _buffer = (void *) _ranges.v64->address;
+     withCopy = true;
+ }
+ // make sure super::free doesn't dealloc _ranges before super::init
+ _flags = kIOMemoryAsReference;
+
+ // Grab IOMD bits from the Buffer MD options
+ iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
+
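+ // A system IOMapper means the buffer will be remapped for DMA; mapped or
+ // shared buffers are handed out zero-filled, tracked via needZero.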
+ if (!(kIOMemoryMapperNone & options)) {
+     IOMapper::checkForSystemMapper();
+     mapped = (NULL != IOMapper::gSystem);
+ }
+ needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
+
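+ // The expression below isolates the trailing zero bits of physicalMask:
+ // e.g. physicalMask 0xFFFFF000 gives alignment 0x1000 (one page) and
+ // highestMask 0xFFFFFFFF (allocate below 4GB).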
+ if (physicalMask && (alignment <= 1)) {
+     alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
+     highestMask = (physicalMask | alignment);
+     alignment++;
+     if (alignment < page_size) {
+         alignment = page_size;
+     }