-bool IOBufferMemoryDescriptor::initWithPhysicalMask(
-        task_t inTask,
-        IOOptionBits options,
-        mach_vm_size_t capacity,
-        mach_vm_address_t alignment,
-        mach_vm_address_t physicalMask)
-{
-    kern_return_t kr;
-    task_t mapTask = NULL;
-    vm_map_t vmmap = NULL;
-    mach_vm_address_t highestMask = 0;
-    IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
-    IODMAMapSpecification mapSpec;
-    bool mapped = false;
-    bool needZero;
-
-    if (!capacity)
-        return false;
-
-    _options = options;
-    _capacity = capacity;
-    _internalFlags = 0;
-    _internalReserved = 0;
-    _buffer = 0;
-
-    _ranges.v64 = IONew(IOAddressRange, 1);
-    if (!_ranges.v64)
-        return (false);
-    _ranges.v64->address = 0;
-    _ranges.v64->length = 0;
-    // make sure super::free doesn't dealloc _ranges before super::init
-    _flags = kIOMemoryAsReference;
-
-    // Grab IOMD bits from the Buffer MD options
-    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
-
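-    // Check whether a system mapper (IOMMU) will map this buffer; buffers that
-    // will be mapped are flagged to be zero-filled when allocated.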
-    if (!(kIOMemoryMapperNone & options))
-    {
-        IOMapper::checkForSystemMapper();
-        mapped = (0 != IOMapper::gSystem);
-    }
-    needZero = mapped;
-
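-    // No explicit alignment was requested: derive one from the physical mask.
-    // The span of trailing zero bits in the mask becomes the alignment (at
-    // least one page), and highestMask is the highest usable physical address.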
-    if (physicalMask && (alignment <= 1))
-    {
-        alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
-        highestMask = (physicalMask | alignment);
-        alignment++;
-        if (alignment < page_size)
-            alignment = page_size;
-    }
-
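-    // Sharing, cache-mode, and clear-encrypt options operate on whole pages,
-    // so they force at least page alignment.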
-    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
-        alignment = page_size;
-
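-    // Page-aligned (or larger) requests allocate whole pages; alignments larger
-    // than a page additionally force a physically contiguous allocation.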
-    if (alignment >= page_size)
-        capacity = round_page(capacity);
-
-    if (alignment > page_size)
-        options |= kIOMemoryPhysicallyContiguous;
-
-    _alignment = alignment;
-
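-    // Only pageable buffers may be created on behalf of a non-kernel task.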
-    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
-        return false;
-
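-    // Describe the allocation to the DMA mapper. With a system mapper present,
-    // the physical-mask restriction is expressed as an address-bit limit and
-    // the mask itself is dropped.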
-    bzero(&mapSpec, sizeof(mapSpec));
-    mapSpec.alignment = _alignment;
-    mapSpec.numAddressBits = 64;
-    if (highestMask && mapped)
-    {
-        if (highestMask <= 0xFFFFFFFF)
-            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
-        else
-            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
-        highestMask = 0;
-    }
-
-    // set flags for entry + object create
-    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
-
-    // set memory entry cache mode
-    switch (options & kIOMapCacheMask)
-    {
-        case kIOMapInhibitCache:
-            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
-            break;
-
-        case kIOMapWriteThruCache:
-            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
-            break;
-
-        case kIOMapWriteCombineCache:
-            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
-            break;
-
-        case kIOMapCopybackCache:
-            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
-            break;
-
-        case kIOMapCopybackInnerCache:
-            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
-            break;
-
-        case kIOMapDefaultCache:
-        default:
-            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
-            break;
-    }
-
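-    // Pageable buffers get a named memory entry created up front; non-pageable
-    // buffers are wired down in the kernel map below.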
-    if (options & kIOMemoryPageable)
-    {
-        iomdOptions |= kIOMemoryBufferPageable;
-
-        // must create the entry before any pages are allocated
-
-        // set flags for entry + object create
-        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
-
-        if (options & kIOMemoryPurgeable)
-            memEntryCacheMode |= MAP_MEM_PURGABLE;
-    }
-    else
-    {
-        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
-        vmmap = kernel_map;
-
-        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
-        // But that was never enforced, so what are you going to do?
-        iomdOptions |= kIOMemoryAutoPrepare;
-
-        /* Allocate a wired-down buffer inside kernel space. */
-
-        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
-
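-        // A physically contiguous request only has to be satisfied by a
-        // host-contiguous allocation when no system mapper can remap the pages.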
-        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
-        {
-            contig |= (!mapped);
-            contig |= (0 != (kIOMemoryMapperNone & options));
-#if 0
-            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
-            contig |= true;
-#endif