-    task_t                mapTask = NULL;
-    vm_map_t              vmmap = NULL;
-    mach_vm_address_t     highestMask = 0;
-    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
-    IODMAMapSpecification mapSpec;
-    bool                  mapped = false;
-    bool                  needZero;
-
-    if (!capacity) return false;
-
-    _options          = options;
-    _capacity         = capacity;
-    _internalFlags    = 0;
-    _internalReserved = 0;
-    _buffer           = 0;
-
-    _ranges.v64 = IONew(IOAddressRange, 1);
-    if (!_ranges.v64)
-        return (false);
-    _ranges.v64->address = 0;
-    _ranges.v64->length  = 0;
-    // make sure super::free doesn't dealloc _ranges before super::init
-    _flags = kIOMemoryAsReference;
-
-    // Grab IOMD bits from the Buffer MD options
-    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
-
-    if (!(kIOMemoryMapperNone & options))
-    {
-        IOMapper::checkForSystemMapper();
-        mapped = (0 != IOMapper::gSystem);
-    }
-    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
-
-    if (physicalMask && (alignment <= 1))
-    {
-        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
-        highestMask = (physicalMask | alignment);
-        alignment++;
-        if (alignment < page_size)
-            alignment = page_size;
-    }
-
-    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
-        alignment = page_size;
-
-    if (alignment >= page_size)
-        capacity = round_page(capacity);
-
-    if (alignment > page_size)
-        options |= kIOMemoryPhysicallyContiguous;
-
-    _alignment = alignment;
-
-    if ((capacity + alignment) < _capacity) return (false);
-
-    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
-        return false;
-
-    bzero(&mapSpec, sizeof(mapSpec));
-    mapSpec.alignment      = _alignment;
-    mapSpec.numAddressBits = 64;
-    if (highestMask && mapped)
-    {
-        if (highestMask <= 0xFFFFFFFF)
-            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
-        else
-            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
-        highestMask = 0;
-    }
-
-    // set memory entry cache mode, pageable, purgeable
-    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
-    if (options & kIOMemoryPageable)
-    {
-        iomdOptions |= kIOMemoryBufferPageable;
-        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
-    }
-    else
-    {
-        vmmap = kernel_map;
-
-        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
-        // But that was never enforced, so what are you going to do?
-        iomdOptions |= kIOMemoryAutoPrepare;
-
-        /* Allocate a wired-down buffer inside kernel space. */
-
-        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
-
-        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
-        {
-            contig |= (!mapped);
-            contig |= (0 != (kIOMemoryMapperNone & options));
-#if 0
-            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
-            contig |= true;
-#endif
+    IOBufferMemoryDescriptor * inst;
+    kern_return_t              err;
+    vm_map_copy_t              copy;
+    vm_map_address_t           address;
+
+    copy = NULL;
+    do {
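+        // Single-pass error handling: any failure breaks out to the
+        // status check below.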
+        err = kIOReturnNoMemory;
+        inst = new IOBufferMemoryDescriptor;
+        if (!inst) {
+            break;
+        }
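+        // A single IOAddressRange will describe the copied-out mapping.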
+        inst->_ranges.v64 = IONew(IOAddressRange, 1);
+        if (!inst->_ranges.v64) {
+            break;
+        }
+
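+        // Capture the source range into a VM copy object, leaving the
+        // original mapping intact (src_destroy == false).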
+        err = vm_map_copyin(sourceMap, source, size,
+            false /* src_destroy */, &copy);
+        if (KERN_SUCCESS != err) {
+            break;
+        }
+
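+        // Map the copied pages into the destination task. On success
+        // vm_map_copyout() consumes the copy object, so clear the local
+        // reference to avoid discarding it twice on a later failure.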
+        err = vm_map_copyout(get_task_map(inTask), &address, copy);
+        if (KERN_SUCCESS != err) {
+            break;
+        }
+        copy = NULL;
+
+        inst->_ranges.v64->address = address;
+        inst->_ranges.v64->length  = size;
+
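+        // Finish initialization through the normal init path: page
+        // aligned, with no physical mask restriction.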
+        if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
+            err = kIOReturnError;
+        }
+    } while (false);
+
+    if (KERN_SUCCESS == err) {
+        return inst;