- else { /* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */
- IOVirtualRange *ranges = (IOVirtualRange *) buffers;
-
- /*
- * Initialize the memory descriptor.
- */
-
- _length = 0;
- _pages = 0;
- for (unsigned ind = 0; ind < count; ind++) {
- IOVirtualRange cur = ranges[ind];
-
- _length += cur.length;
- _pages += atop_32(cur.address + cur.length + PAGE_MASK)
- - atop_32(cur.address);
- }
-
- _ranges.v = 0;
- _rangesIsAllocated = !(options & kIOMemoryAsReference);
- _rangesCount = count;
-
- if (options & kIOMemoryAsReference)
- _ranges.v = ranges;
- else {
- _ranges.v = IONew(IOVirtualRange, count);
- if (!_ranges.v)
- return false;
- bcopy(/* from */ ranges, _ranges.v,
- count * sizeof(IOVirtualRange));
- }
+ else {
+ // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
+ // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
+
+ // Initialize the memory descriptor
+ if (options & kIOMemoryAsReference) {
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+
+ // Assign the buffer argument straight into _ranges. C++ does not
+ // allow a cast to the Ranges union type, so the assignment goes
+ // through the .v member; because Ranges is a union of pointers this
+ // also initializes the uio, 64-bit and physical views.
+ _ranges.v = (IOVirtualRange *) buffers;
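+
+ // For reference, Ranges is (approximately) a union of per-type pointer
+ // views -- the exact layout lives in the IOGeneralMemoryDescriptor
+ // declaration, this sketch is only illustrative:
+ //
+ //     union Ranges {
+ //         IOVirtualRange  *v;    // kIOMemoryTypeVirtual
+ //         IOAddressRange  *v64;  // kIOMemoryTypeVirtual64 / Physical64
+ //         IOPhysicalRange *p;    // kIOMemoryTypePhysical
+ //         void            *uio;  // kIOMemoryTypeUIO
+ //     };
+ //
+ // which is why the single assignment above is enough to set every view.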
+ }
+ else {
+#ifndef __LP64__
+ _rangesIsAllocated = true;
+#endif /* !__LP64__ */
+ switch (type)
+ {
+ case kIOMemoryTypeUIO:
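+ // uio_duplicate() is expected to return a freshly allocated copy of the
+ // caller's uio (or NULL if the copy could not be made), so the
+ // descriptor does not depend on the original uio staying alive.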
+ _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
+ if (!_ranges.v)
+ return false;
+ break;
+
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+ case kIOMemoryTypePhysical64:
+ if (count == 1
+ && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
+ ) {
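+ // A single range that ends at or below the 4 GB boundary fits the
+ // 32-bit IOVirtualRange / IOPhysicalRange form used by _singleRange:
+ // e.g. address 0xFFFFF000 with length 0x1000 ends exactly at
+ // 0x100000000 and its last byte, 0xFFFFFFFF, is still a 32-bit
+ // address. Downgrade the type and keep the range inline instead of
+ // allocating a copy.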
+ if (kIOMemoryTypeVirtual64 == type)
+ type = kIOMemoryTypeVirtual;
+ else
+ type = kIOMemoryTypePhysical;
+ _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
+ _rangesIsAllocated = false;
+ _ranges.v = &_singleRange.v;
+ _singleRange.v.address = ((IOAddressRange *) buffers)->address;
+ _singleRange.v.length = ((IOAddressRange *) buffers)->length;
+ break;
+ }
+ _ranges.v64 = IONew(IOAddressRange, count);
+ if (!_ranges.v64)
+ return false;
+ bcopy(buffers, _ranges.v64, count * sizeof(IOAddressRange));
+ break;
+#endif /* !__LP64__ */
+ case kIOMemoryTypeVirtual:
+ case kIOMemoryTypePhysical:
+ if (count == 1) {
+ _flags |= kIOMemoryAsReference;
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+ _ranges.v = &_singleRange.v;
+ } else {
+ _ranges.v = IONew(IOVirtualRange, count);
+ if (!_ranges.v)
+ return false;
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
+ break;
+ }
+ }
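+ // At this point _ranges refers either to caller-owned storage
+ // (kIOMemoryAsReference), to the inline _singleRange, or to a copy this
+ // descriptor allocated above and is expected to release when it is freed.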
+
+ // Walk the vector of ranges to compute the total length and page
+ // count, and (for physical types) the highest page touched
+ Ranges vec = _ranges;
+ UInt32 length = 0;
+ UInt32 pages = 0;
+ for (unsigned ind = 0; ind < count; ind++) {
+ user_addr_t addr;
+ IOPhysicalLength len;
+
+ // getAddrLenForInd() returns the ind'th (addr, len) pair, read from
+ // whichever representation of the ranges matches 'type'
+ getAddrLenForInd(addr, len, type, vec, ind);
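+ // The page count rounds the range out to page boundaries: with 4 KB
+ // pages, for example, addr 0x1800 / len 0x2000 touches pages 1 through
+ // 3, and atop_64(0x1800 + 0x2000 + 0xFFF) - atop_64(0x1800) == 4 - 1 == 3.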
+ pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
+
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
+ {
+ // Use this range's own end here, before len is folded into the
+ // running total, so _highestPage is the highest page actually touched
+ ppnum_t highPage = atop_64(addr + len - 1);
+ if (highPage > _highestPage)
+ _highestPage = highPage;
+ }
+
+ len += length;
+ assert(len >= length); // Check for 32 bit wrap around
+ length = len;
+ }
+ _length = length;
+ _pages = pages;
+ _rangesCount = count;