+
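+    // A buffer created for a user task must be pageable; only kernel_task
+    // may request non-pageable (wired) memory through this path.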
+    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
+        return false;
+
+    bzero(&mapSpec, sizeof(mapSpec));
+    mapSpec.alignment      = _alignment;
+    mapSpec.numAddressBits = 64;
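+    // When a physical-address mask was supplied and the buffer will be
+    // mapped, derive the number of significant address bits from the mask,
+    // e.g. highestMask 0x7FFFFFFF gives 32 - clz(0x7FFFFFFF) = 31 bits.
+    // __builtin_clz(0) is undefined, but both branches below see a non-zero
+    // argument: the enclosing test guarantees highestMask != 0, and the
+    // else branch implies (highestMask >> 32) != 0. Once captured in
+    // mapSpec, the mask is cleared so the constraint is carried forward
+    // by numAddressBits alone.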
+    if (highestMask && mapped)
+    {
+        if (highestMask <= 0xFFFFFFFF)
+            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
+        else
+            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
+        highestMask = 0;
+    }
+
+    // set flags for entry + object create
+    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
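+    // SET_MAP_MEM() (osfmk/mach/memory_object_types.h) packs the chosen
+    // MAP_MEM_* cache mode into the upper byte of this vm_prot_t; the
+    // entry-creation path hands the combined value to
+    // mach_make_memory_entry_64().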
+
+    // set memory entry cache mode
+    switch (options & kIOMapCacheMask)
+    {
+        case kIOMapInhibitCache:
+            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
+            break;
+
+        case kIOMapWriteThruCache:
+            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
+            break;
+
+        case kIOMapWriteCombineCache:
+            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
+            break;
+
+        case kIOMapCopybackCache:
+            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
+            break;
+
+        case kIOMapCopybackInnerCache:
+            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
+            break;
+
+        case kIOMapDefaultCache:
+        default:
+            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
+            break;
+    }
+
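+    // Illustrative caller (not part of this change): a driver reaching this
+    // path with a 32-bit DMA constraint and a write-combined mapping might
+    // use, for example:
+    //   IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
+    //       kIODirectionInOut | kIOMapWriteCombineCache, length, 0xFFFFFFFFULL);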