+    iomdOptions |= kIOMemoryBufferPageable;
+
+    ipc_port_t sharedMem;
+    vm_size_t size = round_page_32(capacity);
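+    // size is now the requested capacity rounded up to a whole number
+    // of VM pages; named memory entries are page-granular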
+
+    // must create the entry before any pages are allocated
+
+    // set flags for entry + object create
+    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
+                                | MAP_MEM_NAMED_CREATE;
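+    // MAP_MEM_NAMED_CREATE asks mach_make_memory_entry() to create a new
+    // backing VM object of this size rather than reference pages already
+    // mapped in the target map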
+
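+    // kIOMemoryPurgeable requests a purgeable backing object, whose pages
+    // the VM system may reclaim under memory pressure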
+    if (options & kIOMemoryPurgeable)
+        memEntryCacheMode |= MAP_MEM_PURGABLE;
+
+    // set memory entry cache mode
+    switch (options & kIOMapCacheMask)
+    {
+        case kIOMapInhibitCache:
+            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
+            break;
+
+        case kIOMapWriteThruCache:
+            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
+            break;
+
+        case kIOMapWriteCombineCache:
+            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
+            break;
+
+        case kIOMapCopybackCache:
+            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
+            break;
+
+        case kIOMapDefaultCache:
+        default:
+            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
+            break;
+    }
+
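+    // create the named entry (and, because of MAP_MEM_NAMED_CREATE, its
+    // backing object) in vmmap; on success sharedMem holds a send right
+    // to the entry and size the actual size of the entry created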
+    kr = mach_make_memory_entry( vmmap,
+                                 &size, 0,
+                                 memEntryCacheMode, &sharedMem,
+                                 NULL );
+
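+    // the entry may come back smaller than requested; anything other than
+    // the full rounded capacity is a failure, so drop the send right we
+    // were just handed and bail out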
+    if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
+        ipc_port_release_send( sharedMem );
+        kr = kIOReturnVMError;
+    }
+    if( KERN_SUCCESS != kr)
+        return( false );
+
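+    // stash the entry's send right as the opaque _memEntry, to be used
+    // when the buffer is later mapped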
+    _memEntry = (void *) sharedMem;
+#if IOALLOCDEBUG
+    debug_iomallocpageable_size += size;
+#endif
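+    // a pageable buffer with no explicit task is owned by the kernel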
+    if ((NULL == inTask) && (options & kIOMemoryPageable))
+        inTask = kernel_task;
+    else if (inTask == kernel_task)