+    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
+        ipc_port_t sharedMem;
+        vm_size_t size = round_page(capacity);
+
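+        // Create a named memory entry (a Mach send right) covering the
+        // buffer so it can later be mapped into a task's address space.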
+        kr = mach_make_memory_entry(vmmap,
+                                    &size, (vm_offset_t)_buffer,
+                                    memEntryCacheMode, &sharedMem,
+                                    NULL );
+
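+        // mach_make_memory_entry() can return an entry smaller than asked
+        // for; one that does not cover the whole rounded capacity is an error.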
+        if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
+            ipc_port_release_send( sharedMem );
+            kr = kIOReturnVMError;
+        }
+        if( KERN_SUCCESS != kr)
+            return( false );
+
+        _memEntry = (void *) sharedMem;
+
+        if( options & kIOMemoryPageable) {
+#if IOALLOCDEBUG
+            debug_iomallocpageable_size += size;
+#endif
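+            // Remember the caller's task for the later mapping; fall back
+            // to kernel_task when no task was supplied.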
+            mapTask = inTask;
+            if (NULL == inTask)
+                inTask = kernel_task;
+        }
+        else if (options & kIOMapCacheMask)
+        {
+            // Prefetch each page to put entries into the pmap
+            volatile UInt8 * startAddr = (UInt8 *)_buffer;
+            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;
+
+            while (startAddr < endAddr)
+            {
+                (void) *startAddr;  // discarded volatile read touches the page
+                startAddr += page_size;
+            }
+        }
+    }