diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
index 1b177591633b43f2853c0c78da0f27788a9c1a06..1c486fa0f7ad6ed916d073830d4a5786327193ba 100644
--- a/iokit/Kernel/IOBufferMemoryDescriptor.cpp
+++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp
@@ -113,7 +113,6 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                mach_vm_address_t alignment,
                                mach_vm_address_t physicalMask)
 {
-    kern_return_t        kr;
     task_t               mapTask = NULL;
     vm_map_t             vmmap = NULL;
     mach_vm_address_t     highestMask = 0;
@@ -122,8 +121,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
     bool                  mapped = false;
     bool                  needZero;
 
-    if (!capacity)
-        return false;
+    if (!capacity) return false;
 
     _options                 = options;
     _capacity         = capacity;
@@ -147,7 +145,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
     }
-    needZero = mapped;
+    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
 
     if (physicalMask && (alignment <= 1))
     {
@@ -184,53 +182,15 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
        highestMask = 0;
     }
 
-    // set flags for entry + object create
-    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
-
-    // set memory entry cache mode
-    switch (options & kIOMapCacheMask)
-    {
-       case kIOMapInhibitCache:
-           SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
-           break;
-
-       case kIOMapWriteThruCache:
-           SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
-           break;
-
-       case kIOMapWriteCombineCache:
-           SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
-           break;
-
-       case kIOMapCopybackCache:
-           SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
-           break;
-
-       case kIOMapCopybackInnerCache:
-           SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
-           break;
-
-       case kIOMapDefaultCache:
-       default:
-           SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
-           break;
-    }
-
+    // set memory entry cache mode, pageable, purgeable
+    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
     if (options & kIOMemoryPageable)
     {
        iomdOptions |= kIOMemoryBufferPageable;
-
-       // must create the entry before any pages are allocated
-
-       // set flags for entry + object create
-       memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
-
-       if (options & kIOMemoryPurgeable)
-           memEntryCacheMode |= MAP_MEM_PURGABLE;
+       if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
     }
     else
     {
-       memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;
 
        // Buffer shouldn't auto prepare they should be prepared explicitly
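The hunk above folds the old SET_MAP_MEM switch into a single bit-field repack: the cache mode carried in the kIOMapCacheMask bits of `options` is shifted down and re-parked in the kIOMemoryBufferCacheShift bits of `iomdOptions`, from where initWithOptions() can recover it when it later builds the memory entry. A minimal standalone sketch of that transform, with placeholder constant values assumed for illustration (the authoritative definitions live in the IOKit headers, e.g. IOKit/IOTypes.h and IOKit/IOMemoryDescriptor.h):

    #include <cstdint>
    #include <cstdio>

    // Assumed placeholder values -- the real constants are defined in the
    // IOKit headers; only the shape of the transform matters here.
    enum : uint32_t {
        kIOMapCacheShift          = 8,
        kIOMapCacheMask           = 0x7u << kIOMapCacheShift,
        kIOMapWriteCombineCache   = 3u << kIOMapCacheShift,
        kIOMemoryBufferCacheShift = 28,
    };

    int main()
    {
        uint32_t options     = kIOMapWriteCombineCache; // caller-requested cache mode
        uint32_t iomdOptions = 0;

        // Same transform as the replacement line in the hunk above: extract
        // the cache field from `options`, park it in the buffer-cache bits.
        iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift)
                           << kIOMemoryBufferCacheShift;

        std::printf("options = 0x%08x  iomdOptions = 0x%08x\n", options, iomdOptions);
        return 0;
    }

With these assumed values, a request of 0x00000300 (write-combine) lands in iomdOptions as 0x30000000.
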
@@ -263,7 +223,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                        capacity, highestMask, alignment, contig);
        }
        else if (needZero
-                 && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
+                 && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
        {
             _internalFlags |= kInternalFlagPageAllocated;
             needZero        = false;
@@ -292,22 +252,10 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
     }
 
     if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
-       ipc_port_t      sharedMem;
        vm_size_t       size = round_page(capacity);
 
-       kr = mach_make_memory_entry(vmmap,
-                                   &size, (vm_offset_t)_buffer,
-                                   memEntryCacheMode, &sharedMem,
-                                   NULL );
-
-       if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
-           ipc_port_release_send( sharedMem );
-           kr = kIOReturnVMError;
-       }
-       if( KERN_SUCCESS != kr)
-           return( false );
-
-       _memEntry = (void *) sharedMem;
+       // initWithOptions will create memory entry
+       iomdOptions |= kIOMemoryPersistent;
 
        if( options & kIOMemoryPageable) {
 #if IOALLOCDEBUG
@@ -352,7 +300,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                return( false );
        }
        reserved->map = createMappingInTask(mapTask, 0, 
-                           kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
+                           kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
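
The final hunk forwards kIOMapPrefault from the creation options into the mapping built for the client task, so a wired buffer's pages can be faulted into that task's map at creation time rather than on first touch. A hedged usage sketch, assuming an IOKit kext environment and a hypothetical client task_t named clientTask (illustration only, not compilable in userland):

    #include <IOKit/IOBufferMemoryDescriptor.h>

    // Illustration only: a wired (non-pageable) buffer created on behalf of
    // a client task; per the change above, kIOMapPrefault in `options` now
    // reaches createMappingInTask(), prefaulting the task mapping up front.
    IOBufferMemoryDescriptor *buf =
        IOBufferMemoryDescriptor::inTaskWithOptions(
            clientTask,                             // hypothetical client task
            kIODirectionInOut | kIOMapPrefault,     // prefault the task mapping
            64 * 1024,                              // capacity
            page_size);                             // alignment

    if (buf) {
        // ... use the buffer, then release it ...
        buf->release();
    }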