/*
* Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR
 * NON-INFRINGEMENT. Please see the License for the specific language
 * governing rights and limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
+#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
+#include "IOKitKernelInternal.h"
+#include "IOCopyMapper.h"
+
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>
vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
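+// High-water mark of physical page numbers allocated so far. It is raised
+// lock-free with OSCompareAndSwap() below and consulted when deciding
+// whether an allocation constrained by a physicalMask can still be
+// satisfied by the general-purpose allocators.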
+volatile ppnum_t gIOHighestAllocatedPage;
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
vm_offset_t alignment,
task_t inTask)
{
- vm_map_t map = 0;
- IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;
+ mach_vm_address_t physicalMask = 0;
+ return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
+}
+
+bool IOBufferMemoryDescriptor::initWithPhysicalMask(
+ task_t inTask,
+ IOOptionBits options,
+ mach_vm_size_t capacity,
+ mach_vm_address_t alignment,
+ mach_vm_address_t physicalMask)
+{
+ kern_return_t kr;
+ task_t mapTask = NULL;
+ vm_map_t vmmap = NULL;
+ addr64_t lastIOAddr;
+ IOAddressRange range;
+ IOOptionBits iomdOptions = kIOMemoryTypeVirtual64;
if (!capacity)
return false;
_physAddrs = 0;
_physSegCount = 0;
_buffer = 0;
+ range.address = 0;
+ range.length = 0;
+ _ranges.v64 = &range;
// Grab the direction and the Auto Prepare bits from the Buffer MD options
iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);
- if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
- alignment = page_size;
+ if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask)) && (alignment < page_size))
+ alignment = page_size;
- if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
- return false;
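+ // Derive a minimum alignment from the zero low bits of the mask:
+ // ((mask ^ PAGE_MASK) & PAGE_MASK) + 1 is 0x1000 for mask 0xFFFFF000
+ // and 0x100 for mask 0xFFFFFF00, since every bit the mask clears
+ // within a page must also be clear in the allocated address.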
+ if (physicalMask && (alignment <= 1))
+ alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;
_alignment = alignment;
+
+ if (((inTask != kernel_task) && !(options & kIOMemoryPageable)) ||
+ (physicalMask && (options & kIOMapCacheMask)))
+ return false;
+
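+ // A physically contiguous request without an explicit mask defaults to
+ // 32-bit addressable memory.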
+ if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
+ physicalMask = 0xFFFFFFFF;
+
+ // set flags for entry + object create
+ vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
+
+ // set memory entry cache mode
+ switch (options & kIOMapCacheMask)
+ {
+ case kIOMapInhibitCache:
+ SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
+ break;
+
+ case kIOMapWriteThruCache:
+ SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
+ break;
+
+ case kIOMapWriteCombineCache:
+ SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
+ break;
+
+ case kIOMapCopybackCache:
+ SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
+ break;
+
+ case kIOMapDefaultCache:
+ default:
+ SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
+ break;
+ }
+
if (options & kIOMemoryPageable)
{
- iomdOptions |= kIOMemoryBufferPageable;
- if (inTask == kernel_task)
+ iomdOptions |= kIOMemoryBufferPageable;
+
+ // must create the entry before any pages are allocated
+
+ // set flags for entry + object create
+ memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
+
+ if (options & kIOMemoryPurgeable)
+ memEntryCacheMode |= MAP_MEM_PURGABLE;
+ }
+ else
+ {
+ memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
+
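+ // Estimate the highest I/O address the normal allocators could hand
+ // back: with a system IOMapper all pages get remapped below 2GB;
+ // without one, use the highest physical page allocated so far.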
+ if (IOMapper::gSystem)
+ // assuming mapped space is 2G
+ lastIOAddr = (1UL << 31) - PAGE_SIZE;
+ else
+ lastIOAddr = ptoa_64(gIOHighestAllocatedPage);
+
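+ // If that estimate can't satisfy physicalMask, take the constrained
+ // path now: allocate under the mask with IOMallocPhysical() and defer
+ // the task mapping (mapTask) until after initWithOptions().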
+ if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
{
- /* Allocate some kernel address space. */
- _buffer = IOMallocPageable(capacity, alignment);
- if (_buffer)
- map = IOPageableMapForAddress((vm_address_t) _buffer);
+ mach_vm_address_t address;
+ iomdOptions &= ~kIOMemoryTypeVirtual64;
+ iomdOptions |= kIOMemoryTypePhysical64;
+
+ address = IOMallocPhysical(capacity, physicalMask);
+ _buffer = (void *) address;
+ if (!_buffer)
+ return false;
+
+ mapTask = inTask;
+ inTask = 0;
}
else
{
- kern_return_t kr;
+ vmmap = kernel_map;
- if( !reserved) {
- reserved = IONew( ExpansionData, 1 );
- if( !reserved)
- return( false );
- }
- map = get_task_map(inTask);
- vm_map_reference(map);
- reserved->map = map;
- kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
- VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
- if( KERN_SUCCESS != kr)
- return( false );
+ // Buffers shouldn't auto-prepare; they should be prepared explicitly.
+ // But that was never enforced, so what are you going to do?
+ iomdOptions |= kIOMemoryAutoPrepare;
- // we have to make sure that these pages don't get copied on fork.
- kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
- if( KERN_SUCCESS != kr)
- return( false );
+ /* Allocate a wired-down buffer inside kernel space. */
+ if (options & kIOMemoryPhysicallyContiguous)
+ _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
+ else if (alignment > 1)
+ _buffer = IOMallocAligned(capacity, alignment);
+ else
+ _buffer = IOMalloc(capacity);
+ if (!_buffer)
+ return false;
}
}
- else
- {
- // @@@ gvdl: Need to remove this
- // Buffer should never auto prepare they should be prepared explicitly
- // But it never was enforced so what are you going to do?
- iomdOptions |= kIOMemoryAutoPrepare;
-
- /* Allocate a wired-down buffer inside kernel space. */
- if (options & kIOMemoryPhysicallyContiguous)
- _buffer = IOMallocContiguous(capacity, alignment, 0);
- else if (alignment > 1)
- _buffer = IOMallocAligned(capacity, alignment);
- else
- _buffer = IOMalloc(capacity);
+
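+ // Physical64 buffers (from IOMallocPhysical) need no named entry; any
+ // other buffer that is pageable or carries an explicit cache mode gets
+ // one so later mappings inherit the right attributes.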
+ if( (kIOMemoryTypePhysical64 != (kIOMemoryTypeMask & iomdOptions))
+ && (options & (kIOMemoryPageable | kIOMapCacheMask))) {
+ ipc_port_t sharedMem;
+ vm_size_t size = round_page_32(capacity);
+
+ kr = mach_make_memory_entry(vmmap,
+ &size, (vm_offset_t)_buffer,
+ memEntryCacheMode, &sharedMem,
+ NULL );
+
+ if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
+ ipc_port_release_send( sharedMem );
+ kr = kIOReturnVMError;
+ }
+ if( KERN_SUCCESS != kr)
+ return( false );
+
+ _memEntry = (void *) sharedMem;
+
+ if( options & kIOMemoryPageable) {
+#if IOALLOCDEBUG
+ debug_iomallocpageable_size += size;
+#endif
+ mapTask = inTask;
+ if (NULL == inTask)
+ inTask = kernel_task;
+ }
+ else if (options & kIOMapCacheMask)
+ {
+ // Prefetch each page to put entries into the pmap
+ volatile UInt8 * startAddr = (UInt8 *)_buffer;
+ volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity;
+
+ while (startAddr < endAddr)
+ {
+ *startAddr;
+ startAddr += page_size;
+ }
+ }
}
- if (!_buffer)
+ range.address = (mach_vm_address_t) _buffer;
+ range.length = capacity;
+
+ if (!super::initWithOptions(&range, 1, 0,
+ inTask, iomdOptions, /* System mapper */ 0))
return false;
- _singleRange.v.address = (vm_address_t) _buffer;
- _singleRange.v.length = capacity;
+ if (physicalMask && !IOMapper::gSystem)
+ {
+ IOMDDMACharacteristics mdSummary;
- if (!super::initWithOptions(&_singleRange.v, 1, 0,
- inTask, iomdOptions, /* System mapper */ 0))
- return false;
+ bzero(&mdSummary, sizeof(mdSummary));
+ IOReturn rtn = dmaCommandOperation(
+ kIOMDGetCharacteristics,
+ &mdSummary, sizeof(mdSummary));
+ if (rtn)
+ return false;
+
+ if (mdSummary.fHighestPage)
+ {
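+ // Raise the allocation high-water mark lock-free: retry the
+ // compare-and-swap until gIOHighestAllocatedPage is at least
+ // fHighestPage (another thread may race it higher).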
+ ppnum_t highest;
+ while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
+ {
+ if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
+ (UInt32 *) &gIOHighestAllocatedPage))
+ break;
+ }
+ lastIOAddr = ptoa_64(mdSummary.fHighestPage);
+ }
+ else
+ lastIOAddr = ptoa_64(gIOLastPage);
+
+ if (lastIOAddr != (lastIOAddr & physicalMask))
+ {
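+ // The buffer landed above what physicalMask allows. _physSegCount
+ // doubles as a retry flag here: the factory methods construct a
+ // second descriptor, and by then the raised gIOHighestAllocatedPage
+ // steers initWithPhysicalMask() down the constrained path.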
+ if (kIOMemoryTypePhysical64 != (_flags & kIOMemoryTypeMask))
+ {
+ // flag a retry
+ _physSegCount = 1;
+ }
+ return false;
+ }
+ }
- if (options & kIOMemoryPageable) {
- kern_return_t kr;
- ipc_port_t sharedMem = (ipc_port_t) _memEntry;
- vm_size_t size = round_page_32(_ranges.v[0].length);
-
- // must create the entry before any pages are allocated
- if( 0 == sharedMem) {
-
- // set memory entry cache
- vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
- switch (options & kIOMapCacheMask)
- {
- case kIOMapInhibitCache:
- SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
- break;
-
- case kIOMapWriteThruCache:
- SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
- break;
-
- case kIOMapWriteCombineCache:
- SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
- break;
-
- case kIOMapCopybackCache:
- SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
- break;
-
- case kIOMapDefaultCache:
- default:
- SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
- break;
- }
-
- kr = mach_make_memory_entry( map,
- &size, _ranges.v[0].address,
- memEntryCacheMode, &sharedMem,
- NULL );
-
- if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
- ipc_port_release_send( sharedMem );
- kr = kIOReturnVMError;
- }
- if( KERN_SUCCESS != kr)
- sharedMem = 0;
- _memEntry = (void *) sharedMem;
- }
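+ // For pageable and mask-constrained buffers, create the mapping into
+ // mapTask only now, after initWithOptions(). map() takes a retain on
+ // the descriptor, so the release() below keeps the count balanced.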
+ if (mapTask)
+ {
+ if (!reserved) {
+ reserved = IONew( ExpansionData, 1 );
+ if( !reserved)
+ return( false );
+ }
+ reserved->map = map(mapTask, 0, kIOMapAnywhere, 0, 0);
+ if (!reserved->map)
+ {
+ _buffer = 0;
+ return( false );
+ }
+ release(); // map took a retain on this
+ mach_vm_address_t buffer = reserved->map->getAddress();
+ _buffer = (void *) buffer;
+ if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
+ _ranges.v64->address = buffer;
}
setLength(capacity);
-
+
return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                            task_t       inTask,
                            IOOptionBits options,
                            vm_size_t    capacity,
                            vm_offset_t  alignment)
{
IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
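+	// A failed init with _physSegCount set means "try once more"; see
+	// the physicalMask checks in initWithPhysicalMask().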
+ bool retry = me->_physSegCount;
me->release();
me = 0;
+ if (retry)
+ {
+ me = new IOBufferMemoryDescriptor;
+ if (me && !me->initWithOptions(options, capacity, alignment, inTask))
+ {
+ me->release();
+ me = 0;
+ }
+ }
+ }
+ return me;
+}
+
+IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
+ task_t inTask,
+ IOOptionBits options,
+ mach_vm_size_t capacity,
+ mach_vm_address_t physicalMask)
+{
+ IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
+
+ if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
+ {
+ bool retry = me->_physSegCount;
+ me->release();
+ me = 0;
+ if (retry)
+ {
+ me = new IOBufferMemoryDescriptor;
+ if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
+ {
+ me->release();
+ me = 0;
+ }
+ }
}
return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                            IOOptionBits options,
vm_size_t capacity,
vm_offset_t alignment)
{
- IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
-
- if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
- me->release();
- me = 0;
- }
- return me;
+ return(IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options, capacity, alignment));
}

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
- if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
- me->release();
- me = 0;
+ if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
+ {
+ bool retry = me->_physSegCount;
+ me->release();
+ me = 0;
+ if (retry)
+ {
+ me = new IOBufferMemoryDescriptor;
+ if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
+ {
+ me->release();
+ me = 0;
+ }
+ }
+
}
return me;
}

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
// Cache all of the relevant information on the stack for use
// after we call super::free()!
- IOOptionBits options = _options;
- vm_size_t size = _capacity;
- void * buffer = _buffer;
- vm_map_t map = 0;
- vm_offset_t alignment = _alignment;
+ IOOptionBits flags = _flags;
+ IOOptionBits options = _options;
+ vm_size_t size = _capacity;
+ void * buffer = _buffer;
+ mach_vm_address_t source = (_ranges.v) ? _ranges.v64->address : 0;
+ IOMemoryMap * map = 0;
+ vm_offset_t alignment = _alignment;
if (reserved)
{
map = reserved->map;
IODelete( reserved, ExpansionData, 1 );
+ if (map)
+ map->release();
}
/* super::free may unwire - deallocate buffer afterwards */
super::free();
- if (buffer)
+ if (options & kIOMemoryPageable)
{
- if (options & kIOMemoryPageable)
- {
- if (map)
- vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
- else
- IOFreePageable(buffer, size);
- }
+#if IOALLOCDEBUG
+ debug_iomallocpageable_size -= round_page_32(size);
+#endif
+ }
+ else if (buffer)
+ {
+ if (kIOMemoryTypePhysical64 == (flags & kIOMemoryTypeMask))
+ IOFreePhysical(source, size);
+ else if (options & kIOMemoryPhysicallyContiguous)
+ IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
+ else if (alignment > 1)
+ IOFreeAligned(buffer, size);
else
- {
- if (options & kIOMemoryPhysicallyContiguous)
- IOFreeContiguous(buffer, size);
- else if (alignment > 1)
- IOFreeAligned(buffer, size);
- else
- IOFree(buffer, size);
- }
+ IOFree(buffer, size);
}
- if (map)
- vm_map_deallocate(map);
}
/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. The specified length
 * must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
assert(length <= _capacity);
_length = length;
- _singleRange.v.length = length;
+ _ranges.v64->length = length;
}
/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
- vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);
+ vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);
+ IOByteCount offset;
assert(_length <= _capacity);
- bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
- actualBytesToCopy);
+
+ offset = _length;
_length += actualBytesToCopy;
- _singleRange.v.length += actualBytesToCopy;
+ _ranges.v64->length += actualBytesToCopy;
+
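+ // Kernel-task buffers can be written directly; buffers that live in a
+ // user task go through writeBytes(), which copies via the descriptor.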
+ if (_task == kernel_task)
+ bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
+ actualBytesToCopy);
+ else
+ writeBytes(offset, bytes, actualBytesToCopy);
return true;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
*/
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
- return (void *)_singleRange.v.address;
+ if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
+ return _buffer;
+ else
+ return (void *)_ranges.v64->address;
}
+
/*
* getBytesNoCopy:
*
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
- if (start < _length && (start + withLength) <= _length)
- return (void *)(_singleRange.v.address + start);
+ IOVirtualAddress address;
+ if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
+ address = (IOVirtualAddress) _buffer;
+ else
+ address = _ranges.v64->address;
+
+ if (start < _length && (start + withLength) <= _length)
+ return (void *)(address + start);
return 0;
}
+/* DEPRECATED */ void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+/* DEPRECATED */ IOByteCount * lengthOfSegment)
+{
+ void * bytes = getBytesNoCopy(offset, 0);
+
+ if (bytes && lengthOfSegment)
+ *lengthOfSegment = _length - offset;
+
+ return bytes;
+}
+
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
-OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
+OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);