/*
* Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
+
+#define _IOMEMORYDESCRIPTOR_INTERNAL_
+
#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
+#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
+#include <libkern/OSDebug.h>
+
+#include "IOKitKernelInternal.h"
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
-__END_DECLS
-
-extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );
+#include <vm/pmap.h>
-#define super IOGeneralMemoryDescriptor
-OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
- IOGeneralMemoryDescriptor);
+__END_DECLS
-bool IOBufferMemoryDescriptor::initWithAddress(
- void * /* address */ ,
- IOByteCount /* withLength */ ,
- IODirection /* withDirection */ )
-{
- return false;
-}
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-bool IOBufferMemoryDescriptor::initWithAddress(
- vm_address_t /* address */ ,
- IOByteCount /* withLength */ ,
- IODirection /* withDirection */ ,
- task_t /* withTask */ )
+enum
{
- return false;
-}
+ kInternalFlagPhysical = 0x00000001,
+ kInternalFlagPageSized = 0x00000002
+};
-bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
- IOPhysicalAddress /* address */ ,
- IOByteCount /* withLength */ ,
- IODirection /* withDirection */ )
-{
- return false;
-}
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
- IOPhysicalRange * /* ranges */ ,
- UInt32 /* withCount */ ,
- IODirection /* withDirection */ ,
- bool /* asReference */ )
-{
- return false;
-}
+#define super IOGeneralMemoryDescriptor
+OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
+ IOGeneralMemoryDescriptor);
-bool IOBufferMemoryDescriptor::initWithRanges(
- IOVirtualRange * /* ranges */ ,
- UInt32 /* withCount */ ,
- IODirection /* withDirection */ ,
- task_t /* withTask */ ,
- bool /* asReference */ )
-{
- return false;
-}
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
IOOptionBits options,
vm_size_t capacity,
- vm_offset_t alignment)
+ vm_offset_t alignment,
+ task_t inTask)
+{
+ mach_vm_address_t physicalMask = 0;
+ return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
+}
+#endif /* !__LP64__ */
+
+bool IOBufferMemoryDescriptor::initWithPhysicalMask(
+ task_t inTask,
+ IOOptionBits options,
+ mach_vm_size_t capacity,
+ mach_vm_address_t alignment,
+ mach_vm_address_t physicalMask)
{
+ kern_return_t kr;
+ task_t mapTask = NULL;
+ vm_map_t vmmap = NULL;
+ mach_vm_address_t highestMask = 0;
+ IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+
if (!capacity)
return false;
- _options = options;
- _capacity = capacity;
- _physAddrs = 0;
- _physSegCount = 0;
- _buffer = 0;
+ _options = options;
+ _capacity = capacity;
+ _internalFlags = 0;
+ _internalReserved = 0;
+ _buffer = 0;
+
+ _ranges.v64 = IONew(IOAddressRange, 1);
+ if (!_ranges.v64)
+ return (false);
+ _ranges.v64->address = 0;
+ _ranges.v64->length = 0;
+    // Mark as-reference so super::free() won't dealloc _ranges if we fail before super::init() runs
+ _flags = kIOMemoryAsReference;
+
+ // Grab IOMD bits from the Buffer MD options
+ iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
+
+ if (physicalMask && (alignment <= 1))
+ {
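+	// ~mask & (mask - 1) leaves just the trailing zero bits of the
+	// physical mask set; the increment below turns that into the natural
+	// alignment the mask implies (e.g. 0x...FFFFF000 -> 4096).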
+ alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
+ highestMask = (physicalMask | alignment);
+ alignment++;
+ if (alignment < page_size)
+ alignment = page_size;
+ }
+
+ if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
+ alignment = page_size;
+
+ if (alignment >= page_size)
+ capacity = round_page(capacity);
- if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
- alignment = page_size;
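+    // Alignment stricter than a page implies a physically contiguous allocation.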
+ if (alignment > page_size)
+ options |= kIOMemoryPhysicallyContiguous;
_alignment = alignment;
+
+ if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
+ return false;
+
+ // set flags for entry + object create
+ vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
+
+ // set memory entry cache mode
+ switch (options & kIOMapCacheMask)
+ {
+ case kIOMapInhibitCache:
+ SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
+ break;
+
+ case kIOMapWriteThruCache:
+ SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
+ break;
+
+ case kIOMapWriteCombineCache:
+ SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
+ break;
+
+ case kIOMapCopybackCache:
+ SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
+ break;
+
+ case kIOMapCopybackInnerCache:
+ SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
+ break;
+
+ case kIOMapDefaultCache:
+ default:
+ SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
+ break;
+ }
+
if (options & kIOMemoryPageable)
- /* Allocate some kernel address space. */
- _buffer = IOMallocPageable(capacity, alignment);
- /* Allocate a wired-down buffer inside kernel space. */
- else if (options & kIOMemoryPhysicallyContiguous)
- _buffer = IOMallocContiguous(capacity, alignment, 0);
- else if (alignment > 1)
- _buffer = IOMallocAligned(capacity, alignment);
+ {
+ iomdOptions |= kIOMemoryBufferPageable;
+
+ // must create the entry before any pages are allocated
+
+ // set flags for entry + object create
+ memEntryCacheMode |= MAP_MEM_NAMED_CREATE;
+
+ if (options & kIOMemoryPurgeable)
+ memEntryCacheMode |= MAP_MEM_PURGABLE;
+ }
else
- _buffer = IOMalloc(capacity);
+ {
+ memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
+ vmmap = kernel_map;
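+	// NAMED_REUSE should make the memory entry wrap the existing kernel
+	// allocation rather than back it with a new VM object.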
+
+	// Buffers shouldn't auto-prepare; they should be prepared explicitly.
+	// But that was never enforced, so preserve the historical behavior.
+ iomdOptions |= kIOMemoryAutoPrepare;
+
+ /* Allocate a wired-down buffer inside kernel space. */
+
+ if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
+ {
+ _internalFlags |= kInternalFlagPhysical;
+ if (highestMask)
+ {
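+		// Note the rounding so free() can round the size the same way.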
+ _internalFlags |= kInternalFlagPageSized;
+ capacity = round_page(capacity);
+ }
+ _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
+ (0 != (options & kIOMemoryPhysicallyContiguous)));
+ }
+ else if (alignment > 1)
+ {
+ _buffer = IOMallocAligned(capacity, alignment);
+ }
+ else
+ {
+ _buffer = IOMalloc(capacity);
+ }
+
+ if (!_buffer)
+ {
+ return false;
+ }
+ }
- if (!_buffer)
- return false;
+ if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
+ ipc_port_t sharedMem;
+ vm_size_t size = round_page(capacity);
+
+ kr = mach_make_memory_entry(vmmap,
+ &size, (vm_offset_t)_buffer,
+ memEntryCacheMode, &sharedMem,
+ NULL );
+
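+	// A short entry means the range wasn't fully covered; fail rather
+	// than hand back a partially shareable buffer.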
+ if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
+ ipc_port_release_send( sharedMem );
+ kr = kIOReturnVMError;
+ }
+ if( KERN_SUCCESS != kr)
+ return( false );
+
+ _memEntry = (void *) sharedMem;
+
+ if( options & kIOMemoryPageable) {
+#if IOALLOCDEBUG
+ debug_iomallocpageable_size += size;
+#endif
+ mapTask = inTask;
+ if (NULL == inTask)
+ inTask = kernel_task;
+ }
+ else if (options & kIOMapCacheMask)
+ {
+ // Prefetch each page to put entries into the pmap
+ volatile UInt8 * startAddr = (UInt8 *)_buffer;
+ volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity;
+
+ while (startAddr < endAddr)
+ {
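+	    // A volatile read touches the page without being optimized away.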
+ *startAddr;
+ startAddr += page_size;
+ }
+ }
+ }
- _singleRange.v.address = (vm_address_t) _buffer;
- _singleRange.v.length = capacity;
+    _ranges.v64->address = (mach_vm_address_t) _buffer;
+ _ranges.v64->length = _capacity;
- if (!super::initWithRanges(&_singleRange.v, 1,
- (IODirection) (options & kIOMemoryDirectionMask),
- kernel_task, true))
+ if (!super::initWithOptions(_ranges.v64, 1, 0,
+ inTask, iomdOptions, /* System mapper */ 0))
return false;
- if (options & kIOMemoryPageable) {
- _flags |= kIOMemoryRequiresWire;
-
- kern_return_t kr;
- ipc_port_t sharedMem = (ipc_port_t) _memEntry;
- vm_size_t size = _ranges.v[0].length;
-
- // must create the entry before any pages are allocated
- if( 0 == sharedMem) {
- kr = mach_make_memory_entry( IOPageableMapForAddress( _ranges.v[0].address ),
- &size, _ranges.v[0].address,
- VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
- NULL );
- if( (KERN_SUCCESS == kr) && (size != _ranges.v[0].length)) {
- ipc_port_release_send( sharedMem );
- kr = kIOReturnVMError;
- }
- if( KERN_SUCCESS != kr)
- sharedMem = 0;
- _memEntry = (void *) sharedMem;
- }
-
- } else {
- /* Precompute virtual-to-physical page mappings. */
- vm_address_t inBuffer = (vm_address_t) _buffer;
- _physSegCount = atop(trunc_page(inBuffer + capacity - 1) -
- trunc_page(inBuffer)) + 1;
- _physAddrs = IONew(IOPhysicalAddress, _physSegCount);
- if (!_physAddrs)
- return false;
+ if (mapTask)
+ {
+ if (!reserved) {
+ reserved = IONew( ExpansionData, 1 );
+ if( !reserved)
+ return( false );
+ }
+ reserved->map = createMappingInTask(mapTask, 0,
+ kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
+ if (!reserved->map)
+ {
+ _buffer = 0;
+ return( false );
+ }
+ release(); // map took a retain on this
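+	// Keep our own reference on the map and detach it from the mapping
+	// list; it is now owned solely through 'reserved'.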
+ reserved->map->retain();
+ removeMapping(reserved->map);
+ mach_vm_address_t buffer = reserved->map->getAddress();
+ _buffer = (void *) buffer;
+ if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
+ _ranges.v64->address = buffer;
+ }
+
+ setLength(_capacity);
+
+ return true;
+}
+
+IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
+ task_t inTask,
+ IOOptionBits options,
+ vm_size_t capacity,
+ vm_offset_t alignment)
+{
+ IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
- inBuffer = trunc_page(inBuffer);
- for (unsigned i = 0; i < _physSegCount; i++) {
- _physAddrs[i] = pmap_extract(get_task_pmap(kernel_task), inBuffer);
- assert(_physAddrs[i]); /* supposed to be wired */
- inBuffer += page_size;
- }
+ if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
+ me->release();
+ me = 0;
}
+ return me;
+}
- setLength(capacity);
+IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
+ task_t inTask,
+ IOOptionBits options,
+ mach_vm_size_t capacity,
+ mach_vm_address_t physicalMask)
+{
+ IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
- return true;
+ if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
+ {
+ me->release();
+ me = 0;
+ }
+ return me;
+}
+
+#ifndef __LP64__
+bool IOBufferMemoryDescriptor::initWithOptions(
+ IOOptionBits options,
+ vm_size_t capacity,
+ vm_offset_t alignment)
+{
+ return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
+#endif /* !__LP64__ */
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
IOOptionBits options,
vm_size_t capacity,
- vm_offset_t alignment = 1)
+ vm_offset_t alignment)
{
IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
- if (me && !me->initWithOptions(options, capacity, alignment)) {
+ if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
me->release();
me = 0;
}
inCapacity, inContiguous ? inCapacity : 1 ));
}
+#ifndef __LP64__
/*
* initWithBytes:
*
IODirection inDirection,
bool inContiguous)
{
- if (!initWithOptions(
- inDirection | kIOMemoryUnshared
- | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
- inLength, inLength ))
+ if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
+ | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
+ inLength, inLength, (mach_vm_address_t)0))
return false;
// start out with no data
return true;
}
+#endif /* !__LP64__ */
/*
* withBytes:
{
IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;
- if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
- me->release();
- me = 0;
+ if (me && !me->initWithPhysicalMask(
+ kernel_task, inDirection | kIOMemoryUnshared
+ | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
+ inLength, inLength, 0 ))
+ {
+ me->release();
+ me = 0;
+ }
+
+ if (me)
+ {
+ // start out with no data
+ me->setLength(0);
+
+ if (!me->appendBytes(inBytes, inLength))
+ {
+ me->release();
+ me = 0;
+ }
}
return me;
}
*/
void IOBufferMemoryDescriptor::free()
{
- IOOptionBits options = _options;
- vm_size_t size = _capacity;
- void * buffer = _buffer;
- vm_offset_t alignment = _alignment;
-
- if (_physAddrs)
- IODelete(_physAddrs, IOPhysicalAddress, _physSegCount);
+ // Cache all of the relevant information on the stack for use
+ // after we call super::free()!
+ IOOptionBits flags = _flags;
+ IOOptionBits internalFlags = _internalFlags;
+ IOOptionBits options = _options;
+ vm_size_t size = _capacity;
+ void * buffer = _buffer;
+ IOMemoryMap * map = 0;
+ IOAddressRange * range = _ranges.v64;
+ vm_offset_t alignment = _alignment;
+
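+    // Mirror the rounding applied to the capacity at allocation time.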
+ if (alignment >= page_size)
+ size = round_page(size);
+
+ if (reserved)
+ {
+ map = reserved->map;
+ IODelete( reserved, ExpansionData, 1 );
+ if (map)
+ map->release();
+ }
/* super::free may unwire - deallocate buffer afterwards */
super::free();
- if (buffer) {
- if (options & kIOMemoryPageable)
- IOFreePageable(buffer, size);
- else {
- if (options & kIOMemoryPhysicallyContiguous)
- IOFreeContiguous(buffer, size);
- else if (alignment > 1)
- IOFreeAligned(buffer, size);
- else
- IOFree(buffer, size);
+ if (options & kIOMemoryPageable)
+ {
+#if IOALLOCDEBUG
+ debug_iomallocpageable_size -= round_page(size);
+#endif
+ }
+ else if (buffer)
+ {
+ if (internalFlags & kInternalFlagPhysical)
+ {
+ if (kInternalFlagPageSized & internalFlags)
+ size = round_page(size);
+ IOKernelFreePhysical((mach_vm_address_t) buffer, size);
}
+ else if (alignment > 1)
+ IOFreeAligned(buffer, size);
+ else
+ IOFree(buffer, size);
}
+ if (range && (kIOMemoryAsReference & flags))
+ IODelete(range, IOAddressRange, 1);
}
/*
assert(length <= _capacity);
_length = length;
- _singleRange.v.length = length;
+ _ranges.v64->length = length;
}
/*
*/
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
- _direction = direction;
+ _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
+#ifndef __LP64__
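+    // Keep the legacy _direction field in sync for 32-bit clients.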
+ _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
+#endif /* !__LP64__ */
}
/*
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
- vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);
+ vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);
+ IOByteCount offset;
assert(_length <= _capacity);
- bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
- actualBytesToCopy);
+
+ offset = _length;
_length += actualBytesToCopy;
- _singleRange.v.length += actualBytesToCopy;
+ _ranges.v64->length += actualBytesToCopy;
+
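+    // Kernel buffers can be copied into directly; task-mapped buffers
+    // must go through writeBytes().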
+ if (_task == kernel_task)
+ bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
+ actualBytesToCopy);
+ else
+ writeBytes(offset, bytes, actualBytesToCopy);
return true;
}
*/
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
- return (void *)_singleRange.v.address;
+ if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
+ return _buffer;
+ else
+ return (void *)_ranges.v64->address;
}
+
/*
* getBytesNoCopy:
*
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
- if (start < _length && (start + withLength) <= _length)
- return (void *)(_singleRange.v.address + start);
+ IOVirtualAddress address;
+ if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
+ address = (IOVirtualAddress) _buffer;
+ else
+ address = _ranges.v64->address;
+
+ if (start < _length && (start + withLength) <= _length)
+ return (void *)(address + start);
return 0;
}
-/*
- * getPhysicalSegment:
- *
- * Get the physical address of the buffer, relative to the current position.
- * If the current position is at the end of the buffer, a zero is returned.
- */
-IOPhysicalAddress
-IOBufferMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
- IOByteCount * lengthOfSegment)
+#ifndef __LP64__
+void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+ IOByteCount * lengthOfSegment)
{
- IOPhysicalAddress physAddr;
-
- if( offset != _position)
- setPosition( offset );
-
- assert(_position <= _length);
-
- /* Fail gracefully if the position is at (or past) the end-of-buffer. */
- if (_position >= _length) {
- *lengthOfSegment = 0;
- return 0;
- }
-
- if (_options & kIOMemoryPageable) {
- physAddr = super::getPhysicalSegment(offset, lengthOfSegment);
-
- } else {
- /* Compute the largest contiguous physical length possible. */
- vm_address_t actualPos = _singleRange.v.address + _position;
- vm_address_t actualPage = trunc_page(actualPos);
- unsigned physInd = atop(actualPage-trunc_page(_singleRange.v.address));
+ void * bytes = getBytesNoCopy(offset, 0);
- vm_size_t physicalLength = actualPage + page_size - actualPos;
- for (unsigned index = physInd + 1; index < _physSegCount &&
- _physAddrs[index] == _physAddrs[index-1] + page_size; index++) {
- physicalLength += page_size;
- }
-
- /* Clip contiguous physical length at the end-of-buffer. */
- if (physicalLength > _length - _position)
- physicalLength = _length - _position;
-
- *lengthOfSegment = physicalLength;
- physAddr = _physAddrs[physInd] + (actualPos - actualPage);
- }
+ if (bytes && lengthOfSegment)
+ *lengthOfSegment = _length - offset;
- return physAddr;
+ return bytes;
}
+#endif /* !__LP64__ */
+#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
+#else /* !__LP64__ */
+OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
+OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
+#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);