/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
void ipc_port_release_send(ipc_port_t port);

vm_map_t IOPageableMapForAddress( vm_address_t address );
#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);
bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment,
                               task_t       inTask)
{
    kern_return_t kr;
    vm_map_t      vmmap = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    _capacity = capacity;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
        // set memory entry cache mode
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }
        kr = mach_make_memory_entry( vmmap,
                    &size, 0,
                    memEntryCacheMode, &sharedMem,
                    NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;
        debug_iomallocpageable_size += size;

        if ((NULL == inTask) && (options & kIOMemoryPageable))
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffer should never auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;
        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);

        if (!_buffer)
            return false;
    }

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;
    if (options & kIOMemoryPageable)
    {
        kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, round_page_32(capacity));
        if (KERN_SUCCESS != kr)
            return( false );

        _singleRange.v.address = (vm_address_t) _buffer;
    }

    return true;
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                            task_t       inTask,
                            IOOptionBits options,
                            vm_size_t    capacity,
                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
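
/*
 * Illustrative usage sketch (not part of this class): how a driver might
 * allocate a wired, physically contiguous DMA buffer in the kernel task
 * with inTaskWithOptions().  The size constant and the surrounding driver
 * code are assumptions made for the example.
 *
 *     enum { kExampleBufferSize = 4096 };               // hypothetical size
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(
 *             kernel_task,                              // task to map into
 *             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *             kExampleBufferSize,                       // capacity
 *             page_size);                               // alignment
 *     if (buf) {
 *         void * p = buf->getBytesNoCopy();             // kernel virtual address
 *         // ... fill or read the buffer, hand it to hardware, etc. ...
 *         buf->release();
 *     }
 */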
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                            IOOptionBits options,
                            vm_size_t    capacity,
                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}
/*
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
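
/*
 * Illustrative usage sketch (assumed driver-side code, not part of this
 * class): withCapacity() sizes the buffer once; the descriptor can then be
 * reused for transfers of varying length via setLength().
 *
 *     IOBufferMemoryDescriptor * md =
 *         IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOut, false);
 *     if (md) {
 *         md->setLength(512);          // use only part of the capacity
 *         // ... issue the 512-byte transfer ...
 *         md->release();
 *     }
 */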
/*
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
/*
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
        me->release();
        me = 0;
    }
    return me;
}
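
/*
 * Illustrative usage sketch (assumed caller code, not part of this class):
 * withBytes() copies the supplied data, so the source buffer need not stay
 * alive after the call.  'replyPacket' is a hypothetical local array.
 *
 *     UInt8 replyPacket[64];
 *     // ... fill replyPacket ...
 *     IOBufferMemoryDescriptor * md =
 *         IOBufferMemoryDescriptor::withBytes(replyPacket, sizeof(replyPacket),
 *                                             kIODirectionOut, false);
 *     if (md) {
 *         // length and capacity are both sizeof(replyPacket) here
 *         // ... use md, then ...
 *         md->release();
 *     }
 */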
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     vmmap     = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);

        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (options & kIOMemoryPhysicallyContiguous)
            IOFreeContiguous(buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}
/*
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}
/*
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
/*
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
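
/*
 * Illustrative usage sketch (assumed caller code, not part of this class):
 * the comments above describe reusing one descriptor for transfers of
 * different sizes and directions instead of creating new ones.
 *
 *     // 'md' is a previously created IOBufferMemoryDescriptor (assumption)
 *     md->setLength(256);                  // shorter transfer than the capacity
 *     md->setDirection(kIODirectionIn);    // now use it for an inbound transfer
 *     // ... issue the transfer ...
 */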
/*
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          /* bytes */ actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
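
/*
 * Illustrative usage sketch (assumed caller code, not part of this class):
 * building up a buffer incrementally; appendBytes() advances the length and
 * the range length, and stops copying at the capacity.
 *
 *     IOBufferMemoryDescriptor * md =
 *         IOBufferMemoryDescriptor::withCapacity(64, kIODirectionOut, false);
 *     if (md) {
 *         UInt8 header[8];                      // hypothetical pieces of a message
 *         UInt8 payload[32];
 *         md->setLength(0);                     // start empty
 *         md->appendBytes(header,  sizeof(header));
 *         md->appendBytes(payload, sizeof(payload));
 *         // md->getLength() is now 40
 *         md->release();
 *     }
 */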
/*
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}
/*
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
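
/*
 * Illustrative usage sketch (assumed caller code, not part of this class):
 * the ranged getBytesNoCopy() returns 0 unless the requested window lies
 * entirely within the current length, so the result should be checked.
 *
 *     // 'md' is a previously created IOBufferMemoryDescriptor (assumption)
 *     UInt32 * word = (UInt32 *) md->getBytesNoCopy(16, sizeof(UInt32));
 *     if (word) {
 *         // safe: bytes 16..19 are within the descriptor's current length
 *     }
 */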
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);