/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
void ipc_port_release_send(ipc_port_t port);

vm_map_t IOPageableMapForAddress( vm_address_t address );

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);
/*
 * The inherited initializers below are not supported by
 * IOBufferMemoryDescriptor; each of them simply fails.  Use
 * initWithOptions / withOptions / withCapacity / withBytes instead.
 */

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    kern_return_t kr;
    vm_map_t      map = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options  = options;
    _capacity = capacity;
    _buffer   = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            if (!reserved)
            {
                reserved = IONew( ExpansionData, 1 );
                if (!reserved)
                    return false;
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return false;

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return false;
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffer should never auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t  size      = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            kr = mach_make_memory_entry( map,
                        &size, _ranges.v[0].address,
                        memEntryCacheMode, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;

            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
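
// Illustrative sketch (not part of the original file, excluded from the build):
// how a caller might use inTaskWithOptions() to allocate a pageable buffer in a
// user task's address space.  The helper name and the 'userTask' handle are
// hypothetical; a real driver would obtain the task from its client.
#if 0
static IOBufferMemoryDescriptor *
exampleUserTaskBuffer(task_t userTask)
{
    IOBufferMemoryDescriptor *bmd = IOBufferMemoryDescriptor::inTaskWithOptions(
                        userTask,
                        kIODirectionOutIn | kIOMemoryPageable,
                        16 * page_size,     // capacity in bytes
                        page_size);         // alignment
    return bmd;                             // caller must release() when done
}
#endif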
bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}
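
// Illustrative sketch (not part of the original file, excluded from the build):
// allocating a wired, physically contiguous kernel buffer through withOptions().
// The helper name and sizes are hypothetical.
#if 0
static IOBufferMemoryDescriptor *
exampleContiguousDMABuffer(void)
{
    return IOBufferMemoryDescriptor::withOptions(
                        kIODirectionOutIn | kIOMemoryPhysicallyContiguous,
                        4096,           // capacity in bytes
                        page_size);     // alignment
}
#endif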
/*
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
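
// Illustrative sketch (not part of the original file, excluded from the build):
// withCapacity() is the simpler front end shown above -- just a size, a
// direction, and an optional contiguity flag.  The helper name is hypothetical.
#if 0
static IOBufferMemoryDescriptor *
exampleCapacityBuffer(void)
{
    // 8 KB non-contiguous buffer; length starts out equal to the capacity.
    return IOBufferMemoryDescriptor::withCapacity(8192, kIODirectionIn, false);
}
#endif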
/*
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
/*
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)) {
        me->release();
        me = 0;
    }
    return me;
}
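
// Illustrative sketch (not part of the original file, excluded from the build):
// preloading a descriptor with the contents of an existing command block before
// a transfer.  The 'cmd' pointer and helper name are hypothetical.
#if 0
static IOBufferMemoryDescriptor *
exampleWithBytes(const void *cmd, vm_size_t cmdLen)
{
    // Length and capacity both end up equal to cmdLen; the bytes are copied.
    return IOBufferMemoryDescriptor::withBytes(cmd, cmdLen, kIODirectionOut, false);
}
#endif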
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     map       = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}
/*
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}
/*
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
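
// Illustrative sketch (not part of the original file, excluded from the build):
// reusing one descriptor for transfers of different sizes rather than creating
// a new descriptor for each transfer.  The helper name is hypothetical.
#if 0
static void exampleReuse(IOBufferMemoryDescriptor *bmd, vm_size_t transferSize)
{
    if (transferSize <= bmd->getCapacity())
        bmd->setLength(transferSize);   // must never exceed the capacity
}
#endif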
/*
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
/*
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
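
// Illustrative sketch (not part of the original file, excluded from the build):
// building up a buffer incrementally with appendBytes().  Note that copies are
// silently truncated at the capacity.  The helper name is hypothetical.
#if 0
static void exampleAppend(IOBufferMemoryDescriptor *bmd,
                          const void *hdr, vm_size_t hdrLen,
                          const void *payload, vm_size_t payloadLen)
{
    bmd->setLength(0);                  // start from an empty buffer
    bmd->appendBytes(hdr, hdrLen);
    bmd->appendBytes(payload, payloadLen);
}
#endif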
/*
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}
/*
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
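
// Illustrative sketch (not part of the original file, excluded from the build):
// peeking at a sub-range of the buffer without copying; the call returns 0 when
// the requested range falls outside the current length.  Helper name is hypothetical.
#if 0
static const void *
examplePeek(IOBufferMemoryDescriptor *bmd, vm_size_t offset, vm_size_t len)
{
    return bmd->getBytesNoCopy(offset, len);
}
#endif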
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);