/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);

vm_map_t IOPageableMapForAddress(vm_address_t address);
__END_DECLS
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

volatile ppnum_t gIOHighestAllocatedPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);
/*
 * The inherited initWithAddress/initWithRanges variants don't apply here:
 * a buffer descriptor always owns its backing allocation, so each override
 * simply fails.
 */
bool IOBufferMemoryDescriptor::initWithAddress(
    void *      /* address */,
    IOByteCount /* withLength */,
    IODirection /* withDirection */)
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithAddress(
    vm_address_t /* address */,
    IOByteCount  /* withLength */,
    IODirection  /* withDirection */,
    task_t       /* withTask */)
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
    IOPhysicalAddress /* address */,
    IOByteCount       /* withLength */,
    IODirection       /* withDirection */)
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
    IOPhysicalRange * /* ranges */,
    UInt32            /* withCount */,
    IODirection       /* withDirection */,
    bool              /* asReference */)
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithRanges(
    IOVirtualRange * /* ranges */,
    UInt32           /* withCount */,
    IODirection      /* withDirection */,
    task_t           /* withTask */,
    bool             /* asReference */)
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;

    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t kr;
    addr64_t      lastIOAddr;
    vm_map_t      vmmap       = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physSegCount = 0;
    _buffer       = 0;
    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;
    // Pageable buffers are the only kind that may be created for a
    // non-kernel task.
    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;
    // Derive a power-of-two alignment from the low zero bits of the mask.
    if (physicalMask && (alignment <= 1))
        alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;
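    // Worked example (illustrative): with 4KB pages, PAGE_MASK is 0xFFF.
    // A physicalMask of 0xFFFFF000 gives ((0xFFFFF000 ^ 0xFFF) & 0xFFF) + 1
    // = (0xFFF & 0xFFF) + 1 = 0x1000, i.e. page alignment; a mask of
    // 0xFFFFFFC0 would likewise yield 0x40 (64-byte) alignment.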
    // A contiguous request with no explicit mask must still land in
    // 32-bit physical space.
    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
        physicalMask = 0xFFFFFFFF;
    _alignment = alignment;

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);
        // Must create the entry before any pages are allocated.

        // Set flags for entry + object create.
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
        // Set the memory entry cache mode.
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }
        kr = mach_make_memory_entry(vmmap,
                                    &size, 0,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page_32(capacity)))
        {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return false;

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if (NULL == inTask)
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if (!reserved)
            {
                reserved = IONew(ExpansionData, 1);
                if (!reserved)
                    return false;
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        if (IOMapper::gSystem)
            // Assume the system mapper's mapped space is 2GB.
            lastIOAddr = (1UL << 31) - PAGE_SIZE;
        else
            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);
        if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
        {
            // Pages already allocated may sit above the mask; allocate by
            // physical address instead.
            mach_vm_address_t address;

            iomdOptions &= ~kIOMemoryTypeVirtual;
            iomdOptions |= kIOMemoryTypePhysical;

            address = IOMallocPhysical(capacity, physicalMask);
            _buffer = (void *) address;
            if (!_buffer)
                return false;
            if (inTask == kernel_task)
            {
                vmmap = kernel_map;
            }
            else if (NULL != inTask)
            {
                if (!reserved)
                {
                    reserved = IONew(ExpansionData, 1);
                    if (!reserved)
                        return false;
                }
                vmmap = get_task_map(inTask);
                vm_map_reference(vmmap);
                reserved->map = vmmap;
            }
            inTask = 0;
        }
        else
        {
            // Buffers shouldn't auto-prepare; they should be prepared
            // explicitly. That was never enforced, though, so preserve the
            // old behavior.
            iomdOptions |= kIOMemoryAutoPrepare;

            /* Allocate a wired-down buffer inside kernel space. */
            if (options & kIOMemoryPhysicallyContiguous)
                _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
            else if (alignment > 1)
                _buffer = IOMallocAligned(capacity, alignment);
            else
                _buffer = IOMalloc(capacity);
            if (!_buffer)
                return false;
        }
    }
    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;
    if (physicalMask && !IOMapper::gSystem)
    {
        IOMDDMACharacteristics mdSummary;

        bzero(&mdSummary, sizeof(mdSummary));
        IOReturn rtn = dmaCommandOperation(
                            kIOMDGetCharacteristics,
                            &mdSummary, sizeof(mdSummary));
        if (rtn)
            return false;
        if (mdSummary.fHighestPage)
        {
            ppnum_t highest;
            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
            {
                // Publish the new high-water mark; loop if another thread
                // raced us.
                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
                                     (UInt32 *) &gIOHighestAllocatedPage))
                    break;
            }
            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
        }
        else
            lastIOAddr = ptoa_64(gIOLastPage);

        if (lastIOAddr != (lastIOAddr & physicalMask))
        {
            if (kIOMemoryTypePhysical != (_flags & kIOMemoryTypeMask))
            {
                // Flag a retry so the inTaskWith* factories can allocate
                // again above the raised high-water mark.
                _physSegCount = 1;
            }
            return false;
        }
    }
    if (vmmap)
    {
        kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, capacity);
        if (KERN_SUCCESS != kr)
        {
            _buffer = 0;
            return false;
        }

        if (kIOMemoryTypeVirtual & iomdOptions)
            _singleRange.v.address = (vm_address_t) _buffer;
    }

    setLength(capacity);

    return true;
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask))
    {
        // A physical-mask failure flags one retry; the second attempt
        // allocates above the raised high-water mark.
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithOptions(options, capacity, alignment, inTask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
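/*
 * Usage sketch (illustrative only; names other than the IOKit calls are
 * hypothetical): a driver allocating a page-aligned, pageable buffer
 * shared with a user task might write:
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(
 *             userTask,                       // hypothetical task_t
 *             kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
 *             2 * page_size,                  // capacity
 *             page_size);                     // alignment
 *     if (buf) {
 *         // ... map / prepare / use ...
 *         buf->release();
 *     }
 */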
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
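/*
 * Usage sketch (illustrative only): hardware whose DMA engine can only
 * address the low 4GB could request a suitably constrained buffer with:
 *
 *     IOBufferMemoryDescriptor * dmaBuf =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *             kernel_task,
 *             kIODirectionOutIn | kIOMemoryPhysicallyContiguous,
 *             4096,                        // capacity
 *             0x00000000FFFFFFFFULL);      // physical mask: below 4GB
 *     if (dmaBuf) {
 *         // ... program the device with dmaBuf's physical segments ...
 *         dmaBuf->release();
 *     }
 */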
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithOptions(options, capacity, alignment, kernel_task));
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options,
                                                        capacity, alignment));
}
/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                 | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1));
}
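/*
 * Usage sketch (illustrative only): withCapacity is the simplest factory;
 * note above that a contiguous request also forces capacity-sized alignment.
 *
 *     IOBufferMemoryDescriptor * scratch =
 *         IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOutIn, false);
 *     if (scratch) {
 *         scratch->setLength(512);    // transfer only the first half
 *         scratch->release();
 *     }
 */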
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
            inDirection | kIOMemoryUnshared
             | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
            inLength, inLength))
        return false;

    // Start out with no data, then copy the caller's bytes in.
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
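/*
 * Usage sketch (illustrative only): preloading a descriptor with a command
 * block that a device will fetch by DMA; `DeviceCommand` is hypothetical.
 *
 *     struct DeviceCommand cmd = { ... };
 *     IOBufferMemoryDescriptor * cmdBuf =
 *         IOBufferMemoryDescriptor::withBytes(&cmd, sizeof(cmd),
 *                                             kIODirectionOut, true);
 *     if (cmdBuf) {
 *         // length == capacity == sizeof(cmd); bytes already copied in
 *         cmdBuf->release();
 *     }
 */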
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags     = _flags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOVirtualAddress source    = _singleRange.v.address;
    vm_map_t         vmmap     = 0;
    vm_offset_t      alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete(reserved, ExpansionData, 1);
    }
    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (kIOMemoryTypePhysical == (flags & kIOMemoryTypeMask))
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            IOFreePhysical((mach_vm_address_t) source, size);
        }
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}
/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}
/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
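/*
 * Usage sketch (illustrative only): reusing one descriptor for transfers
 * of different sizes instead of allocating a new one each time.
 *
 *     IOBufferMemoryDescriptor * io =
 *         IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOut, false);
 *     if (io) {
 *         io->setLength(128);     // first transfer: 128 bytes
 *         // ... perform I/O ...
 *         io->setLength(1024);    // next transfer: 1KB, same buffer
 *         io->release();
 *     }
 */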
/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_singleRange.v.address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
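/*
 * Usage sketch (illustrative only): building up a buffer incrementally;
 * `hdr`, `payload`, and `payloadLen` are hypothetical.
 *
 *     IOBufferMemoryDescriptor * pkt =
 *         IOBufferMemoryDescriptor::withCapacity(1500, kIODirectionOut, false);
 *     if (pkt) {
 *         pkt->setLength(0);                        // start empty
 *         pkt->appendBytes(&hdr, sizeof(hdr));      // length grows by sizeof(hdr)
 *         pkt->appendBytes(payload, payloadLen);    // clipped at capacity
 *         pkt->release();
 *     }
 */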
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_singleRange.v.address;
}
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;

    if (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _singleRange.v.address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
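/*
 * Usage sketch (illustrative only): peeking at a region of the buffer
 * without copying. The range must lie entirely within the current length,
 * or NULL comes back; `buf` is a hypothetical descriptor.
 *
 *     if (void * p = buf->getBytesNoCopy(64, 16)) {
 *         // p points at bytes 64..79 of the buffer
 *     }
 */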
/* DEPRECATED */ void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                    IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);