/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */
void ipc_port_release_send(ipc_port_t port);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef assert
#define assert(ex)  \
    ((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
enum
{
    kIOPageAllocChunkBytes = (PAGE_SIZE / 64),
    kIOPageAllocSignature  = 'iopa'
};

struct io_pagealloc_t
{
    queue_chain_t link;
    uint64_t      avail;
    uint32_t      signature;
};
typedef struct io_pagealloc_t io_pagealloc_t;

typedef char io_pagealloc_t_assert[(sizeof(io_pagealloc_t) <= kIOPageAllocChunkBytes) ? 1 : -1];
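/*
 * Note: with 4 KB pages, kIOPageAllocChunkBytes works out to 4096 / 64 = 64
 * bytes, so one page holds 64 chunks and the per-page free map fits in the
 * single 64-bit 'avail' word (one bit per chunk).  The io_pagealloc_t record
 * itself lives in the last chunk of the page it tracks, which is what the
 * sizeof static-assert typedef above is guarding.
 */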
IOSimpleLock * gIOPageAllocLock;
queue_head_t   gIOPageAllocList;
vm_size_t      gIOPageAllocCount;
vm_size_t      gIOPageAllocBytes;
static io_pagealloc_t *
iopa_allocpage(void)
{
    kern_return_t    kr;
    io_pagealloc_t * pa;
    vm_address_t     vmaddr = 0;

    int options = 0; // KMA_LOMEM;
    kr = kernel_memory_allocate(kernel_map, &vmaddr,
                                page_size, 0, options);
    if (KERN_SUCCESS != kr) return (0);

    bzero((void *) vmaddr, page_size);
    pa = (typeof(pa)) (vmaddr + page_size - kIOPageAllocChunkBytes);

    pa->signature = kIOPageAllocSignature;
    pa->avail     = -2ULL;

    return (pa);
}
static void
iopa_freepage(io_pagealloc_t * pa)
{
    kmem_free(kernel_map, trunc_page((uintptr_t) pa), page_size);
}
static uintptr_t
iopa_allocinpage(io_pagealloc_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}
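/*
 * Illustrative sketch of the folding loop above, shown on an 8-bit map for
 * readability (the real map is 64 bits).  To find a run of count = 3 free
 * chunks in avail = 0b11101100:
 *
 *   s = 1:  avail &= (avail << 1)  ->  0b11001000   (pairs of adjacent 1s)
 *   s = 1:  avail &= (avail << 1)  ->  0b10000000   (runs of three 1s)
 *
 * A surviving bit marks the highest bit of a long-enough run; the
 * 'avail &= align' step then discards starting positions that would violate
 * the requested alignment, and __builtin_clzll converts the chosen bit into
 * a chunk index counted from the start of the page (bit 63 is chunk 0).
 */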
static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}
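/*
 * log2up() is ceil(log2(size)) with log2up(0) == log2up(1) == 0; for example
 * log2up(2) == 1, log2up(3) == 2, log2up(64) == 6 and log2up(65) == 7.
 */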
static uintptr_t
iopa_alloc(vm_size_t bytes, uint32_t balign)
{
    // allocation start masks, one per power-of-two chunk alignment
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    io_pagealloc_t * pa;
    uintptr_t        addr = 0;
    uint32_t         count;
    uint64_t         align;

    if (!bytes) bytes = 1;
    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];

    IOSimpleLockLock(gIOPageAllocLock);
    pa = (typeof(pa)) queue_first(&gIOPageAllocList);
    while (!queue_end(&gIOPageAllocList, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            gIOPageAllocBytes += bytes;
            break;
        }
        pa = (typeof(pa)) queue_next(&pa->link);
    }
    IOSimpleLockUnlock(gIOPageAllocLock);

    if (!addr)
    {
        pa = iopa_allocpage();
        if (pa)
        {
            addr = iopa_allocinpage(pa, count, align);
            IOSimpleLockLock(gIOPageAllocLock);
            if (pa->avail) enqueue_head(&gIOPageAllocList, &pa->link);
            gIOPageAllocCount++;
            if (addr) gIOPageAllocBytes += bytes;
            IOSimpleLockUnlock(gIOPageAllocLock);
        }
    }

    if (addr)
    {
        assert((addr & ((1 << log2up(balign)) - 1)) == 0);
        IOStatisticsAlloc(kIOStatisticsMallocAligned, bytes);
#if IOALLOCDEBUG
        debug_iomalloc_size += bytes;
#endif
    }

    return (addr);
}
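/*
 * Sizing sketch for the code above: a request is rounded up to whole chunks,
 * so with 64-byte chunks iopa_alloc(96, 64) asks for count = 2 chunks and,
 * via log2up(), picks the mask for 1-chunk alignment (any start).  A balign
 * of 256 maps to the 4-chunk mask, which only permits starting chunk indices
 * that are multiples of 4; the assert on the returned address checks exactly
 * that property.
 */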
static void
iopa_free(uintptr_t addr, vm_size_t bytes)
{
    io_pagealloc_t * pa;
    uint32_t         count;
    uintptr_t        chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    chunk /= kIOPageAllocChunkBytes;

    IOSimpleLockLock(gIOPageAllocLock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&gIOPageAllocList, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        gIOPageAllocCount--;
    }
    gIOPageAllocBytes -= bytes;
    IOSimpleLockUnlock(gIOPageAllocLock);

    if (pa) iopa_freepage(pa);

#if IOALLOCDEBUG
    debug_iomalloc_size -= bytes;
#endif
    IOStatisticsAlloc(kIOStatisticsFreeAligned, bytes);
}
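/*
 * The -2ULL comparison above is the "page completely free" sentinel: every
 * chunk except the last one (bit 0, which holds the io_pagealloc_t record
 * itself) is available, i.e. avail == 0xFFFFFFFFFFFFFFFE.  Only then is the
 * page unlinked from gIOPageAllocList and returned to the kernel map via
 * iopa_freepage().
 */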
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                  IOOptionBits options,
                  vm_size_t    capacity,
                  vm_offset_t  alignment,
                  task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                  task_t            inTask,
                  IOOptionBits      options,
                  mach_vm_size_t    capacity,
                  mach_vm_address_t alignment,
                  mach_vm_address_t physicalMask)
{
    kern_return_t         kr;
    task_t                mapTask = NULL;
    vm_map_t              vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity)
        return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;
    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = mapped;
    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }
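    /*
     * Worked example for the block above: a DMA engine that requires
     * page-aligned buffers below 4 GB would pass physicalMask =
     * 0x00000000FFFFF000.  The expression picks out the low zero bits
     * (0xFFF), the increment turns that into alignment = 0x1000, and
     * highestMask becomes 0x00000000FFFFFFFF.
     */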
    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;
    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }
    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffers shouldn't auto prepare; they should be prepared explicitly.
        // But it never was enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;
        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (0 != (kIOMemoryMapperNone & options));
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
        }

        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                capacity, highestMask, alignment, contig);
        }
        else if (needZero
                  && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(capacity, alignment);
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }

        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }
    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;

        if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }
    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew( ExpansionData, 1 );
            if( !reserved)
                return( false );
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return( false );
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                                            task_t            inTask,
                                            IOOptionBits      options,
                                            mach_vm_size_t    capacity,
                                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}
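/*
 * Illustrative driver-side usage (a sketch, not part of this file): allocate
 * 64 KB of wired, physically contiguous memory restricted to 32-bit physical
 * addresses, then release it when the hardware is done with it.
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *           kernel_task,
 *           kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *           64 * 1024,                   // capacity in bytes
 *           0x00000000FFFFFFFFULL);      // physical address mask
 *   if (bmd) {
 *       void * cpuPtr = bmd->getBytesNoCopy();
 *       // ... program the device, typically via IODMACommand ...
 *       bmd->release();
 *   }
 */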
#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}
/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
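/*
 * Illustrative usage (a sketch): a caller that only needs a kernel scratch
 * buffer can take the capacity/direction convenience route and later shorten
 * the transfer with setLength() instead of reallocating.
 *
 *   IOBufferMemoryDescriptor * buf =
 *       IOBufferMemoryDescriptor::withCapacity(8192, kIODirectionInOut, false);
 *   if (buf) {
 *       buf->setLength(1024);     // transfer only the first 1 KB
 *       // ... fill buf->getBytesNoCopy() ...
 *       buf->release();
 *   }
 */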
#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */
/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
               kernel_task, inDirection | kIOMemoryUnshared
               | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength, 0 ))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (kInternalFlagPageAllocated & internalFlags)
        {
            iopa_free((uintptr_t) buffer, size);
        }
        else if (alignment > 1)
        {
            IOFreeAligned(buffer, size);
        }
        else
        {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}
/*
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}
/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}
/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
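/*
 * Illustrative usage (a sketch; 'header' and 'payload' stand in for caller
 * data): build a descriptor up from pieces.  Capacity is fixed at creation,
 * so appends quietly stop copying once the buffer is full.
 *
 *   IOBufferMemoryDescriptor * msg =
 *       IOBufferMemoryDescriptor::withCapacity(256, kIODirectionOut, false);
 *   if (msg) {
 *       msg->setLength(0);                         // start empty
 *       msg->appendBytes(&header, sizeof(header));
 *       msg->appendBytes(payload, payloadLen);     // length tracks the appends
 *       msg->release();
 *   }
 */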
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
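/*
 * Illustrative usage (a sketch; 'bmd' and 'localCopy' are hypothetical caller
 * variables): the ranged getBytesNoCopy() returns 0 whenever the requested
 * window is not wholly inside the current length, so check before using it.
 *
 *   if (void * p = bmd->getBytesNoCopy(16, 32))   // bytes 16..47, if valid
 *       bcopy(p, localCopy, 32);
 */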
#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);