/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum {
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004,
    kInternalFlagInit          = 0x00000008
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uintptr_t
IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t vmaddr = 0;
    int options = 0; // KMA_LOMEM

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
        page_size, 0, options, VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) {
        vmaddr = 0;
    } else {
        bzero((void *) vmaddr, page_size);
    }

    return (uintptr_t) vmaddr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t mapTask = NULL;
    vm_map_t vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool mapped = false;
    bool needZero;

    if (!capacity) {
        return false;
    }

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64) {
        return false;
    }
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options)) {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

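    // Worked example (illustrative, not from the original source): a
    // physicalMask of 0x00000000FFFFF000 yields
    // (physicalMask ^ -1ULL) & (physicalMask - 1) == 0xFFF, so alignment
    // becomes 0x1000 (one page) after the increment below, and highestMask
    // becomes 0x00000000FFFFFFFF - any page-aligned address under 4GB
    // satisfies the mask.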
    if (physicalMask && (alignment <= 1)) {
        alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size) {
            alignment = page_size;
        }
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
        alignment = page_size;
    }

    if (alignment >= page_size) {
        capacity = round_page(capacity);
    }

    if (alignment > page_size) {
        options |= kIOMemoryPhysicallyContiguous;
    }

    _alignment = alignment;

    // reject overflow from the rounding and alignment adjustments above
    if ((capacity + alignment) < _capacity) {
        return false;
    }

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
        return false;
    }

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment = _alignment;
    mapSpec.numAddressBits = 64;
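    // Illustrative example for the conversion below: highestMask
    // 0x00000000FFFFFFFF gives numAddressBits = 32 - __builtin_clz(0xFFFFFFFF)
    // == 32, telling the system mapper the buffer must be reachable with
    // 32-bit DMA addresses.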
    if (highestMask && mapped) {
        if (highestMask <= 0xFFFFFFFF) {
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        } else {
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        }
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable) {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) {
            iomdOptions |= kIOMemoryBufferPurgeable;
        }
    } else {
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared
        // explicitly. But that was never enforced, so what are you
        // going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size)) {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask) {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                capacity, highestMask, alignment, contig);
        } else if (needZero
            && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero = false;
            _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer) {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
            }
        } else if (alignment > 1) {
            _buffer = IOMallocAligned(capacity, alignment);
        } else {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer) {
            return false;
        }
        if (needZero) {
            bzero(_buffer, capacity);
        }
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        iomdOptions |= kIOMemoryPersistent;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            mapTask = inTask;
            if (NULL == inTask) {
                inTask = kernel_task;
            }
        } else if (options & kIOMapCacheMask) {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr) {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
        inTask, iomdOptions, /* System mapper */ 0)) {
        return false;
    }

    _internalFlags |= kInternalFlagInit;
#if IOTRACKING
    if (!(options & kIOMemoryPageable)) {
        trackingAccumSize(capacity);
    }
#endif /* IOTRACKING */

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
        &mapSpec, sizeof(mapSpec))) {
        return false;
    }

    if (mapTask) {
        if (!reserved) {
            reserved = IONew( ExpansionData, 1 );
            if (!reserved) {
                return false;
            }
        }
        reserved->map = createMappingInTask(mapTask, 0,
            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map) {
            _buffer = 0;
            return false;
        }
        release(); // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
            _ranges.v64->address = buffer;
        }
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}
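
// Usage sketch (illustrative, not part of this file): a driver could
// allocate a page-aligned 8KB wired kernel buffer, shareable with user
// space, like so:
//
//   IOBufferMemoryDescriptor * bmd =
//       IOBufferMemoryDescriptor::inTaskWithOptions(
//           kernel_task, kIODirectionInOut | kIOMemoryKernelUserShared,
//           8192, page_size);
//   if (bmd) {
//       void * p = bmd->getBytesNoCopy();  // zero-filled, wired memory
//       // ... use p ...
//       bmd->release();
//   }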

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
        me->release();
        me = 0;
    }
    return me;
}
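
// Usage sketch (illustrative): a device limited to 32-bit DMA addresses
// can ask for memory below 4GB; initWithPhysicalMask converts the mask
// into an alignment and address-bit specification as shown above.
//
//   IOBufferMemoryDescriptor * bmd =
//       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
//           kernel_task, kIODirectionInOut, 65536,
//           0x00000000FFFFFFFFULL);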

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}

/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity,
    IODirection inDirection,
    bool inContiguous)
{
    return IOBufferMemoryDescriptor::withOptions(
        inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inCapacity, inContiguous ? inCapacity : 1 );
}
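
// Usage sketch (illustrative): the simplest constructor; this requests
// 4KB of physically contiguous memory, aligned to its own size per the
// inContiguous convention above.
//
//   IOBufferMemoryDescriptor * bmd =
//       IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, true);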

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t inLength,
    IODirection inDirection,
    bool inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inLength, inLength, (mach_vm_address_t)0)) {
        return false;
    }

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength)) {
        return false;
    }

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t inLength,
    IODirection inDirection,
    bool inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
            kernel_task, inDirection | kIOMemoryUnshared
            | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
            inLength, inLength, 0 )) {
        me->release();
        me = 0;
    }

    if (me) {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength)) {
            me->release();
            me = 0;
        }
    }
    return me;
}
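
// Usage sketch (illustrative; cmd is a hypothetical command block):
// preloading a few bytes, copied into freshly allocated descriptor memory.
//
//   static const UInt8 cmd[4] = { 0x12, 0x00, 0x00, 0x24 };
//   IOBufferMemoryDescriptor * bmd =
//       IOBufferMemoryDescriptor::withBytes(cmd, sizeof(cmd),
//           kIODirectionOut, false);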

/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size) {
        size = round_page(size);
    }

    if (reserved) {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
        if (map) {
            map->release();
        }
    }

    if ((options & kIOMemoryPageable)
        || (kInternalFlagPageSized & internalFlags)) {
        size = round_page(size);
    }

#if IOTRACKING
    if (!(options & kIOMemoryPageable)
        && buffer
        && (kInternalFlagInit & _internalFlags)) {
        trackingAccumSize(-size);
    }
#endif /* IOTRACKING */

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
        OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    } else if (buffer) {
        if (kInternalFlagPhysical & internalFlags) {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        } else if (kInternalFlagPageAllocated & internalFlags) {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page) {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            OSAddAtomic(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        } else if (alignment > 1) {
            IOFreeAligned(buffer, size);
        } else {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags)) {
        IODelete(range, IOAddressRange, 1);
    }
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);
    if (length > _capacity) {
        return;
    }

    _length = length;
    _ranges.v64->length = length;
}
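
// Usage sketch (illustrative; bmd is a hypothetical descriptor created
// with capacity 4096): one buffer can service transfers of varying size.
//
//   bmd->setLength(512);    // first I/O moves only 512 bytes
//   ...
//   bmd->setLength(4096);   // later reuse, up to the original capacity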

/*
 * setDirection:
 *
 * Change the direction of the transfer. Redirecting an existing
 * descriptor eliminates the need to destroy and create new buffers when
 * a different transfer direction is needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}
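
// Usage sketch (illustrative; bmd is hypothetical): the same buffer can
// be turned around between the phases of a request/response exchange.
//
//   bmd->setDirection(kIODirectionOut);  // send the request from the buffer
//   ...
//   bmd->setDirection(kIODirectionIn);   // then receive the reply into it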

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task) {
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
            actualBytesToCopy);
    } else {
        writeBytes(offset, bytes, actualBytesToCopy);
    }

    return true;
}
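
// Usage sketch (illustrative; header, payload and payloadLen are
// hypothetical caller data): successive appendBytes calls concatenate,
// since each one advances _length.
//
//   bmd->setLength(0);                          // rewind to empty
//   bmd->appendBytes(&header, sizeof(header));
//   bmd->appendBytes(payload, payloadLen);      // lands after the header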

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
        return _buffer;
    } else {
        return (void *)_ranges.v64->address;
    }
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;

    // guard against wrap-around of start + withLength
    if ((start + withLength) < start) {
        return 0;
    }

    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
        address = (IOVirtualAddress) _buffer;
    } else {
        address = _ranges.v64->address;
    }

    if (start < _length && (start + withLength) <= _length) {
        return (void *)(address + start);
    }
    return 0;
}
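
// Usage sketch (illustrative; bmd, headerSize, payloadLen and src are
// hypothetical): this ranged variant bounds-checks against the current
// length and returns NULL on failure.
//
//   if (void * p = bmd->getBytesNoCopy(headerSize, payloadLen)) {
//       bcopy(src, p, payloadLen);
//   }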

#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment) {
        *lengthOfSegment = _length - offset;
    }

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);