/* iokit/Kernel/IOBufferMemoryDescriptor.cpp (apple/xnu, xnu-1699.32.7) */
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

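// Internal flags recording how _buffer was obtained, so that free() can
// release it through the matching deallocator (see free() below).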
enum
{
    kInternalFlagPhysical  = 0x00000001,
    kInternalFlagPageSized = 0x00000002
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                    IOOptionBits options,
                    vm_size_t    capacity,
                    vm_offset_t  alignment,
                    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                    task_t            inTask,
                    IOOptionBits      options,
                    mach_vm_size_t    capacity,
                    mach_vm_address_t alignment,
                    mach_vm_address_t physicalMask)
{
    kern_return_t     kr;
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;

    if (!capacity)
        return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

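    // When the caller supplies a physical mask but no explicit alignment,
    // derive both an allocation alignment (the mask's lowest set bit) and an
    // upper address bound from the mask. Worked example (illustrative):
    // physicalMask == 0x00000000FFFFF000ULL yields alignment == 0x1000 and
    // highestMask == 0x00000000FFFFFFFFULL.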
    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

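    // Two allocation strategies: pageable buffers are backed by a named VM
    // entry and mapped into the requesting task later (see the mapTask
    // handling below), while non-pageable buffers are wired allocations made
    // directly in the kernel map.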
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // That was never enforced, however, so auto-prepare is kept for compatibility.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
                                        (0 != (options & kIOMemoryPhysicallyContiguous)));
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }

        if (!_buffer)
        {
            return false;
        }
    }

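    // For pageable or cache-controlled buffers, create a Mach memory entry
    // (named entry port) describing the backing object; the cache mode chosen
    // above is applied here. If the VM returns an entry of the wrong size,
    // treat it as a failure.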
    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL );

        if ((KERN_SUCCESS == kr) && (size != round_page(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

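    // For pageable buffers, create the mapping in the requesting task now.
    // The mapping normally retains its memory descriptor; drop that retain
    // (and the descriptor's reference to the mapping) here so the buffer and
    // its map do not keep each other alive in a reference cycle.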
    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew( ExpansionData, 1 );
            if (!reserved)
                return( false );
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return( false );
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                            task_t       inTask,
                            IOOptionBits options,
                            vm_size_t    capacity,
                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                            task_t            inTask,
                            IOOptionBits      options,
                            mach_vm_size_t    capacity,
                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}

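// Illustrative usage sketch (not part of the original file): a driver that
// needs a wired, physically contiguous buffer reachable by a 32-bit DMA
// engine might allocate it roughly like this:
//
//     IOBufferMemoryDescriptor * bmd =
//         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
//             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
//             65536, 0x00000000FFFFFFFFULL);
//     if (bmd) {
//         void * cpuPtr = bmd->getBytesNoCopy();   // kernel virtual address
//         // ... program the DMA engine, then ...
//         bmd->release();
//     }
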
#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                    IOOptionBits options,
                    vm_size_t    capacity,
                    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                    IOOptionBits options,
                    vm_size_t    capacity,
                    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                   | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0 ))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (internalFlags & kInternalFlagPhysical)
        {
            if (kInternalFlagPageSized & internalFlags)
                size = round_page(size);
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}
/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}
/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}

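// Illustrative usage sketch (not part of the original file): building up a
// buffer incrementally with appendBytes, mirroring what withBytes does
// internally:
//
//     IOBufferMemoryDescriptor * bmd =
//         IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOut, false);
//     if (bmd) {
//         bmd->setLength(0);                       // start empty
//         bmd->appendBytes(header, headerLen);     // hypothetical data
//         bmd->appendBytes(payload, payloadLen);
//         // ... hand the descriptor to the driver stack, then ...
//         bmd->release();
//     }
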
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

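// Reserved vtable slots: on 32-bit builds the first two slots were consumed
// by virtual methods added after the class first shipped (hence
// "ReservedUsed"); on LP64 they remain unused. The remaining slots are
// padding kept for future, binary-compatible expansion of the class.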
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);