/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical  = 0x00000001,
    kInternalFlagPageSized = 0x00000002
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t     kr;
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;

    if (!capacity)
        return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }
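    // Worked example (illustrative values, assuming 4 KB pages): for
    // physicalMask == 0x00000000FFFFF000ULL the block above computes
    // alignment = (~physicalMask & (physicalMask - 1)) + 1 = 0x1000 and
    // highestMask = physicalMask | 0xFFF = 0x00000000FFFFFFFFULL, i.e. the
    // buffer must be 4 KB aligned and reside below 4 GB physical.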

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffers shouldn't auto prepare; they should be prepared explicitly.
        // But that never was enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
                                    (0 != (options & kIOMemoryPhysicallyContiguous)));
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }

        if (!_buffer)
        {
            return false;
        }
    }
1c79356b 211
0b4c1975 212 if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
2d21ac55 213 ipc_port_t sharedMem;
b0d623f7 214 vm_size_t size = round_page(capacity);
2d21ac55
A
215
216 kr = mach_make_memory_entry(vmmap,
217 &size, (vm_offset_t)_buffer,
218 memEntryCacheMode, &sharedMem,
219 NULL );
220
b0d623f7 221 if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
2d21ac55
A
222 ipc_port_release_send( sharedMem );
223 kr = kIOReturnVMError;
224 }
225 if( KERN_SUCCESS != kr)
226 return( false );
227
228 _memEntry = (void *) sharedMem;
1c79356b 229
2d21ac55
A
230 if( options & kIOMemoryPageable) {
231#if IOALLOCDEBUG
232 debug_iomallocpageable_size += size;
233#endif
234 mapTask = inTask;
235 if (NULL == inTask)
236 inTask = kernel_task;
237 }
238 else if (options & kIOMapCacheMask)
239 {
240 // Prefetch each page to put entries into the pmap
241 volatile UInt8 * startAddr = (UInt8 *)_buffer;
242 volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity;
243
244 while (startAddr < endAddr)
245 {
246 *startAddr;
247 startAddr += page_size;
248 }
249 }
250 }
251
b0d623f7
A
    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew( ExpansionData, 1 );
            if( !reserved)
                return( false );
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return( false );
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}
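
/*
 * Usage sketch (illustrative only; the 64 KB size and 0xFFFFF000 mask are
 * hypothetical values chosen for the example, not values required by this
 * class): a driver that needs a physically contiguous, DMA-reachable buffer
 * below 4 GB might allocate it roughly like this:
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *             kernel_task,
 *             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *             65536, 0x00000000FFFFF000ULL);
 *     if (buf) {
 *         void * cpuAddr = buf->getBytesNoCopy();   // kernel virtual address
 *         // ... program the hardware, then ...
 *         buf->release();
 *     }
 */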

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
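
/*
 * Usage sketch (illustrative only; the 4 KB capacity and the direction are
 * hypothetical values for the example): allocating a plain, non-contiguous
 * kernel buffer through this convenience factory looks like this:
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, false);
 *     if (buf) {
 *         // length starts equal to capacity; shrink it for smaller transfers
 *         buf->setLength(1024);
 *         buf->release();
 *     }
 */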

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
               kernel_task, inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength, 0 ))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (internalFlags & kInternalFlagPhysical)
        {
            if (kInternalFlagPageSized & internalFlags)
                size = round_page(size);
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
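
/*
 * Usage sketch (illustrative only; the buffer "buf" and the 16-byte payload
 * are hypothetical): appendBytes() grows the descriptor's length as data is
 * added, so a caller typically resets the length first and then reads the
 * data back through getBytesNoCopy():
 *
 *     UInt8 payload[16] = { 0 };                    // hypothetical data
 *     buf->setLength(0);                            // start out empty
 *     buf->appendBytes(payload, sizeof(payload));   // length becomes 16
 *     void * p = buf->getBytesNoCopy(0, sizeof(payload));
 */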

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);