// iokit/Kernel/IOBufferMemoryDescriptor.cpp (xnu-7195.101.1)
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define IOKIT_ENABLE_SHARED_PTR

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum {
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004,
    kInternalFlagInit          = 0x00000008
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uintptr_t
IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t vmaddr = 0;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
        page_size, 0, KMA_NONE, VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) {
        vmaddr = 0;
    } else {
        bzero((void *) vmaddr, page_size);
    }

    return (uintptr_t) vmaddr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t capacity,
    vm_offset_t alignment,
    task_t inTask)
{
    mach_vm_address_t physicalMask = 0;
    return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCopy(
    task_t inTask,
    IOOptionBits options,
    vm_map_t sourceMap,
    mach_vm_address_t source,
    mach_vm_size_t size)
{
    OSSharedPtr<IOBufferMemoryDescriptor> inst;
    kern_return_t err;
    vm_map_copy_t copy;
    vm_map_address_t address;

    copy = NULL;
    do {
        err = kIOReturnNoMemory;
        inst = OSMakeShared<IOBufferMemoryDescriptor>();
        if (!inst) {
            break;
        }
        inst->_ranges.v64 = IONew(IOAddressRange, 1);
        if (!inst->_ranges.v64) {
            break;
        }

        err = vm_map_copyin(sourceMap, source, size,
            false /* src_destroy */, &copy);
        if (KERN_SUCCESS != err) {
            break;
        }

        err = vm_map_copyout(get_task_map(inTask), &address, copy);
        if (KERN_SUCCESS != err) {
            break;
        }
        copy = NULL;

        inst->_ranges.v64->address = address;
        inst->_ranges.v64->length = size;

        if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
            err = kIOReturnError;
        }
    } while (false);

    if (KERN_SUCCESS == err) {
        return inst;
    }

    if (copy) {
        vm_map_copy_discard(copy);
    }

    return nullptr;
}
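
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * withCopy() snapshots an existing range in sourceMap into a new pageable
 * buffer mapped in inTask. The address and size values below are
 * hypothetical.
 *
 *   OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *       IOBufferMemoryDescriptor::withCopy(kernel_task,
 *           kIODirectionOutIn | kIOMemoryPageable,
 *           get_task_map(kernel_task), someSourceAddress, someSize);
 *   if (bmd) {
 *       // bmd now owns an independent copy of the source range
 *   }
 */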


bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t inTask,
    IOOptionBits options,
    mach_vm_size_t capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t mapTask = NULL;
    vm_map_t vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool mapped = false;
    bool withCopy = false;
    bool mappedOrShared = false;

    if (!capacity) {
        return false;
    }

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = NULL;

    if (!_ranges.v64) {
        _ranges.v64 = IONew(IOAddressRange, 1);
        if (!_ranges.v64) {
            return false;
        }
        _ranges.v64->address = 0;
        _ranges.v64->length = 0;
    } else {
        if (!_ranges.v64->address) {
            return false;
        }
        if (!(kIOMemoryPageable & options)) {
            return false;
        }
        if (!inTask) {
            return false;
        }
        _buffer = (void *) _ranges.v64->address;
        withCopy = true;
    }
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options)) {
        IOMapper::checkForSystemMapper();
        mapped = (NULL != IOMapper::gSystem);
    }

    if (physicalMask && (alignment <= 1)) {
        alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size) {
            alignment = page_size;
        }
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
        alignment = page_size;
    }

    if (alignment >= page_size) {
        if (round_page_overflow(capacity, &capacity)) {
            return false;
        }
    }

    if (alignment > page_size) {
        options |= kIOMemoryPhysicallyContiguous;
    }

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) {
        return false;
    }

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
        return false;
    }

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped) {
        if (highestMask <= 0xFFFFFFFF) {
            mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask));
        } else {
            mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        }
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable) {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) {
            iomdOptions |= kIOMemoryBufferPurgeable;
        }
    } else {
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // That was never enforced, though, so the old behavior is preserved.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
        if (contig || highestMask || (alignment > page_size)) {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask) {
                _internalFlags |= kInternalFlagPageSized;
                if (round_page_overflow(capacity, &capacity)) {
                    return false;
                }
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                capacity, highestMask, alignment, contig);
        } else if (mappedOrShared
            && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) {
            _internalFlags |= kInternalFlagPageAllocated;
            _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer) {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
            }
        } else if (alignment > 1) {
            _buffer = IOMallocAligned(capacity, alignment);
        } else {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer) {
            return false;
        }
        bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        if (!withCopy) {
            iomdOptions |= kIOMemoryPersistent;
        }

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            if (!withCopy) {
                mapTask = inTask;
            }
            if (NULL == inTask) {
                inTask = kernel_task;
            }
        } else if (options & kIOMapCacheMask) {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr) {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
        inTask, iomdOptions, /* System mapper */ NULL)) {
        return false;
    }

    _internalFlags |= kInternalFlagInit;
#if IOTRACKING
    if (!(options & kIOMemoryPageable)) {
        trackingAccumSize(capacity);
    }
#endif /* IOTRACKING */

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
        &mapSpec, sizeof(mapSpec))) {
        return false;
    }

    if (mapTask) {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved) {
                return false;
            }
        }
        reserved->map = createMappingInTask(mapTask, 0,
            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach();
        if (!reserved->map) {
            _buffer = NULL;
            return false;
        }
        release();      // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
            _ranges.v64->address = buffer;
        }
    }

    setLength(_capacity);

    return true;
}
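
/*
 * Worked example (editor's note, not in the original source): when a
 * caller passes a physicalMask with alignment <= 1, the code above derives
 * the alignment from the mask's low zero bits. For a hypothetical mask of
 * 0x00000000FFFFF000:
 *
 *   (physicalMask ^ -1ULL) & (physicalMask - 1)  ->  0x0000000000000FFF
 *   alignment++                                  ->  0x1000 (4 KB)
 *
 * so the buffer is 4 KB aligned and, via highestMask (0xFFFFFFFF here),
 * constrained below 4 GB for 32-bit DMA engines.
 */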

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t inTask,
    IOOptionBits options,
    vm_size_t capacity,
    vm_offset_t alignment)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me.reset();
    }
    return me;
}
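
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a driver allocating a wired, page-aligned 4 KB kernel buffer for I/O.
 *
 *   OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *       IOBufferMemoryDescriptor::inTaskWithOptions(
 *           kernel_task, kIODirectionInOut, 4096, 4096);
 *   if (bmd) {
 *       void * p = bmd->getBytesNoCopy();   // kernel virtual address
 *       // fill p with data for the device; buffer starts out zeroed
 *   }
 */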

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t inTask,
    IOOptionBits options,
    vm_size_t capacity,
    vm_offset_t alignment,
    uint32_t kernTag,
    uint32_t userTag)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me) {
        me->setVMTags(kernTag, userTag);

        if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
            me.reset();
        }
    }
    return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t inTask,
    IOOptionBits options,
    mach_vm_size_t capacity,
    mach_vm_address_t physicalMask)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
        me.reset();
    }
    return me;
}
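
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * allocating DMA-reachable memory for a hypothetical device that can only
 * address the low 4 GB.
 *
 *   OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *           kernel_task,
 *           kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *           8192, 0x00000000FFFFFFFFULL);
 */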

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t capacity,
    vm_offset_t alignment)
{
    return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t capacity,
    vm_offset_t alignment)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me.reset();
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity,
    IODirection inDirection,
    bool inContiguous)
{
    return IOBufferMemoryDescriptor::withOptions(
        inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inCapacity, inContiguous ? inCapacity : 1);
}
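
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 *
 *   OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *       IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOut, false);
 *   // bmd->getLength() == 1024 here; shrink it with setLength() for
 *   // shorter transfers.
 */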

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t inLength,
    IODirection inDirection,
    bool inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inLength, inLength, (mach_vm_address_t)0)) {
        return false;
    }

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength)) {
        return false;
    }

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t inLength,
    IODirection inDirection,
    bool inContiguous)
{
    OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

    if (me && !me->initWithPhysicalMask(
        kernel_task, inDirection | kIOMemoryUnshared
        | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inLength, inLength, 0)) {
        me.reset();
    }

    if (me) {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength)) {
            me.reset();
        }
    }
    return me;
}
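
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * wrapping an existing kernel structure in a descriptor by copying its
 * contents. `SomeHeader` and `header` are hypothetical.
 *
 *   SomeHeader header = { ... };
 *   OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *       IOBufferMemoryDescriptor::withBytes(&header, sizeof(header),
 *           kIODirectionOut, false);
 */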

/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits flags = _flags;
    IOOptionBits internalFlags = _internalFlags;
    IOOptionBits options = _options;
    vm_size_t size = _capacity;
    void * buffer = _buffer;
    IOMemoryMap * map = NULL;
    IOAddressRange * range = _ranges.v64;
    vm_offset_t alignment = _alignment;

    if (alignment >= page_size) {
        size = round_page(size);
    }

    if (reserved) {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map) {
            map->release();
        }
    }

    if ((options & kIOMemoryPageable)
        || (kInternalFlagPageSized & internalFlags)) {
        size = round_page(size);
    }

#if IOTRACKING
    if (!(options & kIOMemoryPageable)
        && buffer
        && (kInternalFlagInit & _internalFlags)) {
        trackingAccumSize(-size);
    }
#endif /* IOTRACKING */

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
        OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    } else if (buffer) {
        if (kInternalFlagPhysical & internalFlags) {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        } else if (kInternalFlagPageAllocated & internalFlags) {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page) {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        } else if (alignment > 1) {
            IOFreeAligned(buffer, size);
        } else {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags)) {
        IODelete(range, IOAddressRange, 1);
    }
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);
    if (length > _capacity) {
        return;
    }

    _length = length;
    _ranges.v64->length = length;
}
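
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * reusing one descriptor for transfers of varying size instead of
 * reallocating.
 *
 *   bmd->setLength(512);                    // next transfer moves 512 bytes
 *   // ... perform I/O ...
 *   bmd->setLength(bmd->getCapacity());     // restore full length
 */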

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task) {
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
            actualBytesToCopy);
    } else {
        writeBytes(offset, bytes, actualBytesToCopy);
    }

    return true;
}
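
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * building a command buffer incrementally. Note the copy is silently
 * truncated at capacity, so callers should track how much they append.
 * `cmdHeader`, `payload`, and `payloadLen` are hypothetical.
 *
 *   bmd->setLength(0);                                  // reset to empty
 *   bmd->appendBytes(&cmdHeader, sizeof(cmdHeader));
 *   bmd->appendBytes(payload, payloadLen);
 */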

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
        return _buffer;
    } else {
        return (void *)_ranges.v64->address;
    }
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;

    if ((start + withLength) < start) {
        return NULL;
    }

    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
        address = (IOVirtualAddress) _buffer;
    } else {
        address = _ranges.v64->address;
    }

    if (start < _length && (start + withLength) <= _length) {
        return (void *)(address + start);
    }
    return NULL;
}
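
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * the ranged overload bounds-checks against the current length, returning
 * NULL rather than a pointer past valid data.
 *
 *   void * p = bmd->getBytesNoCopy(16, 64);   // bytes [16, 80) if in range
 *   if (p == NULL) {
 *       // offset/length exceeded bmd->getLength(), or the sum overflowed
 *   }
 */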

#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment) {
        *lengthOfSegment = _length - offset;
    }

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);