/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define IOKIT_ENABLE_SHARED_PTR

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum {
	kInternalFlagPhysical      = 0x00000001,
	kInternalFlagPageSized     = 0x00000002,
	kInternalFlagPageAllocated = 0x00000004,
	kInternalFlagInit          = 0x00000008
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uintptr_t
IOBMDPageProc(iopa_t * a)
{
	kern_return_t kr;
	vm_address_t  vmaddr  = 0;
	int           options = 0; // KMA_LOMEM;

	kr = kernel_memory_allocate(kernel_map, &vmaddr,
	    page_size, 0, options, VM_KERN_MEMORY_IOKIT);

	if (KERN_SUCCESS != kr) {
		vmaddr = 0;
	} else {
		bzero((void *) vmaddr, page_size);
	}

	return (uintptr_t) vmaddr;
}
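
// Note: IOBMDPageProc is the page-supply callback handed to iopa_alloc()
// further below; the iopa_t sub-page allocator invokes it whenever it needs
// a fresh, zero-filled kernel page to carve small aligned buffers from.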

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	task_t       inTask)
{
	mach_vm_address_t physicalMask = 0;
	return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

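// withCopy() wraps an existing mapping rather than allocating new memory:
// it vm_map_copyin()s the source range from sourceMap, copies it out into
// inTask's map, and then initializes the descriptor (pageable, via
// initWithPhysicalMask) over the resulting mapping.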
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCopy(
	task_t            inTask,
	IOOptionBits      options,
	vm_map_t          sourceMap,
	mach_vm_address_t source,
	mach_vm_size_t    size)
{
	OSSharedPtr<IOBufferMemoryDescriptor> inst;
	kern_return_t    err;
	vm_map_copy_t    copy;
	vm_map_address_t address;

	copy = NULL;
	do {
		err = kIOReturnNoMemory;
		inst = OSMakeShared<IOBufferMemoryDescriptor>();
		if (!inst) {
			break;
		}
		inst->_ranges.v64 = IONew(IOAddressRange, 1);
		if (!inst->_ranges.v64) {
			break;
		}

		err = vm_map_copyin(sourceMap, source, size,
		    false /* src_destroy */, &copy);
		if (KERN_SUCCESS != err) {
			break;
		}

		err = vm_map_copyout(get_task_map(inTask), &address, copy);
		if (KERN_SUCCESS != err) {
			break;
		}
		copy = NULL;

		inst->_ranges.v64->address = address;
		inst->_ranges.v64->length  = size;

		if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
			err = kIOReturnError;
		}
	} while (false);

	if (KERN_SUCCESS == err) {
		return inst;
	}

	if (copy) {
		vm_map_copy_discard(copy);
	}

	return nullptr;
}

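// initWithPhysicalMask() is the designated initializer. Depending on the
// options it takes one of several allocation paths:
//   - kIOMemoryPageable:  pageable memory, later mapped into inTask;
//   - physically constrained (mask, large alignment, or contiguous):
//     IOKernelAllocateWithPhysicalRestrict();
//   - small mapped/shared buffers (capacity + alignment fits within
//     page_size - gIOPageAllocChunkBytes): the gIOBMDPageAllocator
//     sub-page allocator;
//   - otherwise: IOMallocAligned() / IOMalloc().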
bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	mach_vm_address_t physicalMask)
{
	task_t            mapTask = NULL;
	vm_map_t          vmmap   = NULL;
	mach_vm_address_t highestMask = 0;
	IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
	IODMAMapSpecification mapSpec;
	bool              mapped = false;
	bool              withCopy = false;
	bool              mappedOrShared = false;

	if (!capacity) {
		return false;
	}

	_options          = options;
	_capacity         = capacity;
	_internalFlags    = 0;
	_internalReserved = 0;
	_buffer           = NULL;

	if (!_ranges.v64) {
		_ranges.v64 = IONew(IOAddressRange, 1);
		if (!_ranges.v64) {
			return false;
		}
		_ranges.v64->address = 0;
		_ranges.v64->length  = 0;
	} else {
		if (!_ranges.v64->address) {
			return false;
		}
		if (!(kIOMemoryPageable & options)) {
			return false;
		}
		if (!inTask) {
			return false;
		}
		_buffer  = (void *) _ranges.v64->address;
		withCopy = true;
	}
	// make sure super::free doesn't dealloc _ranges before super::init
	_flags = kIOMemoryAsReference;

	// Grab IOMD bits from the Buffer MD options
	iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

	if (!(kIOMemoryMapperNone & options)) {
		IOMapper::checkForSystemMapper();
		mapped = (NULL != IOMapper::gSystem);
	}

	if (physicalMask && (alignment <= 1)) {
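		// The trailing zero bits of the mask dictate the required
		// alignment: ~physicalMask & (physicalMask - 1) isolates
		// exactly those bits. For example (illustrative), a
		// physicalMask of 0xFFFFF000 yields alignment - 1 = 0xFFF,
		// i.e. a 4096-byte alignment after the increment below.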
		alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
		highestMask = (physicalMask | alignment);
		alignment++;
		if (alignment < page_size) {
			alignment = page_size;
		}
	}

	if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
		alignment = page_size;
	}

	if (alignment >= page_size) {
		if (round_page_overflow(capacity, &capacity)) {
			return false;
		}
	}

	if (alignment > page_size) {
		options |= kIOMemoryPhysicallyContiguous;
	}

	_alignment = alignment;

	if ((capacity + alignment) < _capacity) {
		return false;
	}

	if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
		return false;
	}

	bzero(&mapSpec, sizeof(mapSpec));
	mapSpec.alignment      = _alignment;
	mapSpec.numAddressBits = 64;
	if (highestMask && mapped) {
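		// Convert the highest acceptable address into an address-bit
		// count for the DMA map spec. For example (illustrative), a
		// highestMask of 0x7FFFFFFF has one leading zero in 32 bits,
		// so 32 - 1 = 31 address bits.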
		if (highestMask <= 0xFFFFFFFF) {
			mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask));
		} else {
			mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32)));
		}
		highestMask = 0;
	}

	// set memory entry cache mode, pageable, purgeable
	iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
	if (options & kIOMemoryPageable) {
		iomdOptions |= kIOMemoryBufferPageable;
		if (options & kIOMemoryPurgeable) {
			iomdOptions |= kIOMemoryBufferPurgeable;
		}
	} else {
		vmmap = kernel_map;

		// Buffers shouldn't auto-prepare; they should be prepared explicitly.
		// But that was never enforced, so what are you going to do?
		iomdOptions |= kIOMemoryAutoPrepare;

		/* Allocate a wired-down buffer inside kernel space. */

		bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

		if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
			contig |= (!mapped);
			contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
			// treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
			contig |= true;
#endif
		}

		mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
		if (contig || highestMask || (alignment > page_size)) {
			_internalFlags |= kInternalFlagPhysical;
			if (highestMask) {
				_internalFlags |= kInternalFlagPageSized;
				if (round_page_overflow(capacity, &capacity)) {
					return false;
				}
			}
			_buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
				capacity, highestMask, alignment, contig);
		} else if (mappedOrShared
		    && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) {
			_internalFlags |= kInternalFlagPageAllocated;
			_buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
			if (_buffer) {
				IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
				OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
			}
		} else if (alignment > 1) {
			_buffer = IOMallocAligned(capacity, alignment);
		} else {
			_buffer = IOMalloc(capacity);
		}
		if (!_buffer) {
			return false;
		}
		bzero(_buffer, capacity);
	}

	if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
		vm_size_t size = round_page(capacity);

		// initWithOptions will create memory entry
		if (!withCopy) {
			iomdOptions |= kIOMemoryPersistent;
		}

		if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
			OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
			if (!withCopy) {
				mapTask = inTask;
			}
			if (NULL == inTask) {
				inTask = kernel_task;
			}
		} else if (options & kIOMapCacheMask) {
			// Prefetch each page to put entries into the pmap
			volatile UInt8 * startAddr = (UInt8 *)_buffer;
			volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

			while (startAddr < endAddr) {
				UInt8 dummyVar = *startAddr;
				(void) dummyVar;
				startAddr += page_size;
			}
		}
	}

	_ranges.v64->address = (mach_vm_address_t) _buffer;
	_ranges.v64->length  = _capacity;

	if (!super::initWithOptions(_ranges.v64, 1, 0,
	    inTask, iomdOptions, /* System mapper */ NULL)) {
		return false;
	}

	_internalFlags |= kInternalFlagInit;
#if IOTRACKING
	if (!(options & kIOMemoryPageable)) {
		trackingAccumSize(capacity);
	}
#endif /* IOTRACKING */

	// give any system mapper the allocation params
	if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
	    &mapSpec, sizeof(mapSpec))) {
		return false;
	}

	if (mapTask) {
		if (!reserved) {
			reserved = IONew( ExpansionData, 1 );
			if (!reserved) {
				return false;
			}
		}
		reserved->map = createMappingInTask(mapTask, 0,
		    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach();
		if (!reserved->map) {
			_buffer = NULL;
			return false;
		}
		release(); // map took a retain on this
		reserved->map->retain();
		removeMapping(reserved->map);
		mach_vm_address_t buffer = reserved->map->getAddress();
		_buffer = (void *) buffer;
		if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
			_ranges.v64->address = buffer;
		}
	}

	setLength(_capacity);

	return true;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
		me.reset();
	}
	return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	uint32_t     kernTag,
	uint32_t     userTag)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me) {
		me->setVMTags(kernTag, userTag);

		if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
			me.reset();
		}
	}
	return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t physicalMask)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
		me.reset();
	}
	return me;
}
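
// Example (illustrative, not part of this file): a driver allocating one
// page of contiguous memory reachable by a 32-bit DMA engine might do:
//
//   OSSharedPtr<IOBufferMemoryDescriptor> buf =
//       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
//           kIODirectionInOut | kIOMemoryPhysicallyContiguous,
//           page_size, 0x00000000FFFFFFFFULL);
//
// The mask constrains the buffer's physical pages to addresses whose set
// bits all fall within the mask (here, below 4 GB).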

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
		me.reset();
	}
	return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
    IODirection inDirection,
    bool        inContiguous)
{
	return IOBufferMemoryDescriptor::withOptions(
		inDirection | kIOMemoryUnshared
		| (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		inCapacity, inContiguous ? inCapacity : 1 );
}
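
// Example (illustrative): a simple kernel-only scratch buffer of 1 KB:
//
//   OSSharedPtr<IOBufferMemoryDescriptor> md =
//       IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOutIn, false);
//   if (md) {
//       void * p = md->getBytesNoCopy();  // valid for md's lifetime
//   }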

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
	if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
	    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
	    inLength, inLength, (mach_vm_address_t)0)) {
		return false;
	}

	// start out with no data
	setLength(0);

	if (!appendBytes(inBytes, inLength)) {
		return false;
	}

	return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(
		    kernel_task, inDirection | kIOMemoryUnshared
		    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		    inLength, inLength, 0 )) {
		me.reset();
	}

	if (me) {
		// start out with no data
		me->setLength(0);

		if (!me->appendBytes(inBytes, inLength)) {
			me.reset();
		}
	}
	return me;
}
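
// Example (illustrative; Header is a hypothetical driver structure):
// snapshotting a small header into a descriptor for a later device write:
//
//   Header hdr = { /* ... */ };
//   OSSharedPtr<IOBufferMemoryDescriptor> md =
//       IOBufferMemoryDescriptor::withBytes(&hdr, sizeof(hdr),
//           kIODirectionOut, false);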

/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
	// Cache all of the relevant information on the stack for use
	// after we call super::free()!
	IOOptionBits     flags         = _flags;
	IOOptionBits     internalFlags = _internalFlags;
	IOOptionBits     options       = _options;
	vm_size_t        size          = _capacity;
	void *           buffer        = _buffer;
	IOMemoryMap *    map           = NULL;
	IOAddressRange * range         = _ranges.v64;
	vm_offset_t      alignment     = _alignment;

	if (alignment >= page_size) {
		size = round_page(size);
	}

	if (reserved) {
		map = reserved->map;
		IODelete( reserved, ExpansionData, 1 );
		if (map) {
			map->release();
		}
	}

	if ((options & kIOMemoryPageable)
	    || (kInternalFlagPageSized & internalFlags)) {
		size = round_page(size);
	}

#if IOTRACKING
	if (!(options & kIOMemoryPageable)
	    && buffer
	    && (kInternalFlagInit & _internalFlags)) {
		trackingAccumSize(-size);
	}
#endif /* IOTRACKING */

	/* super::free may unwire - deallocate buffer afterwards */
	super::free();

	if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	} else if (buffer) {
		if (kInternalFlagPhysical & internalFlags) {
			IOKernelFreePhysical((mach_vm_address_t) buffer, size);
		} else if (kInternalFlagPageAllocated & internalFlags) {
			uintptr_t page;
			page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
			if (page) {
				kmem_free(kernel_map, page, page_size);
			}
#if IOALLOCDEBUG
			OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
		} else if (alignment > 1) {
			IOFreeAligned(buffer, size);
		} else {
			IOFree(buffer, size);
		}
	}
	if (range && (kIOMemoryAsReference & flags)) {
		IODelete(range, IOAddressRange, 1);
	}
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
	return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
	assert(length <= _capacity);
	if (length > _capacity) {
		return;
	}

	_length = length;
	_ranges.v64->length = length;
}
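
// Example (illustrative): reusing one descriptor for variable-sized
// transfers instead of allocating a new one per I/O:
//
//   md->setLength(bytesThisTransfer);   // must be <= md->getCapacity()
//   // ... issue the transfer using md ...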

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
	_flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}
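
// Example (illustrative): flipping the same buffer between a device read
// and a device write:
//
//   md->setDirection(kIODirectionIn);   // device -> memory
//   md->setDirection(kIODirectionOut);  // memory -> device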

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
	vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
	IOByteCount offset;

	assert(_length <= _capacity);

	offset   = _length;
	_length += actualBytesToCopy;
	_ranges.v64->length += actualBytesToCopy;

	if (_task == kernel_task) {
		bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
		    actualBytesToCopy);
	} else {
		writeBytes(offset, bytes, actualBytesToCopy);
	}

	return true;
}
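
// Example (illustrative): building up a command packet piece by piece;
// the descriptor's length tracks the total appended so far:
//
//   md->setLength(0);
//   md->appendBytes(&cmdHeader, sizeof(cmdHeader));
//   md->appendBytes(payload, payloadLen);  // silently truncates at capacity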

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		return _buffer;
	} else {
		return (void *)_ranges.v64->address;
	}
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
	IOVirtualAddress address;

	if ((start + withLength) < start) {
		return NULL;
	}

	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		address = (IOVirtualAddress) _buffer;
	} else {
		address = _ranges.v64->address;
	}

	if (start < _length && (start + withLength) <= _length) {
		return (void *)(address + start);
	}
	return NULL;
}
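
// Example (illustrative): bounds-checked access to a sub-range; the ranged
// overload returns NULL on overflow or if [start, start + withLength) does
// not lie wholly within the current length:
//
//   uint8_t * field = (uint8_t *) md->getBytesNoCopy(16, 4);
//   if (field) { /* safe to read 4 bytes at offset 16 */ }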

#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	void * bytes = getBytesNoCopy(offset, 0);

	if (bytes && lengthOfSegment) {
		*lengthOfSegment = _length - offset;
	}

	return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);