/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

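// Flags kept in _internalFlags, recording how _buffer was obtained so that
// free() can return it to the matching allocator.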
enum
{
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

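// Backing-page provider for the IOBufferMemoryDescriptor page sub-allocator
// (gIOBMDPageAllocator): allocates one zeroed kernel page, or returns 0 on
// failure.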
static uintptr_t IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr  = 0;
    int           options = 0; // KMA_LOMEM;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
                                page_size, 0, options, VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) vmaddr = 0;
    else                    bzero((void *) vmaddr, page_size);

    return ((uintptr_t) vmaddr);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t                mapTask     = NULL;
    vm_map_t              vmmap       = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // Make sure super::free() doesn't deallocate _ranges before super::init() has run.
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

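    // Derive a power-of-two alignment from the physical mask: the run of low
    // zero bits in the mask dictates the required alignment. For example, a
    // physicalMask of 0x00000000FFFFF000 yields alignment 0x1000 (one page)
    // and highestMask 0xFFFFFFFF.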
    if (physicalMask && (alignment <= 1))
    {
        alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so auto-prepare stays on for compatibility.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

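        // Three allocation paths: a physically restricted allocation when
        // contiguity, a physical mask, or a larger-than-page alignment is
        // required; the page sub-allocator for small aligned buffers that fit
        // within one page; otherwise plain IOMallocAligned()/IOMalloc().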
        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                capacity, highestMask, alignment, contig);
        }
        else if (needZero
                 && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero = false;
            _buffer  = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer)
            {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
            }
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        iomdOptions |= kIOMemoryPersistent;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *) _buffer;
            volatile UInt8 * endAddr   = (UInt8 *) _buffer + capacity;

            while (startAddr < endAddr)
            {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();  // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}
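
/*
 * Usage sketch (illustrative; kMyBufferSize is hypothetical): allocating a
 * zeroed, page-aligned kernel buffer from a driver.
 *
 *   enum { kMyBufferSize = 8192 };
 *   IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::inTaskWithOptions(
 *       kernel_task, kIODirectionInOut, kMyBufferSize, page_size);
 *   if (bmd)
 *   {
 *       void * cpuPtr = bmd->getBytesNoCopy();   // kernel virtual address
 *       // ... fill the buffer, program I/O against the descriptor ...
 *       bmd->release();
 *   }
 */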

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t) 0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1));
}

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t) 0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                  | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}
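
/*
 * Usage sketch (illustrative; myCommand is hypothetical): preloading a
 * descriptor with a fixed command block.
 *
 *   static const UInt8 myCommand[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
 *   IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::withBytes(
 *       myCommand, sizeof(myCommand), kIODirectionOut);
 *   // On success, bmd has length == capacity == sizeof(myCommand) and owns
 *   // a private copy of the bytes.
 */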

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        OSAddAtomicLong(-(round_page(size)), &debug_iomallocpageable_size);
#endif
    }
    else if (buffer)
    {
        if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (kInternalFlagPageAllocated & internalFlags)
        {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page)
            {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            OSAddAtomic(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        }
        else if (alignment > 1)
        {
            IOFreeAligned(buffer, size);
        }
        else
        {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);
    if (length > _capacity) return;

    _length = length;
    _ranges.v64->length = length;
}
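
/*
 * Usage sketch (illustrative): reusing one descriptor for transfers of
 * different sizes instead of allocating a new one each time.
 *
 *   bmd->setLength(512);     // first transfer moves 512 bytes
 *   // ... issue I/O ...
 *   bmd->setLength(128);     // next transfer reuses the same buffer
 */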

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *) (_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
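
/*
 * Usage sketch (illustrative; hdr, payload, payloadLen are hypothetical):
 * staging a header followed by a payload. Note the silent truncation at
 * capacity described above.
 *
 *   bmd->setLength(0);                        // rewind to empty
 *   bmd->appendBytes(&hdr, sizeof(hdr));      // length becomes sizeof(hdr)
 *   bmd->appendBytes(payload, payloadLen);    // copies at most capacity - length bytes
 */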

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *) _ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *) (address + start);
    return 0;
}
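
/*
 * Usage sketch (illustrative): the ranged getBytesNoCopy() bounds-checks
 * against the current length, not the capacity, so it returns 0 for any
 * window extending past the value set by setLength().
 *
 *   bmd->setLength(256);
 *   void * p = bmd->getBytesNoCopy(128, 128);   // OK: window [128, 256)
 *   void * q = bmd->getBytesNoCopy(128, 256);   // 0: window crosses length
 */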

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);