/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uintptr_t IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr  = 0;
    int           options = 0; // KMA_LOMEM;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
                                page_size, 0, options);

    if (KERN_SUCCESS != kr) vmaddr = 0;
    else                    bzero((void *) vmaddr, page_size);

    return ((uintptr_t) vmaddr);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t                mapTask     = NULL;
    vm_map_t              vmmap       = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped      = false;
    bool                  needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // Make sure super::free() doesn't deallocate _ranges before super::init() has run.
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1))
    {
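        // Derive the coarsest alignment implied by the zero bits of the mask.
        // Illustrative example (not from the original source): physicalMask
        // 0xFFFFF000 has twelve low zero bits, so this expression yields 0xFFF
        // and the increment that follows produces alignment 0x1000 (4 KB).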
        alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // That was never enforced, however, so the historical behavior stands.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                capacity, highestMask, alignment, contig);
        }
        else if (needZero
                 && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero = false;
            _buffer  = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer)
            {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                debug_iomalloc_size += capacity;
#endif
            }
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        iomdOptions |= kIOMemoryPersistent;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}
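
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * driver allocation of a page-sized, wired, physically contiguous buffer
 * in the kernel task. The option and size values here are hypothetical.
 *
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
 *             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *             4096, page_size);
 *     if (bmd) {
 *         void * p = bmd->getBytesNoCopy();   // kernel virtual address
 *         // ... program the hardware, perform I/O ...
 *         bmd->release();
 *     }
 */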

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}
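
/*
 * Usage sketch (illustrative): constraining a buffer for a device that can
 * only address the low 32 bits of physical memory. The mask and size are
 * example values; a real driver would use the mask its hardware requires.
 *
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
 *             kIODirectionOut, 65536, 0x00000000FFFFFFFFULL);
 */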

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
        inDirection | kIOMemoryUnshared
            | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
        inCapacity, inContiguous ? inCapacity : 1));
}

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor * me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
            kernel_task, inDirection | kIOMemoryUnshared
            | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
            inLength, inLength, 0))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}
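
/*
 * Usage sketch (illustrative): preloading a descriptor with a small command
 * block before handing it to hardware. The struct and values are hypothetical.
 *
 *     struct MyCommand { uint32_t opcode; uint32_t arg; };
 *     MyCommand cmd = { 0x01, 0x1000 };
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::withBytes(&cmd, sizeof(cmd),
 *                                             kIODirectionOut, false);
 */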

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (kInternalFlagPageAllocated & internalFlags)
        {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page)
            {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            debug_iomalloc_size -= size;
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        }
        else if (alignment > 1)
        {
            IOFreeAligned(buffer, size);
        }
        else
        {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}
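
/*
 * Note: as with any OSObject, clients never call free() directly; they call
 * release(), and free() runs when the retain count drops to zero.
 */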

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}
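
/*
 * Usage sketch (illustrative): reusing one descriptor for transfers of
 * different sizes instead of allocating a new one per transfer, assuming
 * the capacity is at least 4096 bytes.
 *
 *     bmd->setLength(512);    // first, a short transfer
 *     // ... perform I/O ...
 *     bmd->setLength(4096);   // later, reuse the same buffer for a full page
 */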

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
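
/*
 * Usage sketch (illustrative): building up a buffer incrementally. Each call
 * appends at the current length and silently truncates at capacity, so a
 * caller that must not lose data should check the remaining room first.
 * (header and payload are hypothetical caller variables.)
 *
 *     bmd->setLength(0);
 *     bmd->appendBytes(header, sizeof(header));
 *     bmd->appendBytes(payload, payloadLen);
 */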

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
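
/*
 * Usage sketch (illustrative): reading at an offset. The ranged variant
 * returns 0 when the requested window falls outside the current length,
 * so the result must be checked.
 *
 *     void * p = bmd->getBytesNoCopy(512, 128);
 *     if (p) {
 *         // ... safe to read 128 bytes at offset 512 ...
 *     }
 */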

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount   offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);