/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical  = 0x00000001,
    kInternalFlagPageSized = 0x00000002
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t     kr;
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;

    if (!capacity)
        return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (physicalMask && (alignment <= 1))
    {
        // No explicit alignment was requested, so derive one from the physical
        // mask: (~physicalMask & (physicalMask - 1)) isolates the mask's
        // trailing zero bits, so after the increment, alignment is the mask's
        // lowest set bit (e.g. a mask of 0xFFFFF000 yields 4096), and
        // highestMask covers the full addressable range below the mask.
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so keep the historical behaviour.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
                                    (0 != (options & kIOMemoryPhysicallyContiguous)));
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }

        if (!_buffer)
        {
            return false;
        }
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page(capacity))) {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return (false);

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}

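/*
 * Illustrative driver-side use of the two factory methods above; this is a
 * sketch only, and names such as userTask, userBufferSize and dmaSize are
 * hypothetical, not defined in this file.
 *
 *    // Pageable buffer shared between the kernel and a user task:
 *    IOBufferMemoryDescriptor * shared = IOBufferMemoryDescriptor::inTaskWithOptions(
 *        userTask,
 *        kIODirectionInOut | kIOMemoryPageable | kIOMemoryKernelUserShared,
 *        userBufferSize, page_size);
 *
 *    // Wired buffer reachable by hardware that can only address the low 4 GB;
 *    // the mask also implies page alignment (see initWithPhysicalMask above):
 *    IOBufferMemoryDescriptor * dmaBuf = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *        kernel_task, kIODirectionInOut, dmaSize, 0x00000000FFFFF000ULL);
 *
 *    // Both objects must eventually be release()d by the caller.
 */
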
#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1));
}

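/*
 * Minimal usage sketch for withCapacity (hypothetical names, illustration only):
 *
 *    IOBufferMemoryDescriptor * scratch =
 *        IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionInOut, false);
 *    if (scratch) {
 *        void * p = scratch->getBytesNoCopy();   // kernel-virtual address of the buffer
 *        // ... fill p, hand the descriptor to the I/O path, then ...
 *        scratch->release();
 *    }
 */
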
#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                      | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}

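/*
 * Illustrative use of withBytes (hypothetical names; MyCommand is an example
 * structure, not something defined in this file):
 *
 *    struct MyCommand cmd = { ... };
 *    IOBufferMemoryDescriptor * cmdDesc = IOBufferMemoryDescriptor::withBytes(
 *        &cmd, sizeof(cmd), kIODirectionOut, false);
 *    // cmdDesc now owns a private copy of cmd; release() it when the I/O completes.
 */
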
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (internalFlags & kInternalFlagPhysical)
        {
            if (kInternalFlagPageSized & internalFlags)
                size = round_page(size);
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

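/*
 * Reuse pattern sketch (hypothetical names/sizes): one descriptor can serve
 * transfers of different lengths and directions instead of being recreated.
 *
 *    buf->setLength(bytesThisTransfer);      // must not exceed getCapacity()
 *    buf->setDirection(kIODirectionOut);     // device reads from the buffer
 *    // ... issue the I/O, then later:
 *    buf->setDirection(kIODirectionIn);      // device writes into the buffer
 */
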
/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}

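/*
 * Incremental-fill sketch (hypothetical data; the copy is silently truncated
 * at the capacity, as described above):
 *
 *    buf->setLength(0);                         // reset before refilling
 *    buf->appendBytes(&header, sizeof(header));
 *    buf->appendBytes(payload, payloadLen);     // length now tracks what was copied
 */
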
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

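/*
 * Range-checked access sketch (hypothetical offsets): the two-argument form
 * returns 0 unless [start, start + withLength) lies within the current length.
 *
 *    void * slice = buf->getBytesNoCopy(headerSize, recordSize);
 *    if (slice)
 *        processRecord(slice);   // safe: the range lies inside the valid length
 *    // a NULL return means the requested range exceeds getLength()
 */
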
#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);