/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical  = 0x00000001,
    kInternalFlagPageSized = 0x00000002
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t     kr;
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;

    if (!capacity)
        return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

#if 0
    // workarounds-
    if ((options & kIOMemoryPhysicallyContiguous) || ((capacity == 0x1000) && (inTask == kernel_task))
        && !physicalMask)
    {
        highestMask = physicalMask = 0xFFFFF000;
    }
    //-
#endif

    if (physicalMask && (alignment <= 1))
    {
        // Derive the alignment from the mask's trailing zero bits, and fold
        // it back into the highest allowable physical address.
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
                                   (0 != (options & kIOMemoryPhysicallyContiguous)));
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }

        if (!_buffer)
        {
            return false;
        }
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page(capacity))) {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return (false);

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();              // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}
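
/*
 * Editor's usage sketch (not part of the original source): allocating a
 * page-aligned, pageable buffer that can also be mapped into a client task.
 * The "clientTask" parameter is a hypothetical task_t obtained elsewhere,
 * e.g. from an IOUserClient. Kept under #if 0 so it is never compiled.
 */
#if 0
static IOBufferMemoryDescriptor *
exampleSharedBuffer(task_t clientTask)
{
    IOBufferMemoryDescriptor * bmd;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(
              clientTask,
              kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
              page_size,                // capacity
              page_size);               // alignment
    // NULL on failure; on success the caller owns one reference and must
    // release() it when done.
    return (bmd);
}
#endif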

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}
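
/*
 * Editor's usage sketch (not part of the original source): a wired,
 * physically contiguous buffer whose pages all lie below 4GB and start on a
 * 4K boundary, as a 32-bit DMA engine might require. The mask supplies both
 * the reachable address range and, via its trailing zero bits, the alignment.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static IOBufferMemoryDescriptor *
exampleDMABuffer(mach_vm_size_t length)
{
    return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task,
                kIODirectionInOut | kIOMemoryPhysicallyContiguous,
                length,
                0x00000000FFFFF000ULL));
}
#endif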

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1));
}
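
/*
 * Editor's usage sketch (not part of the original source): the typical
 * lifecycle of a buffer created with withCapacity() - fill it through
 * getBytesNoCopy(), trim the transfer length with setLength(), wire it with
 * prepare() before I/O, then complete() and release() when finished.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static void
exampleWithCapacity(void)
{
    IOBufferMemoryDescriptor * bmd;
    void *                     p;

    bmd = IOBufferMemoryDescriptor::withCapacity(page_size, kIODirectionOut, false);
    if (!bmd)
        return;

    p = bmd->getBytesNoCopy();
    bzero(p, page_size);
    bmd->setLength(512);                // only the first 512 bytes will transfer

    if (kIOReturnSuccess == bmd->prepare())
    {
        /* ... hand the descriptor to the I/O path ... */
        bmd->complete();
    }
    bmd->release();
}
#endif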

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                  | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}
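
/*
 * Editor's usage sketch (not part of the original source): withBytes() copies
 * the caller's data into a freshly allocated descriptor, so the source may be
 * a temporary such as this hypothetical ExampleCommand on the stack.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
struct ExampleCommand
{
    uint32_t opcode;
    uint32_t argument;
};

static IOBufferMemoryDescriptor *
exampleWithBytes(void)
{
    ExampleCommand cmd = { 1, 0 };

    return (IOBufferMemoryDescriptor::withBytes(&cmd, sizeof(cmd),
                                                kIODirectionOut, false));
}
#endif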

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (internalFlags & kInternalFlagPhysical)
        {
            if (kInternalFlagPageSized & internalFlags)
                size = round_page(size);
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
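
/*
 * Editor's usage sketch (not part of the original source): building up the
 * contents of an existing descriptor incrementally. appendBytes() advances
 * the descriptor's length and never copies past its capacity.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static bool
exampleAppend(IOBufferMemoryDescriptor * bmd,
              const void * header,  vm_size_t headerLen,
              const void * payload, vm_size_t payloadLen)
{
    bmd->setLength(0);                  // start empty, keep the capacity
    return (bmd->appendBytes(header, headerLen)
            && bmd->appendBytes(payload, payloadLen));
}
#endif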

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
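
/*
 * Editor's usage sketch (not part of the original source): the ranged
 * getBytesNoCopy() returns NULL when the requested window falls outside the
 * descriptor's current length, so it doubles as a bounds check.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static bool
exampleReadWord(IOBufferMemoryDescriptor * bmd, vm_size_t offset, uint32_t * result)
{
    void * p = bmd->getBytesNoCopy(offset, sizeof(uint32_t));

    if (!p)
        return false;                   // offset/length outside the buffer
    bcopy(p, result, sizeof(uint32_t));
    return true;
}
#endif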

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);