/* iokit/Kernel/IOBufferMemoryDescriptor.cpp (xnu-344.12.2) */
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>
__END_DECLS

extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    vm_map_t map = 0;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithRanges(&_singleRange.v, 1,
                               (IODirection) (options & kIOMemoryDirectionMask),
                               inTask, true))
        return false;

    if (options & kIOMemoryPageable)
    {
        _flags |= kIOMemoryRequiresWire;

        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {
            kr = mach_make_memory_entry( map,
                                         &size, _ranges.v[0].address,
                                         VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                                         NULL );
            if( (KERN_SUCCESS == kr) && (size != round_page(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }
    else
    {
        /* Precompute virtual-to-physical page mappings. */
        vm_address_t inBuffer = (vm_address_t) _buffer;
        _physSegCount = atop(trunc_page(inBuffer + capacity - 1) -
                             trunc_page(inBuffer)) + 1;
        _physAddrs = IONew(IOPhysicalAddress, _physSegCount);
        if (!_physAddrs)
            return false;

        inBuffer = trunc_page(inBuffer);
        for (unsigned i = 0; i < _physSegCount; i++) {
            _physAddrs[i] = pmap_extract(get_task_pmap(kernel_task), inBuffer);
            assert(_physAddrs[i]); /* supposed to be wired */
            inBuffer += page_size;
        }
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment = 1)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
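
/*
 * Usage sketch (illustrative, not part of the original implementation):
 * a driver that needs a buffer backed by a client task's address space
 * might call inTaskWithOptions() as below.  The helper name and the
 * clientTask parameter are hypothetical; only the
 * IOBufferMemoryDescriptor calls come from this class.  Note that a
 * non-kernel task requires kIOMemoryPageable (see initWithOptions above).
 *
 *     static IOBufferMemoryDescriptor *
 *     AllocClientBuffer( task_t clientTask, vm_size_t bytes )
 *     {
 *         IOBufferMemoryDescriptor * buf;
 *
 *         buf = IOBufferMemoryDescriptor::inTaskWithOptions(
 *                   clientTask,
 *                   kIODirectionOutIn | kIOMemoryPageable,
 *                   bytes,
 *                   page_size );
 *         // caller owns one reference; release() it when finished
 *         return( buf );
 *     }
 */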

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment = 1)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}
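
/*
 * Usage sketch: withOptions() is the kernel-task convenience form.  For a
 * DMA-style allocation a caller might, for example, request a wired,
 * physically contiguous buffer; the size and direction below are
 * illustrative assumptions.
 *
 *     IOBufferMemoryDescriptor * dmaBuf =
 *         IOBufferMemoryDescriptor::withOptions(
 *             kIODirectionOut | kIOMemoryPhysicallyContiguous,
 *             4096,            // capacity in bytes
 *             4096 );          // alignment
 *     if( dmaBuf ) {
 *         bzero( dmaBuf->getBytesNoCopy(), dmaBuf->getCapacity() );
 *         // hand the range to hardware via getPhysicalSegment(), then:
 *         dmaBuf->release();
 *     }
 */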

/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
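
/*
 * Usage sketch: withCapacity() wraps withOptions() as shown above, so a
 * hypothetical caller only chooses size, direction and contiguity.
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity( 1024, kIODirectionIn, false );
 *     if( buf ) {
 *         // the length starts out equal to the capacity (1024 bytes here)
 *         assert( buf->getLength() == buf->getCapacity() );
 *         buf->release();
 *     }
 */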

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)) {
        me->release();
        me = 0;
    }
    return me;
}
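
/*
 * Usage sketch: withBytes() copies caller data into a fresh buffer via
 * appendBytes().  The source array below is a hypothetical command block,
 * not anything defined by this class.
 *
 *     static const UInt8 command[4] = { 0x12, 0x00, 0x00, 0x24 };
 *     IOBufferMemoryDescriptor * cmd =
 *         IOBufferMemoryDescriptor::withBytes( command, sizeof( command ),
 *                                              kIODirectionOut, false );
 *     if( cmd ) {
 *         // both length and capacity are now sizeof(command)
 *         cmd->release();
 *     }
 */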

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     map       = 0;
    vm_offset_t  alignment = _alignment;

    if (_physAddrs)
        IODelete(_physAddrs, IOPhysicalAddress, _physSegCount);

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}
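
/*
 * Note: clients do not call free() directly; it runs when the last
 * reference is dropped.  A typical (illustrative) lifetime looks like:
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity( 512, kIODirectionOutIn, false );
 *     if( buf ) {
 *         buf->release();    // refcount reaches zero, so free() runs
 *     }
 */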

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
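
/*
 * Usage sketch: setLength() and setDirection() let one descriptor be
 * reused for transfers of different sizes and directions.  The transfer
 * sizes below are illustrative.
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity( 4096, kIODirectionOut, false );
 *     if( buf ) {
 *         buf->setLength( 256 );               // first, a short 256-byte write
 *         buf->setDirection( kIODirectionIn ); // later, reuse it for a read
 *         buf->setLength( 1024 );
 *         buf->release();
 *     }
 */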

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
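
/*
 * Usage sketch: appendBytes() grows the length while copying, but never
 * past the capacity.  Building a message incrementally might look like
 * this (header and payload contents are hypothetical):
 *
 *     static const UInt8 header[2]  = { 0xAA, 0x55 };
 *     static const UInt8 payload[3] = { 0x01, 0x02, 0x03 };
 *     IOBufferMemoryDescriptor * msg =
 *         IOBufferMemoryDescriptor::withCapacity( 32, kIODirectionOut, false );
 *     if( msg ) {
 *         msg->setLength( 0 );                          // start empty
 *         msg->appendBytes( header,  sizeof( header ) );
 *         msg->appendBytes( payload, sizeof( payload ) );
 *         // getLength() is now 5
 *         msg->release();
 *     }
 */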

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
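
/*
 * Usage sketch: both getBytesNoCopy() forms return pointers into the
 * descriptor's own storage; no copy is made, so the pointer is only valid
 * while the descriptor is retained.  The offsets below are illustrative.
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity( 128, kIODirectionOutIn, false );
 *     if( buf ) {
 *         UInt8 * base = (UInt8 *) buf->getBytesNoCopy();
 *         base[0] = 0x7F;
 *         // a 16-byte window starting at offset 8, or zero if out of range
 *         void *  win  = buf->getBytesNoCopy( 8, 16 );
 *         if( win )
 *             bzero( win, 16 );
 *         buf->release();
 *     }
 */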

/*
 * getPhysicalSegment:
 *
 * Get the physical address of the buffer, relative to the current position.
 * If the current position is at the end of the buffer, a zero is returned.
 */
IOPhysicalAddress
IOBufferMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
                                             IOByteCount * lengthOfSegment)
{
    IOPhysicalAddress physAddr;

    if( offset != _position)
        setPosition( offset );

    assert(_position <= _length);

    /* Fail gracefully if the position is at (or past) the end-of-buffer. */
    if (_position >= _length) {
        *lengthOfSegment = 0;
        return 0;
    }

    if (_options & kIOMemoryPageable) {
        physAddr = super::getPhysicalSegment(offset, lengthOfSegment);

    } else {
        /* Compute the largest contiguous physical length possible. */
        vm_address_t actualPos  = _singleRange.v.address + _position;
        vm_address_t actualPage = trunc_page(actualPos);
        unsigned     physInd    = atop(actualPage - trunc_page(_singleRange.v.address));

        vm_size_t physicalLength = actualPage + page_size - actualPos;
        for (unsigned index = physInd + 1; index < _physSegCount &&
             _physAddrs[index] == _physAddrs[index - 1] + page_size; index++) {
            physicalLength += page_size;
        }

        /* Clip contiguous physical length at the end-of-buffer. */
        if (physicalLength > _length - _position)
            physicalLength = _length - _position;

        *lengthOfSegment = physicalLength;
        physAddr = _physAddrs[physInd] + (actualPos - actualPage);
    }

    return physAddr;
}
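
/*
 * Usage sketch: a driver walking the buffer's physical segments (for
 * example to build hypothetical scatter/gather elements) might loop as
 * follows; the loop ends when getPhysicalSegment() returns zero at the
 * end of the buffer.
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity( 3 * page_size,
 *                                                 kIODirectionOut, false );
 *     if( buf ) {
 *         IOByteCount       offset = 0;
 *         IOByteCount       segLen;
 *         IOPhysicalAddress segPhys;
 *
 *         while( (segPhys = buf->getPhysicalSegment( offset, &segLen )) ) {
 *             // program one hardware element with (segPhys, segLen) here
 *             offset += segLen;
 *         }
 *         buf->release();
 *     }
 */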

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);