/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

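/*
 * The raw address / range initializers inherited from IOMemoryDescriptor do
 * not apply to buffer descriptors, so each of the overrides below simply
 * rejects the call.
 */
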
bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

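/*
 * initWithOptions:
 *
 * Primary initializer. Pageable buffers (kIOMemoryPageable) are backed by a
 * named memory entry with the requested sharing and cache attributes and are
 * mapped into the kernel map or the requesting task's map; otherwise a wired
 * buffer is allocated in kernel space (physically contiguous, aligned, or
 * plain, depending on the options and alignment).
 */
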
bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    kern_return_t  kr;
    vm_map_t       vmmap = 0;
    IOOptionBits   iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the auto-prepare bits from the buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;

        // set memory entry cache mode
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }

        kr = mach_make_memory_entry( vmmap,
                    &size, 0,
                    memEntryCacheMode, &sharedMem,
                    NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if ((NULL == inTask) && (options & kIOMemoryPageable))
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffers should never auto-prepare; they should be prepared
        // explicitly. But that was never enforced, so what can you do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);

        if (!_buffer)
            return false;
    }

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable)
    {
        kern_return_t kr;

        if (vmmap)
        {
            kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, round_page_32(capacity));
            if (KERN_SUCCESS != kr)
            {
                _buffer = 0;
                return( false );
            }
            _singleRange.v.address = (vm_address_t) _buffer;
        }
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}

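/*
 * Illustrative usage sketch (not part of the original source; clientTask and
 * the chosen flags are hypothetical): allocate a page-sized, pageable buffer
 * shared with a client task, then release it when done.
 *
 *   IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithOptions(
 *       clientTask,
 *       kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
 *       page_size, page_size);
 *   if (buf) {
 *       // ... use buf->getBytesNoCopy(), hand the descriptor to a user client, etc.
 *       buf->release();
 *   }
 */
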
bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}

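/*
 * Illustrative usage sketch (not part of the original source; the size is
 * hypothetical): a 4 KB wired kernel buffer for a bidirectional transfer.
 *
 *   IOBufferMemoryDescriptor * buf =
 *       IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, false);
 *   if (buf) {
 *       void * p = buf->getBytesNoCopy();   // kernel virtual address
 *       // ... fill or read up to buf->getCapacity() bytes ...
 *       buf->release();
 *   }
 */
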
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
        me->release();
        me = 0;
    }
    return me;
}

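/*
 * Illustrative usage sketch (not part of the original source; the command
 * array is hypothetical): wrap a copy of an existing kernel buffer for output.
 *
 *   static const UInt8 cmd[] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
 *   IOBufferMemoryDescriptor * md =
 *       IOBufferMemoryDescriptor::withBytes(cmd, sizeof(cmd), kIODirectionOut, false);
 *   if (md) {
 *       // md->getLength() == sizeof(cmd); the data has already been copied in
 *       md->release();
 *   }
 */
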
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     vmmap     = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (options & kIOMemoryPhysicallyContiguous)
            IOFreeContiguous(buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}

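/*
 * Illustrative usage sketch (not part of the original source): reuse one
 * descriptor for transfers shorter than its capacity.
 *
 *   // buf was created with a 4096-byte capacity (hypothetical)
 *   buf->setLength(512);                    // next transfer moves only 512 bytes
 *   // ... perform the I/O ...
 *   buf->setLength(buf->getCapacity());     // restore the full length later
 */
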
/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}

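/*
 * Illustrative usage sketch (not part of the original source; header and
 * payload are hypothetical): build a buffer incrementally. Copies are
 * silently truncated at the capacity, and the method returns true even then.
 *
 *   buf->setLength(0);                          // start empty
 *   buf->appendBytes(&header, sizeof(header));
 *   buf->appendBytes(payload, payloadLen);
 *   // buf->getLength() now reflects the bytes actually copied
 */
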
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address at an offset from the beginning of the buffer,
 * or zero if the requested range does not lie within the current length.
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}

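/*
 * Illustrative usage sketch (not part of the original source): access a
 * sub-range without copying; a zero return means the range falls outside
 * the descriptor's current length.
 *
 *   void * p = buf->getBytesNoCopy(64, 128);
 *   if (p) {
 *       // p points 64 bytes into the buffer; 128 bytes are valid there
 *   }
 */
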
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);