/* apple/xnu: iokit/Kernel/IOBufferMemoryDescriptor.cpp (commit 16a664db1fea696124ef51a1d9ecfc8155df7de6) */
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
    void *      /* address */ ,
    IOByteCount /* withLength */ ,
    IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
    vm_address_t /* address */ ,
    IOByteCount  /* withLength */ ,
    IODirection  /* withDirection */ ,
    task_t       /* withTask */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
    IOPhysicalAddress /* address */ ,
    IOByteCount       /* withLength */ ,
    IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
    IOPhysicalRange * /* ranges */ ,
    UInt32            /* withCount */ ,
    IODirection       /* withDirection */ ,
    bool              /* asReference */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
    IOVirtualRange * /* ranges */ ,
    UInt32           /* withCount */ ,
    IODirection      /* withDirection */ ,
    task_t           /* withTask */ ,
    bool             /* asReference */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    kern_return_t kr;
    vm_map_t      vmmap = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;

        // set memory entry cache mode
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }

        kr = mach_make_memory_entry( vmmap,
                    &size, 0,
                    memEntryCacheMode, &sharedMem,
                    NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if ((NULL == inTask) && (options & kIOMemoryPageable))
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this.
        // Buffers should never auto-prepare; they should be prepared
        // explicitly. But that was never enforced, so what are you going
        // to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);

        if (!_buffer)
            return false;
    }

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable)
    {
        kern_return_t kr;

        if (vmmap)
        {
            kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, round_page_32(capacity));
            if (KERN_SUCCESS != kr)
            {
                _buffer = 0;
                return( false );
            }
            _singleRange.v.address = (vm_address_t) _buffer;
        }
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
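
/*
 * Usage sketch (hypothetical driver code, not part of the original file):
 * clients typically allocate through this factory rather than calling
 * initWithOptions() directly. The particular option bits below (direction,
 * pageable, cache mode) are illustrative choices; initWithOptions() extracts
 * them via kIOMemoryDirectionMask, kIOMemoryPageable and kIOMapCacheMask.
 *
 *   IOBufferMemoryDescriptor * buf =
 *       IOBufferMemoryDescriptor::inTaskWithOptions(
 *           kernel_task,
 *           kIODirectionOutIn | kIOMemoryPageable | kIOMapWriteCombineCache,
 *           65536,           // capacity in bytes
 *           page_size );     // alignment
 *   if (buf) {
 *       // ... use buf->getBytesNoCopy(), prepare()/complete(), etc. ...
 *       buf->release();      // drop the creation reference when done
 *   }
 */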

bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}

/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1 ));
}
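
/*
 * Usage sketch (hypothetical, for illustration only): withCapacity() is the
 * simplest factory. It wires the buffer into kernel space and, when
 * inContiguous is true, also requests physically contiguous pages aligned
 * to the capacity (see the alignment argument it forwards above).
 *
 *   IOBufferMemoryDescriptor * dmaBuf =
 *       IOBufferMemoryDescriptor::withCapacity(8192, kIODirectionIn, true);
 *   if (dmaBuf) {
 *       void * p = dmaBuf->getBytesNoCopy();   // kernel virtual address
 *       // ... program the hardware to fill the buffer at p ...
 *       dmaBuf->release();
 *   }
 */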

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
                inDirection | kIOMemoryUnshared
                    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
        me->release();
        me = 0;
    }
    return me;
}
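
/*
 * Usage sketch (hypothetical): withBytes() copies the caller's data into a
 * freshly allocated buffer, so the source may live on the stack and go away
 * immediately afterwards.
 *
 *   static const UInt8 cmd[] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
 *   IOBufferMemoryDescriptor * cmdBuf =
 *       IOBufferMemoryDescriptor::withBytes(cmd, sizeof(cmd),
 *                                           kIODirectionOut, false);
 *   if (cmdBuf) {
 *       // cmdBuf->getLength() == sizeof(cmd); the bytes were copied in
 *       cmdBuf->release();
 *   }
 */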

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     vmmap     = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (options & kIOMemoryPhysicallyContiguous)
            IOFreeContiguous(buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
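
/*
 * Usage sketch (hypothetical): as the comments above describe, one
 * descriptor can be recycled across transfers of different sizes and
 * directions instead of being destroyed and reallocated.
 *
 *   // buf was created earlier with a 64 KB capacity
 *   buf->setLength(512);                     // short outbound transfer
 *   buf->setDirection(kIODirectionOut);
 *   // ... perform the write ...
 *   buf->setLength(4096);                    // larger inbound transfer
 *   buf->setDirection(kIODirectionIn);
 *   // ... perform the read ...
 */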

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
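
/*
 * Usage sketch (hypothetical): building a message incrementally. Because
 * the copy is silently clamped to the remaining capacity (and the call
 * still returns true), a caller that must not lose data should check the
 * remaining room first. header/payload/payloadLen are illustrative names.
 *
 *   IOBufferMemoryDescriptor * msg =
 *       IOBufferMemoryDescriptor::withCapacity(256, kIODirectionOut, false);
 *   if (msg) {
 *       msg->setLength(0);                         // start empty
 *       msg->appendBytes(&header, sizeof(header));
 *       if (msg->getLength() + payloadLen <= msg->getCapacity())
 *           msg->appendBytes(payload, payloadLen);
 *       msg->release();
 *   }
 */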

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
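
/*
 * Usage sketch (hypothetical): the ranged variant bounds-checks against the
 * descriptor's current length, returning 0 rather than a pointer past the
 * valid data.
 *
 *   // assume buf->getLength() == 128 here
 *   UInt8 * base = (UInt8 *) buf->getBytesNoCopy();   // always valid
 *   void *  mid  = buf->getBytesNoCopy(64, 64);       // ok: 64 + 64 <= 128
 *   void *  bad  = buf->getBytesNoCopy(100, 64);      // 0: 100 + 64 > 128
 */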

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);