/* iokit/Kernel/IOBufferMemoryDescriptor.cpp (xnu-792.6.76) */
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);
/*
 * The inherited initializers are deliberately disabled: an
 * IOBufferMemoryDescriptor always allocates and owns its own backing
 * store, so it cannot be initialized over caller-supplied memory.
 */
bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    kern_return_t kr;
    vm_map_t      vmmap = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;

        // set memory entry cache mode
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }

        kr = mach_make_memory_entry( vmmap,
                                     &size, 0,
                                     memEntryCacheMode, &sharedMem,
                                     NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if ((NULL == inTask) && (options & kIOMemoryPageable))
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffers should never auto prepare; they should be prepared
        // explicitly. But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);

        if (!_buffer)
            return false;
    }

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable)
    {
        kern_return_t kr;

        if (vmmap)
        {
            kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, round_page_32(capacity));
            if (KERN_SUCCESS != kr)
            {
                _buffer = 0;
                return( false );
            }
            _singleRange.v.address = (vm_address_t) _buffer;
        }
    }

    setLength(capacity);

    return true;
}
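
/*
 * Illustrative sketch (not part of the original file): although the
 * non-pageable path above still sets kIOMemoryAutoPrepare, the comment in
 * that branch says buffers should be prepared explicitly. A typical driver
 * pairing looks like the following; "md" is a hypothetical descriptor.
 */
#if 0
static IOReturn doTransfer(IOBufferMemoryDescriptor * md)
{
    IOReturn ret = md->prepare();       // wire the memory down for I/O
    if (kIOReturnSuccess != ret)
        return ret;

    // ... program the hardware, e.g. using md->getPhysicalSegment() ...

    md->complete();                     // unwire once the I/O is finished
    return kIOReturnSuccess;
}
#endif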

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
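
/*
 * Illustrative sketch (not part of the original file): creating a pageable
 * buffer shared with a user task, e.g. from an IOUserClient. "fOwningTask"
 * is a hypothetical task_t captured when the client was initialized.
 */
#if 0
IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithOptions(
    fOwningTask,
    kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
    4096,                                   // capacity
    page_size );                            // alignment
if (buf) {
    // ... touch the data via buf->getBytesNoCopy(), map it for the client ...
    buf->release();
}
#endif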

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}

/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
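
/*
 * Illustrative sketch (not part of the original file): a physically
 * contiguous buffer suitable for simple DMA. Note that withCapacity()
 * passes the capacity itself as the alignment when inContiguous is true.
 */
#if 0
IOBufferMemoryDescriptor * dmaBuf =
    IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, true);
if (dmaBuf) {
    IOByteCount       seglen;
    IOPhysicalAddress phys = dmaBuf->getPhysicalSegment(0, &seglen);
    // ... hand "phys" to the hardware; contiguous, so one segment covers it ...
    dmaBuf->release();
}
#endif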

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
        me->release();
        me = 0;
    }
    return me;
}
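
/*
 * Illustrative sketch (not part of the original file): preloading a small
 * command block so the descriptor arrives ready to transmit. "MyCommand"
 * is a hypothetical structure.
 */
#if 0
struct MyCommand { UInt32 opcode; UInt32 arg; };
MyCommand cmd = { 0x01, 0 };

IOBufferMemoryDescriptor * cmdBuf = IOBufferMemoryDescriptor::withBytes(
    &cmd, sizeof(cmd), kIODirectionOut, false);
if (cmdBuf) {
    // length and capacity are both sizeof(cmd); the bytes were copied in
    cmdBuf->release();
}
#endif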

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     vmmap     = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (options & kIOMemoryPhysicallyContiguous)
            IOFreeContiguous(buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
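
/*
 * Illustrative sketch (not part of the original file): reusing a single
 * descriptor for transfers of different sizes and directions, per the two
 * comments above, instead of allocating a fresh buffer each time. "buf" is
 * a hypothetical previously created IOBufferMemoryDescriptor.
 */
#if 0
// First transfer: write 1 KB to the device
buf->setLength(1024);
buf->setDirection(kIODirectionOut);
// ... perform the output transfer ...

// Reuse the same buffer to read 512 bytes back
buf->setLength(512);
buf->setDirection(kIODirectionIn);
// ... perform the input transfer ...
#endif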

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
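
/*
 * Illustrative sketch (not part of the original file): building a buffer
 * incrementally. Note that appendBytes() silently truncates at capacity
 * and still returns true, so the caller must track sizes itself. "header"
 * and "payload"/"payloadLen" are hypothetical.
 */
#if 0
IOBufferMemoryDescriptor * pkt =
    IOBufferMemoryDescriptor::withCapacity(256, kIODirectionOut, false);
if (pkt) {
    pkt->setLength(0);                          // start empty
    pkt->appendBytes(&header, sizeof(header));  // hypothetical header blob
    pkt->appendBytes(payload, payloadLen);      // hypothetical payload
    // pkt->getLength() is now the total number of bytes appended
    pkt->release();
}
#endif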

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer,
 * or 0 if the requested range does not lie within the current length.
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
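
/*
 * Illustrative sketch (not part of the original file): the ranged variant
 * validates against the current length, not the capacity, so it returns 0
 * once the requested window passes the end of the valid data. "buf" is the
 * hypothetical descriptor from the earlier sketches.
 */
#if 0
void * p = buf->getBytesNoCopy(16, 64);     // window covering bytes [16, 80)
if (p) {
    // the window lies entirely within buf->getLength(); safe to read 64 bytes
}
#endif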

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);