/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    kern_return_t kr;
    vm_map_t      vmmap = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE
                                    | MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;

        // set memory entry cache mode
        switch (options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
        }

        kr = mach_make_memory_entry( vmmap,
                    &size, 0,
                    memEntryCacheMode, &sharedMem,
                    NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;
#if IOALLOCDEBUG
        debug_iomallocpageable_size += size;
#endif
        if ((NULL == inTask) && (options & kIOMemoryPageable))
            inTask = kernel_task;
        else if (inTask == kernel_task)
        {
            vmmap = kernel_map;
        }
        else
        {
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            vmmap = get_task_map(inTask);
            vm_map_reference(vmmap);
            reserved->map = vmmap;
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffers should never auto-prepare; they should be prepared
        // explicitly.  But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);

        if (!_buffer)
            return false;
    }

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable)
    {
        kern_return_t kr;

        if (vmmap)
        {
            kr = doMap(vmmap, (IOVirtualAddress *) &_buffer, kIOMapAnywhere, 0, round_page_32(capacity));
            if (KERN_SUCCESS != kr)
            {
                _buffer = 0;
                return( false );
            }
            _singleRange.v.address = (vm_address_t) _buffer;
        }
    }

    setLength(capacity);

    return true;
}

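/*
 * Note on the two allocation paths above: kIOMemoryPageable buffers are
 * backed by a named memory entry (mach_make_memory_entry) and mapped into
 * the requesting task via doMap(), so their pages remain pageable until the
 * descriptor is prepared; all other buffers are wired kernel memory obtained
 * from IOMallocContiguous, IOMallocAligned or IOMalloc, selected by the
 * kIOMemoryPhysicallyContiguous option and the requested alignment.
 */
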
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}

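/*
 * Illustrative usage (a sketch, not part of the original file): a driver
 * that needs a page of pageable memory mapped into a client task could use
 * the factory above; clientTask is a hypothetical task_t, the option and
 * direction constants are the standard IOKit ones referenced in this file.
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(
 *             clientTask,
 *             kIOMemoryPageable | kIODirectionOutIn,
 *             page_size, page_size);
 *     if (buf) {
 *         // ... use the buffer ...
 *         buf->release();
 *     }
 */
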
bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}

/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}

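/*
 * Illustrative usage (a sketch): allocating a wired, physically contiguous
 * 4K scratch buffer suitable for DMA might look like the line below.  Note
 * that passing inContiguous == true also forces the alignment to
 * inCapacity, as the implementation above shows.
 *
 *     IOBufferMemoryDescriptor * md =
 *         IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, true);
 */
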
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
        me->release();
        me = 0;
    }
    return me;
}

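/*
 * Illustrative usage (a sketch; the command bytes are hypothetical):
 * wrapping an existing template in a descriptor copies the bytes into
 * freshly allocated backing store, with both length and capacity set to
 * sizeof(cmd).
 *
 *     static const UInt8 cmd[] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
 *     IOBufferMemoryDescriptor * md =
 *         IOBufferMemoryDescriptor::withBytes(cmd, sizeof(cmd), kIODirectionOut);
 */
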
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     vmmap     = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        vmmap = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        if (!buffer || vmmap)
            debug_iomallocpageable_size -= round_page_32(size);
#endif
        if (buffer)
        {
            if (vmmap)
                vm_deallocate(vmmap, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
    }
    else if (buffer)
    {
        if (options & kIOMemoryPhysicallyContiguous)
            IOFreeContiguous(buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (vmmap)
        vm_map_deallocate(vmmap);
}

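/*
 * As with any OSObject subclass, clients do not call free() directly;
 * dropping the last reference with release() invokes it.  The backing
 * store is deallocated only after super::free() has unwired it, per the
 * comment in the implementation above.
 */
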
/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

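/*
 * Illustrative reuse pattern (a sketch, assuming the descriptor's capacity
 * is at least 512 bytes): rather than allocating a new descriptor per
 * transfer, one descriptor can be retargeted between I/O operations by
 * adjusting its length and direction.
 *
 *     md->setLength(512);                 // shorter transfer than capacity
 *     md->setDirection(kIODirectionIn);   // now a read
 */
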
/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}

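/*
 * Note that appendBytes() silently truncates at the remaining capacity
 * (actualBytesToCopy above) yet still returns true.  Illustrative usage
 * (a sketch; hdr, payload and payloadLen are hypothetical):
 *
 *     md->setLength(0);                   // reset before refilling
 *     md->appendBytes(hdr, sizeof(hdr));
 *     md->appendBytes(payload, payloadLen);
 */
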
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address at an offset from the beginning of the
 * buffer, or 0 if the requested range does not lie within the current
 * length.
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}

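/*
 * Illustrative usage (a sketch; headerSize, recordSize and processRecord
 * are hypothetical): the bounds check above guarantees the returned range
 * lies inside the descriptor's current length.
 *
 *     if (void * p = md->getBytesNoCopy(headerSize, recordSize))
 *         processRecord(p);
 */
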
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);