/* iokit/Kernel/IOBufferMemoryDescriptor.cpp (xnu-517.7.21) */
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    vm_map_t map = 0;
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // We have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this.
        // Buffers should never auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            kr = mach_make_memory_entry( map,
                        &size, _ranges.v[0].address,
                        memEntryCacheMode, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
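
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * driver might call inTaskWithOptions() to create a pageable buffer that is
 * shared with a user task.  The task handle "userTask" is an assumed
 * parameter; a kIOMapCacheMask value (e.g. kIOMapWriteCombineCache) could
 * also be OR'd into the options to select a cache mode, as handled above.
 */
#if 0
static IOBufferMemoryDescriptor *
exampleSharedBuffer(task_t userTask)
{
    // One page, page-aligned, pageable, shared between kernel and userTask.
    IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithOptions(
        userTask,
        kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
        page_size,          /* capacity  */
        page_size );        /* alignment */
    return buf;             // caller releases with buf->release()
}
#endif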

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
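
/*
 * Usage sketch (illustrative only, not part of the original file): typical
 * use of withCapacity() for a kernel-only, physically contiguous buffer.
 * The 4096-byte size is an arbitrary example value.
 */
#if 0
static void exampleWithCapacity(void)
{
    // One wired, physically contiguous 4 KB buffer; length == capacity.
    IOBufferMemoryDescriptor * buf =
        IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, true);
    if (!buf)
        return;
    // getBytesNoCopy() gives the kernel virtual address of the storage.
    bzero(buf->getBytesNoCopy(), buf->getCapacity());
    buf->release();
}
#endif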

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
        me->release();
        me = 0;
    }
    return me;
}
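
/*
 * Usage sketch (illustrative only, not part of the original file):
 * withBytes() copies the caller's data into a freshly allocated descriptor,
 * so the source buffer ("header" below, an assumed example) may be reused or
 * freed afterwards.
 */
#if 0
static IOBufferMemoryDescriptor *
exampleWithBytes(void)
{
    static const UInt8 header[4] = { 0xCA, 0xFE, 0xBA, 0xBE };

    // Length and capacity are both set to sizeof(header).
    return IOBufferMemoryDescriptor::withBytes(header, sizeof(header),
                                               kIODirectionOut, false);
}
#endif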

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     map       = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
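
/*
 * Usage sketch (illustrative only, not part of the original file): reusing
 * one descriptor for transfers of different sizes by adjusting its length
 * instead of allocating a new buffer each time.  "buf" and "transferSize"
 * are assumed for illustration.
 */
#if 0
static void exampleSetLength(IOBufferMemoryDescriptor * buf, vm_size_t transferSize)
{
    if (transferSize <= buf->getCapacity())
        buf->setLength(transferSize);   // shorter transfer, same backing store
}
#endif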

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
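
/*
 * Usage sketch (illustrative only, not part of the original file): building
 * up a descriptor's contents incrementally with appendBytes(), which also
 * advances the length.  Data beyond the remaining capacity is silently
 * truncated, per the comment above.
 */
#if 0
static void exampleAppendBytes(IOBufferMemoryDescriptor * buf)
{
    static const char tag[] = "HDR";

    buf->setLength(0);                       // start empty
    buf->appendBytes(tag, sizeof(tag) - 1);  // length is now 3
}
#endif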

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
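
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * two-argument getBytesNoCopy() bounds-checks the requested window against
 * the current length and returns 0 when the range does not fit.
 */
#if 0
static void exampleGetBytesNoCopy(IOBufferMemoryDescriptor * buf)
{
    // Look at bytes 8..15 of the buffer, if the descriptor is long enough.
    void * window = buf->getBytesNoCopy(8, 8);
    if (window)
        bzero(window, 8);
}
#endif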

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);