/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>
__END_DECLS

extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    vm_map_t map = 0;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithRanges(&_singleRange.v, 1,
                               (IODirection) (options & kIOMemoryDirectionMask),
                               inTask, true))
        return false;

    if (options & kIOMemoryPageable)
    {
        _flags |= kIOMemoryRequiresWire;

        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {
            kr = mach_make_memory_entry( map,
                                         &size, _ranges.v[0].address,
                                         VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                                         NULL );
            if( (KERN_SUCCESS == kr) && (size != round_page(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }
    else
    {
        /* Precompute virtual-to-physical page mappings. */
        vm_address_t inBuffer = (vm_address_t) _buffer;
        _physSegCount = atop(trunc_page(inBuffer + capacity - 1) -
                             trunc_page(inBuffer)) + 1;
        _physAddrs = IONew(IOPhysicalAddress, _physSegCount);
        if (!_physAddrs)
            return false;

        inBuffer = trunc_page(inBuffer);
        for (unsigned i = 0; i < _physSegCount; i++) {
            _physAddrs[i] = pmap_extract(get_task_pmap(kernel_task), inBuffer);
            assert(_physAddrs[i]); /* supposed to be wired */
            inBuffer += page_size;
        }
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
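
/*
 * Illustrative sketch (not part of the original source): a typical call to
 * inTaskWithOptions() for a pageable buffer allocated in a client task.
 * `clientTask' is a hypothetical task_t obtained elsewhere (e.g. from an
 * IOUserClient); the option, size, and alignment choices are arbitrary.
 */
#if 0
static IOBufferMemoryDescriptor * exampleTaskBuffer( task_t clientTask )
{
    return IOBufferMemoryDescriptor::inTaskWithOptions(
                clientTask,
                kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
                page_size,          /* capacity  */
                page_size );        /* alignment */
}
#endif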

bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
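
/*
 * Illustrative sketch (not part of the original source): allocating a wired
 * kernel scratch buffer with withCapacity().  The 4096-byte size and the
 * direction are arbitrary choices for the example.
 */
#if 0
static void exampleWithCapacity( void )
{
    IOBufferMemoryDescriptor * buf =
        IOBufferMemoryDescriptor::withCapacity( 4096, kIODirectionOutIn, false );

    if( buf) {
        bzero( buf->getBytesNoCopy(), buf->getCapacity() );
        /* ... hand buf to an I/O routine, then ... */
        buf->release();
    }
}
#endif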

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)) {
        me->release();
        me = 0;
    }
    return me;
}
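
/*
 * Illustrative sketch (not part of the original source): preloading a
 * descriptor with a fixed command block via withBytes().  The byte values
 * are made up for the example.
 */
#if 0
static IOBufferMemoryDescriptor * exampleWithBytes( void )
{
    static const UInt8 command[] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };

    /* length and capacity are both set to sizeof(command) */
    return IOBufferMemoryDescriptor::withBytes( command, sizeof(command),
                                                kIODirectionOut, false );
}
#endif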

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     map       = 0;
    vm_offset_t  alignment = _alignment;

    if (_physAddrs)
        IODelete(_physAddrs, IOPhysicalAddress, _physSegCount);

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
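
/*
 * Illustrative sketch (not part of the original source): reusing one
 * descriptor for transfers of different sizes, as the comment above
 * describes.  `transferLen' is a hypothetical per-transfer size.
 */
#if 0
static void exampleReuse( IOBufferMemoryDescriptor * buf, vm_size_t transferLen )
{
    if( transferLen <= buf->getCapacity()) {
        buf->setLength( transferLen );
        /* ... issue the transfer using buf ... */
    }
}
#endif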

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
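
/*
 * Illustrative sketch (not part of the original source): building up a
 * buffer incrementally with appendBytes().  The header/payload split is
 * hypothetical.
 */
#if 0
static void exampleAppend( IOBufferMemoryDescriptor * buf,
                           const void * header,  vm_size_t headerLen,
                           const void * payload, vm_size_t payloadLen )
{
    buf->setLength( 0 );                      /* reset before refilling */
    buf->appendBytes( header, headerLen );
    buf->appendBytes( payload, payloadLen );  /* clipped at capacity */
}
#endif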

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}

/*
 * getPhysicalSegment:
 *
 * Get the physical address of the buffer, relative to the current position.
 * If the current position is at the end of the buffer, a zero is returned.
 */
IOPhysicalAddress
IOBufferMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
                                             IOByteCount * lengthOfSegment)
{
    IOPhysicalAddress physAddr;

    if( offset != _position)
        setPosition( offset );

    assert(_position <= _length);

    /* Fail gracefully if the position is at (or past) the end-of-buffer. */
    if (_position >= _length) {
        *lengthOfSegment = 0;
        return 0;
    }

    if (_options & kIOMemoryPageable) {
        physAddr = super::getPhysicalSegment(offset, lengthOfSegment);

    } else {
        /* Compute the largest contiguous physical length possible. */
        vm_address_t actualPos  = _singleRange.v.address + _position;
        vm_address_t actualPage = trunc_page(actualPos);
        unsigned     physInd    = atop(actualPage - trunc_page(_singleRange.v.address));

        vm_size_t physicalLength = actualPage + page_size - actualPos;
        for (unsigned index = physInd + 1; index < _physSegCount &&
             _physAddrs[index] == _physAddrs[index-1] + page_size; index++) {
            physicalLength += page_size;
        }

        /* Clip contiguous physical length at the end-of-buffer. */
        if (physicalLength > _length - _position)
            physicalLength = _length - _position;

        *lengthOfSegment = physicalLength;
        physAddr = _physAddrs[physInd] + (actualPos - actualPage);
    }

    return physAddr;
}
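
/*
 * Illustrative sketch (not part of the original source): walking the
 * buffer's physical segments, e.g. to build a hypothetical DMA
 * scatter/gather list.  getPhysicalSegment() returns 0 at end-of-buffer.
 */
#if 0
static void exampleSegmentWalk( IOBufferMemoryDescriptor * buf )
{
    IOByteCount       offset = 0;
    IOByteCount       segLen;
    IOPhysicalAddress segPhys;

    while( (segPhys = buf->getPhysicalSegment( offset, &segLen ))) {
        /* program one scatter/gather element with (segPhys, segLen) */
        offset += segLen;
    }
}
#endif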

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);