/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
__END_DECLS

extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
        /* Allocate some kernel address space. */
        _buffer = IOMallocPageable(capacity, alignment);
    /* Allocate a wired-down buffer inside kernel space. */
    else if (options & kIOMemoryPhysicallyContiguous)
        _buffer = IOMallocContiguous(capacity, alignment, 0);
    else if (alignment > 1)
        _buffer = IOMallocAligned(capacity, alignment);
    else
        _buffer = IOMalloc(capacity);

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithRanges(&_singleRange.v, 1,
                               (IODirection) (options & kIOMemoryDirectionMask),
                               kernel_task, true))
        return false;

    if (options & kIOMemoryPageable) {
        _flags |= kIOMemoryRequiresWire;

        kern_return_t kr;
        ipc_port_t    sharedMem = (ipc_port_t) _memEntry;
        vm_size_t     size      = round_page(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if (0 == sharedMem) {
            kr = mach_make_memory_entry( IOPageableMapForAddress( _ranges.v[0].address ),
                                         &size, _ranges.v[0].address,
                                         VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                                         NULL );
            if ((KERN_SUCCESS == kr) && (size != round_page(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if (KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }

    } else {
        /* Precompute virtual-to-physical page mappings. */
        vm_address_t inBuffer = (vm_address_t) _buffer;
        _physSegCount = atop(trunc_page(inBuffer + capacity - 1) -
                             trunc_page(inBuffer)) + 1;
        _physAddrs = IONew(IOPhysicalAddress, _physSegCount);
        if (!_physAddrs)
            return false;

        inBuffer = trunc_page(inBuffer);
        for (unsigned i = 0; i < _physSegCount; i++) {
            _physAddrs[i] = pmap_extract(get_task_pmap(kernel_task), inBuffer);
            assert(_physAddrs[i]); /* supposed to be wired */
            inBuffer += page_size;
        }
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment)) {
        me->release();
        me = 0;
    }
    return me;
}
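
/*
 * Illustrative sketch, not part of the original source: one way a caller
 * might use withOptions() to obtain a page-aligned, pageable, bidirectional
 * buffer. The capacity and the function name below are hypothetical; the
 * option constants are the real IOKit flags tested by initWithOptions()
 * above.
 */
#if 0
static void examplePageableAllocation(void)
{
    IOBufferMemoryDescriptor * md = IOBufferMemoryDescriptor::withOptions(
                                        kIOMemoryPageable | kIODirectionOutIn,
                                        16 * page_size,   /* capacity (hypothetical) */
                                        page_size );      /* alignment */
    if (md) {
        /* prepare() wires the pageable memory before I/O; complete() unwires. */
        if (kIOReturnSuccess == md->prepare()) {
            /* ... perform the transfer ... */
            md->complete();
        }
        md->release();
    }
}
#endif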

/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
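
/*
 * Illustrative sketch (hypothetical size and direction, not from the
 * original source): withCapacity() is the simple front end to withOptions().
 * Passing true for inContiguous requests a physically contiguous buffer,
 * e.g. for a DMA engine that cannot scatter/gather.
 */
#if 0
IOBufferMemoryDescriptor * dmaBuffer =
    IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, true);
#endif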

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)) {
        me->release();
        me = 0;
    }
    return me;
}
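
/*
 * Illustrative sketch (the command bytes are hypothetical): withBytes()
 * copies the caller's data into the new buffer, so the source may live on
 * the stack or be freed immediately afterwards.
 */
#if 0
static const UInt8 inquiryCmd[] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
IOBufferMemoryDescriptor * cmdBuffer =
    IOBufferMemoryDescriptor::withBytes(inquiryCmd, sizeof(inquiryCmd),
                                        kIODirectionOut, false);
#endif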

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_offset_t  alignment = _alignment;

    if (_physAddrs)
        IODelete(_physAddrs, IOPhysicalAddress, _physSegCount);

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer) {
        if (options & kIOMemoryPageable)
            IOFreePageable(buffer, size);
        else {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
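
/*
 * Illustrative sketch (hypothetical sizes): one descriptor reused for
 * transfers of different lengths, per the comment above, instead of
 * allocating a fresh buffer for each transfer.
 */
#if 0
static void exampleReuse(IOBufferMemoryDescriptor * md)
{
    md->setLength(512);    /* first transfer moves 512 bytes */
    /* ... perform I/O ... */
    md->setLength(128);    /* same buffer, shorter second transfer */
    /* ... perform I/O ... */
}
#endif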

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
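
/*
 * Illustrative sketch (hypothetical payloads): building up a buffer
 * incrementally. Each call copies at most the remaining capacity and
 * advances both the length and the single range's length.
 */
#if 0
static void exampleAppend(IOBufferMemoryDescriptor * md,
                          const void * header,  vm_size_t headerLen,
                          const void * payload, vm_size_t payloadLen)
{
    md->setLength(0);                      /* start empty */
    md->appendBytes(header, headerLen);
    md->appendBytes(payload, payloadLen);  /* clipped at capacity */
}
#endif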

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
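
/*
 * Illustrative sketch (hypothetical offsets): the ranged getBytesNoCopy()
 * returns 0 when the requested window is not wholly inside the current
 * length, so the return value must be checked.
 */
#if 0
static bool examplePeek(IOBufferMemoryDescriptor * md, void * out)
{
    void * p = md->getBytesNoCopy(64, 16);   /* bytes 64..79, if in range */
    if (!p)
        return false;
    bcopy(p, out, 16);
    return true;
}
#endif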

/*
 * getPhysicalSegment:
 *
 * Get the physical address of the buffer, relative to the current position.
 * If the current position is at the end of the buffer, a zero is returned.
 */
IOPhysicalAddress
IOBufferMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
                                             IOByteCount * lengthOfSegment)
{
    IOPhysicalAddress physAddr;

    if (offset != _position)
        setPosition(offset);

    assert(_position <= _length);

    /* Fail gracefully if the position is at (or past) the end-of-buffer. */
    if (_position >= _length) {
        *lengthOfSegment = 0;
        return 0;
    }

    if (_options & kIOMemoryPageable) {
        physAddr = super::getPhysicalSegment(offset, lengthOfSegment);

    } else {
        /* Compute the largest contiguous physical length possible. */
        vm_address_t actualPos  = _singleRange.v.address + _position;
        vm_address_t actualPage = trunc_page(actualPos);
        unsigned     physInd    = atop(actualPage - trunc_page(_singleRange.v.address));

        vm_size_t physicalLength = actualPage + page_size - actualPos;
        for (unsigned index = physInd + 1; index < _physSegCount &&
             _physAddrs[index] == _physAddrs[index - 1] + page_size; index++) {
            physicalLength += page_size;
        }

        /* Clip contiguous physical length at the end-of-buffer. */
        if (physicalLength > _length - _position)
            physicalLength = _length - _position;

        *lengthOfSegment = physicalLength;
        physAddr = _physAddrs[physInd] + (actualPos - actualPage);
    }

    return physAddr;
}
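
/*
 * Illustrative sketch (hypothetical caller, a common IOKit idiom): walking
 * the descriptor's physical segments, e.g. to build a DMA scatter/gather
 * list. getPhysicalSegment() returns 0 at (or past) the end-of-buffer,
 * which terminates the loop.
 */
#if 0
static void exampleWalkSegments(IOBufferMemoryDescriptor * md)
{
    IOByteCount offset = 0;
    while (offset < md->getLength()) {
        IOByteCount       segLen;
        IOPhysicalAddress segAddr = md->getPhysicalSegment(offset, &segLen);
        if (!segAddr)
            break;
        /* ... append (segAddr, segLen) to the scatter/gather list ... */
        offset += segLen;
    }
}
#endif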

OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);