/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagRealloc = 0x00000001,
};

volatile ppnum_t gIOHighestAllocatedPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t     kr;
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    addr64_t          lastIOAddr;
    mach_vm_address_t highestMask = 0;
    bool              usePhys;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;

    if (!capacity)
        return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
    }
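    // Explanatory note on the bit trick above: (physicalMask ^ -1ULL) is
    // ~physicalMask, and ANDing it with (physicalMask - 1) isolates the
    // zero bits below the mask's lowest set bit. For example, with
    // physicalMask = 0xFFFFF000 this yields 0xFFF; incrementing gives an
    // alignment of 0x1000 (the coarsest granule the mask can express),
    // while highestMask = 0xFFFFF000 | 0xFFF = 0xFFFFFFFF covers every
    // byte of an aligned allocation.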

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
        physicalMask = 0xFFFFFFFF;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;

        if (IOMapper::gSystem)
            // assuming mapped space is 2G
            lastIOAddr = (1UL << 31) - PAGE_SIZE;
        else
            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);

        usePhys = (highestMask && (lastIOAddr != (lastIOAddr & highestMask))
                   && (alignment <= page_size));

        if (!usePhys && (options & kIOMemoryPhysicallyContiguous))
        {
            _buffer = (void *) IOKernelAllocateContiguous(capacity, highestMask, alignment);
            usePhys = (NULL == _buffer);
        }
        if (usePhys)
        {
            mach_vm_address_t address;
            iomdOptions &= ~kIOMemoryTypeVirtual64;
            iomdOptions |= kIOMemoryTypePhysical64;

            address = IOMallocPhysical(capacity, highestMask);
            _buffer = (void *) address;
            if (!_buffer)
                return false;

            mapTask = inTask;
            inTask = 0;
        }
        else
        {
            vmmap = kernel_map;

            // Buffers shouldn't auto-prepare; they should be prepared explicitly.
            // But that was never enforced, so what are you going to do?
            iomdOptions |= kIOMemoryAutoPrepare;

            /* Allocate a wired-down buffer inside kernel space. */
            if (options & kIOMemoryPhysicallyContiguous)
            {
                // attempted allocate already
            }
            else if (alignment > 1)
            {
                _buffer = IOMallocAligned(capacity, alignment);
            }
            else
            {
                _buffer = IOMalloc(capacity);
            }
            if (!_buffer)
                return false;
        }
    }

    if ((kIOMemoryTypePhysical64 != (kIOMemoryTypeMask & iomdOptions))
        && (options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page(capacity))) {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return (false);

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (highestMask && !IOMapper::gSystem)
    {
        IOMDDMACharacteristics mdSummary;

        bzero(&mdSummary, sizeof(mdSummary));
        IOReturn rtn = dmaCommandOperation(
                            kIOMDGetCharacteristics,
                            &mdSummary, sizeof(mdSummary));
        if (rtn)
            return false;

        if (mdSummary.fHighestPage)
        {
            ppnum_t highest;
            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
            {
                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
                                     (UInt32 *) &gIOHighestAllocatedPage))
                    break;
            }
            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
        }
        else
            lastIOAddr = ptoa_64(gIOLastPage);

        if (lastIOAddr != (lastIOAddr & highestMask))
        {
            if (kIOMemoryTypePhysical64 != (_flags & kIOMemoryTypeMask))
            {
                // flag a retry
                _internalFlags |= kInternalFlagRealloc;
            }
            return false;
        }
    }

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                                            task_t            inTask,
                                            IOOptionBits      options,
                                            mach_vm_size_t    capacity,
                                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}

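/*
 * Illustrative usage sketch (not part of the original source): a driver
 * that needs a 64KB DMA buffer restricted to 32-bit physical addresses
 * might allocate one roughly like this. The direction and option values
 * below are typical choices, not the only valid ones.
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *           kernel_task,
 *           kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *           64 * 1024,                  // capacity
 *           0x00000000FFFFFFFFULL);     // physical mask: below 4GB
 *   if (bmd)
 *   {
 *       bmd->prepare();                 // wire the memory for I/O
 *       void * cpuPtr = bmd->getBytesNoCopy();
 *       // ... program the hardware, then ...
 *       bmd->complete();
 *       bmd->release();
 *   }
 */
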
#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1));
}

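/*
 * Illustrative sketch (not in the original): the convenience wrapper above
 * simply folds the direction and contiguity flags into option bits, so
 *
 *   IOBufferMemoryDescriptor::withCapacity(PAGE_SIZE, kIODirectionOut, false);
 *
 * is equivalent to
 *
 *   IOBufferMemoryDescriptor::withOptions(
 *       kIODirectionOut | kIOMemoryUnshared, PAGE_SIZE, 1);
 */
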
#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                   | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0))
    {
        bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(
                          kernel_task, inDirection | kIOMemoryUnshared
                           | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                          inLength, inLength, 0))
            {
                me->release();
                me = 0;
            }
        }
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits      flags     = _flags;
    IOOptionBits      options   = _options;
    vm_size_t         size      = _capacity;
    void *            buffer    = _buffer;
    IOMemoryMap *     map       = 0;
    IOAddressRange *  range     = _ranges.v64;
    mach_vm_address_t source    = range ? range->address : 0;
    vm_offset_t       alignment = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (kIOMemoryTypePhysical64 == (flags & kIOMemoryTypeMask))
            IOFreePhysical(source, size);
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}

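/*
 * Illustrative sketch (not in the original): reusing one descriptor for
 * transfers of different sizes instead of allocating a new buffer each
 * time.  'bmd' is assumed to be a previously created descriptor whose
 * capacity is at least 4096 bytes.
 *
 *   bmd->setLength(512);     // first transfer moves 512 bytes
 *   // ... issue I/O ...
 *   bmd->setLength(4096);    // next transfer reuses the same buffer
 */
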
/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}

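/*
 * Illustrative sketch (not in the original): appendBytes() silently clamps
 * the copy at the remaining capacity.  On a descriptor with a 16-byte
 * capacity whose length is already 12, appending 8 bytes copies only 4,
 * and the length ends up equal to the capacity:
 *
 *   char data[8] = { 0 };
 *   bmd->setLength(12);                    // capacity of 16 assumed
 *   bmd->appendBytes(data, sizeof(data));  // copies 4 bytes; length -> 16
 */
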
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);