/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

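// IOBMDPageProc feeds the iopa sub-page allocator (gIOBMDPageAllocator,
// used below): each call supplies one zero-filled kernel page, which
// iopa_alloc() then carves into small buffer allocations.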
static uintptr_t IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr  = 0;
    int           options = 0; // KMA_LOMEM;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
                                page_size, 0, options);

    if (KERN_SUCCESS != kr) vmaddr = 0;
    else                    bzero((void *) vmaddr, page_size);

    return ((uintptr_t) vmaddr);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment,
                               task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                               task_t            inTask,
                               IOOptionBits      options,
                               mach_vm_size_t    capacity,
                               mach_vm_address_t alignment,
                               mach_vm_address_t physicalMask)
{
    kern_return_t         kr;
    task_t                mapTask = NULL;
    vm_map_t              vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity)
        return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = mapped;

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }
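    // Worked example (illustrative values): physicalMask 0x00000000FFFFF000
    // yields alignment = (~mask & (mask - 1)) = 0xFFF and highestMask =
    // 0xFFFFFFFF; alignment++ then gives 0x1000, i.e. a 4K-aligned buffer
    // that must stay below 4GB.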

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }
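    // E.g. highestMask 0xFFFFFFFF has no leading zeros in its low word, so
    // numAddressBits becomes 32. The mask is then cleared: once the spec is
    // handed to a system mapper, the mapper (not the host page allocator)
    // satisfies the addressability restriction.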

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // But it never was enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                   capacity, highestMask, alignment, contig);
        }
        else if (needZero
                  && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer)
            {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                debug_iomalloc_size += capacity;
#endif
            }
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page(capacity))) {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return (false);

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

374 | ||
9bccf70c A |
375 | IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions( |
376 | task_t inTask, | |
377 | IOOptionBits options, | |
378 | vm_size_t capacity, | |
55e303ae | 379 | vm_offset_t alignment) |
9bccf70c A |
380 | { |
381 | IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; | |
382 | ||
b0d623f7 | 383 | if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) { |
4452a7af A |
384 | me->release(); |
385 | me = 0; | |
0c530ab8 A |
386 | } |
387 | return me; | |
388 | } | |
389 | ||
390 | IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask( | |
391 | task_t inTask, | |
392 | IOOptionBits options, | |
393 | mach_vm_size_t capacity, | |
394 | mach_vm_address_t physicalMask) | |
395 | { | |
396 | IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; | |
397 | ||
398 | if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) | |
399 | { | |
0c530ab8 A |
400 | me->release(); |
401 | me = 0; | |
9bccf70c A |
402 | } |
403 | return me; | |
404 | } | |
405 | ||
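// Illustrative driver-side usage (values are examples, not prescriptions):
// allocate a 64KB contiguous buffer whose physical pages a 32-bit DMA
// engine can reach.
//
//     IOBufferMemoryDescriptor * bmd =
//         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
//             kernel_task,                        // wired kernel allocation
//             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
//             65536,                              // capacity in bytes
//             0x00000000FFFFF000ULL);             // 4K-aligned, below 4GB
//     if (bmd) {
//         void * cpuPtr = bmd->getBytesNoCopy(); // kernel virtual address
//         // ... program the hardware, perform I/O ...
//         bmd->release();
//     }
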
#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}

/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}

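// Note the alignment argument above: a contiguous request reuses the
// capacity as the alignment, so e.g. withCapacity(8192, kIODirectionOut,
// true) asks for an 8K-aligned, physically contiguous 8K buffer.
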
#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                               | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                   | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0 ))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}

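// Illustrative use (the command bytes are hypothetical): preload a small
// template that a driver will DMA out unchanged.
//
//     static const UInt8 cmd[4] = { 0x12, 0x00, 0x00, 0x24 };
//     IOBufferMemoryDescriptor * md = IOBufferMemoryDescriptor::withBytes(
//         cmd, sizeof(cmd), kIODirectionOut, false);
//     // on success md->getLength() == sizeof(cmd); release() when done
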
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (kInternalFlagPageAllocated & internalFlags)
        {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page)
            {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            debug_iomalloc_size -= size;
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        }
        else if (alignment > 1)
        {
            IOFreeAligned(buffer, size);
        }
        else
        {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}

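// Reuse sketch (illustrative sizes): one descriptor, two transfers.
//
//     IOBufferMemoryDescriptor * md =
//         IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOut, false);
//     md->setLength(512);     // first transfer moves 512 bytes
//     // ... issue I/O ...
//     md->setLength(2048);    // same buffer, larger second transfer
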
/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}

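// Truncation example: with _capacity == 8 and _length == 6, a call to
// appendBytes(src, 4) copies only min(4, 8 - 6) = 2 bytes and leaves the
// length clamped at the capacity.
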
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

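// Bounds example: with _length == 100, getBytesNoCopy(90, 10) returns a
// valid pointer (90 + 10 <= 100) while getBytesNoCopy(90, 20) returns 0.
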
#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);