/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum {
	kInternalFlagPhysical = 0x00000001,
	kInternalFlagPageSized = 0x00000002,
	kInternalFlagPageAllocated = 0x00000004,
	kInternalFlagInit = 0x00000008
};
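
/*
 * Flag usage (summary; see the init and free paths below):
 * kInternalFlagPhysical marks buffers obtained from
 * IOKernelAllocateWithPhysicalRestrict, freed with IOKernelFreePhysical;
 * kInternalFlagPageSized records that capacity was rounded up to a page
 * multiple; kInternalFlagPageAllocated marks buffers from the iopa
 * sub-page allocator, returned via iopa_free; kInternalFlagInit records
 * that initWithPhysicalMask completed, gating free()'s IOTRACKING
 * accounting.
 */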

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

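/*
 * Page-supply callback for the iopa sub-page allocator: allocates one
 * zero-filled kernel page, which iopa_alloc() then carves into sub-page
 * chunks for small buffer requests.
 */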
static uintptr_t
IOBMDPageProc(iopa_t * a)
{
	kern_return_t kr;
	vm_address_t  vmaddr = 0;
	int           options = 0; // KMA_LOMEM;

	kr = kernel_memory_allocate(kernel_map, &vmaddr,
	    page_size, 0, options, VM_KERN_MEMORY_IOKIT);

	if (KERN_SUCCESS != kr) {
		vmaddr = 0;
	} else {
		bzero((void *) vmaddr, page_size);
	}

	return (uintptr_t) vmaddr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	task_t       inTask)
{
	mach_vm_address_t physicalMask = 0;
	return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

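/*
 * withCopy:
 *
 * Returns a buffer descriptor backed by a copy of an existing range in
 * sourceMap; vm_map_copyin/vm_map_copyout clone the pages into inTask's
 * map before initWithPhysicalMask adopts the new range.
 */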
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCopy(
	task_t            inTask,
	IOOptionBits      options,
	vm_map_t          sourceMap,
	mach_vm_address_t source,
	mach_vm_size_t    size)
{
	IOBufferMemoryDescriptor * inst;
	kern_return_t              err;
	vm_map_copy_t              copy;
	vm_map_address_t           address;

	copy = NULL;
	do {
		err = kIOReturnNoMemory;
		inst = new IOBufferMemoryDescriptor;
		if (!inst) {
			break;
		}
		inst->_ranges.v64 = IONew(IOAddressRange, 1);
		if (!inst->_ranges.v64) {
			break;
		}

		err = vm_map_copyin(sourceMap, source, size,
		    false /* src_destroy */, &copy);
		if (KERN_SUCCESS != err) {
			break;
		}

		err = vm_map_copyout(get_task_map(inTask), &address, copy);
		if (KERN_SUCCESS != err) {
			break;
		}
		copy = NULL;

		inst->_ranges.v64->address = address;
		inst->_ranges.v64->length  = size;

		if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
			err = kIOReturnError;
		}
	} while (false);

	if (KERN_SUCCESS == err) {
		return inst;
	}

	if (copy) {
		vm_map_copy_discard(copy);
	}
	OSSafeReleaseNULL(inst);
	return NULL;
}


bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	mach_vm_address_t physicalMask)
{
	task_t            mapTask = NULL;
	vm_map_t          vmmap = NULL;
	mach_vm_address_t highestMask = 0;
	IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
	IODMAMapSpecification mapSpec;
	bool              mapped = false;
	bool              withCopy = false;
	bool              needZero;

	if (!capacity) {
		return false;
	}

	_options          = options;
	_capacity         = capacity;
	_internalFlags    = 0;
	_internalReserved = 0;
	_buffer           = NULL;

	if (!_ranges.v64) {
		_ranges.v64 = IONew(IOAddressRange, 1);
		if (!_ranges.v64) {
			return false;
		}
		_ranges.v64->address = 0;
		_ranges.v64->length  = 0;
	} else {
		if (!_ranges.v64->address) {
			return false;
		}
		if (!(kIOMemoryPageable & options)) {
			return false;
		}
		if (!inTask) {
			return false;
		}
		_buffer  = (void *) _ranges.v64->address;
		withCopy = true;
	}
	// make sure super::free doesn't dealloc _ranges before super::init
	_flags = kIOMemoryAsReference;

	// Grab IOMD bits from the Buffer MD options
	iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

	if (!(kIOMemoryMapperNone & options)) {
		IOMapper::checkForSystemMapper();
		mapped = (NULL != IOMapper::gSystem);
	}
	needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

	if (physicalMask && (alignment <= 1)) {
		alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
		highestMask = (physicalMask | alignment);
		alignment++;
		if (alignment < page_size) {
			alignment = page_size;
		}
	}
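	/*
	 * Worked example (illustrative): for physicalMask == 0xFFFFF000ULL the
	 * expression (~mask & (mask - 1)) yields 0xFFF, so alignment becomes
	 * 0x1000 and highestMask 0xFFFFFFFF; i.e. a page-aligned allocation
	 * below 4GB, assuming 4KB pages.
	 */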

	if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
		alignment = page_size;
	}

	if (alignment >= page_size) {
		capacity = round_page(capacity);
	}

	if (alignment > page_size) {
		options |= kIOMemoryPhysicallyContiguous;
	}

	_alignment = alignment;

	if ((capacity + alignment) < _capacity) {
		return false;
	}

	if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
		return false;
	}

	bzero(&mapSpec, sizeof(mapSpec));
	mapSpec.alignment = _alignment;
	mapSpec.numAddressBits = 64;
	if (highestMask && mapped) {
		if (highestMask <= 0xFFFFFFFF) {
			mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
		} else {
			mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
		}
		highestMask = 0;
	}

	// set memory entry cache mode, pageable, purgeable
	iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
	if (options & kIOMemoryPageable) {
		iomdOptions |= kIOMemoryBufferPageable;
		if (options & kIOMemoryPurgeable) {
			iomdOptions |= kIOMemoryBufferPurgeable;
		}
	} else {
		vmmap = kernel_map;

		// Buffers shouldn't auto-prepare; they should be prepared explicitly.
		// But that was never enforced, so what are you going to do?
		iomdOptions |= kIOMemoryAutoPrepare;

		/* Allocate a wired-down buffer inside kernel space. */

		bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

		if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
			contig |= (!mapped);
			contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
			// treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
			contig |= true;
#endif
		}

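		/*
		 * Allocation strategy (summary): physically restricted or contiguous
		 * requests go through IOKernelAllocateWithPhysicalRestrict; small
		 * buffers that must be zeroed and fit within a page come from the
		 * iopa sub-page allocator (whose pages IOBMDPageProc pre-zeroes);
		 * everything else falls back to IOMallocAligned()/IOMalloc().
		 */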
		if (contig || highestMask || (alignment > page_size)) {
			_internalFlags |= kInternalFlagPhysical;
			if (highestMask) {
				_internalFlags |= kInternalFlagPageSized;
				capacity = round_page(capacity);
			}
			_buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
				capacity, highestMask, alignment, contig);
		} else if (needZero
		    && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) {
			_internalFlags |= kInternalFlagPageAllocated;
			needZero = false;
			_buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
			if (_buffer) {
				IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
				OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
			}
		} else if (alignment > 1) {
			_buffer = IOMallocAligned(capacity, alignment);
		} else {
			_buffer = IOMalloc(capacity);
		}
		if (!_buffer) {
			return false;
		}
		if (needZero) {
			bzero(_buffer, capacity);
		}
	}

	if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
		vm_size_t size = round_page(capacity);

		// initWithOptions will create memory entry
		if (!withCopy) {
			iomdOptions |= kIOMemoryPersistent;
		}

		if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
			OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
			if (!withCopy) {
				mapTask = inTask;
			}
			if (NULL == inTask) {
				inTask = kernel_task;
			}
		} else if (options & kIOMapCacheMask) {
			// Prefetch each page to put entries into the pmap
			volatile UInt8 * startAddr = (UInt8 *)_buffer;
			volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

			while (startAddr < endAddr) {
				UInt8 dummyVar = *startAddr;
				(void) dummyVar;
				startAddr += page_size;
			}
		}
	}

	_ranges.v64->address = (mach_vm_address_t) _buffer;
	_ranges.v64->length  = _capacity;

	if (!super::initWithOptions(_ranges.v64, 1, 0,
	    inTask, iomdOptions, /* System mapper */ NULL)) {
		return false;
	}

	_internalFlags |= kInternalFlagInit;
#if IOTRACKING
	if (!(options & kIOMemoryPageable)) {
		trackingAccumSize(capacity);
	}
#endif /* IOTRACKING */

	// give any system mapper the allocation params
	if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
	    &mapSpec, sizeof(mapSpec))) {
		return false;
	}

	if (mapTask) {
		if (!reserved) {
			reserved = IONew( ExpansionData, 1 );
			if (!reserved) {
				return false;
			}
		}
		reserved->map = createMappingInTask(mapTask, 0,
		    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
		if (!reserved->map) {
			_buffer = NULL;
			return false;
		}
		release(); // map took a retain on this
		reserved->map->retain();
		removeMapping(reserved->map);
		mach_vm_address_t buffer = reserved->map->getAddress();
		_buffer = (void *) buffer;
		if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
			_ranges.v64->address = buffer;
		}
	}

	setLength(_capacity);

	return true;
}

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
		me->release();
		me = NULL;
	}
	return me;
}

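/*
 * Usage sketch (illustrative, not from the original source): a driver
 * allocating one page of wired, physically contiguous kernel memory.
 *
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
 *             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *             page_size, page_size);
 *     if (bmd) {
 *         void * p = bmd->getBytesNoCopy();
 *         // ... use the buffer, typically bracketed by prepare()/complete() ...
 *         bmd->release();
 *     }
 */
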
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	uint32_t     kernTag,
	uint32_t     userTag)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me) {
		me->setVMTags(kernTag, userTag);

		if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
			me->release();
			me = NULL;
		}
	}
	return me;
}

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t physicalMask)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
		me->release();
		me = NULL;
	}
	return me;
}

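/*
 * Usage sketch (illustrative; "length" is a hypothetical caller variable):
 * restrict the buffer to 32-bit physical addresses, e.g. for a DMA engine
 * that cannot address above 4GB.
 *
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
 *             kIODirectionOut | kIOMemoryPhysicallyContiguous,
 *             length, 0x00000000FFFFFFFFULL);
 */
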
#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
		me->release();
		me = NULL;
	}
	return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity,
    IODirection inDirection,
    bool        inContiguous)
{
	return IOBufferMemoryDescriptor::withOptions(
		inDirection | kIOMemoryUnshared
		| (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		inCapacity, inContiguous ? inCapacity : 1 );
}

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
	if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
	    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
	    inLength, inLength, (mach_vm_address_t)0)) {
		return false;
	}

	// start out with no data
	setLength(0);

	if (!appendBytes(inBytes, inLength)) {
		return false;
	}

	return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me && !me->initWithPhysicalMask(
		    kernel_task, inDirection | kIOMemoryUnshared
		    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		    inLength, inLength, 0 )) {
		me->release();
		me = NULL;
	}

	if (me) {
		// start out with no data
		me->setLength(0);

		if (!me->appendBytes(inBytes, inLength)) {
			me->release();
			me = NULL;
		}
	}
	return me;
}

/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
	// Cache all of the relevant information on the stack for use
	// after we call super::free()!
	IOOptionBits     flags         = _flags;
	IOOptionBits     internalFlags = _internalFlags;
	IOOptionBits     options       = _options;
	vm_size_t        size          = _capacity;
	void *           buffer        = _buffer;
	IOMemoryMap *    map           = NULL;
	IOAddressRange * range         = _ranges.v64;
	vm_offset_t      alignment     = _alignment;

	if (alignment >= page_size) {
		size = round_page(size);
	}

	if (reserved) {
		map = reserved->map;
		IODelete( reserved, ExpansionData, 1 );
		if (map) {
			map->release();
		}
	}

	if ((options & kIOMemoryPageable)
	    || (kInternalFlagPageSized & internalFlags)) {
		size = round_page(size);
	}

#if IOTRACKING
	if (!(options & kIOMemoryPageable)
	    && buffer
	    && (kInternalFlagInit & _internalFlags)) {
		trackingAccumSize(-size);
	}
#endif /* IOTRACKING */

	/* super::free may unwire - deallocate buffer afterwards */
	super::free();

	if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	} else if (buffer) {
		if (kInternalFlagPhysical & internalFlags) {
			IOKernelFreePhysical((mach_vm_address_t) buffer, size);
		} else if (kInternalFlagPageAllocated & internalFlags) {
			uintptr_t page;
			page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
			if (page) {
				kmem_free(kernel_map, page, page_size);
			}
#if IOALLOCDEBUG
			OSAddAtomic(-size, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
		} else if (alignment > 1) {
			IOFreeAligned(buffer, size);
		} else {
			IOFree(buffer, size);
		}
	}
	if (range && (kIOMemoryAsReference & flags)) {
		IODelete(range, IOAddressRange, 1);
	}
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
	return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
	assert(length <= _capacity);
	if (length > _capacity) {
		return;
	}

	_length = length;
	_ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
	_flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
	vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
	IOByteCount offset;

	assert(_length <= _capacity);

	offset = _length;
	_length += actualBytesToCopy;
	_ranges.v64->length += actualBytesToCopy;

	if (_task == kernel_task) {
		bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
		    actualBytesToCopy);
	} else {
		writeBytes(offset, bytes, actualBytesToCopy);
	}

	return true;
}

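/*
 * Note: appendBytes() truncates silently; the min() above caps the copy at
 * the remaining capacity and the method still returns true, so callers that
 * care should compare getLength() against what they appended.
 */
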
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		return _buffer;
	} else {
		return (void *)_ranges.v64->address;
	}
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
	IOVirtualAddress address;

	if ((start + withLength) < start) {
		return NULL;
	}

	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		address = (IOVirtualAddress) _buffer;
	} else {
		address = _ranges.v64->address;
	}

	if (start < _length && (start + withLength) <= _length) {
		return (void *)(address + start);
	}
	return NULL;
}

#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	void * bytes = getBytesNoCopy(offset, 0);

	if (bytes && lengthOfSegment) {
		*lengthOfSegment = _length - offset;
	}

	return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);