/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define IOKIT_ENABLE_SHARED_PTR

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum {
	kInternalFlagPhysical      = 0x00000001,
	kInternalFlagPageSized     = 0x00000002,
	kInternalFlagPageAllocated = 0x00000004,
	kInternalFlagInit          = 0x00000008
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

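// Page-supplying callback for the gIOBMDPageAllocator sub-page allocator
// (used via iopa_alloc in initWithPhysicalMask below): allocates and zeroes
// one wired kernel page.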
static uintptr_t
IOBMDPageProc(iopa_t * a)
{
	kern_return_t kr;
	vm_address_t  vmaddr  = 0;
	int           options = 0; // KMA_LOMEM;

	kr = kernel_memory_allocate(kernel_map, &vmaddr,
	    page_size, 0, options, VM_KERN_MEMORY_IOKIT);

	if (KERN_SUCCESS != kr) {
		vmaddr = 0;
	} else {
		bzero((void *) vmaddr, page_size);
	}

	return (uintptr_t) vmaddr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	task_t       inTask)
{
	mach_vm_address_t physicalMask = 0;
	return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCopy(
	task_t            inTask,
	IOOptionBits      options,
	vm_map_t          sourceMap,
	mach_vm_address_t source,
	mach_vm_size_t    size)
{
	OSSharedPtr<IOBufferMemoryDescriptor> inst;
	kern_return_t                         err;
	vm_map_copy_t                         copy;
	vm_map_address_t                      address;

	copy = NULL;
	do {
		err = kIOReturnNoMemory;
		inst = OSMakeShared<IOBufferMemoryDescriptor>();
		if (!inst) {
			break;
		}
		inst->_ranges.v64 = IONew(IOAddressRange, 1);
		if (!inst->_ranges.v64) {
			break;
		}

		err = vm_map_copyin(sourceMap, source, size,
		    false /* src_destroy */, &copy);
		if (KERN_SUCCESS != err) {
			break;
		}

		err = vm_map_copyout(get_task_map(inTask), &address, copy);
		if (KERN_SUCCESS != err) {
			break;
		}
		copy = NULL;

		inst->_ranges.v64->address = address;
		inst->_ranges.v64->length  = size;

		if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
			err = kIOReturnError;
		}
	} while (false);

	if (KERN_SUCCESS == err) {
		return inst;
	}

	if (copy) {
		vm_map_copy_discard(copy);
	}

	return nullptr;
}

bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	mach_vm_address_t physicalMask)
{
	task_t            mapTask = NULL;
	vm_map_t          vmmap = NULL;
	mach_vm_address_t highestMask = 0;
	IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
	IODMAMapSpecification mapSpec;
	bool              mapped = false;
	bool              withCopy = false;
	bool              mappedOrShared = false;

	if (!capacity) {
		return false;
	}

	_options          = options;
	_capacity         = capacity;
	_internalFlags    = 0;
	_internalReserved = 0;
	_buffer           = NULL;

	if (!_ranges.v64) {
		_ranges.v64 = IONew(IOAddressRange, 1);
		if (!_ranges.v64) {
			return false;
		}
		_ranges.v64->address = 0;
		_ranges.v64->length  = 0;
	} else {
		if (!_ranges.v64->address) {
			return false;
		}
		if (!(kIOMemoryPageable & options)) {
			return false;
		}
		if (!inTask) {
			return false;
		}
		_buffer  = (void *) _ranges.v64->address;
		withCopy = true;
	}
	// make sure super::free doesn't dealloc _ranges before super::init
	_flags = kIOMemoryAsReference;

	// Grab IOMD bits from the Buffer MD options
	iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

	if (!(kIOMemoryMapperNone & options)) {
		IOMapper::checkForSystemMapper();
		mapped = (NULL != IOMapper::gSystem);
	}

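	// A physicalMask with zero low-order bits implies that alignment: the
	// expression below extracts the mask's trailing zero bits as
	// (alignment - 1), e.g. a mask of 0xFFFFF000 yields 4K alignment, while
	// highestMask bounds the highest acceptable physical address.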
229 | |
230 | if (physicalMask && (alignment <= 1)) { | |
231 | alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1)); | |
232 | highestMask = (physicalMask | alignment); | |
233 | alignment++; | |
234 | if (alignment < page_size) { | |
235 | alignment = page_size; | |
236 | } | |
237 | } | |
238 | ||
239 | if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) { | |
240 | alignment = page_size; | |
b0d623f7 | 241 | } |
0a7de745 A |
242 | |
243 | if (alignment >= page_size) { | |
f427ee49 A |
244 | if (round_page_overflow(capacity, &capacity)) { |
245 | return false; | |
246 | } | |
0a7de745 A |
247 | } |
248 | ||
249 | if (alignment > page_size) { | |
250 | options |= kIOMemoryPhysicallyContiguous; | |
251 | } | |
252 | ||
253 | _alignment = alignment; | |
254 | ||
255 | if ((capacity + alignment) < _capacity) { | |
256 | return false; | |
0c530ab8 | 257 | } |
0a7de745 A |
258 | |
259 | if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) { | |
260 | return false; | |
0b4c1975 | 261 | } |
0a7de745 A |
262 | |
263 | bzero(&mapSpec, sizeof(mapSpec)); | |
264 | mapSpec.alignment = _alignment; | |
265 | mapSpec.numAddressBits = 64; | |
266 | if (highestMask && mapped) { | |
267 | if (highestMask <= 0xFFFFFFFF) { | |
f427ee49 | 268 | mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask)); |
0a7de745 | 269 | } else { |
f427ee49 | 270 | mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32))); |
0a7de745 A |
271 | } |
272 | highestMask = 0; | |
0c530ab8 | 273 | } |
1c79356b | 274 | |
0a7de745 A |
275 | // set memory entry cache mode, pageable, purgeable |
276 | iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift; | |
277 | if (options & kIOMemoryPageable) { | |
278 | iomdOptions |= kIOMemoryBufferPageable; | |
279 | if (options & kIOMemoryPurgeable) { | |
280 | iomdOptions |= kIOMemoryBufferPurgeable; | |
281 | } | |
282 | } else { | |
283 | vmmap = kernel_map; | |
284 | ||
285 | // Buffer shouldn't auto prepare they should be prepared explicitly | |
286 | // But it never was enforced so what are you going to do? | |
287 | iomdOptions |= kIOMemoryAutoPrepare; | |
288 | ||
289 | /* Allocate a wired-down buffer inside kernel space. */ | |
2d21ac55 | 290 | |
0a7de745 | 291 | bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous)); |
1c79356b | 292 | |
0a7de745 A |
293 | if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) { |
294 | contig |= (!mapped); | |
295 | contig |= (0 != (kIOMemoryMapperNone & options)); | |
296 | #if 0 | |
297 | // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now | |
298 | contig |= true; | |
299 | #endif | |
300 | } | |
301 | ||
f427ee49 | 302 | mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options))); |
0a7de745 A |
303 | if (contig || highestMask || (alignment > page_size)) { |
304 | _internalFlags |= kInternalFlagPhysical; | |
305 | if (highestMask) { | |
306 | _internalFlags |= kInternalFlagPageSized; | |
f427ee49 A |
307 | if (round_page_overflow(capacity, &capacity)) { |
308 | return false; | |
309 | } | |
0a7de745 A |
310 | } |
311 | _buffer = (void *) IOKernelAllocateWithPhysicalRestrict( | |
312 | capacity, highestMask, alignment, contig); | |
f427ee49 A |
313 | } else if (mappedOrShared |
314 | && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) { | |
0a7de745 | 315 | _internalFlags |= kInternalFlagPageAllocated; |
0a7de745 A |
316 | _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment); |
317 | if (_buffer) { | |
318 | IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity); | |
2d21ac55 | 319 | #if IOALLOCDEBUG |
f427ee49 | 320 | OSAddAtomicLong(capacity, &debug_iomalloc_size); |
2d21ac55 | 321 | #endif |
0a7de745 A |
322 | } |
323 | } else if (alignment > 1) { | |
324 | _buffer = IOMallocAligned(capacity, alignment); | |
325 | } else { | |
326 | _buffer = IOMalloc(capacity); | |
327 | } | |
328 | if (!_buffer) { | |
329 | return false; | |
330 | } | |
f427ee49 | 331 | bzero(_buffer, capacity); |
2d21ac55 | 332 | } |
2d21ac55 | 333 | |
0a7de745 A |
334 | if ((options & (kIOMemoryPageable | kIOMapCacheMask))) { |
335 | vm_size_t size = round_page(capacity); | |
336 | ||
337 | // initWithOptions will create memory entry | |
cb323159 A |
338 | if (!withCopy) { |
339 | iomdOptions |= kIOMemoryPersistent; | |
340 | } | |
0a7de745 A |
341 | |
342 | if (options & kIOMemoryPageable) { | |
343 | #if IOALLOCDEBUG | |
344 | OSAddAtomicLong(size, &debug_iomallocpageable_size); | |
345 | #endif | |
cb323159 A |
346 | if (!withCopy) { |
347 | mapTask = inTask; | |
348 | } | |
0a7de745 A |
349 | if (NULL == inTask) { |
350 | inTask = kernel_task; | |
351 | } | |
352 | } else if (options & kIOMapCacheMask) { | |
353 | // Prefetch each page to put entries into the pmap | |
354 | volatile UInt8 * startAddr = (UInt8 *)_buffer; | |
355 | volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity; | |
356 | ||
357 | while (startAddr < endAddr) { | |
358 | UInt8 dummyVar = *startAddr; | |
359 | (void) dummyVar; | |
360 | startAddr += page_size; | |
361 | } | |
362 | } | |
2d21ac55 | 363 | } |
2d21ac55 | 364 | |
cb323159 | 365 | _ranges.v64->address = (mach_vm_address_t) _buffer; |
0a7de745 | 366 | _ranges.v64->length = _capacity; |
2d21ac55 | 367 | |
0a7de745 | 368 | if (!super::initWithOptions(_ranges.v64, 1, 0, |
cb323159 | 369 | inTask, iomdOptions, /* System mapper */ NULL)) { |
0a7de745 A |
370 | return false; |
371 | } | |
1c79356b | 372 | |
0a7de745 | 373 | _internalFlags |= kInternalFlagInit; |
5ba3f43e | 374 | #if IOTRACKING |
0a7de745 A |
375 | if (!(options & kIOMemoryPageable)) { |
376 | trackingAccumSize(capacity); | |
377 | } | |
5ba3f43e A |
378 | #endif /* IOTRACKING */ |
379 | ||
0a7de745 A |
380 | // give any system mapper the allocation params |
381 | if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, | |
382 | &mapSpec, sizeof(mapSpec))) { | |
383 | return false; | |
384 | } | |
385 | ||
386 | if (mapTask) { | |
387 | if (!reserved) { | |
388 | reserved = IONew( ExpansionData, 1 ); | |
389 | if (!reserved) { | |
390 | return false; | |
391 | } | |
392 | } | |
393 | reserved->map = createMappingInTask(mapTask, 0, | |
f427ee49 | 394 | kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach(); |
0a7de745 | 395 | if (!reserved->map) { |
cb323159 | 396 | _buffer = NULL; |
0a7de745 A |
397 | return false; |
398 | } | |
399 | release(); // map took a retain on this | |
400 | reserved->map->retain(); | |
401 | removeMapping(reserved->map); | |
402 | mach_vm_address_t buffer = reserved->map->getAddress(); | |
403 | _buffer = (void *) buffer; | |
404 | if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) { | |
405 | _ranges.v64->address = buffer; | |
406 | } | |
407 | } | |
408 | ||
409 | setLength(_capacity); | |
410 | ||
411 | return true; | |
1c79356b A |
412 | } |
413 | ||
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
		me.reset();
	}
	return me;
}

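/*
 * Illustrative usage (sketch, not from the original source): a driver
 * commonly allocates a kernel-resident buffer via this factory; direction,
 * size, and alignment below are assumed example values.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> buf =
 *         IOBufferMemoryDescriptor::inTaskWithOptions(
 *             kernel_task, kIODirectionInOut, 4096, 8);
 *     if (buf) {
 *         void * p = buf->getBytesNoCopy(); // kernel virtual address
 *     }
 */
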
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	uint32_t     kernTag,
	uint32_t     userTag)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me) {
		me->setVMTags(kernTag, userTag);

		if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
			me.reset();
		}
	}
	return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t physicalMask)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
		me.reset();
	}
	return me;
}

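/*
 * Illustrative usage (sketch): restricting the buffer to 32-bit physical
 * addresses for a DMA engine that cannot address beyond 4 GB; the mask is
 * an assumed example value. initWithPhysicalMask derives the alignment
 * from the mask's low zero bits, as noted above.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> dmaBuf =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *             kernel_task, kIODirectionOut | kIOMemoryPhysicallyContiguous,
 *             8192, 0x00000000FFFFF000ULL);
 */
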
#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
		me.reset();
	}
	return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity,
    IODirection inDirection,
    bool        inContiguous)
{
	return IOBufferMemoryDescriptor::withOptions(
		inDirection | kIOMemoryUnshared
		| (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		inCapacity, inContiguous ? inCapacity : 1 );
}

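/*
 * Illustrative usage (sketch): withCapacity is a thin wrapper over
 * withOptions, as seen above; the size here is an assumed example value.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> scratch =
 *         IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionInOut, false);
 */
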
#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
	if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
	    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
	    inLength, inLength, (mach_vm_address_t)0)) {
		return false;
	}

	// start out with no data
	setLength(0);

	if (!appendBytes(inBytes, inLength)) {
		return false;
	}

	return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t   inLength,
    IODirection inDirection,
    bool        inContiguous)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(
		    kernel_task, inDirection | kIOMemoryUnshared
		    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		    inLength, inLength, 0 )) {
		me.reset();
	}

	if (me) {
		// start out with no data
		me->setLength(0);

		if (!me->appendBytes(inBytes, inLength)) {
			me.reset();
		}
	}
	return me;
}

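/*
 * Illustrative usage (sketch): preloading a descriptor from an existing
 * structure; "myCommand" is a hypothetical caller-side variable.
 *
 *     OSSharedPtr<IOBufferMemoryDescriptor> cmd =
 *         IOBufferMemoryDescriptor::withBytes(&myCommand, sizeof(myCommand),
 *             kIODirectionOut, false);
 */
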
/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
	// Cache all of the relevant information on the stack for use
	// after we call super::free()!
	IOOptionBits     flags         = _flags;
	IOOptionBits     internalFlags = _internalFlags;
	IOOptionBits     options       = _options;
	vm_size_t        size          = _capacity;
	void *           buffer        = _buffer;
	IOMemoryMap *    map           = NULL;
	IOAddressRange * range         = _ranges.v64;
	vm_offset_t      alignment     = _alignment;

	if (alignment >= page_size) {
		size = round_page(size);
	}

	if (reserved) {
		map = reserved->map;
		IODelete( reserved, ExpansionData, 1 );
		if (map) {
			map->release();
		}
	}

	if ((options & kIOMemoryPageable)
	    || (kInternalFlagPageSized & internalFlags)) {
		size = round_page(size);
	}

#if IOTRACKING
	if (!(options & kIOMemoryPageable)
	    && buffer
	    && (kInternalFlagInit & _internalFlags)) {
		trackingAccumSize(-size);
	}
#endif /* IOTRACKING */

	/* super::free may unwire - deallocate buffer afterwards */
	super::free();

	if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	} else if (buffer) {
		if (kInternalFlagPhysical & internalFlags) {
			IOKernelFreePhysical((mach_vm_address_t) buffer, size);
		} else if (kInternalFlagPageAllocated & internalFlags) {
			uintptr_t page;
			page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
			if (page) {
				kmem_free(kernel_map, page, page_size);
			}
#if IOALLOCDEBUG
			OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
		} else if (alignment > 1) {
			IOFreeAligned(buffer, size);
		} else {
			IOFree(buffer, size);
		}
	}
	if (range && (kIOMemoryAsReference & flags)) {
		IODelete(range, IOAddressRange, 1);
	}
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
	return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
	assert(length <= _capacity);
	if (length > _capacity) {
		return;
	}

	_length = length;
	_ranges.v64->length = length;
}

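/*
 * Illustrative usage (sketch): reusing one descriptor for transfers of
 * different sizes, per the comment above; "buf" and the lengths are assumed
 * example values.
 *
 *     buf->setLength(512);  // shorter transfer first
 *     // ... perform I/O ...
 *     buf->setLength(2048); // later, a larger transfer within capacity
 */
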
/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
	_flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
	vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
	IOByteCount offset;

	assert(_length <= _capacity);

	offset   = _length;
	_length += actualBytesToCopy;
	_ranges.v64->length += actualBytesToCopy;

	if (_task == kernel_task) {
		bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
		    actualBytesToCopy);
	} else {
		writeBytes(offset, bytes, actualBytesToCopy);
	}

	return true;
}

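/*
 * Illustrative usage (sketch): building up a buffer incrementally; "hdr",
 * "payload", and "payloadLen" are hypothetical caller-side variables. Any
 * bytes beyond the remaining capacity are silently dropped.
 *
 *     buf->setLength(0);
 *     buf->appendBytes(&hdr, sizeof(hdr));
 *     buf->appendBytes(payload, payloadLen);
 */
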
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		return _buffer;
	} else {
		return (void *)_ranges.v64->address;
	}
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
	IOVirtualAddress address;

	if ((start + withLength) < start) {
		return NULL;
	}

	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		address = (IOVirtualAddress) _buffer;
	} else {
		address = _ranges.v64->address;
	}

	if (start < _length && (start + withLength) <= _length) {
		return (void *)(address + start);
	}
	return NULL;
}

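/*
 * Illustrative usage (sketch): the ranged variant bounds-checks against the
 * current length, so it is the safer way to address a sub-range.
 *
 *     if (void * p = buf->getBytesNoCopy(16, 64)) {
 *         // p points at bytes [16, 80) of the buffer
 *     }
 */
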
#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	void * bytes = getBytesNoCopy(offset, 0);

	if (bytes && lengthOfSegment) {
		*lengthOfSegment = _length - offset;
	}

	return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);