/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004,
    kInternalFlagInit          = 0x00000008
};
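
// How the internal flags are used (per the code below):
//   kInternalFlagPhysical      - _buffer came from
//                                IOKernelAllocateWithPhysicalRestrict()
//   kInternalFlagPageSized     - capacity was rounded up to whole pages for
//                                the physically restricted path
//   kInternalFlagPageAllocated - _buffer was carved out of a page by the
//                                iopa_alloc() chunk allocator
//   kInternalFlagInit          - super::initWithOptions() succeeded; free()
//                                checks it to balance tracking accounting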

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

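// Page-supply callback handed to iopa_alloc() below: it allocates one
// zeroed kernel page from which the chunk allocator carves sub-page buffers.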
static uintptr_t IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr  = 0;
    int           options = 0; // KMA_LOMEM;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
                                page_size, 0, options, VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) vmaddr = 0;
    else                    bzero((void *) vmaddr, page_size);

    return ((uintptr_t) vmaddr);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool              mapped = false;
    bool              needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }
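    // Worked example of the mask-to-alignment trick above: for a
    // physicalMask of 0x00000000FFFFF000ULL, (mask ^ -1ULL) & (mask - 1)
    // isolates the low zero bits, giving 0xFFF; highestMask becomes
    // 0xFFFFFFFF, and alignment becomes 0xFFF + 1 = 0x1000 (4 KB).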

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }
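    // e.g. a highestMask of 0xFFFFFFFF makes __builtin_clz() return 0, so
    // numAddressBits becomes 32: the buffer must be mappable below 4 GB.
    // highestMask is then cleared so the wired allocation below is not also
    // physically restricted; the map spec carries the restriction instead.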

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                capacity, highestMask, alignment, contig);
        }
        else if (needZero
                  && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer)
            {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
            }
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        iomdOptions |= kIOMemoryPersistent;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    _internalFlags |= kInternalFlagInit;
#if IOTRACKING
    if (!(options & kIOMemoryPageable)) trackingAccumSize(capacity);
#endif /* IOTRACKING */

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew( ExpansionData, 1 );
            if (!reserved)
                return( false );
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return( false );
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}
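
/*
 * Illustrative use from a hypothetical driver (a sketch, not a quote from
 * any caller): allocate a wired, physically contiguous page reachable by a
 * DMA engine that can only address memory below 4 GB, then release it.
 *
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *             kernel_task,
 *             kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *             4096, 0x00000000FFFFF000ULL);
 *     if (bmd)
 *     {
 *         void * cpuAddr = bmd->getBytesNoCopy(); // kernel virtual address
 *         // ... program the hardware and perform I/O ...
 *         bmd->release();
 *     }
 */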

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1));
}
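
/*
 * Illustrative use (hypothetical names, mirroring the withBytes() pattern
 * below): a kernel-task scratch buffer filled incrementally.
 *
 *     IOBufferMemoryDescriptor * bmd =
 *         IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOut, false);
 *     if (bmd)
 *     {
 *         bmd->setLength(0);                       // start empty
 *         bmd->appendBytes(header, sizeof(header));
 *         bmd->appendBytes(payload, payloadLen);
 *         // ... hand the descriptor to the I/O path ...
 *         bmd->release();
 *     }
 */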

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                   | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
        if (map)
            map->release();
    }

    if ((options & kIOMemoryPageable)
        || (kInternalFlagPageSized & internalFlags)) size = round_page(size);

#if IOTRACKING
    if (!(options & kIOMemoryPageable)
        && buffer
        && (kInternalFlagInit & _internalFlags)) trackingAccumSize(-size);
#endif /* IOTRACKING */

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    }
    else if (buffer)
    {
        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (kInternalFlagPageAllocated & internalFlags)
        {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page)
            {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            OSAddAtomic(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        }
        else if (alignment > 1)
        {
            IOFreeAligned(buffer, size);
        }
        else
        {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);
    if (length > _capacity) return;

    _length = length;
    _ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
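
// Note on appendBytes(): the copy is silently clamped to the remaining
// capacity and the method still returns true, so a caller that must detect
// truncation should compare getLength() against the expected total.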

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;

    if ((start + withLength) < start) return 0;

    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
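
// The (start + withLength) < start test above rejects ranges whose sum
// wraps around, and the final bounds check keeps the returned pointer
// strictly within the current length, so out-of-range requests yield 0
// rather than a pointer past the end of the buffer.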

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                        IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);