/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004
};
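
/*
 * kInternalFlagPhysical      - buffer was allocated with
 *                              IOKernelAllocateWithPhysicalRestrict and must be
 *                              released with IOKernelFreePhysical (see free()).
 * kInternalFlagPageSized     - capacity was rounded up to a whole page, so
 *                              free() must round the size the same way.
 * kInternalFlagPageAllocated - buffer came from the iopa sub-page allocator
 *                              (gIOBMDPageAllocator) and is returned via iopa_free.
 */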

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uintptr_t IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr  = 0;
    int           options = 0; // KMA_LOMEM;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
                                page_size, 0, options, VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) vmaddr = 0;
    else                    bzero((void *) vmaddr, page_size);

    return ((uintptr_t) vmaddr);
}
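
/*
 * IOBMDPageProc above is the backing-page supplier handed to iopa_alloc() in
 * initWithPhysicalMask(): each call wires one zero-filled kernel page from
 * which the iopa allocator carves sub-page buffers.
 */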

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t                mapTask = NULL;
    vm_map_t              vmmap   = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }
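
    /*
     * Worked example of the derivation above (illustrative values):
     * physicalMask 0xFFFFF000 (32-bit addressing, 4K aligned) gives
     *   ~mask & (mask - 1) = 0x0FFF, so alignment becomes 0x1000 after ++,
     *   and highestMask = 0xFFFFF000 | 0x0FFF = 0xFFFFFFFF.
     * The mask's trailing zero bits set the alignment; highestMask bounds
     * the highest acceptable physical address.
     */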

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
        vmmap = kernel_map;

        // Buffers shouldn't auto prepare; they should be prepared explicitly.
        // But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                   capacity, highestMask, alignment, contig);
        }
        else if (needZero
                 && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer)
            {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
            }
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        iomdOptions |= kIOMemoryPersistent;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}
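
#if 0
/*
 * Usage sketch (illustrative only): allocating a wired, physically contiguous
 * buffer below 4GB for a 32-bit DMA engine. The function name and the
 * option/mask values here are example choices, not requirements of the API.
 */
static IOBufferMemoryDescriptor * exampleAllocDMABuffer(void)
{
    IOBufferMemoryDescriptor * bmd;

    bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
              kernel_task,                                       // wired kernel allocation
              kIODirectionInOut | kIOMemoryPhysicallyContiguous,
              4096,                                              // capacity in bytes
              0x00000000FFFFF000ULL);                            // < 4GB, 4K aligned
    if (bmd) bmd->prepare();                                     // wire for I/O; pair with complete()
    return bmd;
}
#endif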

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                 | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1));
}
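
#if 0
/*
 * Usage sketch (illustrative only): a plain kernel buffer from withCapacity,
 * filled via getBytesNoCopy() with the transfer length trimmed afterwards.
 * The function name and sizes are example choices.
 */
static void exampleFillBuffer(void)
{
    IOBufferMemoryDescriptor * bmd =
        IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOut, false);
    if (bmd)
    {
        bzero(bmd->getBytesNoCopy(), 512);  // kernel virtual address of the buffer
        bmd->setLength(512);                // only 512 of the 1024 bytes will transfer
        bmd->release();
    }
}
#endif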

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                   | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        OSAddAtomicLong(-(round_page(size)), &debug_iomallocpageable_size);
#endif
    }
    else if (buffer)
    {
        if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (kInternalFlagPageAllocated & internalFlags)
        {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page)
            {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            OSAddAtomic(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        }
        else if (alignment > 1)
        {
            IOFreeAligned(buffer, size);
        }
        else
        {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);
    if (length > _capacity) return;

    _length = length;
    _ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
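
#if 0
/*
 * Usage sketch (illustrative only): building up a descriptor's contents with
 * appendBytes. Appends past capacity are silently truncated, per the comment
 * above. The function name and data are example choices.
 */
static void exampleAppend(IOBufferMemoryDescriptor * bmd)
{
    const UInt8 header[2] = { 0x01, 0x02 };

    bmd->setLength(0);                      // reset before accumulating
    bmd->appendBytes(header, sizeof(header));
    // subsequent appendBytes calls advance the length up to the capacity
}
#endif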

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);