/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

volatile ppnum_t gIOHighestAllocatedPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment,
                                  task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                  task_t            inTask,
                                  IOOptionBits      options,
                                  mach_vm_size_t    capacity,
                                  mach_vm_address_t alignment,
                                  mach_vm_address_t physicalMask)
{
    kern_return_t     kr;
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    addr64_t          lastIOAddr;
    IOAddressRange    range;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;
    range.address = 0;
    range.length  = 0;
    _ranges.v64   = &range;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask)) && (alignment < page_size))
        alignment = page_size;

    if (physicalMask && (alignment <= 1))
        alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;

    _alignment = alignment;

    if (((inTask != kernel_task) && !(options & kIOMemoryPageable)) ||
        (physicalMask && (options & kIOMapCacheMask)))
        return false;

    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
        physicalMask = 0xFFFFFFFF;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;

        if (IOMapper::gSystem)
            // assuming mapped space is 2G
            lastIOAddr = (1UL << 31) - PAGE_SIZE;
        else
            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);

        if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
        {
            mach_vm_address_t address;
            iomdOptions &= ~kIOMemoryTypeVirtual64;
            iomdOptions |= kIOMemoryTypePhysical64;

            address = IOMallocPhysical(capacity, physicalMask);
            _buffer = (void *) address;
            if (!_buffer)
                return false;

            mapTask = inTask;
            inTask = 0;
        }
        else
        {
            vmmap = kernel_map;

            // Buffers shouldn't auto-prepare; they should be prepared explicitly.
            // But that was never enforced, so what are you going to do?
            iomdOptions |= kIOMemoryAutoPrepare;

            /* Allocate a wired-down buffer inside kernel space. */
            if (options & kIOMemoryPhysicallyContiguous)
                _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
            else if (alignment > 1)
                _buffer = IOMallocAligned(capacity, alignment);
            else
                _buffer = IOMalloc(capacity);
            if (!_buffer)
                return false;
        }
    }

    if ((kIOMemoryTypePhysical64 != (kIOMemoryTypeMask & iomdOptions))
        && (options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page_32(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL );

        if ((KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    range.address = (mach_vm_address_t) _buffer;
    range.length  = capacity;

    if (!super::initWithOptions(&range, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (physicalMask && !IOMapper::gSystem)
    {
        IOMDDMACharacteristics mdSummary;

        bzero(&mdSummary, sizeof(mdSummary));
        IOReturn rtn = dmaCommandOperation(
                kIOMDGetCharacteristics,
                &mdSummary, sizeof(mdSummary));
        if (rtn)
            return false;

        if (mdSummary.fHighestPage)
        {
            ppnum_t highest;
            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
            {
                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
                                     (UInt32 *) &gIOHighestAllocatedPage))
                    break;
            }
            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
        }
        else
            lastIOAddr = ptoa_64(gIOLastPage);

        if (lastIOAddr != (lastIOAddr & physicalMask))
        {
            if (kIOMemoryTypePhysical64 != (_flags & kIOMemoryTypeMask))
            {
                // flag a retry
                _physSegCount = 1;
            }
            return false;
        }
    }

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew( ExpansionData, 1 );
            if (!reserved)
                return( false );
        }
        reserved->map = map(mapTask, 0, kIOMapAnywhere, 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return( false );
        }
        release();          // map took a retain on this
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithOptions(options, capacity, alignment, inTask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
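
/*
 * Usage sketch (illustrative only, not part of the original source; the
 * option and size values below are hypothetical):
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::inTaskWithOptions(
 *           kernel_task,          // allocate in the kernel's map
 *           kIODirectionInOut,    // readable and writable by the device
 *           4096,                 // capacity in bytes
 *           page_size);           // page-aligned
 *   if (bmd)
 *   {
 *       void * p = bmd->getBytesNoCopy();   // kernel virtual address
 *       // ... fill or consume the buffer ...
 *       bmd->release();
 *   }
 */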

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                                            task_t            inTask,
                                            IOOptionBits      options,
                                            mach_vm_size_t    capacity,
                                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
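
/*
 * Usage sketch (illustrative only): a device limited to 32-bit DMA can
 * constrain the buffer's physical placement with a mask. The values below
 * are hypothetical.
 *
 *   IOBufferMemoryDescriptor * dmaBuf =
 *       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *           kernel_task,
 *           kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *           65536,                   // capacity
 *           0x00000000FFFFFFFFULL);  // keep physical pages below 4GB
 */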

bool IOBufferMemoryDescriptor::initWithOptions(
                                  IOOptionBits options,
                                  vm_size_t    capacity,
                                  vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    return(IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options, capacity, alignment));
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
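
/*
 * Sketch (illustrative only): withCapacity() is the simple convenience
 * path; the length starts equal to the capacity, so no setLength() call
 * is needed before a full-size transfer.
 *
 *   IOBufferMemoryDescriptor * buf =
 *       IOBufferMemoryDescriptor::withCapacity(8192, kIODirectionOut, false);
 */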

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
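
/*
 * Sketch (illustrative only): preloading a descriptor with an existing
 * kernel buffer. The source array here is hypothetical.
 *
 *   static const UInt8 header[4] = { 0xCA, 0xFE, 0xBA, 0xBE };
 *   IOBufferMemoryDescriptor * md =
 *       IOBufferMemoryDescriptor::withBytes(header, sizeof(header),
 *                                           kIODirectionOut, false);
 */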

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits      flags     = _flags;
    IOOptionBits      options   = _options;
    vm_size_t         size      = _capacity;
    void *            buffer    = _buffer;
    mach_vm_address_t source    = (_ranges.v) ? _ranges.v64->address : 0;
    IOMemoryMap *     map       = 0;
    vm_offset_t       alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page_32(size);
#endif
    }
    else if (buffer)
    {
        if (kIOMemoryTypePhysical64 == (flags & kIOMemoryTypeMask))
            IOFreePhysical(source, size);
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}
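
/*
 * Sketch (illustrative only): reusing one descriptor for transfers of
 * different sizes instead of allocating a new one each time.
 *
 *   IOBufferMemoryDescriptor * buf =
 *       IOBufferMemoryDescriptor::withCapacity(16384, kIODirectionOut, false);
 *   if (buf)
 *   {
 *       buf->setLength(512);     // first transfer: only 512 bytes
 *       // ... issue I/O ...
 *       buf->setLength(4096);    // next transfer: 4096 bytes, same buffer
 *   }
 */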

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
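
/*
 * Sketch (illustrative only): building up a buffer incrementally; the
 * descriptor's length tracks the appended data automatically. The hdr and
 * payload variables are hypothetical.
 *
 *   IOBufferMemoryDescriptor * pkt =
 *       IOBufferMemoryDescriptor::withCapacity(1500, kIODirectionOut, false);
 *   if (pkt)
 *   {
 *       pkt->setLength(0);                    // start empty
 *       pkt->appendBytes(hdr, sizeof(hdr));
 *       pkt->appendBytes(payload, payloadLen);
 *       void * base = pkt->getBytesNoCopy();  // inspect the assembled bytes
 *   }
 */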

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

/* DEPRECATED */ void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                        /* DEPRECATED */ IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);