/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOLib.h>
#include <IOKit/IOInterleavedMemoryDescriptor.h>

#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOInterleavedMemoryDescriptor, IOMemoryDescriptor)

IOInterleavedMemoryDescriptor * IOInterleavedMemoryDescriptor::withCapacity(
                                  IOByteCount capacity,
                                  IODirection direction )
{
    //
    // Create a new IOInterleavedMemoryDescriptor.  The "buffer" will be made up
    // of several memory descriptors that are chained end-to-end to make up a
    // single memory descriptor.  The capacity is the maximum number of memory
    // descriptors that may be added, not a length in bytes.
    //

    IOInterleavedMemoryDescriptor * me = new IOInterleavedMemoryDescriptor;

    if ( me && !me->initWithCapacity(
                  /* capacity  */ capacity,
                  /* direction */ direction ))
    {
        me->release();
        me = 0;
    }

    return me;
}
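
// A minimal usage sketch (illustrative only; not part of the original file):
// a driver would typically build the interleaved "buffer" from existing
// memory descriptors roughly as follows.  The sub-descriptors mdA and mdB
// are hypothetical IOMemoryDescriptor objects owned by the caller.
//
//     IOInterleavedMemoryDescriptor * imd =
//         IOInterleavedMemoryDescriptor::withCapacity(2, kIODirectionOut);
//     if ( imd )
//     {
//         // Interleave the first half of mdA with all of mdB.
//         imd->setMemoryDescriptor(mdA, 0, mdA->getLength() / 2);
//         imd->setMemoryDescriptor(mdB, 0, mdB->getLength());
//
//         // ... prepare(), perform the I/O, complete() ...
//
//         imd->release();
//     }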

bool IOInterleavedMemoryDescriptor::initWithCapacity(
                                  IOByteCount capacity,
                                  IODirection direction )
{
    //
    // Initialize an IOInterleavedMemoryDescriptor.  The "buffer" will be made up
    // of several memory descriptors that are chained end-to-end to make up a
    // single memory descriptor.  The capacity is the maximum number of memory
    // descriptors that may be added, not a length in bytes.
    //

    assert(capacity);

    // Ask our superclass' opinion.
    if ( super::init() == false ) return false;

    // Initialize our minimal state.

    _flags = direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
    _length = 0;
    _mappings = 0;
    _tag = 0;
    _descriptorCount = 0;
    _descriptors = IONew(IOMemoryDescriptor *, capacity);
    _descriptorOffsets = IONew(IOByteCount, capacity);
    _descriptorLengths = IONew(IOByteCount, capacity);

    if ( (_descriptors == 0) || (_descriptorOffsets == 0) || (_descriptorLengths == 0) )
        return false;

    _descriptorCapacity = capacity;

    return true;
}

void IOInterleavedMemoryDescriptor::clearMemoryDescriptors( IODirection direction )
{
    UInt32 index;

    for ( index = 0; index < _descriptorCount; index++ )
    {
        if ( _descriptorPrepared )
            _descriptors[index]->complete(getDirection());

        _descriptors[index]->release();
        _descriptors[index] = 0;

        _descriptorOffsets[index] = 0;
        _descriptorLengths[index] = 0;
    }

    if ( direction != kIODirectionNone )
    {
        _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
        _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
    }

    _descriptorCount = 0;
    _length = 0;
    _mappings = 0;
    _tag = 0;
}

bool IOInterleavedMemoryDescriptor::setMemoryDescriptor(
                                  IOMemoryDescriptor * descriptor,
                                  IOByteCount offset,
                                  IOByteCount length )
{
    if ( _descriptorPrepared || (_descriptorCount == _descriptorCapacity) )
        return false;

    if ( (offset + length) > descriptor->getLength() )
        return false;

//    if ( descriptor->getDirection() != getDirection() )
//        return false;

    descriptor->retain();
    _descriptors[_descriptorCount] = descriptor;
    _descriptorOffsets[_descriptorCount] = offset;
    _descriptorLengths[_descriptorCount] = length;

    _descriptorCount++;

    _length += length;

    return true;
}

void IOInterleavedMemoryDescriptor::free()
{
    //
    // Free all of this object's outstanding resources.
    //

    if ( _descriptors )
    {
        for ( unsigned index = 0; index < _descriptorCount; index++ )
            _descriptors[index]->release();

        IODelete(_descriptors, IOMemoryDescriptor *, _descriptorCapacity);

        // The offset and length arrays were allocated as IOByteCount arrays,
        // so they must be deallocated with the same element type.
        if ( _descriptorOffsets != 0 )
            IODelete(_descriptorOffsets, IOByteCount, _descriptorCapacity);

        if ( _descriptorLengths != 0 )
            IODelete(_descriptorLengths, IOByteCount, _descriptorCapacity);
    }

    super::free();
}

IOReturn IOInterleavedMemoryDescriptor::prepare(IODirection forDirection)
{
    //
    // Prepare the memory for an I/O transfer.
    //
    // This involves paging in the memory and wiring it down for the duration
    // of the transfer.  The complete() method finishes the processing of the
    // memory after the I/O transfer finishes.
    //

    unsigned index;
    IOReturn status = kIOReturnSuccess;
    IOReturn statusUndo;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    for ( index = 0; index < _descriptorCount; index++ )
    {
        status = _descriptors[index]->prepare(forDirection);
        if ( status != kIOReturnSuccess ) break;
    }

    if ( status != kIOReturnSuccess )
    {
        for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ )
        {
            statusUndo = _descriptors[indexUndo]->complete(forDirection);
            assert(statusUndo == kIOReturnSuccess);
        }
    }

    if ( status == kIOReturnSuccess ) _descriptorPrepared = true;

    return status;
}

IOReturn IOInterleavedMemoryDescriptor::complete(IODirection forDirection)
{
    //
    // Complete processing of the memory after an I/O transfer finishes.
    //
    // This method shouldn't be called unless a prepare() was previously issued;
    // the prepare() and complete() must occur in pairs, before and after an I/O
    // transfer.
    //

    IOReturn status;
    IOReturn statusFinal = kIOReturnSuccess;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    for ( unsigned index = 0; index < _descriptorCount; index++ )
    {
        status = _descriptors[index]->complete(forDirection);
        if ( status != kIOReturnSuccess ) statusFinal = status;
        assert(status == kIOReturnSuccess);
    }

    _descriptorPrepared = false;

    return statusFinal;
}
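
// Sketch of the prepare()/complete() pairing described above (illustrative
// only; "imd" is a hypothetical, already-populated descriptor).  prepare()
// unwinds any sub-descriptors it wired if one of them fails, so the caller
// only needs to call complete() after a successful prepare().
//
//     IOReturn ret = imd->prepare();
//     if ( ret == kIOReturnSuccess )
//     {
//         // ... run the I/O transfer against the wired memory ...
//         imd->complete();
//     }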

addr64_t IOInterleavedMemoryDescriptor::getPhysicalSegment(
                                  IOByteCount   offset,
                                  IOByteCount * length,
                                  IOOptionBits  options )
{
    //
    // This method returns the physical address of the byte at the given offset
    // into the memory, and optionally the length of the physically contiguous
    // segment from that offset.
    //

    addr64_t pa;

    assert(offset <= _length);

    for ( unsigned index = 0; index < _descriptorCount; index++ )
    {
        if ( offset < _descriptorLengths[index] )
        {
            pa = _descriptors[index]->getPhysicalSegment(_descriptorOffsets[index] + offset, length, options);
            if ( length && ((_descriptorLengths[index] - offset) < *length) ) *length = _descriptorLengths[index] - offset;
            return pa;
        }
        offset -= _descriptorLengths[index];
    }

    if ( length ) *length = 0;

    return 0;
}
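
// Sketch of walking the interleaved buffer with getPhysicalSegment()
// (illustrative only; "imd" is a hypothetical, prepared descriptor).  Each
// call returns one physically contiguous range, clipped so it never crosses
// the sub-descriptor that contains the requested offset.
//
//     IOByteCount offset = 0;
//     while ( offset < imd->getLength() )
//     {
//         IOByteCount segLength = 0;
//         addr64_t    segAddr   = imd->getPhysicalSegment(offset, &segLength, 0);
//         if ( !segAddr || !segLength ) break;
//         // ... append (segAddr, segLength) to a scatter/gather list ...
//         offset += segLength;
//     }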