]>
Commit | Line | Data |
---|---|---|
1c79356b A |
1 | /* |
2 | * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* IOMbufMemoryCursor.cpp created by gvdl on 1999-1-20 */ | |
23 | ||
24 | #include <sys/cdefs.h> | |
25 | ||
26 | __BEGIN_DECLS | |
27 | #include <IOKit/assert.h> | |
28 | ||
29 | #include <sys/param.h> | |
30 | #include <sys/mbuf.h> | |
31 | #include <architecture/byte_order.h> | |
32 | __END_DECLS | |
33 | ||
34 | #include <IOKit/network/IOMbufMemoryCursor.h> | |
35 | #include <IOKit/IOLib.h> | |
36 | ||
37 | #ifndef MIN | |
38 | #define MIN(a,b) (((a)<(b))?(a):(b)) | |
39 | #endif /* MIN */ | |
40 | ||
41 | #define next_page(x) trunc_page(x + PAGE_SIZE) | |
42 | ||
43 | /* Define the meta class stuff for the entire file here */ | |
44 | OSDefineMetaClassAndAbstractStructors(IOMbufMemoryCursor, IOMemoryCursor) | |
45 | OSMetaClassDefineReservedUnused( IOMbufMemoryCursor, 0); | |
46 | OSMetaClassDefineReservedUnused( IOMbufMemoryCursor, 1); | |
47 | OSMetaClassDefineReservedUnused( IOMbufMemoryCursor, 2); | |
48 | OSMetaClassDefineReservedUnused( IOMbufMemoryCursor, 3); | |
49 | ||
50 | OSDefineMetaClassAndStructors(IOMbufNaturalMemoryCursor, IOMbufMemoryCursor) | |
51 | OSDefineMetaClassAndStructors(IOMbufBigMemoryCursor, IOMbufMemoryCursor) | |
52 | OSDefineMetaClassAndStructors(IOMbufLittleMemoryCursor, IOMbufMemoryCursor) | |
53 | ||
54 | #ifdef __ppc__ | |
55 | OSDefineMetaClassAndStructors(IOMbufDBDMAMemoryCursor, IOMbufMemoryCursor) | |
56 | #endif /* __ppc__ */ | |
57 | ||
58 | /*********************** class IOMbufMemoryCursor ***********************/ | |
59 | #define super IOMemoryCursor | |
60 | ||
//
// The inherited (segment size, transfer size, alignment) initializer is
// not supported for mbuf cursors: it unconditionally fails.  Callers
// must use the (outSeg, maxSegmentSize, maxNumSegments) overload below.
//
bool IOMbufMemoryCursor::initWithSpecification(OutputSegmentFunc outSeg,
                                               UInt32 maxSegmentSize,
                                               UInt32 maxTransferSize,
                                               UInt32 align)
{
    return false;
}
68 | ||
69 | bool IOMbufMemoryCursor::initWithSpecification(OutputSegmentFunc inOutSeg, | |
70 | UInt32 inMaxSegmentSize, | |
71 | UInt32 inMaxNumSegments) | |
72 | { | |
73 | if (!super::initWithSpecification(inOutSeg, inMaxSegmentSize, 0, 1)) | |
74 | return false; | |
75 | ||
76 | #if 0 | |
77 | // It is too confusing to force the max segment size to be at least | |
78 | // as large as a page. Most Enet devices only have 11-12 bit fields, | |
79 | // enough for a full size frame, and also the PAGE_SIZE parameter | |
80 | // may be architecture dependent. | |
81 | ||
82 | assert(inMaxSegmentSize >= PAGE_SIZE); | |
83 | if (inMaxSegmentSize < PAGE_SIZE) | |
84 | return false; | |
85 | #else | |
86 | if (!inMaxSegmentSize) | |
87 | return false; | |
88 | #endif | |
89 | ||
90 | maxSegmentSize = MIN(maxSegmentSize, PAGE_SIZE); | |
91 | maxNumSegments = inMaxNumSegments; | |
92 | coalesceCount = 0; | |
93 | ||
94 | return true; | |
95 | } | |
96 | ||
//
// Copy the src packet into the destination packet. The amount to copy is
// determined by the dstm->m_len, which is setup by analyseSegments, see below.
// The source mbuf is not freed nor modified.
//
#define BCOPY(s, d, l) do { bcopy((void *) s, (void *) d, l); } while(0)

//
// Walk the source and destination mbuf chains in lock step, copying
// data until the destination chain's last link has been filled.  The
// per-link copy sizes come from each dstm->m_len, pre-computed by
// analyseSegments().  Both chains remain owned by the caller.
//
static inline void coalesceSegments(struct mbuf *srcm, struct mbuf *dstm)
{
    vm_offset_t src, dst;       // current copy cursors
    SInt32 srcLen, dstLen;      // bytes remaining in the current links
    struct mbuf *temp;

    srcLen = srcm->m_len;
    src = mtod(srcm, vm_offset_t);

    dstLen = dstm->m_len;
    dst = mtod(dstm, vm_offset_t);

    for (;;) {
        if (srcLen < dstLen) {

            // Copy remainder of src mbuf to current dst.
            BCOPY(src, dst, srcLen);
            dst += srcLen;
            dstLen -= srcLen;

            // Move on to the next source mbuf.
            temp = srcm->m_next; assert(temp);
            srcm = temp;

            srcLen = srcm->m_len;
            src = mtod(srcm, vm_offset_t);
        }
        else if (srcLen > dstLen) {

            // Copy some of src mbuf to remaining space in dst mbuf.
            BCOPY(src, dst, dstLen);
            src += dstLen;
            srcLen -= dstLen;

            // Move on to the next destination mbuf.
            temp = dstm->m_next; assert(temp);
            dstm = temp;

            dstLen = dstm->m_len;
            dst = mtod(dstm, vm_offset_t);
        }
        else {  /* (srcLen == dstLen) */

            // Copy remainder of src into remaining space of current dst.
            BCOPY(src, dst, srcLen);

            // Advance the source chain.  Nothing is freed here; the
            // caller (analyseSegments) handles freeing afterwards.
            srcm = srcm->m_next;

            // Do we have any data left to copy?  The destination
            // chain's end terminates the whole copy.
            if (!dstm->m_next)
                break;
            dstm = dstm->m_next;

            assert(srcm);
            dstLen = dstm->m_len;
            dst = mtod(dstm, vm_offset_t);
            srcLen = srcm->m_len;
            src = mtod(srcm, vm_offset_t);
        }
    }
}
166 | ||
// Number of per-mbuf segment counts cached by genPhysicalSegments and
// passed down to analyseSegments in segsPerMBuf[].
static const UInt32 kMBufDataCacheSize = 16;

//
// Build a replacement mbuf chain for 'packet' compact enough that its
// total physical segment count fits within 'maxSegs', then copy the
// data across with coalesceSegments().  On success the original header
// mbuf is kept (length zeroed) and linked ahead of the new chain;
// interior mbufs that were fully consumed are freed, and any tail that
// did not need copying (from 'in' onward) is preserved.  On failure the
// partial output chain is freed and the input packet is left unchanged.
//
static inline bool analyseSegments(
    struct mbuf *packet,        /* input packet mbuf */
    const UInt32 mbufsInCache,  /* number of entries in segsPerMBuf[] */
    const UInt32 segsPerMBuf[], /* segments required per mbuf */
    SInt32 numSegs,             /* total number of segments */
    const UInt32 maxSegs)       /* max controller segments per mbuf */
{
    struct mbuf *newPacket;     // output mbuf chain.
    struct mbuf *out;           // current output mbuf link.
    SInt32 outSize;             // size of current output mbuf link.
    SInt32 outSegs;             // segments for current output mbuf link.
    SInt32 doneSegs;            // segments for output mbuf chain.
    SInt32 outLen;              // remaining length of input buffer.

    struct mbuf *in = packet;   // save the original input packet pointer.
    UInt32 inIndex = 0;

    // Allocate a mbuf (non header mbuf) to begin the output mbuf chain.
    //
    MGET(newPacket, M_DONTWAIT, MT_DATA);
    if (!newPacket) {
        IOLog("analyseSegments: MGET() 1 error\n");
        return false;
    }

    /* Initialise outgoing packet controls */
    out = newPacket;
    outSize = MLEN;
    doneSegs = outSegs = outLen = 0;

    // numSegs stores the delta between the total and the max. For each
    // input mbuf consumed, we decrement numSegs.
    //
    numSegs -= maxSegs;

    // Loop through the input packet mbuf 'in' and construct a new mbuf chain
    // large enough to make (numSegs + doneSegs + outSegs) less than or
    // equal to zero.
    //
    do {
        vm_offset_t vmo;

        outLen += in->m_len;

        while (outLen > outSize) {
            // Oh dear the current outgoing length is too big.
            if (outSize != MCLBYTES) {
                // Current mbuf is not yet a cluster so promote, then
                // check for error.

                MCLGET(out, M_DONTWAIT);
                if ( !(out->m_flags & M_EXT) ) {
                    IOLog("analyseSegments: MCLGET() error\n");
                    goto bombAnalysis;
                }

                outSize = MCLBYTES;

                continue;
            }

            // Current link is a full cluster: account for the physical
            // pages it spans, then start a fresh output link.
            vmo = mtod(out, vm_offset_t);
            out->m_len = MCLBYTES;      /* Fill in target copy size */
            doneSegs += (round_page(vmo + MCLBYTES) - trunc_page(vmo))
                      / PAGE_SIZE;

            // If the number of segments of the output chain, plus
            // the segment for the mbuf we are about to allocate is greater
            // than maxSegs, then abort.
            //
            if (doneSegs + 1 > (int) maxSegs) {
                IOLog("analyseSegments: maxSegs limit 1 reached! %ld %ld\n",
                      doneSegs, maxSegs);
                goto bombAnalysis;
            }

            MGET(out->m_next, M_DONTWAIT, MT_DATA);
            if (!out->m_next) {
                IOLog("analyseSegments: MGET() error\n");
                goto bombAnalysis;
            }

            out = out->m_next;
            outSize = MLEN;
            outLen -= MCLBYTES;
        }

        // Compute number of segments in the current outgoing mbuf.
        vmo = mtod(out, vm_offset_t);
        outSegs = (round_page(vmo + outLen) - trunc_page(vmo)) / PAGE_SIZE;
        if (doneSegs + outSegs > (int) maxSegs) {
            IOLog("analyseSegments: maxSegs limit 2 reached! %ld %ld %ld\n",
                  doneSegs, outSegs, maxSegs);
            goto bombAnalysis;
        }

        // Get the number of segments in the current input mbuf, either
        // from the cache filled in by genPhysicalSegments...
        if (inIndex < mbufsInCache)
            numSegs -= segsPerMBuf[inIndex];    // Yeah, in cache
        else {
            // ...or recompute from scratch. Copy of the page-walk code
            // from genPhysicalSegments.
            int thisLen = 0, mbufLen;

            vmo = mtod(in, vm_offset_t);
            for (mbufLen = in->m_len; mbufLen; mbufLen -= thisLen) {
                thisLen = MIN(next_page(vmo), vmo + mbufLen) - vmo;
                vmo += thisLen;
                numSegs--;
            }
        }

        // Walk the incoming buffer on by one.
        in = in->m_next;
        inIndex++;

        // Continue looping until the total number of segments has dropped
        // to an acceptable level, or until we run out of mbuf links.

    } while (in && ((numSegs + doneSegs + outSegs) > 0));

    if ( (int) (numSegs + doneSegs + outSegs) <= 0) {   // success

        out->m_len = outLen;    // Set last mbuf with the remaining length.

        // The amount to copy is determined by the segment length in each
        // mbuf linked to newPacket. The sum can be smaller than
        // packet->pkthdr.len;
        //
        coalesceSegments(packet, newPacket);

        // Copy complete.

        // If 'in' is non zero, then it means that we only need to copy part
        // of the input packet, beginning at the start. The mbuf chain
        // beginning at 'in' must be preserved and linked to the new
        // output packet chain. Everything before 'in', except for the
        // header mbuf can be freed.
        //
        struct mbuf *m = packet->m_next;
        while (m != in)
            m = m_free(m);

        // The initial header mbuf is preserved, its length set to zero, and
        // linked to the new packet chain.

        packet->m_len = 0;
        packet->m_next = newPacket;
        newPacket->m_next = in;

        return true;
    }

bombAnalysis:

    m_freem(newPacket);
    return false;
}
326 | ||
327 | UInt32 IOMbufMemoryCursor::genPhysicalSegments(struct mbuf *packet, void *vector, | |
328 | UInt32 maxSegs, bool doCoalesce) | |
329 | { | |
330 | bool doneCoalesce = false; | |
331 | ||
332 | if (!packet || !(packet->m_flags & M_PKTHDR)) | |
333 | return 0; | |
334 | ||
335 | if (!maxSegs) | |
336 | maxSegs = maxNumSegments; | |
337 | if (!maxSegs) | |
338 | return 0; | |
339 | ||
340 | if ( packet->m_next == 0 ) | |
341 | { | |
342 | vm_offset_t src; | |
343 | struct IOPhysicalSegment physSeg; | |
344 | /* | |
345 | * the packet consists of only 1 mbuf | |
346 | * so if the data buffer doesn't span a page boundary | |
347 | * we can take the simple way out | |
348 | */ | |
349 | src = mtod(packet, vm_offset_t); | |
350 | ||
351 | if ( trunc_page(src) == trunc_page(src+packet->m_len-1) ) | |
352 | { | |
353 | if ((physSeg.location = | |
354 | (IOPhysicalAddress)mcl_to_paddr((char *)src)) == 0) | |
355 | physSeg.location = (IOPhysicalAddress)pmap_extract(kernel_pmap, src); | |
356 | if (!physSeg.location) | |
357 | return 0; | |
358 | physSeg.length = packet->m_len; | |
359 | (*outSeg)(physSeg, vector, 0); | |
360 | ||
361 | return 1; | |
362 | } | |
363 | } | |
364 | ||
365 | if ( doCoalesce == true && maxSegs == 1 ) | |
366 | { | |
367 | vm_offset_t src; | |
368 | vm_offset_t dst; | |
369 | struct mbuf *m; | |
370 | struct mbuf *mnext; | |
371 | struct mbuf *out; | |
372 | UInt32 len = 0; | |
373 | struct IOPhysicalSegment physSeg; | |
374 | ||
375 | m = packet; | |
376 | ||
377 | MGET(out, M_DONTWAIT, MT_DATA); | |
378 | if ( out == 0 ) return 0; | |
379 | ||
380 | MCLGET(out, M_DONTWAIT); | |
381 | if ( !(out->m_flags & M_EXT) ) | |
382 | { | |
383 | m_free( out ); | |
384 | return 0; | |
385 | } | |
386 | dst = mtod(out, vm_offset_t); | |
387 | ||
388 | do | |
389 | { | |
390 | src = mtod(m, vm_offset_t); | |
391 | BCOPY( src, dst, m->m_len ); | |
392 | dst += m->m_len; | |
393 | len += m->m_len; | |
394 | } while ( (m = m->m_next) != 0 ); | |
395 | ||
396 | out->m_len = len; | |
397 | ||
398 | dst = mtod(out, vm_offset_t); | |
399 | if ((physSeg.location = (IOPhysicalAddress)mcl_to_paddr((char *)dst)) == 0) | |
400 | physSeg.location = (IOPhysicalAddress)pmap_extract(kernel_pmap, dst); | |
401 | if (!physSeg.location) | |
402 | return 0; | |
403 | physSeg.length = out->m_len; | |
404 | (*outSeg)(physSeg, vector, 0); | |
405 | ||
406 | m = packet->m_next; | |
407 | while (m != 0) | |
408 | { | |
409 | mnext = m->m_next; | |
410 | m_free(m); | |
411 | m = mnext; | |
412 | } | |
413 | ||
414 | // The initial header mbuf is preserved, its length set to zero, and | |
415 | // linked to the new packet chain. | |
416 | ||
417 | packet->m_len = 0; | |
418 | packet->m_next = out; | |
419 | out->m_next = 0; | |
420 | ||
421 | return 1; | |
422 | } | |
423 | ||
424 | ||
425 | // | |
426 | // Iterate over the mbuf, translating segments were allowed. When we | |
427 | // are not allowed to translate segments then accumulate segment | |
428 | // statistics up to kMBufDataCacheSize of mbufs. Finally | |
429 | // if we overflow our cache just count how many segments this | |
430 | // packet represents. | |
431 | // | |
432 | UInt32 segsPerMBuf[kMBufDataCacheSize]; | |
433 | ||
434 | tryAgain: | |
435 | UInt32 curMBufIndex = 0; | |
436 | UInt32 curSegIndex = 0; | |
437 | UInt32 lastSegCount = 0; | |
438 | struct mbuf *m = packet; | |
439 | ||
440 | // For each mbuf in incoming packet. | |
441 | do { | |
442 | vm_size_t mbufLen, thisLen = 0; | |
443 | vm_offset_t src; | |
444 | ||
445 | // Step through each segment in the current mbuf | |
446 | for (mbufLen = m->m_len, src = mtod(m, vm_offset_t); | |
447 | mbufLen; | |
448 | src += thisLen, mbufLen -= thisLen) | |
449 | { | |
450 | // If maxSegmentSize is atleast PAGE_SIZE, then | |
451 | // thisLen = MIN(next_page(src), src + mbufLen) - src; | |
452 | ||
453 | thisLen = MIN(mbufLen, maxSegmentSize); | |
454 | thisLen = MIN(next_page(src), src + thisLen) - src; | |
455 | ||
456 | // If room left then find the current segment addr and output | |
457 | if (curSegIndex < maxSegs) { | |
458 | struct IOPhysicalSegment physSeg; | |
459 | ||
460 | if ((physSeg.location = | |
461 | (IOPhysicalAddress)mcl_to_paddr((char *)src)) == 0) | |
462 | physSeg.location = (IOPhysicalAddress)pmap_extract(kernel_pmap, src); | |
463 | if (!physSeg.location) | |
464 | return 0; | |
465 | physSeg.length = thisLen; | |
466 | (*outSeg)(physSeg, vector, curSegIndex); | |
467 | } | |
468 | // Count segments if we are coalescing. | |
469 | curSegIndex++; | |
470 | } | |
471 | ||
472 | // Cache the segment count data if room is available. | |
473 | if (curMBufIndex < kMBufDataCacheSize) { | |
474 | segsPerMBuf[curMBufIndex] = curSegIndex - lastSegCount; | |
475 | lastSegCount = curSegIndex; | |
476 | } | |
477 | ||
478 | // Move on to next imcoming mbuf | |
479 | curMBufIndex++; | |
480 | m = m->m_next; | |
481 | } while (m); | |
482 | ||
483 | // If we finished cleanly return number of segments found | |
484 | if (curSegIndex <= maxSegs) | |
485 | return curSegIndex; | |
486 | if (!doCoalesce) | |
487 | return 0; // if !coalescing we've got a problem. | |
488 | ||
489 | ||
490 | // If we are coalescing and it is possible then attempt coalesce, | |
491 | if (!doneCoalesce | |
492 | && (UInt) packet->m_pkthdr.len <= maxSegs * maxSegmentSize) { | |
493 | // Hmm, we have to do some coalescing. | |
494 | bool analysisRet; | |
495 | ||
496 | analysisRet = analyseSegments(packet, | |
497 | MIN(curMBufIndex, kMBufDataCacheSize), | |
498 | segsPerMBuf, | |
499 | curSegIndex, maxSegs); | |
500 | if (analysisRet) { | |
501 | doneCoalesce = true; | |
502 | coalesceCount++; | |
503 | goto tryAgain; | |
504 | } | |
505 | } | |
506 | ||
507 | assert(!doneCoalesce); // Problem in Coalesce code. | |
508 | packetTooBigErrors++; | |
509 | return 0; | |
510 | } | |
511 | ||
512 | /********************* class IOMbufBigMemoryCursor **********************/ | |
513 | IOMbufBigMemoryCursor * | |
514 | IOMbufBigMemoryCursor::withSpecification(UInt32 maxSegSize, UInt32 maxNumSegs) | |
515 | { | |
516 | IOMbufBigMemoryCursor *me = new IOMbufBigMemoryCursor; | |
517 | ||
518 | if (me && !me->initWithSpecification(&bigOutputSegment, | |
519 | maxSegSize, maxNumSegs)) { | |
520 | me->release(); | |
521 | return 0; | |
522 | } | |
523 | ||
524 | return me; | |
525 | } | |
526 | ||
527 | ||
528 | /******************* class IOMbufNaturalMemoryCursor ********************/ | |
529 | IOMbufNaturalMemoryCursor * | |
530 | IOMbufNaturalMemoryCursor::withSpecification(UInt32 maxSegSize, UInt32 maxNumSegs) | |
531 | { | |
532 | IOMbufNaturalMemoryCursor *me = new IOMbufNaturalMemoryCursor; | |
533 | ||
534 | if (me && !me->initWithSpecification(&naturalOutputSegment, | |
535 | maxSegSize, maxNumSegs)) { | |
536 | me->release(); | |
537 | return 0; | |
538 | } | |
539 | ||
540 | return me; | |
541 | } | |
542 | ||
543 | ||
544 | /******************** class IOMbufLittleMemoryCursor ********************/ | |
545 | IOMbufLittleMemoryCursor * | |
546 | IOMbufLittleMemoryCursor::withSpecification(UInt32 maxSegSize, UInt32 maxNumSegs) | |
547 | { | |
548 | IOMbufLittleMemoryCursor *me = new IOMbufLittleMemoryCursor; | |
549 | ||
550 | if (me && !me->initWithSpecification(&littleOutputSegment, | |
551 | maxSegSize, maxNumSegs)) { | |
552 | me->release(); | |
553 | return 0; | |
554 | } | |
555 | ||
556 | return me; | |
557 | } | |
558 | ||
559 | ||
560 | /******************** class IOMbufDBDMAMemoryCursor *********************/ | |
561 | #ifdef __ppc__ | |
562 | ||
563 | IOMbufDBDMAMemoryCursor * | |
564 | IOMbufDBDMAMemoryCursor::withSpecification(UInt32 maxSegSize, UInt32 maxNumSegs) | |
565 | { | |
566 | IOMbufDBDMAMemoryCursor *me = new IOMbufDBDMAMemoryCursor; | |
567 | ||
568 | if (me && !me->initWithSpecification(&dbdmaOutputSegment, | |
569 | maxSegSize, maxNumSegs)) { | |
570 | me->release(); | |
571 | return 0; | |
572 | } | |
573 | ||
574 | return me; | |
575 | } | |
576 | #endif /* __ppc__ */ |