2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
22 /* IOMbufMemoryCursor.cpp created by gvdl on 1999-1-20 */
24 #include <sys/cdefs.h>
27 #include <IOKit/assert.h>
29 #include <sys/param.h>
31 #include <architecture/byte_order.h>
34 #include <IOKit/network/IOMbufMemoryCursor.h>
35 #include <IOKit/IOLib.h>
// NOTE(review): <sys/param.h> (included above) normally defines MIN already;
// redefining it here without an #undef risks a macro-redefinition warning —
// TODO confirm against the build flags once the full file is restored.
38 #define MIN(a,b) (((a)<(b))?(a):(b))
// Address of the first byte of the page FOLLOWING the one containing x.
// Used below to clip a copy/segment length at the next page boundary.
41 #define next_page(x) trunc_page(x + PAGE_SIZE)
43 /* Define the meta class stuff for the entire file here */
// Abstract base class: instances are never created directly, only via the
// concrete subclasses defined below.
44 OSDefineMetaClassAndAbstractStructors(IOMbufMemoryCursor
, IOMemoryCursor
)
// Four reserved vtable slots kept for binary-compatible future expansion.
45 OSMetaClassDefineReservedUnused( IOMbufMemoryCursor
, 0);
46 OSMetaClassDefineReservedUnused( IOMbufMemoryCursor
, 1);
47 OSMetaClassDefineReservedUnused( IOMbufMemoryCursor
, 2);
48 OSMetaClassDefineReservedUnused( IOMbufMemoryCursor
, 3);
// Concrete cursor flavours; each differs only in the OutputSegmentFunc it
// installs (host-natural, big-endian, little-endian, DBDMA descriptor).
50 OSDefineMetaClassAndStructors(IOMbufNaturalMemoryCursor
, IOMbufMemoryCursor
)
51 OSDefineMetaClassAndStructors(IOMbufBigMemoryCursor
, IOMbufMemoryCursor
)
52 OSDefineMetaClassAndStructors(IOMbufLittleMemoryCursor
, IOMbufMemoryCursor
)
55 OSDefineMetaClassAndStructors(IOMbufDBDMAMemoryCursor
, IOMbufMemoryCursor
)
58 /*********************** class IOMbufMemoryCursor ***********************/
59 #define super IOMemoryCursor
// First overload (deprecated 3-argument form, judging by the second overload
// that follows). NOTE(review): the embedded line numbers jump from 63 to 69 —
// this overload's body has been elided from the fragment; restore before
// making any structural edit.
61 bool IOMbufMemoryCursor::initWithSpecification(OutputSegmentFunc outSeg
,
62 UInt32 maxSegmentSize
,
63 UInt32 maxTransferSize
,
// Second overload: initialize the cursor with the output-segment callback,
// the controller's maximum segment size, and the maximum segment count.
69 bool IOMbufMemoryCursor::initWithSpecification(OutputSegmentFunc inOutSeg
,
70 UInt32 inMaxSegmentSize
,
71 UInt32 inMaxNumSegments
)
// Delegate to IOMemoryCursor with maxTransferSize = 0 and alignment = 1.
73 if (!super::initWithSpecification(inOutSeg
, inMaxSegmentSize
, 0, 1))
77 // It is too confusing to force the max segment size to be at least
78 // as large as a page. Most Enet devices only have 11-12 bit fields,
79 // enough for a full size frame, and also the PAGE_SIZE parameter
80 // may be architecture dependent.
82 assert(inMaxSegmentSize
>= PAGE_SIZE
);
83 if (inMaxSegmentSize
< PAGE_SIZE
)
86 if (!inMaxSegmentSize
)
// Clamp the inherited maxSegmentSize to one page.
90 maxSegmentSize
= MIN(maxSegmentSize
, PAGE_SIZE
);
91 maxNumSegments
= inMaxNumSegments
;
98 // Copy the src packet into the destination packet. The amount to copy is
99 // determined by the dstm->m_len, which is setup by analyseSegments, see below.
100 // The source mbuf is not freed nor modified.
// Thin wrapper over bcopy() so callers can pass integral addresses
// (vm_offset_t) without casting at every call site.
102 #define BCOPY(s, d, l) do { bcopy((void *) s, (void *) d, l); } while(0)
// coalesceSegments - copy the data from the 'srcm' mbuf chain into the
// 'dstm' chain, walking both chains in step. Each destination link's m_len
// was pre-set by analyseSegments() to say how much it should receive.
// NOTE(review): the embedded line numbers skip (e.g. 120->124, 148->150->153)
// — loop heads, braces and the mbuf-advance statements have been elided from
// this fragment; do not restructure until the full text is restored.
104 static inline void coalesceSegments(struct mbuf
*srcm
, struct mbuf
*dstm
)
// Current source/destination data pointers and remaining byte counts.
106 vm_offset_t src
, dst
;
107 SInt32 srcLen
, dstLen
;
110 srcLen
= srcm
->m_len
;
111 src
= mtod(srcm
, vm_offset_t
);
113 dstLen
= dstm
->m_len
;
114 dst
= mtod(dstm
, vm_offset_t
);
// Case 1: the source link is smaller than the space left in dst.
117 if (srcLen
< dstLen
) {
119 // Copy remainder of src mbuf to current dst.
120 BCOPY(src
, dst
, srcLen
);
124 // Move on to the next source mbuf.
125 temp
= srcm
->m_next
; assert(temp
);
128 srcLen
= srcm
->m_len
;
129 src
= mtod(srcm
, vm_offset_t
);
// Case 2: the source link overflows the current destination link.
131 else if (srcLen
> dstLen
) {
133 // Copy some of src mbuf to remaining space in dst mbuf.
134 BCOPY(src
, dst
, dstLen
);
138 // Move on to the next destination mbuf.
139 temp
= dstm
->m_next
; assert(temp
);
142 dstLen
= dstm
->m_len
;
143 dst
= mtod(dstm
, vm_offset_t
);
// Case 3: source and destination links are exactly the same size.
145 else { /* (srcLen == dstLen) */
147 // copy remainder of src into remaining space of current dst
148 BCOPY(src
, dst
, srcLen
);
150 // Free current mbuf and move the current onto the next
153 // Do we have any data left to copy?
// Refresh both cursors for the next iteration of the (elided) loop.
159 dstLen
= dstm
->m_len
;
160 dst
= mtod(dstm
, vm_offset_t
);
161 srcLen
= srcm
->m_len
;
162 src
= mtod(srcm
, vm_offset_t
);
// Number of per-mbuf segment counts genPhysicalSegments() caches in
// segsPerMBuf[] for reuse by analyseSegments().
167 static const UInt32 kMBufDataCacheSize
= 16;
// analyseSegments - decide how much of 'packet' must be copied into freshly
// allocated (cluster) mbufs so that the whole chain fits in at most
// 'maxSegs' physical segments, build that replacement chain, and splice it
// in after the header mbuf. Returns a bool (success indicator — the returns
// themselves are elided from this fragment).
// NOTE(review): embedded line numbers skip throughout (188->190, 221->230,
// 274->280, ...) — error-handling branches, loop heads and cleanup code have
// been elided; treat this transcript as incomplete.
169 static inline bool analyseSegments(
170 struct mbuf
*packet
, /* input packet mbuf */
171 const UInt32 mbufsInCache
, /* number of entries in segsPerMBuf[] */
172 const UInt32 segsPerMBuf
[], /* segments required per mbuf */
173 SInt32 numSegs
, /* total number of segments */
174 const UInt32 maxSegs
) /* max controller segments per mbuf */
176 struct mbuf
*newPacket
; // output mbuf chain.
177 struct mbuf
*out
; // current output mbuf link.
178 SInt32 outSize
; // size of current output mbuf link.
179 SInt32 outSegs
; // segments for current output mbuf link.
180 SInt32 doneSegs
; // segments for output mbuf chain.
181 SInt32 outLen
; // remaining length of input buffer.
183 struct mbuf
*in
= packet
; // save the original input packet pointer.
186 // Allocate a mbuf (non header mbuf) to begin the output mbuf chain.
188 MGET(newPacket
, M_DONTWAIT
, MT_DATA
);
190 IOLog("analyseSegments: MGET() 1 error\n");
194 /* Initialise outgoing packet controls */
197 doneSegs
= outSegs
= outLen
= 0;
199 // numSegs stores the delta between the total and the max. For each
200 // input mbuf consumed, we decrement numSegs.
204 // Loop through the input packet mbuf 'in' and construct a new mbuf chain
205 // large enough to make (numSegs + doneSegs + outSegs) less than or
// Grow the current output link while the accumulated length exceeds it.
213 while (outLen
> outSize
) {
214 // Oh dear the current outgoing length is too big.
215 if (outSize
!= MCLBYTES
) {
216 // Current mbuf is not yet a cluster so promote, then
219 MCLGET(out
, M_DONTWAIT
);
220 if ( !(out
->m_flags
& M_EXT
) ) {
221 IOLog("analyseSegments: MCLGET() error\n");
// Account for the pages spanned by the full cluster we just committed.
230 vmo
= mtod(out
, vm_offset_t
);
231 out
->m_len
= MCLBYTES
; /* Fill in target copy size */
232 doneSegs
+= (round_page(vmo
+ MCLBYTES
) - trunc_page(vmo
))
235 // If the number of segments of the output chain, plus
236 // the segment for the mbuf we are about to allocate is greater
237 // than maxSegs, then abort.
239 if (doneSegs
+ 1 > (int) maxSegs
) {
240 IOLog("analyseSegments: maxSegs limit 1 reached! %ld %ld\n",
// Chain a fresh mbuf onto the output and continue filling it.
245 MGET(out
->m_next
, M_DONTWAIT
, MT_DATA
);
247 IOLog("analyseSegments: MGET() error\n");
256 // Compute number of segments in current outgoing mbuf.
257 vmo
= mtod(out
, vm_offset_t
);
258 outSegs
= (round_page(vmo
+ outLen
) - trunc_page(vmo
)) / PAGE_SIZE
;
259 if (doneSegs
+ outSegs
> (int) maxSegs
) {
260 IOLog("analyseSegments: maxSegs limit 2 reached! %ld %ld %ld\n",
261 doneSegs
, outSegs
, maxSegs
);
265 // Get the number of segments in the current inbuf
266 if (inIndex
< mbufsInCache
)
267 numSegs
-= segsPerMBuf
[inIndex
]; // Yeah, in cache
269 // Hmm, we have to recompute from scratch. Copy code from genPhys.
270 int thisLen
= 0, mbufLen
;
272 vmo
= mtod(in
, vm_offset_t
);
273 for (mbufLen
= in
->m_len
; mbufLen
; mbufLen
-= thisLen
) {
274 thisLen
= MIN(next_page(vmo
), vmo
+ mbufLen
) - vmo
;
280 // Walk the incoming buffer chain on to the next link.
284 // continue looping until the total number of segments has dropped
285 // to an acceptable level, or if we ran out of mbuf links.
287 } while (in
&& ((numSegs
+ doneSegs
+ outSegs
) > 0));
289 if ( (int) (numSegs
+ doneSegs
+ outSegs
) <= 0) { // success
291 out
->m_len
= outLen
; // Set last mbuf with the remaining length.
293 // The amount to copy is determined by the segment length in each
294 // mbuf linked to newPacket. The sum can be smaller than
295 // packet->pkthdr.len;
297 coalesceSegments(packet
, newPacket
);
301 // If 'in' is non zero, then it means that we only need to copy part
302 // of the input packet, beginning at the start. The mbuf chain
303 // beginning at 'in' must be preserved and linked to the new
304 // output packet chain. Everything before 'in', except for the
305 // header mbuf can be freed.
307 struct mbuf
*m
= packet
->m_next
;
311 // The initial header mbuf is preserved, its length set to zero, and
312 // linked to the new packet chain.
315 packet
->m_next
= newPacket
;
316 newPacket
->m_next
= in
;
// genPhysicalSegments - walk the mbuf chain of 'packet', translate each
// page-bounded run into an IOPhysicalSegment, and emit it through the
// instance's outSeg callback into 'vector'. Returns the number of segments
// produced, or 0 on failure. If 'doCoalesce' is set and the packet needs
// more than 'maxSegs' segments, the packet is first copied into a denser
// chain (via analyseSegments, or a single cluster when maxSegs == 1).
// NOTE(review): embedded line numbers skip throughout (359->365, 404->414,
// 466->468, ...) — returns, loop heads, braces and the coalesce retry path
// are elided from this fragment; treat this transcript as incomplete.
327 UInt32
IOMbufMemoryCursor::genPhysicalSegments(struct mbuf
*packet
, void *vector
,
328 UInt32 maxSegs
, bool doCoalesce
)
330 bool doneCoalesce
= false;
// Reject anything that is not a header mbuf.
332 if (!packet
|| !(packet
->m_flags
& M_PKTHDR
))
// maxSegs == 0 apparently means "use the configured limit" — the guard
// itself is elided here; TODO confirm when the full text is available.
336 maxSegs
= maxNumSegments
;
// Fast path: single-mbuf packet.
340 if ( packet
->m_next
== 0 )
343 struct IOPhysicalSegment physSeg
;
345 * the packet consists of only 1 mbuf
346 * so if the data buffer doesn't span a page boundary
347 * we can take the simple way out
349 src
= mtod(packet
, vm_offset_t
);
351 if ( trunc_page(src
) == trunc_page(src
+packet
->m_len
-1) )
// Translate virtual to physical: try the mbuf-cluster map first,
// then fall back to a kernel pmap lookup.
353 if ((physSeg
.location
=
354 (IOPhysicalAddress
)mcl_to_paddr((char *)src
)) == 0)
355 physSeg
.location
= (IOPhysicalAddress
)pmap_extract(kernel_pmap
, src
);
356 if (!physSeg
.location
)
358 physSeg
.length
= packet
->m_len
;
359 (*outSeg
)(physSeg
, vector
, 0);
// Special coalesce path: controller accepts exactly one segment, so
// copy the whole packet into a single fresh cluster mbuf.
365 if ( doCoalesce
== true && maxSegs
== 1 )
373 struct IOPhysicalSegment physSeg
;
377 MGET(out
, M_DONTWAIT
, MT_DATA
);
378 if ( out
== 0 ) return 0;
380 MCLGET(out
, M_DONTWAIT
);
381 if ( !(out
->m_flags
& M_EXT
) )
386 dst
= mtod(out
, vm_offset_t
);
// Copy every link of the original chain into the cluster.
390 src
= mtod(m
, vm_offset_t
);
391 BCOPY( src
, dst
, m
->m_len
);
394 } while ( (m
= m
->m_next
) != 0 );
398 dst
= mtod(out
, vm_offset_t
);
399 if ((physSeg
.location
= (IOPhysicalAddress
)mcl_to_paddr((char *)dst
)) == 0)
400 physSeg
.location
= (IOPhysicalAddress
)pmap_extract(kernel_pmap
, dst
);
401 if (!physSeg
.location
)
403 physSeg
.length
= out
->m_len
;
404 (*outSeg
)(physSeg
, vector
, 0);
414 // The initial header mbuf is preserved, its length set to zero, and
415 // linked to the new packet chain.
418 packet
->m_next
= out
;
426 // Iterate over the mbuf, translating segments where allowed. When we
427 // are not allowed to translate segments then accumulate segment
428 // statistics up to kMBufDataCacheSize of mbufs. Finally
429 // if we overflow our cache just count how many segments this
430 // packet represents.
432 UInt32 segsPerMBuf
[kMBufDataCacheSize
];
435 UInt32 curMBufIndex
= 0;
436 UInt32 curSegIndex
= 0;
437 UInt32 lastSegCount
= 0;
438 struct mbuf
*m
= packet
;
440 // For each mbuf in incoming packet.
442 vm_size_t mbufLen
, thisLen
= 0;
445 // Step through each segment in the current mbuf
446 for (mbufLen
= m
->m_len
, src
= mtod(m
, vm_offset_t
);
448 src
+= thisLen
, mbufLen
-= thisLen
)
450 // If maxSegmentSize is at least PAGE_SIZE, then
451 // thisLen = MIN(next_page(src), src + mbufLen) - src;
// Clip each segment at both the controller's maxSegmentSize and
// the next page boundary.
453 thisLen
= MIN(mbufLen
, maxSegmentSize
);
454 thisLen
= MIN(next_page(src
), src
+ thisLen
) - src
;
456 // If room left then find the current segment addr and output
457 if (curSegIndex
< maxSegs
) {
458 struct IOPhysicalSegment physSeg
;
460 if ((physSeg
.location
=
461 (IOPhysicalAddress
)mcl_to_paddr((char *)src
)) == 0)
462 physSeg
.location
= (IOPhysicalAddress
)pmap_extract(kernel_pmap
, src
);
463 if (!physSeg
.location
)
465 physSeg
.length
= thisLen
;
466 (*outSeg
)(physSeg
, vector
, curSegIndex
);
468 // Count segments if we are coalescing.
472 // Cache the segment count data if room is available.
473 if (curMBufIndex
< kMBufDataCacheSize
) {
474 segsPerMBuf
[curMBufIndex
] = curSegIndex
- lastSegCount
;
475 lastSegCount
= curSegIndex
;
478 // Move on to next incoming mbuf
483 // If we finished cleanly return number of segments found
484 if (curSegIndex
<= maxSegs
)
487 return 0; // if !coalescing we've got a problem.
490 // If we are coalescing and it is possible then attempt coalesce,
// ... only if the total packet length can fit in maxSegs segments at all.
492 && (UInt
) packet
->m_pkthdr
.len
<= maxSegs
* maxSegmentSize
) {
493 // Hmm, we have to do some coalescing.
496 analysisRet
= analyseSegments(packet
,
497 MIN(curMBufIndex
, kMBufDataCacheSize
),
499 curSegIndex
, maxSegs
);
// Reaching here after a coalesce means the packet still doesn't fit.
507 assert(!doneCoalesce
); // Problem in Coalesce code.
508 packetTooBigErrors
++;
512 /********************* class IOMbufBigMemoryCursor **********************/
// Factory: allocate and initialize a cursor that emits big-endian segment
// descriptors via bigOutputSegment. Returns 0 on failure (the release/return
// tail is elided from this fragment).
513 IOMbufBigMemoryCursor
*
514 IOMbufBigMemoryCursor::withSpecification(UInt32 maxSegSize
, UInt32 maxNumSegs
)
516 IOMbufBigMemoryCursor
*me
= new IOMbufBigMemoryCursor
;
518 if (me
&& !me
->initWithSpecification(&bigOutputSegment
,
519 maxSegSize
, maxNumSegs
)) {
528 /******************* class IOMbufNaturalMemoryCursor ********************/
// Factory: allocate and initialize a cursor that emits host-byte-order
// segment descriptors via naturalOutputSegment. Returns 0 on failure (the
// release/return tail is elided from this fragment).
529 IOMbufNaturalMemoryCursor
*
530 IOMbufNaturalMemoryCursor::withSpecification(UInt32 maxSegSize
, UInt32 maxNumSegs
)
532 IOMbufNaturalMemoryCursor
*me
= new IOMbufNaturalMemoryCursor
;
534 if (me
&& !me
->initWithSpecification(&naturalOutputSegment
,
535 maxSegSize
, maxNumSegs
)) {
544 /******************** class IOMbufLittleMemoryCursor ********************/
// Factory: allocate and initialize a cursor that emits little-endian segment
// descriptors via littleOutputSegment. Returns 0 on failure (the
// release/return tail is elided from this fragment).
545 IOMbufLittleMemoryCursor
*
546 IOMbufLittleMemoryCursor::withSpecification(UInt32 maxSegSize
, UInt32 maxNumSegs
)
548 IOMbufLittleMemoryCursor
*me
= new IOMbufLittleMemoryCursor
;
550 if (me
&& !me
->initWithSpecification(&littleOutputSegment
,
551 maxSegSize
, maxNumSegs
)) {
560 /******************** class IOMbufDBDMAMemoryCursor *********************/
// Factory: allocate and initialize a cursor that fills DBDMA descriptors via
// dbdmaOutputSegment. Returns 0 on failure (the release/return tail is
// elided from this fragment).
563 IOMbufDBDMAMemoryCursor
*
564 IOMbufDBDMAMemoryCursor::withSpecification(UInt32 maxSegSize
, UInt32 maxNumSegs
)
566 IOMbufDBDMAMemoryCursor
*me
= new IOMbufDBDMAMemoryCursor
;
568 if (me
&& !me
->initWithSpecification(&dbdmaOutputSegment
,
569 maxSegSize
, maxNumSegs
)) {