apple/xnu xnu-124.13: iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.cpp
1 /*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1998-1999 Apple Computer
24 *
25 * Implementation of the (relatively) hardware-dependent code
26 * for the Sun GEM Ethernet controller.
27 *
28 * HISTORY
29 *
30 * 10-Sept-97
31 * Created.
32 *
33 */
34 #include "UniNEnetPrivate.h"
35
36 extern void *kernel_pmap;
37
38 /*
39 * Private functions
40 */
41 bool UniNEnet::allocateMemory()
42 {
43 UInt32 rxRingSize, txRingSize;
44 UInt32 i, n;
45 UInt8 *virtAddr;
46 UInt32 physBase;
47 UInt32 physAddr;
48 TxQueueElement *txElement;
49
50 /*
51 * Calculate total space for DMA channel commands
52 */
53 txRingSize = (TX_RING_LENGTH * sizeof(enet_txdma_cmd_t) + 2048 - 1) & ~(2048-1);
54 rxRingSize = (RX_RING_LENGTH * sizeof(enet_dma_cmd_t) + 2048 - 1) & ~(2048-1);
55
56 dmaCommandsSize = round_page( txRingSize + rxRingSize );
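/*
 * Both rings share one physically contiguous allocation: the receive ring is
 * placed first and the transmit ring immediately after it, each padded up to
 * a 2 KB boundary (the alignment the GEM descriptor-ring base registers
 * appear to require).
 */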
57 /*
58 * Allocate required memory
59 */
60 if ( !dmaCommands )
61 {
62 dmaCommands = (UInt8 *)IOMallocContiguous( dmaCommandsSize, PAGE_SIZE, 0 );
63
64 if ( dmaCommands == 0 )
65 {
66 IOLog( "Ethernet(UniN): Can't allocate DMA channel commands\n\r" );
67 return false;
68 }
69 }
70
71 /*
72 * If we needed more than one page, then make sure we received contiguous memory.
73 */
74 n = (dmaCommandsSize - PAGE_SIZE) / PAGE_SIZE;
75 physBase = pmap_extract(kernel_pmap, (vm_address_t) dmaCommands);
76
77 virtAddr = (UInt8 *) dmaCommands;
78 for( i=0; i < n; i++, virtAddr += PAGE_SIZE )
79 {
80 physAddr = pmap_extract(kernel_pmap, (vm_address_t) virtAddr);
81 if (physAddr != (physBase + i * PAGE_SIZE) )
82 {
83 IOLog( "Ethernet(UniN): Can't allocate contiguous memory for DMA commands\n\r" );
84 return false;
85 }
86 }
87
88 /* Setup the receive ring pointer */
89 rxDMACommands = (enet_dma_cmd_t*)dmaCommands;
90
91 /* Setup the transmit ring pointer */
92 txDMACommands = (enet_txdma_cmd_t*)(dmaCommands + rxRingSize);
93
94
95 queue_init( &txActiveQueue );
96 queue_init( &txFreeQueue );
97
98 for ( i = 0; i < TX_MAX_MBUFS; i++ )
99 {
100 txElement = (TxQueueElement *)IOMalloc( sizeof(TxQueueElement) );
101 if ( txElement == 0 )
102 {
103 return false;
104 }
105
106 bzero( txElement, sizeof(TxQueueElement) );
107
108 releaseTxElement( txElement );
109 }
110
111 return true;
112 }
113
114 /*-------------------------------------------------------------------------
115 *
116 * Setup the Transmit Ring
117 * -----------------------
118 * Each transmit ring entry is a single GEM transmit descriptor. An outbound
119 * packet uses one descriptor per mbuf segment; descriptors are queued at
120 * txCommandTail and retired at txCommandHead, with the ring index wrapping
121 * via TX_RING_WRAP_MASK. A completion interrupt is requested only on
122 * (roughly) every TX_DESC_PER_INT-th descriptor to limit the interrupt
123 * rate.
124 *-------------------------------------------------------------------------*/
125
126 bool UniNEnet::initTxRing()
127 {
128 TxQueueElement * txElement;
129 UInt32 i;
130
131 /*
132 * Clear the transmit DMA command memory
133 */
134 bzero( (void *)txDMACommands, sizeof(enet_txdma_cmd_t) * TX_RING_LENGTH);
135 txCommandHead = 0;
136 txCommandTail = 0;
137
138 txDMACommandsPhys = pmap_extract(kernel_pmap, (vm_address_t) txDMACommands);
139
140 if ( txDMACommandsPhys == 0 )
141 {
142 IOLog( "Ethernet(UniN): Bad dma command buf - %08x\n\r",
143 (int)txDMACommands );
144 }
145
146 for ( i=0; i < TX_RING_LENGTH; i++ )
147 {
148 txElement = txElementPtrs[i];
149
150 if ( txElement && ( --txElement->count == 0 ) )
151 {
152 freePacket( txElement->mbuf );
153 releaseTxElement( txElement );
154 }
155
156 txElementPtrs[i] = 0;
157 }
158
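/* One descriptor is held back so that txCommandHead == txCommandTail can
   only mean an empty ring (the usual ring-buffer convention). */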
159 txCommandsAvail = TX_RING_LENGTH - 1;
160
161 txIntCnt = 0;
162 txWDCount = 0;
163
164 return true;
165 }
166
167 /*-------------------------------------------------------------------------
168 *
169 * Setup the Receive ring
170 * ----------------------
171 * Each receive ring entry is a single GEM receive descriptor that maps one
172 * receive mbuf. The hardware owns a descriptor while its Own bit is set and
173 * clears that bit after writing a frame into the buffer; the driver re-arms
174 * descriptors and returns them to the hardware in groups of four via the
175 * RxKick register.
176 *
177 *-------------------------------------------------------------------------*/
178
179 bool UniNEnet::initRxRing()
180 {
181 UInt32 i;
182 bool status;
183
184 /* Clear the receive DMA command memory */
185 bzero( (void*)rxDMACommands, sizeof( enet_dma_cmd_t ) * RX_RING_LENGTH );
186
187 rxDMACommandsPhys = pmap_extract(kernel_pmap, (vm_address_t) rxDMACommands);
188 if ( rxDMACommandsPhys == 0 )
189 {
190 IOLog( "Ethernet(UniN): Bad dma command buf - %08x\n\r",
191 (int) rxDMACommands );
192 return false;
193 }
194
195 /* Allocate a receive buffer for each entry in the Receive ring */
196 for ( i = 0; i < RX_RING_LENGTH; i++ )
197 {
198 if (rxMbuf[i] == NULL)
199 {
200 rxMbuf[i] = allocatePacket(NETWORK_BUFSIZE);
201 if (rxMbuf[i] == NULL)
202 {
203 IOLog("Ethernet(UniN): NULL packet in initRxRing\n");
204 return false;
205 }
206 }
207
208 /*
209 * Set the DMA commands for the ring entry to transfer data to the Mbuf.
210 */
211 status = updateDescriptorFromMbuf(rxMbuf[i], &rxDMACommands[i], true);
212 if (status == false)
213 {
214 IOLog("Ethernet(UniN): updateDescriptorFromMbuf error in "
215 "initRxRing\n");
216 return false;
217 }
218 }
219
220 /*
221 * Set the receive queue head to point to the first entry in the ring.
222 * Set the receive queue tail four entries before the end of the ring,
223 * matching the four-descriptor granularity used when writing RxKick.
224 */
225 i -= 4;
226 rxCommandHead = 0;
227 rxCommandTail = i;
228
229 return true;
230 }
231
232 /*-------------------------------------------------------------------------
233 *
234 *
235 *
236 *-------------------------------------------------------------------------*/
237
238 void UniNEnet::flushRings()
239 {
240 // Free all mbufs from the receive ring:
241
242 for ( UInt32 i = 0; i < RX_RING_LENGTH; i++ )
243 {
244 if (rxMbuf[i])
245 {
246 freePacket( rxMbuf[i] );
247 rxMbuf[i] = 0;
248 }
249 }
250
251 // Free all mbufs from the transmit ring.
252 // The TxElement is moved back to the free list.
253
254 for ( UInt32 i = 0; i < TX_RING_LENGTH; i++ )
255 {
256 TxQueueElement * txElement = txElementPtrs[i];
257 txElementPtrs[i] = 0;
258
259 if ( txElement && ( --txElement->count == 0 ) )
260 {
261 freePacket( txElement->mbuf );
262 releaseTxElement( txElement );
263 }
264 }
265 }
266
267 /*-------------------------------------------------------------------------
268 *
269 *
270 *
271 *-------------------------------------------------------------------------*/
272
273 void UniNEnet::startChip()
274 {
275 UInt32 gemReg;
276
277 // dumpRegisters();
278
279 gemReg = READ_REGISTER( TxConfiguration );
280 gemReg |= kTxConfiguration_Tx_DMA_Enable;
281 WRITE_REGISTER( TxConfiguration, gemReg );
282
283 IOSleep( 20 );
284
285 gemReg = READ_REGISTER( RxConfiguration );
286 /// gemReg |= kRxConfiguration_Rx_DMA_Enable | kRxConfiguration_Batch_Disable;
287 gemReg |= kRxConfiguration_Rx_DMA_Enable;
288 WRITE_REGISTER( RxConfiguration, gemReg );
289
290 IOSleep( 20 );
291
292 gemReg = READ_REGISTER( TxMACConfiguration );
293 gemReg |= kTxMACConfiguration_TxMac_Enable;
294 WRITE_REGISTER( TxMACConfiguration, gemReg );
295
296 IOSleep( 20 );
297
298 rxMacConfigReg = READ_REGISTER( RxMACConfiguration );
299 rxMacConfigReg |= kRxMACConfiguration_Rx_Mac_Enable;
300 WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg );
301
302 return;
303 }
304
305 /*-------------------------------------------------------------------------
306 *
307 *
308 *
309 *-------------------------------------------------------------------------*/
310
311 void UniNEnet::stopChip()
312 {
313 UInt32 gemReg;
314
315 gemReg = READ_REGISTER( TxConfiguration );
316 gemReg &= ~kTxConfiguration_Tx_DMA_Enable;
317 WRITE_REGISTER( TxConfiguration, gemReg );
318
319 IOSleep( 20 );
320
321 gemReg = READ_REGISTER( RxConfiguration );
322 gemReg &= ~kRxConfiguration_Rx_DMA_Enable;
323 WRITE_REGISTER( RxConfiguration, gemReg );
324
325 IOSleep( 20 );
326
327 gemReg = READ_REGISTER( TxMACConfiguration );
328 gemReg &= ~kTxMACConfiguration_TxMac_Enable;
329 WRITE_REGISTER( TxMACConfiguration, gemReg );
330
331 IOSleep( 20 );
332
333 rxMacConfigReg = READ_REGISTER( RxMACConfiguration );
334 rxMacConfigReg &= ~kRxMACConfiguration_Rx_Mac_Enable;
335 WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg );
336
337 return;
338 }
339
340
341
342 /*-------------------------------------------------------------------------
343 *
344 *
345 *
346 *-------------------------------------------------------------------------*/
347
348 bool UniNEnet::resetChip()
349 {
350 UInt32 resetReg;
351 UInt16 * pPhyType;
352 UInt16 phyWord;
353
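/* The TX and RX reset bits are self-clearing; spin below until the chip
   reports that both resets have completed. */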
354 WRITE_REGISTER( SoftwareReset, kSoftwareReset_TX | kSoftwareReset_RX );
355 do
356 {
357 resetReg = READ_REGISTER( SoftwareReset );
358 }
359 while( resetReg & (kSoftwareReset_TX | kSoftwareReset_RX) );
360
361 /*
362 * Determine if PHY chip is configured. Reset and enable it (if present).
363 */
364 if ( phyId == 0xff )
365 {
366 /*
367 * Generate a hardware PHY reset.
368 */
369 resetPHYChip();
370
371 if ( miiFindPHY(&phyId) == true )
372 {
373 miiResetPHY( phyId );
374
375 pPhyType = (UInt16 *)&phyType;
376 miiReadWord( pPhyType, MII_ID0, phyId );
377 miiReadWord( pPhyType+1, MII_ID1, phyId );
378 if ( ((phyType & MII_BCM5400_MASK) == MII_BCM5400_ID)
379 || (((phyType & MII_BCM5400_MASK) == MII_BCM5401_ID)) ) /// mlj temporary quick fix
380 {
381 phyBCMType = 5400;
382
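/* BCM5400 bring-up: besides enabling full-duplex gigabit capability, the
   sequence below resets the device at MII address 0x1F and sets its
   BCM5201 MULTIPHY serial-mode bit, which appears to configure a
   companion BCM5201 part. */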
383 miiReadWord( &phyWord, MII_BCM5400_AUXCONTROL, phyId );
384 phyWord |= MII_BCM5400_AUXCONTROL_PWR10BASET;
385 miiWriteWord( phyWord, MII_BCM5400_AUXCONTROL, phyId );
386
387 miiReadWord( &phyWord, MII_BCM5400_1000BASETCONTROL, phyId );
388 phyWord |= MII_BCM5400_1000BASETCONTROL_FULLDUPLEXCAP;
389 miiWriteWord( phyWord, MII_BCM5400_1000BASETCONTROL, phyId );
390
391 IODelay(100);
392
393 miiResetPHY( 0x1F );
394
395 miiReadWord( &phyWord, MII_BCM5201_MULTIPHY, 0x1F );
396 phyWord |= MII_BCM5201_MULTIPHY_SERIALMODE;
397 miiWriteWord( phyWord, MII_BCM5201_MULTIPHY, 0x1F );
398
399 miiReadWord( &phyWord, MII_BCM5400_AUXCONTROL, phyId );
400 phyWord &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
401 miiWriteWord( phyWord, MII_BCM5400_AUXCONTROL, phyId );
402
403 }
404 else if ( (phyType & MII_BCM5201_MASK) == MII_BCM5201_ID )
405 {
406 phyBCMType = 5201;
407 }
408 else
409 {
410 phyBCMType = 0;
411 }
412 // IOLog("DEBUG:UniNEnet: phy type = %d\n", phyBCMType);
413 }
414 }
415
416 return true;
417 }
418
419 /*-------------------------------------------------------------------------
420 *
421 *
422 *
423 *-------------------------------------------------------------------------*/
424
425 bool UniNEnet::initChip()
426 {
427 UInt32 i, temp;
428 mach_timespec_t timeStamp;
429 UInt32 rxFifoSize;
430 UInt32 rxOff;
431 UInt32 rxOn;
432 u_int16_t *p16;
433
434 if ( phyId == 0xff )
435 {
436 WRITE_REGISTER( DatapathMode, kDatapathMode_ExtSERDESMode );
437 WRITE_REGISTER( SerialinkControl, kSerialinkControl_DisableLoopback
438 | kSerialinkControl_EnableSyncDet );
439 WRITE_REGISTER( Advertisement, kAdvertisement_Full_Duplex
440 | kAdvertisement_PAUSE );
441 WRITE_REGISTER( PCSMIIControl, kPCSMIIControl_Auto_Negotiation_Enable
442 | kPCSMIIControl_Restart_Auto_Negotiation );
443 WRITE_REGISTER( PCSConfiguration, kPCSConfiguration_Enable );
444 WRITE_REGISTER( XIFConfiguration, kXIFConfiguration_Tx_MII_OE
445 | kXIFConfiguration_GMIIMODE
446 | kXIFConfiguration_FDPLXLED );
447 }
448 else
449 {
450 WRITE_REGISTER( DatapathMode, kDatapathMode_GMIIMode );
451 WRITE_REGISTER( XIFConfiguration, kXIFConfiguration_Tx_MII_OE
452 | kXIFConfiguration_FDPLXLED );
453 }
454
455 WRITE_REGISTER( SendPauseCommand, kSendPauseCommand_default );
456 WRITE_REGISTER( MACControlConfiguration,kMACControlConfiguration_Receive_Pause_Enable );
457 WRITE_REGISTER( InterruptMask, kInterruptMask_None );
458 WRITE_REGISTER( TxMACMask, kTxMACMask_default );
459 WRITE_REGISTER( RxMACMask, kRxMACMask_default );
460 WRITE_REGISTER( MACControlMask, kMACControlMask_default );
461 WRITE_REGISTER( Configuration, kConfiguration_TX_DMA_Limit
462 | kConfiguration_RX_DMA_Limit
463 | kConfiguration_Infinite_Burst );
464
465 WRITE_REGISTER( InterPacketGap0, kInterPacketGap0_default );
466 WRITE_REGISTER( InterPacketGap1, kInterPacketGap1_default );
467 WRITE_REGISTER( InterPacketGap2, kInterPacketGap2_default );
468 WRITE_REGISTER( SlotTime, kSlotTime_default );
469 WRITE_REGISTER( MinFrameSize, kMinFrameSize_default );
470 WRITE_REGISTER( MaxFrameSize, kMaxFrameSize_default );
471 WRITE_REGISTER( PASize, kPASize_default );
472 WRITE_REGISTER( JamSize, kJamSize_default );
473 WRITE_REGISTER( AttemptLimit, kAttemptLimit_default );
474 WRITE_REGISTER( MACControlType, kMACControlType_default );
475
476 p16 = (u_int16_t *) myAddress.bytes;
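/* The station address is written as three 16-bit words; note the reversed
   word order (p16[2 - i]) that the MACAddress registers apparently expect. */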
477 for ( i=0; i < sizeof(IOEthernetAddress) / 2; i++ )
478 WRITE_REGISTER( MACAddress[ i ], p16[ 2 - i ] );
479
480 for ( i=0; i < 3; i ++ )
481 {
482 WRITE_REGISTER( MACAddress[ i + 3 ], 0 );
483 WRITE_REGISTER( AddressFilter[ i ], 0 );
484 }
485
486 WRITE_REGISTER( MACAddress[ 6 ], kMACAddress_default_6 );
487 WRITE_REGISTER( MACAddress[ 7 ], kMACAddress_default_7 );
488 WRITE_REGISTER( MACAddress[ 8 ], kMACAddress_default_8 );
489
490 WRITE_REGISTER( AddressFilter2_1Mask, 0 );
491 WRITE_REGISTER( AddressFilter0Mask, 0 );
492
493 for ( i=0; i < 16; i++ )
494 WRITE_REGISTER( HashTable[ i ], 0 );
495
496 WRITE_REGISTER( NormalCollisionCounter, 0 );
497 WRITE_REGISTER( FirstAttemptSuccessfulCollisionCounter, 0 );
498 WRITE_REGISTER( ExcessiveCollisionCounter, 0 );
499 WRITE_REGISTER( LateCollisionCounter, 0 );
500 WRITE_REGISTER( DeferTimer, 0 );
501 WRITE_REGISTER( PeakAttempts, 0 );
502 WRITE_REGISTER( ReceiveFrameCounter, 0 );
503 WRITE_REGISTER( LengthErrorCounter, 0 );
504 WRITE_REGISTER( AlignmentErrorCounter, 0 );
505 WRITE_REGISTER( FCSErrorCounter, 0 );
506 WRITE_REGISTER( RxCodeViolationErrorCounter, 0 );
507
508 IOGetTime(&timeStamp);
509 WRITE_REGISTER( RandomNumberSeed, timeStamp.tv_nsec & 0xFFFF );
510
511 WRITE_REGISTER( TxDescriptorBaseLow, txDMACommandsPhys );
512 WRITE_REGISTER( TxDescriptorBaseHigh, 0 );
513
514 temp = kTxConfiguration_TxFIFO_Threshold
515 | TX_RING_LENGTH_FACTOR << kTxConfiguration_Tx_Desc_Ring_Size_Shift;
516 WRITE_REGISTER( TxConfiguration, temp );
517
518 WRITE_REGISTER( TxMACConfiguration, 0 );
519
520 setDuplexMode( (phyId == 0xff) ? true : false );
521
522 WRITE_REGISTER( RxDescriptorBaseLow, rxDMACommandsPhys );
523 WRITE_REGISTER( RxDescriptorBaseHigh, 0 );
524
525 WRITE_REGISTER( RxKick, RX_RING_LENGTH - 4 );
526
527 temp = kRxConfiguration_RX_DMA_Threshold
528 /// | kRxConfiguration_Batch_Disable may cause 4x primary interrupts
529 | RX_RING_LENGTH_FACTOR << kRxConfiguration_Rx_Desc_Ring_Size_Shift;
530 WRITE_REGISTER( RxConfiguration, temp );
531
532 rxMacConfigReg = 0;
533 WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg );
534
535 rxFifoSize = READ_REGISTER( RxFIFOSize );
536
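/*
 * Pause (flow-control) thresholds: the OFF threshold leaves roughly two
 * maximum-size frames of headroom in the RX FIFO and the ON threshold
 * roughly three, with both values expressed in kPauseThresholds_Factor
 * units as the hardware expects.
 */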
537 rxOff = rxFifoSize - ((kGEMMacMaxFrameSize_Aligned + 8) * 2 / kPauseThresholds_Factor);
538 rxOn = rxFifoSize - ((kGEMMacMaxFrameSize_Aligned + 8) * 3 / kPauseThresholds_Factor);
539
540 WRITE_REGISTER( PauseThresholds,
541 (rxOff << kPauseThresholds_OFF_Threshold_Shift)
542 | (rxOn << kPauseThresholds_ON_Threshold_Shift) );
543
544 temp = READ_REGISTER( BIFConfiguration );
545 if ( temp & kBIFConfiguration_M66EN )
546 temp = kRxBlanking_default_66;
547 else temp = kRxBlanking_default_33;
548 WRITE_REGISTER( RxBlanking, temp );
549
550 return true;
551 }/* end initChip */
552
553 /*-------------------------------------------------------------------------
554 *
555 *
556 *
557 *-------------------------------------------------------------------------*/
558
559 void UniNEnet::disableAdapterInterrupts()
560 {
561
562 WRITE_REGISTER( InterruptMask, kInterruptMask_None );
563 return;
564 }
565
566 /*-------------------------------------------------------------------------
567 *
568 *
569 *
570 *-------------------------------------------------------------------------*/
571
572 void UniNEnet::enableAdapterInterrupts()
573 {
574 UInt32 gemReg;
575
576
577 gemReg = READ_REGISTER( InterruptMask );
578 gemReg &= ~( kStatus_TX_INT_ME | kStatus_RX_DONE );
579 WRITE_REGISTER( InterruptMask, gemReg );
580 return;
581 }
582
583 /*-------------------------------------------------------------------------
584 *
585 *
586 *
587 *-------------------------------------------------------------------------*/
588
589 void UniNEnet::setDuplexMode( bool duplexMode )
590 {
591 UInt32 txMacConfig;
592 UInt32 xifConfig;
593
594
595 isFullDuplex = duplexMode;
596 txMacConfig = READ_REGISTER( TxMACConfiguration );
597
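/* Quiesce the TX MAC before changing its configuration: clear the enable
   bit and poll until the hardware acknowledges it (the same disable-then-poll
   restriction noted for the RX MAC in restartReceiver). */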
598 WRITE_REGISTER( TxMACConfiguration, txMacConfig & ~kTxMACConfiguration_TxMac_Enable );
599 while( READ_REGISTER( TxMACConfiguration ) & kTxMACConfiguration_TxMac_Enable )
600 ;
601
602 xifConfig = READ_REGISTER( XIFConfiguration );
603
604 if ( isFullDuplex )
605 {
606 txMacConfig |= (kTxMACConfiguration_Ignore_Collisions | kTxMACConfiguration_Ignore_Carrier_Sense);
607 xifConfig &= ~kXIFConfiguration_Disable_Echo;
608 }
609 else
610 {
611 txMacConfig &= ~(kTxMACConfiguration_Ignore_Collisions | kTxMACConfiguration_Ignore_Carrier_Sense);
612 xifConfig |= kXIFConfiguration_Disable_Echo;
613 }
614
615 WRITE_REGISTER( TxMACConfiguration, txMacConfig );
616 WRITE_REGISTER( XIFConfiguration, xifConfig );
617 return;
618 }
619
620
621 /*-------------------------------------------------------------------------
622 *
623 *
624 *
625 *-------------------------------------------------------------------------*/
626
627 void UniNEnet::restartTransmitter()
628 {
629 }
630
631 /*-------------------------------------------------------------------------
632 *
633 *
634 *
635 *-------------------------------------------------------------------------*/
636
637 void UniNEnet::restartReceiver()
638 {
639 // Perform a software reset to the logic in the RX MAC.
640 // The MAC config register should be re-programmed following
641 // the reset. Everything else *should* be unaffected.
642
643 WRITE_REGISTER( RxMACSoftwareResetCommand, kRxMACSoftwareResetCommand_Reset );
644
645 // Poll until the reset bit is cleared by the hardware.
646
647 for ( int i = 0; i < 5000; i++ )
648 {
649 if ( ( READ_REGISTER( RxMACSoftwareResetCommand )
650 & kRxMACSoftwareResetCommand_Reset ) == 0 )
651 {
652 break; // 'i' is always 0 or 1
653 }
654 IODelay(1);
655 }
656
657 // Update the MAC Config register. Watch out for the programming
658 // restrictions documented in the GEM specification!!!
659 //
660 // Disable MAC before setting any other bits in the MAC config
661 // register.
662
663 WRITE_REGISTER( RxMACConfiguration, 0 );
664
665 for ( int i = 0; i < 5000; i++ )
666 {
667 if ( ( READ_REGISTER( RxMACConfiguration )
668 & kRxMACConfiguration_Rx_Mac_Enable ) == 0 )
669 {
670 break; // 'i' is always 0
671 }
672 IODelay(1);
673 }
674
675 // Update MAC config register.
676
677 WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg );
678 return;
679 }/* end restartReceiver */
680
681
682 /*-------------------------------------------------------------------------
683 *
684 * Orderly stop of receive DMA.
685 *
686 *
687 *-------------------------------------------------------------------------*/
688
689 void UniNEnet::stopReceiveDMA()
690 {
691 }
692
693 /*-------------------------------------------------------------------------
694 *
695 *
696 *
697 *-------------------------------------------------------------------------*/
698
699 void UniNEnet::stopTransmitDMA()
700 {
701 }
702
703 /*-------------------------------------------------------------------------
704 *
705 *
706 *
707 *-------------------------------------------------------------------------*/
708
709 bool UniNEnet::transmitPacket(struct mbuf *packet)
710 {
711 GEMTxDescriptor *dp; // descriptor pointer
712 UInt32 i,j,k;
713 struct mbuf *m;
714 TxQueueElement *txElement;
715 UInt32 dataPhys;
716
717
718 for ( m = packet, i=1; m->m_next; m=m->m_next, i++ )
719 ;
720
721
722 if ( i > txCommandsAvail )
723 {
724 return false;
725 }
726
727 if ( (txElement=getTxElement()) == 0 )
728 {
729 return false;
730 }
731
732 j = txCommandTail;
733
734 txElement->mbuf = packet;
735 txElement->slot = j;
736 txElement->count = i;
737
738 OSAddAtomic( -i, (SInt32*)&txCommandsAvail );
739
740 m = packet;
741
742 do
743 {
744 k = j;
745
746 txElementPtrs[j] = txElement;
747
748 dataPhys = (UInt32)mcl_to_paddr( mtod(m, char *) );
749 if ( dataPhys == 0 )
750 dataPhys = pmap_extract( kernel_pmap, mtod(m, vm_offset_t) );
751
752 dp = &txDMACommands[ j ].desc_seg[ 0 ];
753 OSWriteLittleInt32( &dp->bufferAddrLo, 0, dataPhys );
754 OSWriteLittleInt32( &dp->flags0, 0, m->m_len );
755 dp->flags1 = 0;
756 txIntCnt++;
757 j = (j + 1) & TX_RING_WRAP_MASK;
758 }
759 while ( (m=m->m_next) != 0 );
760
761 txDMACommands[ k ].desc_seg[ 0 ].flags0 |= OSSwapHostToLittleConstInt32( kGEMTxDescFlags0_EndOfFrame );
762 txDMACommands[ txCommandTail ].desc_seg[ 0 ].flags0 |= OSSwapHostToLittleConstInt32( kGEMTxDescFlags0_StartOfFrame );
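// Interrupt mitigation: request a TX completion interrupt only about once
// every TX_DESC_PER_INT descriptors rather than for every packet.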
763 if ( txIntCnt >= TX_DESC_PER_INT )
764 {
765 txDMACommands[ txCommandTail ].desc_seg[ 0 ].flags1 |= OSSwapHostToLittleConstInt32( kGEMTxDescFlags1_Int );
766 txIntCnt = txIntCnt % TX_DESC_PER_INT;
767 }
768 txCommandTail = j;
769
770 WRITE_REGISTER( TxKick, j );
771
772 return true;
773 }/* end transmitPacket */
774
775
776 /*-------------------------------------------------------------------------
777 * _receivePacket
778 * --------------
779 * This routine runs the receiver in polled-mode (yuk!) for the kernel debugger.
780 * Don't touch the interrupt source here; that can deadlock the debugger.
781 *
782 * _receivePackets normally allocates mbufs and passes them up the stack. The kernel
783 * debugger interface passes a buffer into us. To reconcile the two interfaces,
784 * we allow the receive routine to continue to allocate its own buffers and
785 * transfer any received data to the passed-in buffer. This is handled by
786 * _receivePacket calling _packetToDebugger.
787 *-------------------------------------------------------------------------*/
788
789 void UniNEnet::receivePacket( void * pkt,
790 UInt32 * pkt_len,
791 UInt32 timeout )
792 {
793 mach_timespec_t startTime;
794 mach_timespec_t currentTime;
795 UInt32 elapsedTimeMS;
796
797 *pkt_len = 0;
798
799 if (ready == false)
800 {
801 return;
802 }
803
804 debuggerPkt = pkt;
805 debuggerPktSize = 0;
806
807 IOGetTime(&startTime);
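/* Poll the receive ring until a frame is handed to the debugger or the
   timeout expires. (Only tv_nsec is compared, so the elapsed-time figure
   can wrap if polling crosses a one-second boundary.) */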
808 do
809 {
810 receivePackets( true );
811 IOGetTime( &currentTime );
812 elapsedTimeMS = (currentTime.tv_nsec - startTime.tv_nsec) / (1000*1000);
813 }
814 while ( (debuggerPktSize == 0) && (elapsedTimeMS < timeout) );
815
816 *pkt_len = debuggerPktSize;
817
818 return;
819 }
820
821 /*-------------------------------------------------------------------------
822 * _packetToDebugger
823 * -----------------
824 * This is called by _receivePackets when we are polling for kernel debugger
825 * packets. It copies the MBuf contents to the buffer passed by the debugger.
826 * It also sets the var debuggerPktSize which will break the polling loop.
827 *-------------------------------------------------------------------------*/
828
829 void UniNEnet::packetToDebugger( struct mbuf * packet, u_int size )
830 {
831 debuggerPktSize = size;
832 bcopy( mtod(packet, char *), debuggerPkt, size );
833 }
834
835 /*-------------------------------------------------------------------------
836 * _sendPacket
837 * -----------
838 *
839 * This routine runs the transmitter in polled-mode (yuk!) for the kernel debugger.
840 * Don't touch the interrupt source here; that can deadlock the debugger.
841 *
842 *-------------------------------------------------------------------------*/
843
844 void UniNEnet::sendPacket( void *pkt, UInt32 pkt_len )
845 {
846 mach_timespec_t startTime;
847 mach_timespec_t currentTime;
848 UInt32 elapsedTimeMS;
849
850 if (!ready || !pkt || (pkt_len > ETHERMAXPACKET))
851 {
852 return;
853 }
854
855 /*
856 * Wait for the transmit ring to empty
857 */
858 IOGetTime(&startTime);
859 do
860 {
861 debugTransmitInterruptOccurred();
862 IOGetTime(&currentTime);
863 elapsedTimeMS = (currentTime.tv_nsec - startTime.tv_nsec) / (1000*1000);
864 }
865 while ( (txCommandHead != txCommandTail) && (elapsedTimeMS < TX_KDB_TIMEOUT) );
866
867 if ( txCommandHead != txCommandTail )
868 {
869 IOLog( "Ethernet(UniN): Polled transmit timeout - 1\n\r");
870 return;
871 }
872
873 /*
874 * Allocate a MBuf and copy the debugger transmit data into it.
875 *
876 * jliu - no allocation, just recycle the same buffer dedicated to
877 * KDB transmit.
878 */
879 txDebuggerPkt->m_next = 0;
880 txDebuggerPkt->m_data = (caddr_t) pkt;
881 txDebuggerPkt->m_pkthdr.len = txDebuggerPkt->m_len = pkt_len;
882
883 /*
884 * Send the debugger packet. txDebuggerPkt must not be freed by
885 * the transmit routine.
886 */
887 transmitPacket(txDebuggerPkt);
888
889 /*
890 * Poll waiting for the transmit ring to empty again
891 */
892 do
893 {
894 debugTransmitInterruptOccurred();
895 IOGetTime(&currentTime);
896 elapsedTimeMS = (currentTime.tv_nsec - startTime.tv_nsec) / (1000*1000);
897 }
898 while ( (txCommandHead != txCommandTail) &&
899 (elapsedTimeMS < TX_KDB_TIMEOUT) );
900
901 if ( txCommandHead != txCommandTail )
902 {
903 IOLog( "Ethernet(UniN): Polled transmit timeout - 2\n\r");
904 }
905
906 return;
907 }
908
909 /*-------------------------------------------------------------------------
910 * _sendDummyPacket
911 * ----------------
912 * The UniN receiver seems to be locked until we send our first packet.
913 *
914 *-------------------------------------------------------------------------*/
915 void UniNEnet::sendDummyPacket()
916 {
917 union
918 {
919 UInt8 bytes[64];
920 IOEthernetAddress enet_addr[2];
921 } dummyPacket;
922
923 bzero( &dummyPacket, sizeof(dummyPacket) );
924
925
926 dummyPacket.enet_addr[0] = myAddress;
927 dummyPacket.enet_addr[1] = myAddress;
928
929 sendPacket((void *)dummyPacket.bytes, (unsigned int)sizeof(dummyPacket));
930 }
931
932
933
934 /*-------------------------------------------------------------------------
935 *
936 *
937 *
938 *-------------------------------------------------------------------------*/
939
940 bool UniNEnet::receiveInterruptOccurred()
941 {
942 return receivePackets(false);
943 }
944
945 /*-------------------------------------------------------------------------
946 *
947 *
948 *
949 *-------------------------------------------------------------------------*/
950
951 bool UniNEnet::receivePackets( bool fDebugger )
952 {
953 struct mbuf * packet;
954 UInt32 i,last;
955 int receivedFrameSize = 0;
956 UInt16 dmaFlags;
957 UInt32 rxPktStatus = 0;
958 bool passPacketUp;
959 bool reusePkt;
960 bool status;
961 bool useNetif = !fDebugger && netifEnabled;
962 bool packetsQueued = false;
963
964
965 last = (UInt32)-1;
966 i = rxCommandHead;
967
968 while ( 1 )
969 {
970 passPacketUp = false;
971 reusePkt = false;
972
973 dmaFlags = OSReadLittleInt16( &rxDMACommands[ i ].desc_seg[ 0 ].frameDataSize, 0 );
974
975 /*
976 * If the current entry has not been written, then stop at this entry
977 */
978 if ( dmaFlags & kGEMRxDescFrameSize_Own )
979 {
980 break;
981 }
982
983
984 receivedFrameSize = dmaFlags & kGEMRxDescFrameSize_Mask;
985 rxPktStatus = OSReadLittleInt32( &rxDMACommands[ i ].desc_seg[ 0 ].flags, 0 );
986
987
988 /*
989 * Reject packets that are runts or that have other mutations.
990 */
991 if ( receivedFrameSize < (ETHERMINPACKET - ETHERCRC) ||
992 receivedFrameSize > (ETHERMAXPACKET + ETHERCRC) ||
993 rxPktStatus & kGEMRxDescFlags_BadCRC )
994 {
995 reusePkt = true;
996 NETWORK_STAT_ADD( inputErrors );
997 if ( receivedFrameSize < (ETHERMINPACKET - ETHERCRC) )
998 ETHERNET_STAT_ADD( dot3RxExtraEntry.frameTooShorts );
999 else ETHERNET_STAT_ADD( dot3StatsEntry.frameTooLongs );
1000 }
1001 else if ( useNetif == false )
1002 {
1003 /*
1004 * Always reuse packets in debugger mode. We also refuse to
1005 * pass anything up the stack unless the driver is open. The
1006 * hardware is enabled before the stack has opened us, to
1007 * allow earlier debug interface registration. But we must
1008 * not pass any packets up.
1009 */
1010 reusePkt = true;
1011 if (fDebugger)
1012 {
1013 packetToDebugger(rxMbuf[i], receivedFrameSize);
1014 }
1015 }
1016
1017
1018 /*
1019 * Before we pass this packet up the networking stack, make sure we
1020 * can get a replacement. Otherwise, hold on to the current packet and
1021 * increment the input error count.
1022 * Thanks Justin!
1023 */
1024
1025 packet = 0;
1026
1027 if ( reusePkt == false )
1028 {
1029 bool replaced;
1030
1031 packet = replaceOrCopyPacket(&rxMbuf[i], receivedFrameSize, &replaced);
1032
1033 reusePkt = true;
1034
1035 if (packet && replaced)
1036 {
1037 status = updateDescriptorFromMbuf(rxMbuf[i], &rxDMACommands[i], true);
1038
1039 if (status)
1040 {
1041 reusePkt = false;
1042 }
1043 else
1044 {
1045 // Assume descriptor has not been corrupted.
1046 freePacket(rxMbuf[i]); // release new packet.
1047 rxMbuf[i] = packet; // get the old packet back.
1048 packet = 0; // pass up nothing.
1049 IOLog("Ethernet(UniN): updateDescriptorFromMbuf error\n");
1050 }
1051 }
1052
1053 if ( packet == 0 )
1054 NETWORK_STAT_ADD( inputErrors );
1055 }
1056
1057 /*
1058 * If the received mbuf is being reused, hand its descriptor back to the hardware: clear the flags and set the Own bit again.
1059 */
1060
1061 if ( reusePkt == true )
1062 {
1063 rxDMACommands[i].desc_seg[0].flags = 0;
1064 rxDMACommands[i].desc_seg[0].frameDataSize = OSSwapHostToLittleConstInt16( NETWORK_BUFSIZE | kGEMRxDescFrameSize_Own );
1065 }
1066
1067 last = i; /* Keep track of the last receive descriptor processed */
1068 i = (i + 1) & RX_RING_WRAP_MASK;
1069
1070 if ( (i & 3) == 0 ) // only kick modulo 4
1071 {
1072 WRITE_REGISTER( RxKick, (i - 4) & RX_RING_WRAP_MASK );
1073 }
1074
1075 if (fDebugger)
1076 {
1077 break;
1078 }
1079
1080 /*
1081 * Transfer received packet to network
1082 */
1083 if (packet)
1084 {
1085 KERNEL_DEBUG(DBG_UniN_RXCOMPLETE | DBG_FUNC_NONE, (int) packet,
1086 (int)receivedFrameSize, 0, 0, 0 );
1087
1088 networkInterface->inputPacket(packet, receivedFrameSize, true);
1089 NETWORK_STAT_ADD( inputPackets );
1090 packetsQueued = true;
1091 }
1092 }/* end WHILE */
1093
1094 if ( last != (UInt32)-1 )
1095 {
1096 rxCommandTail = last;
1097 rxCommandHead = i;
1098 }
1099
1100 return packetsQueued;
1101 }/* end receivePackets */
1102
1103
1104 /*-------------------------------------------------------------------------
1105 *
1106 *
1107 *
1108 *-------------------------------------------------------------------------*/
1109
1110 bool UniNEnet::transmitInterruptOccurred()
1111 {
1112 UInt32 i;
1113 bool serviced = false;
1114 TxQueueElement *txElement;
1115
1116
1117 i = READ_REGISTER( TxCompletion );
1118
1119 while ( i != txCommandHead ) // i and txCommandHead race each other
1120 {
1121 do // This DO reduces READ_REGISTER calls which access the PCI bus
1122 { /* Free the MBuf we just transmitted */
1123
1124 txElement = txElementPtrs[ txCommandHead ];
1125
1126 KERNEL_DEBUG( DBG_UniN_TXCOMPLETE | DBG_FUNC_NONE,
1127 (int)txElement->mbuf, 0, 0, 0, 0 );
1128
1129 txElementPtrs[ txCommandHead ] = 0;
1130 OSIncrementAtomic( (SInt32*)&txCommandsAvail );
1131
1132 if ( --txElement->count == 0 )
1133 {
1134 freePacket( txElement->mbuf, kDelayFree );
1135 releaseTxElement( txElement );
1136 NETWORK_STAT_ADD( outputPackets );
1137 }
1138
1139 txCommandHead = (txCommandHead + 1) & TX_RING_WRAP_MASK;
1140
1141 } while ( i != txCommandHead ); // loop til txCommandHead catches i
1142
1143 serviced = true;
1144 i = READ_REGISTER( TxCompletion ); // see if i advanced during last batch
1145 }/* end WHILE */
1146
1147 // Release all packets in the free queue.
1148 releaseFreePackets();
1149 return serviced;
1150 }/* end transmitInterruptOccurred */
1151
1152
1153 /*-------------------------------------------------------------------------
1154 *
1155 *
1156 *
1157 *-------------------------------------------------------------------------*/
1158
1159 bool UniNEnet::debugTransmitInterruptOccurred()
1160 {
1161 bool fServiced = false;
1162 UInt32 i;
1163 TxQueueElement * txElement;
1164
1165 // Set the debugTxPoll flag to indicate the debugger was active
1166 // and some cleanup may be needed when the driver returns to
1167 // normal operation.
1168 //
1169 debugTxPoll = true;
1170
1171 i = READ_REGISTER( TxCompletion );
1172
1173 while ( i != txCommandHead )
1174 {
1175 fServiced = true;
1176
1177 /*
1178 * Free the mbuf we just transmitted.
1179 *
1180 * If it is the debugger packet, just remove it from the ring.
1181 * and reuse the same packet for the next sendPacket() request.
1182 */
1183 * If it is the debugger packet, just remove it from the ring and
1184 * reuse the same packet for the next sendPacket() request.
1185 * While in debugger mode, do not touch the mbuf pool.
1186 * Queue any used mbufs to a local queue. This queue
1187 * will get flushed after we exit from debugger mode.
1188 *
1189 * During continuous debugger transmission and
1190 * interrupt polling, we expect only the txDebuggerPkt
1191 * to show up on the transmit mbuf ring.
1192 */
1193 txElement = txElementPtrs[txCommandHead];
1194 txElementPtrs[txCommandHead] = 0;
1195 OSIncrementAtomic( (SInt32*)&txCommandsAvail );
1196
1197 KERNEL_DEBUG( DBG_UniN_TXCOMPLETE | DBG_FUNC_NONE,
1198 (int) txElement->mbuf,
1199 (int) txElement->mbuf->m_pkthdr.len, 0, 0, 0 );
1200
1201 if ( --txElement->count == 0 )
1202 {
1203 if (txElement->mbuf != txDebuggerPkt)
1204 {
1205 debugQueue->enqueue( txElement->mbuf );
1206 }
1207 releaseTxElement( txElement );
1208 }
1209
1210 txCommandHead = (txCommandHead + 1) & TX_RING_WRAP_MASK;
1211 }
1212
1213 return fServiced;
1214 }
1215
1216 /*-------------------------------------------------------------------------
1217 *
1218 *
1219 *
1220 *-------------------------------------------------------------------------*/
1221
1222 void UniNEnet::debugTransmitCleanup()
1223 {
1224 // Debugger was active, clear all packets in the debugQueue, and
1225 // issue a start(), just in case the debugger became active while the
1226 // ring was full and the output queue stopped. The debugger itself
1227 // does not restart the output queue (that would require calling
1228 // semaphore_signal(), which may re-enable interrupts), so we must
1229 // make sure the output queue is not left stalled after the debugger
1230 // has flushed the ring.
1231
1232 debugQueue->flush();
1233
1234 transmitQueue->start();
1235 }
1236
1237
1238 /*-------------------------------------------------------------------------
1239 *
1240 *
1241 *
1242 *-------------------------------------------------------------------------*/
1243
1244 bool UniNEnet::updateDescriptorFromMbuf(struct mbuf * m, enet_dma_cmd_t *desc, bool isReceive)
1245 {
1246 struct IOPhysicalSegment segVector[1];
1247 UInt32 segments;
1248
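/* Each GEM descriptor maps exactly one buffer, so coalesce the mbuf (chain)
   into a single physical segment before filling in the descriptor. */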
1249 segments = mbufCursor->getPhysicalSegmentsWithCoalesce(m, segVector);
1250
1251 if ( segments == 0 || segments > 1 )
1252 {
1253 IOLog("Ethernet(UniN): updateDescriptorFromMbuf error, %d segments\n", (int)segments);
1254 return false;
1255 }
1256
1257 if ( isReceive )
1258 {
1259 enet_dma_cmd_t *rxCmd = (enet_dma_cmd_t *)desc;
1260
1261 OSWriteLittleInt32( &rxCmd->desc_seg[0].bufferAddrLo, 0, segVector[0].location );
1262 OSWriteLittleInt16( &rxCmd->desc_seg[0].frameDataSize, 0, segVector[0].length | kGEMRxDescFrameSize_Own );
1263 rxCmd->desc_seg[0].flags = 0;
1264 }
1265 else
1266 {
1267 enet_txdma_cmd_t *txCmd = (enet_txdma_cmd_t *)desc;
1268
1269 OSWriteLittleInt32( &txCmd->desc_seg[0].bufferAddrLo, 0, segVector[0].location );
1270 OSWriteLittleInt32( &txCmd->desc_seg[0].flags0, 0, segVector[0].length
1271 | kGEMTxDescFlags0_StartOfFrame
1272 | kGEMTxDescFlags0_EndOfFrame );
1273
1274 txCmd->desc_seg[0].flags1 = 0;
1275 txIntCnt += 1;
1276 if ( (txIntCnt % TX_DESC_PER_INT) == 0 ) /// Divide???
1277 txCmd->desc_seg[0].flags1 = OSSwapHostToLittleConstInt32( kGEMTxDescFlags1_Int );
1278 }
1279
1280 return true;
1281 }/* end updateDescriptorFromMbuf */
1282
1283 /*-------------------------------------------------------------------------
1284 *
1285 *
1286 *
1287 *-------------------------------------------------------------------------*/
1288
1289 TxQueueElement * UniNEnet::getTxElement()
1290 {
1291 TxQueueElement * txElement = 0;
1292
1293 IOSimpleLockLock( txQueueLock );
1294
1295 if ( queue_empty( &txFreeQueue ) == false )
1296 {
1297 queue_remove_first( &txFreeQueue, txElement, TxQueueElement *, next );
1298
1299 txElement->list = &txActiveQueue;
1300
1301 queue_enter( txElement->list, txElement, TxQueueElement *, next );
1302 }
1303
1304 IOSimpleLockUnlock( txQueueLock );
1305
1306 return txElement;
1307 }
1308
1309 /*-------------------------------------------------------------------------
1310 *
1311 *
1312 *
1313 *-------------------------------------------------------------------------*/
1314
1315 void UniNEnet::releaseTxElement(TxQueueElement * txElement)
1316 {
1317 IOSimpleLockLock( txQueueLock );
1318
1319 if ( txElement->list != 0 )
1320 {
1321 queue_remove( txElement->list, txElement, TxQueueElement *, next );
1322 }
1323
1324 txElement->list = &txFreeQueue;
1325
1326 queue_enter( txElement->list, txElement, TxQueueElement *, next);
1327
1328 IOSimpleLockUnlock( txQueueLock );
1329 }
1330
1331 /*-------------------------------------------------------------------------
1332 *
1333 *
1334 *
1335 *-------------------------------------------------------------------------*/
1336
1337 void UniNEnet::monitorLinkStatus( bool firstPoll )
1338 {
1339 UInt32 gemReg;
1340 UInt16 phyStatus;
1341 UInt16 linkStatus;
1342 UInt16 linkMode;
1343 UInt16 lpAbility;
1344 UInt16 phyStatusChange;
1345 bool fullDuplex = false;
1346 UInt32 linkSpeed = 0;
1347 IOMediumType mediumType = kIOMediumEthernetNone;
1348 IONetworkMedium *medium;
1349
1350
1351 if ( firstPoll )
1352 {
1353 phyStatusPrev = 0;
1354 linkStatusPrev = kLinkStatusUnknown;
1355 }
1356
1357 if ( phyId == 0xff )
1358 {
1359 phyStatus = READ_REGISTER( PCSMIIStatus ) & 0x0000FFFF;
1360 lpAbility = READ_REGISTER( PCSMIILinkPartnerAbility ) & 0x0000FFFF;
1361 }
1362 else
1363 {
1364 if ( miiReadWord( &phyStatus, MII_STATUS, phyId) != true )
1365 {
1366 return;
1367 }
1368 miiReadWord( &lpAbility, MII_STATUS, phyId);
1369 }
1370
1371 phyStatusChange = (phyStatusPrev ^ phyStatus) &
1372 ( MII_STATUS_LINK_STATUS |
1373 MII_STATUS_NEGOTIATION_COMPLETE );
1374
1375 if ( phyStatusChange || firstPoll )
1376 {
1377 if ( firstPoll )
1378 {
1379 // For the initial link status poll, wait a bit, then
1380 // re-read the status register to clear any latched bits.
1381 // Why wait? Well, the debugger can kick in shortly after
1382 // this function returns, and we want the duplex setting
1383 // on the MAC to match the PHY.
1384
1385 miiWaitForAutoNegotiation( phyId );
1386 miiReadWord(&phyStatus, MII_STATUS, phyId);
1387 miiReadWord(&phyStatus, MII_STATUS, phyId);
1388 }
1389
1390 gemReg = READ_REGISTER( MACControlConfiguration );
1391 if ( lpAbility & MII_LPAR_PAUSE )
1392 gemReg |= kMACControlConfiguration_Send_Pause_Enable;
1393 else gemReg &= ~kMACControlConfiguration_Send_Pause_Enable;
1394 WRITE_REGISTER( MACControlConfiguration, gemReg );
1395
1396 if ( (phyStatus & MII_STATUS_LINK_STATUS) &&
1397 ( firstPoll || (phyStatus & MII_STATUS_NEGOTIATION_COMPLETE) ) )
1398 {
1399 if ( phyId == 0xff )
1400 {
1401 linkSpeed = 1000;
1402 fullDuplex = true;
1403 mediumType = kIOMediumEthernet1000BaseSX;
1404 }
1405 else if ( (phyType & MII_LXT971_MASK) == MII_LXT971_ID )
1406 {
1407 miiReadWord( &linkStatus, MII_LXT971_STATUS_2, phyId );
1408 linkSpeed = (linkStatus & MII_LXT971_STATUS_2_SPEED) ?
1409 100 : 10;
1410 fullDuplex = (linkStatus & MII_LXT971_STATUS_2_DUPLEX) ?
1411 true : false;
1412 mediumType = (linkSpeed == 10) ? kIOMediumEthernet10BaseT :
1413 kIOMediumEthernet100BaseTX;
1414 }
1415 else if ( (phyType & MII_BCM5201_MASK) == MII_BCM5201_ID )
1416 {
1417 miiReadWord( &linkStatus, MII_BCM5201_AUXSTATUS, phyId );
1418 linkSpeed = (linkStatus & MII_BCM5201_AUXSTATUS_SPEED) ?
1419 100 : 10;
1420 fullDuplex = (linkStatus & MII_BCM5201_AUXSTATUS_DUPLEX) ?
1421 true : false;
1422 mediumType = (linkSpeed == 10) ? kIOMediumEthernet10BaseT :
1423 kIOMediumEthernet100BaseTX;
1424 }
1425 else if ( ((phyType & MII_BCM5400_MASK) == MII_BCM5400_ID)
1426 || ((phyType & MII_BCM5400_MASK) == MII_BCM5401_ID) ) /// mlj temporary quick fix
1427 {
1428 miiReadWord( &linkStatus, MII_BCM5400_AUXSTATUS, phyId );
1429
1430 linkMode = (linkStatus & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) /
1431 MII_BCM5400_AUXSTATUS_LINKMODE_BIT;
1432
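/* Per the auxiliary-status decoding used below, link mode 0 means no link,
   modes 1-5 are 10/100 (half or full duplex), and modes 6 and up are
   gigabit, which is when the XIF is switched into GMII mode. */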
1433 gemReg = READ_REGISTER( XIFConfiguration );
1434 if ( linkMode < 6 )
1435 gemReg &= ~kXIFConfiguration_GMIIMODE;
1436 else gemReg |= kXIFConfiguration_GMIIMODE;
1437 WRITE_REGISTER( XIFConfiguration, gemReg );
1438
1439 if ( linkMode == 0 )
1440 {
1441 linkSpeed = 0;
1442 }
1443 else if ( linkMode < 3 )
1444 {
1445 linkSpeed = 10;
1446 fullDuplex = ( linkMode < 2 ) ? false : true;
1447 mediumType = kIOMediumEthernet10BaseT;
1448 }
1449 else if ( linkMode < 6 )
1450 {
1451 linkSpeed = 100;
1452 fullDuplex = ( linkMode < 5 ) ? false : true;
1453 mediumType = kIOMediumEthernet100BaseTX;
1454 }
1455 else
1456 {
1457 linkSpeed = 1000;
1458 fullDuplex = true;
1459 mediumType = kIOMediumEthernet1000BaseTX;
1460 }
1461 }
1462
1463 if ( fullDuplex != isFullDuplex )
1464 {
1465 setDuplexMode( fullDuplex );
1466 }
1467
1468 if ( ready == true )
1469 {
1470 startChip();
1471 }
1472
1473 if ( linkSpeed != 0 )
1474 {
1475 mediumType |= (fullDuplex == true) ?
1476 kIOMediumOptionFullDuplex :
1477 kIOMediumOptionHalfDuplex;
1478 }
1479
1480 medium = IONetworkMedium::getMediumWithType( mediumDict,
1481 mediumType );
1482
1483 setLinkStatus( kIONetworkLinkActive | kIONetworkLinkValid,
1484 medium,
1485 linkSpeed * 1000000 );
1486
1487 IOLog( "Ethernet(UniN): Link is up at %ld Mbps - %s Duplex\n\r",
1488 linkSpeed,
1489 (fullDuplex) ? "Full" : "Half" );
1490
1491 linkStatusPrev = kLinkStatusUp;
1492 }
1493 else
1494 {
1495 if ( (linkStatusPrev == kLinkStatusUp) ||
1496 (linkStatusPrev == kLinkStatusUnknown) )
1497 {
1498 stopChip();
1499
1500 medium = IONetworkMedium::getMediumWithType( mediumDict,
1501 mediumType );
1502
1503 setLinkStatus( kIONetworkLinkValid,
1504 medium,
1505 0 );
1506
1507 if ( linkStatusPrev != kLinkStatusUnknown )
1508 {
1509 IOLog( "Ethernet(UniN): Link is down.\n\r" );
1510 }
1511
1512 txIntCnt = 0;
1513
1514 if ( txCommandHead != txCommandTail )
1515 {
1516 initTxRing();
1517
1518 txCommandHead = READ_REGISTER( TxCompletion );
1519 txCommandTail = txCommandHead;
1520 }
1521 }
1522
1523 linkStatusPrev = kLinkStatusDown;
1524 }
1525
1526 phyStatusPrev = phyStatus;
1527 }
1528 return;
1529 }
1530
1531
1532 /*-------------------------------------------------------------------------
1533 *
1534 *
1535 *
1536 *-------------------------------------------------------------------------*/
1537
1538 IOReturn UniNEnet::getHardwareAddress(IOEthernetAddress *ea)
1539 {
1540 UInt32 i;
1541 OSData *macEntry;
1542 UInt8 *macAddress;
1543 UInt32 len;
1544
1545 macEntry = OSDynamicCast( OSData, nub->getProperty( "local-mac-address" ) );
1546 if ( macEntry == 0 )
1547 {
1548 return kIOReturnError;
1549 }
1550
1551 macAddress = (UInt8 *)macEntry->getBytesNoCopy();
1552 if ( macAddress == 0 )
1553 {
1554 return kIOReturnError;
1555 }
1556
1557 len = macEntry->getLength();
1558 if ( len != 6 )
1559 {
1560 return kIOReturnError;
1561 }
1562
1563 for (i = 0; i < sizeof(*ea); i++)
1564 {
1565 ea->bytes[i] = macAddress[i];
1566 }
1567 return kIOReturnSuccess;
1568 }
1569
1570 /*-------------------------------------------------------------------------
1571 *
1572 *
1573 *
1574 *-------------------------------------------------------------------------*/
1575
1576 #define ENET_CRCPOLY 0x04c11db7
1577
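/*
 * crc416 folds one 16-bit word of the station address into a running
 * 32-bit CRC (polynomial ENET_CRCPOLY), bit by bit, after swapping the
 * bytes of the input word.
 */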
1578 static UInt32 crc416(UInt32 current, UInt16 nxtval )
1579 {
1580 register UInt32 counter;
1581 register int highCRCBitSet, lowDataBitSet;
1582
1583 /* Swap bytes */
1584 nxtval = ((nxtval & 0x00FF) << 8) | (nxtval >> 8);
1585
1586 /* Compute bit-by-bit */
1587 for (counter = 0; counter != 16; ++counter)
1588 { /* is high CRC bit set? */
1589 if ((current & 0x80000000) == 0)
1590 highCRCBitSet = 0;
1591 else
1592 highCRCBitSet = 1;
1593
1594 current = current << 1;
1595
1596 if ((nxtval & 0x0001) == 0)
1597 lowDataBitSet = 0;
1598 else
1599 lowDataBitSet = 1;
1600
1601 nxtval = nxtval >> 1;
1602
1603 /* do the XOR */
1604 if (highCRCBitSet ^ lowDataBitSet)
1605 current = current ^ ENET_CRCPOLY;
1606 }
1607 return current;
1608 }
1609
1610 /*-------------------------------------------------------------------------
1611 *
1612 *
1613 *
1614 *-------------------------------------------------------------------------*/
1615
1616 static UInt32 mace_crc(UInt16 *address)
1617 {
1618 register UInt32 newcrc;
1619
1620 newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
1621 newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
1622 newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
1623
1624 return(newcrc);
1625 }
1626
1627 /*-------------------------------------------------------------------------
1628 *
1629 *
1630 *
1631 *-------------------------------------------------------------------------*/
1632
1633 /*
1634 * Add requested mcast addr to UniN's hash table filter.
1635 *
1636 */
1637 void UniNEnet::addToHashTableMask(UInt8 *addr)
1638 {
1639 UInt32 i,j;
1640 UInt32 crcBitIndex;
1641 UInt16 mask;
1642
1643 j = mace_crc((UInt16 *)addr) & 0xFF; /* Big-endian alert! */
1644
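/* Bit-reverse the low eight CRC bits, then complement them, to form the
   hash-table bit index (0-255) used below. */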
1645 for ( crcBitIndex = i = 0; i < 8; i++ )
1646 {
1647 crcBitIndex >>= 1;
1648 crcBitIndex |= (j & 0x80);
1649 j <<= 1;
1650 }
1651
1652 crcBitIndex ^= 0xFF;
1653
1654 if (hashTableUseCount[crcBitIndex]++)
1655 return; /* This bit is already set */
1656 mask = crcBitIndex % 16;
1657 mask = 1 << mask;
1658 hashTableMask[crcBitIndex/16] |= mask;
1659 }
1660
1661 /*-------------------------------------------------------------------------
1662 *
1663 *
1664 *
1665 *-------------------------------------------------------------------------*/
1666
1667 void UniNEnet::resetHashTableMask()
1668 {
1669 bzero(hashTableUseCount, sizeof(hashTableUseCount));
1670 bzero(hashTableMask, sizeof(hashTableMask));
1671 }
1672
1673 /*-------------------------------------------------------------------------
1674 *
1675 *
1676 *
1677 *-------------------------------------------------------------------------*/
1678
1679 /*
1680 * Sync the adapter with the software copy of the multicast mask
1681 * (logical address filter).
1682 */
1683 void UniNEnet::updateHashTableMask()
1684 {
1685 UInt32 i;
1686
1687 rxMacConfigReg = READ_REGISTER( RxMACConfiguration );
1688 WRITE_REGISTER( RxMACConfiguration,
1689 rxMacConfigReg & ~(kRxMACConfiguration_Rx_Mac_Enable
1690 | kRxMACConfiguration_Hash_Filter_Enable) );
1691
1692 while ( READ_REGISTER( RxMACConfiguration ) & (kRxMACConfiguration_Rx_Mac_Enable
1693 | kRxMACConfiguration_Hash_Filter_Enable) )
1694 ;
1695
1696 for ( i= 0; i < 16; i++ )
1697 WRITE_REGISTER( HashTable[ i ], hashTableMask[ 15 - i ] );
1698
1699 rxMacConfigReg |= kRxMACConfiguration_Hash_Filter_Enable;
1700 WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg );
1701 }