/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1996 NeXT Software, Inc. All rights reserved.
 */
#include <sys/param.h>
//---------------------------------------------------------------------------
// Function: IOPhysicalFromVirtual

static inline IOReturn
IOPhysicalFromVirtual(vm_address_t vaddr, IOPhysicalAddress * paddr)
{
    *paddr = pmap_extract(kernel_pmap, vaddr);
    return (*paddr == 0) ? kIOReturnBadArgument : kIOReturnSuccess;
}
//---------------------------------------------------------------------------
// Function: _intrACK
//
// Purpose:
//   Acknowledge all of the pending interrupt sources.
//
// Returns:
//   Return the interrupt status.
//
// Read/Write: SCB status

static inline scb_status_t
_intrACK(CSR_t * CSR_p)
{
    scb_status_t stat_irq = OSReadLE16(&CSR_p->status) & SCB_STATUS_INT_MASK;

    OSWriteLE16(&CSR_p->status, stat_irq);    // ack pending interrupts.

    return stat_irq;
}
//---------------------------------------------------------------------------
// Function: _waitSCBCommandClear
//
// Purpose:
//   Wait for the SCB Command field to clear. Ensures that we don't
//   overrun the NIC's command unit.
//
// Returns:
//   true if the SCB command field was cleared.
//   false if the SCB command field was not cleared.

static inline bool
_waitSCBCommandClear(CSR_t * CSR_p)
{
    for (int i = 0; i < SPIN_TIMEOUT; i++) {
        if (!OSReadLE8(&CSR_p->command))
            return true;
    }
    return false;    // hardware is not responding.
}
//---------------------------------------------------------------------------
// Function: _waitCUNonActive
//
// Purpose:
//   Waits for the Command Unit to become inactive.
//
// Returns:
//   true if the CU has become inactive.
//   false if the CU remains active.

static inline bool
_waitCUNonActive(CSR_t * CSR_p)
{
    for (int i = 0; i < SPIN_TIMEOUT; i++) {
        if (CSR_VALUE(SCB_STATUS_CUS, OSReadLE16(&CSR_p->status)) !=
            SCB_CUS_ACTIVE)
            return true;
    }
    return false;
}
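
// Note on CSR_FIELD() and CSR_VALUE(), used throughout this file: they pack a
// value into, or extract a value from, a named register bit field. Their real
// definitions live in the driver's hardware header; the token-pasting sketch
// below is only an assumption about their shape, shown here for readability.
#if 0
#define CSR_FIELD(field, value)  (((value) << field ## _SHIFT) & field ## _MASK)
#define CSR_VALUE(field, reg)    (((reg) & field ## _MASK) >> field ## _SHIFT)

// Example: with a hypothetical SCB_STATUS_CUS_MASK of 0x00c0 and
// SCB_STATUS_CUS_SHIFT of 6, CSR_VALUE(SCB_STATUS_CUS, 0x0040) evaluates to 1.
#endif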
//---------------------------------------------------------------------------
// Function: _polledCommand:WithAddress
//
// Purpose:
//   Issue a polled command to the NIC.

bool Intel82557::_polledCommand(cbHeader_t * hdr_p, IOPhysicalAddress paddr)
{
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: _polledCommand:(%s): _waitSCBCommandClear failed\n",
              CUCommandString(CSR_VALUE(CB_CMD, OSReadLE16(&hdr_p->command))),
              getName());
        return false;
    }

    if (!_waitCUNonActive(CSR_p)) {
        IOLog("%s: _polledCommand:(%s): _waitCUNonActive failed\n",
              CUCommandString(CSR_VALUE(CB_CMD, OSReadLE16(&hdr_p->command))),
              getName());
        return false;
    }

    // Set the physical address of the command block, and issue a
    // command unit start.

    OSWriteLE32(&CSR_p->pointer, paddr);
    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_START));

    prevCUCommand = SCB_CUC_START;

    // Poll until the command block is marked complete.

    for (int i = 0; i < SPIN_TIMEOUT; i++) {
        if (OSReadLE16(&hdr_p->status) & CB_STATUS_C)
            return true;
    }
    return false;
}
//---------------------------------------------------------------------------
// Function: _abortReceive
//
// Purpose:
//   Abort the receive unit.

bool Intel82557::_abortReceive()
{
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: _abortReceive: _waitSCBCommandClear failed\n", getName());
        return false;
    }

    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_ABORT));

    for (int i = 0; i < SPIN_TIMEOUT; i++) {
        if (CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)) ==
            SCB_RUS_IDLE)
            return true;
    }

    IOLog("%s: _abortReceive: timeout\n", getName());
    return false;
}
//---------------------------------------------------------------------------
// Function: _startReceive
//
// Purpose:
//   Start the receive unit.

bool Intel82557::_startReceive()
{
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: _startReceive: _waitSCBCommandClear failed\n", getName());
        return false;
    }

    // Make sure the initial RFD has a link to its RBD.
    OSWriteLE32(&headRfd->rbdAddr, headRfd->_rbd._paddr);

    OSWriteLE32(&CSR_p->pointer, headRfd->_paddr);
    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_START));

    for (int i = 0; i < SPIN_TIMEOUT; i++) {
        if (CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)) ==
            SCB_RUS_READY)
            return true;
    }

    IOLog("%s: _startReceive: timeout\n", getName());
    return false;
}
//---------------------------------------------------------------------------
// Function: _resetChip
//
// Purpose:
//   Issue a selective reset then a full reset.
//   This is done to avoid a PCI bus hang if the 82557 is in the midst of
//   a PCI bus cycle. The selective reset pauses the transmit and receive
//   units before the full reset is issued.

void Intel82557::_resetChip()
{
    UInt32 i = 0;

    sendPortCommand(portSelectiveReset_e, 0);
    do {
        IODelay(10);    // give any in-flight PCI cycle time to finish
    } while (OSReadLE32(&CSR_p->port) && ++i < 100);

    sendPortCommand(portReset_e, 0);
    IODelay(10);
}
//---------------------------------------------------------------------------
// Function: issueReset
//
// Purpose:
//   Shut down the chip, and issue a reset.

void Intel82557::issueReset()
{
    IOLog("%s: resetting adapter\n", getName());

    etherStats->dot3RxExtraEntry.resets++;

    setActivationLevel(kActivationLevel0);
    if (!setActivationLevel(currentLevel)) {
        IOLog("%s: Reset attempt unsuccessful\n", getName());
    }
}
//---------------------------------------------------------------------------
// Function: updateRFDFromMbuf
//
// Purpose:
//   Update an RFD/RBD in order to attach it to a cluster mbuf.
//   XXX - assume cluster will never cross page boundary.

bool Intel82557::updateRFDFromMbuf(rfd_t * rfd_p, struct mbuf * m)
{
    struct IOPhysicalSegment vector;
    UInt count;

    count = rxMbufCursor->getPhysicalSegments(m, &vector, 1);
    if (count != 1)
        return false;

    // Start modifying RFD.

    rfd_p->_rbd.buffer = vector.location;    // cursor is little-endian
    // OSWriteLE32(&rfd_p->_rbd.size, CSR_FIELD(RBD_SIZE, vector.length));

    rfd_p->_rbd._mbuf = m;

    return true;
}
//---------------------------------------------------------------------------
// Function: _initTcbQ
//
// Purpose:
//   Initialize the transmit control block queue. Create a circularly
//   linked list of TCBs.

bool Intel82557::_initTcbQ(bool enable = false)
{
    int i;

    tcbQ.numFree = tcbQ.numTcbs = NUM_TRANSMIT_FRAMES;
    tcbQ.activeHead_p = tcbQ.activeTail_p = tcbQ.freeHead_p = tcbList_p;

    for (i = 0; i < tcbQ.numTcbs; i++) {    /* free up buffers */
        if (tcbList_p[i]._mbuf) {
            freePacket(tcbList_p[i]._mbuf);
            tcbList_p[i]._mbuf = 0;
        }
    }

    bzero(tcbList_p, sizeof(tcb_t) * tcbQ.numTcbs);

    for (i = 0; i < tcbQ.numTcbs; i++) {
        IOPhysicalAddress paddr;

        IOReturn result = IOPhysicalFromVirtual((vm_address_t) &tcbList_p[i],
                                                &tcbList_p[i]._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("i82557(tcbQ): Invalid TCB address\n");
            return false;
        }

        result = IOPhysicalFromVirtual((vm_address_t) &tcbList_p[i]._tbds,
                                       &paddr);
        if (result != kIOReturnSuccess) {
            IOLog("i82557(tcbQ): Invalid TBD address\n");
            return false;
        }
        OSWriteLE32(&tcbList_p[i].tbdAddr, paddr);

        if (i == (tcbQ.numTcbs - 1))
            tcbList_p[i]._next = &tcbList_p[0];
        else
            tcbList_p[i]._next = &tcbList_p[i + 1];
    }

    for (i = 0; i < tcbQ.numTcbs; i++)    /* make physical links */
        OSWriteLE32(&tcbList_p[i].link, tcbList_p[i]._next->_paddr);

    return true;
}
//---------------------------------------------------------------------------
// Function: _setupRfd

static void _setupRfd(rfd_t * rfdList_p)
{
    for (int i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        if (i == (NUM_RECEIVE_FRAMES - 1)) {
            /* mark tails and link the lists circularly */
            OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_EL);
            rfdList_p[i]._next = &rfdList_p[0];
            OSSetLE32(&rfdList_p[i]._rbd.size, RBD_SIZE_EL);
            rfdList_p[i]._rbd._next = &rfdList_p[0]._rbd;
        }
        else {
            rfdList_p[i]._next = &rfdList_p[i + 1];
            rfdList_p[i]._rbd._next = &rfdList_p[i + 1]._rbd;
        }

        OSWriteLE32(&rfdList_p[i].link, rfdList_p[i]._next->_paddr);
        OSWriteLE32(&rfdList_p[i].rbdAddr,
                    (i == 0) ? rfdList_p[0]._rbd._paddr : C_NULL);
        OSWriteLE32(&rfdList_p[i]._rbd.link,
                    rfdList_p[i]._rbd._next->_paddr);
        OSSetLE32(&rfdList_p[i]._rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE));
    }
}
//---------------------------------------------------------------------------
// Function: _initRfdList
//
// Purpose:
//   Create a circularly linked list of receive frame descriptors, and
//   populate them with receive buffers allocated from our special pool.

bool Intel82557::_initRfdList(bool enable = false)
{
    int      i;
    IOReturn result;

    /* free allocated packet buffers */
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        if (rfdList_p[i]._rbd._mbuf) {
            freePacket(rfdList_p[i]._rbd._mbuf);
            // rfdList_p[i]._rbd._mbuf = 0;
        }
    }

    /* zero out the entire structure, and re-create it */
    bzero(rfdList_p, sizeof(rfd_t) * NUM_RECEIVE_FRAMES);

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_SF);

        result = IOPhysicalFromVirtual((vm_address_t) &rfdList_p[i],
                                       &rfdList_p[i]._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("%s: Invalid RFD address\n", getName());
            return false;
        }

        result = IOPhysicalFromVirtual((vm_address_t) &rfdList_p[i]._rbd,
                                       &rfdList_p[i]._rbd._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("%s: Invalid RBD address\n", getName());
            return false;
        }
    }

    _setupRfd(rfdList_p);

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        // Pre-load the receive ring with max size mbuf packets.

        struct mbuf * m = allocatePacket(MAX_BUF_SIZE);
        if (!m)
            return false;

        if (updateRFDFromMbuf(&rfdList_p[i], m) == false) {
            IOLog("%s: updateRFDFromMbuf() error\n", getName());
            freePacket(m);
            return false;
        }
    }

    headRfd = rfdList_p;
    tailRfd = rfdList_p + NUM_RECEIVE_FRAMES - 1;

    return true;
}
//---------------------------------------------------------------------------
// Function: _resetRfdList
//
// Purpose:
//   Reset the RFD list before the receiver engine is restarted after
//   a resource shortage.

bool Intel82557::_resetRfdList()
{
    int i;
    struct _cache {
        IOPhysicalAddress rbd_buffer;
        struct mbuf *     rbd_mbuf;
        IOPhysicalAddress rfd_paddr;
        IOPhysicalAddress rbd_paddr;
    } * cache_p = (struct _cache *) KDB_buf_p;

    if ((sizeof(struct _cache) * NUM_RECEIVE_FRAMES) > ETHERMAXPACKET) {
        IOLog("%s: no space for cache data\n", getName());
        return false;
    }

    /* cache allocated packet buffers */
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        cache_p[i].rbd_mbuf   = rfdList_p[i]._rbd._mbuf;
        cache_p[i].rbd_buffer = rfdList_p[i]._rbd.buffer;
        cache_p[i].rfd_paddr  = rfdList_p[i]._paddr;
        cache_p[i].rbd_paddr  = rfdList_p[i]._rbd._paddr;
    }

    /* zero out the entire structure, and re-create it */
    bzero(rfdList_p, sizeof(rfd_t) * NUM_RECEIVE_FRAMES);

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_SF);
        rfdList_p[i]._paddr      = cache_p[i].rfd_paddr;
        rfdList_p[i]._rbd._paddr = cache_p[i].rbd_paddr;
    }

    _setupRfd(rfdList_p);

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        rfdList_p[i]._rbd.buffer = cache_p[i].rbd_buffer;
        rfdList_p[i]._rbd._mbuf  = cache_p[i].rbd_mbuf;
    }

    headRfd = rfdList_p;
    tailRfd = rfdList_p + NUM_RECEIVE_FRAMES - 1;

    return true;
}
//---------------------------------------------------------------------------
// Function: _mdiReadPHY:Register:Data
//
// Purpose:
//   Read the specified MDI register and return the results.

bool
Intel82557::_mdiReadPHY(UInt8 phyAddress, UInt8 regAddress, UInt16 * data_p)
{
    UInt32 mdi;
    bool   ready = false;

    mdi = CSR_FIELD(MDI_CONTROL_PHYADDR, phyAddress) |
          CSR_FIELD(MDI_CONTROL_REGADDR, regAddress) |
          CSR_FIELD(MDI_CONTROL_OPCODE, MDI_CONTROL_OP_READ);

    OSWriteLE32(&CSR_p->mdiControl, mdi);

    // Poll until the read cycle completes.

    for (int i = 0; i < SPIN_TIMEOUT; i++) {
        if (OSReadLE32(&CSR_p->mdiControl) & MDI_CONTROL_READY) {
            ready = true;
            break;
        }
    }

    if (ready == false) {
        IOLog("%s: _mdiReadPHYRegisterSuccess timeout\n", getName());
        return false;
    }

    *data_p = CSR_VALUE(MDI_CONTROL_DATA, OSReadLE32(&CSR_p->mdiControl));

    return true;
}
//---------------------------------------------------------------------------
// Function: _mdiWritePHY:Register:Data
//
// Purpose:
//   Write the specified MDI register with the given data.

bool Intel82557::_mdiWritePHY(UInt8 phyAddress, UInt8 regAddress, UInt16 data)
{
    UInt32 mdi;
    bool   ready = false;

    mdi = CSR_FIELD(MDI_CONTROL_PHYADDR, phyAddress) |
          CSR_FIELD(MDI_CONTROL_REGADDR, regAddress) |
          CSR_FIELD(MDI_CONTROL_OPCODE, MDI_CONTROL_OP_WRITE) |
          CSR_FIELD(MDI_CONTROL_DATA, data);

    OSWriteLE32(&CSR_p->mdiControl, mdi);

    // Poll until the write cycle completes.

    for (int i = 0; i < SPIN_TIMEOUT; i++) {
        if (OSReadLE32(&CSR_p->mdiControl) & MDI_CONTROL_READY) {
            ready = true;
            break;
        }
    }

    if (ready == false) {
        IOLog("%s: _mdiWritePHYRegisterData timeout\n", getName());
        return false;
    }

    return true;
}
//---------------------------------------------------------------------------
// Function: nop
//
// Purpose:
//   Issue a polled NOP command to the NIC.

bool Intel82557::nop()
{
    cbHeader_t * nop_p = &overlay_p->nop;

    bzero(nop_p, sizeof(*nop_p));
    OSWriteLE16(&nop_p->command, CSR_FIELD(CB_CMD, CB_CMD_NOP) | CB_EL);
    OSWriteLE32(&nop_p->link, C_NULL);

    return _polledCommand(nop_p, overlay_paddr);
}
//---------------------------------------------------------------------------
// Function: config
//
// Purpose:
//   Issue a polled CONFIGURE command to the NIC.

bool Intel82557::config()
{
    cb_configure_t * cfg_p = &overlay_p->configure;
    UInt8 *          cb_p  = (UInt8 *) cfg_p + sizeof(cbHeader_t);  // config bytes follow the CB header

    /*
     * Fill the configure command block
     */
    bzero(cfg_p, sizeof(*cfg_p));

    OSWriteLE16(&cfg_p->header.command,
                CSR_FIELD(CB_CMD, CB_CMD_CONFIGURE) | CB_EL);
    OSWriteLE32(&cfg_p->header.link, C_NULL);

    cb_p[0] = CSR_FIELD(CB_CB0_BYTE_COUNT, CB_CONFIG_BYTE_COUNT);

    cb_p[1] = CSR_FIELD(CB_CB1_TX_FIFO_LIMIT, CB_CB1_TX_FIFO_0) |
              CSR_FIELD(CB_CB1_RX_FIFO_LIMIT, CB_CB1_RX_FIFO_64);

    cb_p[3] = CB_CB3_MWI_ENABLE;    // enable PCI-MWI on 82558 devices

    cb_p[4] = 0;                    // disable PCI transfer limits

    cb_p[6] = CB_CB6_NON_DIRECT_DMA | CB_CB6_STD_TCB | CB_CB6_STD_STATS;

    cb_p[7] = CSR_FIELD(CB_CB7_UNDERRUN_RETRY, CB_CB7_UNDERRUN_RETRY_1) |
              CB_CB7_DISC_SHORT_FRAMES;

    if ((eeprom->getContents()->controllerType != I82558_CONTROLLER_TYPE) &&
        (phyAddr != PHY_ADDRESS_I82503))
        cb_p[8] = CB_CB8_CSMA_EN;

    cb_p[10] = CSR_FIELD(CB_CB10_PREAMBLE, CB_CB10_PREAMBLE_7_BYTES);

    cb_p[12] = CSR_FIELD(CB_CB12_IFS, CB_CB12_IFS_96_BIT_TIMES);

    cb_p[13] = CSR_FIELD(CB_CB13_FC_TYPE_LSB, CB_CB13_FC_TYPE_LSB_DEF);
    cb_p[14] = CSR_FIELD(CB_CB14_FC_TYPE_MSB, CB_CB14_FC_TYPE_MSB_DEF);

    cb_p[15] = ((cb_p[8] & CB_CB8_CSMA_EN) ? 0 : CB_CB15_CRS_CDT) |
               (promiscuousEnabled ? CB_CB15_PROMISCUOUS : 0);

    cb_p[16] = CSR_FIELD(CB_CB16_FC_DELAY_LSB, CB_CB16_FC_DELAY_LSB_DEF);
    cb_p[17] = CSR_FIELD(CB_CB17_FC_DELAY_MSB, CB_CB17_FC_DELAY_MSB_DEF);

    cb_p[18] = CB_CB18_PADDING | CB_CB18_STRIPPING;

#if 0   // XXX - need to fix this
    /*
     * Force full duplex if there is a user override, or we are using Phy 0
     * and full duplex mode is enabled. The FDX# pin is wired to Phy 1,
     * which means that the 82557 can't autodetect the setting correctly.
     */
    if (forceFullDuplex || (phyAddr == PHY_ADDRESS_0 && fullDuplexMode))
        cb_p[19] = CB_CB19_FORCE_FDX;
    else
#endif
    cb_p[19] = CB_CB19_AUTO_FDX;

    cb_p[19] |= ( CB_CB19_TX_FC |
                  CB_CB19_RX_FC_RESTOP |
                  CB_CB19_RX_FC_RESTART );

    cb_p[20] = CSR_FIELD(CB_CB20_FC_ADDR_LSB, CB_CB20_FC_ADDR_LSB_DEF);

    return _polledCommand((cbHeader_t *) cfg_p, overlay_paddr);
}
//---------------------------------------------------------------------------
// Function: iaSetup
//
// Purpose:
//   Issue a polled IndividualAddressSETUP command to the NIC.

bool Intel82557::iaSetup()
{
    cb_iasetup_t * iaSetup_p = &overlay_p->iasetup;

    /*
     * Fill the IA-setup command block
     */
    bzero(iaSetup_p, sizeof(*iaSetup_p));

    OSWriteLE16(&iaSetup_p->header.command, CSR_FIELD(CB_CMD, CB_CMD_IASETUP) |
                CB_EL);
    OSWriteLE32(&iaSetup_p->header.link, C_NULL);
    iaSetup_p->addr = myAddress;

    return _polledCommand((cbHeader_t *) iaSetup_p, overlay_paddr);
}
//---------------------------------------------------------------------------
// Function: mcSetup
//
// Purpose:
//   Issue a polled MultiCastSETUP command to the NIC. If 'fromData' is
//   true, then we ignore the addrs/count arguments and instead use the
//   multicast address list property in our interface client object.

bool Intel82557::mcSetup(IOEthernetAddress * addrs,
                         UInt count,
                         bool fromData = false)
{
    cb_mcsetup_t *    mcSetup_p;
    IOReturn          result;
    bool              cmdResult;
    IOPhysicalAddress mcSetup_paddr;

    if (fromData) {
        // mcSetup() was not called by the setMulticastList() function.
        // We should get the multicast list stored in the interface
        // object's property table.
        //
        // mcSetup() is always executed by the default workloop thread,
        // thus we don't have to worry about the address list being
        // changed while we go through it.

        OSData * mcData = OSDynamicCast(OSData,
                              netif->getProperty(kIOMulticastFilterData));
        if (mcData) {
            addrs = (IOEthernetAddress *) mcData->getBytesNoCopy();
            count = mcData->getLength() / sizeof(IOEthernetAddress);
            assert(addrs && count);
        }
        else
            count = 0;
    }

    mcSetup_p = (cb_mcsetup_t *) IOMallocAligned(PAGE_SIZE, PAGE_SIZE);
    if (!mcSetup_p) {
        IOLog("%s: mcSetup:IOMallocAligned return NULL\n", getName());
        return false;
    }

    reserveDebuggerLock();

    OSWriteLE16(&mcSetup_p->header.status, 0);
    OSWriteLE16(&mcSetup_p->header.command,
                CSR_FIELD(CB_CMD, CB_CMD_MCSETUP) | CB_EL);
    OSWriteLE32(&mcSetup_p->header.link, C_NULL);

    /* fill in the addresses (count may be zero) */
    for (UInt i = 0; i < count; i++)
        mcSetup_p->addrs[i] = addrs[i];

    /* Set the number of bytes in the MC list, if the count is zero,
     * it is equivalent to disabling the multicast filtering mechanism.
     */
    OSWriteLE16(&mcSetup_p->count, count * sizeof(IOEthernetAddress));

    result = IOPhysicalFromVirtual((vm_address_t) mcSetup_p,
                                   &mcSetup_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid MC-setup command block address\n", getName());
        cmdResult = false;
    }
    else if (!_polledCommand((cbHeader_t *) mcSetup_p, mcSetup_paddr)) {
        IOLog("%s: MC-setup command failed 0x%x\n", getName(),
              OSReadLE16(&mcSetup_p->header.status));
        cmdResult = false;
    }
    else {
        cmdResult = (OSReadLE16(&mcSetup_p->header.status) & CB_STATUS_OK) ?
                    true : false;
    }

    releaseDebuggerLock();

    IOFreeAligned(mcSetup_p, PAGE_SIZE);

    return cmdResult;
}
//---------------------------------------------------------------------------
// Function: _selfTest
//
// Purpose:
//   Issue a PORT self test command to the NIC and verify the results.

bool Intel82557::_selfTest()
{
    UInt32 results;

    port_selftest_t * test_p = (port_selftest_t *) overlay_p;

    OSWriteLE32(&test_p->signature, 0);
    OSWriteLE32(&test_p->results, ~0);
    sendPortCommand(portSelfTest_e, overlay_paddr);
    IOSleep(10);    // give the self-test time to complete

    if (OSReadLE32(&test_p->signature) == 0) {
        IOLog("%s: Self test timed out\n", getName());
        return false;
    }

    results = OSReadLE32(&test_p->results);
    if (results) {    /* report errors from self test */
        if (results & PORT_SELFTEST_ROM)
            IOLog("%s: Self test reports invalid ROM contents\n",
                  getName());
        if (results & PORT_SELFTEST_REGISTER)
            IOLog("%s: Self test reports internal register failure\n",
                  getName());
        if (results & PORT_SELFTEST_DIAGNOSE)
            IOLog("%s: Self test reports serial subsystem failure\n",
                  getName());
        if (results & PORT_SELFTEST_GENERAL)
            IOLog("%s: Self test failed\n", getName());
        return false;
    }

    return true;
}
//---------------------------------------------------------------------------
// Function: sendPortCommand
//
// Purpose:
//   Issue an 82557 PORT command.

void Intel82557::sendPortCommand(port_command_t command, UInt arg)
{
    OSWriteLE32(&CSR_p->port, (arg & PORT_ADDRESS_MASK) |
                CSR_FIELD(PORT_FUNCTION, command));
}
//---------------------------------------------------------------------------
// Function: enableAdapterInterrupts, disableAdapterInterrupts
//
// Purpose:
//   Turn on/off interrupts at the adapter.

void Intel82557::enableAdapterInterrupts()
{
    /*
     * For 82558, mask (disable) the ER and FCP interrupts.
     */
    interruptByte = SCB_INTERRUPT_ER | SCB_INTERRUPT_FCP;
    OSWriteLE8(&CSR_p->interrupt, interruptByte);
    interruptEnabled = true;
}

void Intel82557::disableAdapterInterrupts()
{
    interruptByte = SCB_INTERRUPT_M;
    OSWriteLE8(&CSR_p->interrupt, interruptByte);
    interruptEnabled = false;
}
//---------------------------------------------------------------------------
// Function: _logCounters
//
// Purpose:
//   If Verbose is defined as yes, log extra information about errors that
//   the controller has recorded in its dump counters.

static void
_logCounters(errorCounters_t * errorCounters_p)
{
    if (errorCounters_p->tx_good_frames)
        IOLog("tx_good_frames %ld\n",
              OSReadLE32(&errorCounters_p->tx_good_frames));
    if (errorCounters_p->tx_maxcol_errors)
        IOLog("tx_maxcol_errors %ld\n",
              OSReadLE32(&errorCounters_p->tx_maxcol_errors));
    if (errorCounters_p->tx_late_collision_errors)
        IOLog("tx_late_collision_errors %ld\n",
              OSReadLE32(&errorCounters_p->tx_late_collision_errors));
    if (errorCounters_p->tx_underrun_errors)
        IOLog("tx_underrun_errors %ld\n",
              OSReadLE32(&errorCounters_p->tx_underrun_errors));
    if (errorCounters_p->tx_lost_carrier_sense_errors)
        IOLog("tx_lost_carrier_sense_errors %ld\n",
              OSReadLE32(&errorCounters_p->tx_lost_carrier_sense_errors));
    if (errorCounters_p->tx_deferred)
        IOLog("tx_deferred %ld\n", OSReadLE32(&errorCounters_p->tx_deferred));
    if (errorCounters_p->tx_single_collisions)
        IOLog("tx_single_collisions %ld\n",
              OSReadLE32(&errorCounters_p->tx_single_collisions));
    if (errorCounters_p->tx_multiple_collisions)
        IOLog("tx_multiple_collisions %ld\n",
              OSReadLE32(&errorCounters_p->tx_multiple_collisions));
    if (errorCounters_p->tx_total_collisions)
        IOLog("tx_total_collisions %ld\n",
              OSReadLE32(&errorCounters_p->tx_total_collisions));
    if (errorCounters_p->rx_good_frames)
        IOLog("rx_good_frames %ld\n",
              OSReadLE32(&errorCounters_p->rx_good_frames));
    if (errorCounters_p->rx_crc_errors)
        IOLog("rx_crc_errors %ld\n",
              OSReadLE32(&errorCounters_p->rx_crc_errors));
    if (errorCounters_p->rx_alignment_errors)
        IOLog("rx_alignment_errors %ld\n",
              OSReadLE32(&errorCounters_p->rx_alignment_errors));
    if (errorCounters_p->rx_resource_errors)
        IOLog("rx_resource_errors %ld\n",
              OSReadLE32(&errorCounters_p->rx_resource_errors));
    if (errorCounters_p->rx_overrun_errors)
        IOLog("rx_overrun_errors %ld\n",
              OSReadLE32(&errorCounters_p->rx_overrun_errors));
    if (errorCounters_p->rx_collision_detect_errors)
        IOLog("rx_collision_detect_errors %ld\n",
              OSReadLE32(&errorCounters_p->rx_collision_detect_errors));
    if (errorCounters_p->rx_short_frame_errors)
        IOLog("rx_short_frame_errors %ld\n",
              OSReadLE32(&errorCounters_p->rx_short_frame_errors));
}
//---------------------------------------------------------------------------
// Function: _dumpStatistics
//
// Purpose:
//   _dumpStatistics issues a new statistics dump command. Every few seconds,
//   _updateStatistics is called from timeoutOccurred to check for updated
//   statistics. If complete, update our counters, and issue a new dump
//   command.

bool Intel82557::_dumpStatistics()
{
    reserveDebuggerLock();

    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: _dumpStatistics: _waitSCBCommandClear failed\n", getName());
        releaseDebuggerLock();
        return false;
    }

    OSWriteLE8(&CSR_p->command,
               CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_DUMP_RESET_STAT));

    prevCUCommand = SCB_CUC_DUMP_RESET_STAT;

    releaseDebuggerLock();

    return true;
}
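
// The dump/update cycle described above is driven by the driver's periodic
// timer. A minimal sketch of how the pieces fit together is shown below; the
// timeoutOccurred() body here is illustrative only (the real handler also
// performs other periodic chores not shown).
#if 0
void Intel82557::timeoutOccurred(IOTimerEventSource * /* timer */)
{
    // _updateStatistics() checks whether the last dump completed. If it did,
    // the counters are folded into netStats/etherStats and a fresh
    // DUMP_RESET_STAT command is issued for the next interval.
    _updateStatistics();
}
#endif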
//---------------------------------------------------------------------------
// Function: _updateStatistics
//
// Purpose:
//   Gather statistics information from the adapter at regular intervals.

void Intel82557::_updateStatistics()
{
    if (OSReadLE32(&errorCounters_p->_status) != DUMP_STATUS) {

        _logCounters(errorCounters_p);

        // Ethernet transmitter stats.

        etherStats->dot3StatsEntry.singleCollisionFrames +=
            OSReadLE32(&errorCounters_p->tx_single_collisions);

        etherStats->dot3StatsEntry.multipleCollisionFrames +=
            OSReadLE32(&errorCounters_p->tx_multiple_collisions);

        etherStats->dot3StatsEntry.lateCollisions +=
            OSReadLE32(&errorCounters_p->tx_late_collision_errors);

        etherStats->dot3StatsEntry.excessiveCollisions +=
            OSReadLE32(&errorCounters_p->tx_maxcol_errors);

        etherStats->dot3StatsEntry.deferredTransmissions +=
            OSReadLE32(&errorCounters_p->tx_deferred);

        etherStats->dot3StatsEntry.carrierSenseErrors +=
            OSReadLE32(&errorCounters_p->tx_lost_carrier_sense_errors);

        etherStats->dot3TxExtraEntry.underruns +=
            OSReadLE32(&errorCounters_p->tx_underrun_errors);

        // Ethernet receiver stats.

        etherStats->dot3StatsEntry.alignmentErrors +=
            OSReadLE32(&errorCounters_p->rx_alignment_errors);

        etherStats->dot3StatsEntry.fcsErrors +=
            OSReadLE32(&errorCounters_p->rx_crc_errors);

        etherStats->dot3RxExtraEntry.resourceErrors +=
            OSReadLE32(&errorCounters_p->rx_resource_errors);

        etherStats->dot3RxExtraEntry.overruns +=
            OSReadLE32(&errorCounters_p->rx_overrun_errors);

        etherStats->dot3RxExtraEntry.collisionErrors +=
            OSReadLE32(&errorCounters_p->rx_collision_detect_errors);

        etherStats->dot3RxExtraEntry.frameTooShorts +=
            OSReadLE32(&errorCounters_p->rx_short_frame_errors);

        // Generic network stats. For the error counters, we assume
        // the Ethernet stats will never be cleared. Thus we derive the
        // error counters by summing the appropriate Ethernet error fields.

        netStats->outputErrors =
            ( etherStats->dot3StatsEntry.lateCollisions
            + etherStats->dot3StatsEntry.excessiveCollisions
            + etherStats->dot3StatsEntry.carrierSenseErrors
            + etherStats->dot3TxExtraEntry.underruns
            + etherStats->dot3TxExtraEntry.resourceErrors );

        netStats->inputErrors =
            ( etherStats->dot3StatsEntry.fcsErrors
            + etherStats->dot3StatsEntry.alignmentErrors
            + etherStats->dot3RxExtraEntry.resourceErrors
            + etherStats->dot3RxExtraEntry.overruns
            + etherStats->dot3RxExtraEntry.collisionErrors
            + etherStats->dot3RxExtraEntry.frameTooShorts );

        netStats->collisions +=
            OSReadLE32(&errorCounters_p->tx_total_collisions);

        OSWriteLE32(&errorCounters_p->_status, DUMP_STATUS);

        _dumpStatistics();
    }
}
//---------------------------------------------------------------------------
// Function: _allocateMemPage
//
// Purpose:
//   Allocate a page of memory.

bool Intel82557::_allocateMemPage(pageBlock_t * p)
{
    p->memSize = PAGE_SIZE;
    p->memPtr  = IOMallocAligned(p->memSize, PAGE_SIZE);
    if (!p->memPtr)
        return false;

    bzero(p->memPtr, p->memSize);
    p->memAllocPtr = p->memPtr;    /* initialize for allocation routine */
    p->memAvail    = p->memSize;

    return true;
}
//---------------------------------------------------------------------------
// Function: _freeMemPage
//
// Purpose:
//   Deallocate a page of memory.

void Intel82557::_freeMemPage(pageBlock_t * p)
{
    IOFreeAligned(p->memPtr, p->memSize);
}
//---------------------------------------------------------------------------
// Function: hwInit
//
// Purpose:
//   Reset/configure the chip, detect the PHY.

bool Intel82557::hwInit()
{
    disableAdapterInterrupts();
    _resetChip();
    disableAdapterInterrupts();

    /* disable early RX interrupt */
    OSWriteLE8(&CSR_p->earlyRxInterrupt, 0);

    /* load command unit base address */
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: CU _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&CSR_p->pointer, 0);
    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_LOAD_BASE));
    prevCUCommand = SCB_CUC_LOAD_BASE;

    /* load receive unit base address */
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: RU _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&CSR_p->pointer, 0);
    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_LOAD_BASE));

    /* load the dump counters address */
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: before LOAD_DUMP_COUNTERS_ADDRESS:"
              " _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&errorCounters_p->_status, DUMP_STATUS);
    OSWriteLE32(&CSR_p->pointer, errorCounters_paddr);
    OSWriteLE8(&CSR_p->command,
               CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_LOAD_DUMP_ADDR));
    prevCUCommand = SCB_CUC_LOAD_DUMP_ADDR;

    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: before intrACK _waitSCBCommandClear failed\n",
              getName());
        return false;
    }

    /* Setup flow-control threshold */
    OSWriteLE8(&CSR_p->flowControlThreshold,
               CSR_FIELD(FC_THRESHOLD, FC_THRESHOLD_512));

    _intrACK(CSR_p);    /* ack any pending interrupts */

    /* detect the PHY */
    phyID = _phyGetID();
    VPRINT("%s: PHY model id is 0x%08lx\n", getName(), phyID);
    phyID &= PHY_MODEL_MASK;

    _intrACK(CSR_p);    /* ack any pending interrupts */

    return true;
}
//---------------------------------------------------------------------------
// Function: _memAllocFrom
//
// Purpose:
//   Return the next aligned chunk of memory in our shared memory page.

void * Intel82557::_memAllocFrom(pageBlock_t * p, UInt allocSize, UInt align)
{
    void * allocPtr;
    UInt   sizeReal;

    // Advance allocPtr to next aligned boundary.

    allocPtr =
        (void *)((UInt)((UInt) p->memAllocPtr + (align - 1)) & (~(align - 1)));

    // Actual size of required storage. We need to take the alignment padding
    // into account.

    sizeReal = allocSize + ((UInt) allocPtr - (UInt) p->memAllocPtr);

    if (sizeReal > p->memAvail)
        return 0;

    p->memAllocPtr = (void *)((UInt) p->memAllocPtr + sizeReal);
    p->memAvail    = p->memSize - ((UInt) p->memAllocPtr - (UInt) p->memPtr);

    return allocPtr;
}
//---------------------------------------------------------------------------
// Function: coldInit
//
// Purpose:
//   One-time initialization code. This is called by start(), before we
//   attach any client objects.

bool Intel82557::coldInit()
{
    IOReturn          result;
    IOPhysicalAddress paddr;

    disableAdapterInterrupts();

    /* allocate and initialize shared memory pointers */
    if (!_allocateMemPage(&shared)) {
        IOLog("%s: Can't allocate shared memory page\n", getName());
        return false;
    }
    if (!_allocateMemPage(&txRing)) {
        IOLog("%s: Can't allocate memory page for TX ring\n", getName());
        return false;
    }
    if (!_allocateMemPage(&rxRing)) {
        IOLog("%s: Can't allocate memory page for RX ring\n", getName());
        return false;
    }

    /* allocate memory for shared data structures;
     * self test needs to be paragraph aligned
     */
    overlay_p = (overlay_t *) _memAllocFrom(&shared, sizeof(overlay_t),
                                            PARAGRAPH_ALIGNMENT);
    if (!overlay_p)
        return false;

    result = IOPhysicalFromVirtual((vm_address_t) overlay_p, &overlay_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid command block address\n", getName());
        return false;
    }

    tcbList_p = (tcb_t *) _memAllocFrom(&txRing,
                                        sizeof(tcb_t) * NUM_TRANSMIT_FRAMES,
                                        PARAGRAPH_ALIGNMENT);
    if (!tcbList_p)
        return false;

    KDB_tcb_p = (tcb_t *) _memAllocFrom(&shared,
                                        sizeof(tcb_t),
                                        PARAGRAPH_ALIGNMENT);
    if (!KDB_tcb_p)
        return false;

    result = IOPhysicalFromVirtual((vm_address_t) KDB_tcb_p,
                                   &KDB_tcb_p->_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid TCB address\n", getName());
        return false;
    }

    result = IOPhysicalFromVirtual((vm_address_t) &KDB_tcb_p->_tbds, &paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid TCB->_TBD address\n", getName());
        return false;
    }
    OSWriteLE32(&KDB_tcb_p->tbdAddr, paddr);

    KDB_buf_p = _memAllocFrom(&shared, ETHERMAXPACKET, DWORD_ALIGNMENT);
    if (!KDB_buf_p)
        return false;

    result = IOPhysicalFromVirtual((vm_address_t) KDB_buf_p, &KDB_buf_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid address\n", getName());
        return false;
    }

    errorCounters_p = (errorCounters_t *) _memAllocFrom(&shared,
                                              sizeof(errorCounters_t),
                                              DWORD_ALIGNMENT);
    if (!errorCounters_p)
        return false;

    result = IOPhysicalFromVirtual((vm_address_t) errorCounters_p,
                                   &errorCounters_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid errorCounters address\n", getName());
        return false;
    }

    rfdList_p = (rfd_t *) _memAllocFrom(&rxRing,
                                        sizeof(rfd_t) * NUM_RECEIVE_FRAMES,
                                        PARAGRAPH_ALIGNMENT);
    if (!rfdList_p)
        return false;

    myAddress = eeprom->getContents()->addr;

    return true;
}
//---------------------------------------------------------------------------
// Function: receiveInterruptOccurred
//
// Purpose:
//   Hand up received frames.

bool Intel82557::receiveInterruptOccurred()
{
    bool packetsQueued = false;

    while (OSReadLE16(&headRfd->status) & RFD_STATUS_C) {
        rbd_count_t rbd_count = OSReadLE32(&headRfd->_rbd.count);

        // rxCount does NOT include the Ethernet CRC (FCS).

        UInt rxCount = CSR_VALUE(RBD_COUNT, rbd_count);

        // When the receive unit runs out of resources, it will
        // skip over RFD/RBD, marking them as complete, but the RBD will
        // have zero bytes and the EOF bit will not be set.
        // We just skip over those and allow them to be recycled.
        //
        // In those cases, the RFD->status word will be 0x8220.

        /* should have exactly 1 rbd per rfd */
        if (!(rbd_count & RBD_COUNT_EOF)) {
            IOLog("%s: more than 1 rbd, frame size %d\n", getName(), rxCount);
            IOLog("%s: RFD status: %04x\n", getName(),
                  OSReadLE16(&headRfd->status));
        }

        if ((!(OSReadLE16(&headRfd->status) & RFD_STATUS_OK)) ||
            (rxCount < (ETHERMINPACKET - ETHERCRC))) {
            ; /* bad or unwanted packet */
        }
        else {
            struct mbuf * m    = headRfd->_rbd._mbuf;
            struct mbuf * m_in = 0;    // packet to pass up to inputPacket()
            bool          replaced;

            packetsReceived = true;

            m_in = replaceOrCopyPacket(&m, rxCount, &replaced);
            if (m_in == 0) {
                etherStats->dot3RxExtraEntry.resourceErrors++;
            }

            if (replaced && (updateRFDFromMbuf(headRfd, m) == false)) {
                freePacket(m);    // free the new replacement mbuf.
                m_in = 0;         // pass up nothing.
                etherStats->dot3RxExtraEntry.resourceErrors++;
                IOLog("%s: updateRFDFromMbuf() error\n", getName());
            }

            if (m_in) {
                netif->inputPacket(m_in, rxCount, true);
                packetsQueued = true;
                netStats->inputPackets++;
            }
        }

        /* clear fields in rfd */
        OSWriteLE16(&headRfd->status, 0);
        OSWriteLE16(&headRfd->command, (RFD_COMMAND_SF | RFD_COMMAND_EL));
        OSWriteLE32(&headRfd->rbdAddr, C_NULL);
        OSWriteLE32(&headRfd->misc, 0);

        /* clear fields in rbd */
        OSWriteLE32(&headRfd->_rbd.count, 0);
        OSWriteLE32(&headRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE) |
                    RBD_SIZE_EL);

        /* adjust tail markers */
        OSWriteLE32(&tailRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE));
        OSWriteLE16(&tailRfd->command, RFD_COMMAND_SF);

        tailRfd = headRfd;           // new tail
        headRfd = headRfd->_next;    // new head
    }

    return packetsQueued;
}
//---------------------------------------------------------------------------
// Function: transmitInterruptOccurred
//
// Purpose:
//   Free up packets associated with any completed TCB's.

void Intel82557::transmitInterruptOccurred()
{
    tcbQ_t * tcbQ_p = &tcbQ;
    tcb_t *  head;

    head = tcbQ_p->activeHead_p;
    while (tcbQ_p->numFree < tcbQ_p->numTcbs &&
           (OSReadLE16(&head->status) & TCB_STATUS_C))
    {
        OSWriteLE16(&head->status, 0);

        if (head->_mbuf) {
            freePacket(head->_mbuf);
            head->_mbuf = 0;
        }

        head = tcbQ_p->activeHead_p = head->_next;
        tcbQ_p->numFree++;
    }
}
//---------------------------------------------------------------------------
// Function: interruptOccurred
//
// Purpose:
//   Field an interrupt.

void Intel82557::interruptOccurred(IOInterruptEventSource * src, int /*count*/)
{
    scb_status_t status;
    bool flushInputQ = false;
    bool doService   = false;

    reserveDebuggerLock();

    if (interruptEnabled == false) {
        releaseDebuggerLock();
        IOLog("%s: unexpected interrupt\n", getName());
        return;
    }

    /*
     * Loop until the interrupt line becomes deasserted.
     */
    while (1) {
        if ((status = _intrACK(CSR_p)) == 0)
            break;

        if (status & (SCB_STATUS_FR | SCB_STATUS_RNR)) {

            flushInputQ = receiveInterruptOccurred() || flushInputQ;

            etherStats->dot3RxExtraEntry.interrupts++;

            if (status & SCB_STATUS_RNR) {
                etherStats->dot3RxExtraEntry.resets++;

                if (!_startReceive()) {
                    IOLog("%s: Unable to restart receiver\n", getName());
                    // issueReset(); /* shouldn't need to do this. */
                }
            }
        }

        if (status & (SCB_STATUS_CX | SCB_STATUS_CNA)) {
            transmitInterruptOccurred();
            etherStats->dot3TxExtraEntry.interrupts++;
            doService = true;
        }
    }

    releaseDebuggerLock();

    if (enabledForNetif) {
        // Flush all packets received and pass them to the network stack.

        if (flushInputQ)
            netif->flushInputQueue();

        // Call service() without holding the debugger lock to prevent a
        // deadlock when service() calls our outputPacket() function.

        if (doService)
            transmitQueue->service();
    }
}
//---------------------------------------------------------------------------
// Function: updateTCBForMbuf
//
// Purpose:
//   Update the TxCB pointed by tcb_p to point to the mbuf chain 'm'.
//   Returns the mbuf encoded onto the TxCB.

struct mbuf *
Intel82557::updateTCBForMbuf(tcb_t * tcb_p, struct mbuf * m)
{
    // Set the invariant TCB fields.

    OSWriteLE16(&tcb_p->status, 0);

    if (++txCount == TRANSMIT_INT_DELAY) {
        // Request an interrupt for this frame.
        OSWriteLE16(&tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) |
                    TCB_COMMAND_S | TCB_COMMAND_I);
        txCount = 0;
    }
    else {
        OSWriteLE16(&tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) |
                    TCB_COMMAND_S);
    }

    OSWriteLE8(&tcb_p->threshold, TCB_TX_THRESHOLD);
    OSWriteLE16(&tcb_p->count, 0);    // all data are in the TBD's, none in TxCB

    // Since the format of a TBD closely matches the structure of an
    // 'struct IOPhysicalSegment', we shall have the cursor update the TBD list
    // directly.

    UInt segments = txMbufCursor->getPhysicalSegmentsWithCoalesce(m,
                        (struct IOPhysicalSegment *) &tcb_p->_tbds[0],
                        sizeof(tcb_p->_tbds) / sizeof(tcb_p->_tbds[0]));

    if (!segments) {
        IOLog("%s: getPhysicalSegments error, pkt len = %d\n",
              getName(), m->m_pkthdr.len);
        return 0;
    }

    // Update the TBD array size count.

    OSWriteLE8(&tcb_p->number, segments);

    return m;
}
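
// The cast above relies on each entry of tcb_p->_tbds having the same
// in-memory shape as a struct IOPhysicalSegment: a physical address followed
// by a length, so the mbuf cursor can fill the TBD array in place.
// IOPhysicalSegment comes from IOMbufMemoryCursor.h; the tbd_t sketch below
// only mirrors the addr/size fields used by _sendPacket() further down and is
// not the driver's actual declaration.
#if 0
struct IOPhysicalSegment {
    IOPhysicalAddress location;    // physical address of the segment
    IOByteCount       length;      // length of the segment in bytes
};

typedef struct {
    UInt32 addr;                   // physical address of the transmit buffer
    UInt32 size;                   // byte count (plus end-of-list flag bits)
} tbd_t;
#endif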
//---------------------------------------------------------------------------
// Function: outputPacket <IONetworkController>
//
// Purpose:
//   Transmit the packet handed by our IOOutputQueue.
//   TCBs have the suspend bit set, so that the CU goes into the suspend
//   state when done. We use the CU_RESUME optimization that allows us to
//   issue CU_RESUMES without waiting for SCB command to clear.

UInt32
Intel82557::outputPacket(struct mbuf * m, void * param)
{
    tcb_t * tcb_p;

    if (!enabledForNetif) {    // drop the packet.
        freePacket(m);
        return kIOReturnOutputDropped;
    }

    reserveDebuggerLock();

    if (tcbQ.numFree == 0) {    // retry when more space is available.
        releaseDebuggerLock();
        return kIOReturnOutputStall;
    }

    packetsTransmitted = true;
    netStats->outputPackets++;

    tcb_p = tcbQ.freeHead_p;

    tcb_p->_mbuf = updateTCBForMbuf(tcb_p, m);
    if (tcb_p->_mbuf == 0) {
        etherStats->dot3TxExtraEntry.resourceErrors++;
        goto drop_packet;
    }

    /* update the queue */
    tcbQ.numFree--;
    tcbQ.freeHead_p = tcbQ.freeHead_p->_next;

    /* The TCB is already setup and the suspend bit set. Now clear the
     * suspend bit of the previous TCB.
     */
    if (tcbQ.activeTail_p != tcb_p)
        OSClearLE16(&tcbQ.activeTail_p->command, TCB_COMMAND_S);
    tcbQ.activeTail_p = tcb_p;

    /*
     * CUC_RESUME is optimized such that it is unnecessary to wait
     * for the CU to clear the SCB command word if the previous command
     * was a resume and the CU state is not idle.
     */
    if (CSR_VALUE(SCB_STATUS_CUS, OSReadLE16(&CSR_p->status)) == SCB_CUS_IDLE)
    {
        if (!_waitSCBCommandClear(CSR_p)) {
            IOLog("%s: outputPacket: _waitSCBCommandClear error\n", getName());
            etherStats->dot3TxExtraEntry.timeouts++;
        }

        OSWriteLE32(&CSR_p->pointer, tcb_p->_paddr);
        OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_START));
        prevCUCommand = SCB_CUC_START;
    }
    else {
        if (prevCUCommand != SCB_CUC_RESUME) {
            if (!_waitSCBCommandClear(CSR_p)) {
                IOLog("%s: outputPacket: _waitSCBCommandClear error\n",
                      getName());
                etherStats->dot3TxExtraEntry.timeouts++;
            }
        }

        OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_RESUME));
        prevCUCommand = SCB_CUC_RESUME;
    }

    releaseDebuggerLock();
    return kIOReturnOutputSuccess;

drop_packet:
    freePacket(m);
    releaseDebuggerLock();
    return kIOReturnOutputDropped;
}
//---------------------------------------------------------------------------
// Function: _receivePacket
//
// Purpose:
//   Part of kerneldebugger protocol.
//   Returns true if a packet was received successfully.

bool Intel82557::_receivePacket(void * pkt, UInt * len, UInt timeout)
{
    bool processPacket = true;

    scb_status_t status;

    *len = 0;

    // Wait for a completed RFD, or until the timeout expires.

    while ((OSReadLE16(&headRfd->status) & RFD_STATUS_C) == 0) {
        if ((int) timeout <= 0) {
            processPacket = false;
            break;
        }
        IODelay(1000);    // poll in roughly one millisecond steps
        timeout--;
    }

    if (processPacket) {
        if ((OSReadLE16(&headRfd->status) & RFD_STATUS_OK) &&
            (OSReadLE32(&headRfd->_rbd.count) & RBD_COUNT_EOF))
        {
            // Pass up good frames.

            *len = CSR_VALUE(RBD_COUNT, OSReadLE32(&headRfd->_rbd.count));
            *len = MIN(*len, ETHERMAXPACKET);
            bcopy(mtod(headRfd->_rbd._mbuf, void *), pkt, *len);
        }

        /* the head becomes the new tail */
        /* clear fields in rfd */
        OSWriteLE16(&headRfd->status, 0);
        OSWriteLE16(&headRfd->command, (RFD_COMMAND_SF | RFD_COMMAND_EL));
        OSWriteLE32(&headRfd->rbdAddr, C_NULL);
        OSWriteLE32(&headRfd->misc, 0);

        /* clear fields in rbd */
        OSWriteLE32(&headRfd->_rbd.count, 0);
        OSWriteLE32(&headRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE) |
                    RBD_SIZE_EL);

        /* adjust tail markers */
        OSWriteLE32(&tailRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE));
        OSWriteLE16(&tailRfd->command, RFD_COMMAND_SF);

        tailRfd = headRfd;           // new tail
        headRfd = headRfd->_next;    // new head
    }

    status = OSReadLE16(&CSR_p->status) & SCB_STATUS_RNR;
    if (status) {
        OSWriteLE16(&CSR_p->status, status);    // ack RNR interrupt

        IOLog("Intel82557::%s restarting receiver\n", __FUNCTION__);

        IOLog("%s::%s RUS:0x%x Index:%d\n", getName(), __FUNCTION__,
              CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)),
              tailRfd - rfdList_p);

        _startReceive();

#if 0   // Display RFD/RBD fields
        for (int i = 0; i < NUM_RECEIVE_FRAMES; i++) {
            IOLog(" %02d: %04x %04x - %08x %08x\n", i,
                  OSReadLE16(&rfdList_p[i].command),
                  OSReadLE16(&rfdList_p[i].status),
                  OSReadLE32(&rfdList_p[i]._rbd.size),
                  OSReadLE32(&rfdList_p[i].misc));
        }
#endif
    }

    return processPacket;
}
//---------------------------------------------------------------------------
// Function: _sendPacket
//
// Purpose:
//   Part of kerneldebugger protocol.
//   Returns true if the packet was sent successfully.

bool Intel82557::_sendPacket(void * pkt, UInt len)
{
    tbd_t * tbd_p;

    // Set up the TCB and issue the command.

    OSWriteLE16(&KDB_tcb_p->status, 0);
    OSWriteLE32(&KDB_tcb_p->link, C_NULL);
    OSWriteLE8(&KDB_tcb_p->threshold, TCB_TX_THRESHOLD);
    OSWriteLE16(&KDB_tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) |
                CB_EL);
    OSWriteLE16(&KDB_tcb_p->count, 0);    // all data are in the TBD's.
    OSWriteLE8(&KDB_tcb_p->number, 1);    // 1 TBD only.

    // Copy the debugger packet to the pre-allocated buffer area.

    len = MIN(len, ETHERMAXPACKET);
    len = MAX(len, ETHERMINPACKET);
    bcopy(pkt, KDB_buf_p, len);

    // Fill in the single TBD.

    tbd_p = &KDB_tcb_p->_tbds[0];
    OSWriteLE32(&tbd_p->addr, KDB_buf_paddr);
    OSWriteLE32(&tbd_p->size, CSR_FIELD(TBD_SIZE, len));

    // Start up the command unit to send the packet.

    return _polledCommand((cbHeader_t *) KDB_tcb_p, KDB_tcb_p->_paddr);
}