]> git.saurik.com Git - apple/xnu.git/blob - iokit/Drivers/network/drvIntel82557/i82557Private.cpp
31ac8842faf0241a53fe44ec25796f5a1bb55164
[apple/xnu.git] / iokit / Drivers / network / drvIntel82557 / i82557Private.cpp
1 /*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1996 NeXT Software, Inc. All rights reserved.
24 *
25 * i82557Private.cpp
26 *
27 */
28
29 #include "i82557.h"
30
31 extern "C" {
32 #include <sys/param.h>
33 #include <sys/mbuf.h>
34 #include <string.h>
35 }
36
37 //---------------------------------------------------------------------------
38 // Function: IOPhysicalFromVirtual
39 //
40 // Hack, remove ASAP.
41
42 static inline IOReturn
43 IOPhysicalFromVirtual(vm_address_t vaddr, IOPhysicalAddress * paddr)
44 {
45 *paddr = pmap_extract(kernel_pmap, vaddr);
46 return (*paddr == 0) ? kIOReturnBadArgument : kIOReturnSuccess;
47 }
48
49 //---------------------------------------------------------------------------
50 // Function: _intrACK
51 //
52 // Purpose:
53 // Acknowledge all of the pending interrupt sources.
54 //
55 // Returns:
56 // Return the interrupt status.
57 //
58 // CSR usage:
59 // Read/Write: SCB status
60
61 static inline scb_status_t _intrACK(CSR_t * CSR_p)
62 {
63 scb_status_t stat_irq = OSReadLE16(&CSR_p->status) & SCB_STATUS_INT_MASK;
64 if (stat_irq)
65 OSWriteLE16(&CSR_p->status, stat_irq); // ack pending interrupts.
66 return (stat_irq);
67 }
68
69 //---------------------------------------------------------------------------
70 // Function: _waitSCBCommandClear
71 //
72 // Purpose:
73 // Wait for the SCB Command field to clear. Ensures that we don't
74 // overrun the NIC's command unit.
75 //
76 // Returns:
77 // true if the SCB command field was cleared.
78 // false if the SCB command field was not cleared.
79 //
80 // CSR usage:
81 // Read: SCB command
82
83 static inline bool
84 _waitSCBCommandClear(CSR_t * CSR_p)
85 {
86 for (int i = 0; i < SPIN_TIMEOUT; i++) {
87 if (!OSReadLE8(&CSR_p->command))
88 return true;
89 IODelay(SPIN_COUNT);
90 }
91 return false; // hardware is not responding.
92 }
93
94 //---------------------------------------------------------------------------
95 // Function: _waitCUNonActive
96 //
97 // Purpose:
98 // Waits for the Command Unit to become inactive.
99 //
100 // Returns:
101 // true if the CU has become inactive.
102 // false if the CU remains active.
103 //
104 // CSR usage:
105 // Read: SCB status
106
107 static inline bool
108 _waitCUNonActive(CSR_t * CSR_p)
109 {
110 for (int i = 0; i < SPIN_TIMEOUT; i++) {
111 if (CSR_VALUE(SCB_STATUS_CUS, OSReadLE16(&CSR_p->status)) !=
112 SCB_CUS_ACTIVE)
113 return true;
114 IODelay(SPIN_COUNT);
115 }
116 return false;
117 }
118
119 //---------------------------------------------------------------------------
120 // Function: _polledCommand:WithAddress
121 //
122 // Purpose:
123 // Issue a polled command to the NIC.
124
125 bool Intel82557::_polledCommand(cbHeader_t * hdr_p, IOPhysicalAddress paddr)
126 {
127 if (!_waitSCBCommandClear(CSR_p)) {
128 IOLog("%s: _polledCommand:(%s): _waitSCBCommandClear failed\n",
129 CUCommandString(CSR_VALUE(CB_CMD, OSReadLE16(&hdr_p->command))),
130 getName());
131 return false;
132 }
133
134 if (!_waitCUNonActive(CSR_p)) {
135 IOLog("%s: _polledCommand:(%s): _waitCUNonActive failed\n",
136 CUCommandString(CSR_VALUE(CB_CMD, OSReadLE16(&hdr_p->command))),
137 getName());
138 return false;
139 }
140
141 // Set the physical address of the command block, and issue a
142 // command unit start.
143 //
144 OSWriteLE32(&CSR_p->pointer, paddr);
145 OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_START));
146
147 prevCUCommand = SCB_CUC_START;
148
149 for (int i = 0; i < SPIN_TIMEOUT; i++) {
150 if (OSReadLE16(&hdr_p->status) & CB_STATUS_C)
151 return true;
152 IODelay(SPIN_COUNT);
153 }
154 return false;
155 }
156
157 //---------------------------------------------------------------------------
158 // Function: _abortReceive
159 //
160 // Purpose:
161 // Abort the receive unit.
162
163 bool Intel82557::_abortReceive()
164 {
165 if (!_waitSCBCommandClear(CSR_p)) {
166 IOLog("%s: _abortReceive: _waitSCBCommandClear failed\n", getName());
167 return false;
168 }
169
170 OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_ABORT));
171
172 for (int i = 0; i < SPIN_TIMEOUT; i++) {
173 if (CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)) ==
174 SCB_RUS_IDLE)
175 return true;
176 IODelay(SPIN_COUNT);
177 }
178
179 IOLog("%s: _abortReceive: timeout\n", getName());
180 return false;
181 }
182
183 //---------------------------------------------------------------------------
184 // Function: _startReceive
185 //
186 // Purpose:
187 // Start the receive unit
188
189 bool Intel82557::_startReceive()
190 {
191 if (!_waitSCBCommandClear(CSR_p)) {
192 IOLog("%s: _startReceive: _waitSCBCommandClear failed\n", getName());
193 return false;
194 }
195
196 // Make sure the initial RFD has a link to its RBD
197 OSWriteLE32(&headRfd->rbdAddr, headRfd->_rbd._paddr);
198
199 OSWriteLE32(&CSR_p->pointer, headRfd->_paddr);
200 OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_START));
201
202 for (int i = 0; i < SPIN_TIMEOUT; i++) {
203 if (CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)) ==
204 SCB_RUS_READY)
205 return true;
206 IODelay(SPIN_COUNT);
207 }
208
209 IOLog("%s: _startReceive: timeout\n", getName());
210 return false;
211 }
212
213 //---------------------------------------------------------------------------
214 // Function: _resetChip
215 //
216 // Purpose:
217 // Issue a selective reset then a full reset.
218 // This is done to avoid a PCI bus hang if the 82557 is in the midst of
219 // a PCI bus cycle. The selective reset pauses the transmit and receive
220 // engines.
221 //
222 void Intel82557::_resetChip()
223 {
224 int i = 0;
225
226 sendPortCommand(portSelectiveReset_e, 0);
227 do {
228 IOSleep(1);
229 } while (OSReadLE32(&CSR_p->port) && ++i < 100);
230
231 sendPortCommand(portReset_e, 0);
232 IOSleep(1);
233 return;
234 }
235
236 //---------------------------------------------------------------------------
237 // Function: issueReset
238 //
239 // Purpose:
240 // Shut down the chip, and issue a reset.
241
242 void Intel82557::issueReset()
243 {
244 IOLog("%s: resetting adapter\n", getName());
245
246 etherStats->dot3RxExtraEntry.resets++;
247
248 setActivationLevel(kActivationLevel0);
249 if (!setActivationLevel(currentLevel)) {
250 IOLog("%s: Reset attempt unsuccessful\n", getName());
251 }
252 }
253
254 //---------------------------------------------------------------------------
255 // Function: updateRFDFromMbuf
256 //
257 // Purpose:
258 // Updated a RFD/RBD in order to attach it to a cluster mbuf.
259 // XXX - assume cluster will never cross page boundary.
260
261 bool Intel82557::updateRFDFromMbuf(rfd_t * rfd_p, struct mbuf * m)
262 {
263 struct IOPhysicalSegment vector;
264 UInt count;
265
266 count = rxMbufCursor->getPhysicalSegments(m, &vector, 1);
267 if (!count)
268 return false;
269
270 // Start modifying RFD
271 //
272 rfd_p->_rbd.buffer = vector.location; // cursor is little-endian
273 // OSWriteLE32(&rfd_p->_rbd.size, CSR_FIELD(RBD_SIZE, vector.length));
274
275 rfd_p->_rbd._mbuf = m;
276
277 return true;
278 }
279
280 //---------------------------------------------------------------------------
281 // Function: _initTcbQ
282 //
283 // Purpose:
284 // Initialize the transmit control block queue. Create a circularly
285 // linked list of tcbs.
286
bool Intel82557::_initTcbQ(bool enable = false)
{
    int i;

    // Reset queue bookkeeping: every TCB is free, and the active head,
    // active tail and free head all start at the front of the TCB array.
    tcbQ.numFree = tcbQ.numTcbs = NUM_TRANSMIT_FRAMES;
    tcbQ.activeHead_p = tcbQ.activeTail_p = tcbQ.freeHead_p = tcbList_p;

    for (i = 0; i < tcbQ.numTcbs; i++) { /* free up buffers */
        if (tcbList_p[i]._mbuf) {
            freePacket(tcbList_p[i]._mbuf);
            tcbList_p[i]._mbuf = 0;
        }
    }
    // Wipe the whole list; the hardware-visible fields are rebuilt below.
    bzero(tcbList_p, sizeof(tcb_t) * tcbQ.numTcbs);

    // When 'enable' is false the caller only wants the queue torn down.
    if (!enable)
        return true;

    for (i = 0; i < tcbQ.numTcbs; i++) {
        IOPhysicalAddress paddr;

        // Record the physical address of this TCB; used below to build
        // the hardware link fields.
        IOReturn result = IOPhysicalFromVirtual((vm_address_t) &tcbList_p[i],
                                                &tcbList_p[i]._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("i82557(tcbQ): Invalid TCB address\n");
            return false;
        }

        // Point the TCB's tbdAddr at the physical address of its
        // embedded TBD array.
        result = IOPhysicalFromVirtual((vm_address_t) &tcbList_p[i]._tbds,
                                       &paddr);
        if (result != kIOReturnSuccess) {
            IOLog("i82557(tcbQ): Invalid TBD address\n");
            return false;
        }
        OSWriteLE32(&tcbList_p[i].tbdAddr, paddr);

        // Virtual links form a circular list: the last entry points back
        // to the first.
        if (i == (tcbQ.numTcbs - 1))
            tcbList_p[i]._next = &tcbList_p[0];
        else
            tcbList_p[i]._next = &tcbList_p[i + 1];
    }
    for (i = 0; i < tcbQ.numTcbs; i++) /* make physical links */
        OSWriteLE32(&tcbList_p[i].link, tcbList_p[i]._next->_paddr);

    return true;
}
333
334 //---------------------------------------------------------------------------
335 // Function: _setupRfd
336
// Build the hardware link fields for the RFD and RBD rings. Both rings
// are circular: the last element links back to the first and carries the
// EL (end-of-list) marker. Assumes each element's _paddr (and its RBD's
// _paddr) has already been filled in by the caller.
static void _setupRfd(rfd_t * rfdList_p)
{
    for (int i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        if (i == (NUM_RECEIVE_FRAMES - 1)) {
            /* mark tails and link the lists circularly */
            OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_EL);
            rfdList_p[i]._next = &rfdList_p[0];
            OSSetLE32(&rfdList_p[i]._rbd.size, RBD_SIZE_EL);
            rfdList_p[i]._rbd._next = &rfdList_p[0]._rbd;
        }
        else {
            rfdList_p[i]._next = &rfdList_p[i + 1];
            rfdList_p[i]._rbd._next = &rfdList_p[i + 1]._rbd;
        }

        // Physical link to the next RFD in the ring.
        OSWriteLE32(&rfdList_p[i].link, rfdList_p[i]._next->_paddr);
        // Only the first RFD carries a real RBD pointer; the others are
        // set to the null marker.
        OSWriteLE32(&rfdList_p[i].rbdAddr,
                    (i == 0) ? rfdList_p[0]._rbd._paddr : C_NULL);

        OSWriteLE32(&rfdList_p[i]._rbd.link, rfdList_p[i]._rbd._next->_paddr);
        // Buffer capacity for this RBD (the tail also has RBD_SIZE_EL
        // OR'ed in above).
        OSSetLE32(&rfdList_p[i]._rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE));
    }
}
360
361 //---------------------------------------------------------------------------
362 // Function: _initRfdList
363 //
364 // Purpose:
365 // Create a circularly linked list of receive frame descriptors, and
366 // populate them with receive buffers allocated from our special pool.
367
bool Intel82557::_initRfdList(bool enable = false)
{
    int i;
    IOReturn result;

    /* free allocated packet buffers */
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        if (rfdList_p[i]._rbd._mbuf) {
            freePacket(rfdList_p[i]._rbd._mbuf);
            // rfdList_p[i]._rbd._mbuf = 0;  (cleared by the bzero below)
        }
    }

    /* zero out the entire structure, and re-create it */
    bzero(rfdList_p, sizeof(rfd_t) * NUM_RECEIVE_FRAMES);

    // When 'enable' is false the caller only wants the list torn down.
    if (!enable)
        return true;

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_SF);

        // Record the physical addresses of the RFD and its embedded RBD;
        // _setupRfd() uses them to build the hardware link fields.
        result = IOPhysicalFromVirtual((vm_address_t) &rfdList_p[i],
                                       &rfdList_p[i]._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("%s: Invalid RFD address\n", getName());
            return false;
        }
        result = IOPhysicalFromVirtual((vm_address_t) &rfdList_p[i]._rbd,
                                       &rfdList_p[i]._rbd._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("%s: Invalid RBD address\n", getName());
            return false;
        }
    }

    // Link the RFD/RBD rings circularly and set buffer sizes.
    _setupRfd(rfdList_p);

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        // Pre-load the receive ring with max size mbuf packets.
        //
        struct mbuf * m = allocatePacket(MAX_BUF_SIZE);
        if (!m)
            return false;

        if (updateRFDFromMbuf(&rfdList_p[i], m) == false) {
            IOLog("%s: updateRFDFromMbuf() error\n", getName());
            freePacket(m);
            return false;
        }
    }

    // Head is the first descriptor; tail is the EL-marked last one.
    headRfd = rfdList_p;
    tailRfd = rfdList_p + NUM_RECEIVE_FRAMES - 1;

    return true;
}
425
426 //---------------------------------------------------------------------------
427 // Function: _resetRfdList
428 //
429 // Purpose:
430 // Reset the RFD list before the receiver engine is restarted after
431 // a resource shortage.
432
bool Intel82557::_resetRfdList()
{
    int i;

    // Per-descriptor state that must survive the bzero below. KDB_buf_p
    // (presumably the kernel-debugger packet buffer — see its other uses
    // in the driver) is borrowed as scratch space for this cache.
    struct _cache {
        IOPhysicalAddress rbd_buffer;
        struct mbuf * rbd_mbuf;
        IOPhysicalAddress rfd_paddr;
        IOPhysicalAddress rbd_paddr;
    } * cache_p = (struct _cache *) KDB_buf_p;

    // The scratch buffer is only ETHERMAXPACKET bytes; refuse to run if
    // the cache would not fit.
    if ((sizeof(struct _cache) * NUM_RECEIVE_FRAMES) > ETHERMAXPACKET) {
        IOLog("%s: no space for cache data\n", getName());
        return false;
    }

    /* cache allocated packet buffers */
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        cache_p[i].rbd_mbuf = rfdList_p[i]._rbd._mbuf;
        cache_p[i].rbd_buffer = rfdList_p[i]._rbd.buffer;
        cache_p[i].rfd_paddr = rfdList_p[i]._paddr;
        cache_p[i].rbd_paddr = rfdList_p[i]._rbd._paddr;
    }

    /* zero out the entire structure, and re-create it */
    bzero(rfdList_p, sizeof(rfd_t) * NUM_RECEIVE_FRAMES);

    // Restore the physical addresses before _setupRfd() rebuilds the
    // hardware link fields from them.
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_SF);
        rfdList_p[i]._paddr = cache_p[i].rfd_paddr;
        rfdList_p[i]._rbd._paddr = cache_p[i].rbd_paddr;
    }

    _setupRfd(rfdList_p);

    // Re-attach the previously allocated mbufs and their buffer
    // addresses — no re-allocation is needed on this path.
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        rfdList_p[i]._rbd.buffer = cache_p[i].rbd_buffer;
        rfdList_p[i]._rbd._mbuf = cache_p[i].rbd_mbuf;
    }

    headRfd = rfdList_p;
    tailRfd = rfdList_p + NUM_RECEIVE_FRAMES - 1;

    return true;
}
478
479 //---------------------------------------------------------------------------
480 // Function: _mdiReadPHY:Register:Data
481 //
482 // Purpose:
483 // Read the specified MDI register and return the results.
484
485 bool
486 Intel82557::_mdiReadPHY(UInt8 phyAddress, UInt8 regAddress, UInt16 * data_p)
487 {
488 mdi_control_t mdi;
489
490 mdi = CSR_FIELD(MDI_CONTROL_PHYADDR, phyAddress) |
491 CSR_FIELD(MDI_CONTROL_REGADDR, regAddress) |
492 CSR_FIELD(MDI_CONTROL_OPCODE, MDI_CONTROL_OP_READ);
493
494 OSWriteLE32(&CSR_p->mdiControl, mdi);
495 IODelay(20);
496
497 bool ready = false;
498 for (int i = 0; i < SPIN_TIMEOUT; i++) {
499 if (OSReadLE32(&CSR_p->mdiControl) & MDI_CONTROL_READY) {
500 ready = true;
501 break;
502 }
503 IODelay(20);
504 }
505 if (ready == false) {
506 IOLog("%s: _mdiReadPHYRegisterSuccess timeout\n", getName());
507 return false;
508 }
509
510 *data_p = CSR_VALUE(MDI_CONTROL_DATA, OSReadLE32(&CSR_p->mdiControl));
511 return true;
512 }
513
514 //---------------------------------------------------------------------------
515 // Function: _mdiWritePHY:Register:Data
516 //
517 // Purpose:
518 // Write the specified MDI register with the given data.
519
520 bool Intel82557::_mdiWritePHY(UInt8 phyAddress, UInt8 regAddress, UInt16 data)
521 {
522 mdi_control_t mdi;
523
524 mdi = CSR_FIELD(MDI_CONTROL_PHYADDR, phyAddress) |
525 CSR_FIELD(MDI_CONTROL_REGADDR, regAddress) |
526 CSR_FIELD(MDI_CONTROL_OPCODE, MDI_CONTROL_OP_WRITE) |
527 CSR_FIELD(MDI_CONTROL_DATA, data);
528
529 OSWriteLE32(&CSR_p->mdiControl, mdi);
530 IODelay(20);
531
532 bool ready = false;
533 for (int i = 0; i < SPIN_TIMEOUT; i++) {
534 if (OSReadLE32(&CSR_p->mdiControl) & MDI_CONTROL_READY) {
535 ready = true;
536 break;
537 }
538 IODelay(20);
539 }
540 if (ready == false) {
541 IOLog("%s: _mdiWritePHYRegisterData timeout\n", getName());
542 return false;
543 }
544 return true;
545 }
546
547 //---------------------------------------------------------------------------
548 // Function: nop
549 //
550 // Purpose:
551 // Issue a polled NOP command to the NIC.
552
553 bool Intel82557::nop()
554 {
555 cbHeader_t * nop_p = &overlay_p->nop;
556
557 bzero(nop_p, sizeof(*nop_p));
558 OSWriteLE16(&nop_p->command, CSR_FIELD(CB_CMD, CB_CMD_NOP) | CB_EL);
559 OSWriteLE32(&nop_p->link, C_NULL);
560
561 return _polledCommand(nop_p, overlay_paddr);
562 }
563
564 //---------------------------------------------------------------------------
565 // Function: config
566 //
567 // Purpose:
568 // Issue a polled CONFIGURE command to the NIC.
569
bool Intel82557::config()
{
    UInt8 * cb_p;
    cb_configure_t * cfg_p = &overlay_p->configure;

    /*
     * Fill the configure command block
     */
    bzero(cfg_p, sizeof(*cfg_p));

    OSWriteLE16(&cfg_p->header.command,
                CSR_FIELD(CB_CMD, CB_CMD_CONFIGURE) | CB_EL);
    OSWriteLE32(&cfg_p->header.link, C_NULL);

    // The configure command takes an array of parameter bytes; each
    // index below is the byte number in the 8255x configure block.
    cb_p = cfg_p->byte;
    // Byte 0: number of configuration bytes that follow.
    cb_p[0] = CSR_FIELD(CB_CB0_BYTE_COUNT, CB_CONFIG_BYTE_COUNT);

    // Byte 1: transmit and receive FIFO limits.
    cb_p[1] = CSR_FIELD(CB_CB1_TX_FIFO_LIMIT, CB_CB1_TX_FIFO_0) |
              CSR_FIELD(CB_CB1_RX_FIFO_LIMIT, CB_CB1_RX_FIFO_64);

    cb_p[3] = CB_CB3_MWI_ENABLE; // enable PCI-MWI on 82558 devices

    cb_p[4] = 0; // disable PCI transfer limits
    cb_p[5] = 0;

    // Byte 6: standard TCB and statistics layout, non-direct DMA.
    cb_p[6] = CB_CB6_NON_DIRECT_DMA | CB_CB6_STD_TCB | CB_CB6_STD_STATS;

    // Byte 7: single underrun retry; discard short frames.
    cb_p[7] = CSR_FIELD(CB_CB7_UNDERRUN_RETRY, CB_CB7_UNDERRUN_RETRY_1) |
              CB_CB7_DISC_SHORT_FRAMES;

    // Byte 8: enable CSMA, except on an 82558 or when using the external
    // i82503 serial interface.
    if ((eeprom->getContents()->controllerType != I82558_CONTROLLER_TYPE) &&
        (phyAddr != PHY_ADDRESS_I82503))
        cb_p[8] = CB_CB8_CSMA_EN;

    // Byte 10: 7-byte preamble, no source-address insertion.
    cb_p[10] = CSR_FIELD(CB_CB10_PREAMBLE, CB_CB10_PREAMBLE_7_BYTES) |
               CB_CB10_NSAI;

    // Byte 12: standard interframe spacing.
    cb_p[12] = CSR_FIELD(CB_CB12_IFS, CB_CB12_IFS_96_BIT_TIMES);

    // Bytes 13-14: flow-control frame type.
    cb_p[13] = CSR_FIELD(CB_CB13_FC_TYPE_LSB, CB_CB13_FC_TYPE_LSB_DEF);
    cb_p[14] = CSR_FIELD(CB_CB14_FC_TYPE_MSB, CB_CB14_FC_TYPE_MSB_DEF);

    // Byte 15: CRS/CDT only when CSMA is off; promiscuous per driver state.
    cb_p[15] = ((cb_p[8] & CB_CB8_CSMA_EN) ? 0 : CB_CB15_CRS_CDT) |
               (promiscuousEnabled ? CB_CB15_PROMISCUOUS : 0);

    // Bytes 16-17: flow-control delay.
    cb_p[16] = CSR_FIELD(CB_CB16_FC_DELAY_LSB, CB_CB16_FC_DELAY_LSB_DEF);
    cb_p[17] = CSR_FIELD(CB_CB17_FC_DELAY_MSB, CB_CB17_FC_DELAY_MSB_DEF);

    // Byte 18: pad short TX frames, strip RX CRC.
    cb_p[18] = CB_CB18_PADDING | CB_CB18_STRIPPING;

#if 0 // XXX - need to fix this
    /*
     * Force full duplex if there is a user override, or we are using Phy 0
     * and full duplex mode is enabled. The FDX# pin is wired to Phy 1,
     * which means that the 82557 can't autodetect the setting correctly.
     */
    if (forceFullDuplex || (phyAddr == PHY_ADDRESS_0 && fullDuplexMode))
        cb_p[19] = CB_CB19_FORCE_FDX;
#endif

    // Byte 19: automatic duplex detection; optional 802.3x flow control.
    cb_p[19] = CB_CB19_AUTO_FDX;
    if (flowControl) {
        cb_p[19] |= ( CB_CB19_TX_FC |
                      CB_CB19_RX_FC_RESTOP |
                      CB_CB19_RX_FC_RESTART |
                      CB_CB19_REJECT_FC );
    }

    // Byte 20: flow-control address LSB.
    cb_p[20] = CSR_FIELD(CB_CB20_FC_ADDR_LSB, CB_CB20_FC_ADDR_LSB_DEF);

    // Make sure the block is visible to the device before issuing it.
    IOSync();

    return _polledCommand((cbHeader_t *) cfg_p, overlay_paddr);
}
644
645 //---------------------------------------------------------------------------
646 // Function: iaSetup
647 //
648 // Purpose:
649 // Issue a polled IndividualAddressSETUP command to the NIC.
650 //
651 bool Intel82557::iaSetup()
652 {
653 cb_iasetup_t * iaSetup_p = &overlay_p->iasetup;
654
655 /*
656 * Fill the IA-setup command block
657 */
658 bzero(iaSetup_p, sizeof(*iaSetup_p));
659
660 OSWriteLE16(&iaSetup_p->header.command, CSR_FIELD(CB_CMD, CB_CMD_IASETUP) |
661 CB_EL);
662 OSWriteLE32(&iaSetup_p->header.link, C_NULL);
663 iaSetup_p->addr = myAddress;
664
665 return _polledCommand((cbHeader_t *) iaSetup_p, overlay_paddr);
666 }
667
668 //---------------------------------------------------------------------------
669 // Function: mcSetup
670 //
671 // Purpose:
672 // Issue a polled MultiCastSETUP command to the NIC. If 'fromData' is
673 // true, then we ignore the addrs/count arguments and instead use the
674 // multicast address list property in our interface client object.
675
bool Intel82557::mcSetup(IOEthernetAddress * addrs,
                         UInt count,
                         bool fromData = false)
{
    cb_mcsetup_t * mcSetup_p;
    bool cmdResult;          // final command outcome; set inside the do-block
    IOReturn result;
    IOPhysicalAddress mcSetup_paddr;

    if (fromData) {
        // mcSetup() was not called by the setMulticastList() function.
        // We should get the multicast list stored in the interface
        // object's property table.
        //
        // mcSetup() is always executed by the default workloop thread,
        // thus we don't have to worry about the address list being
        // changed while we go through it.
        //
        addrs = 0;
        count = 0;

        if (netif) {
            OSData * mcData = OSDynamicCast(OSData,
                netif->getProperty(kIOMulticastFilterData));
            if (mcData) {
                addrs = (IOEthernetAddress *) mcData->getBytesNoCopy();
                count = mcData->getLength() / sizeof(IOEthernetAddress);
                assert(addrs && count);
            }
        }
    }

    // The MC-setup block is built in its own page-aligned allocation
    // (not the shared overlay).
    // NOTE(review): there is no visible check that 'count' addresses fit
    // in the single page allocated here — confirm an upper bound is
    // enforced by the caller.
    mcSetup_p = (cb_mcsetup_t *) IOMallocAligned(PAGE_SIZE, PAGE_SIZE);
    if (!mcSetup_p) {
        IOLog("%s: mcSetup:IOMallocAligned return NULL\n", getName());
        return false;
    }

    // Hold the debugger lock across the polled command; released below
    // on every path (errors 'break' out of the do-block).
    reserveDebuggerLock();

    do {
        cmdResult = false;

        OSWriteLE16(&mcSetup_p->header.status, 0);
        OSWriteLE16(&mcSetup_p->header.command,
                    CSR_FIELD(CB_CMD, CB_CMD_MCSETUP) | CB_EL);
        OSWriteLE32(&mcSetup_p->header.link, C_NULL);

        /* fill in the addresses (count may be zero) */
        for (UInt i = 0; i < count; i++)
            mcSetup_p->addrs[i] = addrs[i];

        /* Set the number of bytes in the MC list, if the count is zero,
         * it is equivalent to disabling the multicast filtering mechanism.
         */
        OSWriteLE16(&mcSetup_p->count, count * sizeof(IOEthernetAddress));

        result = IOPhysicalFromVirtual((vm_address_t) mcSetup_p,
                                       &mcSetup_paddr);
        if (result != kIOReturnSuccess) {
            IOLog("%s: Invalid MC-setup command block address\n", getName());
            break;
        }

        if (!_polledCommand((cbHeader_t *) mcSetup_p, mcSetup_paddr)) {
            IOLog("%s: MC-setup command failed 0x%x\n", getName(),
                  OSReadLE16(&mcSetup_p->header.status));
            break;
        }

        // Completion alone is not success; the chip must also set OK.
        cmdResult = (OSReadLE16(&mcSetup_p->header.status) & CB_STATUS_OK) ?
                    true : false;
    } while (0);

    releaseDebuggerLock();

    IOFreeAligned(mcSetup_p, PAGE_SIZE);

    return cmdResult;
}
756
757 //---------------------------------------------------------------------------
758 // Function: _selfTest
759 //
760 // Purpose:
761 // Issue a PORT self test command to the NIC and verify the results.
762
763 bool Intel82557::_selfTest()
764 {
765 port_selftest_t * test_p = (port_selftest_t *) overlay_p;
766 UInt32 results;
767
768 OSWriteLE32(&test_p->signature, 0);
769 OSWriteLE32(&test_p->results, ~0);
770 sendPortCommand(portSelfTest_e, overlay_paddr);
771 IOSleep(20);
772 if (OSReadLE32(&test_p->signature) == 0) {
773 IOLog("%s: Self test timed out\n", getName());
774 return false;
775 }
776
777 results = OSReadLE32(&test_p->results);
778 if (results) { /* report errors from self test */
779 if (results & PORT_SELFTEST_ROM)
780 IOLog("%s: Self test reports invalid ROM contents\n",
781 getName());
782 if (results & PORT_SELFTEST_REGISTER)
783 IOLog("%s: Self test reports internal register failure\n",
784 getName());
785 if (results & PORT_SELFTEST_DIAGNOSE)
786 IOLog("%s: Self test reports serial subsystem failure\n",
787 getName());
788 if (results & PORT_SELFTEST_GENERAL)
789 IOLog("%s: Self test failed\n", getName());
790 return false;
791 }
792 return true;
793 }
794
795 //---------------------------------------------------------------------------
796 // Function: sendPortCommand
797 //
798 // Purpose:
799 // Issue an 82557 PORT command.
800 //
801 void Intel82557::sendPortCommand(port_command_t command, UInt arg)
802 {
803 OSWriteLE32(&CSR_p->port, (arg & PORT_ADDRESS_MASK) |
804 CSR_FIELD(PORT_FUNCTION, command));
805 return;
806 }
807
808 //---------------------------------------------------------------------------
809 // Function: enableAdapterInterrupts, disableAdapterInterrupts
810 //
811 // Purpose:
812 // Turn on/off interrupts at the adapter.
813
814 void Intel82557::enableAdapterInterrupts()
815 {
816 /*
817 * For 82558, mask (disable) the ER and FCP interrupts.
818 */
819 UInt8 interruptByte;
820 interruptByte = SCB_INTERRUPT_ER | SCB_INTERRUPT_FCP;
821 OSWriteLE8(&CSR_p->interrupt, interruptByte);
822 interruptEnabled = true;
823 return;
824 }
825
826 void Intel82557::disableAdapterInterrupts()
827 {
828 UInt8 interruptByte;
829 interruptByte = SCB_INTERRUPT_M;
830 OSWriteLE8(&CSR_p->interrupt, interruptByte);
831 interruptEnabled = false;
832 return;
833 }
834
835 //---------------------------------------------------------------------------
836 // Function: _logCounters
837 //
838 // Purpose:
839 // If Verbose is defined as yes, log extra information about errors that
840 // have occurred.
841
842 static inline void
843 _logCounters(errorCounters_t * errorCounters_p)
844 {
845 if (errorCounters_p->tx_good_frames)
846 IOLog("tx_good_frames %ld\n",
847 OSReadLE32(&errorCounters_p->tx_good_frames));
848 if (errorCounters_p->tx_maxcol_errors)
849 IOLog("tx_maxcol_errors %ld\n",
850 OSReadLE32(&errorCounters_p->tx_maxcol_errors));
851 if (errorCounters_p->tx_late_collision_errors)
852 IOLog("tx_late_collision_errors %ld\n",
853 OSReadLE32(&errorCounters_p->tx_late_collision_errors));
854 if (errorCounters_p->tx_underrun_errors)
855 IOLog("tx_underrun_errors %ld\n",
856 OSReadLE32(&errorCounters_p->tx_underrun_errors));
857 if (errorCounters_p->tx_lost_carrier_sense_errors)
858 IOLog("tx_lost_carrier_sense_errors %ld\n",
859 OSReadLE32(&errorCounters_p->tx_lost_carrier_sense_errors));
860 if (errorCounters_p->tx_deferred)
861 IOLog("tx_deferred %ld\n", OSReadLE32(&errorCounters_p->tx_deferred));
862 if (errorCounters_p->tx_single_collisions)
863 IOLog("tx_single_collisions %ld\n",
864 OSReadLE32(&errorCounters_p->tx_single_collisions));
865 if (errorCounters_p->tx_multiple_collisions)
866 IOLog("tx_multiple_collisions %ld\n",
867 OSReadLE32(&errorCounters_p->tx_multiple_collisions));
868 if (errorCounters_p->tx_total_collisions)
869 IOLog("tx_total_collisions %ld\n",
870 OSReadLE32(&errorCounters_p->tx_total_collisions));
871 if (errorCounters_p->rx_good_frames)
872 IOLog("rx_good_frames %ld\n",
873 OSReadLE32(&errorCounters_p->rx_good_frames));
874 if (errorCounters_p->rx_crc_errors)
875 IOLog("rx_crc_errors %ld\n",
876 OSReadLE32(&errorCounters_p->rx_crc_errors));
877 if (errorCounters_p->rx_alignment_errors)
878 IOLog("rx_alignment_errors %ld\n",
879 OSReadLE32(&errorCounters_p->rx_alignment_errors));
880 if (errorCounters_p->rx_resource_errors)
881 IOLog("rx_resource_errors %ld\n",
882 OSReadLE32(&errorCounters_p->rx_resource_errors));
883 if (errorCounters_p->rx_overrun_errors)
884 IOLog("rx_overrun_errors %ld\n",
885 OSReadLE32(&errorCounters_p->rx_overrun_errors));
886 if (errorCounters_p->rx_collision_detect_errors)
887 IOLog("rx_collision_detect_errors %ld\n",
888 OSReadLE32(&errorCounters_p->rx_collision_detect_errors));
889 if (errorCounters_p->rx_short_frame_errors)
890 IOLog("rx_short_frame_errors %ld\n",
891 OSReadLE32(&errorCounters_p->rx_short_frame_errors));
892 return;
893 }
894
895 //---------------------------------------------------------------------------
896 // Function: _dumpStatistics
897 //
898 // Purpose:
899 // _dumpStatistics issues a new statistics dump command. Every few seconds,
900 // _updateStatistics is called from timeoutOccurred to check for updated
901 // statistics. If complete, update our counters, and issue a new dump
902 // command.
903
904 bool Intel82557::_dumpStatistics()
905 {
906 reserveDebuggerLock();
907
908 if (!_waitSCBCommandClear(CSR_p)) {
909 IOLog("%s: _dumpStatistics: _waitSCBCommandClear failed\n", getName());
910 return false;
911 }
912
913 OSWriteLE8(&CSR_p->command,
914 CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_DUMP_RESET_STAT));
915
916 prevCUCommand = SCB_CUC_DUMP_RESET_STAT;
917
918 releaseDebuggerLock();
919
920 return true;
921 }
922
923 //---------------------------------------------------------------------------
924 // Function: _updateStatistics
925 //
926 // Purpose:
927 // Gather statistics information from the adapter at regular intervals.
928
void Intel82557::_updateStatistics()
{
    // DUMP_STATUS is the sentinel the driver writes into _status (see
    // hwInit and the bottom of this function); the chip overwrites it
    // when a statistics dump completes, so any other value means fresh
    // counters are available.
    if (OSReadLE32(&errorCounters_p->_status) != DUMP_STATUS) {
        if (verbose)
            _logCounters(errorCounters_p);

        // Ethernet transmitter stats.
        //
        etherStats->dot3StatsEntry.singleCollisionFrames +=
            OSReadLE32(&errorCounters_p->tx_single_collisions);

        etherStats->dot3StatsEntry.multipleCollisionFrames +=
            OSReadLE32(&errorCounters_p->tx_multiple_collisions);

        etherStats->dot3StatsEntry.lateCollisions +=
            OSReadLE32(&errorCounters_p->tx_late_collision_errors);

        etherStats->dot3StatsEntry.excessiveCollisions +=
            OSReadLE32(&errorCounters_p->tx_maxcol_errors);

        etherStats->dot3StatsEntry.deferredTransmissions +=
            OSReadLE32(&errorCounters_p->tx_deferred);

        etherStats->dot3StatsEntry.carrierSenseErrors +=
            OSReadLE32(&errorCounters_p->tx_lost_carrier_sense_errors);

        etherStats->dot3TxExtraEntry.underruns +=
            OSReadLE32(&errorCounters_p->tx_underrun_errors);

        // Ethernet receiver stats.
        //
        etherStats->dot3StatsEntry.alignmentErrors +=
            OSReadLE32(&errorCounters_p->rx_alignment_errors);

        etherStats->dot3StatsEntry.fcsErrors +=
            OSReadLE32(&errorCounters_p->rx_crc_errors);

        etherStats->dot3RxExtraEntry.resourceErrors +=
            OSReadLE32(&errorCounters_p->rx_resource_errors);

        etherStats->dot3RxExtraEntry.overruns +=
            OSReadLE32(&errorCounters_p->rx_overrun_errors);

        etherStats->dot3RxExtraEntry.collisionErrors +=
            OSReadLE32(&errorCounters_p->rx_collision_detect_errors);

        etherStats->dot3RxExtraEntry.frameTooShorts +=
            OSReadLE32(&errorCounters_p->rx_short_frame_errors);

        // Generic network stats. For the error counters, we assume
        // the Ethernet stats will never be cleared. Thus we derive the
        // error counters by summing the appropriate Ethernet error fields.
        //
        netStats->outputErrors =
            ( etherStats->dot3StatsEntry.lateCollisions
            + etherStats->dot3StatsEntry.excessiveCollisions
            + etherStats->dot3StatsEntry.carrierSenseErrors
            + etherStats->dot3TxExtraEntry.underruns
            + etherStats->dot3TxExtraEntry.resourceErrors);

        netStats->inputErrors =
            ( etherStats->dot3StatsEntry.fcsErrors
            + etherStats->dot3StatsEntry.alignmentErrors
            + etherStats->dot3RxExtraEntry.resourceErrors
            + etherStats->dot3RxExtraEntry.overruns
            + etherStats->dot3RxExtraEntry.collisionErrors
            + etherStats->dot3RxExtraEntry.frameTooShorts);

        netStats->collisions +=
            OSReadLE32(&errorCounters_p->tx_total_collisions);

        // Re-arm the sentinel and kick off the next dump cycle.
        OSWriteLE32(&errorCounters_p->_status, DUMP_STATUS);
        _dumpStatistics();
    }
}
1004
1005 //---------------------------------------------------------------------------
1006 // Function: _allocateMemPage
1007 //
1008 // Purpose:
1009 // Allocate a page of memory.
1010
1011 bool Intel82557::_allocateMemPage(pageBlock_t * p)
1012 {
1013 p->memSize = PAGE_SIZE;
1014 p->memPtr = IOMallocAligned(p->memSize, PAGE_SIZE);
1015
1016 if (!p->memPtr)
1017 return false;
1018
1019 bzero(p->memPtr, p->memSize);
1020 p->memAllocPtr = p->memPtr; /* initialize for allocation routine */
1021 p->memAvail = p->memSize;
1022
1023 return true;
1024 }
1025
1026 //---------------------------------------------------------------------------
1027 // Function: _freeMemPage
1028 //
1029 // Purpose:
1030 // Deallocate a page of memory.
1031 //
1032 void Intel82557::_freeMemPage(pageBlock_t * p)
1033 {
1034 IOFreeAligned(p->memPtr, p->memSize);
1035 }
1036
1037 //---------------------------------------------------------------------------
1038 // Function: hwInit
1039 //
1040 // Purpose:
1041 // Reset/configure the chip, detect the PHY.
1042
bool Intel82557::hwInit()
{
    // Bring the chip to a known state. Interrupts are masked both
    // before and after the reset, since the reset restores the chip's
    // power-on interrupt settings.
    disableAdapterInterrupts();
    _resetChip();
    disableAdapterInterrupts();

    /* disable early RX interrupt */
    OSWriteLE8(&CSR_p->earlyRxInterrupt, 0);

    /* load command unit base address */
    // A zero base is loaded so that command-block pointers written to
    // the SCB later can be absolute physical addresses.
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: CU _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&CSR_p->pointer, 0);
    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_LOAD_BASE));
    prevCUCommand = SCB_CUC_LOAD_BASE;

    /* load receive unit base address (also zero) */
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: RU _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&CSR_p->pointer, 0);
    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_LOAD_BASE));

    // Tell the chip where to dump its statistics counters. The _status
    // word is primed with DUMP_STATUS so a completed dump can be
    // detected later (see the statistics path).
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: before LOAD_DUMP_COUNTERS_ADDRESS:"
              " _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&errorCounters_p->_status, DUMP_STATUS);
    OSWriteLE32(&CSR_p->pointer, errorCounters_paddr);
    OSWriteLE8(&CSR_p->command,
               CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_LOAD_DUMP_ADDR));
    prevCUCommand = SCB_CUC_LOAD_DUMP_ADDR;

    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: before intrACK _waitSCBCommandClear failed\n",
              getName());
        return false;
    }

    /* Setup flow-control threshold */
    OSWriteLE8(&CSR_p->flowControlThreshold,
               CSR_FIELD(FC_THRESHOLD, FC_THRESHOLD_512));

    _intrACK(CSR_p); /* ack any pending interrupts */

    // Probe for the PHY and record its identifier; only the model bits
    // are kept for later comparisons.
    _phyProbe();

    phyID = _phyGetID();
    VPRINT("%s: PHY model id is 0x%08lx\n", getName(), phyID);
    phyID &= PHY_MODEL_MASK;

    // Issue the chip CONFIGURE command, then program the individual
    // (station) address.
    // NOTE(review): the purpose of the 500 ms pause between config()
    // and iaSetup() is not documented here - presumably it lets the
    // configuration/link settle. Verify before changing.
    if (!config())
        return false;
    IOSleep(500);

    if (!iaSetup())
        return false;

    _intrACK(CSR_p); /* ack any pending interrupts */

    return true;
}
1109
//---------------------------------------------------------------------------
// Function: _memAllocFrom
//
// Purpose:
//   Return the next aligned chunk of memory in our shared memory page.
1116 void * Intel82557::_memAllocFrom(pageBlock_t * p, UInt allocSize, UInt align)
1117 {
1118 void * allocPtr;
1119 UInt sizeReal;
1120
1121 if (align == 0)
1122 return 0;
1123
1124 // Advance allocPtr to next aligned boundary.
1125 allocPtr =
1126 (void *)((UInt)((UInt) p->memAllocPtr + (align - 1)) & (~(align - 1)));
1127
1128 // Actual size of required storage. We need to take the alignment padding
1129 // into account.
1130 sizeReal = allocSize + ((UInt) allocPtr - (UInt) p->memAllocPtr);
1131
1132 if (sizeReal > p->memAvail)
1133 return 0;
1134
1135 p->memAllocPtr = (void *)((UInt) p->memAllocPtr + sizeReal);
1136 p->memAvail = p->memSize - ((UInt) p->memAllocPtr - (UInt) p->memPtr);
1137 return allocPtr;
1138 }
1139
1140 //---------------------------------------------------------------------------
1141 // Function: coldInit
1142 //
1143 // Purpose:
1144 // One-time initialization code. This is called by start(), before we
1145 // attach any client objects.
1146
bool Intel82557::coldInit()
{
    IOReturn result;
    IOPhysicalAddress paddr;

    disableAdapterInterrupts();

    /* allocate and initialize shared memory pointers */
    // NOTE(review): the failure paths below do not free pages already
    // allocated; presumably free()/stop() releases them when start()
    // fails - verify against the caller before relying on it.
    if (!_allocateMemPage(&shared)) {
        IOLog("%s: Can't allocate shared memory page\n", getName());
        return false;
    }
    if (!_allocateMemPage(&txRing)) {
        IOLog("%s: Can't allocate memory page for TX ring\n", getName());
        return false;
    }
    if (!_allocateMemPage(&rxRing)) {
        IOLog("%s: Can't allocate memory page for RX ring\n", getName());
        return false;
    }

    /* allocate memory for shared data structures
     * self test needs to be
     * 16 byte aligned
     */
    overlay_p = (overlay_t *) _memAllocFrom(&shared, sizeof(overlay_t),
                                            PARAGRAPH_ALIGNMENT);
    if (!overlay_p)
        return false;
    // The chip is handed physical addresses, so resolve each shared
    // structure's physical address up front.
    result = IOPhysicalFromVirtual((vm_address_t) overlay_p, &overlay_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid command block address\n", getName());
        return false;
    }

    // Transmit control block (TCB) ring.
    tcbList_p = (tcb_t *) _memAllocFrom(&txRing,
                                        sizeof(tcb_t) * NUM_TRANSMIT_FRAMES,
                                        CACHE_ALIGNMENT);
    if (!tcbList_p)
        return false;

    // Dedicated TCB used by the kernel-debugger transmit path
    // (_sendPacket); its own physical address is cached inside it.
    KDB_tcb_p = (tcb_t *) _memAllocFrom(&shared,
                                        sizeof(tcb_t),
                                        CACHE_ALIGNMENT);
    if (!KDB_tcb_p)
        return false;
    result = IOPhysicalFromVirtual((vm_address_t) KDB_tcb_p,
                                   &KDB_tcb_p->_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid TCB address\n", getName());
        return false;
    }

    // Point the debugger TCB at its own embedded TBD array.
    result = IOPhysicalFromVirtual((vm_address_t) &KDB_tcb_p->_tbds, &paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid TCB->_TBD address\n", getName());
        return false;
    }
    OSWriteLE32(&KDB_tcb_p->tbdAddr, paddr);

    // Pre-allocated buffer that outgoing debugger frames are copied
    // into before transmission.
    KDB_buf_p = _memAllocFrom(&shared, ETHERMAXPACKET, DWORD_ALIGNMENT);
    if (!KDB_buf_p)
        return false;
    result = IOPhysicalFromVirtual((vm_address_t) KDB_buf_p, &KDB_buf_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid address\n", getName());
        return false;
    }

    // Statistics dump area; the chip writes its counters here after a
    // LOAD_DUMP_ADDR command (see hwInit).
    errorCounters_p = (errorCounters_t *) _memAllocFrom(&shared,
                                            sizeof(errorCounters_t),
                                            DWORD_ALIGNMENT);
    if (!errorCounters_p)
        return false;
    result = IOPhysicalFromVirtual((vm_address_t) errorCounters_p,
                                   &errorCounters_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid errorCounters address\n", getName());
        return false;
    }

    // Receive frame descriptor (RFD) ring.
    rfdList_p = (rfd_t *) _memAllocFrom(&rxRing,
                                        sizeof(rfd_t) * NUM_RECEIVE_FRAMES,
                                        CACHE_ALIGNMENT);
    if (!rfdList_p)
        return false;

    if (!_selfTest())
        return false;

    // Station (MAC) address comes from the EEPROM contents.
    myAddress = eeprom->getContents()->addr;

    return true;
}
1241
//---------------------------------------------------------------------------
// Function: receiveInterruptOccurred
//
// Purpose:
//   Hand up received frames.
bool Intel82557::receiveInterruptOccurred()
{
    // Walk the RFD ring, handing every completed frame to the network
    // stack, then recycle each finished RFD/RBD pair onto the tail of
    // the ring. Returns true if at least one packet was queued (the
    // caller then flushes the input queue).
    bool packetsQueued = false;

    while (OSReadLE16(&headRfd->status) & RFD_STATUS_C) {
        rbd_count_t rbd_count = OSReadLE32(&headRfd->_rbd.count);

        // rxCount does NOT include the Ethernet CRC (FCS).
        //
        UInt rxCount = CSR_VALUE(RBD_COUNT, rbd_count);

#if 0
        // When the receive unit runs out of resources, it will
        // skip over RFD/RBD, marking them as complete, but the RBD will
        // have zero bytes and the EOF bit will not be set.
        // We just skip over those and allow them to be recycled.
        //
        // In those cases, the RFD->status word will be 0x8220.

        /* should have exactly 1 rbd per rfd */
        if (!(rbd_count & RBD_COUNT_EOF)) {
            IOLog("%s: more than 1 rbd, frame size %d\n", getName(), rxCount);

            IOLog("%s: RFD status: %04x\n", getName(),
                  OSReadLE16(&headRfd->status));

            issueReset();
            return;
        }
#endif

        // Skip errored frames, runts, and anything received while the
        // interface is disabled; the descriptor is still recycled below.
        if ((!(OSReadLE16(&headRfd->status) & RFD_STATUS_OK)) ||
            (rxCount < (ETHERMINPACKET - ETHERCRC)) ||
            !enabledForNetif) {
            ; /* bad or unwanted packet */
        }
        else {
            struct mbuf * m = headRfd->_rbd._mbuf;
            struct mbuf * m_in = 0; // packet to pass up to inputPacket()
            bool replaced;

            packetsReceived = true;

            // Either replace the ring's mbuf with a fresh one and pass
            // the original up, or pass up a copy and keep the ring mbuf.
            m_in = replaceOrCopyPacket(&m, rxCount, &replaced);
            if (!m_in) {
                etherStats->dot3RxExtraEntry.resourceErrors++;
                goto RX_INTR_ABORT;
            }

            // If the ring mbuf was replaced, rewire this RFD/RBD to the
            // replacement; on failure drop everything for this slot.
            if (replaced && (updateRFDFromMbuf(headRfd, m) == false)) {
                freePacket(m); // free the new replacement mbuf.
                m_in = 0;      // pass up nothing.
                etherStats->dot3RxExtraEntry.resourceErrors++;
                IOLog("%s: updateRFDFromMbuf() error\n", getName());
                goto RX_INTR_ABORT;
            }

            netif->inputPacket(m_in, rxCount, true);
            packetsQueued = true;
            netStats->inputPackets++;
        }

RX_INTR_ABORT:
        /* clear fields in rfd */
        // Re-arm this RFD as the new end-of-list element (EL set).
        OSWriteLE16(&headRfd->status, 0);
        OSWriteLE16(&headRfd->command, (RFD_COMMAND_SF | RFD_COMMAND_EL));
        OSWriteLE32(&headRfd->rbdAddr, C_NULL);
        OSWriteLE32(&headRfd->misc, 0);

        /* clear fields in rbd */
        OSWriteLE32(&headRfd->_rbd.count, 0);
        OSWriteLE32(&headRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE) |
                    RBD_SIZE_EL);

        /* adjust tail markers */
        // Rewrite the previous tail without the EL bits so the receive
        // unit can advance past it into the re-armed descriptor.
        OSWriteLE32(&tailRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE));
        OSWriteLE16(&tailRfd->command, RFD_COMMAND_SF);

        tailRfd = headRfd;        // new tail
        headRfd = headRfd->_next; // new head
    } /* while */

    return packetsQueued;
}
1332
1333 //---------------------------------------------------------------------------
1334 // Function: transmitInterruptOccurred
1335 //
1336 // Purpose:
1337 // Free up packets associated with any completed TCB's.
1338
1339 void Intel82557::transmitInterruptOccurred()
1340 {
1341 tcbQ_t * tcbQ_p = &tcbQ;
1342 tcb_t * head;
1343
1344 head = tcbQ_p->activeHead_p;
1345 while (tcbQ_p->numFree < tcbQ_p->numTcbs &&
1346 (OSReadLE16(&head->status) & TCB_STATUS_C))
1347 {
1348 OSWriteLE16(&head->status, 0);
1349 if (head->_mbuf) {
1350 freePacket(head->_mbuf);
1351 head->_mbuf = 0;
1352 }
1353 head = tcbQ_p->activeHead_p = head->_next;
1354 tcbQ_p->numFree++;
1355 }
1356
1357 return;
1358 }
1359
1360 //---------------------------------------------------------------------------
1361 // Function: interruptOccurred
1362 //
1363 // Purpose:
1364 // Field an interrupt.
1365
void Intel82557::interruptOccurred(IOInterruptEventSource * src, int /*count*/)
{
    scb_status_t status;
    bool flushInputQ = false;
    bool doService = false;

    // Serialize against the polled kernel-debugger paths, which touch
    // the same rings and SCB registers.
    reserveDebuggerLock();

    if (interruptEnabled == false) {
        // Spurious interrupt: ack it so the line is released, but do no
        // further processing.
        _intrACK(CSR_p);
        releaseDebuggerLock();
        IOLog("%s: unexpected interrupt\n", getName());
        return;
    }

    /*
     * Loop until the interrupt line becomes deasserted.
     */
    while (1) {
        if ((status = _intrACK(CSR_p)) == 0)
            break;

        /*
         * RX interrupt.
         */
        if (status & (SCB_STATUS_FR | SCB_STATUS_RNR)) {

            flushInputQ = receiveInterruptOccurred() || flushInputQ;

            etherStats->dot3RxExtraEntry.interrupts++;

            // RNR: the receive unit ran out of resources. Abort it,
            // rebuild the RFD list, and restart.
            if (status & SCB_STATUS_RNR) {
                etherStats->dot3RxExtraEntry.resets++;

                _abortReceive();
                _resetRfdList();

                if (!_startReceive()) {
                    IOLog("%s: Unable to restart receiver\n", getName());
                    // issueReset(); /* shouldn't need to do this. */
                }
            }
        }

        /*
         * TX interrupt.
         */
        if (status & (SCB_STATUS_CX | SCB_STATUS_CNA)) {
            transmitInterruptOccurred();
            etherStats->dot3TxExtraEntry.interrupts++;
            doService = true;
        }
    }

    releaseDebuggerLock();

    if (enabledForNetif) {
        // Flush all packets received and pass them to the network stack.
        //
        if (flushInputQ)
            netif->flushInputQueue();

        // Call service() without holding the debugger lock to prevent a
        // deadlock when service() calls our outputPacket() function.
        //
        if (doService)
            transmitQueue->service();
    }
}
1435
1436 //---------------------------------------------------------------------------
1437 // Function: updateTCBForMbuf
1438 //
1439 // Update the TxCB pointed by tcb_p to point to the mbuf chain 'm'.
1440 // Returns the mbuf encoded onto the TxCB.
1441
1442 struct mbuf *
1443 Intel82557::updateTCBForMbuf(tcb_t * tcb_p, struct mbuf * m)
1444 {
1445 // Set the invariant TCB fields.
1446 //
1447 OSWriteLE16(&tcb_p->status, 0);
1448
1449 if (++txCount == TRANSMIT_INT_DELAY) {
1450 OSWriteLE16(&tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) |
1451 TCB_COMMAND_S |
1452 TCB_COMMAND_SF |
1453 TCB_COMMAND_I);
1454 txCount = 0;
1455 }
1456 else
1457 OSWriteLE16(&tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) |
1458 TCB_COMMAND_S |
1459 TCB_COMMAND_SF);
1460
1461 OSWriteLE8(&tcb_p->threshold, TCB_TX_THRESHOLD);
1462 OSWriteLE16(&tcb_p->count, 0); // all data are in the TBD's, none in TxCB
1463
1464 // Since the format of a TBD closely matches the structure of an
1465 // 'struct IOPhysicalSegment', we shall have the cursor update the TBD list
1466 // directly.
1467 //
1468 UInt segments = txMbufCursor->getPhysicalSegmentsWithCoalesce(m,
1469 (struct IOPhysicalSegment *) &tcb_p->_tbds[0],
1470 TBDS_PER_TCB);
1471
1472 if (!segments) {
1473 IOLog("%s: getPhysicalSegments error, pkt len = %d\n",
1474 getName(), m->m_pkthdr.len);
1475 return 0;
1476 }
1477
1478 // Update the TBD array size count.
1479 //
1480 OSWriteLE8(&tcb_p->number, segments);
1481
1482 return m;
1483 }
1484
1485 //---------------------------------------------------------------------------
1486 // Function: outputPacket <IONetworkController>
1487 //
1488 // Purpose:
1489 // Transmit the packet handed by our IOOutputQueue.
1490 // TCBs have the suspend bit set, so that the CU goes into the suspend
1491 // state when done. We use the CU_RESUME optimization that allows us to
1492 // issue CU_RESUMES without waiting for SCB command to clear.
1493 //
UInt32 Intel82557::outputPacket(struct mbuf * m, void * param)
{
    tcb_t * tcb_p;

    if (!enabledForNetif) { // drop the packet.
        freePacket(m);
        return kIOReturnOutputDropped;
    }

    // Serialize with the interrupt handler and the debugger paths.
    reserveDebuggerLock();

    if (tcbQ.numFree == 0) { // retry when more space is available.
        releaseDebuggerLock();
        return kIOReturnOutputStall;
    }

    packetsTransmitted = true;
    netStats->outputPackets++;

    // Claim the next free TCB and encode the mbuf chain onto it.
    tcb_p = tcbQ.freeHead_p;

    tcb_p->_mbuf = updateTCBForMbuf(tcb_p, m);
    if (tcb_p->_mbuf == 0) {
        etherStats->dot3TxExtraEntry.resourceErrors++;
        goto fail;
    }

    /* update the queue */
    tcbQ.numFree--;
    tcbQ.freeHead_p = tcbQ.freeHead_p->_next;

    /* The TCB is already setup and the suspend bit set. Now clear the
     * suspend bit of the previous TCB.
     */
    if (tcbQ.activeTail_p != tcb_p)
        OSClearLE16(&tcbQ.activeTail_p->command, TCB_COMMAND_S);
    tcbQ.activeTail_p = tcb_p;

    /*
     * CUC_RESUME is optimized such that it is unnecessary to wait
     * for the CU to clear the SCB command word if the previous command
     * was a resume and the CU state is not idle.
     */
    if (CSR_VALUE(SCB_STATUS_CUS, OSReadLE16(&CSR_p->status)) == SCB_CUS_IDLE)
    {
        // CU idle: a full CU_START with this TCB's physical address is
        // required.
        if (!_waitSCBCommandClear(CSR_p)) {
            IOLog("%s: outputPacket: _waitSCBCommandClear error\n", getName());
            etherStats->dot3TxExtraEntry.timeouts++;
            goto fail;
        }
        OSWriteLE32(&CSR_p->pointer, tcb_p->_paddr);
        OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_START));
        prevCUCommand = SCB_CUC_START;
    }
    else {
        // CU active or suspended: a CU_RESUME suffices. Only wait for
        // the SCB command word when the previous command was not
        // already a resume (see the optimization note above).
        if (prevCUCommand != SCB_CUC_RESUME) {
            if (!_waitSCBCommandClear(CSR_p)) {
                IOLog("%s: outputPacket: _waitSCBCommandClear error\n",
                      getName());
                etherStats->dot3TxExtraEntry.timeouts++;
                goto fail;
            }
        }
        OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC,SCB_CUC_RESUME));
        prevCUCommand = SCB_CUC_RESUME;
    }
    releaseDebuggerLock();
    return kIOReturnOutputSuccess;

fail:
    // Drop the packet: free the mbuf and detach it from the TCB.
    freePacket(m);
    tcb_p->_mbuf = 0;
    releaseDebuggerLock();
    return kIOReturnOutputDropped;
}
1569
1570 //---------------------------------------------------------------------------
1571 // Function: _receivePacket
1572 //
1573 // Purpose:
1574 // Part of kerneldebugger protocol.
1575 // Returns true if a packet was received successfully.
1576 //
bool Intel82557::_receivePacket(void * pkt, UInt * len, UInt timeout)
{
    bool processPacket = true;
    bool ret = false;
    scb_status_t status;

    // Scale the timeout to the IODelay() tick unit used below (50 per
    // iteration). timeout is presumably in milliseconds with IODelay
    // counting microseconds - TODO confirm against the KDP caller.
    timeout *= 1000;

    // Poll (no interrupts while the debugger is active) until the head
    // RFD completes or the timeout expires.
    while ((OSReadLE16(&headRfd->status) & RFD_STATUS_C) == 0) {
        if ((int) timeout <= 0) {
            processPacket = false;
            break;
        }
        IODelay(50);
        timeout -= 50;
    }

    if (processPacket) {
        if ((OSReadLE16(&headRfd->status) & RFD_STATUS_OK) &&
            (OSReadLE32(&headRfd->_rbd.count) & RBD_COUNT_EOF))
        {
            // Pass up good frames.
            //
            *len = CSR_VALUE(RBD_COUNT, OSReadLE32(&headRfd->_rbd.count));
            *len = MIN(*len, ETHERMAXPACKET);
            bcopy(mtod(headRfd->_rbd._mbuf, void *), pkt, *len);
            ret = true;
        }

        /* the head becomes the new tail */
        /* clear fields in rfd */
        // Re-arm this RFD as the new end-of-list element (EL set);
        // same recycling scheme as receiveInterruptOccurred().
        OSWriteLE16(&headRfd->status, 0);
        OSWriteLE16(&headRfd->command, (RFD_COMMAND_SF | RFD_COMMAND_EL));
        OSWriteLE32(&headRfd->rbdAddr, C_NULL);
        OSWriteLE32(&headRfd->misc, 0);

        /* clear fields in rbd */
        OSWriteLE32(&headRfd->_rbd.count, 0);
        OSWriteLE32(&headRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE) |
                    RBD_SIZE_EL);

        /* adjust tail markers */
        OSWriteLE32(&tailRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE));
        OSWriteLE16(&tailRfd->command, RFD_COMMAND_SF);

        tailRfd = headRfd;        // new tail
        headRfd = headRfd->_next; // new head
    }

    // If the receive unit stalled (RNR), restart it here, mirroring the
    // RNR handling in interruptOccurred().
    status = OSReadLE16(&CSR_p->status) & SCB_STATUS_RNR;
    if (status) {
        OSWriteLE16(&CSR_p->status, status); // ack RNR interrupt

        IOLog("Intel82557::%s restarting receiver\n", __FUNCTION__);

        IOLog("%s::%s RUS:0x%x Index:%d\n", getName(), __FUNCTION__,
              CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)),
              tailRfd - rfdList_p);

        _abortReceive();

#if 0 // Display RFD/RBD fields
        for (int i = 0; i < NUM_RECEIVE_FRAMES; i++) {
            IOLog(" %02d: %04x %04x - %08x %08x\n", i,
                  OSReadLE16(&rfdList_p[i].command),
                  OSReadLE16(&rfdList_p[i].status),
                  OSReadLE32(&rfdList_p[i]._rbd.size),
                  OSReadLE32(&rfdList_p[i].misc));
        }
#endif

        _resetRfdList();
        _startReceive();
    }

    return ret;
}
1654
1655 //---------------------------------------------------------------------------
1656 // Function: _sendPacket
1657 //
1658 // Purpose:
1659 // Part of kerneldebugger protocol.
1660 // Returns true if the packet was sent successfully.
1661
1662 bool Intel82557::_sendPacket(void * pkt, UInt len)
1663 {
1664 tbd_t * tbd_p;
1665
1666 // Set up the TCB and issue the command
1667 //
1668 OSWriteLE16(&KDB_tcb_p->status, 0);
1669 OSWriteLE32(&KDB_tcb_p->link, C_NULL);
1670 OSWriteLE8(&KDB_tcb_p->threshold, TCB_TX_THRESHOLD);
1671 OSWriteLE16(&KDB_tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) |
1672 TCB_COMMAND_EL |
1673 TCB_COMMAND_SF );
1674 OSWriteLE16(&KDB_tcb_p->count, 0); // all data are in the TBD's.
1675 OSWriteLE8(&KDB_tcb_p->number, 1); // 1 TBD only.
1676
1677 // Copy the debugger packet to the pre-allocated buffer area.
1678 //
1679 len = MIN(len, ETHERMAXPACKET);
1680 len = MAX(len, ETHERMINPACKET);
1681 bcopy(pkt, KDB_buf_p, len);
1682
1683 // Update the TBD.
1684 //
1685 tbd_p = &KDB_tcb_p->_tbds[0];
1686 OSWriteLE32(&tbd_p->addr, KDB_buf_paddr);
1687 OSWriteLE32(&tbd_p->size, CSR_FIELD(TBD_SIZE, len));
1688
1689 // Start up the command unit to send the packet.
1690 //
1691 return _polledCommand((cbHeader_t *) KDB_tcb_p, KDB_tcb_p->_paddr);
1692 }