1 | /* |
2 | * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * Copyright (c) 1995-1996 NeXT Software, Inc. | |
24 | * | |
25 | * Implementation of the (relatively) hardware-dependent code | |
26 | * for the Mace Ethernet controller. | |
27 | * | |
28 | * HISTORY | |
29 | * | |
30 | * 10-Sept-97 | |
31 | * Created. | |
32 | * | |
33 | */ | |
34 | ||
35 | #include <IOKit/assert.h> | |
36 | #include <IOKit/system.h> | |
37 | #include <IOKit/IOLib.h> | |
38 | #include "MaceEnetPrivate.h" | |
39 | ||
40 | ||
41 | /***************************************************************************** | |
42 | * | |
43 | * Hacks. | |
44 | */ | |
45 | ||
46 | typedef unsigned long long ns_time_t; /* nanoseconds! */ | |
47 | ||
48 | #define NSEC_PER_SEC 1000000000 | |
49 | ||
50 | static void | |
51 | _IOGetTimestamp(ns_time_t *nsp) | |
52 | { | |
53 | mach_timespec_t now; | |
54 | ||
55 | IOGetTime(&now); | |
56 | *nsp = ((ns_time_t)now.tv_sec * NSEC_PER_SEC) + now.tv_nsec; | |
57 | } | |
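/*
 * Usage sketch: the polled debugger paths later in this file time out by
 * sampling this clock twice and converting the delta to milliseconds, e.g.
 *   _IOGetTimestamp(&startTime); ... _IOGetTimestamp(&currentTime);
 *   elapsedTimeMS = (currentTime - startTime) / (1000 * 1000);
 */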
58 | ||
59 | /* | |
60 | * Find a physical address (if any) for the specified virtual address. | |
61 | * | |
62 | * Note: what about vm_offset_t kvtophys(vm_offset_t va) | |
63 | */ | |
64 | static IOReturn _IOPhysicalFromVirtual( | |
65 | vm_address_t virtualAddress, | |
66 | unsigned *physicalAddress) | |
67 | { | |
68 | *physicalAddress = pmap_extract(kernel_pmap, virtualAddress); | |
69 | if(*physicalAddress == 0) { | |
70 | return kIOReturnBadArgument; | |
71 | } | |
72 | else { | |
73 | return kIOReturnSuccess; | |
74 | } | |
75 | } | |
76 | ||
77 | // From osfmk/ppc/pmap.h | |
78 | // | |
79 | extern "C" { | |
80 | extern void invalidate_dcache(vm_offset_t va, unsigned length, boolean_t phys); | |
81 | extern void flush_dcache(vm_offset_t va, unsigned length, boolean_t phys); | |
82 | } | |
83 | ||
84 | static inline void | |
85 | invalidate_cache_v(vm_offset_t va, unsigned length) | |
86 | { | |
87 | invalidate_dcache(va, length, 0); | |
88 | } | |
89 | ||
90 | static inline void | |
91 | flush_cache_v(vm_offset_t va, unsigned length) | |
92 | { | |
93 | flush_dcache(va, length, 0); | |
94 | } | |
95 | ||
96 | /****************************************************************************/ | |
97 | ||
98 | static IODBDMADescriptor dbdmaCmd_Nop; | |
99 | static IODBDMADescriptor dbdmaCmd_NopWInt; | |
100 | static IODBDMADescriptor dbdmaCmd_LoadXFS; | |
101 | static IODBDMADescriptor dbdmaCmd_LoadIntwInt; | |
102 | static IODBDMADescriptor dbdmaCmd_Stop; | |
103 | static IODBDMADescriptor dbdmaCmd_Branch; | |
104 | ||
105 | ||
106 | static u_int8_t reverseBitOrder(u_int8_t data ) | |
107 | { | |
108 | u_int8_t val = 0; | |
109 | int i; | |
110 | ||
111 | for ( i=0; i < 8; i++ ) | |
112 | { | |
113 | val <<= 1; | |
114 | if (data & 1) val |= 1; | |
115 | data >>= 1; | |
116 | } | |
117 | return( val ); | |
118 | } | |
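/*
 * Sanity-check examples (not part of the driver): reverseBitOrder() mirrors
 * the eight bits of a byte, so reverseBitOrder(0x01) == 0x80,
 * reverseBitOrder(0x80) == 0x01, and palindromic bit patterns such as 0xA5
 * map to themselves. It is used below to convert the bit-reversed station
 * address bytes read from the Mace address ROM.
 */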
119 | ||
120 | /* | |
121 | * Function: IOMallocPage | |
122 | * | |
123 | * Purpose: | |
124 | * Returns a pointer to a page-aligned memory block of size >= PAGE_SIZE | |
125 | * | |
126 | * Return: | |
127 | * Actual pointer and size of block returned in actual_ptr and actual_size. | |
128 | * Use these as arguments to IOFree: IOFree(*actual_ptr, *actual_size); | |
129 | */ | |
130 | static void * | |
131 | IOMallocPage(int request_size, void ** actual_ptr, u_int * actual_size) | |
132 | { | |
133 | void * mem_ptr; | |
134 | ||
135 | *actual_size = round_page(request_size) + PAGE_SIZE; | |
136 | mem_ptr = IOMalloc(*actual_size); | |
137 | if (mem_ptr == NULL) | |
138 | return NULL; | |
139 | *actual_ptr = mem_ptr; | |
140 | return ((void *)round_page(mem_ptr)); | |
141 | } | |
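/*
 * Usage sketch (hypothetical caller): the page-aligned pointer returned is
 * what the driver actually uses, while the actual_ptr/actual_size pair is
 * what must eventually be handed back to IOFree when the block is released:
 *
 *   void * real;  u_int realSize;
 *   void * aligned = IOMallocPage(size, &real, &realSize);
 *   ...
 *   if (real) IOFree(real, realSize);
 */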
142 | ||
143 | /* | |
144 | * Private functions | |
145 | */ | |
146 | bool MaceEnet::_allocateMemory() | |
147 | { | |
148 | u_int32_t i, n; | |
149 | unsigned char * virtAddr; | |
150 | u_int32_t physBase; | |
151 | u_int32_t physAddr; | |
152 | u_int32_t dbdmaSize; | |
153 | ||
154 | /* | |
155 | * Calculate total space for DMA channel commands | |
156 | */ | |
157 | dbdmaSize = round_page( | |
158 | RX_RING_LENGTH * sizeof(enet_dma_cmd_t) + | |
159 | TX_RING_LENGTH * sizeof(enet_txdma_cmd_t) + | |
160 | 2 * sizeof(IODBDMADescriptor) ); | |
161 | ||
162 | /* | |
163 | * Allocate required memory | |
164 | */ | |
165 | dmaMemory.size = dbdmaSize; | |
166 | dmaMemory.ptr = (void *)IOMallocPage( | |
167 | dmaMemory.size, | |
168 | &dmaMemory.ptrReal, | |
169 | &dmaMemory.sizeReal | |
170 | ); | |
171 | ||
172 | dmaCommands = (unsigned char *) dmaMemory.ptr; | |
173 | if (!dmaCommands) { | |
174 | IOLog( "Mace: Cant allocate channel DBDMA commands\n\r" ); | |
175 | return false; | |
176 | } | |
177 | ||
178 | /* | |
179 | * If we needed more than one page, then make sure we received | |
180 | * contiguous memory. | |
181 | */ | |
182 | n = (dbdmaSize - PAGE_SIZE) / PAGE_SIZE; | |
183 | _IOPhysicalFromVirtual((vm_address_t) dmaCommands, &physBase ); | |
184 | ||
185 | virtAddr = (unsigned char *) dmaCommands; | |
186 | for( i=0; i < n; i++, virtAddr += PAGE_SIZE ) | |
187 | { | |
188 | _IOPhysicalFromVirtual( (vm_address_t) virtAddr, &physAddr ); | |
189 | if (physAddr != (physBase + i * PAGE_SIZE) ) | |
190 | { | |
191 | IOLog("Mace: Cannot allocate contiguous memory for DBDMA " | |
192 | "commands\n"); | |
193 | return false; | |
194 | } | |
195 | } | |
196 | ||
197 | /* | |
198 | * Setup the receive ring pointers | |
199 | */ | |
200 | rxDMACommands = (enet_dma_cmd_t *)dmaCommands; | |
201 | rxMaxCommand = RX_RING_LENGTH; | |
202 | ||
203 | /* | |
204 | * Setup the transmit ring pointers | |
205 | */ | |
206 | txDMACommands = (enet_txdma_cmd_t *)( | |
207 | dmaCommands + | |
208 | RX_RING_LENGTH * sizeof(enet_dma_cmd_t) + | |
209 | sizeof(IODBDMADescriptor)); | |
210 | ||
211 | txMaxCommand = TX_RING_LENGTH; | |
212 | ||
213 | /* | |
214 | * Setup pre-initialized DBDMA commands | |
215 | */ | |
216 | IOMakeDBDMADescriptor( (&dbdmaCmd_Nop), | |
217 | kdbdmaNop, | |
218 | kdbdmaKeyStream0, | |
219 | kdbdmaIntNever, | |
220 | kdbdmaBranchNever, | |
221 | kdbdmaWaitNever, | |
222 | 0, | |
223 | 0 ); | |
224 | ||
225 | IOMakeDBDMADescriptor( (&dbdmaCmd_NopWInt), | |
226 | kdbdmaNop, | |
227 | kdbdmaKeyStream0, | |
228 | kdbdmaIntAlways, | |
229 | kdbdmaBranchNever, | |
230 | kdbdmaWaitNever, | |
231 | 0, | |
232 | 0 ); | |
233 | ||
234 | UInt32 ioBaseEnetPhys = maps[MEMORY_MAP_ENET_INDEX]->getPhysicalAddress(); | |
235 | ||
236 | IOMakeDBDMADescriptor( (&dbdmaCmd_LoadXFS), | |
237 | kdbdmaLoadQuad, | |
238 | kdbdmaKeySystem, | |
239 | kdbdmaIntNever, | |
240 | kdbdmaBranchNever, | |
241 | kdbdmaWaitNever, | |
242 | 1, | |
243 | ((int)ioBaseEnetPhys + kXmtFS) ); | |
244 | ||
245 | IOMakeDBDMADescriptor( (&dbdmaCmd_LoadIntwInt), | |
246 | kdbdmaLoadQuad, | |
247 | kdbdmaKeySystem, | |
248 | kdbdmaIntAlways, | |
249 | kdbdmaBranchNever, | |
250 | kdbdmaWaitNever, | |
251 | 1, | |
252 | ((int)ioBaseEnetPhys + kIntReg) ); | |
253 | ||
254 | IOMakeDBDMADescriptor( (&dbdmaCmd_Stop), | |
255 | kdbdmaStop, | |
256 | kdbdmaKeyStream0, | |
257 | kdbdmaIntNever, | |
258 | kdbdmaBranchNever, | |
259 | kdbdmaWaitNever, | |
260 | 0, | |
261 | 0 ); | |
262 | ||
263 | IOMakeDBDMADescriptor( (&dbdmaCmd_Branch), | |
264 | kdbdmaNop, | |
265 | kdbdmaKeyStream0, | |
266 | kdbdmaIntNever, | |
267 | kdbdmaBranchAlways, | |
268 | kdbdmaWaitNever, | |
269 | 0, | |
270 | 0 ); | |
271 | ||
272 | return true; | |
273 | } | |
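/*
 * Layout of the DBDMA command block set up above (sketch derived from the
 * pointer arithmetic in _allocateMemory):
 *
 *   dmaCommands   -> RX_RING_LENGTH * enet_dma_cmd_t     receive ring
 *                    1 IODBDMADescriptor                 rx branch back to ring head
 *   txDMACommands -> TX_RING_LENGTH * enet_txdma_cmd_t   transmit ring
 *                    1 IODBDMADescriptor                 tx branch back to ring head
 */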
274 | ||
275 | /*------------------------------------------------------------------------- | |
276 | * | |
277 | * Setup the Transmit Ring | |
278 | * ----------------------- | |
279 | * Each transmit ring entry consists of two words to transmit data from buffer | |
280 | * segments (possibly) spanning a page boundary. This is followed by two DMA | |
281 | * commands which read transmit frame status and interrupt status from the Mace | |
282 | * chip. The last DMA command in each transmit ring entry generates a host | |
283 | * interrupt. The last entry in the ring is followed by a DMA branch to the | |
284 | * first entry. | |
285 | *-------------------------------------------------------------------------*/ | |
286 | ||
287 | bool MaceEnet::_initTxRing() | |
288 | { | |
289 | IOReturn kr; | |
290 | u_int32_t i; | |
291 | ||
292 | /* | |
293 | * Clear the transmit DMA command memory | |
294 | */ | |
295 | bzero( (void *)txDMACommands, sizeof(enet_txdma_cmd_t) * txMaxCommand); | |
296 | txCommandHead = 0; | |
297 | txCommandTail = 0; | |
298 | ||
299 | /* | |
300 | * DMA Channel commands 2,3 are the same for all DBDMA entries on transmit. | |
301 | * Initialize them now. | |
302 | */ | |
303 | for( i=0; i < txMaxCommand; i++ ) | |
304 | { | |
305 | txDMACommands[i].desc_seg[2] = dbdmaCmd_LoadXFS; | |
306 | txDMACommands[i].desc_seg[3] = dbdmaCmd_LoadIntwInt; | |
307 | } | |
308 | ||
309 | /* | |
310 | * Put a DMA Branch command after the last entry in the transmit ring. | |
311 | * Set the branch address to the physical address of the start of the | |
312 | * transmit ring. | |
313 | */ | |
314 | txDMACommands[txMaxCommand].desc_seg[0] = dbdmaCmd_Branch; | |
315 | ||
316 | kr = _IOPhysicalFromVirtual( (vm_address_t) txDMACommands, | |
317 | (u_int32_t *)&txDMACommandsPhys ); | |
318 | if ( kr != kIOReturnSuccess ) | |
319 | { | |
320 | IOLog("Mace: Bad Tx DBDMA command buf - %08x\n\r", | |
321 | (u_int32_t)txDMACommands ); | |
322 | } | |
323 | IOSetCCCmdDep( &txDMACommands[txMaxCommand].desc_seg[0], | |
324 | txDMACommandsPhys ); | |
325 | ||
326 | /* | |
327 | * Set the Transmit DMA Channel pointer to the first entry in the | |
328 | * transmit ring. | |
329 | */ | |
330 | IOSetDBDMACommandPtr( ioBaseEnetTxDMA, txDMACommandsPhys ); | |
331 | ||
332 | /* | |
333 | * Push the DMA channel words into physical memory. | |
334 | */ | |
335 | flush_cache_v( (vm_offset_t)txDMACommands, | |
336 | txMaxCommand*sizeof(enet_txdma_cmd_t) + sizeof(IODBDMADescriptor)); | |
337 | ||
338 | return true; | |
339 | } | |
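/*
 * One transmit ring entry after _initTxRing()/_transmitPacket() (sketch):
 *
 *   desc_seg[0]  OUTPUT_MORE/OUTPUT_LAST  first buffer fragment (or Stop at the tail)
 *   desc_seg[1]  OUTPUT_LAST/NOP          second fragment if the mbuf crosses a page
 *   desc_seg[2]  dbdmaCmd_LoadXFS         reads the transmit frame status (kXmtFS)
 *   desc_seg[3]  dbdmaCmd_LoadIntwInt     reads kIntReg and raises the host interrupt
 */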
340 | ||
341 | /*------------------------------------------------------------------------- | |
342 | * | |
343 | * Setup the Receive ring | |
344 | * ---------------------- | |
345 | * Each receive ring entry consists of two DMA commands to receive data | |
346 | * into a network buffer (possibly) spanning a page boundary. The second | |
347 | * DMA command in each entry generates a host interrupt. | |
348 | * The last entry in the ring is followed by a DMA branch to the first | |
349 | * entry. | |
350 | * | |
351 | *-------------------------------------------------------------------------*/ | |
352 | ||
353 | bool MaceEnet::_initRxRing() | |
354 | { | |
355 | u_int32_t i; | |
356 | bool status; | |
357 | IOReturn kr; | |
358 | ||
359 | /* | |
360 | * Clear the receive DMA command memory | |
361 | */ | |
362 | bzero( (void *)rxDMACommands, sizeof(enet_dma_cmd_t) * rxMaxCommand); | |
363 | ||
364 | kr = _IOPhysicalFromVirtual( (vm_address_t) rxDMACommands, | |
365 | (u_int32_t *)&rxDMACommandsPhys ); | |
366 | if ( kr != kIOReturnSuccess ) | |
367 | { | |
368 | IOLog("Mace: Bad Rx DBDMA command buf - %08x\n\r", | |
369 | (u_int32_t)rxDMACommands ); | |
370 | return false; | |
371 | } | |
372 | ||
373 | /* | |
374 | * Allocate a receive buffer for each entry in the Receive ring | |
375 | */ | |
376 | for (i = 0; i < rxMaxCommand-1; i++) | |
377 | { | |
378 | if (rxMbuf[i] == 0) | |
379 | { | |
380 | rxMbuf[i] = allocatePacket(NETWORK_BUFSIZE); | |
381 | ||
382 | if (!rxMbuf[i]) | |
383 | { | |
384 | IOLog("Mace: allocatePacket failed in _initRxRing()\n\r"); | |
385 | return false; | |
386 | } | |
387 | } | |
388 | ||
389 | /* | |
390 | * Set the DMA commands for the ring entry to transfer data to the | |
391 | * mbuf. | |
392 | */ | |
393 | status = _updateDescriptorFromMbuf(rxMbuf[i], &rxDMACommands[i], true); | |
394 | if (status == false) | |
395 | { | |
396 | IOLog("Mace: Cant map mbuf to physical memory in _initRxRing\n\r"); | |
397 | return false; | |
398 | } | |
399 | } | |
400 | ||
401 | /* | |
402 | * Set the receive queue head to point to the first entry in the ring. | |
403 | * Set the receive queue tail to point to a DMA Stop command after the | |
404 | * last ring entry | |
405 | */ | |
406 | rxCommandHead = 0; | |
407 | rxCommandTail = i; | |
408 | ||
409 | rxDMACommands[i].desc_seg[0] = dbdmaCmd_Stop; | |
410 | rxDMACommands[i].desc_seg[1] = dbdmaCmd_Nop; | |
411 | ||
412 | /* | |
413 | * Setup a DMA branch command after the stop command | |
414 | */ | |
415 | i++; | |
416 | rxDMACommands[i].desc_seg[0] = dbdmaCmd_Branch; | |
417 | ||
418 | IOSetCCCmdDep( &rxDMACommands[i].desc_seg[0], rxDMACommandsPhys ); | |
419 | ||
420 | /* | |
421 | * Set DMA command pointer to first receive entry | |
422 | */ | |
423 | IOSetDBDMACommandPtr( ioBaseEnetRxDMA, rxDMACommandsPhys ); | |
424 | ||
425 | /* | |
426 | * Push DMA commands to physical memory | |
427 | */ | |
428 | flush_cache_v( (vm_offset_t)&rxDMACommands[rxCommandTail], | |
429 | 2 * sizeof(enet_dma_cmd_t) ); | |
430 | ||
431 | return true; | |
432 | } | |
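/*
 * One receive ring entry after _initRxRing() (sketch): desc_seg[0..1]
 * transfer into the mbuf -- an INPUT_LAST followed by a NOP-with-interrupt
 * when the buffer occupies a single physical segment, or an INPUT_MORE
 * followed by an INPUT_LAST-with-interrupt when it crosses a page -- so a
 * host interrupt is raised once the frame transfer completes. The tail
 * entry holds Stop/Nop, and the slot after it branches back to the ring head.
 */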
433 | ||
434 | /*------------------------------------------------------------------------- | |
435 | * | |
436 | * | |
437 | * | |
438 | *-------------------------------------------------------------------------*/ | |
439 | ||
440 | void MaceEnet::_startChip() | |
441 | { | |
442 | WriteMaceRegister( ioBaseEnet, kMacCC, kMacCCEnXmt | kMacCCEnRcv ); | |
443 | ||
444 | // enable rx dma channel | |
445 | IODBDMAContinue( ioBaseEnetRxDMA ); | |
446 | } | |
447 | ||
448 | /*------------------------------------------------------------------------- | |
449 | * | |
450 | * | |
451 | * | |
452 | *-------------------------------------------------------------------------*/ | |
453 | ||
454 | void MaceEnet::_resetChip() | |
455 | { | |
456 | u_int8_t regValue; | |
457 | ||
458 | /* | |
459 | * Mace errata - chip reset does not clear pending interrupts | |
460 | */ | |
461 | ReadMaceRegister( ioBaseEnet, kIntReg ); | |
462 | ||
463 | IODBDMAReset( ioBaseEnetRxDMA ); | |
464 | IODBDMAReset( ioBaseEnetTxDMA ); | |
465 | ||
466 | IOSetDBDMAWaitSelect( ioBaseEnetTxDMA, | |
467 | IOSetDBDMAChannelControlBits( kdbdmaS5 ) ); | |
468 | ||
469 | IOSetDBDMABranchSelect( ioBaseEnetRxDMA, | |
470 | IOSetDBDMAChannelControlBits( kdbdmaS6 ) ); | |
471 | ||
472 | IOSetDBDMAInterruptSelect( ioBaseEnetRxDMA, | |
473 | IOSetDBDMAChannelControlBits( kdbdmaS6 ) ); | |
474 | ||
475 | WriteMaceRegister( ioBaseEnet, kBIUCC, kBIUCCSWRst ); | |
476 | do | |
477 | { | |
478 | regValue = ReadMaceRegister( ioBaseEnet, kBIUCC ); | |
479 | } | |
480 | while( regValue & kBIUCCSWRst ); | |
481 | } | |
482 | ||
483 | /*------------------------------------------------------------------------- | |
484 | * | |
485 | * | |
486 | * | |
487 | *-------------------------------------------------------------------------*/ | |
488 | ||
489 | bool MaceEnet::_initChip() | |
490 | { | |
491 | volatile u_int16_t regValue; | |
492 | u_int32_t i; | |
493 | ||
494 | _disableAdapterInterrupts(); | |
495 | ||
496 | chipId = ReadMaceRegister( ioBaseEnet, kMaceChipId0 ); | |
497 | chipId |= ReadMaceRegister( ioBaseEnet, kMaceChipId1 ) << 8; | |
498 | ||
499 | /* | |
500 | * Turn off ethernet header stripping | |
501 | */ | |
502 | regValue = ReadMaceRegister( ioBaseEnet, kRcvFC ); | |
503 | regValue &= ~kRcvFCAStrpRcv; | |
504 | WriteMaceRegister( ioBaseEnet, kRcvFC, regValue ); | |
505 | ||
506 | /* | |
507 | * Set Mace destination address. | |
508 | */ | |
509 | if ( chipId != kMaceRevisionA2 ) | |
510 | { | |
511 | WriteMaceRegister( ioBaseEnet, kIAC, kIACAddrChg | kIACPhyAddr ); | |
512 | do | |
513 | { | |
514 | regValue = ReadMaceRegister( ioBaseEnet, kIAC ); | |
515 | } | |
516 | while( regValue & kIACAddrChg ); | |
517 | } | |
518 | else | |
519 | { | |
520 | WriteMaceRegister( ioBaseEnet, kIAC, kIACPhyAddr ); | |
521 | } | |
522 | ||
523 | for (i=0; i < sizeof(IOEthernetAddress); i++ ) | |
524 | { | |
525 | WriteMaceRegister( ioBaseEnet, kPADR, | |
526 | reverseBitOrder(((unsigned char *)ioBaseEnetROM)[i<<4]) ); | |
527 | } | |
528 | ||
529 | /* | |
530 | * Clear logical address (multicast) filter | |
531 | */ | |
532 | if ( chipId != kMaceRevisionA2 ) | |
533 | { | |
534 | WriteMaceRegister( ioBaseEnet, kIAC, kIACAddrChg | kIACLogAddr ); | |
535 | do | |
536 | { | |
537 | regValue = ReadMaceRegister( ioBaseEnet, kIAC ); | |
538 | } | |
539 | while( regValue & kIACAddrChg ); | |
540 | } | |
541 | else | |
542 | { | |
543 | WriteMaceRegister( ioBaseEnet, kIAC, kIACLogAddr ); | |
544 | } | |
545 | ||
546 | for (i = 0; i < 8; i++ ) | |
547 | { | |
548 | WriteMaceRegister( ioBaseEnet, kLADRF, 0 ); | |
549 | } | |
550 | ||
551 | /* | |
552 | * Enable ethernet transceiver | |
553 | */ | |
554 | WriteMaceRegister( ioBaseEnet, kPLSCC, kPLSCCPortSelGPSI | kPLSCCEnSts ); | |
555 | ||
556 | return true; | |
557 | } | |
558 | ||
559 | ||
560 | /*------------------------------------------------------------------------- | |
561 | * | |
562 | * | |
563 | * | |
564 | *-------------------------------------------------------------------------*/ | |
565 | ||
566 | void MaceEnet::_restartChip() | |
567 | { | |
568 | /* | |
569 | * Shutdown DMA channels | |
570 | */ | |
571 | _stopReceiveDMA(); | |
572 | _stopTransmitDMA(); | |
573 | ||
574 | /* | |
575 | * Get the silicon's attention | |
576 | */ | |
577 | _resetChip(); | |
578 | _initChip(); | |
579 | ||
580 | /* | |
581 | * Restore multicast settings | |
582 | */ | |
583 | _updateHashTableMask(); | |
584 | ||
585 | if ( isPromiscuous ) | |
586 | { | |
587 | _setPromiscuousMode(kIOEnetPromiscuousModeOn); | |
588 | } | |
589 | ||
590 | /* | |
591 | * Enable receiver and transmitter | |
592 | */ | |
593 | _startChip(); | |
594 | _enableAdapterInterrupts(); | |
595 | ||
596 | /* | |
597 | * Restart transmit DMA | |
598 | */ | |
599 | IODBDMAContinue( ioBaseEnetTxDMA ); | |
600 | } | |
601 | ||
602 | /*------------------------------------------------------------------------- | |
603 | * | |
604 | * Orderly stop of receive DMA. | |
605 | * | |
606 | * | |
607 | *-------------------------------------------------------------------------*/ | |
608 | ||
609 | void MaceEnet::_stopReceiveDMA() | |
610 | { | |
611 | u_int32_t dmaStatus; | |
612 | u_int32_t dmaCmdPtr; | |
613 | u_int32_t dmaIndex; | |
614 | u_int8_t tmpBuf[16]; | |
615 | u_int8_t *p = 0; | |
616 | u_int8_t MacCCReg; | |
617 | ||
618 | /* | |
619 | * Stop the receiver and allow any frame receive in progress to complete | |
620 | */ | |
621 | MacCCReg = ReadMaceRegister( ioBaseEnet, kMacCC ); | |
622 | WriteMaceRegister( ioBaseEnet, kMacCC, MacCCReg & ~kMacCCEnRcv ); | |
623 | IODelay( RECEIVE_QUIESCE_uS ); | |
624 | ||
625 | /* | |
626 | * Capture channel status and pause the dma channel. | |
627 | */ | |
628 | dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetRxDMA ); | |
629 | IODBDMAPause( ioBaseEnetRxDMA ); | |
630 | ||
631 | /* | |
632 | * Read the command pointer and convert it to a byte offset into the | |
633 | * DMA program. | |
634 | */ | |
635 | dmaCmdPtr = IOGetDBDMACommandPtr( ioBaseEnetRxDMA ); | |
636 | dmaIndex = (dmaCmdPtr - rxDMACommandsPhys); | |
637 | ||
638 | /* | |
639 | * If the channel status is DEAD, the DMA pointer is pointing to the | |
640 | * next command | |
641 | */ | |
642 | if ( dmaStatus & kdbdmaDead ) | |
643 | { | |
644 | dmaIndex -= sizeof(IODBDMADescriptor); | |
645 | } | |
646 | ||
647 | /* | |
648 | * Convert channel program offset to command index | |
649 | */ | |
650 | dmaIndex = dmaIndex / sizeof(enet_dma_cmd_t); | |
651 | if ( dmaIndex >= rxMaxCommand ) dmaIndex = 0; | |
652 | ||
653 | /* | |
654 | * The DMA controller doesn't like being stopped before it has | |
655 | * transferred any data. | |
656 | * | |
657 | * When we stop it anyway, it corrupts up to 16 bytes starting at the | |
658 | * nearest (lower) 16-byte boundary. This corruption can fall outside the | |
659 | * data transfer area of the mbuf, so we capture these bytes before the | |
660 | * reset and restore them afterwards. | |
661 | * | |
662 | */ | |
663 | if ( rxMbuf[dmaIndex] ) | |
664 | { | |
665 | p = mtod(rxMbuf[dmaIndex], u_int8_t *); | |
666 | } | |
667 | ||
668 | (u_int32_t)p &= ~0x0f; | |
669 | ||
670 | if ( p ) | |
671 | { | |
672 | bcopy( p, tmpBuf, 16 ); | |
673 | } | |
674 | ||
675 | IODBDMAReset( ioBaseEnetRxDMA ); | |
676 | ||
677 | if ( p ) | |
678 | { | |
679 | bcopy( tmpBuf, p, 16 ); | |
680 | } | |
681 | ||
682 | /* | |
683 | * Reset the dma channel pointer to the nearest command index | |
684 | */ | |
685 | dmaCmdPtr = rxDMACommandsPhys + sizeof(enet_dma_cmd_t) * dmaIndex; | |
686 | IOSetDBDMACommandPtr( ioBaseEnetRxDMA, dmaCmdPtr); | |
687 | } | |
688 | ||
689 | /*------------------------------------------------------------------------- | |
690 | * | |
691 | * | |
692 | * | |
693 | *-------------------------------------------------------------------------*/ | |
694 | ||
695 | void MaceEnet::_stopTransmitDMA() | |
696 | { | |
697 | u_int32_t dmaStatus; | |
698 | u_int32_t dmaCmdPtr; | |
699 | u_int32_t dmaIndex; | |
700 | u_int8_t MacCCReg; | |
701 | ||
702 | /* | |
703 | * Stop the transmitter and allow any frame transmit in progress to abort | |
704 | */ | |
705 | MacCCReg = ReadMaceRegister( ioBaseEnet, kMacCC ); | |
706 | WriteMaceRegister( ioBaseEnet, kMacCC, MacCCReg & ~kMacCCEnXmt ); | |
707 | IODelay( TRANSMIT_QUIESCE_uS ); | |
708 | ||
709 | /* | |
710 | * Capture channel status and pause the dma channel. | |
711 | */ | |
712 | dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetTxDMA ); | |
713 | IODBDMAPause( ioBaseEnetTxDMA ); | |
714 | ||
715 | /* | |
716 | * Read the command pointer and convert it to a byte offset into the | |
717 | * DMA program. | |
718 | */ | |
719 | dmaCmdPtr = IOGetDBDMACommandPtr( ioBaseEnetTxDMA ); | |
720 | dmaIndex = (dmaCmdPtr - txDMACommandsPhys); | |
721 | ||
722 | /* | |
723 | * If the channel status is DEAD, the DMA pointer is pointing to the | |
724 | * next command | |
725 | */ | |
726 | if ( dmaStatus & kdbdmaDead ) | |
727 | { | |
728 | dmaIndex -= sizeof(IODBDMADescriptor); | |
729 | } | |
730 | ||
731 | /* | |
732 | * Convert channel program offset to command index | |
733 | */ | |
734 | dmaIndex = dmaIndex / sizeof(enet_txdma_cmd_t); | |
735 | if ( dmaIndex >= txMaxCommand ) dmaIndex = 0; | |
736 | ||
737 | IODBDMAReset( ioBaseEnetTxDMA ); | |
738 | ||
739 | /* | |
740 | * Reset the dma channel pointer to the nearest command index | |
741 | */ | |
742 | dmaCmdPtr = txDMACommandsPhys + sizeof(enet_txdma_cmd_t) * dmaIndex; | |
743 | IOSetDBDMACommandPtr( ioBaseEnetTxDMA, dmaCmdPtr ); | |
744 | } | |
745 | ||
746 | /*------------------------------------------------------------------------- | |
747 | * | |
748 | * | |
749 | * | |
750 | *-------------------------------------------------------------------------*/ | |
751 | ||
752 | void MaceEnet::_disableAdapterInterrupts() | |
753 | { | |
754 | WriteMaceRegister( ioBaseEnet, kIntMask, 0xFF ); | |
755 | } | |
756 | ||
757 | /*------------------------------------------------------------------------- | |
758 | * | |
759 | * _enableAdapterInterrupts | |
760 | * | |
761 | * It appears that, to make the Mace chip work properly with the DBDMA | |
762 | * channel, we need to leave the transmit interrupt unmasked at the chip. | |
763 | * This is odd, but that's what happens when you glue a chip that wasn't | |
764 | * intended to work with a DMA engine onto one. | |
765 | * | |
766 | *-------------------------------------------------------------------------*/ | |
767 | ||
768 | void MaceEnet::_enableAdapterInterrupts() | |
769 | { | |
770 | u_int8_t regValue; | |
771 | ||
772 | regValue = ReadMaceRegister( ioBaseEnet, kIntMask ); | |
773 | regValue &= ~kIntMaskXmtInt; | |
774 | WriteMaceRegister( ioBaseEnet, kIntMask, regValue ); | |
775 | IODelay(500); | |
776 | ReadMaceRegister( ioBaseEnet, kXmtFS ); | |
777 | ReadMaceRegister( ioBaseEnet, kIntReg ); | |
778 | } | |
779 | ||
780 | /*------------------------------------------------------------------------- | |
781 | * | |
782 | * | |
783 | * | |
784 | *-------------------------------------------------------------------------*/ | |
785 | ||
786 | bool MaceEnet::_transmitPacket(struct mbuf * packet) | |
787 | { | |
788 | enet_dma_cmd_t tmpCommand; | |
789 | u_int32_t i; | |
790 | ||
791 | /* | |
792 | * Check for room on the transmit ring. There should always be space | |
793 | * since it is the responsibility of the caller to verify this before | |
794 | * calling _transmitPacket. | |
795 | * | |
796 | * Get a copy of the DMA transfer commands in a temporary buffer. | |
797 | * The new DMA command is written into the channel program so that the | |
798 | * command word for the old Stop command is overwritten last. This prevents | |
799 | * the DMA engine from executing a partially written channel command. | |
800 | */ | |
801 | i = txCommandTail + 1; | |
802 | if ( i >= txMaxCommand ) i = 0; | |
803 | ||
804 | if ( (i == txCommandHead) || | |
805 | !_updateDescriptorFromMbuf(packet, &tmpCommand, false)) | |
806 | { | |
807 | IOLog("Mace: Freeing transmit packet eh?\n\r"); | |
808 | if (packet != txDebuggerPkt) | |
809 | freePacket(packet); | |
810 | return false; | |
811 | } | |
812 | ||
813 | /* | |
814 | * txCommandTail points to the current DMA Stop command for the channel. | |
815 | * We are now creating a new DMA Stop command in the next slot in the | |
816 | * transmit ring. The previous DMA Stop command will be overwritten with | |
817 | * the DMA commands to transfer the new mbuf. | |
818 | */ | |
819 | txDMACommands[i].desc_seg[0] = dbdmaCmd_Stop; | |
820 | txDMACommands[i].desc_seg[1] = dbdmaCmd_Nop; | |
821 | ||
822 | flush_cache_v( (vm_offset_t)&txDMACommands[i], sizeof(enet_dma_cmd_t) ); | |
823 | ||
824 | bcopy( ((u_int32_t *)&tmpCommand)+1, | |
825 | ((u_int32_t *)&txDMACommands[txCommandTail])+1, | |
826 | sizeof(enet_dma_cmd_t)-sizeof(u_int32_t) ); | |
827 | ||
828 | flush_cache_v( (vm_offset_t)&txDMACommands[txCommandTail], | |
829 | sizeof(enet_dma_cmd_t) ); | |
830 | ||
831 | txMbuf[txCommandTail] = packet; | |
832 | txDMACommands[txCommandTail].desc_seg[0].operation = | |
833 | tmpCommand.desc_seg[0].operation; | |
834 | ||
835 | flush_cache_v( (vm_offset_t)&txDMACommands[txCommandTail], | |
836 | sizeof(enet_dma_cmd_t) ); | |
837 | ||
838 | /* | |
839 | * Set the transmit tail to the new stop command. | |
840 | */ | |
841 | txCommandTail = i; | |
842 | ||
843 | /* | |
844 | * Tap the DMA channel to wake it up | |
845 | */ | |
846 | IODBDMAContinue( ioBaseEnetTxDMA ); | |
847 | ||
848 | return true; | |
849 | } | |
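/*
 * Ordering note (sketch of the technique used above): everything in the new
 * entry except the first 32-bit command word is written and flushed first;
 * only then is the command word stored and flushed. Until that final store,
 * the DMA engine still sees the old Stop command, so it can never execute a
 * half-written descriptor.
 */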
850 | ||
851 | /*------------------------------------------------------------------------- | |
852 | * _receivePacket | |
853 | * -------------- | |
854 | * This routine runs the receiver in polled-mode (yuk!) for the kernel | |
855 | * debugger. | |
856 | * | |
857 | * _receivePackets allocates mbufs and passes them up the stack. The kernel | |
858 | * debugger interface passes a buffer into us. To reconcile the two interfaces, | |
859 | * we allow the receive routine to continue to allocate its own buffers and | |
860 | * transfer any received data to the passed-in buffer. This is handled by | |
861 | * _receivePacket calling _packetToDebugger. | |
862 | *-------------------------------------------------------------------------*/ | |
863 | ||
864 | void MaceEnet::_receivePacket(void *pkt, unsigned int *pkt_len, | |
865 | unsigned int timeout) | |
866 | { | |
867 | ns_time_t startTime; | |
868 | ns_time_t currentTime; | |
869 | u_int32_t elapsedTimeMS; | |
870 | ||
871 | if (!ready || !pkt || !pkt_len) | |
872 | return; | |
873 | ||
874 | *pkt_len = 0; | |
875 | ||
876 | debuggerPkt = pkt; | |
877 | debuggerPktSize = 0; | |
878 | ||
879 | _IOGetTimestamp(&startTime); | |
880 | do | |
881 | { | |
882 | _receivePackets(true); | |
883 | _IOGetTimestamp(¤tTime); | |
884 | elapsedTimeMS = (currentTime - startTime) / (1000*1000); | |
885 | } | |
886 | while ( (debuggerPktSize == 0) && (elapsedTimeMS < timeout) ); | |
887 | ||
888 | *pkt_len = debuggerPktSize; | |
889 | ||
890 | return; | |
891 | } | |
892 | ||
893 | /*------------------------------------------------------------------------- | |
894 | * _packetToDebugger | |
895 | * ----------------- | |
896 | * This is called by _receivePackets when we are polling for kernel debugger | |
897 | * packets. It copies the mbuf contents to the buffer passed by the debugger. | |
898 | * It also sets the var debuggerPktSize which will break the polling loop. | |
899 | *-------------------------------------------------------------------------*/ | |
900 | ||
901 | void MaceEnet::_packetToDebugger(struct mbuf * packet, u_int size) | |
902 | { | |
903 | debuggerPktSize = size; | |
904 | bcopy( mtod(packet, char *), debuggerPkt, size ); | |
905 | } | |
906 | ||
907 | /*------------------------------------------------------------------------- | |
908 | * _sendPacket | |
909 | * ----------- | |
910 | * | |
911 | * This routine runs the transmitter in polled-mode (yuk!) for the | |
912 | * kernel debugger. | |
913 | * | |
914 | *-------------------------------------------------------------------------*/ | |
915 | ||
916 | void MaceEnet::_sendPacket(void *pkt, unsigned int pkt_len) | |
917 | { | |
918 | ns_time_t startTime; | |
919 | ns_time_t currentTime; | |
920 | u_int32_t elapsedTimeMS; | |
921 | ||
922 | if ( !ready || !pkt || (pkt_len > ETHERMAXPACKET)) | |
923 | return; | |
924 | ||
925 | /* | |
926 | * Wait for the transmit ring to empty | |
927 | */ | |
928 | _IOGetTimestamp(&startTime); | |
929 | do | |
930 | { | |
931 | _transmitInterruptOccurred(true); | |
932 | _IOGetTimestamp(¤tTime); | |
933 | elapsedTimeMS = (currentTime - startTime) / (1000*1000); | |
934 | } | |
935 | while ( (txCommandHead != txCommandTail) && | |
936 | (elapsedTimeMS < TX_KDB_TIMEOUT) ); | |
937 | ||
938 | if ( txCommandHead != txCommandTail ) | |
939 | { | |
940 | IOLog( "Mace: Polled tranmit timeout - 1\n\r"); | |
941 | return; | |
942 | } | |
943 | ||
944 | txDebuggerPkt->m_next = 0; | |
945 | txDebuggerPkt->m_data = (caddr_t) pkt; | |
946 | txDebuggerPkt->m_pkthdr.len = txDebuggerPkt->m_len = pkt_len; | |
947 | ||
948 | /* | |
949 | * Send the debugger packet. txDebuggerPkt must not be freed by | |
950 | * the transmit routine. | |
951 | */ | |
952 | _transmitPacket(txDebuggerPkt); | |
953 | ||
954 | /* | |
955 | * Poll waiting for the transmit ring to empty again | |
956 | */ | |
957 | do | |
958 | { | |
959 | _transmitInterruptOccurred(true); | |
960 | _IOGetTimestamp(¤tTime); | |
961 | elapsedTimeMS = (currentTime - startTime) / (1000*1000); | |
962 | } | |
963 | while ( (txCommandHead != txCommandTail) && | |
964 | (elapsedTimeMS < TX_KDB_TIMEOUT) ); | |
965 | ||
966 | if ( txCommandHead != txCommandTail ) | |
967 | { | |
968 | IOLog("Mace: Polled transmit timeout - 2\n\r"); | |
969 | } | |
970 | ||
971 | return; | |
972 | } | |
973 | ||
974 | /*------------------------------------------------------------------------- | |
975 | * | |
976 | * | |
977 | * | |
978 | *-------------------------------------------------------------------------*/ | |
979 | ||
980 | bool MaceEnet::_receiveInterruptOccurred() | |
981 | { | |
982 | return _receivePackets(false); | |
983 | } | |
984 | ||
985 | /*------------------------------------------------------------------------- | |
986 | * | |
987 | * | |
988 | * | |
989 | *-------------------------------------------------------------------------*/ | |
990 | ||
991 | bool MaceEnet::_receivePackets(bool fDebugger) | |
992 | { | |
993 | enet_dma_cmd_t tmpCommand; | |
994 | struct mbuf * packet; | |
995 | u_int32_t i,j,last; | |
996 | u_int32_t dmaChnlStatus; | |
997 | int receivedFrameSize = 0; | |
998 | u_int32_t dmaCount[2], dmaResid[2], dmaStatus[2]; | |
999 | bool reusePkt; | |
1000 | bool status; | |
1001 | bool useNetif = !fDebugger && netifClient; | |
1002 | bool packetsQueued = false; | |
1003 | u_int8_t *rxFS = NULL; | |
1004 | u_int32_t nextDesc; | |
1005 | static const u_int32_t lastResetValue = (u_int32_t)(-1); | |
1006 | ||
1007 | last = lastResetValue; | |
1008 | i = rxCommandHead; | |
1009 | ||
1010 | while ( 1 ) | |
1011 | { | |
1012 | reusePkt = false; | |
1013 | ||
1014 | /* | |
1015 | * Purge cache references for the DBDMA entry we are about to look at. | |
1016 | */ | |
1017 | invalidate_cache_v((vm_offset_t)&rxDMACommands[i], | |
1018 | sizeof(enet_dma_cmd_t)); | |
1019 | ||
1020 | /* | |
1021 | * Collect the DMA residual counts/status for the two buffer segments. | |
1022 | */ | |
1023 | for ( j = 0; j < 2; j++ ) | |
1024 | { | |
1025 | dmaResid[j] = IOGetCCResult( &rxDMACommands[i].desc_seg[j] ); | |
1026 | dmaStatus[j] = dmaResid[j] >> 16; | |
1027 | dmaResid[j] &= 0x0000ffff; | |
1028 | dmaCount[j] = IOGetCCOperation( &rxDMACommands[i].desc_seg[j] ) & | |
1029 | kdbdmaReqCountMask; | |
1030 | } | |
1031 | ||
1032 | #if 0 | |
1033 | IOLog("Ethernet(Mace): Rx NetBuf[%2d] = %08x Resid[0] = %04x Status[0] = %04x Resid[1] = %04x Status[1] = %04x\n\r", | |
1034 | i, (int)nb_map(rxNetbuf[i]), dmaResid[0], dmaStatus[0], dmaResid[1], dmaStatus[1] ); | |
1035 | #endif | |
1036 | ||
1037 | /* | |
1038 | * If the current entry has not been written, then stop at this entry | |
1039 | */ | |
1040 | if ( !((dmaStatus[0] & kdbdmaBt) || (dmaStatus[1] & kdbdmaActive)) ) | |
1041 | { | |
1042 | break; | |
1043 | } | |
1044 | ||
1045 | /* | |
1046 | * The Mace Ethernet controller appends four bytes to each receive | |
1047 | * buffer containing the buffer size and receive frame status. | |
1048 | * We locate these bytes by using the DMA residual counts. | |
1049 | */ | |
1050 | receivedFrameSize = dmaCount[0] - dmaResid[0] + dmaCount[1] - | |
1051 | ((dmaStatus[0] & kdbdmaBt) ? dmaCount[1] : dmaResid[1]); | |
1052 | ||
1053 | if ( ( receivedFrameSize >= 4 ) && | |
1054 | ( receivedFrameSize <= NETWORK_BUFSIZE ) ) | |
1055 | { | |
1056 | /* | |
1057 | * Get the receive frame size as reported by the Mace controller | |
1058 | */ | |
1059 | ||
1060 | rxFS = mtod(rxMbuf[i], u_int8_t *) + receivedFrameSize - 4; | |
1061 | ||
1062 | receivedFrameSize = (u_int16_t) rxFS[0] | | |
1063 | (rxFS[1] & kRcvFS1RcvCnt) << 8; | |
1064 | } | |
1065 | ||
1066 | /* | |
1067 | * Reject packets that are runts or that have other mutations. | |
1068 | */ | |
1069 | if ( receivedFrameSize < (ETHERMINPACKET - ETHERCRC) || | |
1070 | receivedFrameSize > (ETHERMAXPACKET + ETHERCRC) || | |
1071 | (rxFS[1] & (kRcvFS1OFlo | kRcvFS1Clsn | kRcvFS1Fram | kRcvFS1FCS)) | |
1072 | ) | |
1073 | { | |
1074 | if (useNetif) netStats->inputErrors++; | |
1075 | reusePkt = true; | |
1076 | } | |
1077 | else if ( useNetif == false ) | |
1078 | { | |
1079 | /* | |
1080 | * Always reuse packets in debugger mode. | |
1081 | */ | |
1082 | reusePkt = true; | |
1083 | if (fDebugger) | |
1084 | _packetToDebugger(rxMbuf[i], receivedFrameSize); | |
1085 | } | |
1086 | ||
1087 | /* | |
1088 | * Before we pass this packet up the networking stack, make sure we | |
1089 | * can get a replacement. Otherwise, hold on to the current packet and | |
1090 | * increment the input error count. | |
1091 | * Thanks Justin! | |
1092 | */ | |
1093 | ||
1094 | packet = 0; | |
1095 | ||
1096 | if ( reusePkt == false ) | |
1097 | { | |
1098 | bool replaced; | |
1099 | ||
1100 | packet = replaceOrCopyPacket(&rxMbuf[i], receivedFrameSize, | |
1101 | &replaced); | |
1102 | ||
1103 | reusePkt = true; | |
1104 | ||
1105 | if (packet && replaced) | |
1106 | { | |
1107 | status = _updateDescriptorFromMbuf(rxMbuf[i], | |
1108 | &rxDMACommands[i], true); | |
1109 | ||
1110 | if (status) | |
1111 | { | |
1112 | reusePkt = false; | |
1113 | } | |
1114 | else | |
1115 | { | |
1116 | // Assume descriptor has not been corrupted. | |
1117 | freePacket(rxMbuf[i]); // release new packet. | |
1118 | rxMbuf[i] = packet; // get the old packet back. | |
1119 | packet = 0; // pass up nothing. | |
1120 | IOLog("Mace: _updateDescriptorFromMbuf error\n"); | |
1121 | } | |
1122 | } | |
1123 | ||
1124 | if (packet == 0) | |
1125 | netStats->inputErrors++; | |
1126 | } | |
1127 | ||
1128 | /* | |
1129 | * If we are reusing the existing mbuf, then refurbish the existing | |
1130 | * DMA command descriptors by clearing the status/residual count | |
1131 | * fields. | |
1132 | */ | |
1133 | ||
1134 | if ( reusePkt == true ) | |
1135 | { | |
1136 | for ( j=0; j < sizeof(enet_dma_cmd_t)/sizeof(IODBDMADescriptor); | |
1137 | j++ ) | |
1138 | { | |
1139 | IOSetCCResult( &rxDMACommands[i].desc_seg[j], 0 ); | |
1140 | } | |
1141 | flush_cache_v( (vm_offset_t)&rxDMACommands[i], | |
1142 | sizeof(enet_dma_cmd_t) ); | |
1143 | } | |
1144 | ||
1145 | /* | |
1146 | * Keep track of the last receive descriptor processed | |
1147 | */ | |
1148 | last = i; | |
1149 | ||
1150 | /* | |
1151 | * Implement ring wrap-around | |
1152 | */ | |
1153 | if (++i >= rxMaxCommand) i = 0; | |
1154 | ||
1155 | /* | |
1156 | * Early exit in debugger mode. | |
1157 | */ | |
1158 | if (fDebugger) | |
1159 | { | |
1160 | break; | |
1161 | } | |
1162 | ||
1163 | /* | |
1164 | * Transfer the received packet to the network stack. | |
1165 | */ | |
1166 | if (packet) | |
1167 | { | |
1168 | KERNEL_DEBUG(DBG_MACE_RXCOMPLETE | DBG_FUNC_NONE, (int) packet, | |
1169 | (int)receivedFrameSize, 0, 0, 0 ); | |
1170 | ||
1171 | /* | |
1172 | * The KDB lock must be held before calling this function. | |
1173 | */ | |
1174 | networkInterface->inputPacket(packet, receivedFrameSize, true); | |
1175 | netStats->inputPackets++; | |
1176 | packetsQueued = true; | |
1177 | } | |
1178 | } | |
1179 | ||
1180 | /* | |
1181 | * OK...this is a little messy | |
1182 | * | |
1183 | * We just processed a bunch of DMA receive descriptors. We are going to | |
1184 | * exchange the current DMA stop command (rxCommandTail) with the last | |
1185 | * receive descriptor we processed (last). This makes the descriptors we | |
1186 | * just processed available to the DMA engine again. If we processed no | |
1187 | * receive descriptors on this call, we skip the exchange. | |
1188 | */ | |
1189 | ||
1190 | #if 0 | |
1191 | IOLog("Mace: Prev - Rx Head = %2d Rx Tail = %2d Rx Last = %2d\n\r", | |
1192 | rxCommandHead, rxCommandTail, last ); | |
1193 | #endif | |
1194 | ||
1195 | if ( last != lastResetValue ) | |
1196 | { | |
1197 | /* | |
1198 | * Save the contents of the last receive descriptor processed. | |
1199 | */ | |
1200 | packet = rxMbuf[last]; | |
1201 | tmpCommand = rxDMACommands[last]; | |
1202 | ||
1203 | /* | |
1204 | * Write a DMA stop command into this descriptor slot | |
1205 | */ | |
1206 | rxDMACommands[last].desc_seg[0] = dbdmaCmd_Stop; | |
1207 | rxDMACommands[last].desc_seg[1] = dbdmaCmd_Nop; | |
1208 | rxMbuf[last] = 0; | |
1209 | ||
1210 | flush_cache_v( (vm_offset_t)&rxDMACommands[last], | |
1211 | sizeof(enet_dma_cmd_t) ); | |
1212 | ||
1213 | /* | |
1214 | * Replace the previous DMA stop command with the last receive | |
1215 | * descriptor processed. | |
1216 | * | |
1217 | * The new DMA command is written into the channel program so that the | |
1218 | * command word for the old Stop command is overwritten last. This | |
1219 | * prevents the DMA engine from executing a partially written channel | |
1220 | * command. | |
1221 | * | |
1222 | * Note: When relocating the descriptor, we must update its branch | |
1223 | * field to reflect its new location. | |
1224 | */ | |
1225 | nextDesc = rxDMACommandsPhys + (int)&rxDMACommands[rxCommandTail+1] - | |
1226 | (int)rxDMACommands; | |
1227 | IOSetCCCmdDep( &tmpCommand.desc_seg[0], nextDesc ); | |
1228 | ||
1229 | bcopy( (u_int32_t *)&tmpCommand+1, | |
1230 | (u_int32_t *)&rxDMACommands[rxCommandTail]+1, | |
1231 | sizeof(enet_dma_cmd_t)-sizeof(u_int32_t) ); | |
1232 | ||
1233 | flush_cache_v( (vm_offset_t)&rxDMACommands[rxCommandTail], | |
1234 | sizeof(enet_dma_cmd_t) ); | |
1235 | ||
1236 | rxMbuf[rxCommandTail] = packet; | |
1237 | ||
1238 | rxDMACommands[rxCommandTail].desc_seg[0].operation = | |
1239 | tmpCommand.desc_seg[0].operation; | |
1240 | ||
1241 | flush_cache_v( (vm_offset_t)&rxDMACommands[rxCommandTail], | |
1242 | sizeof(IODBDMADescriptor) ); | |
1243 | ||
1244 | /* | |
1245 | * Update rxCommandTail to point to the new Stop command. Update | |
1246 | * rxCommandHead to point to the next slot in the ring past the Stop | |
1247 | * command | |
1248 | */ | |
1249 | rxCommandTail = last; | |
1250 | rxCommandHead = i; | |
1251 | } | |
1252 | ||
1253 | /* | |
1254 | * The DMA channel has a nasty habit of shutting down when there is a | |
1255 | * non-recoverable error on receive. We get no interrupt for this since | |
1256 | * the channel shuts down before the descriptor that causes the host | |
1257 | * interrupt is executed. | |
1258 | * | |
1259 | * We check if the channel is DEAD by checking the channel status reg. | |
1260 | * Also, the watchdog timer can force receiver interrupt servicing based | |
1261 | * on detecting that the receive DMA is DEAD. | |
1262 | */ | |
1263 | dmaChnlStatus = IOGetDBDMAChannelStatus( ioBaseEnetRxDMA ); | |
1264 | if ( dmaChnlStatus & kdbdmaDead ) | |
1265 | { | |
1266 | /* | |
1267 | * Read log error | |
1268 | */ | |
1269 | if (useNetif) netStats->inputErrors++; | |
1270 | IOLog( "Mace: Rx DMA Error - Status = %04x\n", dmaChnlStatus ); | |
1271 | ||
1272 | /* | |
1273 | * Reset and reinitialize chip | |
1274 | */ | |
1275 | _restartChip(); // This must not block in debugger mode. | |
1276 | } | |
1277 | else | |
1278 | { | |
1279 | /* | |
1280 | * Tap the DMA to wake it up | |
1281 | */ | |
1282 | IODBDMAContinue( ioBaseEnetRxDMA ); | |
1283 | } | |
1284 | ||
1285 | #if 0 | |
1286 | IOLog( "Mace: New - Rx Head = %2d Rx Tail = %2d\n\r", | |
1287 | rxCommandHead, rxCommandTail ); | |
1288 | #endif | |
1289 | ||
1290 | return packetsQueued; | |
1291 | } | |
1292 | ||
1293 | /*------------------------------------------------------------------------- | |
1294 | * | |
1295 | * | |
1296 | * | |
1297 | *-------------------------------------------------------------------------*/ | |
1298 | ||
1299 | bool MaceEnet::_transmitInterruptOccurred(bool fDebugger = false) | |
1300 | { | |
1301 | u_int32_t dmaStatus; | |
1302 | u_int32_t xmtFS; | |
1303 | bool fServiced = false; | |
1304 | bool useNetif = !fDebugger && netifClient; | |
1305 | ||
1306 | // Set the debugTxPoll flag to indicate the debugger was active | |
1307 | // and some cleanup may be needed when the driver returns to | |
1308 | // normal operation. | |
1309 | // | |
1310 | if (fDebugger) | |
1311 | debugTxPoll = true; | |
1312 | ||
1313 | while ( 1 ) | |
1314 | { | |
1315 | /* | |
1316 | * Purge cache references for the DBDMA entry we are about to look at. | |
1317 | */ | |
1318 | invalidate_cache_v((vm_offset_t)&txDMACommands[txCommandHead], | |
1319 | sizeof(enet_txdma_cmd_t)); | |
1320 | ||
1321 | /* | |
1322 | * Check the status of the last descriptor in this entry to see if | |
1323 | * the DMA engine completed this entry. | |
1324 | */ | |
1325 | dmaStatus = IOGetCCResult( | |
1326 | &txDMACommands[txCommandHead].desc_seg[3] ) >> 16; | |
1327 | ||
1328 | if ( !(dmaStatus & kdbdmaActive) ) | |
1329 | { | |
1330 | break; | |
1331 | } | |
1332 | ||
1333 | fServiced = true; | |
1334 | ||
1335 | /* | |
1336 | * Reset the status word for the entry we are about to process | |
1337 | */ | |
1338 | IOSetCCResult( &txDMACommands[txCommandHead].desc_seg[3], 0 ); | |
1339 | ||
1340 | flush_cache_v( (vm_offset_t) &txDMACommands[txCommandHead].desc_seg[3], | |
1341 | sizeof(IODBDMADescriptor) ); | |
1342 | ||
1343 | /* | |
1344 | * This DMA descriptor read the transmit frame status. See what it has | |
1345 | * to tell us. | |
1346 | */ | |
1347 | xmtFS = IOGetCCCmdDep( &txDMACommands[txCommandHead].desc_seg[2] ); | |
1348 | if ( useNetif && (xmtFS & kXmtFSXmtSV) ) | |
1349 | { | |
1350 | if (xmtFS & (kXmtFSUFlo | kXmtFSLCol | kXmtFSRtry | kXmtFSLCar) ) | |
1351 | { | |
1352 | netStats->outputErrors++; | |
1353 | } | |
1354 | else | |
1355 | { | |
1356 | netStats->outputPackets++; | |
1357 | } | |
1358 | ||
1359 | if (xmtFS & (kXmtFSOne | kXmtFSMore) ) | |
1360 | { | |
1361 | netStats->collisions++; | |
1362 | } | |
1363 | } | |
1364 | ||
1365 | /* | |
1366 | * Free the mbuf we just transmitted. | |
1367 | */ | |
1368 | KERNEL_DEBUG(DBG_MACE_TXCOMPLETE | DBG_FUNC_NONE, | |
1369 | (int) txMbuf[txCommandHead], | |
1370 | (int) txMbuf[txCommandHead]->m_pkthdr.len, 0, 0, 0 ); | |
1371 | ||
1372 | if (txMbuf[txCommandHead] != txDebuggerPkt) | |
1373 | { | |
1374 | if ( fDebugger ) | |
1375 | { | |
1376 | // | |
1377 | // While in debugger mode, do not touch the mbuf pool. | |
1378 | // Queue any used mbufs to a local queue. This queue | |
1379 | // will get flushed after we exit from debugger mode. | |
1380 | // | |
1381 | // During continuous debugger transmission and | |
1382 | // interrupt polling, we expect only the txDebuggerPkt | |
1383 | // to show up on the transmit mbuf ring. | |
1384 | // | |
1385 | debugQueue->enqueue( txMbuf[txCommandHead] ); | |
1386 | } | |
1387 | else | |
1388 | { | |
1389 | freePacket( txMbuf[txCommandHead] ); | |
1390 | } | |
1391 | } | |
1392 | ||
1393 | txMbuf[txCommandHead] = 0; | |
1394 | ||
1395 | if ( ++txCommandHead >= txMaxCommand ) txCommandHead = 0; | |
1396 | } | |
1397 | ||
1398 | /* | |
1399 | * The DMA channel has a nasty habit of shutting down when there is a | |
1400 | * non-recoverable error on transmit. We get no interrupt for this since | |
1401 | * the channel shuts down before the descriptor that causes the host | |
1402 | * interrupt is executed. | |
1403 | * | |
1404 | * We check if the channel is DEAD by checking the channel status reg. | |
1405 | * Also, the watchdog timer can force a transmitter reset if it sees no | |
1406 | * interrupt activity for two consecutive timeout intervals. | |
1407 | */ | |
1408 | ||
1409 | dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetTxDMA ); | |
1410 | if ( (dmaStatus & kdbdmaDead) || (txWDForceReset == true) ) | |
1411 | { | |
1412 | /* | |
1413 | * Read the transmit frame status and log error | |
1414 | */ | |
1415 | xmtFS = ReadMaceRegister( ioBaseEnet, kXmtFS ); | |
1416 | if (useNetif) netStats->outputErrors++; | |
1417 | IOLog( "Mace: Tx DMA Error - Status = %04x FS = %02x\n\r", | |
1418 | dmaStatus, xmtFS); | |
1419 | ||
1420 | /* | |
1421 | * Reset and reinitialize chip | |
1422 | */ | |
1423 | _restartChip(); | |
1424 | ||
1425 | txWDForceReset = false; | |
1426 | fServiced = true; | |
1427 | } | |
1428 | ||
1429 | return fServiced; | |
1430 | } | |
1431 | ||
1432 | /*------------------------------------------------------------------------- | |
1433 | * | |
1434 | * | |
1435 | * | |
1436 | *-------------------------------------------------------------------------*/ | |
1437 | ||
1438 | /* | |
1439 | * Breaks up an ethernet data buffer into two physical chunks. We know that | |
1440 | * the buffer can't straddle more than two pages. If the content of paddr2 is | |
1441 | * zero this means that all of the buffer lies in one physical page. Note | |
1442 | * that we use the fact that tx and rx descriptors have the same size and | |
1443 | * same layout of relevant fields (data address and count). | |
1444 | */ | |
1445 | bool | |
1446 | MaceEnet::_updateDescriptorFromMbuf(struct mbuf * m, enet_dma_cmd_t *desc, | |
1447 | bool isReceive) | |
1448 | { | |
1449 | u_int32_t nextDesc = 0; | |
1450 | int segments; | |
1451 | struct IOPhysicalSegment segVector[2]; | |
1452 | ||
1453 | /* | |
1454 | * Although coalescing is always enabled, it cannot occur | |
1455 | * while the driver is in debugger mode. | |
1456 | */ | |
1457 | segments = mbufCursor->getPhysicalSegmentsWithCoalesce(m, segVector); | |
1458 | ||
1459 | if ((!segments) || (segments > 2)) { | |
1460 | IOLog("Mace: _updateDescriptorFromMbuf error, %d segments\n", | |
1461 | segments); | |
1462 | return false; | |
1463 | } | |
1464 | ||
1465 | if ( segments == 1 ) | |
1466 | { | |
1467 | IOMakeDBDMADescriptor( (&desc->desc_seg[0]), | |
1468 | ((isReceive) ? kdbdmaInputLast : kdbdmaOutputLast), | |
1469 | (kdbdmaKeyStream0), | |
1470 | (kdbdmaIntNever), | |
1471 | (kdbdmaBranchNever), | |
1472 | ((isReceive) ? kdbdmaWaitNever : | |
1473 | kdbdmaWaitIfFalse), | |
1474 | (segVector[0].length), | |
1475 | (segVector[0].location) ); | |
1476 | ||
1477 | desc->desc_seg[1] = (isReceive) ? dbdmaCmd_NopWInt : dbdmaCmd_Nop; | |
1478 | } | |
1479 | else | |
1480 | { | |
1481 | if ( isReceive ) | |
1482 | { | |
1483 | nextDesc = rxDMACommandsPhys + (int)desc - (int)rxDMACommands + | |
1484 | sizeof(enet_dma_cmd_t); | |
1485 | } | |
1486 | ||
1487 | IOMakeDBDMADescriptorDep( (&desc->desc_seg[0]), | |
1488 | ((isReceive) ? kdbdmaInputMore : kdbdmaOutputMore), | |
1489 | (kdbdmaKeyStream0), | |
1490 | ((isReceive) ? kdbdmaIntIfTrue : kdbdmaIntNever), | |
1491 | ((isReceive) ? kdbdmaBranchIfTrue : | |
1492 | kdbdmaBranchNever), | |
1493 | (kdbdmaWaitNever), | |
1494 | (segVector[0].length), | |
1495 | (segVector[0].location), | |
1496 | nextDesc ); | |
1497 | ||
1498 | IOMakeDBDMADescriptor( (&desc->desc_seg[1]), | |
1499 | ((isReceive) ? kdbdmaInputLast : kdbdmaOutputLast), | |
1500 | (kdbdmaKeyStream0), | |
1501 | ((isReceive) ? kdbdmaIntAlways : kdbdmaIntNever), | |
1502 | (kdbdmaBranchNever), | |
1503 | ((isReceive) ? kdbdmaWaitNever : | |
1504 | kdbdmaWaitIfFalse), | |
1505 | (segVector[1].length), | |
1506 | (segVector[1].location) ); | |
1507 | } | |
1508 | ||
1509 | flush_cache_v( (vm_offset_t)desc, sizeof(enet_dma_cmd_t) ); | |
1510 | ||
1511 | return true; | |
1512 | } | |
1513 | ||
1514 | ||
1515 | #ifdef DEBUG | |
1516 | /* | |
1517 | * Useful for testing. | |
1518 | */ | |
1519 | ||
1520 | void MaceEnet::_dumpDesc(void * addr, u_int32_t size) | |
1521 | { | |
1522 | u_int32_t i; | |
1523 | unsigned long *p; | |
1524 | vm_offset_t paddr; | |
1525 | ||
1526 | _IOPhysicalFromVirtual( (vm_offset_t) addr, (vm_offset_t *)&paddr ); | |
1527 | ||
1528 | p = (unsigned long *)addr; | |
1529 | ||
1530 | for ( i=0; i < size/sizeof(IODBDMADescriptor); i++, p+=4, | |
1531 | paddr+=sizeof(IODBDMADescriptor) ) | |
1532 | { | |
1533 | IOLog("Ethernet(Mace): %08x(v) %08x(p): %08x %08x %08x %08x\n", | |
1534 | (int)p, | |
1535 | (int)paddr, | |
1536 | (int)OSReadSwapInt32(p, 0), (int)OSReadSwapInt32(p, 4), | |
1537 | (int)OSReadSwapInt32(p, 8), (int)OSReadSwapInt32(p, 12) ); | |
1538 | } | |
1539 | IOLog("\n"); | |
1540 | } | |
1541 | ||
1542 | void MaceEnet::_dumpRegisters() | |
1543 | { | |
1544 | u_int8_t dataValue; | |
1545 | ||
1546 | IOLog("\nEthernet(Mace): IO Address = %08x", (int)ioBaseEnet ); | |
1547 | ||
1548 | dataValue = ReadMaceRegister(ioBaseEnet, kXmtFC); | |
1549 | IOLog("\nEthernet(Mace): Read Register %04x Transmit Frame Control = %02x", kXmtFC, dataValue ); | |
1550 | ||
1551 | dataValue = ReadMaceRegister(ioBaseEnet, kXmtFS); | |
1552 | IOLog("\nEthernet(Mace): Read Register %04x Transmit Frame Status = %02x", kXmtFS, dataValue ); | |
1553 | ||
1554 | dataValue = ReadMaceRegister(ioBaseEnet, kXmtRC); | |
1555 | IOLog("\nEthernet(Mace): Read Register %04x Transmit Retry Count = %02x", kXmtRC, dataValue ); | |
1556 | ||
1557 | dataValue = ReadMaceRegister(ioBaseEnet, kRcvFC); | |
1558 | IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Control = %02x", kRcvFC, dataValue ); | |
1559 | ||
1560 | dataValue = ReadMaceRegister(ioBaseEnet, kRcvFS0); | |
1561 | IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Status 0 = %02x", kRcvFS0, dataValue ); | |
1562 | dataValue = ReadMaceRegister(ioBaseEnet, kRcvFS1); | |
1563 | IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Status 1 = %02x", kRcvFS1, dataValue ); | |
1564 | dataValue = ReadMaceRegister(ioBaseEnet, kRcvFS2); | |
1565 | IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Status 2 = %02x", kRcvFS2, dataValue ); | |
1566 | dataValue = ReadMaceRegister(ioBaseEnet, kRcvFS3); | |
1567 | IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Status 3 = %02x", kRcvFS3, dataValue ); | |
1568 | ||
1569 | dataValue = ReadMaceRegister(ioBaseEnet, kFifoFC); | |
1570 | IOLog("\nEthernet(Mace): Read Register %04x FIFO Frame Count = %02x", kFifoFC, dataValue ); | |
1571 | ||
1572 | dataValue = ReadMaceRegister(ioBaseEnet, kIntReg); | |
1573 | IOLog("\nEthernet(Mace): Read Register %04x Interrupt Register = %02x", kIntReg, dataValue ); | |
1574 | ||
1575 | dataValue = ReadMaceRegister(ioBaseEnet, kIntMask); | |
1576 | IOLog("\nEthernet(Mace): Read Register %04x Interrupt Mask Register = %02x", kIntMask, dataValue ); | |
1577 | ||
1578 | dataValue = ReadMaceRegister(ioBaseEnet, kPollReg); | |
1579 | IOLog("\nEthernet(Mace): Read Register %04x Poll Register = %02x", kPollReg, dataValue ); | |
1580 | ||
1581 | dataValue = ReadMaceRegister(ioBaseEnet, kBIUCC); | |
1582 | IOLog("\nEthernet(Mace): Read Register %04x BUI Configuration Control = %02x", kBIUCC, dataValue ); | |
1583 | ||
1584 | dataValue = ReadMaceRegister(ioBaseEnet, kFifoCC); | |
1585 | IOLog("\nEthernet(Mace): Read Register %04x FIFO Configuration Control = %02x", kFifoCC, dataValue ); | |
1586 | ||
1587 | dataValue = ReadMaceRegister(ioBaseEnet, kMacCC); | |
1588 | IOLog("\nEthernet(Mace): Read Register %04x MAC Configuration Control = %02x", kMacCC, dataValue ); | |
1589 | ||
1590 | dataValue = ReadMaceRegister(ioBaseEnet, kPLSCC); | |
1591 | IOLog("\nEthernet(Mace): Read Register %04x PLS Configuration Contro = %02x", kPLSCC, dataValue ); | |
1592 | ||
1593 | dataValue = ReadMaceRegister(ioBaseEnet, kPHYCC); | |
1594 | IOLog("\nEthernet(Mace): Read Register %04x PHY Configuration Control = %02x", kPHYCC, dataValue ); | |
1595 | ||
1596 | dataValue = ReadMaceRegister(ioBaseEnet, kMaceChipId0); | |
1597 | IOLog("\nEthernet(Mace): Read Register %04x MACE ChipID Register 7:0 = %02x", kMaceChipId0, dataValue ); | |
1598 | ||
1599 | dataValue = ReadMaceRegister(ioBaseEnet, kMaceChipId1); | |
1600 | IOLog("\nEthernet(Mace): Read Register %04x MACE ChipID Register 15:8 = %02x", kMaceChipId1, dataValue ); | |
1601 | ||
1602 | dataValue = ReadMaceRegister(ioBaseEnet, kMPC); | |
1603 | IOLog("\nEthernet(Mace): Read Register %04x Missed Packet Count = %02x", kMPC, dataValue ); | |
1604 | ||
1605 | dataValue = ReadMaceRegister(ioBaseEnet, kUTR); | |
1606 | IOLog("\nEthernet(Mace): Read Register %04x User Test Register = %02x", kUTR, dataValue ); | |
1607 | IOLog("\nEthernet(Mace): -------------------------------------------------------\n" ); | |
1608 | } | |
1609 | #endif /* DEBUG */ | |
1610 | ||
1611 | ||
1612 | /*------------------------------------------------------------------------- | |
1613 | * | |
1614 | * | |
1615 | * | |
1616 | *-------------------------------------------------------------------------*/ | |
1617 | ||
1618 | IOReturn MaceEnet::getHardwareAddress(IOEthernetAddress *ea) | |
1619 | { | |
1620 | unsigned char data; | |
1621 | ||
1622 | for (UInt i = 0; i < sizeof(*ea); i++) | |
1623 | { | |
1624 | data = ((unsigned char *)ioBaseEnetROM)[i << 4]; | |
1625 | ea->bytes[i] = reverseBitOrder(data); | |
1626 | } | |
1627 | ||
1628 | return kIOReturnSuccess; | |
1629 | } | |
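/*
 * Note: the station address ROM spaces the six address bytes 16 bytes apart
 * (hence the i << 4 indexing) and stores each byte bit-reversed relative to
 * the canonical representation, which is why reverseBitOrder() is applied
 * both here and when programming kPADR in _initChip().
 */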
1630 | ||
1631 | /*------------------------------------------------------------------------- | |
1632 | * | |
1633 | * | |
1634 | * | |
1635 | *-------------------------------------------------------------------------*/ | |
1636 | ||
1637 | #define ENET_CRCPOLY 0x04c11db7 | |
1638 | ||
1639 | /* Real fast bit-reversal algorithm, 6-bit values */ | |
1640 | static int reverse6[] = | |
1641 | { 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, | |
1642 | 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, | |
1643 | 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, | |
1644 | 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, | |
1645 | 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, | |
1646 | 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, | |
1647 | 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, | |
1648 | 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f | |
1649 | }; | |
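/*
 * reverse6[i] is the 6-bit mirror image of i, e.g. reverse6[0x01] == 0x20
 * and reverse6[0x03] == 0x30. It converts the low six CRC bits into the
 * bit number used by the 64-bit logical address filter below.
 */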
1650 | ||
1651 | static u_int32_t crc416(unsigned int current, unsigned short nxtval ) | |
1652 | { | |
1653 | register unsigned int counter; | |
1654 | register int highCRCBitSet, lowDataBitSet; | |
1655 | ||
1656 | /* Swap bytes */ | |
1657 | nxtval = ((nxtval & 0x00FF) << 8) | (nxtval >> 8); | |
1658 | ||
1659 | /* Compute bit-by-bit */ | |
1660 | for (counter = 0; counter != 16; ++counter) | |
1661 | { /* is high CRC bit set? */ | |
1662 | if ((current & 0x80000000) == 0) | |
1663 | highCRCBitSet = 0; | |
1664 | else | |
1665 | highCRCBitSet = 1; | |
1666 | ||
1667 | current = current << 1; | |
1668 | ||
1669 | if ((nxtval & 0x0001) == 0) | |
1670 | lowDataBitSet = 0; | |
1671 | else | |
1672 | lowDataBitSet = 1; | |
1673 | ||
1674 | nxtval = nxtval >> 1; | |
1675 | ||
1676 | /* do the XOR */ | |
1677 | if (highCRCBitSet ^ lowDataBitSet) | |
1678 | current = current ^ ENET_CRCPOLY; | |
1679 | } | |
1680 | return current; | |
1681 | } | |
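/*
 * crc416() is a straight bit-serial CRC-32 over 16 bits of input using the
 * Ethernet polynomial 0x04c11db7 (ENET_CRCPOLY), MSB-first; mace_crc() below
 * seeds it with 0xffffffff and feeds it the three 16-bit halves of the
 * Ethernet address.
 */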
1682 | ||
1683 | /*------------------------------------------------------------------------- | |
1684 | * | |
1685 | * | |
1686 | * | |
1687 | *-------------------------------------------------------------------------*/ | |
1688 | ||
1689 | static u_int32_t mace_crc(unsigned short *address) | |
1690 | { | |
1691 | register u_int32_t newcrc; | |
1692 | ||
1693 | newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ | |
1694 | newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ | |
1695 | newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ | |
1696 | ||
1697 | return (newcrc); | |
1698 | } | |
1699 | ||
1700 | /* | |
1701 | * Clear the hash table filter. | |
1702 | * | |
1703 | */ | |
1704 | void MaceEnet::_resetHashTableMask() | |
1705 | { | |
1706 | bzero(hashTableUseCount, sizeof(hashTableUseCount)); | |
1707 | bzero(hashTableMask, sizeof(hashTableMask)); | |
1708 | } | |
1709 | ||
1710 | /* | |
1711 | * Add requested mcast addr to Mace's hash table filter. | |
1712 | * | |
1713 | */ | |
1714 | void MaceEnet::_addToHashTableMask(u_int8_t *addr) | |
1715 | { | |
1716 | u_int32_t crc; | |
1717 | u_int8_t mask; | |
1718 | ||
1719 | crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! */ | |
1720 | crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ | |
1721 | if (hashTableUseCount[crc]++) | |
1722 | return; /* This bit is already set */ | |
1723 | mask = crc % 8; | |
1724 | mask = (unsigned char) 1 << mask; | |
1725 | hashTableMask[crc/8] |= mask; | |
1726 | } | |
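/*
 * Example (sketch): a 6-bit index b produced above selects bit (b % 8) of
 * hashTableMask[b / 8], i.e. bit b of the 64-bit logical address filter
 * that _updateHashTableMask() later writes to the kLADRF register eight
 * bytes at a time.
 */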
1727 | ||
1728 | /*------------------------------------------------------------------------- | |
1729 | * | |
1730 | * | |
1731 | * | |
1732 | *-------------------------------------------------------------------------*/ | |
1733 | ||
1734 | void MaceEnet::_removeFromHashTableMask(u_int8_t *addr) | |
1735 | { | |
1736 | unsigned int crc; | |
1737 | unsigned char mask; | |
1738 | ||
1739 | /* Now, delete the address from the filter copy, as indicated */ | |
1740 | crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! */ | |
1741 | crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ | |
1742 | if (hashTableUseCount[crc] == 0) | |
1743 | return; /* That bit wasn't in use! */ | |
1744 | ||
1745 | if (--hashTableUseCount[crc]) | |
1746 | return; /* That bit is still in use */ | |
1747 | ||
1748 | mask = crc % 8; | |
1749 | mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */ | |
1750 | hashTableMask[crc/8] &= mask; | |
1751 | } | |
1752 | ||
1753 | /* | |
1754 | * Sync the adapter with the software copy of the multicast mask | |
1755 | * (logical address filter). | |
1756 | */ | |
1757 | void MaceEnet::_updateHashTableMask() | |
1758 | { | |
1759 | u_int8_t status; | |
1760 | u_int32_t i; | |
1761 | u_int8_t *p; | |
1762 | u_int8_t MacCCReg; | |
1763 | ||
1764 | // Stop the receiver before changing the filter. | |
1765 | // | |
1766 | MacCCReg = ReadMaceRegister( ioBaseEnet, kMacCC ); | |
1767 | WriteMaceRegister( ioBaseEnet, kMacCC, MacCCReg & ~kMacCCEnRcv ); | |
1768 | IODelay( RECEIVE_QUIESCE_uS ); | |
1769 | ||
1770 | if ( chipId != kMaceRevisionA2 ) | |
1771 | { | |
1772 | WriteMaceRegister( ioBaseEnet, kIAC, kIACAddrChg | kIACLogAddr ); | |
1773 | do | |
1774 | { | |
1775 | status = ReadMaceRegister( ioBaseEnet, kIAC ); | |
1776 | } | |
1777 | while( status & kIACAddrChg ); | |
1778 | } | |
1779 | else | |
1780 | { | |
1781 | WriteMaceRegister( ioBaseEnet, kIAC, kIACLogAddr ); | |
1782 | } | |
1783 | ||
1784 | p = (u_int8_t *) hashTableMask; | |
1785 | for (i = 0; i < 8; i++, p++ ) | |
1786 | { | |
1787 | WriteMaceRegister( ioBaseEnet, kLADRF, *p ); | |
1788 | } | |
1789 | ||
1790 | // Restore the engine's state. | |
1791 | // | |
1792 | WriteMaceRegister( ioBaseEnet, kMacCC, MacCCReg ); | |
1793 | } |