ia64/linux-2.6.18-xen.hg

view drivers/net/gt96100eth.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we only partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
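
For illustration only -- a minimal sketch of the retry behaviour
described above, not the actual balloon.c change. The helpers
alloc_pages_from_hypervisor(), add_pages_to_guest() and
schedule_balloon_retry() are hypothetical stand-ins for the driver's
real primitives.

    /* Sketch only: hypothetical helper names, not the real driver API. */
    static void balloon_up(unsigned long nr_wanted)
    {
            /* The hypervisor may grant fewer pages than we asked for. */
            unsigned long granted = alloc_pages_from_hypervisor(nr_wanted);

            /* Keep whatever was granted rather than returning it to Xen. */
            add_pages_to_guest(granted);

            /*
             * Treat any shortfall as transient host memory pressure and
             * retry later on a timer, instead of recording a hard limit
             * and giving up until the target is set again.
             */
            if (granted < nr_wanted)
                    schedule_balloon_retry();
    }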

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
1 /*
2 * Copyright 2000, 2001 MontaVista Software Inc.
3 * Author: MontaVista Software, Inc.
4 * stevel@mvista.com or source@mvista.com
5 *
6 * This program is free software; you can distribute it and/or modify it
7 * under the terms of the GNU General Public License (Version 2) as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
18 *
19 * Ethernet driver for the MIPS GT96100 Advanced Communication Controller.
20 *
21 * Revision history
22 *
23 * 11.11.2001 Moved to 2.4.14, ppopov@mvista.com. Modified driver to add
24 * proper gt96100A support.
25 * 12.05.2001 Moved eth port 0 to irq 3 (mapped to GT_SERINT0 on EV96100A)
26 * in order for both ports to work. Also cleaned up boot
27 * option support (mac address string parsing), fleshed out
28 * gt96100_cleanup_module(), and other general code cleanups
29 * <stevel@mvista.com>.
30 */
31 #include <linux/module.h>
32 #include <linux/kernel.h>
33 #include <linux/string.h>
34 #include <linux/timer.h>
35 #include <linux/errno.h>
36 #include <linux/in.h>
37 #include <linux/ioport.h>
38 #include <linux/slab.h>
39 #include <linux/interrupt.h>
40 #include <linux/pci.h>
41 #include <linux/init.h>
42 #include <linux/netdevice.h>
43 #include <linux/etherdevice.h>
44 #include <linux/skbuff.h>
45 #include <linux/delay.h>
46 #include <linux/ctype.h>
47 #include <linux/bitops.h>
49 #include <asm/irq.h>
50 #include <asm/io.h>
52 #define DESC_BE 1
53 #define DESC_DATA_BE 1
55 #define GT96100_DEBUG 2
57 #include "gt96100eth.h"
59 // prototypes
60 static void* dmaalloc(size_t size, dma_addr_t *dma_handle);
61 static void dmafree(size_t size, void *vaddr);
62 static void gt96100_delay(int msec);
63 static int gt96100_add_hash_entry(struct net_device *dev,
64 unsigned char* addr);
65 static void read_mib_counters(struct gt96100_private *gp);
66 static int read_MII(int phy_addr, u32 reg);
67 static int write_MII(int phy_addr, u32 reg, u16 data);
68 static int gt96100_init_module(void);
69 static void gt96100_cleanup_module(void);
70 static void dump_MII(int dbg_lvl, struct net_device *dev);
71 static void dump_tx_desc(int dbg_lvl, struct net_device *dev, int i);
72 static void dump_rx_desc(int dbg_lvl, struct net_device *dev, int i);
73 static void dump_skb(int dbg_lvl, struct net_device *dev,
74 struct sk_buff *skb);
75 static void update_stats(struct gt96100_private *gp);
76 static void abort(struct net_device *dev, u32 abort_bits);
77 static void hard_stop(struct net_device *dev);
78 static void enable_ether_irq(struct net_device *dev);
79 static void disable_ether_irq(struct net_device *dev);
80 static int gt96100_probe1(struct pci_dev *pci, int port_num);
81 static void reset_tx(struct net_device *dev);
82 static void reset_rx(struct net_device *dev);
83 static int gt96100_check_tx_consistent(struct gt96100_private *gp);
84 static int gt96100_init(struct net_device *dev);
85 static int gt96100_open(struct net_device *dev);
86 static int gt96100_close(struct net_device *dev);
87 static int gt96100_tx(struct sk_buff *skb, struct net_device *dev);
88 static int gt96100_rx(struct net_device *dev, u32 status);
89 static irqreturn_t gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs);
90 static void gt96100_tx_timeout(struct net_device *dev);
91 static void gt96100_set_rx_mode(struct net_device *dev);
92 static struct net_device_stats* gt96100_get_stats(struct net_device *dev);
94 extern char * __init prom_getcmdline(void);
96 static int max_interrupt_work = 32;
98 #define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
100 #define RUN_AT(x) (jiffies + (x))
102 // For reading/writing 32-bit words and half-words from/to DMA memory
103 #ifdef DESC_BE
104 #define cpu_to_dma32 cpu_to_be32
105 #define dma32_to_cpu be32_to_cpu
106 #define cpu_to_dma16 cpu_to_be16
107 #define dma16_to_cpu be16_to_cpu
108 #else
109 #define cpu_to_dma32 cpu_to_le32
110 #define dma32_to_cpu le32_to_cpu
111 #define cpu_to_dma16 cpu_to_le16
112 #define dma16_to_cpu le16_to_cpu
113 #endif
115 static char mac0[18] = "00.02.03.04.05.06";
116 static char mac1[18] = "00.01.02.03.04.05";
117 module_param_string(mac0, mac0, 18, 0);
118 module_param_string(mac1, mac1, 18, 0);
119 MODULE_PARM_DESC(mac0, "MAC address for GT96100 ethernet port 0");
120 MODULE_PARM_DESC(mac1, "MAC address for GT96100 ethernet port 1");
122 /*
123 * Info for the GT96100 ethernet controller's ports.
124 */
125 static struct gt96100_if_t {
126 struct net_device *dev;
127 unsigned int iobase; // IO Base address of this port
128 int irq; // IRQ number of this port
129 char *mac_str;
130 } gt96100_iflist[NUM_INTERFACES] = {
131 {
132 NULL,
133 GT96100_ETH0_BASE, GT96100_ETHER0_IRQ,
134 mac0
135 },
136 {
137 NULL,
138 GT96100_ETH1_BASE, GT96100_ETHER1_IRQ,
139 mac1
140 }
141 };
143 static inline const char*
144 chip_name(int chip_rev)
145 {
146 switch (chip_rev) {
147 case REV_GT96100:
148 return "GT96100";
149 case REV_GT96100A_1:
150 case REV_GT96100A:
151 return "GT96100A";
152 default:
153 return "Unknown GT96100";
154 }
155 }
157 /*
158 DMA memory allocation, derived from pci_alloc_consistent.
159 */
160 static void * dmaalloc(size_t size, dma_addr_t *dma_handle)
161 {
162 void *ret;
164 ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, get_order(size));
166 if (ret != NULL) {
167 dma_cache_inv((unsigned long)ret, size);
168 if (dma_handle != NULL)
169 *dma_handle = virt_to_phys(ret);
171 /* bump virtual address up to non-cached area */
172 ret = (void*)KSEG1ADDR(ret);
173 }
175 return ret;
176 }
178 static void dmafree(size_t size, void *vaddr)
179 {
180 vaddr = (void*)KSEG0ADDR(vaddr);
181 free_pages((unsigned long)vaddr, get_order(size));
182 }
184 static void gt96100_delay(int ms)
185 {
186 if (in_interrupt())
187 return;
188 else
189 msleep_interruptible(ms);
190 }
192 static int
193 parse_mac_addr(struct net_device *dev, char* macstr)
194 {
195 int i, j;
196 unsigned char result, value;
198 for (i=0; i<6; i++) {
199 result = 0;
200 if (i != 5 && *(macstr+2) != '.') {
201 err(__FILE__ "invalid mac address format: %d %c\n",
202 i, *(macstr+2));
203 return -EINVAL;
204 }
206 for (j=0; j<2; j++) {
207 if (isxdigit(*macstr) &&
208 (value = isdigit(*macstr) ? *macstr-'0' :
209 toupper(*macstr)-'A'+10) < 16) {
210 result = result*16 + value;
211 macstr++;
212 } else {
213 err(__FILE__ "invalid mac address "
214 "character: %c\n", *macstr);
215 return -EINVAL;
216 }
217 }
219 macstr++; // step over '.'
220 dev->dev_addr[i] = result;
221 }
223 return 0;
224 }
227 static int
228 read_MII(int phy_addr, u32 reg)
229 {
230 int timedout = 20;
231 u32 smir = smirOpCode | (phy_addr << smirPhyAdBit) |
232 (reg << smirRegAdBit);
234 // wait for last operation to complete
235 while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
236 // snooze for 1 msec and check again
237 gt96100_delay(1);
239 if (--timedout == 0) {
240 printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
241 return -ENODEV;
242 }
243 }
245 GT96100_WRITE(GT96100_ETH_SMI_REG, smir);
247 timedout = 20;
248 // wait for read to complete
249 while (!((smir = GT96100_READ(GT96100_ETH_SMI_REG)) & smirReadValid)) {
250 // snooze for 1 msec and check again
251 gt96100_delay(1);
253 if (--timedout == 0) {
254 printk(KERN_ERR "%s: timeout!!\n", __FUNCTION__);
255 return -ENODEV;
256 }
257 }
259 return (int)(smir & smirDataMask);
260 }
262 static void
263 dump_tx_desc(int dbg_lvl, struct net_device *dev, int i)
264 {
265 struct gt96100_private *gp = netdev_priv(dev);
266 gt96100_td_t *td = &gp->tx_ring[i];
268 dbg(dbg_lvl, "Tx descriptor at 0x%08lx:\n", virt_to_phys(td));
269 dbg(dbg_lvl,
270 " cmdstat=%04x, byte_cnt=%04x, buff_ptr=%04x, next=%04x\n",
271 dma32_to_cpu(td->cmdstat),
272 dma16_to_cpu(td->byte_cnt),
273 dma32_to_cpu(td->buff_ptr),
274 dma32_to_cpu(td->next));
275 }
277 static void
278 dump_rx_desc(int dbg_lvl, struct net_device *dev, int i)
279 {
280 struct gt96100_private *gp = netdev_priv(dev);
281 gt96100_rd_t *rd = &gp->rx_ring[i];
283 dbg(dbg_lvl, "Rx descriptor at 0x%08lx:\n", virt_to_phys(rd));
284 dbg(dbg_lvl, " cmdstat=%04x, buff_sz=%04x, byte_cnt=%04x, "
285 "buff_ptr=%04x, next=%04x\n",
286 dma32_to_cpu(rd->cmdstat),
287 dma16_to_cpu(rd->buff_sz),
288 dma16_to_cpu(rd->byte_cnt),
289 dma32_to_cpu(rd->buff_ptr),
290 dma32_to_cpu(rd->next));
291 }
293 static int
294 write_MII(int phy_addr, u32 reg, u16 data)
295 {
296 int timedout = 20;
297 u32 smir = (phy_addr << smirPhyAdBit) |
298 (reg << smirRegAdBit) | data;
300 // wait for last operation to complete
301 while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
302 // snooze for 1 msec and check again
303 gt96100_delay(1);
305 if (--timedout == 0) {
306 printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
307 return -1;
308 }
309 }
311 GT96100_WRITE(GT96100_ETH_SMI_REG, smir);
312 return 0;
313 }
315 static void
316 dump_MII(int dbg_lvl, struct net_device *dev)
317 {
318 int i, val;
319 struct gt96100_private *gp = netdev_priv(dev);
321 if (dbg_lvl <= GT96100_DEBUG) {
322 for (i=0; i<7; i++) {
323 if ((val = read_MII(gp->phy_addr, i)) >= 0)
324 printk("MII Reg %d=%x\n", i, val);
325 }
326 for (i=16; i<21; i++) {
327 if ((val = read_MII(gp->phy_addr, i)) >= 0)
328 printk("MII Reg %d=%x\n", i, val);
329 }
330 }
331 }
333 static void
334 dump_hw_addr(int dbg_lvl, struct net_device *dev, const char* pfx,
335 const char* func, unsigned char* addr_str)
336 {
337 int i;
338 char buf[100], octet[5];
340 if (dbg_lvl <= GT96100_DEBUG) {
341 sprintf(buf, pfx, func);
342 for (i = 0; i < 6; i++) {
343 sprintf(octet, "%2.2x%s",
344 addr_str[i], i<5 ? ":" : "\n");
345 strcat(buf, octet);
346 }
347 info("%s", buf);
348 }
349 }
352 static void
353 dump_skb(int dbg_lvl, struct net_device *dev, struct sk_buff *skb)
354 {
355 int i;
356 unsigned char* skbdata;
358 if (dbg_lvl <= GT96100_DEBUG) {
359 dbg(dbg_lvl, "%s: skb=%p, skb->data=%p, skb->len=%d\n",
360 __FUNCTION__, skb, skb->data, skb->len);
362 skbdata = (unsigned char*)KSEG1ADDR(skb->data);
364 for (i=0; i<skb->len; i++) {
365 if (!(i % 16))
366 printk(KERN_DEBUG "\n %3.3x: %2.2x,",
367 i, skbdata[i]);
368 else
369 printk(KERN_DEBUG "%2.2x,", skbdata[i]);
370 }
371 printk(KERN_DEBUG "\n");
372 }
373 }
376 static int
377 gt96100_add_hash_entry(struct net_device *dev, unsigned char* addr)
378 {
379 struct gt96100_private *gp = netdev_priv(dev);
380 //u16 hashResult, stmp;
381 //unsigned char ctmp, hash_ea[6];
382 u32 tblEntry1, tblEntry0, *tblEntryAddr;
383 int i;
385 tblEntry1 = hteValid | hteRD;
386 tblEntry1 |= (u32)addr[5] << 3;
387 tblEntry1 |= (u32)addr[4] << 11;
388 tblEntry1 |= (u32)addr[3] << 19;
389 tblEntry1 |= ((u32)addr[2] & 0x1f) << 27;
390 dbg(3, "%s: tblEntry1=%x\n", __FUNCTION__, tblEntry1);
391 tblEntry0 = ((u32)addr[2] >> 5) & 0x07;
392 tblEntry0 |= (u32)addr[1] << 3;
393 tblEntry0 |= (u32)addr[0] << 11;
394 dbg(3, "%s: tblEntry0=%x\n", __FUNCTION__, tblEntry0);
396 #if 0
398 for (i=0; i<6; i++) {
399 // nibble swap
400 ctmp = nibswap(addr[i]);
401 // invert every nibble
402 hash_ea[i] = ((ctmp&1)<<3) | ((ctmp&8)>>3) |
403 ((ctmp&2)<<1) | ((ctmp&4)>>1);
404 hash_ea[i] |= ((ctmp&0x10)<<3) | ((ctmp&0x80)>>3) |
405 ((ctmp&0x20)<<1) | ((ctmp&0x40)>>1);
406 }
408 dump_hw_addr(3, dev, "%s: nib swap/invt addr=", __FUNCTION__, hash_ea);
410 if (gp->hash_mode == 0) {
411 hashResult = ((u16)hash_ea[0] & 0xfc) << 7;
412 stmp = ((u16)hash_ea[0] & 0x03) |
413 (((u16)hash_ea[1] & 0x7f) << 2);
414 stmp ^= (((u16)hash_ea[1] >> 7) & 0x01) |
415 ((u16)hash_ea[2] << 1);
416 stmp ^= (u16)hash_ea[3] | (((u16)hash_ea[4] & 1) << 8);
417 hashResult |= stmp;
418 } else {
419 return -1; // don't support hash mode 1
420 }
422 dbg(3, "%s: hashResult=%x\n", __FUNCTION__, hashResult);
424 tblEntryAddr =
425 (u32 *)(&gp->hash_table[((u32)hashResult & 0x7ff) << 3]);
427 dbg(3, "%s: tblEntryAddr=%p\n", __FUNCTION__, tblEntryAddr);
429 for (i=0; i<HASH_HOP_NUMBER; i++) {
430 if ((*tblEntryAddr & hteValid) &&
431 !(*tblEntryAddr & hteSkip)) {
432 // This entry is already occupied, go to next entry
433 tblEntryAddr += 2;
434 dbg(3, "%s: skipping to %p\n", __FUNCTION__,
435 tblEntryAddr);
436 } else {
437 memset(tblEntryAddr, 0, 8);
438 tblEntryAddr[1] = cpu_to_dma32(tblEntry1);
439 tblEntryAddr[0] = cpu_to_dma32(tblEntry0);
440 break;
441 }
442 }
444 if (i >= HASH_HOP_NUMBER) {
445 err("%s: expired!\n", __FUNCTION__);
446 return -1; // Couldn't find an unused entry
447 }
449 #else
451 tblEntryAddr = (u32 *)gp->hash_table;
452 for (i=0; i<RX_HASH_TABLE_SIZE/4; i+=2) {
453 tblEntryAddr[i+1] = cpu_to_dma32(tblEntry1);
454 tblEntryAddr[i] = cpu_to_dma32(tblEntry0);
455 }
457 #endif
459 return 0;
460 }
463 static void
464 read_mib_counters(struct gt96100_private *gp)
465 {
466 u32* mib_regs = (u32*)&gp->mib;
467 int i;
469 for (i=0; i<sizeof(mib_counters_t)/sizeof(u32); i++)
470 mib_regs[i] = GT96100ETH_READ(gp, GT96100_ETH_MIB_COUNT_BASE +
471 i*sizeof(u32));
472 }
475 static void
476 update_stats(struct gt96100_private *gp)
477 {
478 mib_counters_t *mib = &gp->mib;
479 struct net_device_stats *stats = &gp->stats;
481 read_mib_counters(gp);
483 stats->rx_packets = mib->totalFramesReceived;
484 stats->tx_packets = mib->framesSent;
485 stats->rx_bytes = mib->totalByteReceived;
486 stats->tx_bytes = mib->byteSent;
487 stats->rx_errors = mib->totalFramesReceived - mib->framesReceived;
488 //the tx error counters are incremented by the ISR
489 //rx_dropped incremented by gt96100_rx
490 //tx_dropped incremented by gt96100_tx
491 stats->multicast = mib->multicastFramesReceived;
492 // collisions incremented by gt96100_tx_complete
493 stats->rx_length_errors = mib->oversizeFrames + mib->fragments;
494 // The RxError condition means the Rx DMA encountered a
495 // CPU owned descriptor, which, if things are working as
496 // they should, means the Rx ring has overflowed.
497 stats->rx_over_errors = mib->macRxError;
498 stats->rx_crc_errors = mib->cRCError;
499 }
501 static void
502 abort(struct net_device *dev, u32 abort_bits)
503 {
504 struct gt96100_private *gp = netdev_priv(dev);
505 int timedout = 100; // wait up to 100 msec for hard stop to complete
507 dbg(3, "%s\n", __FUNCTION__);
509 // Return if neither Rx or Tx abort bits are set
510 if (!(abort_bits & (sdcmrAR | sdcmrAT)))
511 return;
513 // make sure only the Rx/Tx abort bits are set
514 abort_bits &= (sdcmrAR | sdcmrAT);
516 spin_lock(&gp->lock);
518 // abort any Rx/Tx DMA immediately
519 GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, abort_bits);
521 dbg(3, "%s: SDMA comm = %x\n", __FUNCTION__,
522 GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));
524 // wait for abort to complete
525 while (GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM) & abort_bits) {
526 // snooze for 1 msec and check again
527 gt96100_delay(1);
529 if (--timedout == 0) {
530 err("%s: timeout!!\n", __FUNCTION__);
531 break;
532 }
533 }
535 spin_unlock(&gp->lock);
536 }
539 static void
540 hard_stop(struct net_device *dev)
541 {
542 struct gt96100_private *gp = netdev_priv(dev);
544 dbg(3, "%s\n", __FUNCTION__);
546 disable_ether_irq(dev);
548 abort(dev, sdcmrAR | sdcmrAT);
550 // disable port
551 GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, 0);
552 }
555 static void
556 enable_ether_irq(struct net_device *dev)
557 {
558 struct gt96100_private *gp = netdev_priv(dev);
559 u32 intMask;
560 /*
561 * route ethernet interrupt to GT_SERINT0 for port 0,
562 * GT_INT0 for port 1.
563 */
564 int intr_mask_reg = (gp->port_num == 0) ?
565 GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;
567 if (gp->chip_rev >= REV_GT96100A_1) {
568 intMask = icrTxBufferLow | icrTxEndLow |
569 icrTxErrorLow | icrRxOVR | icrTxUdr |
570 icrRxBufferQ0 | icrRxErrorQ0 |
571 icrMIIPhySTC | icrEtherIntSum;
572 }
573 else {
574 intMask = icrTxBufferLow | icrTxEndLow |
575 icrTxErrorLow | icrRxOVR | icrTxUdr |
576 icrRxBuffer | icrRxError |
577 icrMIIPhySTC | icrEtherIntSum;
578 }
580 // unmask interrupts
581 GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, intMask);
583 intMask = GT96100_READ(intr_mask_reg);
584 intMask |= 1<<gp->port_num;
585 GT96100_WRITE(intr_mask_reg, intMask);
586 }
588 static void
589 disable_ether_irq(struct net_device *dev)
590 {
591 struct gt96100_private *gp = netdev_priv(dev);
592 u32 intMask;
593 int intr_mask_reg = (gp->port_num == 0) ?
594 GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;
596 intMask = GT96100_READ(intr_mask_reg);
597 intMask &= ~(1<<gp->port_num);
598 GT96100_WRITE(intr_mask_reg, intMask);
600 GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, 0);
601 }
604 /*
605 * Init GT96100 ethernet controller driver
606 */
607 static int gt96100_init_module(void)
608 {
609 struct pci_dev *pci;
610 int i, retval=0;
611 u32 cpuConfig;
613 /*
614 * Stupid probe because this really isn't a PCI device
615 */
616 if (!(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
617 PCI_DEVICE_ID_MARVELL_GT96100, NULL)) &&
618 !(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
619 PCI_DEVICE_ID_MARVELL_GT96100A, NULL))) {
620 printk(KERN_ERR __FILE__ ": GT96100 not found!\n");
621 return -ENODEV;
622 }
624 cpuConfig = GT96100_READ(GT96100_CPU_INTERF_CONFIG);
625 if (cpuConfig & (1<<12)) {
626 printk(KERN_ERR __FILE__
627 ": must be in Big Endian mode!\n");
628 return -ENODEV;
629 }
631 for (i=0; i < NUM_INTERFACES; i++)
632 retval |= gt96100_probe1(pci, i);
634 return retval;
635 }
637 static int __init gt96100_probe1(struct pci_dev *pci, int port_num)
638 {
639 struct gt96100_private *gp = NULL;
640 struct gt96100_if_t *gtif = &gt96100_iflist[port_num];
641 int phy_addr, phy_id1, phy_id2;
642 u32 phyAD;
643 int retval;
644 unsigned char chip_rev;
645 struct net_device *dev = NULL;
647 if (gtif->irq < 0) {
648 printk(KERN_ERR "%s: irq unknown - probing not supported\n",
649 __FUNCTION__);
650 return -ENODEV;
651 }
653 pci_read_config_byte(pci, PCI_REVISION_ID, &chip_rev);
655 if (chip_rev >= REV_GT96100A_1) {
656 phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
657 phy_addr = (phyAD >> (5*port_num)) & 0x1f;
658 } else {
659 /*
660 * not sure what's this about -- probably a gt bug
661 */
662 phy_addr = port_num;
663 phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
664 phyAD &= ~(0x1f << (port_num*5));
665 phyAD |= phy_addr << (port_num*5);
666 GT96100_WRITE(GT96100_ETH_PHY_ADDR_REG, phyAD);
667 }
669 // probe for the external PHY
670 if ((phy_id1 = read_MII(phy_addr, 2)) <= 0 ||
671 (phy_id2 = read_MII(phy_addr, 3)) <= 0) {
672 printk(KERN_ERR "%s: no PHY found on MII%d\n", __FUNCTION__, port_num);
673 return -ENODEV;
674 }
676 if (!request_region(gtif->iobase, GT96100_ETH_IO_SIZE, "GT96100ETH")) {
677 printk(KERN_ERR "%s: request_region failed\n", __FUNCTION__);
678 return -EBUSY;
679 }
681 dev = alloc_etherdev(sizeof(struct gt96100_private));
682 if (!dev)
683 goto out;
684 gtif->dev = dev;
686 /* private struct aligned and zeroed by alloc_etherdev */
687 /* Fill in the 'dev' fields. */
688 dev->base_addr = gtif->iobase;
689 dev->irq = gtif->irq;
691 if ((retval = parse_mac_addr(dev, gtif->mac_str))) {
692 err("%s: MAC address parse failed\n", __FUNCTION__);
693 retval = -EINVAL;
694 goto out1;
695 }
697 gp = netdev_priv(dev);
699 memset(gp, 0, sizeof(*gp)); // clear it
701 gp->port_num = port_num;
702 gp->port_offset = port_num * GT96100_ETH_IO_SIZE;
703 gp->phy_addr = phy_addr;
704 gp->chip_rev = chip_rev;
706 info("%s found at 0x%x, irq %d\n",
707 chip_name(gp->chip_rev), gtif->iobase, gtif->irq);
708 dump_hw_addr(0, dev, "%s: HW Address ", __FUNCTION__, dev->dev_addr);
709 info("%s chip revision=%d\n", chip_name(gp->chip_rev), gp->chip_rev);
710 info("%s ethernet port %d\n", chip_name(gp->chip_rev), gp->port_num);
711 info("external PHY ID1=0x%04x, ID2=0x%04x\n", phy_id1, phy_id2);
713 // Allocate Rx and Tx descriptor rings
714 if (gp->rx_ring == NULL) {
715 // All descriptors in ring must be 16-byte aligned
716 gp->rx_ring = dmaalloc(sizeof(gt96100_rd_t) * RX_RING_SIZE
717 + sizeof(gt96100_td_t) * TX_RING_SIZE,
718 &gp->rx_ring_dma);
719 if (gp->rx_ring == NULL) {
720 retval = -ENOMEM;
721 goto out1;
722 }
724 gp->tx_ring = (gt96100_td_t *)(gp->rx_ring + RX_RING_SIZE);
725 gp->tx_ring_dma =
726 gp->rx_ring_dma + sizeof(gt96100_rd_t) * RX_RING_SIZE;
727 }
729 // Allocate the Rx Data Buffers
730 if (gp->rx_buff == NULL) {
731 gp->rx_buff = dmaalloc(PKT_BUF_SZ*RX_RING_SIZE,
732 &gp->rx_buff_dma);
733 if (gp->rx_buff == NULL) {
734 retval = -ENOMEM;
735 goto out2;
736 }
737 }
739 dbg(3, "%s: rx_ring=%p, tx_ring=%p\n", __FUNCTION__,
740 gp->rx_ring, gp->tx_ring);
742 // Allocate Rx Hash Table
743 if (gp->hash_table == NULL) {
744 gp->hash_table = (char*)dmaalloc(RX_HASH_TABLE_SIZE,
745 &gp->hash_table_dma);
746 if (gp->hash_table == NULL) {
747 retval = -ENOMEM;
748 goto out3;
749 }
750 }
752 dbg(3, "%s: hash=%p\n", __FUNCTION__, gp->hash_table);
754 spin_lock_init(&gp->lock);
756 dev->open = gt96100_open;
757 dev->hard_start_xmit = gt96100_tx;
758 dev->stop = gt96100_close;
759 dev->get_stats = gt96100_get_stats;
760 //dev->do_ioctl = gt96100_ioctl;
761 dev->set_multicast_list = gt96100_set_rx_mode;
762 dev->tx_timeout = gt96100_tx_timeout;
763 dev->watchdog_timeo = GT96100ETH_TX_TIMEOUT;
765 retval = register_netdev(dev);
766 if (retval)
767 goto out4;
768 return 0;
770 out4:
771 dmafree(RX_HASH_TABLE_SIZE, gp->hash_table_dma);
772 out3:
773 dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
774 out2:
775 dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
776 + sizeof(gt96100_td_t) * TX_RING_SIZE,
777 gp->rx_ring);
778 out1:
779 free_netdev (dev);
780 out:
781 release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
783 err("%s failed. Returns %d\n", __FUNCTION__, retval);
784 return retval;
785 }
788 static void
789 reset_tx(struct net_device *dev)
790 {
791 struct gt96100_private *gp = netdev_priv(dev);
792 int i;
794 abort(dev, sdcmrAT);
796 for (i=0; i<TX_RING_SIZE; i++) {
797 if (gp->tx_skbuff[i]) {
798 if (in_interrupt())
799 dev_kfree_skb_irq(gp->tx_skbuff[i]);
800 else
801 dev_kfree_skb(gp->tx_skbuff[i]);
802 gp->tx_skbuff[i] = NULL;
803 }
805 gp->tx_ring[i].cmdstat = 0; // CPU owns
806 gp->tx_ring[i].byte_cnt = 0;
807 gp->tx_ring[i].buff_ptr = 0;
808 gp->tx_ring[i].next =
809 cpu_to_dma32(gp->tx_ring_dma +
810 sizeof(gt96100_td_t) * (i+1));
811 dump_tx_desc(4, dev, i);
812 }
813 /* Wrap the ring. */
814 gp->tx_ring[i-1].next = cpu_to_dma32(gp->tx_ring_dma);
816 // setup only the lowest priority TxCDP reg
817 GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR0, gp->tx_ring_dma);
818 GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR1, 0);
820 // init Tx indeces and pkt counter
821 gp->tx_next_in = gp->tx_next_out = 0;
822 gp->tx_count = 0;
824 }
826 static void
827 reset_rx(struct net_device *dev)
828 {
829 struct gt96100_private *gp = netdev_priv(dev);
830 int i;
832 abort(dev, sdcmrAR);
834 for (i=0; i<RX_RING_SIZE; i++) {
835 gp->rx_ring[i].next =
836 cpu_to_dma32(gp->rx_ring_dma +
837 sizeof(gt96100_rd_t) * (i+1));
838 gp->rx_ring[i].buff_ptr =
839 cpu_to_dma32(gp->rx_buff_dma + i*PKT_BUF_SZ);
840 gp->rx_ring[i].buff_sz = cpu_to_dma16(PKT_BUF_SZ);
841 // Give ownership to device, set first and last, enable intr
842 gp->rx_ring[i].cmdstat =
843 cpu_to_dma32((u32)(rxFirst | rxLast | rxOwn | rxEI));
844 dump_rx_desc(4, dev, i);
845 }
846 /* Wrap the ring. */
847 gp->rx_ring[i-1].next = cpu_to_dma32(gp->rx_ring_dma);
849 // Setup only the lowest priority RxFDP and RxCDP regs
850 for (i=0; i<4; i++) {
851 if (i == 0) {
852 GT96100ETH_WRITE(gp, GT96100_ETH_1ST_RX_DESC_PTR0,
853 gp->rx_ring_dma);
854 GT96100ETH_WRITE(gp, GT96100_ETH_CURR_RX_DESC_PTR0,
855 gp->rx_ring_dma);
856 } else {
857 GT96100ETH_WRITE(gp,
858 GT96100_ETH_1ST_RX_DESC_PTR0 + i*4,
859 0);
860 GT96100ETH_WRITE(gp,
861 GT96100_ETH_CURR_RX_DESC_PTR0 + i*4,
862 0);
863 }
864 }
866 // init Rx NextOut index
867 gp->rx_next_out = 0;
868 }
871 // Returns 1 if the Tx counter and indeces don't gel
872 static int
873 gt96100_check_tx_consistent(struct gt96100_private *gp)
874 {
875 int diff = gp->tx_next_in - gp->tx_next_out;
877 diff = diff<0 ? TX_RING_SIZE + diff : diff;
878 diff = gp->tx_count == TX_RING_SIZE ? diff + TX_RING_SIZE : diff;
880 return (diff != gp->tx_count);
881 }
883 static int
884 gt96100_init(struct net_device *dev)
885 {
885 {
886 struct gt96100_private *gp = netdev_priv(dev);
887 u32 tmp;
888 u16 mii_reg;
890 dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
891 dbg(3, "%s: scs10_lo=%4x, scs10_hi=%4x\n", __FUNCTION__,
892 GT96100_READ(0x8), GT96100_READ(0x10));
893 dbg(3, "%s: scs32_lo=%4x, scs32_hi=%4x\n", __FUNCTION__,
894 GT96100_READ(0x18), GT96100_READ(0x20));
896 // Stop and disable Port
897 hard_stop(dev);
899 // Setup CIU Arbiter
900 tmp = GT96100_READ(GT96100_CIU_ARBITER_CONFIG);
901 tmp |= (0x0c << (gp->port_num*2)); // set Ether DMA req priority to hi
902 #ifndef DESC_BE
903 tmp &= ~(1<<31); // set desc endianess to little
904 #else
905 tmp |= (1<<31);
906 #endif
907 GT96100_WRITE(GT96100_CIU_ARBITER_CONFIG, tmp);
908 dbg(3, "%s: CIU Config=%x/%x\n", __FUNCTION__,
909 tmp, GT96100_READ(GT96100_CIU_ARBITER_CONFIG));
911 // Set routing.
912 tmp = GT96100_READ(GT96100_ROUTE_MAIN) & (0x3f << 18);
913 tmp |= (0x07 << (18 + gp->port_num*3));
914 GT96100_WRITE(GT96100_ROUTE_MAIN, tmp);
916 /* set MII as peripheral func */
917 tmp = GT96100_READ(GT96100_GPP_CONFIG2);
918 tmp |= 0x7fff << (gp->port_num*16);
919 GT96100_WRITE(GT96100_GPP_CONFIG2, tmp);
921 /* Set up MII port pin directions */
922 tmp = GT96100_READ(GT96100_GPP_IO2);
923 tmp |= 0x003d << (gp->port_num*16);
924 GT96100_WRITE(GT96100_GPP_IO2, tmp);
926 // Set-up hash table
927 memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE); // clear it
928 gp->hash_mode = 0;
929 // Add a single entry to hash table - our ethernet address
930 gt96100_add_hash_entry(dev, dev->dev_addr);
931 // Set-up DMA ptr to hash table
932 GT96100ETH_WRITE(gp, GT96100_ETH_HASH_TBL_PTR, gp->hash_table_dma);
933 dbg(3, "%s: Hash Tbl Ptr=%x\n", __FUNCTION__,
934 GT96100ETH_READ(gp, GT96100_ETH_HASH_TBL_PTR));
936 // Setup Tx
937 reset_tx(dev);
939 dbg(3, "%s: Curr Tx Desc Ptr0=%x\n", __FUNCTION__,
940 GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0));
942 // Setup Rx
943 reset_rx(dev);
945 dbg(3, "%s: 1st/Curr Rx Desc Ptr0=%x/%x\n", __FUNCTION__,
946 GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0),
947 GT96100ETH_READ(gp, GT96100_ETH_CURR_RX_DESC_PTR0));
949 // eth port config register
950 GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
951 pcxrFCTL | pcxrFCTLen | pcxrFLP | pcxrDPLXen);
953 mii_reg = read_MII(gp->phy_addr, 0x11); /* int enable register */
954 mii_reg |= 2; /* enable mii interrupt */
955 write_MII(gp->phy_addr, 0x11, mii_reg);
957 dbg(3, "%s: PhyAD=%x\n", __FUNCTION__,
958 GT96100_READ(GT96100_ETH_PHY_ADDR_REG));
960 // setup DMA
962 // We want the Rx/Tx DMA to write/read data to/from memory in
963 // Big Endian mode. Also set DMA Burst Size to 8 64Bit words.
964 #ifdef DESC_DATA_BE
965 GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
966 (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
967 #else
968 GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
969 sdcrBLMR | sdcrBLMT |
970 (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
971 #endif
972 dbg(3, "%s: SDMA Config=%x\n", __FUNCTION__,
973 GT96100ETH_READ(gp, GT96100_ETH_SDMA_CONFIG));
975 // start Rx DMA
976 GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);
977 dbg(3, "%s: SDMA Comm=%x\n", __FUNCTION__,
978 GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));
980 // enable this port (set hash size to 1/2K)
981 GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, pcrEN | pcrHS);
982 dbg(3, "%s: Port Config=%x\n", __FUNCTION__,
983 GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG));
985 /*
986 * Disable all Type-of-Service queueing. All Rx packets will be
987 * treated normally and will be sent to the lowest priority
988 * queue.
989 *
990 * Disable flow-control for now. FIXME: support flow control?
991 */
993 // clear all the MIB ctr regs
994 GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
995 pcxrFCTL | pcxrFCTLen | pcxrFLP |
996 pcxrPRIOrxOverride);
997 read_mib_counters(gp);
998 GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
999 pcxrFCTL | pcxrFCTLen | pcxrFLP |
1000 pcxrPRIOrxOverride | pcxrMIBclrMode);
1002 dbg(3, "%s: Port Config Ext=%x\n", __FUNCTION__,
1003 GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG_EXT));
1005 netif_start_queue(dev);
1007 dump_MII(4, dev);
1009 // enable interrupts
1010 enable_ether_irq(dev);
1012 // we should now be receiving frames
1013 return 0;
1014 }
1017 static int
1018 gt96100_open(struct net_device *dev)
1019 {
1020 int retval;
1022 dbg(2, "%s: dev=%p\n", __FUNCTION__, dev);
1024 // Initialize and startup the GT-96100 ethernet port
1025 if ((retval = gt96100_init(dev))) {
1026 err("error in gt96100_init\n");
1027 free_irq(dev->irq, dev);
1028 return retval;
1029 }
1031 if ((retval = request_irq(dev->irq, &gt96100_interrupt,
1032 IRQF_SHARED, dev->name, dev))) {
1033 err("unable to get IRQ %d\n", dev->irq);
1034 return retval;
1035 }
1037 dbg(2, "%s: Initialization done.\n", __FUNCTION__);
1039 return 0;
1040 }
1042 static int
1043 gt96100_close(struct net_device *dev)
1044 {
1045 dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
1047 // stop the device
1048 if (netif_device_present(dev)) {
1049 netif_stop_queue(dev);
1050 hard_stop(dev);
1051 }
1053 free_irq(dev->irq, dev);
1055 return 0;
1056 }
1059 static int
1060 gt96100_tx(struct sk_buff *skb, struct net_device *dev)
1061 {
1062 struct gt96100_private *gp = netdev_priv(dev);
1063 unsigned long flags;
1064 int nextIn;
1066 spin_lock_irqsave(&gp->lock, flags);
1068 nextIn = gp->tx_next_in;
1070 dbg(3, "%s: nextIn=%d\n", __FUNCTION__, nextIn);
1072 if (gp->tx_count >= TX_RING_SIZE) {
1073 warn("Tx Ring full, pkt dropped.\n");
1074 gp->stats.tx_dropped++;
1075 spin_unlock_irqrestore(&gp->lock, flags);
1076 return 1;
1077 }
1079 if (!(gp->last_psr & psrLink)) {
1080 err("%s: Link down, pkt dropped.\n", __FUNCTION__);
1081 gp->stats.tx_dropped++;
1082 spin_unlock_irqrestore(&gp->lock, flags);
1083 return 1;
1084 }
1086 if (dma32_to_cpu(gp->tx_ring[nextIn].cmdstat) & txOwn) {
1087 err("%s: device owns descriptor, pkt dropped.\n", __FUNCTION__);
1088 gp->stats.tx_dropped++;
1089 // stop the queue, so Tx timeout can fix it
1090 netif_stop_queue(dev);
1091 spin_unlock_irqrestore(&gp->lock, flags);
1092 return 1;
1093 }
1095 // Prepare the Descriptor at tx_next_in
1096 gp->tx_skbuff[nextIn] = skb;
1097 gp->tx_ring[nextIn].byte_cnt = cpu_to_dma16(skb->len);
1098 gp->tx_ring[nextIn].buff_ptr = cpu_to_dma32(virt_to_phys(skb->data));
1099 // make sure packet gets written back to memory
1100 dma_cache_wback_inv((unsigned long)(skb->data), skb->len);
1101 // Give ownership to device, set first and last desc, enable interrupt
1102 // Setting of ownership bit must be *last*!
1103 gp->tx_ring[nextIn].cmdstat =
1104 cpu_to_dma32((u32)(txOwn | txGenCRC | txEI |
1105 txPad | txFirst | txLast));
1107 dump_tx_desc(4, dev, nextIn);
1108 dump_skb(4, dev, skb);
1110 // increment tx_next_in with wrap
1111 gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
1112 // If DMA is stopped, restart
1113 if (!(GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS) & psrTxLow))
1114 GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
1115 sdcmrERD | sdcmrTXDL);
1117 // increment count and stop queue if full
1118 if (++gp->tx_count == TX_RING_SIZE) {
1119 gp->tx_full = 1;
1120 netif_stop_queue(dev);
1121 dbg(2, "Tx Ring now full, queue stopped.\n");
1122 }
1124 dev->trans_start = jiffies;
1125 spin_unlock_irqrestore(&gp->lock, flags);
1127 return 0;
1128 }
1131 static int
1132 gt96100_rx(struct net_device *dev, u32 status)
1133 {
1134 struct gt96100_private *gp = netdev_priv(dev);
1135 struct sk_buff *skb;
1136 int pkt_len, nextOut, cdp;
1137 gt96100_rd_t *rd;
1138 u32 cmdstat;
1140 dbg(3, "%s: dev=%p, status=%x\n", __FUNCTION__, dev, status);
1142 cdp = (GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0)
1143 - gp->rx_ring_dma) / sizeof(gt96100_rd_t);
1145 // Continue until we reach 1st descriptor pointer
1146 for (nextOut = gp->rx_next_out; nextOut != cdp;
1147 nextOut = (nextOut + 1) % RX_RING_SIZE) {
1149 if (--gp->intr_work_done == 0)
1150 break;
1152 rd = &gp->rx_ring[nextOut];
1153 cmdstat = dma32_to_cpu(rd->cmdstat);
1155 dbg(4, "%s: Rx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
1156 cmdstat, nextOut);
1158 if (cmdstat & (u32)rxOwn) {
1159 //err("%s: device owns descriptor!\n", __FUNCTION__);
1160 // DMA is not finished updating descriptor???
1161 // Leave and come back later to pick-up where
1162 // we left off.
1163 break;
1164 }
1166 // Drop this received pkt if there were any errors
1167 if (((cmdstat & (u32)(rxErrorSummary)) &&
1168 (cmdstat & (u32)(rxFirst))) || (status & icrRxError)) {
1169 // update the detailed rx error counters that
1170 // are not covered by the MIB counters.
1171 if (cmdstat & (u32)rxOverrun)
1172 gp->stats.rx_fifo_errors++;
1173 cmdstat |= (u32)rxOwn;
1174 rd->cmdstat = cpu_to_dma32(cmdstat);
1175 continue;
1176 }
1178 /*
1179 * Must be first and last (ie only) descriptor of packet. We
1180 * ignore (drop) any packets that do not fit in one descriptor.
1181 * Every descriptor's receive buffer is large enough to hold
1182 * the maximum 802.3 frame size, so a multi-descriptor packet
1183 * indicates an error. Most if not all corrupted packets will
1184 * have already been dropped by the above check for the
1185 * rxErrorSummary status bit.
1186 */
1187 if (!(cmdstat & (u32)rxFirst) || !(cmdstat & (u32)rxLast)) {
1188 if (cmdstat & (u32)rxFirst) {
1189 /*
1190 * This is the first descriptor of a
1191 * multi-descriptor packet. It isn't corrupted
1192 * because the above check for rxErrorSummary
1193 * would have dropped it already, so what's
1194 * the deal with this packet? Good question,
1195 * let's dump it out.
1196 */
1197 err("%s: desc not first and last!\n", __FUNCTION__);
1198 dump_rx_desc(0, dev, nextOut);
1199 }
1200 cmdstat |= (u32)rxOwn;
1201 rd->cmdstat = cpu_to_dma32(cmdstat);
1202 // continue to drop every descriptor of this packet
1203 continue;
1204 }
1206 pkt_len = dma16_to_cpu(rd->byte_cnt);
1208 /* Create new skb. */
1209 skb = dev_alloc_skb(pkt_len+2);
1210 if (skb == NULL) {
1211 err("%s: Memory squeeze, dropping packet.\n", __FUNCTION__);
1212 gp->stats.rx_dropped++;
1213 cmdstat |= (u32)rxOwn;
1214 rd->cmdstat = cpu_to_dma32(cmdstat);
1215 continue;
1216 }
1217 skb->dev = dev;
1218 skb_reserve(skb, 2); /* 16 byte IP header align */
1219 memcpy(skb_put(skb, pkt_len),
1220 &gp->rx_buff[nextOut*PKT_BUF_SZ], pkt_len);
1221 skb->protocol = eth_type_trans(skb, dev);
1222 dump_skb(4, dev, skb);
1224 netif_rx(skb); /* pass the packet to upper layers */
1225 dev->last_rx = jiffies;
1227 // now we can release ownership of this desc back to device
1228 cmdstat |= (u32)rxOwn;
1229 rd->cmdstat = cpu_to_dma32(cmdstat);
1230 }
1232 if (nextOut == gp->rx_next_out)
1233 dbg(3, "%s: RxCDP did not increment?\n", __FUNCTION__);
1235 gp->rx_next_out = nextOut;
1236 return 0;
1237 }
1240 static void
1241 gt96100_tx_complete(struct net_device *dev, u32 status)
1242 {
1243 struct gt96100_private *gp = netdev_priv(dev);
1244 int nextOut, cdp;
1245 gt96100_td_t *td;
1246 u32 cmdstat;
1248 cdp = (GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0)
1249 - gp->tx_ring_dma) / sizeof(gt96100_td_t);
1251 // Continue until we reach the current descriptor pointer
1252 for (nextOut = gp->tx_next_out; nextOut != cdp;
1253 nextOut = (nextOut + 1) % TX_RING_SIZE) {
1255 if (--gp->intr_work_done == 0)
1256 break;
1258 td = &gp->tx_ring[nextOut];
1259 cmdstat = dma32_to_cpu(td->cmdstat);
1261 dbg(3, "%s: Tx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
1262 cmdstat, nextOut);
1264 if (cmdstat & (u32)txOwn) {
1265 /*
1266 * DMA is not finished writing descriptor???
1267 * Leave and come back later to pick-up where
1268 * we left off.
1269 */
1270 break;
1271 }
1273 // increment Tx error stats
1274 if (cmdstat & (u32)txErrorSummary) {
1275 dbg(2, "%s: Tx error, cmdstat = %x\n", __FUNCTION__,
1276 cmdstat);
1277 gp->stats.tx_errors++;
1278 if (cmdstat & (u32)txReTxLimit)
1279 gp->stats.tx_aborted_errors++;
1280 if (cmdstat & (u32)txUnderrun)
1281 gp->stats.tx_fifo_errors++;
1282 if (cmdstat & (u32)txLateCollision)
1283 gp->stats.tx_window_errors++;
1284 }
1286 if (cmdstat & (u32)txCollision)
1287 gp->stats.collisions +=
1288 (u32)((cmdstat & txReTxCntMask) >>
1289 txReTxCntBit);
1291 // Wake the queue if the ring was full
1292 if (gp->tx_full) {
1293 gp->tx_full = 0;
1294 if (gp->last_psr & psrLink) {
1295 netif_wake_queue(dev);
1296 dbg(2, "%s: Tx Ring was full, queue waked\n",
1297 __FUNCTION__);
1298 }
1299 }
1301 // decrement tx ring buffer count
1302 if (gp->tx_count) gp->tx_count--;
1304 // free the skb
1305 if (gp->tx_skbuff[nextOut]) {
1306 dbg(3, "%s: good Tx, skb=%p\n", __FUNCTION__,
1307 gp->tx_skbuff[nextOut]);
1308 dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
1309 gp->tx_skbuff[nextOut] = NULL;
1310 } else {
1311 err("%s: no skb!\n", __FUNCTION__);
1312 }
1313 }
1315 gp->tx_next_out = nextOut;
1317 if (gt96100_check_tx_consistent(gp)) {
1318 err("%s: Tx queue inconsistent!\n", __FUNCTION__);
1319 }
1321 if ((status & icrTxEndLow) && gp->tx_count != 0) {
1322 // we must restart the DMA
1323 dbg(3, "%s: Restarting Tx DMA\n", __FUNCTION__);
1324 GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
1325 sdcmrERD | sdcmrTXDL);
1326 }
1327 }
1330 static irqreturn_t
1331 gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1332 {
1333 struct net_device *dev = (struct net_device *)dev_id;
1334 struct gt96100_private *gp = netdev_priv(dev);
1335 u32 status;
1336 int handled = 0;
1338 if (dev == NULL) {
1339 err("%s: null dev ptr\n", __FUNCTION__);
1340 return IRQ_NONE;
1341 }
1343 dbg(3, "%s: entry, icr=%x\n", __FUNCTION__,
1344 GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));
1346 spin_lock(&gp->lock);
1348 gp->intr_work_done = max_interrupt_work;
1350 while (gp->intr_work_done > 0) {
1352 status = GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE);
1353 // ACK interrupts
1354 GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, ~status);
1356 if ((status & icrEtherIntSum) == 0 &&
1357 !(status & (icrTxBufferLow|icrTxBufferHigh|icrRxBuffer)))
1358 break;
1360 handled = 1;
1362 if (status & icrMIIPhySTC) {
1363 u32 psr = GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS);
1364 if (gp->last_psr != psr) {
1365 dbg(0, "port status:\n");
1366 dbg(0, " %s MBit/s, %s-duplex, "
1367 "flow-control %s, link is %s,\n",
1368 psr & psrSpeed ? "100":"10",
1369 psr & psrDuplex ? "full":"half",
1370 psr & psrFctl ? "disabled":"enabled",
1371 psr & psrLink ? "up":"down");
1372 dbg(0, " TxLowQ is %s, TxHighQ is %s, "
1373 "Transmitter is %s\n",
1374 psr & psrTxLow ? "running":"stopped",
1375 psr & psrTxHigh ? "running":"stopped",
1376 psr & psrTxInProg ? "on":"off");
1378 if ((psr & psrLink) && !gp->tx_full &&
1379 netif_queue_stopped(dev)) {
1380 dbg(0, "%s: Link up, waking queue.\n",
1381 __FUNCTION__);
1382 netif_wake_queue(dev);
1383 } else if (!(psr & psrLink) &&
1384 !netif_queue_stopped(dev)) {
1385 dbg(0, "%s: Link down, stopping queue.\n",
1386 __FUNCTION__);
1387 netif_stop_queue(dev);
1388 }
1390 gp->last_psr = psr;
1391 }
1393 if (--gp->intr_work_done == 0)
1394 break;
1395 }
1397 if (status & (icrTxBufferLow | icrTxEndLow))
1398 gt96100_tx_complete(dev, status);
1400 if (status & (icrRxBuffer | icrRxError)) {
1401 gt96100_rx(dev, status);
1402 }
1404 // Now check TX errors (RX errors were handled in gt96100_rx)
1405 if (status & icrTxErrorLow) {
1406 err("%s: Tx resource error\n", __FUNCTION__);
1407 if (--gp->intr_work_done == 0)
1408 break;
1409 }
1411 if (status & icrTxUdr) {
1412 err("%s: Tx underrun error\n", __FUNCTION__);
1413 if (--gp->intr_work_done == 0)
1414 break;
1415 }
1416 }
1418 if (gp->intr_work_done == 0) {
1419 // ACK any remaining pending interrupts
1420 GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, 0);
1421 dbg(3, "%s: hit max work\n", __FUNCTION__);
1422 }
1424 dbg(3, "%s: exit, icr=%x\n", __FUNCTION__,
1425 GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));
1427 spin_unlock(&gp->lock);
1428 return IRQ_RETVAL(handled);
1429 }
1432 static void
1433 gt96100_tx_timeout(struct net_device *dev)
1434 {
1435 struct gt96100_private *gp = netdev_priv(dev);
1436 unsigned long flags;
1438 spin_lock_irqsave(&gp->lock, flags);
1440 if (!(gp->last_psr & psrLink)) {
1441 err("tx_timeout: link down.\n");
1442 spin_unlock_irqrestore(&gp->lock, flags);
1443 } else {
1444 if (gt96100_check_tx_consistent(gp))
1445 err("tx_timeout: Tx ring error.\n");
1447 disable_ether_irq(dev);
1448 spin_unlock_irqrestore(&gp->lock, flags);
1449 reset_tx(dev);
1450 enable_ether_irq(dev);
1452 netif_wake_queue(dev);
1453 }
1454 }
1457 static void
1458 gt96100_set_rx_mode(struct net_device *dev)
1459 {
1460 struct gt96100_private *gp = netdev_priv(dev);
1461 unsigned long flags;
1462 //struct dev_mc_list *mcptr;
1464 dbg(3, "%s: dev=%p, flags=%x\n", __FUNCTION__, dev, dev->flags);
1466 // stop the Receiver DMA
1467 abort(dev, sdcmrAR);
1469 spin_lock_irqsave(&gp->lock, flags);
1471 if (dev->flags & IFF_PROMISC) {
1472 GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG,
1473 pcrEN | pcrHS | pcrPM);
1474 }
1476 #if 0
1477 /*
1478 FIXME: currently multicast doesn't work - need to get hash table
1479 working first.
1480 */
1481 if (dev->mc_count) {
1482 // clear hash table
1483 memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);
1484 // Add our ethernet address
1485 gt96100_add_hash_entry(dev, dev->dev_addr);
1487 for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) {
1488 dump_hw_addr(2, dev, "%s: addr=", __FUNCTION__,
1489 mcptr->dmi_addr);
1490 gt96100_add_hash_entry(dev, mcptr->dmi_addr);
1491 }
1492 }
1493 #endif
1495 // restart Rx DMA
1496 GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);
1498 spin_unlock_irqrestore(&gp->lock, flags);
1499 }
1501 static struct net_device_stats *
1502 gt96100_get_stats(struct net_device *dev)
1503 {
1504 struct gt96100_private *gp = netdev_priv(dev);
1505 unsigned long flags;
1507 dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
1509 if (netif_device_present(dev)) {
1510 spin_lock_irqsave (&gp->lock, flags);
1511 update_stats(gp);
1512 spin_unlock_irqrestore (&gp->lock, flags);
1513 }
1515 return &gp->stats;
1516 }
1518 static void gt96100_cleanup_module(void)
1519 {
1520 int i;
1521 for (i=0; i<NUM_INTERFACES; i++) {
1522 struct gt96100_if_t *gtif = &gt96100_iflist[i];
1523 if (gtif->dev != NULL) {
1524 struct gt96100_private *gp = (struct gt96100_private *)
1525 netdev_priv(gtif->dev);
1526 unregister_netdev(gtif->dev);
1527 dmafree(RX_HASH_TABLE_SIZE, gp->hash_table_dma);
1528 dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
1529 dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
1530 + sizeof(gt96100_td_t) * TX_RING_SIZE,
1531 gp->rx_ring);
1532 free_netdev(gtif->dev);
1533 release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
1534 }
1535 }
1536 }
1538 static int __init gt96100_setup(char *options)
1539 {
1540 char *this_opt;
1542 if (!options || !*options)
1543 return 0;
1545 while ((this_opt = strsep (&options, ",")) != NULL) {
1546 if (!*this_opt)
1547 continue;
1548 if (!strncmp(this_opt, "mac0:", 5)) {
1549 memcpy(mac0, this_opt+5, 17);
1550 mac0[17]= '\0';
1551 } else if (!strncmp(this_opt, "mac1:", 5)) {
1552 memcpy(mac1, this_opt+5, 17);
1553 mac1[17]= '\0';
1554 }
1555 }
1557 return 1;
1558 }
1560 __setup("gt96100eth=", gt96100_setup);
1562 module_init(gt96100_init_module);
1563 module_exit(gt96100_cleanup_module);
1565 MODULE_AUTHOR("Steve Longerbeam <stevel@mvista.com>");
1566 MODULE_DESCRIPTION("GT96100 Ethernet driver");