ia64/linux-2.6.18-xen.hg

view drivers/net/dl2k.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
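
For illustration, a minimal sketch of the retry behaviour described
above (this is not the actual balloon driver code; current_target(),
current_pages and increase_reservation() are assumed helpers, the last
one returning how many pages the hypervisor actually granted):

    #include <linux/workqueue.h>

    static void balloon_process(struct work_struct *work);
    static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

    static void balloon_process(struct work_struct *work)
    {
            long wanted = current_target() - current_pages;
            long got;

            if (wanted <= 0)
                    return;

            got = increase_reservation(wanted);
            current_pages += got;        /* keep any partial success */

            if (got < wanted)
                    /* Host may be under temporary memory pressure:
                       retry on a timer instead of recording a hard limit. */
                    schedule_delayed_work(&balloon_worker, HZ);
    }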

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
line source
1 /* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
2 /*
3 Copyright (c) 2001, 2002 by D-Link Corporation
4 Written by Edward Peng.<edward_peng@dlink.com.tw>
5 Created 03-May-2001, based on Linux' sundance.c.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11 */
13 #define DRV_NAME "D-Link DL2000-based linux driver"
14 #define DRV_VERSION "v1.18"
15 #define DRV_RELDATE "2006/06/27"
16 #include "dl2k.h"
17 #include <linux/dma-mapping.h>
19 static char version[] __devinitdata =
20 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
21 #define MAX_UNITS 8
22 static int mtu[MAX_UNITS];
23 static int vlan[MAX_UNITS];
24 static int jumbo[MAX_UNITS];
25 static char *media[MAX_UNITS];
26 static int tx_flow=-1;
27 static int rx_flow=-1;
28 static int copy_thresh;
29 static int rx_coalesce=10; /* Rx frame count each interrupt */
30 static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */
31 static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */
34 MODULE_AUTHOR ("Edward Peng");
35 MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
36 MODULE_LICENSE("GPL");
37 module_param_array(mtu, int, NULL, 0);
38 module_param_array(media, charp, NULL, 0);
39 module_param_array(vlan, int, NULL, 0);
40 module_param_array(jumbo, int, NULL, 0);
41 module_param(tx_flow, int, 0);
42 module_param(rx_flow, int, 0);
43 module_param(copy_thresh, int, 0);
44 module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */
45 module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */
46 module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
49 /* Enable the default interrupts */
50 #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
51 UpdateStats | LinkEvent)
52 #define EnableInt() \
53 writew(DEFAULT_INTR, ioaddr + IntEnable)
55 static const int max_intrloop = 50;
56 static const int multicast_filter_limit = 0x40;
58 static int rio_open (struct net_device *dev);
59 static void rio_timer (unsigned long data);
60 static void rio_tx_timeout (struct net_device *dev);
61 static void alloc_list (struct net_device *dev);
62 static int start_xmit (struct sk_buff *skb, struct net_device *dev);
63 static irqreturn_t rio_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
64 static void rio_free_tx (struct net_device *dev, int irq);
65 static void tx_error (struct net_device *dev, int tx_status);
66 static int receive_packet (struct net_device *dev);
67 static void rio_error (struct net_device *dev, int int_status);
68 static int change_mtu (struct net_device *dev, int new_mtu);
69 static void set_multicast (struct net_device *dev);
70 static struct net_device_stats *get_stats (struct net_device *dev);
71 static int clear_stats (struct net_device *dev);
72 static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
73 static int rio_close (struct net_device *dev);
74 static int find_miiphy (struct net_device *dev);
75 static int parse_eeprom (struct net_device *dev);
76 static int read_eeprom (long ioaddr, int eep_addr);
77 static int mii_wait_link (struct net_device *dev, int wait);
78 static int mii_set_media (struct net_device *dev);
79 static int mii_get_media (struct net_device *dev);
80 static int mii_set_media_pcs (struct net_device *dev);
81 static int mii_get_media_pcs (struct net_device *dev);
82 static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
83 static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
84 u16 data);
86 static struct ethtool_ops ethtool_ops;
88 static int __devinit
89 rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
90 {
91 struct net_device *dev;
92 struct netdev_private *np;
93 static int card_idx;
94 int chip_idx = ent->driver_data;
95 int err, irq;
96 long ioaddr;
97 static int version_printed;
98 void *ring_space;
99 dma_addr_t ring_dma;
101 if (!version_printed++)
102 printk ("%s", version);
104 err = pci_enable_device (pdev);
105 if (err)
106 return err;
108 irq = pdev->irq;
109 err = pci_request_regions (pdev, "dl2k");
110 if (err)
111 goto err_out_disable;
113 pci_set_master (pdev);
114 dev = alloc_etherdev (sizeof (*np));
115 if (!dev) {
116 err = -ENOMEM;
117 goto err_out_res;
118 }
119 SET_MODULE_OWNER (dev);
120 SET_NETDEV_DEV(dev, &pdev->dev);
122 #ifdef MEM_MAPPING
123 ioaddr = pci_resource_start (pdev, 1);
124 ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
125 if (!ioaddr) {
126 err = -ENOMEM;
127 goto err_out_dev;
128 }
129 #else
130 ioaddr = pci_resource_start (pdev, 0);
131 #endif
132 dev->base_addr = ioaddr;
133 dev->irq = irq;
134 np = netdev_priv(dev);
135 np->chip_id = chip_idx;
136 np->pdev = pdev;
137 spin_lock_init (&np->tx_lock);
138 spin_lock_init (&np->rx_lock);
140 /* Parse manual configuration */
141 np->an_enable = 1;
142 np->tx_coalesce = 1;
143 if (card_idx < MAX_UNITS) {
144 if (media[card_idx] != NULL) {
145 np->an_enable = 0;
146 if (strcmp (media[card_idx], "auto") == 0 ||
147 strcmp (media[card_idx], "autosense") == 0 ||
148 strcmp (media[card_idx], "0") == 0 ) {
149 np->an_enable = 2;
150 } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
151 strcmp (media[card_idx], "4") == 0) {
152 np->speed = 100;
153 np->full_duplex = 1;
154 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
155 || strcmp (media[card_idx], "3") == 0) {
156 np->speed = 100;
157 np->full_duplex = 0;
158 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
159 strcmp (media[card_idx], "2") == 0) {
160 np->speed = 10;
161 np->full_duplex = 1;
162 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
163 strcmp (media[card_idx], "1") == 0) {
164 np->speed = 10;
165 np->full_duplex = 0;
166 } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
167 strcmp (media[card_idx], "6") == 0) {
168 np->speed=1000;
169 np->full_duplex=1;
170 } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
171 strcmp (media[card_idx], "5") == 0) {
172 np->speed = 1000;
173 np->full_duplex = 0;
174 } else {
175 np->an_enable = 1;
176 }
177 }
178 if (jumbo[card_idx] != 0) {
179 np->jumbo = 1;
180 dev->mtu = MAX_JUMBO;
181 } else {
182 np->jumbo = 0;
183 if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
184 dev->mtu = mtu[card_idx];
185 }
186 np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
187 vlan[card_idx] : 0;
188 if (rx_coalesce > 0 && rx_timeout > 0) {
189 np->rx_coalesce = rx_coalesce;
190 np->rx_timeout = rx_timeout;
191 np->coalesce = 1;
192 }
193 np->tx_flow = (tx_flow == 0) ? 0 : 1;
194 np->rx_flow = (rx_flow == 0) ? 0 : 1;
196 if (tx_coalesce < 1)
197 tx_coalesce = 1;
198 else if (tx_coalesce > TX_RING_SIZE-1)
199 tx_coalesce = TX_RING_SIZE - 1;
200 }
201 dev->open = &rio_open;
202 dev->hard_start_xmit = &start_xmit;
203 dev->stop = &rio_close;
204 dev->get_stats = &get_stats;
205 dev->set_multicast_list = &set_multicast;
206 dev->do_ioctl = &rio_ioctl;
207 dev->tx_timeout = &rio_tx_timeout;
208 dev->watchdog_timeo = TX_TIMEOUT;
209 dev->change_mtu = &change_mtu;
210 SET_ETHTOOL_OPS(dev, &ethtool_ops);
211 #if 0
212 dev->features = NETIF_F_IP_CSUM;
213 #endif
214 pci_set_drvdata (pdev, dev);
216 ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
217 if (!ring_space)
218 goto err_out_iounmap;
219 np->tx_ring = (struct netdev_desc *) ring_space;
220 np->tx_ring_dma = ring_dma;
222 ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
223 if (!ring_space)
224 goto err_out_unmap_tx;
225 np->rx_ring = (struct netdev_desc *) ring_space;
226 np->rx_ring_dma = ring_dma;
228 /* Parse eeprom data */
229 parse_eeprom (dev);
231 /* Find PHY address */
232 err = find_miiphy (dev);
233 if (err)
234 goto err_out_unmap_rx;
236 /* Fiber device? */
237 np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
238 np->link_status = 0;
239 /* Set media and reset PHY */
240 if (np->phy_media) {
241 /* default Auto-Negotiation for fiber devices */
242 if (np->an_enable == 2) {
243 np->an_enable = 1;
244 }
245 mii_set_media_pcs (dev);
246 } else {
247 /* Auto-Negotiation is mandatory for 1000BASE-T,
248 IEEE 802.3ab Annex 28D page 14 */
249 if (np->speed == 1000)
250 np->an_enable = 1;
251 mii_set_media (dev);
252 }
253 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
255 err = register_netdev (dev);
256 if (err)
257 goto err_out_unmap_rx;
259 card_idx++;
261 printk (KERN_INFO "%s: %s, %02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
262 dev->name, np->name,
263 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
264 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
265 if (tx_coalesce > 1)
266 printk(KERN_INFO "tx_coalesce:\t%d packets\n",
267 tx_coalesce);
268 if (np->coalesce)
269 printk(KERN_INFO "rx_coalesce:\t%d packets\n"
270 KERN_INFO "rx_timeout: \t%d ns\n",
271 np->rx_coalesce, np->rx_timeout*640);
272 if (np->vlan)
273 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
274 return 0;
276 err_out_unmap_rx:
277 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
278 err_out_unmap_tx:
279 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
280 err_out_iounmap:
281 #ifdef MEM_MAPPING
282 iounmap ((void *) ioaddr);
284 err_out_dev:
285 #endif
286 free_netdev (dev);
288 err_out_res:
289 pci_release_regions (pdev);
291 err_out_disable:
292 pci_disable_device (pdev);
293 return err;
294 }
296 int
297 find_miiphy (struct net_device *dev)
298 {
299 int i, phy_found = 0;
300 struct netdev_private *np;
301 long ioaddr;
302 np = netdev_priv(dev);
303 ioaddr = dev->base_addr;
304 np->phy_addr = 1;
306 for (i = 31; i >= 0; i--) {
307 int mii_status = mii_read (dev, i, 1);
308 if (mii_status != 0xffff && mii_status != 0x0000) {
309 np->phy_addr = i;
310 phy_found++;
311 }
312 }
313 if (!phy_found) {
314 printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
315 return -ENODEV;
316 }
317 return 0;
318 }
320 int
321 parse_eeprom (struct net_device *dev)
322 {
323 int i, j;
324 long ioaddr = dev->base_addr;
325 u8 sromdata[256];
326 u8 *psib;
327 u32 crc;
328 PSROM_t psrom = (PSROM_t) sromdata;
329 struct netdev_private *np = netdev_priv(dev);
331 int cid, next;
333 #ifdef MEM_MAPPING
334 ioaddr = pci_resource_start (np->pdev, 0);
335 #endif
336 /* Read eeprom */
337 for (i = 0; i < 128; i++) {
338 ((u16 *) sromdata)[i] = le16_to_cpu (read_eeprom (ioaddr, i));
339 }
340 #ifdef MEM_MAPPING
341 ioaddr = dev->base_addr;
342 #endif
343 /* Check CRC */
344 crc = ~ether_crc_le (256 - 4, sromdata);
345 if (psrom->crc != crc) {
346 printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name);
347 return -1;
348 }
350 /* Set MAC address */
351 for (i = 0; i < 6; i++)
352 dev->dev_addr[i] = psrom->mac_addr[i];
354 /* Parse Software Information Block */
355 i = 0x30;
356 psib = (u8 *) sromdata;
357 do {
358 cid = psib[i++];
359 next = psib[i++];
360 if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
361 printk (KERN_ERR "Cell data error\n");
362 return -1;
363 }
364 switch (cid) {
365 case 0: /* Format version */
366 break;
367 case 1: /* End of cell */
368 return 0;
369 case 2: /* Duplex Polarity */
370 np->duplex_polarity = psib[i];
371 writeb (readb (ioaddr + PhyCtrl) | psib[i],
372 ioaddr + PhyCtrl);
373 break;
374 case 3: /* Wake Polarity */
375 np->wake_polarity = psib[i];
376 break;
377 case 9: /* Adapter description */
378 j = (next - i > 255) ? 255 : next - i;
379 memcpy (np->name, &(psib[i]), j);
380 break;
381 case 4:
382 case 5:
383 case 6:
384 case 7:
385 case 8: /* Reserved */
386 break;
387 default: /* Unknown cell */
388 return -1;
389 }
390 i = next;
391 } while (1);
393 return 0;
394 }
396 static int
397 rio_open (struct net_device *dev)
398 {
399 struct netdev_private *np = netdev_priv(dev);
400 long ioaddr = dev->base_addr;
401 int i;
402 u16 macctrl;
404 i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
405 if (i)
406 return i;
408 /* Reset all logic functions */
409 writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
410 ioaddr + ASICCtrl + 2);
411 mdelay(10);
413 /* DebugCtrl bits 4, 5, 9 must be set */
414 writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);
416 /* Jumbo frame */
417 if (np->jumbo != 0)
418 writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);
420 alloc_list (dev);
422 /* Get station address */
423 for (i = 0; i < 6; i++)
424 writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);
426 set_multicast (dev);
427 if (np->coalesce) {
428 writel (np->rx_coalesce | np->rx_timeout << 16,
429 ioaddr + RxDMAIntCtrl);
430 }
431 /* Set RIO to poll every N*320nsec. */
432 writeb (0x20, ioaddr + RxDMAPollPeriod);
433 writeb (0xff, ioaddr + TxDMAPollPeriod);
434 writeb (0x30, ioaddr + RxDMABurstThresh);
435 writeb (0x30, ioaddr + RxDMAUrgentThresh);
436 writel (0x0007ffff, ioaddr + RmonStatMask);
437 /* clear statistics */
438 clear_stats (dev);
440 /* VLAN supported */
441 if (np->vlan) {
442 /* priority field in RxDMAIntCtrl */
443 writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
444 ioaddr + RxDMAIntCtrl);
445 /* VLANId */
446 writew (np->vlan, ioaddr + VLANId);
447 /* Length/Type should be 0x8100 */
448 writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
449 /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
450 VLAN information is tagged by the TFC's VID, CFI fields. */
451 writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
452 ioaddr + MACCtrl);
453 }
455 init_timer (&np->timer);
456 np->timer.expires = jiffies + 1*HZ;
457 np->timer.data = (unsigned long) dev;
458 np->timer.function = &rio_timer;
459 add_timer (&np->timer);
461 /* Start Tx/Rx */
462 writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
463 ioaddr + MACCtrl);
465 macctrl = 0;
466 macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
467 macctrl |= (np->full_duplex) ? DuplexSelect : 0;
468 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
469 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
470 writew(macctrl, ioaddr + MACCtrl);
472 netif_start_queue (dev);
474 /* Enable default interrupts */
475 EnableInt ();
476 return 0;
477 }
479 static void
480 rio_timer (unsigned long data)
481 {
482 struct net_device *dev = (struct net_device *)data;
483 struct netdev_private *np = netdev_priv(dev);
484 unsigned int entry;
485 int next_tick = 1*HZ;
486 unsigned long flags;
488 spin_lock_irqsave(&np->rx_lock, flags);
489 /* Recover rx ring exhausted error */
490 if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
491 printk(KERN_INFO "Try to recover rx ring exhausted...\n");
492 /* Re-allocate skbuffs to fill the descriptor ring */
493 for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
494 struct sk_buff *skb;
495 entry = np->old_rx % RX_RING_SIZE;
496 /* Dropped packets don't need to re-allocate */
497 if (np->rx_skbuff[entry] == NULL) {
498 skb = dev_alloc_skb (np->rx_buf_sz);
499 if (skb == NULL) {
500 np->rx_ring[entry].fraginfo = 0;
501 printk (KERN_INFO
502 "%s: Still unable to re-allocate Rx skbuff.#%d\n",
503 dev->name, entry);
504 break;
505 }
506 np->rx_skbuff[entry] = skb;
507 skb->dev = dev;
508 /* 16 byte align the IP header */
509 skb_reserve (skb, 2);
510 np->rx_ring[entry].fraginfo =
511 cpu_to_le64 (pci_map_single
512 (np->pdev, skb->data, np->rx_buf_sz,
513 PCI_DMA_FROMDEVICE));
514 }
515 np->rx_ring[entry].fraginfo |=
516 cpu_to_le64 (np->rx_buf_sz) << 48;
517 np->rx_ring[entry].status = 0;
518 } /* end for */
519 } /* end if */
520 spin_unlock_irqrestore (&np->rx_lock, flags);
521 np->timer.expires = jiffies + next_tick;
522 add_timer(&np->timer);
523 }
525 static void
526 rio_tx_timeout (struct net_device *dev)
527 {
528 long ioaddr = dev->base_addr;
530 printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
531 dev->name, readl (ioaddr + TxStatus));
532 rio_free_tx(dev, 0);
533 dev->if_port = 0;
534 dev->trans_start = jiffies;
535 }
537 /* allocate and initialize Tx and Rx descriptors */
538 static void
539 alloc_list (struct net_device *dev)
540 {
541 struct netdev_private *np = netdev_priv(dev);
542 int i;
544 np->cur_rx = np->cur_tx = 0;
545 np->old_rx = np->old_tx = 0;
546 np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
548 /* Initialize Tx descriptors; TFDListPtr is set in start_xmit(). */
549 for (i = 0; i < TX_RING_SIZE; i++) {
550 np->tx_skbuff[i] = NULL;
551 np->tx_ring[i].status = cpu_to_le64 (TFDDone);
552 np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
553 ((i+1)%TX_RING_SIZE) *
554 sizeof (struct netdev_desc));
555 }
557 /* Initialize Rx descriptors */
558 for (i = 0; i < RX_RING_SIZE; i++) {
559 np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
560 ((i + 1) % RX_RING_SIZE) *
561 sizeof (struct netdev_desc));
562 np->rx_ring[i].status = 0;
563 np->rx_ring[i].fraginfo = 0;
564 np->rx_skbuff[i] = NULL;
565 }
567 /* Allocate the rx buffers */
568 for (i = 0; i < RX_RING_SIZE; i++) {
569 /* Allocate a fixed-size skbuff */
570 struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz);
571 np->rx_skbuff[i] = skb;
572 if (skb == NULL) {
573 printk (KERN_ERR
574 "%s: alloc_list: allocate Rx buffer error! ",
575 dev->name);
576 break;
577 }
578 skb->dev = dev; /* Mark as being used by this device. */
579 skb_reserve (skb, 2); /* 16 byte align the IP header. */
580 /* Rubicon now supports 40 bits of addressing space. */
581 np->rx_ring[i].fraginfo =
582 cpu_to_le64 ( pci_map_single (
583 np->pdev, skb->data, np->rx_buf_sz,
584 PCI_DMA_FROMDEVICE));
585 np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
586 }
588 /* Set RFDListPtr */
589 writel (cpu_to_le32 (np->rx_ring_dma), dev->base_addr + RFDListPtr0);
590 writel (0, dev->base_addr + RFDListPtr1);
592 return;
593 }
595 static int
596 start_xmit (struct sk_buff *skb, struct net_device *dev)
597 {
598 struct netdev_private *np = netdev_priv(dev);
599 struct netdev_desc *txdesc;
600 unsigned entry;
601 u32 ioaddr;
602 u64 tfc_vlan_tag = 0;
604 if (np->link_status == 0) { /* Link Down */
605 dev_kfree_skb(skb);
606 return 0;
607 }
608 ioaddr = dev->base_addr;
609 entry = np->cur_tx % TX_RING_SIZE;
610 np->tx_skbuff[entry] = skb;
611 txdesc = &np->tx_ring[entry];
613 #if 0
614 if (skb->ip_summed == CHECKSUM_HW) {
615 txdesc->status |=
616 cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
617 IPChecksumEnable);
618 }
619 #endif
620 if (np->vlan) {
621 tfc_vlan_tag =
622 cpu_to_le64 (VLANTagInsert) |
623 (cpu_to_le64 (np->vlan) << 32) |
624 (cpu_to_le64 (skb->priority) << 45);
625 }
626 txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
627 skb->len,
628 PCI_DMA_TODEVICE));
629 txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
631 /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
632 * Work around: Always use 1 descriptor in 10Mbps mode */
633 if (entry % np->tx_coalesce == 0 || np->speed == 10)
634 txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
635 WordAlignDisable |
636 TxDMAIndicate |
637 (1 << FragCountShift));
638 else
639 txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
640 WordAlignDisable |
641 (1 << FragCountShift));
643 /* TxDMAPollNow */
644 writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
645 /* Schedule ISR */
646 writel(10000, ioaddr + CountDown);
647 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
648 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
649 < TX_QUEUE_LEN - 1 && np->speed != 10) {
650 /* do nothing */
651 } else if (!netif_queue_stopped(dev)) {
652 netif_stop_queue (dev);
653 }
655 /* The first TFDListPtr */
656 if (readl (dev->base_addr + TFDListPtr0) == 0) {
657 writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
658 dev->base_addr + TFDListPtr0);
659 writel (0, dev->base_addr + TFDListPtr1);
660 }
662 /* NETDEV WATCHDOG timer */
663 dev->trans_start = jiffies;
664 return 0;
665 }
667 static irqreturn_t
668 rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
669 {
670 struct net_device *dev = dev_instance;
671 struct netdev_private *np;
672 unsigned int_status;
673 long ioaddr;
674 int cnt = max_intrloop;
675 int handled = 0;
677 ioaddr = dev->base_addr;
678 np = netdev_priv(dev);
679 while (1) {
680 int_status = readw (ioaddr + IntStatus);
681 writew (int_status, ioaddr + IntStatus);
682 int_status &= DEFAULT_INTR;
683 if (int_status == 0 || --cnt < 0)
684 break;
685 handled = 1;
686 /* Processing received packets */
687 if (int_status & RxDMAComplete)
688 receive_packet (dev);
689 /* TxDMAComplete interrupt */
690 if ((int_status & (TxDMAComplete|IntRequested))) {
691 int tx_status;
692 tx_status = readl (ioaddr + TxStatus);
693 if (tx_status & 0x01)
694 tx_error (dev, tx_status);
695 /* Free used tx skbuffs */
696 rio_free_tx (dev, 1);
697 }
699 /* Handle uncommon events */
700 if (int_status &
701 (HostError | LinkEvent | UpdateStats))
702 rio_error (dev, int_status);
703 }
704 if (np->cur_tx != np->old_tx)
705 writel (100, ioaddr + CountDown);
706 return IRQ_RETVAL(handled);
707 }
709 static void
710 rio_free_tx (struct net_device *dev, int irq)
711 {
712 struct netdev_private *np = netdev_priv(dev);
713 int entry = np->old_tx % TX_RING_SIZE;
714 int tx_use = 0;
715 unsigned long flag = 0;
717 if (irq)
718 spin_lock(&np->tx_lock);
719 else
720 spin_lock_irqsave(&np->tx_lock, flag);
722 /* Free used tx skbuffs */
723 while (entry != np->cur_tx) {
724 struct sk_buff *skb;
726 if (!(np->tx_ring[entry].status & TFDDone))
727 break;
728 skb = np->tx_skbuff[entry];
729 pci_unmap_single (np->pdev,
730 np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
731 skb->len, PCI_DMA_TODEVICE);
732 if (irq)
733 dev_kfree_skb_irq (skb);
734 else
735 dev_kfree_skb (skb);
737 np->tx_skbuff[entry] = NULL;
738 entry = (entry + 1) % TX_RING_SIZE;
739 tx_use++;
740 }
741 if (irq)
742 spin_unlock(&np->tx_lock);
743 else
744 spin_unlock_irqrestore(&np->tx_lock, flag);
745 np->old_tx = entry;
747 /* If the ring is no longer full, clear tx_full and
748 call netif_wake_queue() */
750 if (netif_queue_stopped(dev) &&
751 ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
752 < TX_QUEUE_LEN - 1 || np->speed == 10)) {
753 netif_wake_queue (dev);
754 }
755 }
757 static void
758 tx_error (struct net_device *dev, int tx_status)
759 {
760 struct netdev_private *np;
761 long ioaddr = dev->base_addr;
762 int frame_id;
763 int i;
765 np = netdev_priv(dev);
767 frame_id = (tx_status & 0xffff0000);
768 printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
769 dev->name, tx_status, frame_id);
770 np->stats.tx_errors++;
771 /* Transmit Underrun */
772 if (tx_status & 0x10) {
773 np->stats.tx_fifo_errors++;
774 writew (readw (ioaddr + TxStartThresh) + 0x10,
775 ioaddr + TxStartThresh);
776 /* Transmit Underrun needs TxReset, DMAReset, FIFOReset to be set */
777 writew (TxReset | DMAReset | FIFOReset | NetworkReset,
778 ioaddr + ASICCtrl + 2);
779 /* Wait for ResetBusy bit clear */
780 for (i = 50; i > 0; i--) {
781 if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
782 break;
783 mdelay (1);
784 }
785 rio_free_tx (dev, 1);
786 /* Reset TFDListPtr */
787 writel (np->tx_ring_dma +
788 np->old_tx * sizeof (struct netdev_desc),
789 dev->base_addr + TFDListPtr0);
790 writel (0, dev->base_addr + TFDListPtr1);
792 /* Let TxStartThresh stay default value */
793 }
794 /* Late Collision */
795 if (tx_status & 0x04) {
796 np->stats.tx_fifo_errors++;
797 /* TxReset and clear FIFO */
798 writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
799 /* Wait reset done */
800 for (i = 50; i > 0; i--) {
801 if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
802 break;
803 mdelay (1);
804 }
805 /* Let TxStartThresh stay default value */
806 }
807 /* Maximum Collisions */
808 #ifdef ETHER_STATS
809 if (tx_status & 0x08)
810 np->stats.collisions16++;
811 #else
812 if (tx_status & 0x08)
813 np->stats.collisions++;
814 #endif
815 /* Restart the Tx */
816 writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
817 }
819 static int
820 receive_packet (struct net_device *dev)
821 {
822 struct netdev_private *np = netdev_priv(dev);
823 int entry = np->cur_rx % RX_RING_SIZE;
824 int cnt = 30;
826 /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
827 while (1) {
828 struct netdev_desc *desc = &np->rx_ring[entry];
829 int pkt_len;
830 u64 frame_status;
832 if (!(desc->status & RFDDone) ||
833 !(desc->status & FrameStart) || !(desc->status & FrameEnd))
834 break;
836 /* Chip omits the CRC. */
837 pkt_len = le64_to_cpu (desc->status & 0xffff);
838 frame_status = le64_to_cpu (desc->status);
839 if (--cnt < 0)
840 break;
841 /* Update rx error statistics, drop packet. */
842 if (frame_status & RFS_Errors) {
843 np->stats.rx_errors++;
844 if (frame_status & (RxRuntFrame | RxLengthError))
845 np->stats.rx_length_errors++;
846 if (frame_status & RxFCSError)
847 np->stats.rx_crc_errors++;
848 if (frame_status & RxAlignmentError && np->speed != 1000)
849 np->stats.rx_frame_errors++;
850 if (frame_status & RxFIFOOverrun)
851 np->stats.rx_fifo_errors++;
852 } else {
853 struct sk_buff *skb;
855 /* Small skbuffs for short packets */
856 if (pkt_len > copy_thresh) {
857 pci_unmap_single (np->pdev,
858 desc->fraginfo & DMA_48BIT_MASK,
859 np->rx_buf_sz,
860 PCI_DMA_FROMDEVICE);
861 skb_put (skb = np->rx_skbuff[entry], pkt_len);
862 np->rx_skbuff[entry] = NULL;
863 } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
864 pci_dma_sync_single_for_cpu(np->pdev,
865 desc->fraginfo &
866 DMA_48BIT_MASK,
867 np->rx_buf_sz,
868 PCI_DMA_FROMDEVICE);
869 skb->dev = dev;
870 /* 16 byte align the IP header */
871 skb_reserve (skb, 2);
872 eth_copy_and_sum (skb,
873 np->rx_skbuff[entry]->data,
874 pkt_len, 0);
875 skb_put (skb, pkt_len);
876 pci_dma_sync_single_for_device(np->pdev,
877 desc->fraginfo &
878 DMA_48BIT_MASK,
879 np->rx_buf_sz,
880 PCI_DMA_FROMDEVICE);
881 }
882 skb->protocol = eth_type_trans (skb, dev);
883 #if 0
884 /* Checksum done by hw, but csum value unavailable. */
885 if (np->pci_rev_id >= 0x0c &&
886 !(frame_status & (TCPError | UDPError | IPError))) {
887 skb->ip_summed = CHECKSUM_UNNECESSARY;
888 }
889 #endif
890 netif_rx (skb);
891 dev->last_rx = jiffies;
892 }
893 entry = (entry + 1) % RX_RING_SIZE;
894 }
895 spin_lock(&np->rx_lock);
896 np->cur_rx = entry;
897 /* Re-allocate skbuffs to fill the descriptor ring */
898 entry = np->old_rx;
899 while (entry != np->cur_rx) {
900 struct sk_buff *skb;
901 /* Dropped packets don't need to re-allocate */
902 if (np->rx_skbuff[entry] == NULL) {
903 skb = dev_alloc_skb (np->rx_buf_sz);
904 if (skb == NULL) {
905 np->rx_ring[entry].fraginfo = 0;
906 printk (KERN_INFO
907 "%s: receive_packet: "
908 "Unable to re-allocate Rx skbuff.#%d\n",
909 dev->name, entry);
910 break;
911 }
912 np->rx_skbuff[entry] = skb;
913 skb->dev = dev;
914 /* 16 byte align the IP header */
915 skb_reserve (skb, 2);
916 np->rx_ring[entry].fraginfo =
917 cpu_to_le64 (pci_map_single
918 (np->pdev, skb->data, np->rx_buf_sz,
919 PCI_DMA_FROMDEVICE));
920 }
921 np->rx_ring[entry].fraginfo |=
922 cpu_to_le64 (np->rx_buf_sz) << 48;
923 np->rx_ring[entry].status = 0;
924 entry = (entry + 1) % RX_RING_SIZE;
925 }
926 np->old_rx = entry;
927 spin_unlock(&np->rx_lock);
928 return 0;
929 }
931 static void
932 rio_error (struct net_device *dev, int int_status)
933 {
934 long ioaddr = dev->base_addr;
935 struct netdev_private *np = netdev_priv(dev);
936 u16 macctrl;
938 /* Link change event */
939 if (int_status & LinkEvent) {
940 if (mii_wait_link (dev, 10) == 0) {
941 printk (KERN_INFO "%s: Link up\n", dev->name);
942 if (np->phy_media)
943 mii_get_media_pcs (dev);
944 else
945 mii_get_media (dev);
946 if (np->speed == 1000)
947 np->tx_coalesce = tx_coalesce;
948 else
949 np->tx_coalesce = 1;
950 macctrl = 0;
951 macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
952 macctrl |= (np->full_duplex) ? DuplexSelect : 0;
953 macctrl |= (np->tx_flow) ?
954 TxFlowControlEnable : 0;
955 macctrl |= (np->rx_flow) ?
956 RxFlowControlEnable : 0;
957 writew(macctrl, ioaddr + MACCtrl);
958 np->link_status = 1;
959 netif_carrier_on(dev);
960 } else {
961 printk (KERN_INFO "%s: Link off\n", dev->name);
962 np->link_status = 0;
963 netif_carrier_off(dev);
964 }
965 }
967 /* UpdateStats statistics registers */
968 if (int_status & UpdateStats) {
969 get_stats (dev);
970 }
972 /* PCI Error, a catastrophic error related to the bus interface
973 occurs, set GlobalReset and HostReset to reset. */
974 if (int_status & HostError) {
975 printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
976 dev->name, int_status);
977 writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
978 mdelay (500);
979 }
980 }
982 static struct net_device_stats *
983 get_stats (struct net_device *dev)
984 {
985 long ioaddr = dev->base_addr;
986 struct netdev_private *np = netdev_priv(dev);
987 #ifdef MEM_MAPPING
988 int i;
989 #endif
990 unsigned int stat_reg;
992 /* All statistics registers need to be acknowledged,
993 else statistic overflow could cause problems */
995 np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
996 np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
997 np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
998 np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
1000 np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
1001 np->stats.collisions += readl (ioaddr + SingleColFrames)
1002 + readl (ioaddr + MultiColFrames);
1004 /* detailed tx errors */
1005 stat_reg = readw (ioaddr + FramesAbortXSColls);
1006 np->stats.tx_aborted_errors += stat_reg;
1007 np->stats.tx_errors += stat_reg;
1009 stat_reg = readw (ioaddr + CarrierSenseErrors);
1010 np->stats.tx_carrier_errors += stat_reg;
1011 np->stats.tx_errors += stat_reg;
1013 /* Clear all other statistic register. */
1014 readl (ioaddr + McstOctetXmtOk);
1015 readw (ioaddr + BcstFramesXmtdOk);
1016 readl (ioaddr + McstFramesXmtdOk);
1017 readw (ioaddr + BcstFramesRcvdOk);
1018 readw (ioaddr + MacControlFramesRcvd);
1019 readw (ioaddr + FrameTooLongErrors);
1020 readw (ioaddr + InRangeLengthErrors);
1021 readw (ioaddr + FramesCheckSeqErrors);
1022 readw (ioaddr + FramesLostRxErrors);
1023 readl (ioaddr + McstOctetXmtOk);
1024 readl (ioaddr + BcstOctetXmtOk);
1025 readl (ioaddr + McstFramesXmtdOk);
1026 readl (ioaddr + FramesWDeferredXmt);
1027 readl (ioaddr + LateCollisions);
1028 readw (ioaddr + BcstFramesXmtdOk);
1029 readw (ioaddr + MacControlFramesXmtd);
1030 readw (ioaddr + FramesWEXDeferal);
1032 #ifdef MEM_MAPPING
1033 for (i = 0x100; i <= 0x150; i += 4)
1034 readl (ioaddr + i);
1035 #endif
1036 readw (ioaddr + TxJumboFrames);
1037 readw (ioaddr + RxJumboFrames);
1038 readw (ioaddr + TCPCheckSumErrors);
1039 readw (ioaddr + UDPCheckSumErrors);
1040 readw (ioaddr + IPCheckSumErrors);
1041 return &np->stats;
1042 }
1044 static int
1045 clear_stats (struct net_device *dev)
1046 {
1047 long ioaddr = dev->base_addr;
1048 #ifdef MEM_MAPPING
1049 int i;
1050 #endif
1052 /* All statistics registers need to be acknowledged,
1053 else statistic overflow could cause problems */
1054 readl (ioaddr + FramesRcvOk);
1055 readl (ioaddr + FramesXmtOk);
1056 readl (ioaddr + OctetRcvOk);
1057 readl (ioaddr + OctetXmtOk);
1059 readl (ioaddr + McstFramesRcvdOk);
1060 readl (ioaddr + SingleColFrames);
1061 readl (ioaddr + MultiColFrames);
1062 readl (ioaddr + LateCollisions);
1063 /* detailed rx errors */
1064 readw (ioaddr + FrameTooLongErrors);
1065 readw (ioaddr + InRangeLengthErrors);
1066 readw (ioaddr + FramesCheckSeqErrors);
1067 readw (ioaddr + FramesLostRxErrors);
1069 /* detailed tx errors */
1070 readw (ioaddr + FramesAbortXSColls);
1071 readw (ioaddr + CarrierSenseErrors);
1073 /* Clear all other statistic register. */
1074 readl (ioaddr + McstOctetXmtOk);
1075 readw (ioaddr + BcstFramesXmtdOk);
1076 readl (ioaddr + McstFramesXmtdOk);
1077 readw (ioaddr + BcstFramesRcvdOk);
1078 readw (ioaddr + MacControlFramesRcvd);
1079 readl (ioaddr + McstOctetXmtOk);
1080 readl (ioaddr + BcstOctetXmtOk);
1081 readl (ioaddr + McstFramesXmtdOk);
1082 readl (ioaddr + FramesWDeferredXmt);
1083 readw (ioaddr + BcstFramesXmtdOk);
1084 readw (ioaddr + MacControlFramesXmtd);
1085 readw (ioaddr + FramesWEXDeferal);
1086 #ifdef MEM_MAPPING
1087 for (i = 0x100; i <= 0x150; i += 4)
1088 readl (ioaddr + i);
1089 #endif
1090 readw (ioaddr + TxJumboFrames);
1091 readw (ioaddr + RxJumboFrames);
1092 readw (ioaddr + TCPCheckSumErrors);
1093 readw (ioaddr + UDPCheckSumErrors);
1094 readw (ioaddr + IPCheckSumErrors);
1095 return 0;
1096 }
1099 int
1100 change_mtu (struct net_device *dev, int new_mtu)
1101 {
1102 struct netdev_private *np = netdev_priv(dev);
1103 int max = (np->jumbo) ? MAX_JUMBO : 1536;
1105 if ((new_mtu < 68) || (new_mtu > max)) {
1106 return -EINVAL;
1107 }
1109 dev->mtu = new_mtu;
1111 return 0;
1112 }
1114 static void
1115 set_multicast (struct net_device *dev)
1116 {
1117 long ioaddr = dev->base_addr;
1118 u32 hash_table[2];
1119 u16 rx_mode = 0;
1120 struct netdev_private *np = netdev_priv(dev);
1122 hash_table[0] = hash_table[1] = 0;
1123 /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
1124 hash_table[1] |= cpu_to_le32(0x02000000);
1125 if (dev->flags & IFF_PROMISC) {
1126 /* Receive all frames promiscuously. */
1127 rx_mode = ReceiveAllFrames;
1128 } else if ((dev->flags & IFF_ALLMULTI) ||
1129 (dev->mc_count > multicast_filter_limit)) {
1130 /* Receive broadcast and multicast frames */
1131 rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
1132 } else if (dev->mc_count > 0) {
1133 int i;
1134 struct dev_mc_list *mclist;
1135 /* Receive broadcast frames and multicast frames filtering
1136 by Hashtable */
1137 rx_mode =
1138 ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
1139 for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1140 i++, mclist=mclist->next)
1141 {
1142 int bit, index = 0;
1143 int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1144 /* The bit-reversed high 6 bits of the CRC are
1145 used as an index into the hash table */
1146 for (bit = 0; bit < 6; bit++)
1147 if (crc & (1 << (31 - bit)))
1148 index |= (1 << bit);
1149 hash_table[index / 32] |= (1 << (index % 32));
1150 }
1151 } else {
1152 rx_mode = ReceiveBroadcast | ReceiveUnicast;
1153 }
1154 if (np->vlan) {
1155 /* ReceiveVLANMatch field in ReceiveMode */
1156 rx_mode |= ReceiveVLANMatch;
1157 }
1159 writel (hash_table[0], ioaddr + HashTable0);
1160 writel (hash_table[1], ioaddr + HashTable1);
1161 writew (rx_mode, ioaddr + ReceiveMode);
1162 }
1164 static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1165 {
1166 struct netdev_private *np = netdev_priv(dev);
1167 strcpy(info->driver, "dl2k");
1168 strcpy(info->version, DRV_VERSION);
1169 strcpy(info->bus_info, pci_name(np->pdev));
1170 }
1172 static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1173 {
1174 struct netdev_private *np = netdev_priv(dev);
1175 if (np->phy_media) {
1176 /* fiber device */
1177 cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1178 cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
1179 cmd->port = PORT_FIBRE;
1180 cmd->transceiver = XCVR_INTERNAL;
1181 } else {
1182 /* copper device */
1183 cmd->supported = SUPPORTED_10baseT_Half |
1184 SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
1185 | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
1186 SUPPORTED_Autoneg | SUPPORTED_MII;
1187 cmd->advertising = ADVERTISED_10baseT_Half |
1188 ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
1189 ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full|
1190 ADVERTISED_Autoneg | ADVERTISED_MII;
1191 cmd->port = PORT_MII;
1192 cmd->transceiver = XCVR_INTERNAL;
1193 }
1194 if ( np->link_status ) {
1195 cmd->speed = np->speed;
1196 cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1197 } else {
1198 cmd->speed = -1;
1199 cmd->duplex = -1;
1200 }
1201 if ( np->an_enable)
1202 cmd->autoneg = AUTONEG_ENABLE;
1203 else
1204 cmd->autoneg = AUTONEG_DISABLE;
1206 cmd->phy_address = np->phy_addr;
1207 return 0;
1208 }
1210 static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1211 {
1212 struct netdev_private *np = netdev_priv(dev);
1213 netif_carrier_off(dev);
1214 if (cmd->autoneg == AUTONEG_ENABLE) {
1215 if (np->an_enable)
1216 return 0;
1217 else {
1218 np->an_enable = 1;
1219 mii_set_media(dev);
1220 return 0;
1221 }
1222 } else {
1223 np->an_enable = 0;
1224 if (np->speed == 1000) {
1225 cmd->speed = SPEED_100;
1226 cmd->duplex = DUPLEX_FULL;
1227 printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
1228 }
1229 switch(cmd->speed + cmd->duplex) {
1231 case SPEED_10 + DUPLEX_HALF:
1232 np->speed = 10;
1233 np->full_duplex = 0;
1234 break;
1236 case SPEED_10 + DUPLEX_FULL:
1237 np->speed = 10;
1238 np->full_duplex = 1;
1239 break;
1240 case SPEED_100 + DUPLEX_HALF:
1241 np->speed = 100;
1242 np->full_duplex = 0;
1243 break;
1244 case SPEED_100 + DUPLEX_FULL:
1245 np->speed = 100;
1246 np->full_duplex = 1;
1247 break;
1248 case SPEED_1000 + DUPLEX_HALF:/* not supported */
1249 case SPEED_1000 + DUPLEX_FULL:/* not supported */
1250 default:
1251 return -EINVAL;
1252 }
1253 mii_set_media(dev);
1254 }
1255 return 0;
1256 }
1258 static u32 rio_get_link(struct net_device *dev)
1259 {
1260 struct netdev_private *np = netdev_priv(dev);
1261 return np->link_status;
1262 }
1264 static struct ethtool_ops ethtool_ops = {
1265 .get_drvinfo = rio_get_drvinfo,
1266 .get_settings = rio_get_settings,
1267 .set_settings = rio_set_settings,
1268 .get_link = rio_get_link,
1269 };
1271 static int
1272 rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1273 {
1274 int phy_addr;
1275 struct netdev_private *np = netdev_priv(dev);
1276 struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
1278 struct netdev_desc *desc;
1279 int i;
1281 phy_addr = np->phy_addr;
1282 switch (cmd) {
1283 case SIOCDEVPRIVATE:
1284 break;
1286 case SIOCDEVPRIVATE + 1:
1287 miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
1288 break;
1289 case SIOCDEVPRIVATE + 2:
1290 mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
1291 break;
1292 case SIOCDEVPRIVATE + 3:
1293 break;
1294 case SIOCDEVPRIVATE + 4:
1295 break;
1296 case SIOCDEVPRIVATE + 5:
1297 netif_stop_queue (dev);
1298 break;
1299 case SIOCDEVPRIVATE + 6:
1300 netif_wake_queue (dev);
1301 break;
1302 case SIOCDEVPRIVATE + 7:
1303 printk
1304 ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
1305 netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
1306 np->old_rx);
1307 break;
1308 case SIOCDEVPRIVATE + 8:
1309 printk("TX ring:\n");
1310 for (i = 0; i < TX_RING_SIZE; i++) {
1311 desc = &np->tx_ring[i];
1312 printk
1313 ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
1314 i,
1315 (u32) (np->tx_ring_dma + i * sizeof (*desc)),
1316 (u32) desc->next_desc,
1317 (u32) desc->status, (u32) (desc->fraginfo >> 32),
1318 (u32) desc->fraginfo);
1319 printk ("\n");
1320 }
1321 printk ("\n");
1322 break;
1324 default:
1325 return -EOPNOTSUPP;
1326 }
1327 return 0;
1328 }
1330 #define EEP_READ 0x0200
1331 #define EEP_BUSY 0x8000
1332 /* Read the EEPROM word */
1333 /* We use I/O instructions to read/write the eeprom to avoid failures on some machines */
1334 int
1335 read_eeprom (long ioaddr, int eep_addr)
1336 {
1337 int i = 1000;
1338 outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
1339 while (i-- > 0) {
1340 if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
1341 return inw (ioaddr + EepromData);
1342 }
1343 }
1344 return 0;
1345 }
1347 enum phy_ctrl_bits {
1348 MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
1349 MII_DUPLEX = 0x08,
1350 };
1352 #define mii_delay() readb(ioaddr)
1353 static void
1354 mii_sendbit (struct net_device *dev, u32 data)
1355 {
1356 long ioaddr = dev->base_addr + PhyCtrl;
1357 data = (data) ? MII_DATA1 : 0;
1358 data |= MII_WRITE;
1359 data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
1360 writeb (data, ioaddr);
1361 mii_delay ();
1362 writeb (data | MII_CLK, ioaddr);
1363 mii_delay ();
1364 }
1366 static int
1367 mii_getbit (struct net_device *dev)
1368 {
1369 long ioaddr = dev->base_addr + PhyCtrl;
1370 u8 data;
1372 data = (readb (ioaddr) & 0xf8) | MII_READ;
1373 writeb (data, ioaddr);
1374 mii_delay ();
1375 writeb (data | MII_CLK, ioaddr);
1376 mii_delay ();
1377 return ((readb (ioaddr) >> 1) & 1);
1378 }
1380 static void
1381 mii_send_bits (struct net_device *dev, u32 data, int len)
1382 {
1383 int i;
1384 for (i = len - 1; i >= 0; i--) {
1385 mii_sendbit (dev, data & (1 << i));
1386 }
1387 }
1389 static int
1390 mii_read (struct net_device *dev, int phy_addr, int reg_num)
1391 {
1392 u32 cmd;
1393 int i;
1394 u32 retval = 0;
1396 /* Preamble */
1397 mii_send_bits (dev, 0xffffffff, 32);
1398 /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1399 /* ST,OP = 0110'b for read operation */
1400 cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
1401 mii_send_bits (dev, cmd, 14);
1402 /* Turnaround */
1403 if (mii_getbit (dev))
1404 goto err_out;
1405 /* Read data */
1406 for (i = 0; i < 16; i++) {
1407 retval |= mii_getbit (dev);
1408 retval <<= 1;
1409 }
1410 /* End cycle */
1411 mii_getbit (dev);
1412 return (retval >> 1) & 0xffff;
1414 err_out:
1415 return 0;
1416 }
1417 static int
1418 mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
1419 {
1420 u32 cmd;
1422 /* Preamble */
1423 mii_send_bits (dev, 0xffffffff, 32);
1424 /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1425 /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
1426 cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
1427 mii_send_bits (dev, cmd, 32);
1428 /* End cycle */
1429 mii_getbit (dev);
1430 return 0;
1431 }
1432 static int
1433 mii_wait_link (struct net_device *dev, int wait)
1434 {
1435 BMSR_t bmsr;
1436 int phy_addr;
1437 struct netdev_private *np;
1439 np = netdev_priv(dev);
1440 phy_addr = np->phy_addr;
1442 do {
1443 bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
1444 if (bmsr.bits.link_status)
1445 return 0;
1446 mdelay (1);
1447 } while (--wait > 0);
1448 return -1;
1449 }
1450 static int
1451 mii_get_media (struct net_device *dev)
1452 {
1453 ANAR_t negotiate;
1454 BMSR_t bmsr;
1455 BMCR_t bmcr;
1456 MSCR_t mscr;
1457 MSSR_t mssr;
1458 int phy_addr;
1459 struct netdev_private *np;
1461 np = netdev_priv(dev);
1462 phy_addr = np->phy_addr;
1464 bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
1465 if (np->an_enable) {
1466 if (!bmsr.bits.an_complete) {
1467 /* Auto-Negotiation not completed */
1468 return -1;
1469 }
1470 negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
1471 mii_read (dev, phy_addr, MII_ANLPAR);
1472 mscr.image = mii_read (dev, phy_addr, MII_MSCR);
1473 mssr.image = mii_read (dev, phy_addr, MII_MSSR);
1474 if (mscr.bits.media_1000BT_FD & mssr.bits.lp_1000BT_FD) {
1475 np->speed = 1000;
1476 np->full_duplex = 1;
1477 printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
1478 } else if (mscr.bits.media_1000BT_HD & mssr.bits.lp_1000BT_HD) {
1479 np->speed = 1000;
1480 np->full_duplex = 0;
1481 printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
1482 } else if (negotiate.bits.media_100BX_FD) {
1483 np->speed = 100;
1484 np->full_duplex = 1;
1485 printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
1486 } else if (negotiate.bits.media_100BX_HD) {
1487 np->speed = 100;
1488 np->full_duplex = 0;
1489 printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
1490 } else if (negotiate.bits.media_10BT_FD) {
1491 np->speed = 10;
1492 np->full_duplex = 1;
1493 printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
1494 } else if (negotiate.bits.media_10BT_HD) {
1495 np->speed = 10;
1496 np->full_duplex = 0;
1497 printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
1498 }
1499 if (negotiate.bits.pause) {
1500 np->tx_flow &= 1;
1501 np->rx_flow &= 1;
1502 } else if (negotiate.bits.asymmetric) {
1503 np->tx_flow = 0;
1504 np->rx_flow &= 1;
1505 }
1506 /* else tx_flow, rx_flow = user select */
1507 } else {
1508 bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
1509 if (bmcr.bits.speed100 == 1 && bmcr.bits.speed1000 == 0) {
1510 printk (KERN_INFO "Operating at 100 Mbps, ");
1511 } else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 0) {
1512 printk (KERN_INFO "Operating at 10 Mbps, ");
1513 } else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 1) {
1514 printk (KERN_INFO "Operating at 1000 Mbps, ");
1515 }
1516 if (bmcr.bits.duplex_mode) {
1517 printk ("Full duplex\n");
1518 } else {
1519 printk ("Half duplex\n");
1520 }
1521 }
1522 if (np->tx_flow)
1523 printk(KERN_INFO "Enable Tx Flow Control\n");
1524 else
1525 printk(KERN_INFO "Disable Tx Flow Control\n");
1526 if (np->rx_flow)
1527 printk(KERN_INFO "Enable Rx Flow Control\n");
1528 else
1529 printk(KERN_INFO "Disable Rx Flow Control\n");
1531 return 0;
1532 }
1534 static int
1535 mii_set_media (struct net_device *dev)
1536 {
1537 PHY_SCR_t pscr;
1538 BMCR_t bmcr;
1539 BMSR_t bmsr;
1540 ANAR_t anar;
1541 int phy_addr;
1542 struct netdev_private *np;
1543 np = netdev_priv(dev);
1544 phy_addr = np->phy_addr;
1546 /* Does user set speed? */
1547 if (np->an_enable) {
1548 /* Advertise capabilities */
1549 bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
1550 anar.image = mii_read (dev, phy_addr, MII_ANAR);
1551 anar.bits.media_100BX_FD = bmsr.bits.media_100BX_FD;
1552 anar.bits.media_100BX_HD = bmsr.bits.media_100BX_HD;
1553 anar.bits.media_100BT4 = bmsr.bits.media_100BT4;
1554 anar.bits.media_10BT_FD = bmsr.bits.media_10BT_FD;
1555 anar.bits.media_10BT_HD = bmsr.bits.media_10BT_HD;
1556 anar.bits.pause = 1;
1557 anar.bits.asymmetric = 1;
1558 mii_write (dev, phy_addr, MII_ANAR, anar.image);
1560 /* Enable Auto crossover */
1561 pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
1562 pscr.bits.mdi_crossover_mode = 3; /* 11'b */
1563 mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
1565 /* Soft reset PHY */
1566 mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
1567 bmcr.image = 0;
1568 bmcr.bits.an_enable = 1;
1569 bmcr.bits.restart_an = 1;
1570 bmcr.bits.reset = 1;
1571 mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
1572 mdelay(1);
1573 } else {
1574 /* Force speed setting */
1575 /* 1) Disable Auto crossover */
1576 pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
1577 pscr.bits.mdi_crossover_mode = 0;
1578 mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
1580 /* 2) PHY Reset */
1581 bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
1582 bmcr.bits.reset = 1;
1583 mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
1585 /* 3) Power Down */
1586 bmcr.image = 0x1940; /* must be 0x1940 */
1587 mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
1588 mdelay (100); /* wait a certain time */
1590 /* 4) Advertise nothing */
1591 mii_write (dev, phy_addr, MII_ANAR, 0);
1593 /* 5) Set media and Power Up */
1594 bmcr.image = 0;
1595 bmcr.bits.power_down = 1;
1596 if (np->speed == 100) {
1597 bmcr.bits.speed100 = 1;
1598 bmcr.bits.speed1000 = 0;
1599 printk (KERN_INFO "Manual 100 Mbps, ");
1600 } else if (np->speed == 10) {
1601 bmcr.bits.speed100 = 0;
1602 bmcr.bits.speed1000 = 0;
1603 printk (KERN_INFO "Manual 10 Mbps, ");
1604 }
1605 if (np->full_duplex) {
1606 bmcr.bits.duplex_mode = 1;
1607 printk ("Full duplex\n");
1608 } else {
1609 bmcr.bits.duplex_mode = 0;
1610 printk ("Half duplex\n");
1611 }
1612 #if 0
1613 /* Set 1000BaseT Master/Slave setting */
1614 mscr.image = mii_read (dev, phy_addr, MII_MSCR);
1615 mscr.bits.cfg_enable = 1;
1616 mscr.bits.cfg_value = 0;
1617 #endif
1618 mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
1619 mdelay(10);
1620 }
1621 return 0;
1622 }
1624 static int
1625 mii_get_media_pcs (struct net_device *dev)
1626 {
1627 ANAR_PCS_t negotiate;
1628 BMSR_t bmsr;
1629 BMCR_t bmcr;
1630 int phy_addr;
1631 struct netdev_private *np;
1633 np = netdev_priv(dev);
1634 phy_addr = np->phy_addr;
1636 bmsr.image = mii_read (dev, phy_addr, PCS_BMSR);
1637 if (np->an_enable) {
1638 if (!bmsr.bits.an_complete) {
1639 /* Auto-Negotiation not completed */
1640 return -1;
1641 }
1642 negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
1643 mii_read (dev, phy_addr, PCS_ANLPAR);
1644 np->speed = 1000;
1645 if (negotiate.bits.full_duplex) {
1646 printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
1647 np->full_duplex = 1;
1648 } else {
1649 printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
1650 np->full_duplex = 0;
1651 }
1652 if (negotiate.bits.pause) {
1653 np->tx_flow &= 1;
1654 np->rx_flow &= 1;
1655 } else if (negotiate.bits.asymmetric) {
1656 np->tx_flow = 0;
1657 np->rx_flow &= 1;
1658 }
1659 /* else tx_flow, rx_flow = user select */
1660 } else {
1661 bmcr.image = mii_read (dev, phy_addr, PCS_BMCR);
1662 printk (KERN_INFO "Operating at 1000 Mbps, ");
1663 if (bmcr.bits.duplex_mode) {
1664 printk ("Full duplex\n");
1665 } else {
1666 printk ("Half duplex\n");
1667 }
1668 }
1669 if (np->tx_flow)
1670 printk(KERN_INFO "Enable Tx Flow Control\n");
1671 else
1672 printk(KERN_INFO "Disable Tx Flow Control\n");
1673 if (np->rx_flow)
1674 printk(KERN_INFO "Enable Rx Flow Control\n");
1675 else
1676 printk(KERN_INFO "Disable Rx Flow Control\n");
1678 return 0;
1679 }
1681 static int
1682 mii_set_media_pcs (struct net_device *dev)
1683 {
1684 BMCR_t bmcr;
1685 ESR_t esr;
1686 ANAR_PCS_t anar;
1687 int phy_addr;
1688 struct netdev_private *np;
1689 np = netdev_priv(dev);
1690 phy_addr = np->phy_addr;
1692 /* Auto-Negotiation? */
1693 if (np->an_enable) {
1694 /* Advertise capabilities */
1695 esr.image = mii_read (dev, phy_addr, PCS_ESR);
1696 anar.image = mii_read (dev, phy_addr, MII_ANAR);
1697 anar.bits.half_duplex =
1698 esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD;
1699 anar.bits.full_duplex =
1700 esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD;
1701 anar.bits.pause = 1;
1702 anar.bits.asymmetric = 1;
1703 mii_write (dev, phy_addr, MII_ANAR, anar.image);
1705 /* Soft reset PHY */
1706 mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
1707 bmcr.image = 0;
1708 bmcr.bits.an_enable = 1;
1709 bmcr.bits.restart_an = 1;
1710 bmcr.bits.reset = 1;
1711 mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
1712 mdelay(1);
1713 } else {
1714 /* Force speed setting */
1715 /* PHY Reset */
1716 bmcr.image = 0;
1717 bmcr.bits.reset = 1;
1718 mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
1719 mdelay(10);
1720 bmcr.image = 0;
1721 bmcr.bits.an_enable = 0;
1722 if (np->full_duplex) {
1723 bmcr.bits.duplex_mode = 1;
1724 printk (KERN_INFO "Manual full duplex\n");
1725 } else {
1726 bmcr.bits.duplex_mode = 0;
1727 printk (KERN_INFO "Manual half duplex\n");
1728 }
1729 mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
1730 mdelay(10);
1732 /* Advertise nothing */
1733 mii_write (dev, phy_addr, MII_ANAR, 0);
1734 }
1735 return 0;
1736 }
1739 static int
1740 rio_close (struct net_device *dev)
1741 {
1742 long ioaddr = dev->base_addr;
1743 struct netdev_private *np = netdev_priv(dev);
1744 struct sk_buff *skb;
1745 int i;
1747 netif_stop_queue (dev);
1749 /* Disable interrupts */
1750 writew (0, ioaddr + IntEnable);
1752 /* Stop Tx and Rx logics */
1753 writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
1754 synchronize_irq (dev->irq);
1755 free_irq (dev->irq, dev);
1756 del_timer_sync (&np->timer);
1758 /* Free all the skbuffs in the queue. */
1759 for (i = 0; i < RX_RING_SIZE; i++) {
1760 np->rx_ring[i].status = 0;
1761 np->rx_ring[i].fraginfo = 0;
1762 skb = np->rx_skbuff[i];
1763 if (skb) {
1764 pci_unmap_single(np->pdev,
1765 np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
1766 skb->len, PCI_DMA_FROMDEVICE);
1767 dev_kfree_skb (skb);
1768 np->rx_skbuff[i] = NULL;
1769 }
1770 }
1771 for (i = 0; i < TX_RING_SIZE; i++) {
1772 skb = np->tx_skbuff[i];
1773 if (skb) {
1774 pci_unmap_single(np->pdev,
1775 np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
1776 skb->len, PCI_DMA_TODEVICE);
1777 dev_kfree_skb (skb);
1778 np->tx_skbuff[i] = NULL;
1779 }
1780 }
1782 return 0;
1783 }
1785 static void __devexit
1786 rio_remove1 (struct pci_dev *pdev)
1787 {
1788 struct net_device *dev = pci_get_drvdata (pdev);
1790 if (dev) {
1791 struct netdev_private *np = netdev_priv(dev);
1793 unregister_netdev (dev);
1794 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
1795 np->rx_ring_dma);
1796 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
1797 np->tx_ring_dma);
1798 #ifdef MEM_MAPPING
1799 iounmap ((char *) (dev->base_addr));
1800 #endif
1801 free_netdev (dev);
1802 pci_release_regions (pdev);
1803 pci_disable_device (pdev);
1804 }
1805 pci_set_drvdata (pdev, NULL);
1806 }
1808 static struct pci_driver rio_driver = {
1809 .name = "dl2k",
1810 .id_table = rio_pci_tbl,
1811 .probe = rio_probe1,
1812 .remove = __devexit_p(rio_remove1),
1813 };
1815 static int __init
1816 rio_init (void)
1817 {
1818 return pci_module_init (&rio_driver);
1819 }
1821 static void __exit
1822 rio_exit (void)
1823 {
1824 pci_unregister_driver (&rio_driver);
1825 }
1827 module_init (rio_init);
1828 module_exit (rio_exit);
1830 /*
1832 Compile command:
1834 gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
1836 Read Documentation/networking/dl2k.txt for details.
1838 */