ia64/linux-2.6.18-xen.hg

drivers/net/fec_8xx/fec_main.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
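
In outline, the reworked balloon process ends up with symmetric retry
behaviour along these lines (a minimal sketch, not the actual patch
hunk; current_target(), current_pages(), increase_reservation(),
decrease_reservation() and balloon_timer stand in for the driver's
internal helpers and state in drivers/xen/balloon/balloon.c):

	/* Periodic work: move the reservation towards the target. */
	static void balloon_process(void *unused)
	{
		int success = 1;
		long credit = current_target() - current_pages();

		if (credit > 0)		/* balloon up; keep any partial gain */
			success = increase_reservation(credit);
		if (credit < 0)		/* balloon down */
			success = decrease_reservation(-credit);

		/* Not at target yet: retry on a timer instead of giving up. */
		if (!success)
			mod_timer(&balloon_timer, jiffies + HZ);
	}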

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067

/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * Released under the GPL
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>

#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/mpc8xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/commproc.h>
#include <asm/dma-mapping.h>

#include "fec_8xx.h"

/*************************************************/

#define FEC_MAX_MULTICAST_ADDRS	64

/*************************************************/

static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Motorola 8xx FEC ethernet driver");
MODULE_LICENSE("GPL");

int fec_8xx_debug = -1;		/* -1 == use FEC_8XX_DEF_MSG_ENABLE as value */
module_param(fec_8xx_debug, int, 0);
MODULE_PARM_DESC(fec_8xx_debug,
                 "FEC 8xx bitmapped debugging message enable value");

/*************************************************/

/*
 * Delay to wait for FEC reset command to complete (in us)
 */
#define FEC_RESET_DELAY		50

/*****************************************************************************************/

static void fec_whack_reset(fec_t *fecp)
{
        int i;

        /*
         * Whack a reset.  We should wait for this.
         */
        FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
        for (i = 0;
             (FR(fecp, ecntrl) & FEC_ECNTRL_RESET) != 0 && i < FEC_RESET_DELAY;
             i++)
                udelay(1);

        if (i == FEC_RESET_DELAY)
                printk(KERN_WARNING "FEC Reset timeout!\n");
}

/****************************************************************************/

/*
 * Transmitter timeout.
 */
#define TX_TIMEOUT	(2*HZ)

/****************************************************************************/

/*
 * Returns the CRC needed when filling in the hash table for
 * multicast group filtering.
 * pAddr must point to a MAC address (6 bytes).
 */
static __u32 fec_multicast_calc_crc(char *pAddr)
{
        u8 byte;
        int byte_count;
        int bit_count;
        __u32 crc = 0xffffffff;
        u8 msb;

        for (byte_count = 0; byte_count < 6; byte_count++) {
                byte = pAddr[byte_count];
                for (bit_count = 0; bit_count < 8; bit_count++) {
                        msb = crc >> 31;
                        crc <<= 1;
                        if (msb ^ (byte & 0x1))
                                crc ^= FEC_CRC_POLY;
                        byte >>= 1;
                }
        }
        return crc;
}

/*
 * Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */
static void fec_set_multicast_list(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        fec_t *fecp = fep->fecp;
        struct dev_mc_list *pmc;
        __u32 crc;
        int temp;
        __u32 csrVal;
        int hash_index;
        __u32 hthi, htlo;
        unsigned long flags;

        if ((dev->flags & IFF_PROMISC) != 0) {

                spin_lock_irqsave(&fep->lock, flags);
                FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
                spin_unlock_irqrestore(&fep->lock, flags);

                /*
                 * Log any net taps.
                 */
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s: Promiscuous mode enabled.\n", dev->name);
                return;

        }

        if ((dev->flags & IFF_ALLMULTI) != 0 ||
            dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
                /*
                 * Catch all multicast addresses, set the filter to all 1's.
                 */
                hthi = 0xffffffffU;
                htlo = 0xffffffffU;
        } else {
                hthi = 0;
                htlo = 0;

                /*
                 * Now populate the hash table.
                 */
                for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) {
                        crc = fec_multicast_calc_crc(pmc->dmi_addr);
                        /* bit-reverse CRC bits [5:1] to form the hash index */
                        temp = (crc & 0x3f) >> 1;
                        hash_index = ((temp & 0x01) << 4) |
                                     ((temp & 0x02) << 2) |
                                     ((temp & 0x04)) |
                                     ((temp & 0x08) >> 2) |
                                     ((temp & 0x10) >> 4);
                        csrVal = (1 << hash_index);
                        if (crc & 1)
                                hthi |= csrVal;
                        else
                                htlo |= csrVal;
                }
        }

        spin_lock_irqsave(&fep->lock, flags);
        FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
        FW(fecp, hash_table_high, hthi);
        FW(fecp, hash_table_low, htlo);
        spin_unlock_irqrestore(&fep->lock, flags);
}

static int fec_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *mac = addr;
        struct fec_enet_private *fep = netdev_priv(dev);
        struct fec *fecp = fep->fecp;
        int i;
        __u32 addrhi, addrlo;
        unsigned long flags;

        /* Copy the new station address into the device structure. */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = mac->sa_data[i];

        /*
         * Set station address.
         */
        addrhi = ((__u32) dev->dev_addr[0] << 24) |
                 ((__u32) dev->dev_addr[1] << 16) |
                 ((__u32) dev->dev_addr[2] << 8) |
                  (__u32) dev->dev_addr[3];
        addrlo = ((__u32) dev->dev_addr[4] << 24) |
                 ((__u32) dev->dev_addr[5] << 16);

        spin_lock_irqsave(&fep->lock, flags);
        FW(fecp, addr_low, addrhi);
        FW(fecp, addr_high, addrlo);
        spin_unlock_irqrestore(&fep->lock, flags);

        return 0;
}

/*
 * This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
void fec_restart(struct net_device *dev, int duplex, int speed)
{
#ifdef CONFIG_DUET
        immap_t *immap = (immap_t *) IMAP_ADDR;
        __u32 cptr;
#endif
        struct fec_enet_private *fep = netdev_priv(dev);
        struct fec *fecp = fep->fecp;
        const struct fec_platform_info *fpi = fep->fpi;
        cbd_t *bdp;
        struct sk_buff *skb;
        int i;
        __u32 addrhi, addrlo;

        fec_whack_reset(fep->fecp);

        /*
         * Set station address.
         */
        addrhi = ((__u32) dev->dev_addr[0] << 24) |
                 ((__u32) dev->dev_addr[1] << 16) |
                 ((__u32) dev->dev_addr[2] << 8) |
                  (__u32) dev->dev_addr[3];
        addrlo = ((__u32) dev->dev_addr[4] << 24) |
                 ((__u32) dev->dev_addr[5] << 16);
        FW(fecp, addr_low, addrhi);
        FW(fecp, addr_high, addrlo);

        /*
         * Reset all multicast.
         */
        FW(fecp, hash_table_high, 0);
        FW(fecp, hash_table_low, 0);

        /*
         * Set maximum receive buffer size.
         */
        FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
        FW(fecp, r_hash, PKT_MAXBUF_SIZE);

        /*
         * Set receive and transmit descriptor base.
         */
        FW(fecp, r_des_start, iopa((__u32) (fep->rx_bd_base)));
        FW(fecp, x_des_start, iopa((__u32) (fep->tx_bd_base)));

        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->tx_free = fep->tx_ring;
        fep->cur_rx = fep->rx_bd_base;

        /*
         * Reset SKB receive buffers.
         */
        for (i = 0; i < fep->rx_ring; i++) {
                if ((skb = fep->rx_skbuff[i]) == NULL)
                        continue;
                fep->rx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }

        /*
         * Initialize the receive buffer descriptors.
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                skb = dev_alloc_skb(ENET_RX_FRSIZE);
                if (skb == NULL) {
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s Memory squeeze, unable to allocate skb\n",
                               dev->name);
                        fep->stats.rx_dropped++;
                        break;
                }
                fep->rx_skbuff[i] = skb;
                skb->dev = dev;
                CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data,
                                                 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                                 DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);	/* zero */
                CBDW_SC(bdp, BD_ENET_RX_EMPTY |
                        ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
        }
        /*
         * If allocation failed, fill up the remainder of the ring.
         */
        for (; i < fep->rx_ring; i++, bdp++) {
                fep->rx_skbuff[i] = NULL;
                CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
        }

        /*
         * Reset SKB transmit buffers.
         */
        for (i = 0; i < fep->tx_ring; i++) {
                if ((skb = fep->tx_skbuff[i]) == NULL)
                        continue;
                fep->tx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }

        /*
         * ...and initialize the transmit buffer descriptors the same way.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                fep->tx_skbuff[i] = NULL;
                CBDW_BUFADDR(bdp, virt_to_bus(NULL));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
        }

        /*
         * Enable big endian and don't care about SDMA FC.
         */
        FW(fecp, fun_code, 0x78000000);

        /*
         * Set MII speed.
         */
        FW(fecp, mii_speed, fep->fec_phy_speed);

        /*
         * Clear any outstanding interrupt.
         */
        FW(fecp, ievent, 0xffc0);
        FW(fecp, ivec, (fpi->fec_irq / 2) << 29);

        /*
         * Adjust to speed (only for DUET & RMII).
         */
#ifdef CONFIG_DUET
        cptr = in_be32(&immap->im_cpm.cp_cptr);
        switch (fpi->fec_no) {
        case 0:
                /*
                 * Check if in RMII mode.
                 */
                if ((cptr & 0x100) == 0)
                        break;

                if (speed == 10)
                        cptr |= 0x0000010;
                else if (speed == 100)
                        cptr &= ~0x0000010;
                break;
        case 1:
                /*
                 * Check if in RMII mode.
                 */
                if ((cptr & 0x80) == 0)
                        break;

                if (speed == 10)
                        cptr |= 0x0000008;
                else if (speed == 100)
                        cptr &= ~0x0000008;
                break;
        default:
                break;
        }
        out_be32(&immap->im_cpm.cp_cptr, cptr);
#endif

        FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
        /*
         * Adjust to duplex mode.
         */
        if (duplex) {
                FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
                FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
        } else {
                FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
                FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
        }

        /*
         * Enable interrupts we wish to service.
         */
        FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
           FEC_ENET_RXF | FEC_ENET_RXB);

        /*
         * And last, enable the transmit and receive processing.
         */
        FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
        FW(fecp, r_des_active, 0x01000000);
}

void fec_stop(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        fec_t *fecp = fep->fecp;
        struct sk_buff *skb;
        int i;

        if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
                return;		/* already down */

        FW(fecp, x_cntrl, 0x01);	/* Graceful transmit stop */
        for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
             i < FEC_RESET_DELAY; i++)
                udelay(1);

        if (i == FEC_RESET_DELAY)
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s FEC timeout on graceful transmit stop\n",
                       dev->name);
        /*
         * Disable the FEC; leave only MII interrupts enabled.
         */
        FW(fecp, imask, 0);
        FW(fecp, ecntrl, ~FEC_ECNTRL_ETHER_EN);

        /*
         * Reset SKB transmit buffers.
         */
        for (i = 0; i < fep->tx_ring; i++) {
                if ((skb = fep->tx_skbuff[i]) == NULL)
                        continue;
                fep->tx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }

        /*
         * Reset SKB receive buffers.
         */
        for (i = 0; i < fep->rx_ring; i++) {
                if ((skb = fep->rx_skbuff[i]) == NULL)
                        continue;
                fep->rx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }
}

/* common receive function */
static int fec_enet_rx_common(struct net_device *dev, int *budget)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        fec_t *fecp = fep->fecp;
        const struct fec_platform_info *fpi = fep->fpi;
        cbd_t *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        __u16 pkt_len, sc;
        int curidx;
        int rx_work_limit;

        if (fpi->use_napi) {
                rx_work_limit = min(dev->quota, *budget);

                if (!netif_running(dev))
                        return 0;
        }

        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        /* clear RX status bits for napi */
        if (fpi->use_napi)
                FW(fecp, ievent, FEC_ENET_RXF | FEC_ENET_RXB);

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_fifo_errors++;

                        skbn = fep->rx_skbuff[curidx];
                        BUG_ON(skbn == NULL);

                } else {

                        /* napi, got packet but no quota */
                        if (fpi->use_napi && --rx_work_limit < 0)
                                break;

                        skb = fep->rx_skbuff[curidx];
                        BUG_ON(skb == NULL);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);	/* align IP header */
                                        memcpy(skbn->data, skb->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                        if (skbn != NULL) {
                                skb->dev = dev;
                                skb_put(skb, pkt_len);	/* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                if (!fpi->use_napi)
                                        netif_rx(skb);
                                else
                                        netif_receive_skb(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(NULL, skbn->data,
                                                 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                                 DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                /*
                 * Doing this here will keep the FEC running while we process
                 * incoming frames.  On a heavily loaded network, we should be
                 * able to keep up at the expense of system resources.
                 */
                FW(fecp, r_des_active, 0x01000000);
        }

        fep->cur_rx = bdp;

        if (fpi->use_napi) {
                dev->quota -= received;
                *budget -= received;

                if (rx_work_limit < 0)
                        return 1;	/* not done */

                /* done */
                netif_rx_complete(dev);

                /* enable RX interrupt bits */
                FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
        }

        return 0;
}

static void fec_enet_tx(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        cbd_t *bdp;
        struct sk_buff *skb;
        int dirtyidx, do_wake;
        __u16 sc;

        spin_lock(&fep->lock);
        bdp = fep->dirty_tx;

        do_wake = 0;
        while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

                dirtyidx = bdp - fep->tx_bd_base;

                if (fep->tx_free == fep->tx_ring)
                        break;

                skb = fep->tx_skbuff[dirtyidx];

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                          BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
                        fep->stats.tx_errors++;
                        if (sc & BD_ENET_TX_HB)	/* No heartbeat */
                                fep->stats.tx_heartbeat_errors++;
                        if (sc & BD_ENET_TX_LC)	/* Late collision */
                                fep->stats.tx_window_errors++;
                        if (sc & BD_ENET_TX_RL)	/* Retrans limit */
                                fep->stats.tx_aborted_errors++;
                        if (sc & BD_ENET_TX_UN)	/* Underrun */
                                fep->stats.tx_fifo_errors++;
                        if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
                                fep->stats.tx_carrier_errors++;
                } else
                        fep->stats.tx_packets++;

                if (sc & BD_ENET_TX_READY)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s HEY! Enet xmit interrupt and TX_READY.\n",
                               dev->name);

                /*
                 * Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (sc & BD_ENET_TX_DEF)
                        fep->stats.collisions++;

                /*
                 * Free the sk buffer associated with this last transmit.
                 */
                dev_kfree_skb_irq(skb);
                fep->tx_skbuff[dirtyidx] = NULL;

                /*
                 * Update pointer to next buffer descriptor to be transmitted.
                 */
                if ((sc & BD_ENET_TX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->tx_bd_base;

                /*
                 * Since we have freed up a buffer, the ring is no longer
                 * full.
                 */
                if (!fep->tx_free++)
                        do_wake = 1;
        }

        fep->dirty_tx = bdp;

        spin_unlock(&fep->lock);

        if (do_wake && netif_queue_stopped(dev))
                netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct fec_enet_private *fep;
        const struct fec_platform_info *fpi;
        fec_t *fecp;
        __u32 int_events;
        __u32 int_events_napi;

        if (unlikely(dev == NULL))
                return IRQ_NONE;

        fep = netdev_priv(dev);
        fecp = fep->fecp;
        fpi = fep->fpi;

        /*
         * Get the interrupt events that caused us to be here.
         */
        while ((int_events = FR(fecp, ievent) & FR(fecp, imask)) != 0) {

                if (!fpi->use_napi)
                        FW(fecp, ievent, int_events);
                else {
                        int_events_napi = int_events & ~(FEC_ENET_RXF | FEC_ENET_RXB);
                        FW(fecp, ievent, int_events_napi);
                }

                if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
                                   FEC_ENET_BABT | FEC_ENET_EBERR)) != 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s FEC ERROR(s) 0x%x\n",
                               dev->name, int_events);

                if ((int_events & FEC_ENET_RXF) != 0) {
                        if (!fpi->use_napi)
                                fec_enet_rx_common(dev, NULL);
                        else {
                                if (netif_rx_schedule_prep(dev)) {
                                        /* disable rx interrupts */
                                        FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
                                        __netif_rx_schedule(dev);
                                } else {
                                        printk(KERN_ERR DRV_MODULE_NAME
                                               ": %s driver bug! interrupt while in poll!\n",
                                               dev->name);
                                        FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
                                }
                        }
                }

                if ((int_events & FEC_ENET_TXF) != 0)
                        fec_enet_tx(dev);
        }

        return IRQ_HANDLED;
}

/* This interrupt occurs when the PHY detects a link change. */
static irqreturn_t
fec_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct fec_enet_private *fep;
        const struct fec_platform_info *fpi;

        if (unlikely(dev == NULL))
                return IRQ_NONE;

        fep = netdev_priv(dev);
        fpi = fep->fpi;

        if (!fpi->use_mdio)
                return IRQ_NONE;

        /*
         * Acknowledge the interrupt if possible.  If we have not
         * found the PHY yet we can't process or acknowledge the
         * interrupt now.  Instead we ignore this interrupt for now,
         * which we can do since it is edge triggered.  It will be
         * acknowledged later by fec_enet_open().
         */
        if (!fep->phy)
                return IRQ_NONE;

        fec_mii_ack_int(dev);
        fec_mii_link_status_change_check(dev, 0);

        return IRQ_HANDLED;
}

/**********************************************************************************/

static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        fec_t *fecp = fep->fecp;
        cbd_t *bdp;
        int curidx;
        unsigned long flags;

        spin_lock_irqsave(&fep->tx_lock, flags);

        /*
         * Fill in a Tx ring entry.
         */
        bdp = fep->cur_tx;

        if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&fep->tx_lock, flags);

                /*
                 * Oops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since the tx queue should be stopped.
                 */
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s tx queue full!\n", dev->name);
                return 1;
        }

        curidx = bdp - fep->tx_bd_base;
        /*
         * Clear all of the status flags.
         */
        CBDC_SC(bdp, BD_ENET_TX_STATS);

        /*
         * Save skb pointer.
         */
        fep->tx_skbuff[curidx] = skb;

        fep->stats.tx_bytes += skb->len;

        /*
         * Push the data cache so the CPM does not get stale memory data.
         */
        CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data,
                                         skb->len, DMA_TO_DEVICE));
        CBDW_DATLEN(bdp, skb->len);

        dev->trans_start = jiffies;

        /*
         * If this was the last BD in the ring, start at the beginning again.
         */
        if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
                fep->cur_tx++;
        else
                fep->cur_tx = fep->tx_bd_base;

        if (!--fep->tx_free)
                netif_stop_queue(dev);

        /*
         * Trigger transmission start.
         */
        CBDS_SC(bdp, BD_ENET_TX_READY | BD_ENET_TX_INTR |
                BD_ENET_TX_LAST | BD_ENET_TX_TC);
        FW(fecp, x_des_active, 0x01000000);

        spin_unlock_irqrestore(&fep->tx_lock, flags);

        return 0;
}

static void fec_timeout(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);

        fep->stats.tx_errors++;

        if (fep->tx_free)
                netif_wake_queue(dev);

        /* check link status again */
        fec_mii_link_status_change_check(dev, 0);
}

static int fec_enet_open(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        const struct fec_platform_info *fpi = fep->fpi;
        unsigned long flags;

        /* Install our interrupt handler. */
        if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s Could not allocate FEC IRQ!", dev->name);
                return -EINVAL;
        }

        /* Install our phy interrupt handler. */
        if (fpi->phy_irq != -1 &&
            request_irq(fpi->phy_irq, fec_mii_link_interrupt, 0, "fec-phy",
                        dev) != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s Could not allocate PHY IRQ!", dev->name);
                free_irq(fpi->fec_irq, dev);
                return -EINVAL;
        }

        if (fpi->use_mdio) {
                fec_mii_startup(dev);
                netif_carrier_off(dev);
                fec_mii_link_status_change_check(dev, 1);
        } else {
                spin_lock_irqsave(&fep->lock, flags);
                fec_restart(dev, 1, 100);	/* XXX this sucks */
                spin_unlock_irqrestore(&fep->lock, flags);

                netif_carrier_on(dev);
                netif_start_queue(dev);
        }
        return 0;
}

static int fec_enet_close(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        const struct fec_platform_info *fpi = fep->fpi;
        unsigned long flags;

        netif_stop_queue(dev);
        netif_carrier_off(dev);

        if (fpi->use_mdio)
                fec_mii_shutdown(dev);

        spin_lock_irqsave(&fep->lock, flags);
        fec_stop(dev);
        spin_unlock_irqrestore(&fep->lock, flags);

        /* release any irqs */
        if (fpi->phy_irq != -1)
                free_irq(fpi->phy_irq, dev);
        free_irq(fpi->fec_irq, dev);

        return 0;
}

static struct net_device_stats *fec_enet_get_stats(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        return &fep->stats;
}

static int fec_enet_poll(struct net_device *dev, int *budget)
{
        return fec_enet_rx_common(dev, budget);
}

/*************************************************************************/

static void fec_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
}

static int fec_get_regs_len(struct net_device *dev)
{
        return sizeof(fec_t);
}

static void fec_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                         void *p)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        if (regs->len < sizeof(fec_t))
                return;

        regs->version = 0;
        spin_lock_irqsave(&fep->lock, flags);
        memcpy_fromio(p, fep->fecp, sizeof(fec_t));
        spin_unlock_irqrestore(&fep->lock, flags);
}

static int fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&fep->lock, flags);
        rc = mii_ethtool_gset(&fep->mii_if, cmd);
        spin_unlock_irqrestore(&fep->lock, flags);

        return rc;
}

static int fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&fep->lock, flags);
        rc = mii_ethtool_sset(&fep->mii_if, cmd);
        spin_unlock_irqrestore(&fep->lock, flags);

        return rc;
}

static int fec_nway_reset(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        return mii_nway_restart(&fep->mii_if);
}

static __u32 fec_get_msglevel(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        return fep->msg_enable;
}

static void fec_set_msglevel(struct net_device *dev, __u32 value)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        fep->msg_enable = value;
}

static struct ethtool_ops fec_ethtool_ops = {
        .get_drvinfo	= fec_get_drvinfo,
        .get_regs_len	= fec_get_regs_len,
        .get_settings	= fec_get_settings,
        .set_settings	= fec_set_settings,
        .nway_reset	= fec_nway_reset,
        .get_link	= ethtool_op_get_link,
        .get_msglevel	= fec_get_msglevel,
        .set_msglevel	= fec_set_msglevel,
        .get_tx_csum	= ethtool_op_get_tx_csum,
        .set_tx_csum	= ethtool_op_set_tx_csum,	/* local! */
        .get_sg		= ethtool_op_get_sg,
        .set_sg		= ethtool_op_set_sg,
        .get_regs	= fec_get_regs,
};

static int fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
        unsigned long flags;
        int rc;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irqsave(&fep->lock, flags);
        rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
        spin_unlock_irqrestore(&fep->lock, flags);
        return rc;
}

int fec_8xx_init_one(const struct fec_platform_info *fpi,
                     struct net_device **devp)
{
        immap_t *immap = (immap_t *) IMAP_ADDR;
        static int fec_8xx_version_printed = 0;
        struct net_device *dev = NULL;
        struct fec_enet_private *fep = NULL;
        fec_t *fecp = NULL;
        int i;
        int err = 0;
        int registered = 0;
        __u32 siel;

        *devp = NULL;

        switch (fpi->fec_no) {
        case 0:
                fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec;
                break;
#ifdef CONFIG_DUET
        case 1:
                fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec2;
                break;
#endif
        default:
                return -EINVAL;
        }

        if (fec_8xx_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        i = sizeof(*fep) + (sizeof(struct sk_buff **) *
                            (fpi->rx_ring + fpi->tx_ring));

        dev = alloc_etherdev(i);
        if (!dev) {
                err = -ENOMEM;
                goto err;
        }
        SET_MODULE_OWNER(dev);

        fep = netdev_priv(dev);

        /* partial reset of FEC */
        fec_whack_reset(fecp);

        /* point rx_skbuff, tx_skbuff */
        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

        fep->fecp = fecp;
        fep->fpi = fpi;

        /* init locks */
        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);

        /*
         * Set the Ethernet address.
         */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = fpi->macaddr[i];

        fep->ring_base = dma_alloc_coherent(NULL,
                                            (fpi->tx_ring + fpi->rx_ring) *
                                            sizeof(cbd_t), &fep->ring_mem_addr,
                                            GFP_KERNEL);
        if (fep->ring_base == NULL) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s dma alloc failed.\n", dev->name);
                err = -ENOMEM;
                goto err;
        }

        /*
         * Set receive and transmit descriptor base.
         */
        fep->rx_bd_base = fep->ring_base;
        fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

        /* initialize ring size variables */
        fep->tx_ring = fpi->tx_ring;
        fep->rx_ring = fpi->rx_ring;

        /* SIU interrupt */
        if (fpi->phy_irq != -1 &&
            (fpi->phy_irq >= SIU_IRQ0 && fpi->phy_irq < SIU_LEVEL7)) {

                siel = in_be32(&immap->im_siu_conf.sc_siel);
                if ((fpi->phy_irq & 1) == 0)
                        siel |= (0x80000000 >> fpi->phy_irq);
                else
                        siel &= ~(0x80000000 >> (fpi->phy_irq & ~1));
                out_be32(&immap->im_siu_conf.sc_siel, siel);
        }

        /*
         * The FEC Ethernet specific entries in the device structure.
         */
        dev->open = fec_enet_open;
        dev->hard_start_xmit = fec_enet_start_xmit;
        dev->tx_timeout = fec_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->stop = fec_enet_close;
        dev->get_stats = fec_enet_get_stats;
        dev->set_multicast_list = fec_set_multicast_list;
        dev->set_mac_address = fec_set_mac_address;
        if (fpi->use_napi) {
                dev->poll = fec_enet_poll;
                dev->weight = fpi->napi_weight;
        }
        dev->ethtool_ops = &fec_ethtool_ops;
        dev->do_ioctl = fec_ioctl;

        fep->fec_phy_speed =
            ((((fpi->sys_clk + 4999999) / 2500000) / 2) & 0x3F) << 1;

        init_timer(&fep->phy_timer_list);

        /* partial reset of FEC so that only MII works */
        FW(fecp, mii_speed, fep->fec_phy_speed);
        FW(fecp, ievent, 0xffc0);
        FW(fecp, ivec, (fpi->fec_irq / 2) << 29);
        FW(fecp, imask, 0);
        FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
        FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err != 0)
                goto err;
        registered = 1;

        if (fpi->use_mdio) {
                fep->mii_if.dev = dev;
                fep->mii_if.mdio_read = fec_mii_read;
                fep->mii_if.mdio_write = fec_mii_write;
                fep->mii_if.phy_id_mask = 0x1f;
                fep->mii_if.reg_num_mask = 0x1f;
                fep->mii_if.phy_id = fec_mii_phy_id_detect(dev);
        }

        *devp = dev;

        return 0;

      err:
        if (dev != NULL) {
                if (fecp != NULL)
                        fec_whack_reset(fecp);

                if (registered)
                        unregister_netdev(dev);

                if (fep != NULL) {
                        if (fep->ring_base)
                                dma_free_coherent(NULL,
                                                  (fpi->tx_ring +
                                                   fpi->rx_ring) *
                                                  sizeof(cbd_t), fep->ring_base,
                                                  fep->ring_mem_addr);
                }
                free_netdev(dev);
        }
        return err;
}

int fec_8xx_cleanup_one(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        fec_t *fecp = fep->fecp;
        const struct fec_platform_info *fpi = fep->fpi;

        fec_whack_reset(fecp);

        unregister_netdev(dev);

        dma_free_coherent(NULL, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
                          fep->ring_base, fep->ring_mem_addr);

        free_netdev(dev);

        return 0;
}

/**************************************************************************************/
/**************************************************************************************/
/**************************************************************************************/

static int __init fec_8xx_init(void)
{
        return fec_8xx_platform_init();
}

static void __exit fec_8xx_cleanup(void)
{
        fec_8xx_platform_cleanup();
}

/**************************************************************************************/
/**************************************************************************************/
/**************************************************************************************/

module_init(fec_8xx_init);
module_exit(fec_8xx_cleanup);