ia64/linux-2.6.18-xen.hg

view drivers/net/sunqe.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
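
The retry behaviour described above can be pictured with a minimal sketch
(this is not code from this tree; the real implementation lives in the
balloon driver itself). All identifiers here (balloon_target, current_pages,
retry_timer, increase_reservation, decrease_reservation) are placeholders,
and the two reservation helpers are assumed to return the number of pages
actually transferred:

/* Illustrative sketch only: no hard limit, just retry on a timer until the
 * target is reached.  Every identifier here is a placeholder.
 */
static unsigned long current_pages;	/* pages the guest currently holds */
static unsigned long balloon_target;	/* target written by the toolstack */
static struct timer_list retry_timer;	/* re-arms the worker below */

/* Assumed helpers: each returns how many pages were actually transferred. */
static unsigned long increase_reservation(unsigned long nr_pages);
static unsigned long decrease_reservation(unsigned long nr_pages);

static void balloon_retry(unsigned long data)
{
	long credit = (long)balloon_target - (long)current_pages;

	if (credit > 0)
		/* Keep whatever Xen grants, even if it is fewer pages
		 * than we asked for.
		 */
		current_pages += increase_reservation(credit);
	else if (credit < 0)
		current_pages -= decrease_reservation(-credit);

	/* The host may only be under temporary memory pressure (another
	 * guest may still be ballooning down), so try again later rather
	 * than giving up until a new target is written.
	 */
	if (current_pages != balloon_target)
		mod_timer(&retry_timer, jiffies + HZ);
}
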
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067

/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.0"
#define DRV_RELDATE	"June 23, 2006"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES 200

static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}
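
/* Give every receive descriptor back to the chip with a full-sized buffer
 * and reset the ring indices; the transmit ring starts out empty.
 */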
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter-frame space nor throttling seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (tries--) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		qep->net_stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		qep->net_stats.tx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_dropped++;
		qep->net_stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		qep->net_stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->dev->last_rx = jiffies;
				qep->net_stats.rx_packets++;
				qep->net_stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (! tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	memcpy(txbuf, skb->data, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	qep->net_stats.tx_packets++;
	qep->net_stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return 0;
}

static struct net_device_stats *qe_get_stats(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	return &qep->net_stats;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct dev_mc_list *dmi = dev->mc_list;
	u8 new_mconfig = qep->mconfig;
	char *addrs;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;
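
		/* Build the 64-bit logical address filter: hash each
		 * multicast address with little-endian CRC-32 and let the
		 * top six bits of the CRC select one bit of the filter.
		 */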
		for (i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct sunqe *qep = dev->priv;

	strcpy(info->driver, "sunqe");
	strcpy(info->version, "3.0");
	sprintf(info->bus_info, "SBUS:%d",
		qep->qe_sdev->slot);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = dev->priv;
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return (phyconfig & MREGS_PHYCONFIG_LSTAT);
}

static struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo	= qe_get_drvinfo,
	.get_link	= qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}

static u8 __init qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}

static struct sunqec * __init get_qec(struct sbus_dev *child_sdev)
{
	struct sbus_dev *qec_sdev = child_sdev->parent;
	struct sunqec *qecp;

	for (qecp = root_qec_dev; qecp; qecp = qecp->next_module) {
		if (qecp->qec_sdev == qec_sdev)
			break;
	}
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->qec_sdev = qec_sdev;
			qecp->gregs = sbus_ioremap(&qec_sdev->resource[0], 0,
						   GLOB_REG_SIZE,
						   "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(qec_sdev->ofdev.node);

			qec_init_once(qecp, qec_sdev);

			if (request_irq(qec_sdev->irqs[0], &qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}

static int __init qec_ether_init(struct sbus_dev *sdev)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqe *qe;
	struct sunqec *qecp;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	qe = netdev_priv(dev);

	i = of_getintprop_default(sdev->ofdev.node, "channel#", -1);
	if (i == -1) {
		struct sbus_dev *td = sdev->parent->child;
		i = 0;
		while (td != sdev) {
			td = td->next;
			i++;
		}
	}
	qe->channel = i;
	spin_lock_init(&qe->lock);

	res = -ENODEV;
	qecp = get_qec(sdev);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->qe_sdev = sdev;

	res = -ENOMEM;
	qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
				  CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
				 MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
					     PAGE_SIZE,
					     &qe->qblock_dvma);
	qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
					    sizeof(struct sunqe_buffers),
					    &qe->buffers_dvma);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &sdev->ofdev.dev);

	dev->open = qe_open;
	dev->stop = qe_close;
	dev->hard_start_xmit = qe_start_xmit;
	dev->get_stats = qe_get_stats;
	dev->set_multicast_list = qe_set_multicast;
	dev->tx_timeout = qe_tx_timeout;
	dev->watchdog_timeo = 5*HZ;
	dev->irq = sdev->irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	dev_set_drvdata(&sdev->ofdev.dev, qe);

	printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c",
		       dev->dev_addr[i],
		       i == 5 ? ' ' : ':');
	printk("\n");

	return 0;

fail:
	if (qe->qcregs)
		sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		sbus_free_consistent(qe->qe_sdev,
				     PAGE_SIZE,
				     qe->qe_block,
				     qe->qblock_dvma);
	if (qe->buffers)
		sbus_free_consistent(qe->qe_sdev,
				     sizeof(struct sunqe_buffers),
				     qe->buffers,
				     qe->buffers_dvma);

	free_netdev(dev);

	return res;
}

static int __devinit qec_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);

	return qec_ether_init(sdev);
}

static int __devexit qec_sbus_remove(struct of_device *dev)
{
	struct sunqe *qp = dev_get_drvdata(&dev->dev);
	struct net_device *net_dev = qp->dev;

	unregister_netdevice(net_dev);

	sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
	sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
	sbus_free_consistent(qp->qe_sdev,
			     PAGE_SIZE,
			     qp->qe_block,
			     qp->qblock_dvma);
	sbus_free_consistent(qp->qe_sdev,
			     sizeof(struct sunqe_buffers),
			     qp->buffers,
			     qp->buffers_dvma);

	free_netdev(net_dev);

	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct of_platform_driver qec_sbus_driver = {
	.name		= "qec",
	.match_table	= qec_sbus_match,
	.probe		= qec_sbus_probe,
	.remove		= __devexit_p(qec_sbus_remove),
};

static int __init qec_init(void)
{
	return of_register_driver(&qec_sbus_driver, &sbus_bus_type);
}

static void __exit qec_exit(void)
{
	of_unregister_driver(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;

		free_irq(root_qec_dev->qec_sdev->irqs[0],
			 (void *) root_qec_dev);
		sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);