ia64/linux-2.6.18-xen.hg

view drivers/net/mace.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation, and records the
limit it reached as the "hard limit". The driver will not try again
until the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, so there is temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), then we may as well keep
those pages rather than returning them to Xen.
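
(Illustrative sketch, not part of the patch: the retry behaviour
amounts to the loop below, written as a stand-alone C program. Here
host_grant(), current_pages and target_pages are hypothetical
stand-ins for the increase-reservation hypercall and the driver's
state, and the loop stands in for the timer-driven retry.)

#include <stdio.h>

static long current_pages = 1000;
static long target_pages  = 1500;

/* Pretend the host is under pressure and grants at most 200 pages
 * per attempt; a real host could also grant zero. */
static long host_grant(long want)
{
    return want > 200 ? 200 : want;
}

static void balloon_up(void)
{
    while (current_pages < target_pages) {
        long got = host_grant(target_pages - current_pages);

        /* Keep partial grants instead of handing them back... */
        current_pages += got;
        printf("reservation now %ld/%ld\n", current_pages, target_pages);

        /* ...and rather than recording a "hard limit" on failure,
         * a real driver would re-arm its timer here and return. */
        if (got == 0)
            break;
    }
}

int main(void)
{
    balloon_up();
    return 0;
}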

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"
static int port_aaui = -1;

#define N_RX_RING       8
#define N_TX_RING       6
#define MAX_TX_ACTIVE   1
#define NCMDS_TX        1       /* dma commands per element in tx ring */
#define RX_BUFLEN       (ETH_FRAME_LEN + 8)
#define TX_TIMEOUT      HZ      /* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV      0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR      0x80
struct mace_data {
    volatile struct mace __iomem *mace;
    volatile struct dbdma_regs __iomem *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs __iomem *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds;     /* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds;     /* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct net_device_stats stats;
    struct timer_list tx_timeout;
    int timeout_active;
    int port_aaui;
    int chipid;
    struct macio_dev *mdev;
    spinlock_t lock;
};
/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES      (sizeof(struct mace_data) \
        + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
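/*
 * With the defaults above this is 8 rx + 6 tx commands plus 3 more
 * slots (the two branch commands and one slot of alignment slack):
 * 17 dbdma_cmd slots of 16 bytes each, i.e. 272 bytes on top of
 * struct mace_data.
 */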
static int bitrev(int);
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);
/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;
/* Bit-reverse one byte of an ethernet hardware address. */
static inline int
bitrev(int b)
{
    int d = 0, i;

    for (i = 0; i < 8; ++i, b >>= 1)
        d = (d << 1) | (b & 1);
    return d;
}
static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
    struct device_node *mace = macio_get_of_node(mdev);
    struct net_device *dev;
    struct mace_data *mp;
    unsigned char *addr;
    int j, rev, rc = -EBUSY;

    if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
        printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
               mace->full_name);
        return -ENODEV;
    }

    addr = get_property(mace, "mac-address", NULL);
    if (addr == NULL) {
        addr = get_property(mace, "local-mac-address", NULL);
        if (addr == NULL) {
            printk(KERN_ERR "Can't get mac-address for MACE %s\n",
                   mace->full_name);
            return -ENODEV;
        }
    }

    /*
     * lazy allocate the driver-wide dummy buffer. (Note that we
     * never have more than one MACE in the system anyway)
     */
    if (dummy_buf == NULL) {
        dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
        if (dummy_buf == NULL) {
            printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
            return -ENOMEM;
        }
    }

    if (macio_request_resources(mdev, "mace")) {
        printk(KERN_ERR "MACE: can't request IO resources !\n");
        return -EBUSY;
    }

    dev = alloc_etherdev(PRIV_BYTES);
    if (!dev) {
        printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
        rc = -ENOMEM;
        goto err_release;
    }
    SET_MODULE_OWNER(dev);
    SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

    mp = dev->priv;
    mp->mdev = mdev;
    macio_set_drvdata(mdev, dev);

    dev->base_addr = macio_resource_start(mdev, 0);
    mp->mace = ioremap(dev->base_addr, 0x1000);
    if (mp->mace == NULL) {
        printk(KERN_ERR "MACE: can't map IO resources !\n");
        rc = -ENOMEM;
        goto err_free;
    }
    dev->irq = macio_irq(mdev, 0);
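
    /*
     * Some OF device trees hold the MAC address bit-reversed
     * (detected here by the 00:a0 prefix); undo that per byte.
     */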
    rev = addr[0] == 0 && addr[1] == 0xA0;
    for (j = 0; j < 6; ++j) {
        dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
    }
    mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
                 in_8(&mp->mace->chipid_lo);

    mp = (struct mace_data *) dev->priv;
    mp->maccc = ENXMT | ENRCV;

    mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
    if (mp->tx_dma == NULL) {
        printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
        rc = -ENOMEM;
        goto err_unmap_io;
    }
    mp->tx_dma_intr = macio_irq(mdev, 1);

    mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
    if (mp->rx_dma == NULL) {
        printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
        rc = -ENOMEM;
        goto err_unmap_tx_dma;
    }
    mp->rx_dma_intr = macio_irq(mdev, 2);
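
    /*
     * The dma command lists live in the tail of the private area
     * allocated with the netdev; rx_cmds starts one slot past the
     * tx commands so the tx list can end with a branch command.
     */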
    mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
    mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

    memset(&mp->stats, 0, sizeof(mp->stats));
    memset((char *) mp->tx_cmds, 0,
           (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
    init_timer(&mp->tx_timeout);
    spin_lock_init(&mp->lock);
    mp->timeout_active = 0;

    if (port_aaui >= 0)
        mp->port_aaui = port_aaui;
    else {
        /* Apple Network Server uses the AAUI port */
        if (machine_is_compatible("AAPL,ShinerESB"))
            mp->port_aaui = 1;
        else {
#ifdef CONFIG_MACE_AAUI_PORT
            mp->port_aaui = 1;
#else
            mp->port_aaui = 0;
#endif
        }
    }

    dev->open = mace_open;
    dev->stop = mace_close;
    dev->hard_start_xmit = mace_xmit_start;
    dev->get_stats = mace_stats;
    dev->set_multicast_list = mace_set_multicast;
    dev->set_mac_address = mace_set_address;

    /*
     * Most of what is below could be moved to mace_open()
     */
    mace_reset(dev);

    rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
    if (rc) {
        printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
        goto err_unmap_rx_dma;
    }
    rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
    if (rc) {
        printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
        goto err_free_irq;
    }
    rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
    if (rc) {
        printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
        goto err_free_tx_irq;
    }

    rc = register_netdev(dev);
    if (rc) {
        printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
        goto err_free_rx_irq;
    }

    printk(KERN_INFO "%s: MACE at", dev->name);
    for (j = 0; j < 6; ++j) {
        printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
    }
    printk(", chip revision %d.%d\n", mp->chipid >> 8, mp->chipid & 0xff);

    return 0;

 err_free_rx_irq:
    free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
    free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
    free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
    iounmap(mp->rx_dma);
 err_unmap_tx_dma:
    iounmap(mp->tx_dma);
 err_unmap_io:
    iounmap(mp->mace);
 err_free:
    free_netdev(dev);
 err_release:
    macio_release_resources(mdev);

    return rc;
}
static int __devexit mace_remove(struct macio_dev *mdev)
{
    struct net_device *dev = macio_get_drvdata(mdev);
    struct mace_data *mp;

    BUG_ON(dev == NULL);

    macio_set_drvdata(mdev, NULL);

    mp = dev->priv;

    unregister_netdev(dev);

    free_irq(dev->irq, dev);
    free_irq(mp->tx_dma_intr, dev);
    free_irq(mp->rx_dma_intr, dev);

    iounmap(mp->rx_dma);
    iounmap(mp->tx_dma);
    iounmap(mp->mace);

    free_netdev(dev);

    macio_release_resources(mdev);

    return 0;
}
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
    int i;

    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
    for (i = 200; i > 0; --i)
        if (ld_le32(&dma->control) & RUN)
            udelay(1);
}
static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
        out_8(&mb->biucc, SWRST);
        if (in_8(&mb->biucc) & SWRST) {
            udelay(10);
            continue;
        }
        break;
    }
    if (!i) {
        printk(KERN_ERR "mace: cannot reset chip!\n");
        return;
    }

    out_8(&mb->imr, 0xff);      /* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);       /* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT);   /* auto-pad short frames */
    out_8(&mb->rcvfc, 0);

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, LOGADDR);
    else {
        out_8(&mb->iac, ADDRCHG | LOGADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
            ;
    }
    for (i = 0; i < 8; ++i)
        out_8(&mb->ladrf, 0);

    /* done changing address */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, 0);

    if (mp->port_aaui)
        out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
    else
        out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
static void __mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, PHYADDR);
    else {
        out_8(&mb->iac, ADDRCHG | PHYADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
            ;
    }
    for (i = 0; i < 6; ++i)
        out_8(&mb->padr, dev->dev_addr[i] = p[i]);
    if (mp->chipid != BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, 0);
}
static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);

    __mace_set_address(dev, addr);

    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    spin_unlock_irqrestore(&mp->lock, flags);
    return 0;
}
static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
        if (mp->rx_bufs[i] != 0) {
            dev_kfree_skb(mp->rx_bufs[i]);
            mp->rx_bufs[i] = NULL;
        }
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
    }
}
static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
    for (i = 0; i < N_RX_RING - 1; ++i) {
        skb = dev_alloc_skb(RX_BUFLEN + 2);
        if (skb == 0) {
            data = dummy_buf;
        } else {
            skb_reserve(skb, 2);        /* so IP header lands on 4-byte bdry */
            data = skb->data;
        }
        mp->rx_bufs[i] = skb;
        st_le16(&cp->req_count, RX_BUFLEN);
        st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
        st_le32(&cp->phy_addr, virt_to_bus(data));
        cp->xfer_status = 0;
        ++cp;
    }
    mp->rx_bufs[i] = NULL;
    st_le16(&cp->command, DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));

    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

    return 0;
}
static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);      /* disable all intrs */

    /* disable rx and tx dma */
    st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

    mace_clean_rings(mp);

    return 0;
}
static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;

    if (mp->timeout_active)
        del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
}
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    spin_lock_irqsave(&mp->lock, flags);
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty) {
        netif_stop_queue(dev);
        mp->tx_fullup = 1;
        spin_unlock_irqrestore(&mp->lock, flags);
        return 1;               /* can't take it at the moment */
    }
    spin_unlock_irqrestore(&mp->lock, flags);

    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
        printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
        len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    st_le16(&cp->req_count, len);
    st_le32(&cp->phy_addr, virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);

    /* poke the tx dma channel */
    spin_lock_irqsave(&mp->lock, flags);
    mp->tx_fill = next;
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        ++mp->tx_active;
        mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty)
        netif_stop_queue(dev);
    spin_unlock_irqrestore(&mp->lock, flags);

    return 0;
}
static struct net_device_stats *mace_stats(struct net_device *dev)
{
    struct mace_data *p = (struct mace_data *) dev->priv;

    return &p->stats;
}
static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    int i, j;
    u32 crc;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
        mp->maccc |= PROM;
    } else {
        unsigned char multicast_filter[8];
        struct dev_mc_list *dmi = dev->mc_list;

        if (dev->flags & IFF_ALLMULTI) {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0xff;
        } else {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0;
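            /*
             * Hash each address into the 64-bit logical address
             * filter: the top 6 bits of the little-endian CRC
             * select one of the 64 filter bits.
             */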
            for (i = 0; i < dev->mc_count; i++) {
                crc = ether_crc_le(6, dmi->dmi_addr);
                j = crc >> 26;  /* bit number in multicast_filter */
                multicast_filter[j >> 3] |= 1 << (j & 7);
                dmi = dmi->next;
            }
        }
#if 0
        printk("Multicast filter :");
        for (i = 0; i < 8; i++)
            printk("%02x ", multicast_filter[i]);
        printk("\n");
#endif

        if (mp->chipid == BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, LOGADDR);
        else {
            out_8(&mb->iac, ADDRCHG | LOGADDR);
            while ((in_8(&mb->iac) & ADDRCHG) != 0)
                ;
        }
        for (i = 0; i < 8; ++i)
            out_8(&mb->ladrf, multicast_filter[i]);
        if (mp->chipid != BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, 0);
    }
    /* reset maccc */
    out_8(&mb->maccc, mp->maccc);
    spin_unlock_irqrestore(&mp->lock, flags);
}
static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
{
    volatile struct mace __iomem *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

    if (intr & MPCO)
        mp->stats.rx_missed_errors += 256;
    mp->stats.rx_missed_errors += in_8(&mb->mpc);       /* reading clears it */
    if (intr & RNTPCO)
        mp->stats.rx_length_errors += 256;
    mp->stats.rx_length_errors += in_8(&mb->rntpc);     /* reading clears it */
    if (intr & CERR)
        ++mp->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
        if (mace_babbles++ < 4)
            printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
        if (mace_jabbers++ < 4)
            printk(KERN_DEBUG "mace: jabbering transceiver\n");
}
static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    unsigned long flags;
    /* static int mace_last_fs, mace_last_xcount; */

    spin_lock_irqsave(&mp->lock, flags);
    intr = in_8(&mb->ir);       /* read interrupt register */
    in_8(&mb->xmtrc);           /* get retries */
    mace_handle_misc_intrs(mp, intr);

    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
        del_timer(&mp->tx_timeout);
        mp->timeout_active = 0;
        /*
         * Clear any interrupt indication associated with this status
         * word.  This appears to unlatch any error indication from
         * the DMA controller.
         */
        intr = in_8(&mb->ir);
        if (intr != 0)
            mace_handle_misc_intrs(mp, intr);
        if (mp->tx_bad_runt) {
            fs = in_8(&mb->xmtfs);
            mp->tx_bad_runt = 0;
            out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            continue;
        }
        dstat = ld_le32(&td->status);
        /* stop DMA controller */
        out_le32(&td->control, RUN << 16);
        /*
         * xcount is the number of complete frames which have been
         * written to the fifo but for which status has not been read.
         */
        xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
        if (xcount == 0 || (dstat & DEAD)) {
            /*
             * If a packet was aborted before the DMA controller has
             * finished transferring it, it seems that there are 2 bytes
             * which are stuck in some buffer somewhere.  These will get
             * transmitted as soon as we read the frame status (which
             * reenables the transmit data transfer request).  Turning
             * off the DMA controller and/or resetting the MACE doesn't
             * help.  So we disable auto-padding and FCS transmission
             * so the two bytes will only be a runt packet which should
             * be ignored by other stations.
             */
            out_8(&mb->xmtfc, DXMTFCS);
        }
        fs = in_8(&mb->xmtfs);
        if ((fs & XMTSV) == 0) {
            printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            mace_reset(dev);
            /*
             * XXX mace likes to hang the machine after a xmtfs error.
             * This is hard to reproduce, resetting *may* help
             */
        }
        cp = mp->tx_cmds + NCMDS_TX * i;
        stat = ld_le16(&cp->xfer_status);
        if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
            /*
             * Check whether there were in fact 2 bytes written to
             * the transmit FIFO.
             */
            udelay(1);
            x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
            if (x != 0) {
                /* there were two bytes with an end-of-packet indication */
                mp->tx_bad_runt = 1;
                mace_set_timeout(dev);
            } else {
                /*
                 * Either there weren't the two bytes buffered up, or they
                 * didn't have an end-of-packet indication.
                 * We flush the transmit FIFO just in case (by setting the
                 * XMTFWU bit with the transmitter disabled).
                 */
                out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
                out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
                udelay(1);
                out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
                out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            }
        }
        /* dma should have finished */
        if (i == mp->tx_fill) {
            printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            continue;
        }
        /* Update stats */
        if (fs & (UFLO|LCOL|LCAR|RTRY)) {
            ++mp->stats.tx_errors;
            if (fs & LCAR)
                ++mp->stats.tx_carrier_errors;
            if (fs & (UFLO|LCOL|RTRY))
                ++mp->stats.tx_aborted_errors;
        } else {
            mp->stats.tx_bytes += mp->tx_bufs[i]->len;
            ++mp->stats.tx_packets;
        }
        dev_kfree_skb_irq(mp->tx_bufs[i]);
        --mp->tx_active;
        if (++i >= N_TX_RING)
            i = 0;
#if 0
        mace_last_fs = fs;
        mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
        mp->tx_fullup = 0;
        netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
        i -= N_TX_RING;
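    /*
     * i now indexes the first tx command that has been queued but
     * not yet handed to the DMA engine; start as many as
     * MAX_TX_ACTIVE allows.
     */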
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
        do {
            /* set up the next one */
            cp = mp->tx_cmds + NCMDS_TX * i;
            out_le16(&cp->xfer_status, 0);
            out_le16(&cp->command, OUTPUT_LAST);
            ++mp->tx_active;
            if (++i >= N_TX_RING)
                i = 0;
        } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        mace_set_timeout(dev);
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}
static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
        goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir));

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(ld_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++mp->stats.tx_errors;
    if (mp->tx_bad_runt) {
        mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
        mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
        cp = mp->tx_cmds + NCMDS_TX * i;
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->cmdptr, virt_to_bus(cp));
        out_le32(&td->control, (RUN << 16) | RUN);
        ++mp->tx_active;
        mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

 out:
    spin_unlock_irqrestore(&mp->lock, flags);
}
static irqreturn_t mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
    return IRQ_HANDLED;
}
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = (struct mace_data *) dev->priv;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    for (i = mp->rx_empty; i != mp->rx_fill; ) {
        cp = mp->rx_cmds + i;
        stat = ld_le16(&cp->xfer_status);
        if ((stat & ACTIVE) == 0) {
            next = i + 1;
            if (next >= N_RX_RING)
                next = 0;
            np = mp->rx_cmds + next;
            if (next != mp->rx_fill
                && (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
                printk(KERN_DEBUG "mace: lost a status word\n");
                ++mace_lost_status;
            } else
                break;
        }
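        /* res_count is the DMA residue, so the difference is the
           number of bytes actually transferred into the buffer */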
        nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
        out_le16(&cp->command, DBDMA_STOP);
        /* got a packet, have a look at it */
        skb = mp->rx_bufs[i];
        if (skb == 0) {
            ++mp->stats.rx_dropped;
        } else if (nb > 8) {
            data = skb->data;
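            /* the chip appends its receive frame status to the
               data; the status word sits in the last bytes */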
            frame_status = (data[nb-3] << 8) + data[nb-4];
            if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
                ++mp->stats.rx_errors;
                if (frame_status & RS_OFLO)
                    ++mp->stats.rx_over_errors;
                if (frame_status & RS_FRAMERR)
                    ++mp->stats.rx_frame_errors;
                if (frame_status & RS_FCSERR)
                    ++mp->stats.rx_crc_errors;
            } else {
                /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
                 * FCS on frames with 802.3 headers. This means that Ethernet
                 * frames have 8 extra octets at the end, while 802.3 frames
                 * have only 4. We need to correctly account for this. */
                if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
                    nb -= 4;
                else    /* Ethernet header; mace includes FCS */
                    nb -= 8;
                skb_put(skb, nb);
                skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                mp->stats.rx_bytes += skb->len;
                netif_rx(skb);
                dev->last_rx = jiffies;
                mp->rx_bufs[i] = NULL;
                ++mp->stats.rx_packets;
            }
        } else {
            ++mp->stats.rx_errors;
            ++mp->stats.rx_length_errors;
        }

        /* advance to next */
        if (++i >= N_RX_RING)
            i = 0;
    }
    mp->rx_empty = i;

    i = mp->rx_fill;
    for (;;) {
        next = i + 1;
        if (next >= N_RX_RING)
            next = 0;
        if (next == mp->rx_empty)
            break;
        cp = mp->rx_cmds + i;
        skb = mp->rx_bufs[i];
        if (skb == 0) {
            skb = dev_alloc_skb(RX_BUFLEN + 2);
            if (skb != 0) {
                skb_reserve(skb, 2);
                mp->rx_bufs[i] = skb;
            }
        }
        st_le16(&cp->req_count, RX_BUFLEN);
        data = skb? skb->data: dummy_buf;
        st_le32(&cp->phy_addr, virt_to_bus(data));
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
        if ((ld_le32(&rd->status) & ACTIVE) != 0) {
            out_le32(&rd->control, (PAUSE << 16) | PAUSE);
            while ((in_le32(&rd->status) & ACTIVE) != 0)
                ;
        }
#endif
        i = next;
    }
    if (i != mp->rx_fill) {
        out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
        mp->rx_fill = i;
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}
static struct of_device_id mace_match[] =
{
    {
        .name           = "mace",
    },
    {},
};
MODULE_DEVICE_TABLE (of, mace_match);
static struct macio_driver mace_driver =
{
    .name           = "mace",
    .match_table    = mace_match,
    .probe          = mace_probe,
    .remove         = mace_remove,
};
static int __init mace_init(void)
{
    return macio_register_driver(&mace_driver);
}
static void __exit mace_cleanup(void)
{
    macio_unregister_driver(&mace_driver);

    kfree(dummy_buf);
    dummy_buf = NULL;
}
MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);