ia64/linux-2.6.18-xen.hg

view drivers/net/sunlance.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, so it is desirable to keep attempting to
reach the target in case memory becomes available. The most likely
scenario is that some guests are ballooning down while others are
ballooning up, creating temporary memory pressure while things
stabilise. You would not expect a well-behaved toolstack to ask a
domain to balloon to more than its allocation, nor would you expect it
to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
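To make the description above concrete, here is a minimal sketch of the
retry-on-timer idea. It is illustrative only and is not the code from this
tree: every identifier below (current_pages, target_pages,
increase_reservation, decrease_reservation, balloon_timer,
balloon_try_to_reach_target) is a placeholder standing in for whatever the
driver actually uses.

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Placeholder state and helpers for the sketch, not the real driver. */
static unsigned long current_pages, target_pages;
static struct timer_list balloon_timer;
static long increase_reservation(long nr_pages);	/* may grant fewer pages */
static long decrease_reservation(long nr_pages);

static void balloon_try_to_reach_target(void)
{
	long credit = target_pages - current_pages;	/* pages still wanted */

	if (credit > 0) {
		/* Under host memory pressure this may only partially succeed;
		 * keep whatever pages were granted rather than handing them back.
		 */
		current_pages += increase_reservation(credit);
	} else if (credit < 0) {
		current_pages -= decrease_reservation(-credit);
	}

	/* No "hard limit": if the target has not been reached, re-arm the
	 * retry timer, just as the driver already does when ballooning down.
	 */
	if (current_pages != target_pages)
		mod_timer(&balloon_timer, jiffies + HZ);
}

The point is simply that a partial grant is kept and the worker re-arms
itself, instead of latching a hard limit and waiting for the target to be
written again.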
line source
1 /* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $
2 * lance.c: Linux/Sparc/Lance driver
3 *
4 * Written 1995, 1996 by Miguel de Icaza
5 * Sources:
6 * The Linux depca driver
7 * The Linux lance driver.
8 * The Linux skeleton driver.
9 * The NetBSD Sparc/Lance driver.
10 * Theo de Raadt (deraadt@openbsd.org)
11 * NCR92C990 Lan Controller manual
12 *
13 * 1.4:
14 * Added support to run with a ledma on the Sun4m
15 *
16 * 1.5:
17 * Added multiple card detection.
18 *
19 * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost
20 * (ecd@skynet.be)
21 *
22 * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost
23 * (ecd@skynet.be)
24 *
25 * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller
26 * (davem@caip.rutgers.edu)
27 *
28 * 5/29/96: override option 'tpe-link-test?', if it is 'false', as
29 * this disables auto carrier detection on sun4m. Eddie C. Dost
30 * (ecd@skynet.be)
31 *
32 * 1.7:
33 * 6/26/96: Bug fix for multiple ledmas, miguel.
34 *
35 * 1.8:
36 * Stole multicast code from depca.c, fixed lance_tx.
37 *
38 * 1.9:
39 * 8/21/96: Fixed the multicast code (Pedro Roque)
40 *
41 * 8/28/96: Send fake packet in lance_open() if auto_select is true,
42 * so we can detect the carrier loss condition in time.
43 * Eddie C. Dost (ecd@skynet.be)
44 *
45 * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an
46 * MNA trap during chksum_partial_copy(). (ecd@skynet.be)
47 *
48 * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be)
49 *
50 * 12/22/96: Don't loop forever in lance_rx() on incomplete packets.
51 * This was the sun4c killer. Shit, stupid bug.
52 * (ecd@skynet.be)
53 *
54 * 1.10:
55 * 1/26/97: Modularize driver. (ecd@skynet.be)
56 *
57 * 1.11:
58 * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz)
59 *
60 * 1.12:
61 * 11/3/99: Fixed SMP race in lance_start_xmit found by davem.
62 * Anton Blanchard (anton@progsoc.uts.edu.au)
63 * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces.
64 * David S. Miller (davem@redhat.com)
65 * 2.01:
66 * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com)
67 *
68 */
70 #undef DEBUG_DRIVER
72 static char lancestr[] = "LANCE";
74 #include <linux/module.h>
75 #include <linux/kernel.h>
76 #include <linux/types.h>
77 #include <linux/fcntl.h>
78 #include <linux/interrupt.h>
79 #include <linux/ioport.h>
80 #include <linux/in.h>
81 #include <linux/slab.h>
82 #include <linux/string.h>
83 #include <linux/delay.h>
84 #include <linux/init.h>
85 #include <linux/crc32.h>
86 #include <linux/errno.h>
87 #include <linux/socket.h> /* Used for the temporal inet entries and routing */
88 #include <linux/route.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/ethtool.h>
93 #include <linux/bitops.h>
95 #include <asm/system.h>
96 #include <asm/io.h>
97 #include <asm/dma.h>
98 #include <asm/pgtable.h>
99 #include <asm/byteorder.h> /* Used by the checksum routines */
100 #include <asm/idprom.h>
101 #include <asm/sbus.h>
102 #include <asm/openprom.h>
103 #include <asm/oplib.h>
104 #include <asm/auxio.h> /* For tpe-link-test? setting */
105 #include <asm/irq.h>
107 #define DRV_NAME "sunlance"
108 #define DRV_VERSION "2.02"
109 #define DRV_RELDATE "8/24/03"
110 #define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)"
112 static char version[] =
113 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
115 MODULE_VERSION(DRV_VERSION);
116 MODULE_AUTHOR(DRV_AUTHOR);
117 MODULE_DESCRIPTION("Sun Lance ethernet driver");
118 MODULE_LICENSE("GPL");
120 /* Define: 2^4 Tx buffers and 2^4 Rx buffers */
121 #ifndef LANCE_LOG_TX_BUFFERS
122 #define LANCE_LOG_TX_BUFFERS 4
123 #define LANCE_LOG_RX_BUFFERS 4
124 #endif
126 #define LE_CSR0 0
127 #define LE_CSR1 1
128 #define LE_CSR2 2
129 #define LE_CSR3 3
131 #define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
133 #define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
134 #define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
135 #define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
136 #define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
137 #define LE_C0_MERR 0x0800 /* ME: Memory error */
138 #define LE_C0_RINT 0x0400 /* Received interrupt */
139 #define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
140 #define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
141 #define LE_C0_INTR 0x0080 /* Interrupt or error */
142 #define LE_C0_INEA 0x0040 /* Interrupt enable */
143 #define LE_C0_RXON 0x0020 /* Receiver on */
144 #define LE_C0_TXON 0x0010 /* Transmitter on */
145 #define LE_C0_TDMD 0x0008 /* Transmitter demand */
146 #define LE_C0_STOP 0x0004 /* Stop the card */
147 #define LE_C0_STRT 0x0002 /* Start the card */
148 #define LE_C0_INIT 0x0001 /* Init the card */
150 #define LE_C3_BSWP 0x4 /* SWAP */
151 #define LE_C3_ACON 0x2 /* ALE Control */
152 #define LE_C3_BCON 0x1 /* Byte control */
154 /* Receive message descriptor 1 */
155 #define LE_R1_OWN 0x80 /* Who owns the entry */
156 #define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */
157 #define LE_R1_FRA 0x20 /* FRA: Frame error */
158 #define LE_R1_OFL 0x10 /* OFL: Frame overflow */
159 #define LE_R1_CRC 0x08 /* CRC error */
160 #define LE_R1_BUF 0x04 /* BUF: Buffer error */
161 #define LE_R1_SOP 0x02 /* Start of packet */
162 #define LE_R1_EOP 0x01 /* End of packet */
163 #define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
165 #define LE_T1_OWN 0x80 /* Lance owns the packet */
166 #define LE_T1_ERR 0x40 /* Error summary */
167 #define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
168 #define LE_T1_EONE 0x08 /* Error: one retry needed */
169 #define LE_T1_EDEF 0x04 /* Error: deferred */
170 #define LE_T1_SOP 0x02 /* Start of packet */
171 #define LE_T1_EOP 0x01 /* End of packet */
172 #define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
174 #define LE_T3_BUF 0x8000 /* Buffer error */
175 #define LE_T3_UFL 0x4000 /* Error underflow */
176 #define LE_T3_LCOL 0x1000 /* Error late collision */
177 #define LE_T3_CLOS 0x0800 /* Error carrier loss */
178 #define LE_T3_RTY 0x0400 /* Error retry */
179 #define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
181 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
182 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
183 #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
184 #define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK)
186 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
187 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
188 #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
189 #define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK)
191 #define PKT_BUF_SZ 1544
192 #define RX_BUFF_SIZE PKT_BUF_SZ
193 #define TX_BUFF_SIZE PKT_BUF_SZ
195 struct lance_rx_desc {
196 u16 rmd0; /* low address of packet */
197 u8 rmd1_bits; /* descriptor bits */
198 u8 rmd1_hadr; /* high address of packet */
199 s16 length; /* This length is 2s complement (negative)!
200 * Buffer length
201 */
202 u16 mblength; /* This is the actual number of bytes received */
203 };
205 struct lance_tx_desc {
206 u16 tmd0; /* low address of packet */
207 u8 tmd1_bits; /* descriptor bits */
208 u8 tmd1_hadr; /* high address of packet */
209 s16 length; /* Length is 2s complement (negative)! */
210 u16 misc;
211 };
213 /* The LANCE initialization block, described in databook. */
214 /* On the Sparc, this block should be on a DMA region */
215 struct lance_init_block {
216 u16 mode; /* Pre-set mode (reg. 15) */
217 u8 phys_addr[6]; /* Physical ethernet address */
218 u32 filter[2]; /* Multicast filter. */
220 /* Receive and transmit ring base, along with extra bits. */
221 u16 rx_ptr; /* receive descriptor addr */
222 u16 rx_len; /* receive len and high addr */
223 u16 tx_ptr; /* transmit descriptor addr */
224 u16 tx_len; /* transmit len and high addr */
226 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
227 struct lance_rx_desc brx_ring[RX_RING_SIZE];
228 struct lance_tx_desc btx_ring[TX_RING_SIZE];
230 u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
231 u8 pad[2]; /* align rx_buf for copy_and_sum(). */
232 u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
233 };
235 #define libdesc_offset(rt, elem) \
236 ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
238 #define libbuff_offset(rt, elem) \
239 ((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
241 struct lance_private {
242 void __iomem *lregs; /* Lance RAP/RDP regs. */
243 void __iomem *dregs; /* DMA controller regs. */
244 struct lance_init_block __iomem *init_block_iomem;
245 struct lance_init_block *init_block_mem;
247 spinlock_t lock;
249 int rx_new, tx_new;
250 int rx_old, tx_old;
252 struct net_device_stats stats;
253 struct sbus_dma *ledma; /* If set this points to ledma */
254 char tpe; /* cable-selection is TPE */
255 char auto_select; /* cable-selection by carrier */
256 char burst_sizes; /* ledma SBus burst sizes */
257 char pio_buffer; /* init block in PIO space? */
259 unsigned short busmaster_regval;
261 void (*init_ring)(struct net_device *);
262 void (*rx)(struct net_device *);
263 void (*tx)(struct net_device *);
265 char *name;
266 dma_addr_t init_block_dvma;
267 struct net_device *dev; /* Backpointer */
268 struct sbus_dev *sdev;
269 struct timer_list multicast_timer;
270 };
272 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
273 lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
274 lp->tx_old - lp->tx_new-1)
276 /* Lance registers. */
277 #define RDP 0x00UL /* register data port */
278 #define RAP 0x02UL /* register address port */
279 #define LANCE_REG_SIZE 0x04UL
281 #define STOP_LANCE(__lp) \
282 do { void __iomem *__base = (__lp)->lregs; \
283 sbus_writew(LE_CSR0, __base + RAP); \
284 sbus_writew(LE_C0_STOP, __base + RDP); \
285 } while (0)
287 int sparc_lance_debug = 2;
289 /* The Lance uses 24 bit addresses */
290 /* On the Sun4c the DVMA will provide the remaining bytes for us */
291 /* On the Sun4m we have to instruct the ledma to provide them */
292 /* Even worse, on scsi/ether SBUS cards, the init block and the
293 * transmit/receive buffers are addressed as offsets from absolute
294 * zero on the lebuffer PIO area. -DaveM
295 */
297 #define LANCE_ADDR(x) ((long)(x) & ~0xff000000)
299 /* Load the CSR registers */
300 static void load_csrs(struct lance_private *lp)
301 {
302 u32 leptr;
304 if (lp->pio_buffer)
305 leptr = 0;
306 else
307 leptr = LANCE_ADDR(lp->init_block_dvma);
309 sbus_writew(LE_CSR1, lp->lregs + RAP);
310 sbus_writew(leptr & 0xffff, lp->lregs + RDP);
311 sbus_writew(LE_CSR2, lp->lregs + RAP);
312 sbus_writew(leptr >> 16, lp->lregs + RDP);
313 sbus_writew(LE_CSR3, lp->lregs + RAP);
314 sbus_writew(lp->busmaster_regval, lp->lregs + RDP);
316 /* Point back to csr0 */
317 sbus_writew(LE_CSR0, lp->lregs + RAP);
318 }
320 /* Setup the Lance Rx and Tx rings */
321 static void lance_init_ring_dvma(struct net_device *dev)
322 {
323 struct lance_private *lp = netdev_priv(dev);
324 struct lance_init_block *ib = lp->init_block_mem;
325 dma_addr_t aib = lp->init_block_dvma;
326 __u32 leptr;
327 int i;
329 /* Lock out other processes while setting up hardware */
330 netif_stop_queue(dev);
331 lp->rx_new = lp->tx_new = 0;
332 lp->rx_old = lp->tx_old = 0;
334 /* Copy the ethernet address to the lance init block
335 * Note that on the sparc you need to swap the ethernet address.
336 */
337 ib->phys_addr [0] = dev->dev_addr [1];
338 ib->phys_addr [1] = dev->dev_addr [0];
339 ib->phys_addr [2] = dev->dev_addr [3];
340 ib->phys_addr [3] = dev->dev_addr [2];
341 ib->phys_addr [4] = dev->dev_addr [5];
342 ib->phys_addr [5] = dev->dev_addr [4];
344 /* Setup the Tx ring entries */
345 for (i = 0; i <= TX_RING_SIZE; i++) {
346 leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
347 ib->btx_ring [i].tmd0 = leptr;
348 ib->btx_ring [i].tmd1_hadr = leptr >> 16;
349 ib->btx_ring [i].tmd1_bits = 0;
350 ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
351 ib->btx_ring [i].misc = 0;
352 }
354 /* Setup the Rx ring entries */
355 for (i = 0; i < RX_RING_SIZE; i++) {
356 leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i));
358 ib->brx_ring [i].rmd0 = leptr;
359 ib->brx_ring [i].rmd1_hadr = leptr >> 16;
360 ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
361 ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
362 ib->brx_ring [i].mblength = 0;
363 }
365 /* Setup the initialization block */
367 /* Setup rx descriptor pointer */
368 leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0));
369 ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
370 ib->rx_ptr = leptr;
372 /* Setup tx descriptor pointer */
373 leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0));
374 ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
375 ib->tx_ptr = leptr;
376 }
378 static void lance_init_ring_pio(struct net_device *dev)
379 {
380 struct lance_private *lp = netdev_priv(dev);
381 struct lance_init_block __iomem *ib = lp->init_block_iomem;
382 u32 leptr;
383 int i;
385 /* Lock out other processes while setting up hardware */
386 netif_stop_queue(dev);
387 lp->rx_new = lp->tx_new = 0;
388 lp->rx_old = lp->tx_old = 0;
390 /* Copy the ethernet address to the lance init block
391 * Note that on the sparc you need to swap the ethernet address.
392 */
393 sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]);
394 sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]);
395 sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]);
396 sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]);
397 sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]);
398 sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
400 /* Setup the Tx ring entries */
401 for (i = 0; i <= TX_RING_SIZE; i++) {
402 leptr = libbuff_offset(tx_buf, i);
403 sbus_writew(leptr, &ib->btx_ring [i].tmd0);
404 sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
405 sbus_writeb(0, &ib->btx_ring [i].tmd1_bits);
407 /* The ones required by tmd2 */
408 sbus_writew(0xf000, &ib->btx_ring [i].length);
409 sbus_writew(0, &ib->btx_ring [i].misc);
410 }
412 /* Setup the Rx ring entries */
413 for (i = 0; i < RX_RING_SIZE; i++) {
414 leptr = libbuff_offset(rx_buf, i);
416 sbus_writew(leptr, &ib->brx_ring [i].rmd0);
417 sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr);
418 sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits);
419 sbus_writew(-RX_BUFF_SIZE|0xf000,
420 &ib->brx_ring [i].length);
421 sbus_writew(0, &ib->brx_ring [i].mblength);
422 }
424 /* Setup the initialization block */
426 /* Setup rx descriptor pointer */
427 leptr = libdesc_offset(brx_ring, 0);
428 sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16),
429 &ib->rx_len);
430 sbus_writew(leptr, &ib->rx_ptr);
432 /* Setup tx descriptor pointer */
433 leptr = libdesc_offset(btx_ring, 0);
434 sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16),
435 &ib->tx_len);
436 sbus_writew(leptr, &ib->tx_ptr);
437 }
439 static void init_restart_ledma(struct lance_private *lp)
440 {
441 u32 csr = sbus_readl(lp->dregs + DMA_CSR);
443 if (!(csr & DMA_HNDL_ERROR)) {
444 /* E-Cache draining */
445 while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
446 barrier();
447 }
449 csr = sbus_readl(lp->dregs + DMA_CSR);
450 csr &= ~DMA_E_BURSTS;
451 if (lp->burst_sizes & DMA_BURST32)
452 csr |= DMA_E_BURST32;
453 else
454 csr |= DMA_E_BURST16;
456 csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);
458 if (lp->tpe)
459 csr |= DMA_EN_ENETAUI;
460 else
461 csr &= ~DMA_EN_ENETAUI;
462 udelay(20);
463 sbus_writel(csr, lp->dregs + DMA_CSR);
464 udelay(200);
465 }
467 static int init_restart_lance(struct lance_private *lp)
468 {
469 u16 regval = 0;
470 int i;
472 if (lp->dregs)
473 init_restart_ledma(lp);
475 sbus_writew(LE_CSR0, lp->lregs + RAP);
476 sbus_writew(LE_C0_INIT, lp->lregs + RDP);
478 /* Wait for the lance to complete initialization */
479 for (i = 0; i < 100; i++) {
480 regval = sbus_readw(lp->lregs + RDP);
482 if (regval & (LE_C0_ERR | LE_C0_IDON))
483 break;
484 barrier();
485 }
486 if (i == 100 || (regval & LE_C0_ERR)) {
487 printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
488 i, regval);
489 if (lp->dregs)
490 printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR));
491 return -1;
492 }
494 /* Clear IDON by writing a "1", enable interrupts and start lance */
495 sbus_writew(LE_C0_IDON, lp->lregs + RDP);
496 sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP);
498 if (lp->dregs) {
499 u32 csr = sbus_readl(lp->dregs + DMA_CSR);
501 csr |= DMA_INT_ENAB;
502 sbus_writel(csr, lp->dregs + DMA_CSR);
503 }
505 return 0;
506 }
508 static void lance_rx_dvma(struct net_device *dev)
509 {
510 struct lance_private *lp = netdev_priv(dev);
511 struct lance_init_block *ib = lp->init_block_mem;
512 struct lance_rx_desc *rd;
513 u8 bits;
514 int len, entry = lp->rx_new;
515 struct sk_buff *skb;
517 for (rd = &ib->brx_ring [entry];
518 !((bits = rd->rmd1_bits) & LE_R1_OWN);
519 rd = &ib->brx_ring [entry]) {
521 /* We got an incomplete frame? */
522 if ((bits & LE_R1_POK) != LE_R1_POK) {
523 lp->stats.rx_over_errors++;
524 lp->stats.rx_errors++;
525 } else if (bits & LE_R1_ERR) {
526 /* Count only the end frame as a rx error,
527 * not the beginning
528 */
529 if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
530 if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
531 if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
532 if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
533 if (bits & LE_R1_EOP) lp->stats.rx_errors++;
534 } else {
535 len = (rd->mblength & 0xfff) - 4;
536 skb = dev_alloc_skb(len + 2);
538 if (skb == NULL) {
539 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
540 dev->name);
541 lp->stats.rx_dropped++;
542 rd->mblength = 0;
543 rd->rmd1_bits = LE_R1_OWN;
544 lp->rx_new = RX_NEXT(entry);
545 return;
546 }
548 lp->stats.rx_bytes += len;
550 skb->dev = dev;
551 skb_reserve(skb, 2); /* 16 byte align */
552 skb_put(skb, len); /* make room */
553 eth_copy_and_sum(skb,
554 (unsigned char *)&(ib->rx_buf [entry][0]),
555 len, 0);
556 skb->protocol = eth_type_trans(skb, dev);
557 netif_rx(skb);
558 dev->last_rx = jiffies;
559 lp->stats.rx_packets++;
560 }
562 /* Return the packet to the pool */
563 rd->mblength = 0;
564 rd->rmd1_bits = LE_R1_OWN;
565 entry = RX_NEXT(entry);
566 }
568 lp->rx_new = entry;
569 }
571 static void lance_tx_dvma(struct net_device *dev)
572 {
573 struct lance_private *lp = netdev_priv(dev);
574 struct lance_init_block *ib = lp->init_block_mem;
575 int i, j;
577 spin_lock(&lp->lock);
579 j = lp->tx_old;
580 for (i = j; i != lp->tx_new; i = j) {
581 struct lance_tx_desc *td = &ib->btx_ring [i];
582 u8 bits = td->tmd1_bits;
584 /* If we hit a packet not owned by us, stop */
585 if (bits & LE_T1_OWN)
586 break;
588 if (bits & LE_T1_ERR) {
589 u16 status = td->misc;
591 lp->stats.tx_errors++;
592 if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
593 if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
595 if (status & LE_T3_CLOS) {
596 lp->stats.tx_carrier_errors++;
597 if (lp->auto_select) {
598 lp->tpe = 1 - lp->tpe;
599 printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
600 dev->name, lp->tpe?"TPE":"AUI");
601 STOP_LANCE(lp);
602 lp->init_ring(dev);
603 load_csrs(lp);
604 init_restart_lance(lp);
605 goto out;
606 }
607 }
609 /* Buffer errors and underflows turn off the
610 * transmitter, restart the adapter.
611 */
612 if (status & (LE_T3_BUF|LE_T3_UFL)) {
613 lp->stats.tx_fifo_errors++;
615 printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
616 dev->name);
617 STOP_LANCE(lp);
618 lp->init_ring(dev);
619 load_csrs(lp);
620 init_restart_lance(lp);
621 goto out;
622 }
623 } else if ((bits & LE_T1_POK) == LE_T1_POK) {
624 /*
625 * So we don't count the packet more than once.
626 */
627 td->tmd1_bits = bits & ~(LE_T1_POK);
629 /* One collision before packet was sent. */
630 if (bits & LE_T1_EONE)
631 lp->stats.collisions++;
633 /* More than one collision, be optimistic. */
634 if (bits & LE_T1_EMORE)
635 lp->stats.collisions += 2;
637 lp->stats.tx_packets++;
638 }
640 j = TX_NEXT(j);
641 }
642 lp->tx_old = j;
643 out:
644 if (netif_queue_stopped(dev) &&
645 TX_BUFFS_AVAIL > 0)
646 netif_wake_queue(dev);
648 spin_unlock(&lp->lock);
649 }
651 static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len)
652 {
653 u16 *p16 = (u16 *) skb->data;
654 u32 *p32;
655 u8 *p8;
656 void __iomem *pbuf = piobuf;
658 /* We know here that both src and dest are on a 16bit boundary. */
659 *p16++ = sbus_readw(pbuf);
660 p32 = (u32 *) p16;
661 pbuf += 2;
662 len -= 2;
664 while (len >= 4) {
665 *p32++ = sbus_readl(pbuf);
666 pbuf += 4;
667 len -= 4;
668 }
669 p8 = (u8 *) p32;
670 if (len >= 2) {
671 p16 = (u16 *) p32;
672 *p16++ = sbus_readw(pbuf);
673 pbuf += 2;
674 len -= 2;
675 p8 = (u8 *) p16;
676 }
677 if (len >= 1)
678 *p8 = sbus_readb(pbuf);
679 }
681 static void lance_rx_pio(struct net_device *dev)
682 {
683 struct lance_private *lp = netdev_priv(dev);
684 struct lance_init_block __iomem *ib = lp->init_block_iomem;
685 struct lance_rx_desc __iomem *rd;
686 unsigned char bits;
687 int len, entry;
688 struct sk_buff *skb;
690 entry = lp->rx_new;
691 for (rd = &ib->brx_ring [entry];
692 !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN);
693 rd = &ib->brx_ring [entry]) {
695 /* We got an incomplete frame? */
696 if ((bits & LE_R1_POK) != LE_R1_POK) {
697 lp->stats.rx_over_errors++;
698 lp->stats.rx_errors++;
699 } else if (bits & LE_R1_ERR) {
700 /* Count only the end frame as a rx error,
701 * not the beginning
702 */
703 if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
704 if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
705 if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
706 if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
707 if (bits & LE_R1_EOP) lp->stats.rx_errors++;
708 } else {
709 len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
710 skb = dev_alloc_skb(len + 2);
712 if (skb == NULL) {
713 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
714 dev->name);
715 lp->stats.rx_dropped++;
716 sbus_writew(0, &rd->mblength);
717 sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
718 lp->rx_new = RX_NEXT(entry);
719 return;
720 }
722 lp->stats.rx_bytes += len;
724 skb->dev = dev;
725 skb_reserve (skb, 2); /* 16 byte align */
726 skb_put(skb, len); /* make room */
727 lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
728 skb->protocol = eth_type_trans(skb, dev);
729 netif_rx(skb);
730 dev->last_rx = jiffies;
731 lp->stats.rx_packets++;
732 }
734 /* Return the packet to the pool */
735 sbus_writew(0, &rd->mblength);
736 sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
737 entry = RX_NEXT(entry);
738 }
740 lp->rx_new = entry;
741 }
743 static void lance_tx_pio(struct net_device *dev)
744 {
745 struct lance_private *lp = netdev_priv(dev);
746 struct lance_init_block __iomem *ib = lp->init_block_iomem;
747 int i, j;
749 spin_lock(&lp->lock);
751 j = lp->tx_old;
752 for (i = j; i != lp->tx_new; i = j) {
753 struct lance_tx_desc __iomem *td = &ib->btx_ring [i];
754 u8 bits = sbus_readb(&td->tmd1_bits);
756 /* If we hit a packet not owned by us, stop */
757 if (bits & LE_T1_OWN)
758 break;
760 if (bits & LE_T1_ERR) {
761 u16 status = sbus_readw(&td->misc);
763 lp->stats.tx_errors++;
764 if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
765 if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
767 if (status & LE_T3_CLOS) {
768 lp->stats.tx_carrier_errors++;
769 if (lp->auto_select) {
770 lp->tpe = 1 - lp->tpe;
771 printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
772 dev->name, lp->tpe?"TPE":"AUI");
773 STOP_LANCE(lp);
774 lp->init_ring(dev);
775 load_csrs(lp);
776 init_restart_lance(lp);
777 goto out;
778 }
779 }
781 /* Buffer errors and underflows turn off the
782 * transmitter, restart the adapter.
783 */
784 if (status & (LE_T3_BUF|LE_T3_UFL)) {
785 lp->stats.tx_fifo_errors++;
787 printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
788 dev->name);
789 STOP_LANCE(lp);
790 lp->init_ring(dev);
791 load_csrs(lp);
792 init_restart_lance(lp);
793 goto out;
794 }
795 } else if ((bits & LE_T1_POK) == LE_T1_POK) {
796 /*
797 * So we don't count the packet more than once.
798 */
799 sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits);
801 /* One collision before packet was sent. */
802 if (bits & LE_T1_EONE)
803 lp->stats.collisions++;
805 /* More than one collision, be optimistic. */
806 if (bits & LE_T1_EMORE)
807 lp->stats.collisions += 2;
809 lp->stats.tx_packets++;
810 }
812 j = TX_NEXT(j);
813 }
814 lp->tx_old = j;
816 if (netif_queue_stopped(dev) &&
817 TX_BUFFS_AVAIL > 0)
818 netif_wake_queue(dev);
819 out:
820 spin_unlock(&lp->lock);
821 }
823 static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs)
824 {
825 struct net_device *dev = (struct net_device *)dev_id;
826 struct lance_private *lp = netdev_priv(dev);
827 int csr0;
829 sbus_writew(LE_CSR0, lp->lregs + RAP);
830 csr0 = sbus_readw(lp->lregs + RDP);
832 /* Acknowledge all the interrupt sources ASAP */
833 sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT),
834 lp->lregs + RDP);
836 if ((csr0 & LE_C0_ERR) != 0) {
837 /* Clear the error condition */
838 sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
839 LE_C0_CERR | LE_C0_MERR),
840 lp->lregs + RDP);
841 }
843 if (csr0 & LE_C0_RINT)
844 lp->rx(dev);
846 if (csr0 & LE_C0_TINT)
847 lp->tx(dev);
849 if (csr0 & LE_C0_BABL)
850 lp->stats.tx_errors++;
852 if (csr0 & LE_C0_MISS)
853 lp->stats.rx_errors++;
855 if (csr0 & LE_C0_MERR) {
856 if (lp->dregs) {
857 u32 addr = sbus_readl(lp->dregs + DMA_ADDR);
859 printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n",
860 dev->name, csr0, addr & 0xffffff);
861 } else {
862 printk(KERN_ERR "%s: Memory error, status %04x\n",
863 dev->name, csr0);
864 }
866 sbus_writew(LE_C0_STOP, lp->lregs + RDP);
868 if (lp->dregs) {
869 u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR);
871 dma_csr |= DMA_FIFO_INV;
872 sbus_writel(dma_csr, lp->dregs + DMA_CSR);
873 }
875 lp->init_ring(dev);
876 load_csrs(lp);
877 init_restart_lance(lp);
878 netif_wake_queue(dev);
879 }
881 sbus_writew(LE_C0_INEA, lp->lregs + RDP);
883 return IRQ_HANDLED;
884 }
886 /* Build a fake network packet and send it to ourselves. */
887 static void build_fake_packet(struct lance_private *lp)
888 {
889 struct net_device *dev = lp->dev;
890 int i, entry;
892 entry = lp->tx_new & TX_RING_MOD_MASK;
893 if (lp->pio_buffer) {
894 struct lance_init_block __iomem *ib = lp->init_block_iomem;
895 u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]);
896 struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet;
897 for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++)
898 sbus_writew(0, &packet[i]);
899 for (i = 0; i < 6; i++) {
900 sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]);
901 sbus_writeb(dev->dev_addr[i], &eth->h_source[i]);
902 }
903 sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length);
904 sbus_writew(0, &ib->btx_ring[entry].misc);
905 sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
906 } else {
907 struct lance_init_block *ib = lp->init_block_mem;
908 u16 *packet = (u16 *) &(ib->tx_buf[entry][0]);
909 struct ethhdr *eth = (struct ethhdr *) packet;
910 memset(packet, 0, ETH_ZLEN);
911 for (i = 0; i < 6; i++) {
912 eth->h_dest[i] = dev->dev_addr[i];
913 eth->h_source[i] = dev->dev_addr[i];
914 }
915 ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
916 ib->btx_ring[entry].misc = 0;
917 ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
918 }
919 lp->tx_new = TX_NEXT(entry);
920 }
922 struct net_device *last_dev;
924 static int lance_open(struct net_device *dev)
925 {
926 struct lance_private *lp = netdev_priv(dev);
927 int status = 0;
929 last_dev = dev;
931 STOP_LANCE(lp);
933 if (request_irq(dev->irq, &lance_interrupt, IRQF_SHARED,
934 lancestr, (void *) dev)) {
935 printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq);
936 return -EAGAIN;
937 }
939 /* On the 4m, setup the ledma to provide the upper bits for buffers */
940 if (lp->dregs) {
941 u32 regval = lp->init_block_dvma & 0xff000000;
943 sbus_writel(regval, lp->dregs + DMA_TEST);
944 }
946 /* Set mode and clear multicast filter only at device open,
947 * so that lance_init_ring() called at any error will not
948 * forget multicast filters.
949 *
950 * BTW it is common bug in all lance drivers! --ANK
951 */
952 if (lp->pio_buffer) {
953 struct lance_init_block __iomem *ib = lp->init_block_iomem;
954 sbus_writew(0, &ib->mode);
955 sbus_writel(0, &ib->filter[0]);
956 sbus_writel(0, &ib->filter[1]);
957 } else {
958 struct lance_init_block *ib = lp->init_block_mem;
959 ib->mode = 0;
960 ib->filter [0] = 0;
961 ib->filter [1] = 0;
962 }
964 lp->init_ring(dev);
965 load_csrs(lp);
967 netif_start_queue(dev);
969 status = init_restart_lance(lp);
970 if (!status && lp->auto_select) {
971 build_fake_packet(lp);
972 sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
973 }
975 return status;
976 }
978 static int lance_close(struct net_device *dev)
979 {
980 struct lance_private *lp = netdev_priv(dev);
982 netif_stop_queue(dev);
983 del_timer_sync(&lp->multicast_timer);
985 STOP_LANCE(lp);
987 free_irq(dev->irq, (void *) dev);
988 return 0;
989 }
991 static int lance_reset(struct net_device *dev)
992 {
993 struct lance_private *lp = netdev_priv(dev);
994 int status;
996 STOP_LANCE(lp);
998 /* On the 4m, reset the dma too */
999 if (lp->dregs) {
1000 u32 csr, addr;
1002 printk(KERN_ERR "resetting ledma\n");
1003 csr = sbus_readl(lp->dregs + DMA_CSR);
1004 sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
1005 udelay(200);
1006 sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
1008 addr = lp->init_block_dvma & 0xff000000;
1009 sbus_writel(addr, lp->dregs + DMA_TEST);
1010 }
1011 lp->init_ring(dev);
1012 load_csrs(lp);
1013 dev->trans_start = jiffies;
1014 status = init_restart_lance(lp);
1015 return status;
1016 }
1018 static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len)
1019 {
1020 void __iomem *piobuf = dest;
1021 u32 *p32;
1022 u16 *p16;
1023 u8 *p8;
1025 switch ((unsigned long)src & 0x3) {
1026 case 0:
1027 p32 = (u32 *) src;
1028 while (len >= 4) {
1029 sbus_writel(*p32, piobuf);
1030 p32++;
1031 piobuf += 4;
1032 len -= 4;
1033 }
1034 src = (char *) p32;
1035 break;
1036 case 1:
1037 case 3:
1038 p8 = (u8 *) src;
1039 while (len >= 4) {
1040 u32 val;
1042 val = p8[0] << 24;
1043 val |= p8[1] << 16;
1044 val |= p8[2] << 8;
1045 val |= p8[3];
1046 sbus_writel(val, piobuf);
1047 p8 += 4;
1048 piobuf += 4;
1049 len -= 4;
1050 }
1051 src = (char *) p8;
1052 break;
1053 case 2:
1054 p16 = (u16 *) src;
1055 while (len >= 4) {
1056 u32 val = p16[0]<<16 | p16[1];
1057 sbus_writel(val, piobuf);
1058 p16 += 2;
1059 piobuf += 4;
1060 len -= 4;
1061 }
1062 src = (char *) p16;
1063 break;
1064 };
1065 if (len >= 2) {
1066 u16 val = src[0] << 8 | src[1];
1067 sbus_writew(val, piobuf);
1068 src += 2;
1069 piobuf += 2;
1070 len -= 2;
1071 }
1072 if (len >= 1)
1073 sbus_writeb(src[0], piobuf);
1074 }
1076 static void lance_piozero(void __iomem *dest, int len)
1077 {
1078 void __iomem *piobuf = dest;
1080 if ((unsigned long)piobuf & 1) {
1081 sbus_writeb(0, piobuf);
1082 piobuf += 1;
1083 len -= 1;
1084 if (len == 0)
1085 return;
1086 }
1087 if (len == 1) {
1088 sbus_writeb(0, piobuf);
1089 return;
1090 }
1091 if ((unsigned long)piobuf & 2) {
1092 sbus_writew(0, piobuf);
1093 piobuf += 2;
1094 len -= 2;
1095 if (len == 0)
1096 return;
1097 }
1098 while (len >= 4) {
1099 sbus_writel(0, piobuf);
1100 piobuf += 4;
1101 len -= 4;
1102 }
1103 if (len >= 2) {
1104 sbus_writew(0, piobuf);
1105 piobuf += 2;
1106 len -= 2;
1107 }
1108 if (len >= 1)
1109 sbus_writeb(0, piobuf);
1110 }
1112 static void lance_tx_timeout(struct net_device *dev)
1113 {
1114 struct lance_private *lp = netdev_priv(dev);
1116 printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
1117 dev->name, sbus_readw(lp->lregs + RDP));
1118 lance_reset(dev);
1119 netif_wake_queue(dev);
1120 }
1122 static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
1123 {
1124 struct lance_private *lp = netdev_priv(dev);
1125 int entry, skblen, len;
1127 skblen = skb->len;
1129 len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
1131 spin_lock_irq(&lp->lock);
1133 lp->stats.tx_bytes += len;
1135 entry = lp->tx_new & TX_RING_MOD_MASK;
1136 if (lp->pio_buffer) {
1137 struct lance_init_block __iomem *ib = lp->init_block_iomem;
1138 sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length);
1139 sbus_writew(0, &ib->btx_ring[entry].misc);
1140 lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen);
1141 if (len != skblen)
1142 lance_piozero(&ib->tx_buf[entry][skblen], len - skblen);
1143 sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
1144 } else {
1145 struct lance_init_block *ib = lp->init_block_mem;
1146 ib->btx_ring [entry].length = (-len) | 0xf000;
1147 ib->btx_ring [entry].misc = 0;
1148 memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen);
1149 if (len != skblen)
1150 memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
1151 ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
1152 }
1154 lp->tx_new = TX_NEXT(entry);
1156 if (TX_BUFFS_AVAIL <= 0)
1157 netif_stop_queue(dev);
1159 /* Kick the lance: transmit now */
1160 sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
1162 /* Read back CSR to invalidate the E-Cache.
1163 * This is needed, because DMA_DSBL_WR_INV is set.
1164 */
1165 if (lp->dregs)
1166 sbus_readw(lp->lregs + RDP);
1168 spin_unlock_irq(&lp->lock);
1170 dev->trans_start = jiffies;
1171 dev_kfree_skb(skb);
1173 return 0;
1174 }
1176 static struct net_device_stats *lance_get_stats(struct net_device *dev)
1177 {
1178 struct lance_private *lp = netdev_priv(dev);
1180 return &lp->stats;
1181 }
1183 /* taken from the depca driver */
1184 static void lance_load_multicast(struct net_device *dev)
1185 {
1186 struct lance_private *lp = netdev_priv(dev);
1187 struct dev_mc_list *dmi = dev->mc_list;
1188 char *addrs;
1189 int i;
1190 u32 crc;
1191 u32 val;
1193 /* set all multicast bits */
1194 if (dev->flags & IFF_ALLMULTI)
1195 val = ~0;
1196 else
1197 val = 0;
1199 if (lp->pio_buffer) {
1200 struct lance_init_block __iomem *ib = lp->init_block_iomem;
1201 sbus_writel(val, &ib->filter[0]);
1202 sbus_writel(val, &ib->filter[1]);
1203 } else {
1204 struct lance_init_block *ib = lp->init_block_mem;
1205 ib->filter [0] = val;
1206 ib->filter [1] = val;
1207 }
1209 if (dev->flags & IFF_ALLMULTI)
1210 return;
1212 /* Add addresses */
1213 for (i = 0; i < dev->mc_count; i++) {
1214 addrs = dmi->dmi_addr;
1215 dmi = dmi->next;
1217 /* multicast address? */
1218 if (!(*addrs & 1))
1219 continue;
1220 crc = ether_crc_le(6, addrs);
1221 crc = crc >> 26;
1222 if (lp->pio_buffer) {
1223 struct lance_init_block __iomem *ib = lp->init_block_iomem;
1224 u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter;
1225 u16 tmp = sbus_readw(&mcast_table[crc>>4]);
1226 tmp |= 1 << (crc & 0xf);
1227 sbus_writew(tmp, &mcast_table[crc>>4]);
1228 } else {
1229 struct lance_init_block *ib = lp->init_block_mem;
1230 u16 *mcast_table = (u16 *) &ib->filter;
1231 mcast_table [crc >> 4] |= 1 << (crc & 0xf);
1232 }
1233 }
1234 }
1236 static void lance_set_multicast(struct net_device *dev)
1237 {
1238 struct lance_private *lp = netdev_priv(dev);
1239 struct lance_init_block *ib_mem = lp->init_block_mem;
1240 struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem;
1241 u16 mode;
1243 if (!netif_running(dev))
1244 return;
1246 if (lp->tx_old != lp->tx_new) {
1247 mod_timer(&lp->multicast_timer, jiffies + 4);
1248 netif_wake_queue(dev);
1249 return;
1250 }
1252 netif_stop_queue(dev);
1254 STOP_LANCE(lp);
1255 lp->init_ring(dev);
1257 if (lp->pio_buffer)
1258 mode = sbus_readw(&ib_iomem->mode);
1259 else
1260 mode = ib_mem->mode;
1261 if (dev->flags & IFF_PROMISC) {
1262 mode |= LE_MO_PROM;
1263 if (lp->pio_buffer)
1264 sbus_writew(mode, &ib_iomem->mode);
1265 else
1266 ib_mem->mode = mode;
1267 } else {
1268 mode &= ~LE_MO_PROM;
1269 if (lp->pio_buffer)
1270 sbus_writew(mode, &ib_iomem->mode);
1271 else
1272 ib_mem->mode = mode;
1273 lance_load_multicast(dev);
1274 }
1275 load_csrs(lp);
1276 init_restart_lance(lp);
1277 netif_wake_queue(dev);
1278 }
1280 static void lance_set_multicast_retry(unsigned long _opaque)
1281 {
1282 struct net_device *dev = (struct net_device *) _opaque;
1284 lance_set_multicast(dev);
1285 }
1287 static void lance_free_hwresources(struct lance_private *lp)
1288 {
1289 if (lp->lregs)
1290 sbus_iounmap(lp->lregs, LANCE_REG_SIZE);
1291 if (lp->init_block_iomem) {
1292 sbus_iounmap(lp->init_block_iomem,
1293 sizeof(struct lance_init_block));
1294 } else if (lp->init_block_mem) {
1295 sbus_free_consistent(lp->sdev,
1296 sizeof(struct lance_init_block),
1297 lp->init_block_mem,
1298 lp->init_block_dvma);
1299 }
1300 }
1302 /* Ethtool support... */
1303 static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1304 {
1305 struct lance_private *lp = netdev_priv(dev);
1307 strcpy(info->driver, "sunlance");
1308 strcpy(info->version, "2.02");
1309 sprintf(info->bus_info, "SBUS:%d",
1310 lp->sdev->slot);
1311 }
1313 static u32 sparc_lance_get_link(struct net_device *dev)
1314 {
1315 /* We really do not keep track of this, but this
1316 * is better than not reporting anything at all.
1317 */
1318 return 1;
1319 }
1321 static struct ethtool_ops sparc_lance_ethtool_ops = {
1322 .get_drvinfo = sparc_lance_get_drvinfo,
1323 .get_link = sparc_lance_get_link,
1324 };
1326 static int __init sparc_lance_probe_one(struct sbus_dev *sdev,
1327 struct sbus_dma *ledma,
1328 struct sbus_dev *lebuffer)
1329 {
1330 static unsigned version_printed;
1331 struct net_device *dev;
1332 struct lance_private *lp;
1333 int i;
1335 dev = alloc_etherdev(sizeof(struct lance_private) + 8);
1336 if (!dev)
1337 return -ENOMEM;
1339 lp = netdev_priv(dev);
1340 memset(lp, 0, sizeof(*lp));
1342 if (sparc_lance_debug && version_printed++ == 0)
1343 printk (KERN_INFO "%s", version);
1345 spin_lock_init(&lp->lock);
1347 /* Copy the IDPROM ethernet address to the device structure, later we
1348 * will copy the address in the device structure to the lance
1349 * initialization block.
1350 */
1351 for (i = 0; i < 6; i++)
1352 dev->dev_addr[i] = idprom->id_ethaddr[i];
1354 /* Get the IO region */
1355 lp->lregs = sbus_ioremap(&sdev->resource[0], 0,
1356 LANCE_REG_SIZE, lancestr);
1357 if (!lp->lregs) {
1358 printk(KERN_ERR "SunLance: Cannot map registers.\n");
1359 goto fail;
1360 }
1362 lp->sdev = sdev;
1363 if (lebuffer) {
1364 /* sanity check */
1365 if (lebuffer->resource[0].start & 7) {
1366 printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n");
1367 goto fail;
1368 }
1369 lp->init_block_iomem =
1370 sbus_ioremap(&lebuffer->resource[0], 0,
1371 sizeof(struct lance_init_block), "lebuffer");
1372 if (!lp->init_block_iomem) {
1373 printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n");
1374 goto fail;
1375 }
1376 lp->init_block_dvma = 0;
1377 lp->pio_buffer = 1;
1378 lp->init_ring = lance_init_ring_pio;
1379 lp->rx = lance_rx_pio;
1380 lp->tx = lance_tx_pio;
1381 } else {
1382 lp->init_block_mem =
1383 sbus_alloc_consistent(sdev, sizeof(struct lance_init_block),
1384 &lp->init_block_dvma);
1385 if (!lp->init_block_mem || lp->init_block_dvma == 0) {
1386 printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
1387 goto fail;
1388 }
1389 lp->pio_buffer = 0;
1390 lp->init_ring = lance_init_ring_dvma;
1391 lp->rx = lance_rx_dvma;
1392 lp->tx = lance_tx_dvma;
1393 }
1394 lp->busmaster_regval = prom_getintdefault(sdev->prom_node,
1395 "busmaster-regval",
1396 (LE_C3_BSWP | LE_C3_ACON |
1397 LE_C3_BCON));
1399 lp->name = lancestr;
1400 lp->ledma = ledma;
1402 lp->burst_sizes = 0;
1403 if (lp->ledma) {
1404 char prop[6];
1405 unsigned int sbmask;
1406 u32 csr;
1408 /* Find burst-size property for ledma */
1409 lp->burst_sizes = prom_getintdefault(ledma->sdev->prom_node,
1410 "burst-sizes", 0);
1412 /* ledma may be capable of fast bursts, but sbus may not. */
1413 sbmask = prom_getintdefault(ledma->sdev->bus->prom_node,
1414 "burst-sizes", DMA_BURSTBITS);
1415 lp->burst_sizes &= sbmask;
1417 /* Get the cable-selection property */
1418 memset(prop, 0, sizeof(prop));
1419 prom_getstring(ledma->sdev->prom_node, "cable-selection",
1420 prop, sizeof(prop));
1421 if (prop[0] == 0) {
1422 int topnd, nd;
1424 printk(KERN_INFO "SunLance: using auto-carrier-detection.\n");
1426 /* Is this found at /options .attributes in all
1427 * Prom versions? XXX
1428 */
1429 topnd = prom_getchild(prom_root_node);
1431 nd = prom_searchsiblings(topnd, "options");
1432 if (!nd)
1433 goto no_link_test;
1435 if (!prom_node_has_property(nd, "tpe-link-test?"))
1436 goto no_link_test;
1438 memset(prop, 0, sizeof(prop));
1439 prom_getstring(nd, "tpe-link-test?", prop,
1440 sizeof(prop));
1442 if (strcmp(prop, "true")) {
1443 printk(KERN_NOTICE "SunLance: warning: overriding option "
1444 "'tpe-link-test?'\n");
1445 printk(KERN_NOTICE "SunLance: warning: mail any problems "
1446 "to ecd@skynet.be\n");
1447 auxio_set_lte(AUXIO_LTE_ON);
1448 }
1449 no_link_test:
1450 lp->auto_select = 1;
1451 lp->tpe = 0;
1452 } else if (!strcmp(prop, "aui")) {
1453 lp->auto_select = 0;
1454 lp->tpe = 0;
1455 } else {
1456 lp->auto_select = 0;
1457 lp->tpe = 1;
1458 }
1460 lp->dregs = ledma->regs;
1462 /* Reset ledma */
1463 csr = sbus_readl(lp->dregs + DMA_CSR);
1464 sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
1465 udelay(200);
1466 sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
1467 } else
1468 lp->dregs = NULL;
1470 lp->dev = dev;
1471 SET_MODULE_OWNER(dev);
1472 SET_NETDEV_DEV(dev, &sdev->ofdev.dev);
1473 dev->open = &lance_open;
1474 dev->stop = &lance_close;
1475 dev->hard_start_xmit = &lance_start_xmit;
1476 dev->tx_timeout = &lance_tx_timeout;
1477 dev->watchdog_timeo = 5*HZ;
1478 dev->get_stats = &lance_get_stats;
1479 dev->set_multicast_list = &lance_set_multicast;
1480 dev->ethtool_ops = &sparc_lance_ethtool_ops;
1482 dev->irq = sdev->irqs[0];
1484 dev->dma = 0;
1486 /* We cannot sleep if the chip is busy during a
1487 * multicast list update event, because such events
1488 * can occur from interrupts (ex. IPv6). So we
1489 * use a timer to try again later when necessary. -DaveM
1490 */
1491 init_timer(&lp->multicast_timer);
1492 lp->multicast_timer.data = (unsigned long) dev;
1493 lp->multicast_timer.function = &lance_set_multicast_retry;
1495 if (register_netdev(dev)) {
1496 printk(KERN_ERR "SunLance: Cannot register device.\n");
1497 goto fail;
1498 }
1500 dev_set_drvdata(&sdev->ofdev.dev, lp);
1502 printk(KERN_INFO "%s: LANCE ", dev->name);
1504 for (i = 0; i < 6; i++)
1505 printk("%2.2x%c", dev->dev_addr[i],
1506 i == 5 ? ' ': ':');
1507 printk("\n");
1509 return 0;
1511 fail:
1512 lance_free_hwresources(lp);
1513 free_netdev(dev);
1514 return -ENODEV;
1515 }
1517 /* On 4m, find the associated dma for the lance chip */
1518 static inline struct sbus_dma *find_ledma(struct sbus_dev *sdev)
1519 {
1520 struct sbus_dma *p;
1522 for_each_dvma(p) {
1523 if (p->sdev == sdev)
1524 return p;
1525 }
1526 return NULL;
1527 }
1529 #ifdef CONFIG_SUN4
1531 #include <asm/sun4paddr.h>
1532 #include <asm/machines.h>
1534 /* Find all the lance cards on the system and initialize them */
1535 static struct sbus_dev sun4_sdev;
1536 static int __init sparc_lance_init(void)
1537 {
1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
1540 memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
1542 sun4_sdev.irqs[0] = 6;
1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
1544 }
1545 return -ENODEV;
1546 }
1548 static int __exit sunlance_sun4_remove(void)
1549 {
1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
1551 struct net_device *net_dev = lp->dev;
1553 unregister_netdevice(net_dev);
1555 lance_free_hwresources(lp);
1557 free_netdev(net_dev);
1559 dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
1561 return 0;
1562 }
1564 #else /* !CONFIG_SUN4 */
1566 static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match)
1567 {
1568 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
1569 int err;
1571 if (sdev->parent) {
1572 struct of_device *parent = &sdev->parent->ofdev;
1574 if (!strcmp(parent->node->name, "ledma")) {
1575 struct sbus_dma *ledma = find_ledma(to_sbus_device(&parent->dev));
1577 err = sparc_lance_probe_one(sdev, ledma, NULL);
1578 } else if (!strcmp(parent->node->name, "lebuffer")) {
1579 err = sparc_lance_probe_one(sdev, NULL, to_sbus_device(&parent->dev));
1580 } else
1581 err = sparc_lance_probe_one(sdev, NULL, NULL);
1582 } else
1583 err = sparc_lance_probe_one(sdev, NULL, NULL);
1585 return err;
1586 }
1588 static int __devexit sunlance_sbus_remove(struct of_device *dev)
1589 {
1590 struct lance_private *lp = dev_get_drvdata(&dev->dev);
1591 struct net_device *net_dev = lp->dev;
1593 unregister_netdevice(net_dev);
1595 lance_free_hwresources(lp);
1597 free_netdev(net_dev);
1599 dev_set_drvdata(&dev->dev, NULL);
1601 return 0;
1602 }
1604 static struct of_device_id sunlance_sbus_match[] = {
1605 {
1606 .name = "le",
1607 },
1608 {},
1609 };
1611 MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
1613 static struct of_platform_driver sunlance_sbus_driver = {
1614 .name = "sunlance",
1615 .match_table = sunlance_sbus_match,
1616 .probe = sunlance_sbus_probe,
1617 .remove = __devexit_p(sunlance_sbus_remove),
1618 };
1621 /* Find all the lance cards on the system and initialize them */
1622 static int __init sparc_lance_init(void)
1623 {
1624 return of_register_driver(&sunlance_sbus_driver, &sbus_bus_type);
1625 }
1626 #endif /* !CONFIG_SUN4 */
1628 static void __exit sparc_lance_exit(void)
1629 {
1630 #ifdef CONFIG_SUN4
1631 sunlance_sun4_remove();
1632 #else
1633 of_unregister_driver(&sunlance_sbus_driver);
1634 #endif
1635 }
1637 module_init(sparc_lance_init);
1638 module_exit(sparc_lance_exit);