ia64/linux-2.6.18-xen.hg

view drivers/net/7990.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, so there is temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.
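
For illustration only, a minimal sketch of the retry-on-timer idea
(the names below are hypothetical stand-ins, not the driver's actual
symbols, and the sketch uses a delayed workqueue rather than the
driver's own timer):

    #include <linux/workqueue.h>

    static unsigned long current_pages, target_pages;  /* illustrative state */
    static struct delayed_work balloon_work;

    /* Hypothetical helper: asks Xen for up to nr_pages and returns how
     * many were actually granted; a partial grant is kept, not returned. */
    static unsigned long increase_reservation(unsigned long nr_pages);

    static void balloon_worker(struct work_struct *work)
    {
            if (current_pages < target_pages)
                    current_pages +=
                            increase_reservation(target_pages - current_pages);

            /* No hard limit: if we are still short (e.g. transient memory
             * pressure in the host), simply retry later on a timer. */
            if (current_pages != target_pages)
                    schedule_delayed_work(&balloon_work, HZ);
    }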

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
/*
 * 7990.c -- LANCE ethernet IC generic routines.
 * This is an attempt to separate out the bits of various ethernet
 * drivers that are common because they all use the AMD 7990 LANCE
 * (Local Area Network Controller for Ethernet) chip.
 *
 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
 *
 * Most of this stuff was obtained by looking at other LANCE drivers,
 * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
 * NB: this was made easy by the fact that Jes Sorensen had cleaned up
 * most of a2025 and sunlance with the aim of merging them, so the
 * common code was pretty obvious.
 */
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <asm/irq.h>
/* Used for the temporal inet entries and routing */
#include <linux/socket.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HP300
#include <asm/blinken.h>
#endif

#include "7990.h"

#define WRITERAP(lp,x)	out_be16(lp->base + LANCE_RAP, (x))
#define WRITERDP(lp,x)	out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp)	in_be16(lp->base + LANCE_RDP)

#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
#include "hplance.h"

#undef WRITERAP
#undef WRITERDP
#undef READRDP

#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)

/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp,x)	(lp->writerap(lp,x))
#define WRITERDP(lp,x)	(lp->writerdp(lp,x))
#define READRDP(lp)	(lp->readrdp(lp))

#else

/* These inlines can be used if only CONFIG_HPLANCE is defined */
static inline void WRITERAP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline void WRITERDP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline __u16 READRDP(struct lance_private *lp)
{
	__u16 value;
	do {
		value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
	return value;
}

#endif
#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */

/* debugging output macros, various flavours */
/* #define TEST_HITS */
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
	int t; \
	for (t=0; t < RX_RING_SIZE; t++) { \
		printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
		       ib->brx_ring[t].length,\
		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
	}\
	for (t=0; t < TX_RING_SIZE; t++) { \
		printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
		       ib->btx_ring[t].length,\
		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
	}\
} while (0)
#else
#define PRINT_RINGS()
#endif

/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
static void load_csrs (struct lance_private *lp)
{
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR (aib);

	WRITERAP(lp, LE_CSR1);			/* load address of init block */
	WRITERDP(lp, leptr & 0xFFFF);
	WRITERAP(lp, LE_CSR2);
	WRITERDP(lp, leptr >> 16);
	WRITERAP(lp, LE_CSR3);
	WRITERDP(lp, lp->busmaster_regval);	/* set byteswap/ALEctrl/byte ctrl */

	/* Point back to csr0 */
	WRITERAP(lp, LE_CSR0);
}

/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
/* Set up the Lance Rx and Tx rings and the init block */
static void lance_init_ring (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = LE_MO_PROM;		/* normal, enable Tx & Rx */

	/* Copy the ethernet address to the lance init block
	 * Notice that we do a byteswap if we're big endian.
	 * [I think this is the right criterion; at least, sunlance,
	 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
	 * However, the datasheet says that the BSWAP bit doesn't affect
	 * the init block, so surely it should be low byte first for
	 * everybody? Um.]
	 * We could define the ib->physaddr as three 16bit values and
	 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
	 */
#ifdef __BIG_ENDIAN
	ib->phys_addr [0] = dev->dev_addr [1];
	ib->phys_addr [1] = dev->dev_addr [0];
	ib->phys_addr [2] = dev->dev_addr [3];
	ib->phys_addr [3] = dev->dev_addr [2];
	ib->phys_addr [4] = dev->dev_addr [5];
	ib->phys_addr [5] = dev->dev_addr [4];
#else
	for (i=0; i<6; i++)
		ib->phys_addr[i] = dev->dev_addr[i];
#endif

	if (DEBUG_IRING)
		printk ("TX rings:\n");

	lp->tx_full = 0;
	/* Setup the Tx ring entries */
	for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring [i].tmd0      = leptr;
		ib->btx_ring [i].tmd1_hadr = leptr >> 16;
		ib->btx_ring [i].tmd1_bits = 0;
		ib->btx_ring [i].length    = 0xf000;	/* The ones required by tmd2 */
		ib->btx_ring [i].misc      = 0;
		if (DEBUG_IRING)
			printk ("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (DEBUG_IRING)
		printk ("RX rings:\n");
	for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring [i].rmd0      = leptr;
		ib->brx_ring [i].rmd1_hadr = leptr >> 16;
		ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
		/* 0xf000 == bits that must be one (reserved, presumably) */
		ib->brx_ring [i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring [i].mblength  = 0;
		if (DEBUG_IRING)
			printk ("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (DEBUG_IRING)
		printk ("RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (DEBUG_IRING)
		printk ("TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;
	PRINT_RINGS();
}

/* LANCE must be STOPped before we do this, too... */
static int init_restart_lance (struct lance_private *lp)
{
	int i;

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_INIT);

	/* Need a hook here for sunlance ledma stuff */

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
		printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
		return -1;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	WRITERDP(lp, LE_C0_IDON);
	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);

	return 0;
}

static int lance_reset (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int status;

	/* Stop the lance */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	load_csrs (lp);
	lance_init_ring (dev);
	dev->trans_start = jiffies;
	status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
	printk ("Lance restart=%d\n", status);
#endif
	return status;
}

static int lance_rx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;
	int len = 0;			/* XXX shut up gcc warnings */
	struct sk_buff *skb = 0;	/* XXX shut up gcc warnings */
#ifdef TEST_HITS
	int i;
#endif

#ifdef TEST_HITS
	printk ("[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk ("]");
#endif
#ifdef CONFIG_HP300
	blinken_leds(0x40, 0);
#endif
	WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);	/* ack Rx int, reenable ints */
	for (rd = &ib->brx_ring [lp->rx_new];	/* For each Rx ring we own... */
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring [lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			lp->stats.rx_over_errors++;
			lp->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
			if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP) lp->stats.rx_errors++;
		} else {
			len = (rd->mblength & 0xfff) - 4;
			skb = dev_alloc_skb (len+2);

			if (skb == 0) {
				printk ("%s: Memory squeeze, deferring packet.\n",
					dev->name);
				lp->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb->dev = dev;
			skb_reserve (skb, 2);	/* 16 byte align */
			skb_put (skb, len);	/* make room */
			eth_copy_and_sum(skb,
					 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
					 len, 0);
			skb->protocol = eth_type_trans (skb, dev);
			netif_rx (skb);
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}

static int lance_tx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

#ifdef CONFIG_HP300
	blinken_leds(0x80, 0);
#endif
	/* csr0 is 2f3 */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring [i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			lp->stats.tx_errors++;
			if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				lp->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk("%s: Carrier Lost, trying %s\n",
					       dev->name, lp->tpe?"TPE":"AUI");
					/* Stop the lance */
					WRITERAP(lp, LE_CSR0);
					WRITERDP(lp, LE_C0_STOP);
					lance_init_ring (dev);
					load_csrs (lp);
					init_restart_lance (lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				lp->stats.tx_fifo_errors++;

				printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
					dev->name);
				/* Stop the lance */
				WRITERAP(lp, LE_CSR0);
				WRITERDP(lp, LE_C0_STOP);
				lance_init_ring (dev);
				load_csrs (lp);
				init_restart_lance (lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				lp->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				lp->stats.collisions += 2;

			lp->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	return 0;
}

static irqreturn_t
lance_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

	spin_lock (&lp->devlock);

	WRITERAP(lp, LE_CSR0);		/* LANCE Controller Status */
	csr0 = READRDP(lp);

	PRINT_RINGS();

	if (!(csr0 & LE_C0_INTR)) {	/* Check if any interrupt has */
		spin_unlock (&lp->devlock);
		return IRQ_NONE;	/* been generated by the Lance. */
	}

	/* Acknowledge all the interrupt sources ASAP */
	WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx (dev);

	if (csr0 & LE_C0_TINT)
		lance_tx (dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		lp->stats.tx_errors++;		/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		lp->stats.rx_errors++;		/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk("%s: Bus master arbitration failure, status %4.4x.\n",
		       dev->name, csr0);
		/* Restart the chip. */
		WRITERDP(lp, LE_C0_STRT);
	}

	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
		lp->tx_full = 0;
		netif_wake_queue (dev);
	}

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}

int lance_open (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int res;

	/* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
	if (request_irq(lp->irq, lance_interrupt, 0, lp->name, dev))
		return -EAGAIN;

	res = lance_reset(dev);
	spin_lock_init(&lp->devlock);
	netif_start_queue (dev);

	return res;
}

int lance_close (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue (dev);

	/* Stop the LANCE */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	free_irq(lp->irq, dev);

	return 0;
}

void lance_tx_timeout(struct net_device *dev)
{
	printk("lance_tx_timeout\n");
	lance_reset(dev);
	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}

int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	static int outs;
	unsigned long flags;

	if (!TX_BUFFS_AVAIL)
		return -1;

	netif_stop_queue (dev);

	skblen = skb->len;

#ifdef DEBUG_DRIVER
	/* dump the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk ("\n");
			printk ("%2.2x ", skb->data [i]);
		}
	}
#endif
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring [entry].length = (-len) | 0xf000;
	ib->btx_ring [entry].misc = 0;

	if (skb->len < ETH_ZLEN)
		memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
	memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;

	outs++;
	/* Kick the lance: transmit now */
	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
	dev->trans_start = jiffies;
	dev_kfree_skb (skb);

	spin_lock_irqsave (&lp->devlock, flags);
	if (TX_BUFFS_AVAIL)
		netif_start_queue (dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore (&lp->devlock, flags);

	return 0;
}

struct net_device_stats *lance_get_stats (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	return &lp->stats;
}

/* taken from the depca driver via a2065.c */
static void lance_load_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct dev_mc_list *dmi=dev->mc_list;
	char *addrs;
	int i;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI){
		ib->filter [0] = 0xffffffff;
		ib->filter [1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;

	/* Add addresses */
	for (i = 0; i < dev->mc_count; i++){
		addrs = dmi->dmi_addr;
		dmi = dmi->next;

		/* multicast address? */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(6, addrs);
		crc = crc >> 26;
		mcast_table [crc >> 4] |= 1 << (crc & 0xf);
	}
	return;
}

void lance_set_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int stopped;

	stopped = netif_queue_stopped(dev);
	if (!stopped)
		netif_stop_queue (dev);

	while (lp->tx_old != lp->tx_new)
		schedule();

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);
	lance_init_ring (dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast (dev);
	}
	load_csrs (lp);
	init_restart_lance (lp);

	if (!stopped)
		netif_start_queue (dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	spin_lock (&lp->devlock);
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STRT);
	spin_unlock (&lp->devlock);
	lance_interrupt(dev->irq, dev, NULL);
}
#endif

MODULE_LICENSE("GPL");