ia64/linux-2.6.18-xen.hg

drivers/net/macmace.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, in which case it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to
ask a domain to balloon to more than its allocation, nor would you
expect it to deliberately over-commit memory by setting balloon
targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
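As an illustration of the retry behaviour described above, a minimal
sketch (the names balloon_process, current_pages, current_target(),
increase_reservation(), decrease_reservation() and balloon_timer are
stand-ins for the driver's own; locking and error detail are elided):

static void balloon_process(void *unused)
{
	int need_sleep = 0;
	long credit;

	do {
		credit = current_target() - current_pages;
		if (credit > 0)
			/* Assumed to keep whatever pages it did get and
			   return nonzero only when it fell short. */
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);
	} while (credit != 0 && !need_sleep);

	/* Retry on a timer rather than recording a "hard limit":
	   memory may yet be freed by other domains ballooning down. */
	if (current_target() != current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);
}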

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
/*
 * Driver for the Macintosh 68K onboard MACE controller with PSC
 * driven DMA. The MACE driver code is derived from mace.c. The
 * Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 1996 Paul Mackerras.
 * Copyright (C) 1998 Alan Cox <alan@redhat.com>
 *
 * Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"
#define N_TX_RING	1
#define N_RX_RING	8
#define N_RX_PAGES	((N_RX_RING * 0x0800 + PAGE_SIZE - 1) / PAGE_SIZE)
#define TX_TIMEOUT	HZ
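
/*
 * With 4 KiB pages, the receive ring of N_RX_RING = 8 slots of 0x0800
 * (2 KiB) each needs 16 KiB, so N_RX_PAGES rounds up to 4.
 */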
/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)
struct mace_data {
	volatile struct mace *mace;
	volatile unsigned char *tx_ring;
	volatile unsigned char *tx_ring_phys;
	volatile unsigned char *rx_ring;
	volatile unsigned char *rx_ring_phys;
	int dma_intr;
	struct net_device_stats stats;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
};

struct mace_frame {
	u16 len;
	u16 status;
	u16 rntpc;
	u16 rcvcc;
	u32 pad1;
	u32 pad2;
	u8 data[1];
	/* And frame continues.. */
};
#define PRIV_BYTES	sizeof(struct mace_data)

extern void psc_debug_dump(void);

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_tx_timeout(struct net_device *dev);
/* Bit-reverse one byte of an ethernet hardware address. */

static int bitrev(int b)
{
	int d = 0, i;

	for (i = 0; i < 8; ++i, b >>= 1) {
		d = (d << 1) | (b & 1);
	}

	return d;
}
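
/*
 * For example, bitrev(0x01) == 0x80 and bitrev(0x80) == 0x01. Later
 * kernels provide a generic bitrev8() in <linux/bitrev.h> for this.
 */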
/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}
/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}
/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}
/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}
/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

struct net_device *mace_probe(int unit)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	static int found = 0;
	int err;

	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
		return ERR_PTR(-ENODEV);

	found = 1;	/* prevent 'finding' one on every device probe */

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0)
		sprintf(dev->name, "eth%d", unit);

	mp = (struct mace_data *) dev->priv;
	dev->base_addr = (u32)MACE_BASE;
	mp->mace = (volatile struct mace *) MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out in a strange boundary and the
	 * bits are reversed.
	 */

	addr = (void *)MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev(addr[j<<4]);
	}
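
	/*
	 * The six address bytes live at PROM offsets 0x00, 0x10, ... 0x50
	 * (hence addr[j << 4]); offsets 0x60 and 0x70 hold the remaining
	 * two bytes that bring the XOR of all eight up to 0xFF.
	 */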
	if (checksum != 0xFF) {
		free_netdev(dev);
		return ERR_PTR(-ENODEV);
	}

	memset(&mp->stats, 0, sizeof(mp->stats));

	dev->open = mace_open;
	dev->stop = mace_close;
	dev->hard_start_xmit = mace_xmit_start;
	dev->tx_timeout = mace_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->get_stats = mace_stats;
	dev->set_multicast_list = mace_set_multicast;
	dev->set_mac_address = mace_set_address;

	printk(KERN_INFO "%s: 68K MACE, hardware address %.2X", dev->name, dev->dev_addr[0]);
	for (j = 1; j < 6; j++) printk(":%.2X", dev->dev_addr[j]);
	printk("\n");

	err = register_netdev(dev);
	if (!err)
		return dev;

	free_netdev(dev);
	return ERR_PTR(err);
}
/*
 * Load the address on a mace controller.
 */

static int mace_set_address(struct net_device *dev, void *addr)
{
	unsigned char *p = addr;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	int i;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	/* load up the hardware address */
	mb->iac = ADDRCHG | PHYADDR;
	while ((mb->iac & ADDRCHG) != 0);

	for (i = 0; i < 6; ++i) {
		mb->padr = dev->dev_addr[i] = p[i];
	}

	mb->maccc = maccc;
	local_irq_restore(flags);

	return 0;
}
/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
#if 0
	int i;

	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "%s: software reset failed!!\n", dev->name);
		return -EAGAIN;
	}
#endif

	mb->biucc = XMTSP_64;
	mb->fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU | XMTBRST | RCVBRST;
	mb->xmtfc = AUTO_PAD_XMIT;
	mb->plscc = PORTSEL_AUI;
	/* mb->utr = RTRD; */

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}
	/* Allocate the DMA ring buffers. Note that __get_free_pages()
	   takes an allocation *order*, so passing N_RX_PAGES here grabs
	   2^N_RX_PAGES pages; the matching free_pages() calls below and
	   in mace_close() use the same order, so it stays consistent. */

	mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, N_RX_PAGES);
	mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);

	if (mp->tx_ring == NULL || mp->rx_ring == NULL) {
		if (mp->rx_ring) free_pages((u32) mp->rx_ring, N_RX_PAGES);
		if (mp->tx_ring) free_pages((u32) mp->tx_ring, 0);
		free_irq(dev->irq, dev);
		free_irq(mp->dma_intr, dev);
		printk(KERN_ERR "%s: unable to allocate DMA buffers\n", dev->name);
		return -ENOMEM;
	}
	mp->rx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->rx_ring);
	mp->tx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->tx_ring);

	/* We want the Rx buffer to be uncached and the Tx buffer to be writethrough */

	kernel_set_cachemode((void *)mp->rx_ring, N_RX_PAGES * PAGE_SIZE, IOMAP_NOCACHE_NONSER);
	kernel_set_cachemode((void *)mp->tx_ring, PAGE_SIZE, IOMAP_WRITETHROUGH);

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

#if 0
	/* load up the hardware address */

	mb->iac = ADDRCHG | PHYADDR;

	while ((mb->iac & ADDRCHG) != 0);

	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i];

	/* clear the multicast filter */
	mb->iac = ADDRCHG | LOGADDR;

	while ((mb->iac & ADDRCHG) != 0);

	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	mb->plscc = PORTSEL_GPSI + ENPLSIO;

	mb->maccc = ENXMT | ENRCV;
	mb->imr = RCVINT;
#endif

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	return 0;
}
/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx */
	mb->imr = 0xFF;		/* disable all irqs */
	mace_dma_off(dev);	/* disable rx and tx dma */

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	free_pages((u32) mp->rx_ring, N_RX_PAGES);
	free_pages((u32) mp->tx_ring, 0);

	return 0;
}
/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;

	/* Stop the queue if the buffer is full */

	if (!mp->tx_count) {
		netif_stop_queue(dev);
		return 1;
	}
	mp->tx_count--;

	mp->stats.tx_packets++;
	mp->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */

	memcpy((void *) mp->tx_ring, skb->data, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;	/* alternate between the two PSC command sets */

	dev_kfree_skb(skb);

	return 0;
}
static struct net_device_stats *mace_stats(struct net_device *dev)
{
	struct mace_data *p = (struct mace_data *) dev->priv;
	return &p->stats;
}
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	int i, j;
	u32 crc;
	u8 maccc;

	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct dev_mc_list *dmi = dev->mc_list;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			for (i = 0; i < dev->mc_count; i++) {
				crc = ether_crc_le(6, dmi->dmi_addr);
				j = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[j >> 3] |= 1 << (j & 7);
				dmi = dmi->next;
			}
		}

		mb->iac = ADDRCHG | LOGADDR;
		while (mb->iac & ADDRCHG);

		for (i = 0; i < 8; ++i) {
			mb->ladrf = multicast_filter[i];
		}
	}

	mb->maccc = maccc;
}
/*
 * Miscellaneous interrupts are handled here. We may end up
 * having to bash the chip on the head for bad errors
 */

static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
{
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO) {
		mp->stats.rx_missed_errors += 256;
	}
	mp->stats.rx_missed_errors += mb->mpc;		/* reading clears it */

	if (intr & RNTPCO) {
		mp->stats.rx_length_errors += 256;
	}
	mp->stats.rx_length_errors += mb->rntpc;	/* reading clears it */

	if (intr & CERR) {
		++mp->stats.tx_heartbeat_errors;
	}
	if (intr & BABBLE) {
		if (mace_babbles++ < 4) {
			printk(KERN_DEBUG "mace: babbling transmitter\n");
		}
	}
	if (intr & JABBER) {
		if (mace_jabbers++ < 4) {
			printk(KERN_DEBUG "mace: jabbering transceiver\n");
		}
	}
}
/*
 * A transmit error has occurred. (We kick the transmit side from
 * the DMA completion)
 */

static void mace_xmit_error(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	u8 xmtfs, xmtrc;

	xmtfs = mb->xmtfs;
	xmtrc = mb->xmtrc;

	if (xmtfs & XMTSV) {
		if (xmtfs & UFLO) {
			printk("%s: DMA underrun.\n", dev->name);
			mp->stats.tx_errors++;
			mp->stats.tx_fifo_errors++;
			mace_txdma_reset(dev);
		}
		if (xmtfs & RTRY) {
			mp->stats.collisions++;
		}
	}
}
/*
 * A receive interrupt occurred.
 */

static void mace_recv_interrupt(struct net_device *dev)
{
/*	struct mace_data *mp = (struct mace_data *) dev->priv; */
/*	volatile struct mace *mb = mp->mace; */
}
/*
 * Process the chip interrupt
 */

static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	u8 ir;

	ir = mb->ir;
	mace_handle_misc_intrs(mp, ir);

	if (ir & XMTINT) {
		mace_xmit_error(dev);
	}
	if (ir & RCVINT) {
		mace_recv_interrupt(dev);
	}
	return IRQ_HANDLED;
}
static void mace_tx_timeout(struct net_device *dev)
{
/*	struct mace_data *mp = (struct mace_data *) dev->priv; */
/*	volatile struct mace *mb = mp->mace; */
}
/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	struct sk_buff *skb;

	if (mf->status & RS_OFLO) {
		printk("%s: fifo overflow.\n", dev->name);
		mp->stats.rx_errors++;
		mp->stats.rx_fifo_errors++;
	}
	if (mf->status & (RS_CLSN | RS_FRAMERR | RS_FCSERR))
		mp->stats.rx_errors++;

	if (mf->status & RS_CLSN) {
		mp->stats.collisions++;
	}
	if (mf->status & RS_FRAMERR) {
		mp->stats.rx_frame_errors++;
	}
	if (mf->status & RS_FCSERR) {
		mp->stats.rx_crc_errors++;
	}

	skb = dev_alloc_skb(mf->len + 2);
	if (!skb) {
		mp->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);	/* align the IP header */
	memcpy(skb_put(skb, mf->len), mf->data, mf->len);

	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->last_rx = jiffies;
	mp->stats.rx_packets++;
	mp->stats.rx_bytes += mf->len;
}
/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does: it re-reads the (volatile) mystery
	   register until two consecutive reads agree, then treats the
	   interrupt as not ours unless bits 0x60000000 are set. */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000))
		return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring + (mp->rx_tail * 0x0800)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
		netif_wake_queue(dev);
	}
	return IRQ_HANDLED;
}
MODULE_LICENSE("GPL");