ia64/linux-2.6.18-xen.hg

view drivers/net/ni65.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
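
A minimal sketch of the retry behaviour this patch describes, in the
style of the balloon driver's worker function (the helper names
current_target(), increase_reservation() and decrease_reservation(),
and their nonzero-on-success return convention, are illustrative
assumptions here, not quotes from the patch):

    static struct timer_list balloon_timer;  /* re-arms the worker */

    static void balloon_process(void *unused)
    {
        int need_sleep = 0;
        long credit = current_target() - current_pages;

        if (credit > 0 && !increase_reservation(credit))
            need_sleep = 1;  /* host under pressure: keep whatever
                                pages we did get, retry later */
        if (credit < 0 && !decrease_reservation(-credit))
            need_sleep = 1;

        /* No "hard limit" any more: while we are still off target,
           simply re-arm the timer, as the shrink path already does. */
        if (current_target() != current_pages && need_sleep)
            mod_timer(&balloon_timer, jiffies + HZ);
    }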

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
/*
 * ni6510 (am7990 'lance' chip) driver for Linux-net-3
 * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
 * copyrights (c) 1994,1995,1996 by M.Hipp
 *
 * This driver can handle the old ni6510 board and the newer ni6510
 * EtherBlaster. (probably it also works with every full NE2100
 * compatible card)
 *
 * To compile as module, type:
 *     gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni65.c
 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
 *
 * This is an extension to the Linux operating system, and is covered by the
 * same GNU General Public License that covers the Linux-kernel.
 *
 * comments/bugs/suggestions can be sent to:
 *   Michael Hipp
 *   email: hippm@informatik.uni-tuebingen.de
 *
 * sources:
 *   some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
 *   and from the original drivers by D.Becker
 *
 * known problems:
 *   - on some PCI boards (including my own) the card/board/ISA-bridge has
 *     problems with bus master DMA. This results in lotsa overruns.
 *     It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
 *     the XMT and RCV_VIA_SKB options .. this reduces driver performance.
 *     Or just play with your BIOS options to optimize ISA-DMA access.
 *     Maybe you also wanna play with the LOW_PERFORMANCE and MID_PERFORMANCE
 *     defines -> please report your experience then
 *   - Harald reported for ASUS SP3G mainboards that you should use
 *     the 'optimal settings' from the user's manual on page 3-12!
 *
 * credits:
 *   thanx to Jason Sullivan for sending me a ni6510 card!
 *   lots of debug runs with ASUS SP3G boards (Intel Saturn) by Harald Koenig
 *
 * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
 *   average: FTP -> 8384421 bytes received in 8.5 seconds
 *            (no RCV_VIA_SKB, no XMT_VIA_SKB, PARANOIA_CHECK, 4 XMIT BUFS, 8 RCV_BUFFS)
 *   peak:    FTP -> 8384421 bytes received in 7.5 seconds
 *            (RCV_VIA_SKB, XMT_VIA_SKB, no PARANOIA_CHECK, 1(!) XMIT BUF, 16 RCV BUFFS)
 */
/*
 * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
 * 96.Sept.29: virt_to_bus stuff added for new memory model
 * 96.April.29: Added Harald Koenig's Patches (MH)
 * 96.April.13: enhanced error handling .. more tests (MH)
 * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
 * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
 * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
 *            hopefully no more 16MB limit
 *
 * 95.Nov.18: multicast tweaked (AC).
 *
 * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
 *
 * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

#include "ni65.h"
/*
 * the current setting allows acceptable performance
 * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
 * the header of this file
 * 'invert' the defines for max. performance. This may cause DMA problems
 * on some boards (e.g on my ASUS SP3G)
 */
#undef XMT_VIA_SKB
#undef RCV_VIA_SKB
#define RCV_PARANOIA_CHECK

#define MID_PERFORMANCE

#if defined( LOW_PERFORMANCE )
static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
static int isa0=5,isa1=5,csr80=0x2810;
#else /* high performance */
static int isa0=4,isa1=4,csr80=0x0017;
#endif
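
/* descriptive note: isa0/isa1 are ISA bus timing values (in units of 50ns,
 * see the comments in ni65_set_performance() below) and csr80 holds the
 * LANCE FIFO watermarks; the presets above trade transfer speed against
 * the DMA stability problems described under 'known problems' in the
 * header. */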
/*
 * a few card/vendor specific defines
 */
#define NI65_ID0    0x00
#define NI65_ID1    0x55
#define NI65_EB_ID0 0x52
#define NI65_EB_ID1 0x44
#define NE2100_ID0  0x57
#define NE2100_ID1  0x57

#define PORT p->cmdr_addr

/*
 * buffer configuration
 */
#if 1
#define RMDNUM 16
#define RMDNUMMASK 0x80000000
#else
#define RMDNUM 8
#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
#endif

#if 0
#define TMDNUM 1
#define TMDNUMMASK 0x00000000
#else
#define TMDNUM 4
#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
#endif

/* slightly oversized */
#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544
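/* descriptive note: 1544 bytes comfortably covers a maximal 1518-byte
 * Ethernet frame (including FCS), hence "slightly oversized" above. */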
/*
 * lance register defines
 */
#define L_DATAREG 0x00
#define L_ADDRREG 0x02
#define L_RESET   0x04
#define L_CONFIG  0x05
#define L_BUSIF   0x06

/*
 * to access the lance/am7990-regs, you have to write
 * reg-number into L_ADDRREG, then you can access it using L_DATAREG
 */
#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

#define INIT_RING_BEFORE_START 0x1
#define FULL_RESET_ON_ERROR    0x2

#if 0
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                      inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) { writereg(val,CSR0); }
#endif
#else
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif
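
/* usage note: writereg(CSR0_STOP,CSR0) first selects CSR0 through
 * L_ADDRREG and then writes CSR0_STOP through L_DATAREG, as described
 * above; readreg(CSR0) reads the selected register back. The disabled
 * (#if 0) variants issue extra dummy inw()s, apparently to give slow
 * boards additional I/O recovery time. */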
static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };

static struct card {
    unsigned char id0,id1;
    short id_offset;
    short total_size;
    short cmd_offset;
    short addr_offset;
    unsigned char *vendor_id;
    char *cardname;
    long config;
} cards[] = {
    {
        .id0 = NI65_ID0,
        .id1 = NI65_ID1,
        .id_offset = 0x0e,
        .total_size = 0x10,
        .cmd_offset = 0x0,
        .addr_offset = 0x8,
        .vendor_id = ni_vendor,
        .cardname = "ni6510",
        .config = 0x1,
    },
    {
        .id0 = NI65_EB_ID0,
        .id1 = NI65_EB_ID1,
        .id_offset = 0x0e,
        .total_size = 0x18,
        .cmd_offset = 0x10,
        .addr_offset = 0x0,
        .vendor_id = ni_vendor,
        .cardname = "ni6510 EtherBlaster",
        .config = 0x2,
    },
    {
        .id0 = NE2100_ID0,
        .id1 = NE2100_ID1,
        .id_offset = 0x0e,
        .total_size = 0x18,
        .cmd_offset = 0x10,
        .addr_offset = 0x0,
        .vendor_id = NULL,
        .cardname = "generic NE2100",
        .config = 0x0,
    },
};
#define NUM_CARDS 3
struct priv
{
    struct rmd rmdhead[RMDNUM];
    struct tmd tmdhead[TMDNUM];
    struct init_block ib;
    int rmdnum;
    int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
    struct sk_buff *recv_skb[RMDNUM];
#else
    void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
    struct sk_buff *tmd_skb[TMDNUM];
#endif
    void *tmdbounce[TMDNUM];
    int tmdbouncenum;
    int lock,xmit_queued;
    struct net_device_stats stats;
    void *self;
    int cmdr_addr;
    int cardno;
    int features;
    spinlock_t ring_lock;
};
static int ni65_probe1(struct net_device *dev,int);
static irqreturn_t ni65_interrupt(int irq, void * dev_id, struct pt_regs *regs);
static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int ni65_open(struct net_device *dev);
static int ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev);
static void ni65_timeout(struct net_device *dev);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
static struct net_device_stats *ni65_get_stats(struct net_device *);
static void set_multicast_list(struct net_device *dev);

static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */

static int debuglevel = 1;
/*
 * set 'performance' registers .. we must STOP lance for that
 */
static void ni65_set_performance(struct priv *p)
{
    writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */

    if( !(cards[p->cardno].config & 0x02) )
        return;

    outw(80,PORT+L_ADDRREG);
    if(inw(PORT+L_ADDRREG) != 80)
        return;

    writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
    outw(0,PORT+L_ADDRREG);
    outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
    outw(1,PORT+L_ADDRREG);
    outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */

    outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
}
/*
 * open interface (up)
 */
static int ni65_open(struct net_device *dev)
{
    struct priv *p = (struct priv *) dev->priv;
    int irqval = request_irq(dev->irq, &ni65_interrupt,0,
                             cards[p->cardno].cardname,dev);
    if (irqval) {
        printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
               dev->name,dev->irq, irqval);
        return -EAGAIN;
    }

    if(ni65_lance_reinit(dev))
    {
        netif_start_queue(dev);
        return 0;
    }
    else
    {
        free_irq(dev->irq,dev);
        return -EAGAIN;
    }
}
/*
 * close interface (down)
 */
static int ni65_close(struct net_device *dev)
{
    struct priv *p = (struct priv *) dev->priv;

    netif_stop_queue(dev);

    outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */

#ifdef XMT_VIA_SKB
    {
        int i;
        for(i=0;i<TMDNUM;i++)
        {
            if(p->tmd_skb[i]) {
                dev_kfree_skb(p->tmd_skb[i]);
                p->tmd_skb[i] = NULL;
            }
        }
    }
#endif
    free_irq(dev->irq,dev);
    return 0;
}
static void cleanup_card(struct net_device *dev)
{
    struct priv *p = (struct priv *) dev->priv;
    disable_dma(dev->dma);
    free_dma(dev->dma);
    release_region(dev->base_addr, cards[p->cardno].total_size);
    ni65_free_buffer(p);
}

/* set: io,irq,dma or set it when calling insmod */
static int irq;
static int io;
static int dma;
/*
 * Probe The Card (not the lance-chip)
 */
struct net_device * __init ni65_probe(int unit)
{
    struct net_device *dev = alloc_etherdev(0);
    static int ports[] = {0x360,0x300,0x320,0x340, 0};
    int *port;
    int err = 0;

    if (!dev)
        return ERR_PTR(-ENOMEM);

    if (unit >= 0) {
        sprintf(dev->name, "eth%d", unit);
        netdev_boot_setup_check(dev);
        irq = dev->irq;
        dma = dev->dma;
    } else {
        dev->base_addr = io;
    }

    if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
        err = ni65_probe1(dev, dev->base_addr);
    } else if (dev->base_addr > 0) { /* Don't probe at all. */
        err = -ENXIO;
    } else {
        for (port = ports; *port && ni65_probe1(dev, *port); port++)
            ;
        if (!*port)
            err = -ENODEV;
    }
    if (err)
        goto out;

    err = register_netdev(dev);
    if (err)
        goto out1;
    return dev;
out1:
    cleanup_card(dev);
out:
    free_netdev(dev);
    return ERR_PTR(err);
}
/*
 * this is the real card probe ..
 */
static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
    int i,j;
    struct priv *p;
    unsigned long flags;

    dev->irq = irq;
    dev->dma = dma;

    for(i=0;i<NUM_CARDS;i++) {
        if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
            continue;
        if(cards[i].id_offset >= 0) {
            if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
               inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
                release_region(ioaddr, cards[i].total_size);
                continue;
            }
        }
        if(cards[i].vendor_id) {
            for(j=0;j<3;j++)
                if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) {
                    release_region(ioaddr, cards[i].total_size);
                    continue;
                }
        }
        break;
    }
    if(i == NUM_CARDS)
        return -ENODEV;

    for(j=0;j<6;j++)
        dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);

    if( (j=ni65_alloc_buffer(dev)) < 0) {
        release_region(ioaddr, cards[i].total_size);
        return j;
    }
    p = (struct priv *) dev->priv;
    p->cmdr_addr = ioaddr + cards[i].cmd_offset;
    p->cardno = i;
    spin_lock_init(&p->ring_lock);

    printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);

    outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
    if( (j=readreg(CSR0)) != 0x4) {
        printk("failed.\n");
        printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
        ni65_free_buffer(p);
        release_region(ioaddr, cards[p->cardno].total_size);
        return -EAGAIN;
    }

    outw(88,PORT+L_ADDRREG);
    if(inw(PORT+L_ADDRREG) == 88) {
        unsigned long v;
        v = inw(PORT+L_DATAREG);
        v <<= 16;
        outw(89,PORT+L_ADDRREG);
        v |= inw(PORT+L_DATAREG);
        printk("Version %#08lx, ",v);
        p->features = INIT_RING_BEFORE_START;
    }
    else {
        printk("ancient LANCE, ");
        p->features = 0x0;
    }

    if(test_bit(0,&cards[i].config)) {
        dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
        dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
        printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
    }
    else {
        if(dev->dma == 0) {
            /* 'stuck test' from lance.c */
            long dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
                                (inb(DMA2_STAT_REG) & 0xf0);
            for(i=1;i<5;i++) {
                int dma = dmatab[i];
                if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
                    continue;

                flags=claim_dma_lock();
                disable_dma(dma);
                set_dma_mode(dma,DMA_MODE_CASCADE);
                enable_dma(dma);
                release_dma_lock(flags);

                ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */

                flags=claim_dma_lock();
                disable_dma(dma);
                free_dma(dma);
                release_dma_lock(flags);

                if(readreg(CSR0) & CSR0_IDON)
                    break;
            }
            if(i == 5) {
                printk("failed.\n");
                printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
                ni65_free_buffer(p);
                release_region(ioaddr, cards[p->cardno].total_size);
                return -EAGAIN;
            }
            dev->dma = dmatab[i];
            printk("DMA %d (autodetected), ",dev->dma);
        }
        else
            printk("DMA %d (assigned), ",dev->dma);

        if(dev->irq < 2)
        {
            unsigned long irq_mask;

            ni65_init_lance(p,dev->dev_addr,0,0);
            irq_mask = probe_irq_on();
            writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
            msleep(20);
            dev->irq = probe_irq_off(irq_mask);
            if(!dev->irq)
            {
                printk("Failed to detect IRQ line!\n");
                ni65_free_buffer(p);
                release_region(ioaddr, cards[p->cardno].total_size);
                return -EAGAIN;
            }
            printk("IRQ %d (autodetected).\n",dev->irq);
        }
        else
            printk("IRQ %d (assigned).\n",dev->irq);
    }

    if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
    {
        printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
        ni65_free_buffer(p);
        release_region(ioaddr, cards[p->cardno].total_size);
        return -EAGAIN;
    }

    dev->base_addr = ioaddr;
    SET_MODULE_OWNER(dev);
    dev->open = ni65_open;
    dev->stop = ni65_close;
    dev->hard_start_xmit = ni65_send_packet;
    dev->tx_timeout = ni65_timeout;
    dev->watchdog_timeo = HZ/2;
    dev->get_stats = ni65_get_stats;
    dev->set_multicast_list = set_multicast_list;
    return 0; /* everything is OK */
}
/*
 * set lance register and trigger init
 */
static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
{
    int i;
    u32 pib;

    writereg(CSR0_CLRALL|CSR0_STOP,CSR0);

    for(i=0;i<6;i++)
        p->ib.eaddr[i] = daddr[i];

    for(i=0;i<8;i++)
        p->ib.filter[i] = filter;
    p->ib.mode = mode;

    p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
    p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
    writereg(0,CSR3); /* busmaster/no word-swap */
    pib = (u32) isa_virt_to_bus(&p->ib);
    writereg(pib & 0xffff,CSR1);
    writereg(pib >> 16,CSR2);

    writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */

    for(i=0;i<32;i++)
    {
        mdelay(4);
        if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
            break; /* init ok ? */
    }
}
/*
 * allocate memory area and check the 16MB border
 */
static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
{
    struct sk_buff *skb=NULL;
    unsigned char *ptr;
    void *ret;

    if(type) {
        ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
        if(!skb) {
            printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
            return NULL;
        }
        skb->dev = dev;
        skb_reserve(skb,2+16);
        skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
        ptr = skb->data;
    }
    else {
        ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
        if(!ret) {
            printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
            return NULL;
        }
    }
    if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
        printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
        if(type)
            kfree_skb(skb);
        else
            kfree(ptr);
        return NULL;
    }
    return ret;
}
/*
 * allocate all memory structures .. send/recv buffers etc ...
 */
static int ni65_alloc_buffer(struct net_device *dev)
{
    unsigned char *ptr;
    struct priv *p;
    int i;

    /*
     * we need 8-aligned memory ..
     */
    ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
    if(!ptr)
        return -ENOMEM;

    p = dev->priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
    memset((char *) dev->priv,0,sizeof(struct priv));
    p->self = ptr;

    for(i=0;i<TMDNUM;i++)
    {
#ifdef XMT_VIA_SKB
        p->tmd_skb[i] = NULL;
#endif
        p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
        if(!p->tmdbounce[i]) {
            ni65_free_buffer(p);
            return -ENOMEM;
        }
    }

    for(i=0;i<RMDNUM;i++)
    {
#ifdef RCV_VIA_SKB
        p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
        if(!p->recv_skb[i]) {
            ni65_free_buffer(p);
            return -ENOMEM;
        }
#else
        p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
        if(!p->recvbounce[i]) {
            ni65_free_buffer(p);
            return -ENOMEM;
        }
#endif
    }

    return 0; /* everything is OK */
}
/*
 * free buffers and private struct
 */
static void ni65_free_buffer(struct priv *p)
{
    int i;

    if(!p)
        return;

    for(i=0;i<TMDNUM;i++) {
        kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
        if(p->tmd_skb[i])
            dev_kfree_skb(p->tmd_skb[i]);
#endif
    }

    for(i=0;i<RMDNUM;i++)
    {
#ifdef RCV_VIA_SKB
        if(p->recv_skb[i])
            dev_kfree_skb(p->recv_skb[i]);
#else
        kfree(p->recvbounce[i]);
#endif
    }
    kfree(p->self);
}
/*
 * stop and (re)start lance .. e.g after an error
 */
static void ni65_stop_start(struct net_device *dev,struct priv *p)
{
    int csr0 = CSR0_INEA;

    writedatareg(CSR0_STOP);

    if(debuglevel > 1)
        printk(KERN_DEBUG "ni65_stop_start\n");

    if(p->features & INIT_RING_BEFORE_START) {
        int i;
#ifdef XMT_VIA_SKB
        struct sk_buff *skb_save[TMDNUM];
#endif
        unsigned long buffer[TMDNUM];
        short blen[TMDNUM];

        if(p->xmit_queued) {
            while(1) {
                if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
                    break;
                p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
                if(p->tmdlast == p->tmdnum)
                    break;
            }
        }

        for(i=0;i<TMDNUM;i++) {
            struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
            skb_save[i] = p->tmd_skb[i];
#endif
            buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
            blen[i] = tmdp->blen;
            tmdp->u.s.status = 0x0;
        }

        for(i=0;i<RMDNUM;i++) {
            struct rmd *rmdp = p->rmdhead + i;
            rmdp->u.s.status = RCV_OWN;
        }
        p->tmdnum = p->xmit_queued = 0;
        writedatareg(CSR0_STRT | csr0);

        for(i=0;i<TMDNUM;i++) {
            int num = (i + p->tmdlast) & (TMDNUM-1);
            p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
            p->tmdhead[i].blen = blen[num];
            if(p->tmdhead[i].u.s.status & XMIT_OWN) {
                p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
                p->xmit_queued = 1;
                writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
            }
#ifdef XMT_VIA_SKB
            p->tmd_skb[i] = skb_save[num];
#endif
        }
        p->rmdnum = p->tmdlast = 0;
        if(!p->lock)
            if (p->tmdnum || !p->xmit_queued)
                netif_wake_queue(dev);
        dev->trans_start = jiffies;
    }
    else
        writedatareg(CSR0_STRT | csr0);
}
/*
 * init lance (write init-values .. init-buffers) (open-helper)
 */
static int ni65_lance_reinit(struct net_device *dev)
{
    int i;
    struct priv *p = (struct priv *) dev->priv;
    unsigned long flags;

    p->lock = 0;
    p->xmit_queued = 0;

    flags=claim_dma_lock();
    disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
    set_dma_mode(dev->dma,DMA_MODE_CASCADE);
    enable_dma(dev->dma);
    release_dma_lock(flags);

    outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
    if( (i=readreg(CSR0) ) != 0x4)
    {
        printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
               cards[p->cardno].cardname,(int) i);
        flags=claim_dma_lock();
        disable_dma(dev->dma);
        release_dma_lock(flags);
        return 0;
    }

    p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
    for(i=0;i<TMDNUM;i++)
    {
        struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
        if(p->tmd_skb[i]) {
            dev_kfree_skb(p->tmd_skb[i]);
            p->tmd_skb[i] = NULL;
        }
#endif
        tmdp->u.buffer = 0x0;
        tmdp->u.s.status = XMIT_START | XMIT_END;
        tmdp->blen = tmdp->status2 = 0;
    }

    for(i=0;i<RMDNUM;i++)
    {
        struct rmd *rmdp = p->rmdhead + i;
#ifdef RCV_VIA_SKB
        rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
#else
        rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
#endif
        rmdp->blen = -(R_BUF_SIZE-8);
        rmdp->mlen = 0;
        rmdp->u.s.status = RCV_OWN;
    }

    if(dev->flags & IFF_PROMISC)
        ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
    else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
        ni65_init_lance(p,dev->dev_addr,0xff,0x0);
    else
        ni65_init_lance(p,dev->dev_addr,0x00,0x00);

    /*
     * ni65_set_lance_mem() sets L_ADDRREG to CSR0
     * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
     */

    if(inw(PORT+L_DATAREG) & CSR0_IDON) {
        ni65_set_performance(p);
        /* init OK: start lance , enable interrupts */
        writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
        return 1; /* ->OK */
    }
    printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    flags=claim_dma_lock();
    disable_dma(dev->dma);
    release_dma_lock(flags);
    return 0; /* ->Error */
}
/*
 * interrupt handler
 */
static irqreturn_t ni65_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
    int csr0 = 0;
    struct net_device *dev = dev_id;
    struct priv *p;
    int bcnt = 32;

    p = (struct priv *) dev->priv;

    spin_lock(&p->ring_lock);

    while(--bcnt) {
        csr0 = inw(PORT+L_DATAREG);

#if 0
        writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
#else
        writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
#endif

        if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
            break;

        if(csr0 & CSR0_RINT) /* RECV-int? */
            ni65_recv_intr(dev,csr0);
        if(csr0 & CSR0_TINT) /* XMIT-int? */
            ni65_xmit_intr(dev,csr0);

        if(csr0 & CSR0_ERR)
        {
            struct priv *p = (struct priv *) dev->priv;
            if(debuglevel > 1)
                printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
            if(csr0 & CSR0_BABL)
                p->stats.tx_errors++;
            if(csr0 & CSR0_MISS) {
                int i;
                for(i=0;i<RMDNUM;i++)
                    printk("%02x ",p->rmdhead[i].u.s.status);
                printk("\n");
                p->stats.rx_errors++;
            }
            if(csr0 & CSR0_MERR) {
                if(debuglevel > 1)
                    printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
                ni65_stop_start(dev,p);
            }
        }
    }
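
    /*
     * RCV_PARANOIA_CHECK (descriptive note): if a descriptor further
     * along the ring has already been filled by the chip while the
     * current one still appears empty, the ring pointer has come out
     * of sync (see 'known problems' in the header); resynchronize
     * p->rmdnum to the first filled slot and drain it via
     * ni65_recv_intr().
     */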
#ifdef RCV_PARANOIA_CHECK
    {
        int j;
        for(j=0;j<RMDNUM;j++)
        {
            struct priv *p = (struct priv *) dev->priv;
            int i,k,num1,num2;
            for(i=RMDNUM-1;i>0;i--) {
                num2 = (p->rmdnum + i) & (RMDNUM-1);
                if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
                    break;
            }

            if(i) {
                for(k=0;k<RMDNUM;k++) {
                    num1 = (p->rmdnum + k) & (RMDNUM-1);
                    if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
                        break;
                }
                if(!k)
                    break;

                if(debuglevel > 0)
                {
                    char buf[256],*buf1;
                    int k;
                    buf1 = buf;
                    for(k=0;k<RMDNUM;k++) {
                        sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
                        buf1 += 3;
                    }
                    *buf1 = 0;
                    printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
                }

                p->rmdnum = num1;
                ni65_recv_intr(dev,csr0);
                if((p->rmdhead[num2].u.s.status & RCV_OWN))
                    break; /* ok, we are 'in sync' again */
            }
            else
                break;
        }
    }
#endif

    if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
        printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
        ni65_stop_start(dev,p);
    }
    else
        writedatareg(CSR0_INEA);

    spin_unlock(&p->ring_lock);
    return IRQ_HANDLED;
}
/*
 * We have received an Xmit-Interrupt ..
 * send a new packet if necessary
 */
static void ni65_xmit_intr(struct net_device *dev,int csr0)
{
    struct priv *p = (struct priv *) dev->priv;

    while(p->xmit_queued)
    {
        struct tmd *tmdp = p->tmdhead + p->tmdlast;
        int tmdstat = tmdp->u.s.status;

        if(tmdstat & XMIT_OWN)
            break;

        if(tmdstat & XMIT_ERR)
        {
#if 0
            if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
                printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
#endif
            /* checking some errors */
            if(tmdp->status2 & XMIT_RTRY)
                p->stats.tx_aborted_errors++;
            if(tmdp->status2 & XMIT_LCAR)
                p->stats.tx_carrier_errors++;
            if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
                /* this stops the xmitter */
                p->stats.tx_fifo_errors++;
                if(debuglevel > 0)
                    printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
                if(p->features & INIT_RING_BEFORE_START) {
                    tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
                    ni65_stop_start(dev,p);
                    break; /* no more Xmit processing .. */
                }
                else
                    ni65_stop_start(dev,p);
            }
            if(debuglevel > 2)
                printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
            if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
                p->stats.tx_errors++;
            tmdp->status2 = 0;
        }
        else {
            p->stats.tx_bytes -= (short)(tmdp->blen);
            p->stats.tx_packets++;
        }

#ifdef XMT_VIA_SKB
        if(p->tmd_skb[p->tmdlast]) {
            dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
            p->tmd_skb[p->tmdlast] = NULL;
        }
#endif

        p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
        if(p->tmdlast == p->tmdnum)
            p->xmit_queued = 0;
    }
    netif_wake_queue(dev);
}
/*
 * We have received a packet
 */
static void ni65_recv_intr(struct net_device *dev,int csr0)
{
    struct rmd *rmdp;
    int rmdstat,len;
    int cnt=0;
    struct priv *p = (struct priv *) dev->priv;

    rmdp = p->rmdhead + p->rmdnum;
    while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
    {
        cnt++;
        if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
        {
            if(!(rmdstat & RCV_ERR)) {
                if(rmdstat & RCV_START)
                {
                    p->stats.rx_length_errors++;
                    printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
                }
            }
            else {
                if(debuglevel > 2)
                    printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
                           dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
                if(rmdstat & RCV_FRAM)
                    p->stats.rx_frame_errors++;
                if(rmdstat & RCV_OFLO)
                    p->stats.rx_over_errors++;
                if(rmdstat & RCV_CRC)
                    p->stats.rx_crc_errors++;
                if(rmdstat & RCV_BUF_ERR)
                    p->stats.rx_fifo_errors++;
            }
            if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
                p->stats.rx_errors++;
        }
        else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
        {
#ifdef RCV_VIA_SKB
            struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
            if (skb)
                skb_reserve(skb,16);
#else
            struct sk_buff *skb = dev_alloc_skb(len+2);
#endif
            if(skb)
            {
                skb_reserve(skb,2);
                skb->dev = dev;
#ifdef RCV_VIA_SKB
                if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
                    skb_put(skb,len);
                    eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
                }
                else {
                    struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
                    skb_put(skb,R_BUF_SIZE);
                    p->recv_skb[p->rmdnum] = skb;
                    rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
                    skb = skb1;
                    skb_trim(skb,len);
                }
#else
                skb_put(skb,len);
                eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
#endif
                p->stats.rx_packets++;
                p->stats.rx_bytes += len;
                skb->protocol=eth_type_trans(skb,dev);
                netif_rx(skb);
                dev->last_rx = jiffies;
            }
            else
            {
                printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
                p->stats.rx_dropped++;
            }
        }
        else {
            printk(KERN_INFO "%s: received runt packet\n",dev->name);
            p->stats.rx_errors++;
        }
        rmdp->blen = -(R_BUF_SIZE-8);
        rmdp->mlen = 0;
        rmdp->u.s.status = RCV_OWN; /* change owner */
        p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
        rmdp = p->rmdhead + p->rmdnum;
    }
}
/*
 * kick xmitter ..
 */
static void ni65_timeout(struct net_device *dev)
{
    int i;
    struct priv *p = (struct priv *) dev->priv;

    printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
    for(i=0;i<TMDNUM;i++)
        printk("%02x ",p->tmdhead[i].u.s.status);
    printk("\n");
    ni65_lance_reinit(dev);
    dev->trans_start = jiffies;
    netif_wake_queue(dev);
}
/*
 * Send a packet
 */
static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct priv *p = (struct priv *) dev->priv;

    netif_stop_queue(dev);

    if (test_and_set_bit(0, (void*)&p->lock)) {
        printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
        return 1;
    }

    {
        short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
        struct tmd *tmdp;
        unsigned long flags;

#ifdef XMT_VIA_SKB
        if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif

            memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
                   (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
            if (len > skb->len)
                memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
            dev_kfree_skb (skb);

            spin_lock_irqsave(&p->ring_lock, flags);
            tmdp = p->tmdhead + p->tmdnum;
            tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
            p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);

#ifdef XMT_VIA_SKB
        }
        else {
            spin_lock_irqsave(&p->ring_lock, flags);

            tmdp = p->tmdhead + p->tmdnum;
            tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
            p->tmd_skb[p->tmdnum] = skb;
        }
#endif
        tmdp->blen = -len;

        tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
        writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */

        p->xmit_queued = 1;
        p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

        if(p->tmdnum != p->tmdlast)
            netif_wake_queue(dev);

        p->lock = 0;
        dev->trans_start = jiffies;

        spin_unlock_irqrestore(&p->ring_lock, flags);
    }

    return 0;
}
static struct net_device_stats *ni65_get_stats(struct net_device *dev)
{
#if 0
    int i;
    struct priv *p = (struct priv *) dev->priv;
    for(i=0;i<RMDNUM;i++)
    {
        struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
        printk("%02x ",rmdp->u.s.status);
    }
    printk("\n");
#endif

    return &((struct priv *) dev->priv)->stats;
}
static void set_multicast_list(struct net_device *dev)
{
    if(!ni65_lance_reinit(dev))
        printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
    netif_wake_queue(dev);
}
#ifdef MODULE
static struct net_device *dev_ni65;

module_param(irq, int, 0);
module_param(io, int, 0);
module_param(dma, int, 0);
MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");

int __init init_module(void)
{
    dev_ni65 = ni65_probe(-1);
    return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
}

void cleanup_module(void)
{
    unregister_netdev(dev_ni65);
    cleanup_card(dev_ni65);
    free_netdev(dev_ni65);
}
#endif /* MODULE */

MODULE_LICENSE("GPL");
/*
 * END of ni65.c
 */