ia64/linux-2.6.18-xen.hg

view drivers/atm/he.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
3 /*
5 he.c
7 ForeRunnerHE ATM Adapter driver for ATM on Linux
8 Copyright (C) 1999-2001 Naval Research Laboratory
10 This library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Lesser General Public
12 License as published by the Free Software Foundation; either
13 version 2.1 of the License, or (at your option) any later version.
15 This library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Lesser General Public License for more details.
20 You should have received a copy of the GNU Lesser General Public
21 License along with this library; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
26 /*
28 he.c
30 ForeRunnerHE ATM Adapter driver for ATM on Linux
31 Copyright (C) 1999-2001 Naval Research Laboratory
33 Permission to use, copy, modify and distribute this software and its
34 documentation is hereby granted, provided that both the copyright
35 notice and this permission notice appear in all copies of the software,
36 derivative works or modified versions, and any portions thereof, and
37 that both notices appear in supporting documentation.
39 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
40 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
41 RESULTING FROM THE USE OF THIS SOFTWARE.
43 This driver was written using the "Programmer's Reference Manual for
44 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
46 AUTHORS:
47 chas williams <chas@cmf.nrl.navy.mil>
48 eric kinzie <ekinzie@cmf.nrl.navy.mil>
50 NOTES:
51 4096 supported 'connections'
52 group 0 is used for all traffic
53 interrupt queue 0 is used for all interrupts
54 aal0 support (based on work from ulrich.u.muller@nokia.com)
56 */
58 #include <linux/module.h>
59 #include <linux/kernel.h>
60 #include <linux/skbuff.h>
61 #include <linux/pci.h>
62 #include <linux/errno.h>
63 #include <linux/types.h>
64 #include <linux/string.h>
65 #include <linux/delay.h>
66 #include <linux/init.h>
67 #include <linux/mm.h>
68 #include <linux/sched.h>
69 #include <linux/timer.h>
70 #include <linux/interrupt.h>
71 #include <linux/dma-mapping.h>
72 #include <asm/io.h>
73 #include <asm/byteorder.h>
74 #include <asm/uaccess.h>
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
80 #define USE_TASKLET
81 #undef USE_SCATTERGATHER
82 #undef USE_CHECKSUM_HW /* still confused about this */
83 #define USE_RBPS
84 #undef USE_RBPS_POOL /* if memory is tight try this */
85 #undef USE_RBPL_POOL /* if memory is tight try this */
86 #define USE_TPD_POOL
87 /* #undef CONFIG_ATM_HE_USE_SUNI */
88 /* #undef HE_DEBUG */
90 #include "he.h"
91 #include "suni.h"
92 #include <linux/atm_he.h>
94 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
96 #ifdef HE_DEBUG
97 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
98 #else /* !HE_DEBUG */
99 #define HPRINTK(fmt,args...) do { } while (0)
100 #endif /* HE_DEBUG */
102 /* version definition */
104 static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
106 /* declarations */
108 static int he_open(struct atm_vcc *vcc);
109 static void he_close(struct atm_vcc *vcc);
110 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
111 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
112 static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
113 static void he_tasklet(unsigned long data);
114 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
115 static int he_start(struct atm_dev *dev);
116 static void he_stop(struct he_dev *dev);
117 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
118 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
120 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
122 /* globals */
124 static struct he_dev *he_devs;
125 static int disable64;
126 static short nvpibits = -1;
127 static short nvcibits = -1;
128 static short rx_skb_reserve = 16;
129 static int irq_coalesce = 1;
130 static int sdh = 0;
132 /* Read from EEPROM = 0000 0011b */
133 static unsigned int readtab[] = {
134 CS_HIGH | CLK_HIGH,
135 CS_LOW | CLK_LOW,
136 CLK_HIGH, /* 0 */
137 CLK_LOW,
138 CLK_HIGH, /* 0 */
139 CLK_LOW,
140 CLK_HIGH, /* 0 */
141 CLK_LOW,
142 CLK_HIGH, /* 0 */
143 CLK_LOW,
144 CLK_HIGH, /* 0 */
145 CLK_LOW,
146 CLK_HIGH, /* 0 */
147 CLK_LOW | SI_HIGH,
148 CLK_HIGH | SI_HIGH, /* 1 */
149 CLK_LOW | SI_HIGH,
150 CLK_HIGH | SI_HIGH /* 1 */
151 };
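/*
 * Annotation (not in the original source): each CLK_HIGH entry tagged
 * with a bit comment above clocks one bit into the EEPROM on the rising
 * edge; SI is left low for the first six bits and driven high (SI_HIGH)
 * for the last two, so the shifted-out sequence is 0000 0011b -- the
 * read opcode named in the comment above.  The two leading entries
 * toggle the chip-select line before the bits are clocked out.
 */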
153 /* Clock to read from/write to the EEPROM */
154 static unsigned int clocktab[] = {
155 CLK_LOW,
156 CLK_HIGH,
157 CLK_LOW,
158 CLK_HIGH,
159 CLK_LOW,
160 CLK_HIGH,
161 CLK_LOW,
162 CLK_HIGH,
163 CLK_LOW,
164 CLK_HIGH,
165 CLK_LOW,
166 CLK_HIGH,
167 CLK_LOW,
168 CLK_HIGH,
169 CLK_LOW,
170 CLK_HIGH,
171 CLK_LOW
172 };
174 static struct atmdev_ops he_ops =
175 {
176 .open = he_open,
177 .close = he_close,
178 .ioctl = he_ioctl,
179 .send = he_send,
180 .phy_put = he_phy_put,
181 .phy_get = he_phy_get,
182 .proc_read = he_proc_read,
183 .owner = THIS_MODULE
184 };
186 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
187 #define he_readl(dev, reg) readl((dev)->membase + (reg))
189 /* section 2.12 connection memory access */
191 static __inline__ void
192 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
193 unsigned flags)
194 {
195 he_writel(he_dev, val, CON_DAT);
196 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
197 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
198 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
199 }
201 #define he_writel_rcm(dev, val, reg) \
202 he_writel_internal(dev, val, reg, CON_CTL_RCM)
204 #define he_writel_tcm(dev, val, reg) \
205 he_writel_internal(dev, val, reg, CON_CTL_TCM)
207 #define he_writel_mbox(dev, val, reg) \
208 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
210 static unsigned
211 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
212 {
213 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
214 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
215 return he_readl(he_dev, CON_DAT);
216 }
218 #define he_readl_rcm(dev, reg) \
219 he_readl_internal(dev, reg, CON_CTL_RCM)
221 #define he_readl_tcm(dev, reg) \
222 he_readl_internal(dev, reg, CON_CTL_TCM)
224 #define he_readl_mbox(dev, reg) \
225 he_readl_internal(dev, reg, CON_CTL_MBOX)
228 /* figure 2.2 connection id */
230 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
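/*
 * Annotation (not in the original source): worked example of the cid
 * packing.  With he_dev->vcibits == 10, vpi == 1 and vci == 0x20:
 *     cid = ((1 << 10) | 0x20) & 0x1fff = 0x420
 * __find_vcc() further down performs the inverse split:
 *     vpi = cid >> vcibits;  vci = cid & ((1 << vcibits) - 1);
 */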
232 /* 2.5.1 per connection transmit state registers */
234 #define he_writel_tsr0(dev, val, cid) \
235 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
236 #define he_readl_tsr0(dev, cid) \
237 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
239 #define he_writel_tsr1(dev, val, cid) \
240 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
242 #define he_writel_tsr2(dev, val, cid) \
243 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
245 #define he_writel_tsr3(dev, val, cid) \
246 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
248 #define he_writel_tsr4(dev, val, cid) \
249 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
251 /* from page 2-20
252 *
253 * NOTE While the transmit connection is active, bits 23 through 0
254 * of this register must not be written by the host. Byte
255 * enables should be used during normal operation when writing
256 * the most significant byte.
257 */
259 #define he_writel_tsr4_upper(dev, val, cid) \
260 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
261 CON_CTL_TCM \
262 | CON_BYTE_DISABLE_2 \
263 | CON_BYTE_DISABLE_1 \
264 | CON_BYTE_DISABLE_0)
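/*
 * Annotation (not in the original source): asserting CON_BYTE_DISABLE_2,
 * _1 and _0 masks off the three low byte lanes, so only the most
 * significant byte (bits 31:24) of TSR4 is actually written -- which is
 * how the byte-enable requirement quoted from page 2-20 above is met
 * while the connection is active.
 */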
266 #define he_readl_tsr4(dev, cid) \
267 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
269 #define he_writel_tsr5(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
272 #define he_writel_tsr6(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
275 #define he_writel_tsr7(dev, val, cid) \
276 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
279 #define he_writel_tsr8(dev, val, cid) \
280 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
282 #define he_writel_tsr9(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
285 #define he_writel_tsr10(dev, val, cid) \
286 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
288 #define he_writel_tsr11(dev, val, cid) \
289 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
292 #define he_writel_tsr12(dev, val, cid) \
293 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
295 #define he_writel_tsr13(dev, val, cid) \
296 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
299 #define he_writel_tsr14(dev, val, cid) \
300 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
302 #define he_writel_tsr14_upper(dev, val, cid) \
303 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
304 CON_CTL_TCM \
305 | CON_BYTE_DISABLE_2 \
306 | CON_BYTE_DISABLE_1 \
307 | CON_BYTE_DISABLE_0)
309 /* 2.7.1 per connection receive state registers */
311 #define he_writel_rsr0(dev, val, cid) \
312 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
313 #define he_readl_rsr0(dev, cid) \
314 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
316 #define he_writel_rsr1(dev, val, cid) \
317 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
319 #define he_writel_rsr2(dev, val, cid) \
320 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
322 #define he_writel_rsr3(dev, val, cid) \
323 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
325 #define he_writel_rsr4(dev, val, cid) \
326 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
328 #define he_writel_rsr5(dev, val, cid) \
329 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
331 #define he_writel_rsr6(dev, val, cid) \
332 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
334 #define he_writel_rsr7(dev, val, cid) \
335 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
337 static __inline__ struct atm_vcc*
338 __find_vcc(struct he_dev *he_dev, unsigned cid)
339 {
340 struct hlist_head *head;
341 struct atm_vcc *vcc;
342 struct hlist_node *node;
343 struct sock *s;
344 short vpi;
345 int vci;
347 vpi = cid >> he_dev->vcibits;
348 vci = cid & ((1 << he_dev->vcibits) - 1);
349 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
351 sk_for_each(s, node, head) {
352 vcc = atm_sk(s);
353 if (vcc->dev == he_dev->atm_dev &&
354 vcc->vci == vci && vcc->vpi == vpi &&
355 vcc->qos.rxtp.traffic_class != ATM_NONE) {
356 return vcc;
357 }
358 }
359 return NULL;
360 }
362 static int __devinit
363 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
364 {
365 struct atm_dev *atm_dev = NULL;
366 struct he_dev *he_dev = NULL;
367 int err = 0;
369 printk(KERN_INFO "he: %s\n", version);
371 if (pci_enable_device(pci_dev))
372 return -EIO;
373 if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
374 printk(KERN_WARNING "he: no suitable dma available\n");
375 err = -EIO;
376 goto init_one_failure;
377 }
379 atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
380 if (!atm_dev) {
381 err = -ENODEV;
382 goto init_one_failure;
383 }
384 pci_set_drvdata(pci_dev, atm_dev);
386 he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
387 GFP_KERNEL);
388 if (!he_dev) {
389 err = -ENOMEM;
390 goto init_one_failure;
391 }
392 memset(he_dev, 0, sizeof(struct he_dev));
394 he_dev->pci_dev = pci_dev;
395 he_dev->atm_dev = atm_dev;
396 he_dev->atm_dev->dev_data = he_dev;
397 atm_dev->dev_data = he_dev;
398 he_dev->number = atm_dev->number;
399 if (he_start(atm_dev)) {
400 he_stop(he_dev);
401 err = -ENODEV;
402 goto init_one_failure;
403 }
404 he_dev->next = NULL;
405 if (he_devs)
406 he_dev->next = he_devs;
407 he_devs = he_dev;
408 return 0;
410 init_one_failure:
411 if (atm_dev)
412 atm_dev_deregister(atm_dev);
413 kfree(he_dev);
414 pci_disable_device(pci_dev);
415 return err;
416 }
418 static void __devexit
419 he_remove_one (struct pci_dev *pci_dev)
420 {
421 struct atm_dev *atm_dev;
422 struct he_dev *he_dev;
424 atm_dev = pci_get_drvdata(pci_dev);
425 he_dev = HE_DEV(atm_dev);
427 /* need to remove from he_devs */
429 he_stop(he_dev);
430 atm_dev_deregister(atm_dev);
431 kfree(he_dev);
433 pci_set_drvdata(pci_dev, NULL);
434 pci_disable_device(pci_dev);
435 }
438 static unsigned
439 rate_to_atmf(unsigned rate) /* cps to atm forum format */
440 {
441 #define NONZERO (1 << 14)
443 unsigned exp = 0;
445 if (rate == 0)
446 return 0;
448 rate <<= 9;
449 while (rate > 0x3ff) {
450 ++exp;
451 rate >>= 1;
452 }
454 return (NONZERO | (exp << 9) | (rate & 0x1ff));
455 }
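/*
 * Annotation (not in the original source): worked example and a small
 * cross-check sketch.  rate_to_atmf(1000) shifts 1000 << 9 = 512000
 * right nine times until it is <= 0x3ff, so exp = 9, the stored
 * mantissa is 1000 & 0x1ff = 488, and the result is
 * NONZERO | (9 << 9) | 488 = 0x53e8.  The sketch below (disabled,
 * illustrative only -- atmf_to_rate() is not part of the driver)
 * decodes the format the same way he_init_cs_block_rcm() does.
 */
#if 0
static unsigned
atmf_to_rate(unsigned atmf)	/* atm forum format back to cps */
{
	unsigned exp = (atmf >> 9) & 0x1f;
	unsigned man = atmf & 0x1ff;

	if (!(atmf & NONZERO))
		return 0;
	return (unsigned) (((unsigned long long) (1 << exp) * (man + 512)) >> 9);
}
#endif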
457 static void __init
458 he_init_rx_lbfp0(struct he_dev *he_dev)
459 {
460 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
461 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
462 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
463 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
465 lbufd_index = 0;
466 lbm_offset = he_readl(he_dev, RCMLBM_BA);
468 he_writel(he_dev, lbufd_index, RLBF0_H);
470 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
471 lbufd_index += 2;
472 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
474 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
475 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
477 if (++lbuf_count == lbufs_per_row) {
478 lbuf_count = 0;
479 row_offset += he_dev->bytes_per_row;
480 }
481 lbm_offset += 4;
482 }
484 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
485 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
486 }
488 static void __init
489 he_init_rx_lbfp1(struct he_dev *he_dev)
490 {
491 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
492 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
493 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
494 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
496 lbufd_index = 1;
497 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
499 he_writel(he_dev, lbufd_index, RLBF1_H);
501 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
502 lbufd_index += 2;
503 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
505 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
506 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
508 if (++lbuf_count == lbufs_per_row) {
509 lbuf_count = 0;
510 row_offset += he_dev->bytes_per_row;
511 }
512 lbm_offset += 4;
513 }
515 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
516 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
517 }
519 static void __init
520 he_init_tx_lbfp(struct he_dev *he_dev)
521 {
522 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
523 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
524 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
525 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
527 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
528 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
530 he_writel(he_dev, lbufd_index, TLBF_H);
532 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
533 lbufd_index += 1;
534 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
536 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
537 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
539 if (++lbuf_count == lbufs_per_row) {
540 lbuf_count = 0;
541 row_offset += he_dev->bytes_per_row;
542 }
543 lbm_offset += 2;
544 }
546 he_writel(he_dev, lbufd_index - 1, TLBF_T);
547 }
549 static int __init
550 he_init_tpdrq(struct he_dev *he_dev)
551 {
552 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
553 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
554 if (he_dev->tpdrq_base == NULL) {
555 hprintk("failed to alloc tpdrq\n");
556 return -ENOMEM;
557 }
558 memset(he_dev->tpdrq_base, 0,
559 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
561 he_dev->tpdrq_tail = he_dev->tpdrq_base;
562 he_dev->tpdrq_head = he_dev->tpdrq_base;
564 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
565 he_writel(he_dev, 0, TPDRQ_T);
566 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
568 return 0;
569 }
571 static void __init
572 he_init_cs_block(struct he_dev *he_dev)
573 {
574 unsigned clock, rate, delta;
575 int reg;
577 /* 5.1.7 cs block initialization */
579 for (reg = 0; reg < 0x20; ++reg)
580 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
582 /* rate grid timer reload values */
584 clock = he_is622(he_dev) ? 66667000 : 50000000;
585 rate = he_dev->atm_dev->link_rate;
586 delta = rate / 16 / 2;
588 for (reg = 0; reg < 0x10; ++reg) {
589 /* 2.4 internal transmit function
590 *
591 * we initialize the first row in the rate grid.
592 * values are period (in clock cycles) of timer
593 */
594 unsigned period = clock / rate;
596 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
597 rate -= delta;
598 }
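/*
 * Annotation (not in the original source): worked numbers, assuming an
 * OC-3 card.  clock = 50000000 and rate starts at ATM_OC3_PCR
 * (~353207 cells/s), so the first entry of the top rate-grid row gets a
 * period of about 50000000 / 353207 ~= 141 clock cycles; each later
 * entry uses a rate reduced by delta = link_rate / 32.
 */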
600 if (he_is622(he_dev)) {
601 /* table 5.2 (4 cells per lbuf) */
602 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
603 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
604 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
605 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
606 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
608 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
609 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
610 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
611 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
612 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
613 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
614 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
616 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
618 /* table 5.8 */
619 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
620 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
621 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
622 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
623 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
624 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
626 /* table 5.9 */
627 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
628 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
629 } else {
630 /* table 5.1 (4 cells per lbuf) */
631 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
632 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
633 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
634 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
635 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
637 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
638 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
639 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
640 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
641 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
642 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
643 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
645 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
647 /* table 5.8 */
648 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
649 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
650 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
651 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
652 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
653 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
655 /* table 5.9 */
656 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
657 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
658 }
660 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
662 for (reg = 0; reg < 0x8; ++reg)
663 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
665 }
667 static int __init
668 he_init_cs_block_rcm(struct he_dev *he_dev)
669 {
670 unsigned (*rategrid)[16][16];
671 unsigned rate, delta;
672 int i, j, reg;
674 unsigned rate_atmf, exp, man;
675 unsigned long long rate_cps;
676 int mult, buf, buf_limit = 4;
678 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
679 if (!rategrid)
680 return -ENOMEM;
682 /* initialize rate grid group table */
684 for (reg = 0x0; reg < 0xff; ++reg)
685 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
687 /* initialize rate controller groups */
689 for (reg = 0x100; reg < 0x1ff; ++reg)
690 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
692 /* initialize tNrm lookup table */
694 /* the manual makes reference to a routine in a sample driver
695 for proper configuration; fortunately, we only need this
696 in order to support abr connection */
698 /* initialize rate to group table */
700 rate = he_dev->atm_dev->link_rate;
701 delta = rate / 32;
703 /*
704 * 2.4 transmit internal functions
705 *
706 * we construct a copy of the rate grid used by the scheduler
707 * in order to construct the rate to group table below
708 */
710 for (j = 0; j < 16; j++) {
711 (*rategrid)[0][j] = rate;
712 rate -= delta;
713 }
715 for (i = 1; i < 16; i++)
716 for (j = 0; j < 16; j++)
717 if (i > 14)
718 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
719 else
720 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
722 /*
723 * 2.4 transmit internal function
724 *
725 * this table maps the upper 5 bits of exponent and mantissa
726 * of the atm forum representation of the rate into an index
727 * on rate grid
728 */
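/*
 * Annotation (not in the original source): worked example.  For
 * rate_atmf = 0xa5 the loop below computes man = (0xa5 & 0x1f) << 4 = 80
 * and exp = 0xa5 >> 5 = 5, so rate_cps = (1 << 5) * (80 + 512) >> 9 = 37
 * cells/s; the downward scan over the copied rate grid then picks the
 * slowest grid entry that is still >= 37 cps as the grid index for this
 * rate.
 */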
730 rate_atmf = 0;
731 while (rate_atmf < 0x400) {
732 man = (rate_atmf & 0x1f) << 4;
733 exp = rate_atmf >> 5;
735 /*
736 instead of '/ 512', use '>> 9' to prevent a call
737 to divdu3 on x86 platforms
738 */
739 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
741 if (rate_cps < 10)
742 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
744 for (i = 255; i > 0; i--)
745 if ((*rategrid)[i/16][i%16] >= rate_cps)
746 break; /* pick nearest rate instead? */
748 /*
749 * each table entry is 16 bits: (rate grid index (8 bits)
750 * and a buffer limit (8 bits)
751 * there are two table entries in each 32-bit register
752 */
754 #ifdef notdef
755 buf = rate_cps * he_dev->tx_numbuffs /
756 (he_dev->atm_dev->link_rate * 2);
757 #else
758 /* this is pretty, but avoids _divdu3 and is mostly correct */
759 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
760 if (rate_cps > (272 * mult))
761 buf = 4;
762 else if (rate_cps > (204 * mult))
763 buf = 3;
764 else if (rate_cps > (136 * mult))
765 buf = 2;
766 else if (rate_cps > (68 * mult))
767 buf = 1;
768 else
769 buf = 0;
770 #endif
771 if (buf > buf_limit)
772 buf = buf_limit;
773 reg = (reg << 16) | ((i << 8) | buf);
775 #define RTGTBL_OFFSET 0x400
777 if (rate_atmf & 0x1)
778 he_writel_rcm(he_dev, reg,
779 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
781 ++rate_atmf;
782 }
784 kfree(rategrid);
785 return 0;
786 }
788 static int __init
789 he_init_group(struct he_dev *he_dev, int group)
790 {
791 int i;
793 #ifdef USE_RBPS
794 /* small buffer pool */
795 #ifdef USE_RBPS_POOL
796 he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
797 CONFIG_RBPS_BUFSIZE, 8, 0);
798 if (he_dev->rbps_pool == NULL) {
799 hprintk("unable to create rbps pages\n");
800 return -ENOMEM;
801 }
802 #else /* !USE_RBPS_POOL */
803 he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
804 CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
805 if (he_dev->rbps_pages == NULL) {
806 hprintk("unable to create rbps page pool\n");
807 return -ENOMEM;
808 }
809 #endif /* USE_RBPS_POOL */
811 he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
812 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
813 if (he_dev->rbps_base == NULL) {
814 hprintk("failed to alloc rbps\n");
815 return -ENOMEM;
816 }
817 memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
818 he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
820 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
821 dma_addr_t dma_handle;
822 void *cpuaddr;
824 #ifdef USE_RBPS_POOL
825 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
826 if (cpuaddr == NULL)
827 return -ENOMEM;
828 #else
829 cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
830 dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
831 #endif
833 he_dev->rbps_virt[i].virt = cpuaddr;
834 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
835 he_dev->rbps_base[i].phys = dma_handle;
837 }
838 he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
840 he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
841 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
842 G0_RBPS_T + (group * 32));
843 he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
844 G0_RBPS_BS + (group * 32));
845 he_writel(he_dev,
846 RBP_THRESH(CONFIG_RBPS_THRESH) |
847 RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
848 RBP_INT_ENB,
849 G0_RBPS_QI + (group * 32));
850 #else /* !USE_RBPS */
851 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
852 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
853 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
854 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
855 G0_RBPS_BS + (group * 32));
856 #endif /* USE_RBPS */
858 /* large buffer pool */
859 #ifdef USE_RBPL_POOL
860 he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
861 CONFIG_RBPL_BUFSIZE, 8, 0);
862 if (he_dev->rbpl_pool == NULL) {
863 hprintk("unable to create rbpl pool\n");
864 return -ENOMEM;
865 }
866 #else /* !USE_RBPL_POOL */
867 he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
868 CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
869 if (he_dev->rbpl_pages == NULL) {
870 hprintk("unable to create rbpl pages\n");
871 return -ENOMEM;
872 }
873 #endif /* USE_RBPL_POOL */
875 he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
876 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
877 if (he_dev->rbpl_base == NULL) {
878 hprintk("failed to alloc rbpl\n");
879 return -ENOMEM;
880 }
881 memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
882 he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
884 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
885 dma_addr_t dma_handle;
886 void *cpuaddr;
888 #ifdef USE_RBPL_POOL
889 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
890 if (cpuaddr == NULL)
891 return -ENOMEM;
892 #else
893 cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
894 dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
895 #endif
897 he_dev->rbpl_virt[i].virt = cpuaddr;
898 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
899 he_dev->rbpl_base[i].phys = dma_handle;
900 }
901 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
903 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
904 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
905 G0_RBPL_T + (group * 32));
906 he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
907 G0_RBPL_BS + (group * 32));
908 he_writel(he_dev,
909 RBP_THRESH(CONFIG_RBPL_THRESH) |
910 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
911 RBP_INT_ENB,
912 G0_RBPL_QI + (group * 32));
914 /* rx buffer ready queue */
916 he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
917 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
918 if (he_dev->rbrq_base == NULL) {
919 hprintk("failed to allocate rbrq\n");
920 return -ENOMEM;
921 }
922 memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
924 he_dev->rbrq_head = he_dev->rbrq_base;
925 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
926 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
927 he_writel(he_dev,
928 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
929 G0_RBRQ_Q + (group * 16));
930 if (irq_coalesce) {
931 hprintk("coalescing interrupts\n");
932 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
933 G0_RBRQ_I + (group * 16));
934 } else
935 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
936 G0_RBRQ_I + (group * 16));
938 /* tx buffer ready queue */
940 he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
941 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
942 if (he_dev->tbrq_base == NULL) {
943 hprintk("failed to allocate tbrq\n");
944 return -ENOMEM;
945 }
946 memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
948 he_dev->tbrq_head = he_dev->tbrq_base;
950 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
951 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
952 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
953 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
955 return 0;
956 }
958 static int __init
959 he_init_irq(struct he_dev *he_dev)
960 {
961 int i;
963 /* 2.9.3.5 tail offset for each interrupt queue is located after the
964 end of the interrupt queue */
966 he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
967 (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
968 if (he_dev->irq_base == NULL) {
969 hprintk("failed to allocate irq\n");
970 return -ENOMEM;
971 }
972 he_dev->irq_tailoffset = (unsigned *)
973 &he_dev->irq_base[CONFIG_IRQ_SIZE];
974 *he_dev->irq_tailoffset = 0;
975 he_dev->irq_head = he_dev->irq_base;
976 he_dev->irq_tail = he_dev->irq_base;
978 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
979 he_dev->irq_base[i].isw = ITYPE_INVALID;
981 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
982 he_writel(he_dev,
983 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
984 IRQ0_HEAD);
985 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
986 he_writel(he_dev, 0x0, IRQ0_DATA);
988 he_writel(he_dev, 0x0, IRQ1_BASE);
989 he_writel(he_dev, 0x0, IRQ1_HEAD);
990 he_writel(he_dev, 0x0, IRQ1_CNTL);
991 he_writel(he_dev, 0x0, IRQ1_DATA);
993 he_writel(he_dev, 0x0, IRQ2_BASE);
994 he_writel(he_dev, 0x0, IRQ2_HEAD);
995 he_writel(he_dev, 0x0, IRQ2_CNTL);
996 he_writel(he_dev, 0x0, IRQ2_DATA);
998 he_writel(he_dev, 0x0, IRQ3_BASE);
999 he_writel(he_dev, 0x0, IRQ3_HEAD);
1000 he_writel(he_dev, 0x0, IRQ3_CNTL);
1001 he_writel(he_dev, 0x0, IRQ3_DATA);
1003 /* 2.9.3.2 interrupt queue mapping registers */
1005 he_writel(he_dev, 0x0, GRP_10_MAP);
1006 he_writel(he_dev, 0x0, GRP_32_MAP);
1007 he_writel(he_dev, 0x0, GRP_54_MAP);
1008 he_writel(he_dev, 0x0, GRP_76_MAP);
1010 if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
1011 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
1012 return -EINVAL;
1015 he_dev->irq = he_dev->pci_dev->irq;
1017 return 0;
1020 static int __devinit
1021 he_start(struct atm_dev *dev)
1023 struct he_dev *he_dev;
1024 struct pci_dev *pci_dev;
1025 unsigned long membase;
1027 u16 command;
1028 u32 gen_cntl_0, host_cntl, lb_swap;
1029 u8 cache_size, timer;
1031 unsigned err;
1032 unsigned int status, reg;
1033 int i, group;
1035 he_dev = HE_DEV(dev);
1036 pci_dev = he_dev->pci_dev;
1038 membase = pci_resource_start(pci_dev, 0);
1039 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
1041 /*
1042 * pci bus controller initialization
1043 */
1045 /* 4.3 pci bus controller-specific initialization */
1046 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1047 hprintk("can't read GEN_CNTL_0\n");
1048 return -EINVAL;
1050 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1051 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1052 hprintk("can't write GEN_CNTL_0.\n");
1053 return -EINVAL;
1056 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1057 hprintk("can't read PCI_COMMAND.\n");
1058 return -EINVAL;
1061 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1062 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1063 hprintk("can't enable memory.\n");
1064 return -EINVAL;
1067 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1068 hprintk("can't read cache line size?\n");
1069 return -EINVAL;
1072 if (cache_size < 16) {
1073 cache_size = 16;
1074 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1075 hprintk("can't set cache line size to %d\n", cache_size);
1078 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1079 hprintk("can't read latency timer?\n");
1080 return -EINVAL;
1083 /* from table 3.9
1085 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1087 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1088 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1090 */
1091 #define LAT_TIMER 209
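/*
 * Annotation (not in the original source): with AVG_LAT at its 16-clock
 * maximum and the 192-clock burst figure quoted above,
 * 1 + 16 + 192 = 209, which is where LAT_TIMER comes from.
 */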
1092 if (timer < LAT_TIMER) {
1093 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1094 timer = LAT_TIMER;
1095 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1096 hprintk("can't set latency timer to %d\n", timer);
1099 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1100 hprintk("can't set up page mapping\n");
1101 return -EINVAL;
1104 /* 4.4 card reset */
1105 he_writel(he_dev, 0x0, RESET_CNTL);
1106 he_writel(he_dev, 0xff, RESET_CNTL);
1108 udelay(16*1000); /* 16 ms */
1109 status = he_readl(he_dev, RESET_CNTL);
1110 if ((status & BOARD_RST_STATUS) == 0) {
1111 hprintk("reset failed\n");
1112 return -EINVAL;
1115 /* 4.5 set bus width */
1116 host_cntl = he_readl(he_dev, HOST_CNTL);
1117 if (host_cntl & PCI_BUS_SIZE64)
1118 gen_cntl_0 |= ENBL_64;
1119 else
1120 gen_cntl_0 &= ~ENBL_64;
1122 if (disable64 == 1) {
1123 hprintk("disabling 64-bit pci bus transfers\n");
1124 gen_cntl_0 &= ~ENBL_64;
1127 if (gen_cntl_0 & ENBL_64)
1128 hprintk("64-bit transfers enabled\n");
1130 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1132 /* 4.7 read prom contents */
1133 for (i = 0; i < PROD_ID_LEN; ++i)
1134 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1136 he_dev->media = read_prom_byte(he_dev, MEDIA);
1138 for (i = 0; i < 6; ++i)
1139 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1141 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1142 he_dev->prod_id,
1143 he_dev->media & 0x40 ? "SM" : "MM",
1144 dev->esi[0],
1145 dev->esi[1],
1146 dev->esi[2],
1147 dev->esi[3],
1148 dev->esi[4],
1149 dev->esi[5]);
1150 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1151 ATM_OC12_PCR : ATM_OC3_PCR;
1153 /* 4.6 set host endianness */
1154 lb_swap = he_readl(he_dev, LB_SWAP);
1155 if (he_is622(he_dev))
1156 lb_swap &= ~XFER_SIZE; /* 4 cells */
1157 else
1158 lb_swap |= XFER_SIZE; /* 8 cells */
1159 #ifdef __BIG_ENDIAN
1160 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1161 #else
1162 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1163 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1164 #endif /* __BIG_ENDIAN */
1165 he_writel(he_dev, lb_swap, LB_SWAP);
1167 /* 4.8 sdram controller initialization */
1168 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1170 /* 4.9 initialize rnum value */
1171 lb_swap |= SWAP_RNUM_MAX(0xf);
1172 he_writel(he_dev, lb_swap, LB_SWAP);
1174 /* 4.10 initialize the interrupt queues */
1175 if ((err = he_init_irq(he_dev)) != 0)
1176 return err;
1178 #ifdef USE_TASKLET
1179 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
1180 #endif
1181 spin_lock_init(&he_dev->global_lock);
1183 /* 4.11 enable pci bus controller state machines */
1184 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1185 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1186 he_writel(he_dev, host_cntl, HOST_CNTL);
1188 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1189 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1191 /*
1192 * atm network controller initialization
1193 */
1195 /* 5.1.1 generic configuration state */
1197 /*
1198 * local (cell) buffer memory map
1200 * HE155 HE622
1202 * 0 ____________1023 bytes 0 _______________________2047 bytes
1203 * | | | | |
1204 * | utility | | rx0 | |
1205 * 5|____________| 255|___________________| u |
1206 * 6| | 256| | t |
1207 * | | | | i |
1208 * | rx0 | row | tx | l |
1209 * | | | | i |
1210 * | | 767|___________________| t |
1211 * 517|____________| 768| | y |
1212 * row 518| | | rx1 | |
1213 * | | 1023|___________________|___|
1214 * | |
1215 * | tx |
1216 * | |
1217 * | |
1218 * 1535|____________|
1219 * 1536| |
1220 * | rx1 |
1221 * 2047|____________|
1223 */
1225 /* total 4096 connections */
1226 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1227 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1229 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1230 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1231 return -ENODEV;
1234 if (nvpibits != -1) {
1235 he_dev->vpibits = nvpibits;
1236 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1239 if (nvcibits != -1) {
1240 he_dev->vcibits = nvcibits;
1241 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1245 if (he_is622(he_dev)) {
1246 he_dev->cells_per_row = 40;
1247 he_dev->bytes_per_row = 2048;
1248 he_dev->r0_numrows = 256;
1249 he_dev->tx_numrows = 512;
1250 he_dev->r1_numrows = 256;
1251 he_dev->r0_startrow = 0;
1252 he_dev->tx_startrow = 256;
1253 he_dev->r1_startrow = 768;
1254 } else {
1255 he_dev->cells_per_row = 20;
1256 he_dev->bytes_per_row = 1024;
1257 he_dev->r0_numrows = 512;
1258 he_dev->tx_numrows = 1018;
1259 he_dev->r1_numrows = 512;
1260 he_dev->r0_startrow = 6;
1261 he_dev->tx_startrow = 518;
1262 he_dev->r1_startrow = 1536;
1265 he_dev->cells_per_lbuf = 4;
1266 he_dev->buffer_limit = 4;
1267 he_dev->r0_numbuffs = he_dev->r0_numrows *
1268 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1269 if (he_dev->r0_numbuffs > 2560)
1270 he_dev->r0_numbuffs = 2560;
1272 he_dev->r1_numbuffs = he_dev->r1_numrows *
1273 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1274 if (he_dev->r1_numbuffs > 2560)
1275 he_dev->r1_numbuffs = 2560;
1277 he_dev->tx_numbuffs = he_dev->tx_numrows *
1278 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1279 if (he_dev->tx_numbuffs > 5120)
1280 he_dev->tx_numbuffs = 5120;
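/*
 * Annotation (not in the original source): worked numbers.  With
 * cells_per_lbuf = 4, an OC-3 card has 20 / 4 = 5 lbufs per row, so
 * r0 = r1 = 512 * 5 = 2560 buffers (exactly the cap) and
 * tx = 1018 * 5 = 5090; an OC-12 card has 40 / 4 = 10 per row, giving
 * 256 * 10 = 2560 for rx0/rx1 and 512 * 10 = 5120 for tx.
 */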
1282 /* 5.1.2 configure hardware dependent registers */
1284 he_writel(he_dev,
1285 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1286 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1287 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1288 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1289 LBARB);
1291 he_writel(he_dev, BANK_ON |
1292 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1293 SDRAMCON);
1295 he_writel(he_dev,
1296 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1297 RM_RW_WAIT(1), RCMCONFIG);
1298 he_writel(he_dev,
1299 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1300 TM_RW_WAIT(1), TCMCONFIG);
1302 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1304 he_writel(he_dev,
1305 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1306 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1307 RX_VALVP(he_dev->vpibits) |
1308 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1310 he_writel(he_dev, DRF_THRESH(0x20) |
1311 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1312 TX_VCI_MASK(he_dev->vcibits) |
1313 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1315 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1317 he_writel(he_dev, PHY_INT_ENB |
1318 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1319 RH_CONFIG);
1321 /* 5.1.3 initialize connection memory */
1323 for (i = 0; i < TCM_MEM_SIZE; ++i)
1324 he_writel_tcm(he_dev, 0, i);
1326 for (i = 0; i < RCM_MEM_SIZE; ++i)
1327 he_writel_rcm(he_dev, 0, i);
1329 /*
1330 * transmit connection memory map
1332 * tx memory
1333 * 0x0 ___________________
1334 * | |
1335 * | |
1336 * | TSRa |
1337 * | |
1338 * | |
1339 * 0x8000|___________________|
1340 * | |
1341 * | TSRb |
1342 * 0xc000|___________________|
1343 * | |
1344 * | TSRc |
1345 * 0xe000|___________________|
1346 * | TSRd |
1347 * 0xf000|___________________|
1348 * | tmABR |
1349 * 0x10000|___________________|
1350 * | |
1351 * | tmTPD |
1352 * |___________________|
1353 * | |
1354 * ....
1355 * 0x1ffff|___________________|
1358 */
1360 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1361 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1362 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1363 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1364 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1367 /*
1368 * receive connection memory map
1370 * 0x0 ___________________
1371 * | |
1372 * | |
1373 * | RSRa |
1374 * | |
1375 * | |
1376 * 0x8000|___________________|
1377 * | |
1378 * | rx0/1 |
1379 * | LBM | link lists of local
1380 * | tx | buffer memory
1381 * | |
1382 * 0xd000|___________________|
1383 * | |
1384 * | rmABR |
1385 * 0xe000|___________________|
1386 * | |
1387 * | RSRb |
1388 * |___________________|
1389 * | |
1390 * ....
1391 * 0xffff|___________________|
1392 */
1394 he_writel(he_dev, 0x08000, RCMLBM_BA);
1395 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1396 he_writel(he_dev, 0x0d800, RCMABR_BA);
1398 /* 5.1.4 initialize local buffer free pools linked lists */
1400 he_init_rx_lbfp0(he_dev);
1401 he_init_rx_lbfp1(he_dev);
1403 he_writel(he_dev, 0x0, RLBC_H);
1404 he_writel(he_dev, 0x0, RLBC_T);
1405 he_writel(he_dev, 0x0, RLBC_H2);
1407 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1408 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1410 he_init_tx_lbfp(he_dev);
1412 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1414 /* 5.1.5 initialize intermediate receive queues */
1416 if (he_is622(he_dev)) {
1417 he_writel(he_dev, 0x000f, G0_INMQ_S);
1418 he_writel(he_dev, 0x200f, G0_INMQ_L);
1420 he_writel(he_dev, 0x001f, G1_INMQ_S);
1421 he_writel(he_dev, 0x201f, G1_INMQ_L);
1423 he_writel(he_dev, 0x002f, G2_INMQ_S);
1424 he_writel(he_dev, 0x202f, G2_INMQ_L);
1426 he_writel(he_dev, 0x003f, G3_INMQ_S);
1427 he_writel(he_dev, 0x203f, G3_INMQ_L);
1429 he_writel(he_dev, 0x004f, G4_INMQ_S);
1430 he_writel(he_dev, 0x204f, G4_INMQ_L);
1432 he_writel(he_dev, 0x005f, G5_INMQ_S);
1433 he_writel(he_dev, 0x205f, G5_INMQ_L);
1435 he_writel(he_dev, 0x006f, G6_INMQ_S);
1436 he_writel(he_dev, 0x206f, G6_INMQ_L);
1438 he_writel(he_dev, 0x007f, G7_INMQ_S);
1439 he_writel(he_dev, 0x207f, G7_INMQ_L);
1440 } else {
1441 he_writel(he_dev, 0x0000, G0_INMQ_S);
1442 he_writel(he_dev, 0x0008, G0_INMQ_L);
1444 he_writel(he_dev, 0x0001, G1_INMQ_S);
1445 he_writel(he_dev, 0x0009, G1_INMQ_L);
1447 he_writel(he_dev, 0x0002, G2_INMQ_S);
1448 he_writel(he_dev, 0x000a, G2_INMQ_L);
1450 he_writel(he_dev, 0x0003, G3_INMQ_S);
1451 he_writel(he_dev, 0x000b, G3_INMQ_L);
1453 he_writel(he_dev, 0x0004, G4_INMQ_S);
1454 he_writel(he_dev, 0x000c, G4_INMQ_L);
1456 he_writel(he_dev, 0x0005, G5_INMQ_S);
1457 he_writel(he_dev, 0x000d, G5_INMQ_L);
1459 he_writel(he_dev, 0x0006, G6_INMQ_S);
1460 he_writel(he_dev, 0x000e, G6_INMQ_L);
1462 he_writel(he_dev, 0x0007, G7_INMQ_S);
1463 he_writel(he_dev, 0x000f, G7_INMQ_L);
1466 /* 5.1.6 application tunable parameters */
1468 he_writel(he_dev, 0x0, MCC);
1469 he_writel(he_dev, 0x0, OEC);
1470 he_writel(he_dev, 0x0, DCC);
1471 he_writel(he_dev, 0x0, CEC);
1473 /* 5.1.7 cs block initialization */
1475 he_init_cs_block(he_dev);
1477 /* 5.1.8 cs block connection memory initialization */
1479 if (he_init_cs_block_rcm(he_dev) < 0)
1480 return -ENOMEM;
1482 /* 5.1.10 initialize host structures */
1484 he_init_tpdrq(he_dev);
1486 #ifdef USE_TPD_POOL
1487 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1488 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1489 if (he_dev->tpd_pool == NULL) {
1490 hprintk("unable to create tpd pci_pool\n");
1491 return -ENOMEM;
1494 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1495 #else
1496 he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
1497 CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
1498 if (!he_dev->tpd_base)
1499 return -ENOMEM;
1501 for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1502 he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
1503 he_dev->tpd_base[i].inuse = 0;
1506 he_dev->tpd_head = he_dev->tpd_base;
1507 he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
1508 #endif
1510 if (he_init_group(he_dev, 0) != 0)
1511 return -ENOMEM;
1513 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1514 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1515 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1516 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1517 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1518 G0_RBPS_BS + (group * 32));
1520 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1521 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1522 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1523 G0_RBPL_QI + (group * 32));
1524 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1526 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1527 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1528 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1529 G0_RBRQ_Q + (group * 16));
1530 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1532 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1533 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1534 he_writel(he_dev, TBRQ_THRESH(0x1),
1535 G0_TBRQ_THRESH + (group * 16));
1536 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1539 /* host status page */
1541 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1542 sizeof(struct he_hsp), &he_dev->hsp_phys);
1543 if (he_dev->hsp == NULL) {
1544 hprintk("failed to allocate host status page\n");
1545 return -ENOMEM;
1547 memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1548 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1550 /* initialize framer */
1552 #ifdef CONFIG_ATM_HE_USE_SUNI
1553 suni_init(he_dev->atm_dev);
1554 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1555 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1556 #endif /* CONFIG_ATM_HE_USE_SUNI */
1558 if (sdh) {
1559 /* this really should be in suni.c but for now... */
1560 int val;
1562 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1563 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1564 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1567 /* 5.1.12 enable transmit and receive */
1569 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1570 reg |= TX_ENABLE|ER_ENABLE;
1571 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1573 reg = he_readl(he_dev, RC_CONFIG);
1574 reg |= RX_ENABLE;
1575 he_writel(he_dev, reg, RC_CONFIG);
1577 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1578 he_dev->cs_stper[i].inuse = 0;
1579 he_dev->cs_stper[i].pcr = -1;
1581 he_dev->total_bw = 0;
1584 /* atm linux initialization */
1586 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1587 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1589 he_dev->irq_peak = 0;
1590 he_dev->rbrq_peak = 0;
1591 he_dev->rbpl_peak = 0;
1592 he_dev->tbrq_peak = 0;
1594 HPRINTK("hell bent for leather!\n");
1596 return 0;
1599 static void
1600 he_stop(struct he_dev *he_dev)
1602 u16 command;
1603 u32 gen_cntl_0, reg;
1604 struct pci_dev *pci_dev;
1606 pci_dev = he_dev->pci_dev;
1608 /* disable interrupts */
1610 if (he_dev->membase) {
1611 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1612 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1613 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1615 #ifdef USE_TASKLET
1616 tasklet_disable(&he_dev->tasklet);
1617 #endif
1619 /* disable recv and transmit */
1621 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1622 reg &= ~(TX_ENABLE|ER_ENABLE);
1623 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1625 reg = he_readl(he_dev, RC_CONFIG);
1626 reg &= ~(RX_ENABLE);
1627 he_writel(he_dev, reg, RC_CONFIG);
1630 #ifdef CONFIG_ATM_HE_USE_SUNI
1631 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1632 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1633 #endif /* CONFIG_ATM_HE_USE_SUNI */
1635 if (he_dev->irq)
1636 free_irq(he_dev->irq, he_dev);
1638 if (he_dev->irq_base)
1639 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1640 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1642 if (he_dev->hsp)
1643 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1644 he_dev->hsp, he_dev->hsp_phys);
1646 if (he_dev->rbpl_base) {
1647 #ifdef USE_RBPL_POOL
1648 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1649 void *cpuaddr = he_dev->rbpl_virt[i].virt;
1650 dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1652 pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1654 #else
1655 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1656 * CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
1657 #endif
1658 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1659 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1662 #ifdef USE_RBPL_POOL
1663 if (he_dev->rbpl_pool)
1664 pci_pool_destroy(he_dev->rbpl_pool);
1665 #endif
1667 #ifdef USE_RBPS
1668 if (he_dev->rbps_base) {
1669 #ifdef USE_RBPS_POOL
1670 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1671 void *cpuaddr = he_dev->rbps_virt[i].virt;
1672 dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1674 pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1676 #else
1677 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1678 * CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
1679 #endif
1680 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1681 * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1684 #ifdef USE_RBPS_POOL
1685 if (he_dev->rbps_pool)
1686 pci_pool_destroy(he_dev->rbps_pool);
1687 #endif
1689 #endif /* USE_RBPS */
1691 if (he_dev->rbrq_base)
1692 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1693 he_dev->rbrq_base, he_dev->rbrq_phys);
1695 if (he_dev->tbrq_base)
1696 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1697 he_dev->tbrq_base, he_dev->tbrq_phys);
1699 if (he_dev->tpdrq_base)
1700 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1701 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1703 #ifdef USE_TPD_POOL
1704 if (he_dev->tpd_pool)
1705 pci_pool_destroy(he_dev->tpd_pool);
1706 #else
1707 if (he_dev->tpd_base)
1708 pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
1709 he_dev->tpd_base, he_dev->tpd_base_phys);
1710 #endif
1712 if (he_dev->pci_dev) {
1713 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1714 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1715 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1718 if (he_dev->membase)
1719 iounmap(he_dev->membase);
1722 static struct he_tpd *
1723 __alloc_tpd(struct he_dev *he_dev)
1725 #ifdef USE_TPD_POOL
1726 struct he_tpd *tpd;
1727 dma_addr_t dma_handle;
1729 tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
1730 if (tpd == NULL)
1731 return NULL;
1733 tpd->status = TPD_ADDR(dma_handle);
1734 tpd->reserved = 0;
1735 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1736 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1737 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1739 return tpd;
1740 #else
1741 int i;
1743 for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1744 ++he_dev->tpd_head;
1745 if (he_dev->tpd_head > he_dev->tpd_end) {
1746 he_dev->tpd_head = he_dev->tpd_base;
1749 if (!he_dev->tpd_head->inuse) {
1750 he_dev->tpd_head->inuse = 1;
1751 he_dev->tpd_head->status &= TPD_MASK;
1752 he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1753 he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1754 he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1755 return he_dev->tpd_head;
1758 hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1759 return NULL;
1760 #endif
1763 #define AAL5_LEN(buf,len) \
1764 ((((unsigned char *)(buf))[(len)-6] << 8) | \
1765 (((unsigned char *)(buf))[(len)-5]))
1767 /* 2.10.1.2 receive
1769 * aal5 packets can optionally return the tcp checksum in the lower
1770 * 16 bits of the crc (RSR0_TCP_CKSUM)
1771 */
1773 #define TCP_CKSUM(buf,len) \
1774 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1775 (((unsigned char *)(buf))[(len-1)]))
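/*
 * Annotation (not in the original source): an AAL5 CPCS-PDU ends in an
 * 8-byte trailer (UU, CPI, 2-byte length, 4-byte CRC-32).  The
 * big-endian length field therefore sits at offsets len-6 and len-5,
 * which is what AAL5_LEN() extracts, and the last two bytes -- normally
 * the low half of the CRC -- are where the card can return the TCP
 * checksum picked up by TCP_CKSUM() (see the 2.10.1.2 note above).
 */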
1777 static int
1778 he_service_rbrq(struct he_dev *he_dev, int group)
1780 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1781 ((unsigned long)he_dev->rbrq_base |
1782 he_dev->hsp->group[group].rbrq_tail);
1783 struct he_rbp *rbp = NULL;
1784 unsigned cid, lastcid = -1;
1785 unsigned buf_len = 0;
1786 struct sk_buff *skb;
1787 struct atm_vcc *vcc = NULL;
1788 struct he_vcc *he_vcc;
1789 struct he_iovec *iov;
1790 int pdus_assembled = 0;
1791 int updated = 0;
1793 read_lock(&vcc_sklist_lock);
1794 while (he_dev->rbrq_head != rbrq_tail) {
1795 ++updated;
1797 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1798 he_dev->rbrq_head, group,
1799 RBRQ_ADDR(he_dev->rbrq_head),
1800 RBRQ_BUFLEN(he_dev->rbrq_head),
1801 RBRQ_CID(he_dev->rbrq_head),
1802 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1803 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1804 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1805 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1806 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1807 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1809 #ifdef USE_RBPS
1810 if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1811 rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1812 else
1813 #endif
1814 rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1816 buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1817 cid = RBRQ_CID(he_dev->rbrq_head);
1819 if (cid != lastcid)
1820 vcc = __find_vcc(he_dev, cid);
1821 lastcid = cid;
1823 if (vcc == NULL) {
1824 hprintk("vcc == NULL (cid 0x%x)\n", cid);
1825 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1826 rbp->status &= ~RBP_LOANED;
1828 goto next_rbrq_entry;
1831 he_vcc = HE_VCC(vcc);
1832 if (he_vcc == NULL) {
1833 hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
1834 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1835 rbp->status &= ~RBP_LOANED;
1836 goto next_rbrq_entry;
1839 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1840 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1841 atomic_inc(&vcc->stats->rx_drop);
1842 goto return_host_buffers;
1845 he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1846 he_vcc->iov_tail->iov_len = buf_len;
1847 he_vcc->pdu_len += buf_len;
1848 ++he_vcc->iov_tail;
1850 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1851 lastcid = -1;
1852 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1853 wake_up(&he_vcc->rx_waitq);
1854 goto return_host_buffers;
1857 #ifdef notdef
1858 if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1859 hprintk("iovec full! cid 0x%x\n", cid);
1860 goto return_host_buffers;
1862 #endif
1863 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1864 goto next_rbrq_entry;
1866 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1867 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1868 HPRINTK("%s%s (%d.%d)\n",
1869 RBRQ_CRC_ERR(he_dev->rbrq_head)
1870 ? "CRC_ERR " : "",
1871 RBRQ_LEN_ERR(he_dev->rbrq_head)
1872 ? "LEN_ERR" : "",
1873 vcc->vpi, vcc->vci);
1874 atomic_inc(&vcc->stats->rx_err);
1875 goto return_host_buffers;
1878 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1879 GFP_ATOMIC);
1880 if (!skb) {
1881 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1882 goto return_host_buffers;
1885 if (rx_skb_reserve > 0)
1886 skb_reserve(skb, rx_skb_reserve);
1888 __net_timestamp(skb);
1890 for (iov = he_vcc->iov_head;
1891 iov < he_vcc->iov_tail; ++iov) {
1892 #ifdef USE_RBPS
1893 if (iov->iov_base & RBP_SMALLBUF)
1894 memcpy(skb_put(skb, iov->iov_len),
1895 he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1896 else
1897 #endif
1898 memcpy(skb_put(skb, iov->iov_len),
1899 he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1902 switch (vcc->qos.aal) {
1903 case ATM_AAL0:
1904 /* 2.10.1.5 raw cell receive */
1905 skb->len = ATM_AAL0_SDU;
1906 skb->tail = skb->data + skb->len;
1907 break;
1908 case ATM_AAL5:
1909 /* 2.10.1.2 aal5 receive */
1911 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1912 skb->tail = skb->data + skb->len;
1913 #ifdef USE_CHECKSUM_HW
1914 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1915 skb->ip_summed = CHECKSUM_HW;
1916 skb->csum = TCP_CKSUM(skb->data,
1917 he_vcc->pdu_len);
1919 #endif
1920 break;
1923 #ifdef should_never_happen
1924 if (skb->len > vcc->qos.rxtp.max_sdu)
1925 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1926 #endif
1928 #ifdef notdef
1929 ATM_SKB(skb)->vcc = vcc;
1930 #endif
1931 spin_unlock(&he_dev->global_lock);
1932 vcc->push(vcc, skb);
1933 spin_lock(&he_dev->global_lock);
1935 atomic_inc(&vcc->stats->rx);
1937 return_host_buffers:
1938 ++pdus_assembled;
1940 for (iov = he_vcc->iov_head;
1941 iov < he_vcc->iov_tail; ++iov) {
1942 #ifdef USE_RBPS
1943 if (iov->iov_base & RBP_SMALLBUF)
1944 rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1945 else
1946 #endif
1947 rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1949 rbp->status &= ~RBP_LOANED;
1952 he_vcc->iov_tail = he_vcc->iov_head;
1953 he_vcc->pdu_len = 0;
1955 next_rbrq_entry:
1956 he_dev->rbrq_head = (struct he_rbrq *)
1957 ((unsigned long) he_dev->rbrq_base |
1958 RBRQ_MASK(++he_dev->rbrq_head));
1961 read_unlock(&vcc_sklist_lock);
1963 if (updated) {
1964 if (updated > he_dev->rbrq_peak)
1965 he_dev->rbrq_peak = updated;
1967 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1968 G0_RBRQ_H + (group * 16));
1971 return pdus_assembled;
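/*
 * Reclaim TPDs that the adapter has finished with: look each returned
 * descriptor up (in the outstanding list when USE_TPD_POOL is set), unmap
 * its DMA buffers, release the skb, and wake anyone waiting on an EOS TPD.
 */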
1974 static void
1975 he_service_tbrq(struct he_dev *he_dev, int group)
1977 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1978 ((unsigned long)he_dev->tbrq_base |
1979 he_dev->hsp->group[group].tbrq_tail);
1980 struct he_tpd *tpd;
1981 int slot, updated = 0;
1982 #ifdef USE_TPD_POOL
1983 struct he_tpd *__tpd;
1984 #endif
1986 /* 2.1.6 transmit buffer return queue */
1988 while (he_dev->tbrq_head != tbrq_tail) {
1989 ++updated;
1991 HPRINTK("tbrq%d 0x%x%s%s\n",
1992 group,
1993 TBRQ_TPD(he_dev->tbrq_head),
1994 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1995 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1996 #ifdef USE_TPD_POOL
1997 tpd = NULL;
1998 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1999 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
2000 tpd = __tpd;
2001 list_del(&__tpd->entry);
2002 break;
2006 if (tpd == NULL) {
2007 hprintk("unable to locate tpd for dma buffer %x\n",
2008 TBRQ_TPD(he_dev->tbrq_head));
2009 goto next_tbrq_entry;
2011 #else
2012 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2013 #endif
2015 if (TBRQ_EOS(he_dev->tbrq_head)) {
2016 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2017 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2018 if (tpd->vcc)
2019 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2021 goto next_tbrq_entry;
2024 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2025 if (tpd->iovec[slot].addr)
2026 pci_unmap_single(he_dev->pci_dev,
2027 tpd->iovec[slot].addr,
2028 tpd->iovec[slot].len & TPD_LEN_MASK,
2029 PCI_DMA_TODEVICE);
2030 if (tpd->iovec[slot].len & TPD_LST)
2031 break;
2035 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2036 if (tpd->vcc && tpd->vcc->pop)
2037 tpd->vcc->pop(tpd->vcc, tpd->skb);
2038 else
2039 dev_kfree_skb_any(tpd->skb);
2042 next_tbrq_entry:
2043 #ifdef USE_TPD_POOL
2044 if (tpd)
2045 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2046 #else
2047 tpd->inuse = 0;
2048 #endif
2049 he_dev->tbrq_head = (struct he_tbrq *)
2050 ((unsigned long) he_dev->tbrq_base |
2051 TBRQ_MASK(++he_dev->tbrq_head));
2054 if (updated) {
2055 if (updated > he_dev->tbrq_peak)
2056 he_dev->tbrq_peak = updated;
2058 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2059 G0_TBRQ_H + (group * 16));
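/*
 * Replenish the large receive buffer pool: advance the RBPL tail over
 * buffers that have been returned to the host (RBP_LOANED clear), mark
 * each one as loaned out again, and tell the adapter by writing the new
 * tail index to G0_RBPL_T.
 */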
2064 static void
2065 he_service_rbpl(struct he_dev *he_dev, int group)
2067 struct he_rbp *newtail;
2068 struct he_rbp *rbpl_head;
2069 int moved = 0;
2071 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2072 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2074 for (;;) {
2075 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2076 RBPL_MASK(he_dev->rbpl_tail+1));
2078 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2079 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2080 break;
2082 newtail->status |= RBP_LOANED;
2083 he_dev->rbpl_tail = newtail;
2084 ++moved;
2087 if (moved)
2088 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2091 #ifdef USE_RBPS
2092 static void
2093 he_service_rbps(struct he_dev *he_dev, int group)
2095 struct he_rbp *newtail;
2096 struct he_rbp *rbps_head;
2097 int moved = 0;
2099 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2100 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2102 for (;;) {
2103 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2104 RBPS_MASK(he_dev->rbps_tail+1));
2106 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2107 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2108 break;
2110 newtail->status |= RBP_LOANED;
2111 he_dev->rbps_tail = newtail;
2112 ++moved;
2115 if (moved)
2116 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2118 #endif /* USE_RBPS */
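/*
 * Deferred interrupt work: drain the IRQ event queue and dispatch each
 * event to the matching rbrq/tbrq/rbpl(/rbps) service routine or to the
 * PHY handler, then update IRQ0 so the adapter knows how far the queue
 * has been drained.
 */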
2120 static void
2121 he_tasklet(unsigned long data)
2123 unsigned long flags;
2124 struct he_dev *he_dev = (struct he_dev *) data;
2125 int group, type;
2126 int updated = 0;
2128 HPRINTK("tasklet (0x%lx)\n", data);
2129 #ifdef USE_TASKLET
2130 spin_lock_irqsave(&he_dev->global_lock, flags);
2131 #endif
2133 while (he_dev->irq_head != he_dev->irq_tail) {
2134 ++updated;
2136 type = ITYPE_TYPE(he_dev->irq_head->isw);
2137 group = ITYPE_GROUP(he_dev->irq_head->isw);
2139 switch (type) {
2140 case ITYPE_RBRQ_THRESH:
2141 HPRINTK("rbrq%d threshold\n", group);
2142 /* fall through */
2143 case ITYPE_RBRQ_TIMER:
2144 if (he_service_rbrq(he_dev, group)) {
2145 he_service_rbpl(he_dev, group);
2146 #ifdef USE_RBPS
2147 he_service_rbps(he_dev, group);
2148 #endif /* USE_RBPS */
2150 break;
2151 case ITYPE_TBRQ_THRESH:
2152 HPRINTK("tbrq%d threshold\n", group);
2153 /* fall through */
2154 case ITYPE_TPD_COMPLETE:
2155 he_service_tbrq(he_dev, group);
2156 break;
2157 case ITYPE_RBPL_THRESH:
2158 he_service_rbpl(he_dev, group);
2159 break;
2160 case ITYPE_RBPS_THRESH:
2161 #ifdef USE_RBPS
2162 he_service_rbps(he_dev, group);
2163 #endif /* USE_RBPS */
2164 break;
2165 case ITYPE_PHY:
2166 HPRINTK("phy interrupt\n");
2167 #ifdef CONFIG_ATM_HE_USE_SUNI
2168 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2169 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2170 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2171 spin_lock_irqsave(&he_dev->global_lock, flags);
2172 #endif
2173 break;
2174 case ITYPE_OTHER:
2175 switch (type|group) {
2176 case ITYPE_PARITY:
2177 hprintk("parity error\n");
2178 break;
2179 case ITYPE_ABORT:
2180 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2181 break;
2183 break;
2184 case ITYPE_TYPE(ITYPE_INVALID):
2185 /* see 8.1.1 -- check all queues */
2187 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2189 he_service_rbrq(he_dev, 0);
2190 he_service_rbpl(he_dev, 0);
2191 #ifdef USE_RBPS
2192 he_service_rbps(he_dev, 0);
2193 #endif /* USE_RBPS */
2194 he_service_tbrq(he_dev, 0);
2195 break;
2196 default:
2197 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2200 he_dev->irq_head->isw = ITYPE_INVALID;
2202 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2205 if (updated) {
2206 if (updated > he_dev->irq_peak)
2207 he_dev->irq_peak = updated;
2209 he_writel(he_dev,
2210 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2211 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2212 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2213 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2215 #ifdef USE_TASKLET
2216 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2217 #endif
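/*
 * Hard interrupt handler: pick up the adapter's current IRQ queue tail
 * (with the 8.1.2 errata workaround when the tail offset was not updated),
 * kick the tasklet (or run it inline), and ack the interrupt via INT_FIFO.
 */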
2220 static irqreturn_t
2221 he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2223 unsigned long flags;
2224 struct he_dev *he_dev = (struct he_dev * )dev_id;
2225 int handled = 0;
2227 if (he_dev == NULL)
2228 return IRQ_NONE;
2230 spin_lock_irqsave(&he_dev->global_lock, flags);
2232 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2233 (*he_dev->irq_tailoffset << 2));
2235 if (he_dev->irq_tail == he_dev->irq_head) {
2236 HPRINTK("tailoffset not updated?\n");
2237 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2238 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2239 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2242 #ifdef DEBUG
2243 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2244 hprintk("spurious (or shared) interrupt?\n");
2245 #endif
2247 if (he_dev->irq_head != he_dev->irq_tail) {
2248 handled = 1;
2249 #ifdef USE_TASKLET
2250 tasklet_schedule(&he_dev->tasklet);
2251 #else
2252 he_tasklet((unsigned long) he_dev);
2253 #endif
2254 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2255 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2257 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2258 return IRQ_RETVAL(handled);
2262 static __inline__ void
2263 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2265 struct he_tpdrq *new_tail;
2267 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2268 tpd, cid, he_dev->tpdrq_tail);
2270 /* new_tail = he_dev->tpdrq_tail; */
2271 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2272 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2274 /*
2275 * check to see if we are about to set the tail == head
2276 * if true, update the head pointer from the adapter
2277 * to see if this is really the case (reading the queue
2278 * head for every enqueue would be unnecessarily slow)
2279 */
2281 if (new_tail == he_dev->tpdrq_head) {
2282 he_dev->tpdrq_head = (struct he_tpdrq *)
2283 (((unsigned long)he_dev->tpdrq_base) |
2284 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2286 if (new_tail == he_dev->tpdrq_head) {
2287 int slot;
2289 hprintk("tpdrq full (cid 0x%x)\n", cid);
2290 /*
2291 * FIXME
2292 * push tpd onto a transmit backlog queue
2293 * after service_tbrq, service the backlog
2294 * for now, we just drop the pdu
2295 */
2296 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2297 if (tpd->iovec[slot].addr)
2298 pci_unmap_single(he_dev->pci_dev,
2299 tpd->iovec[slot].addr,
2300 tpd->iovec[slot].len & TPD_LEN_MASK,
2301 PCI_DMA_TODEVICE);
2303 if (tpd->skb) {
2304 if (tpd->vcc->pop)
2305 tpd->vcc->pop(tpd->vcc, tpd->skb);
2306 else
2307 dev_kfree_skb_any(tpd->skb);
2308 atomic_inc(&tpd->vcc->stats->tx_err);
2310 #ifdef USE_TPD_POOL
2311 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2312 #else
2313 tpd->inuse = 0;
2314 #endif
2315 return;
2319 /* 2.1.5 transmit packet descriptor ready queue */
2320 #ifdef USE_TPD_POOL
2321 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2322 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2323 #else
2324 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2325 (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2326 #endif
2327 he_dev->tpdrq_tail->cid = cid;
2328 wmb();
2330 he_dev->tpdrq_tail = new_tail;
2332 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2333 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
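/*
 * atmdev open: allocate per-VCC state and program the connection's
 * transmit (TSR0..TSR14) and receive (RSR0/1/4) state registers for the
 * requested traffic class; CBR connections also claim a cs_stper rate
 * controller slot.
 */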
2336 static int
2337 he_open(struct atm_vcc *vcc)
2339 unsigned long flags;
2340 struct he_dev *he_dev = HE_DEV(vcc->dev);
2341 struct he_vcc *he_vcc;
2342 int err = 0;
2343 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2344 short vpi = vcc->vpi;
2345 int vci = vcc->vci;
2347 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2348 return 0;
2350 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2352 set_bit(ATM_VF_ADDR, &vcc->flags);
2354 cid = he_mkcid(he_dev, vpi, vci);
2356 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2357 if (he_vcc == NULL) {
2358 hprintk("unable to allocate he_vcc during open\n");
2359 return -ENOMEM;
2362 he_vcc->iov_tail = he_vcc->iov_head;
2363 he_vcc->pdu_len = 0;
2364 he_vcc->rc_index = -1;
2366 init_waitqueue_head(&he_vcc->rx_waitq);
2367 init_waitqueue_head(&he_vcc->tx_waitq);
2369 vcc->dev_data = he_vcc;
2371 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2372 int pcr_goal;
2374 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2375 if (pcr_goal == 0)
2376 pcr_goal = he_dev->atm_dev->link_rate;
2377 if (pcr_goal < 0) /* means round down, technically */
2378 pcr_goal = -pcr_goal;
2380 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2382 switch (vcc->qos.aal) {
2383 case ATM_AAL5:
2384 tsr0_aal = TSR0_AAL5;
2385 tsr4 = TSR4_AAL5;
2386 break;
2387 case ATM_AAL0:
2388 tsr0_aal = TSR0_AAL0_SDU;
2389 tsr4 = TSR4_AAL0_SDU;
2390 break;
2391 default:
2392 err = -EINVAL;
2393 goto open_failed;
2396 spin_lock_irqsave(&he_dev->global_lock, flags);
2397 tsr0 = he_readl_tsr0(he_dev, cid);
2398 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2400 if (TSR0_CONN_STATE(tsr0) != 0) {
2401 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2402 err = -EBUSY;
2403 goto open_failed;
2406 switch (vcc->qos.txtp.traffic_class) {
2407 case ATM_UBR:
2408 /* 2.3.3.1 open connection ubr */
2410 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2411 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2412 break;
2414 case ATM_CBR:
2415 /* 2.3.3.2 open connection cbr */
2417 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2418 if ((he_dev->total_bw + pcr_goal)
2419 > (he_dev->atm_dev->link_rate * 9 / 10))
2421 err = -EBUSY;
2422 goto open_failed;
2425 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2427 /* find an unused cs_stper register */
2428 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2429 if (he_dev->cs_stper[reg].inuse == 0 ||
2430 he_dev->cs_stper[reg].pcr == pcr_goal)
2431 break;
2433 if (reg == HE_NUM_CS_STPER) {
2434 err = -EBUSY;
2435 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2436 goto open_failed;
2439 he_dev->total_bw += pcr_goal;
2441 he_vcc->rc_index = reg;
2442 ++he_dev->cs_stper[reg].inuse;
2443 he_dev->cs_stper[reg].pcr = pcr_goal;
2445 clock = he_is622(he_dev) ? 66667000 : 50000000;
2446 period = clock / pcr_goal;
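/*
 * The rate controller period is the cell clock divided by the target PCR.
 * For example (illustrative figures): a non-622 card uses a 50MHz clock,
 * so a pcr_goal of 100000 cells/s gives period = 50000000 / 100000 = 500;
 * half that period is converted with rate_to_atmf() and written to the
 * selected CS_STPER register below.
 */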
2448 HPRINTK("rc_index = %d period = %d\n",
2449 reg, period);
2451 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2452 CS_STPER0 + reg);
2453 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2455 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2456 TSR0_RC_INDEX(reg);
2458 break;
2459 default:
2460 err = -EINVAL;
2461 goto open_failed;
2464 spin_lock_irqsave(&he_dev->global_lock, flags);
2466 he_writel_tsr0(he_dev, tsr0, cid);
2467 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2468 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2469 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2470 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2471 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2473 he_writel_tsr3(he_dev, 0x0, cid);
2474 he_writel_tsr5(he_dev, 0x0, cid);
2475 he_writel_tsr6(he_dev, 0x0, cid);
2476 he_writel_tsr7(he_dev, 0x0, cid);
2477 he_writel_tsr8(he_dev, 0x0, cid);
2478 he_writel_tsr10(he_dev, 0x0, cid);
2479 he_writel_tsr11(he_dev, 0x0, cid);
2480 he_writel_tsr12(he_dev, 0x0, cid);
2481 he_writel_tsr13(he_dev, 0x0, cid);
2482 he_writel_tsr14(he_dev, 0x0, cid);
2483 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2484 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2487 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2488 unsigned aal;
2490 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2491 &HE_VCC(vcc)->rx_waitq);
2493 switch (vcc->qos.aal) {
2494 case ATM_AAL5:
2495 aal = RSR0_AAL5;
2496 break;
2497 case ATM_AAL0:
2498 aal = RSR0_RAWCELL;
2499 break;
2500 default:
2501 err = -EINVAL;
2502 goto open_failed;
2505 spin_lock_irqsave(&he_dev->global_lock, flags);
2507 rsr0 = he_readl_rsr0(he_dev, cid);
2508 if (rsr0 & RSR0_OPEN_CONN) {
2509 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2511 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2512 err = -EBUSY;
2513 goto open_failed;
2516 #ifdef USE_RBPS
2517 rsr1 = RSR1_GROUP(0);
2518 rsr4 = RSR4_GROUP(0);
2519 #else /* !USE_RBPS */
2520 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2521 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2522 #endif /* USE_RBPS */
2523 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2524 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2526 #ifdef USE_CHECKSUM_HW
2527 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2528 rsr0 |= RSR0_TCP_CKSUM;
2529 #endif
2531 he_writel_rsr4(he_dev, rsr4, cid);
2532 he_writel_rsr1(he_dev, rsr1, cid);
2533 /* 5.1.11 last parameter initialized should be
2534 the open/closed indication in rsr0 */
2535 he_writel_rsr0(he_dev,
2536 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2537 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2539 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2542 open_failed:
2544 if (err) {
2545 kfree(he_vcc);
2546 clear_bit(ATM_VF_ADDR, &vcc->flags);
2548 else
2549 set_bit(ATM_VF_READY, &vcc->flags);
2551 return err;
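/*
 * atmdev close: for the receive side, issue RSR0_CLOSE_CONN plus the
 * RXCON_CLOSE mailbox command and sleep on rx_waitq until the CON_CLOSED
 * completion arrives (or a 30s timeout expires); for the transmit side,
 * drain outstanding buffers, flush the connection with an end-of-stream
 * TPD, then release any CBR rate controller reservation.
 */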
2554 static void
2555 he_close(struct atm_vcc *vcc)
2557 unsigned long flags;
2558 DECLARE_WAITQUEUE(wait, current);
2559 struct he_dev *he_dev = HE_DEV(vcc->dev);
2560 struct he_tpd *tpd;
2561 unsigned cid;
2562 struct he_vcc *he_vcc = HE_VCC(vcc);
2563 #define MAX_RETRY 30
2564 int retry = 0, sleep = 1, tx_inuse;
2566 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2568 clear_bit(ATM_VF_READY, &vcc->flags);
2569 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2571 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2572 int timeout;
2574 HPRINTK("close rx cid 0x%x\n", cid);
2576 /* 2.7.2.2 close receive operation */
2578 /* wait for previous close (if any) to finish */
2580 spin_lock_irqsave(&he_dev->global_lock, flags);
2581 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2582 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2583 udelay(250);
2586 set_current_state(TASK_UNINTERRUPTIBLE);
2587 add_wait_queue(&he_vcc->rx_waitq, &wait);
2589 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2590 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2591 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2592 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2594 timeout = schedule_timeout(30*HZ);
2596 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2597 set_current_state(TASK_RUNNING);
2599 if (timeout == 0)
2600 hprintk("close rx timeout cid 0x%x\n", cid);
2602 HPRINTK("close rx cid 0x%x complete\n", cid);
2606 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2607 volatile unsigned tsr4, tsr0;
2608 int timeout;
2610 HPRINTK("close tx cid 0x%x\n", cid);
2612 /* 2.1.2
2614 * ... the host must first stop queueing packets to the TPDRQ
2615 * on the connection to be closed, then wait for all outstanding
2616 * packets to be transmitted and their buffers returned to the
2617 * TBRQ. When the last packet on the connection arrives in the
2618 * TBRQ, the host issues the close command to the adapter.
2619 */
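/* poll the socket's tx memory with exponential backoff (1ms doubling up
 * to 250ms, at most MAX_RETRY polls) until all queued data is accounted for */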
2621 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2622 (retry < MAX_RETRY)) {
2623 msleep(sleep);
2624 if (sleep < 250)
2625 sleep = sleep * 2;
2627 ++retry;
2630 if (tx_inuse)
2631 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2633 /* 2.3.1.1 generic close operations with flush */
2635 spin_lock_irqsave(&he_dev->global_lock, flags);
2636 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2637 /* also clears TSR4_SESSION_ENDED */
2639 switch (vcc->qos.txtp.traffic_class) {
2640 case ATM_UBR:
2641 he_writel_tsr1(he_dev,
2642 TSR1_MCR(rate_to_atmf(200000))
2643 | TSR1_PCR(0), cid);
2644 break;
2645 case ATM_CBR:
2646 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2647 break;
2649 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2651 tpd = __alloc_tpd(he_dev);
2652 if (tpd == NULL) {
2653 hprintk("close tx __alloc_tpd failed cid 0x%x\n", cid);
2654 goto close_tx_incomplete;
2656 tpd->status |= TPD_EOS | TPD_INT;
2657 tpd->skb = NULL;
2658 tpd->vcc = vcc;
2659 wmb();
2661 set_current_state(TASK_UNINTERRUPTIBLE);
2662 add_wait_queue(&he_vcc->tx_waitq, &wait);
2663 __enqueue_tpd(he_dev, tpd, cid);
2664 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2666 timeout = schedule_timeout(30*HZ);
2668 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2669 set_current_state(TASK_RUNNING);
2671 spin_lock_irqsave(&he_dev->global_lock, flags);
2673 if (timeout == 0) {
2674 hprintk("close tx timeout cid 0x%x\n", cid);
2675 goto close_tx_incomplete;
2678 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2679 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2680 udelay(250);
2683 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2684 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2685 udelay(250);
2688 close_tx_incomplete:
2690 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2691 int reg = he_vcc->rc_index;
2693 HPRINTK("cs_stper reg = %d\n", reg);
2695 if (he_dev->cs_stper[reg].inuse == 0)
2696 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2697 else
2698 --he_dev->cs_stper[reg].inuse;
2700 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2702 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2704 HPRINTK("close tx cid 0x%x complete\n", cid);
2707 kfree(he_vcc);
2709 clear_bit(ATM_VF_ADDR, &vcc->flags);
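/*
 * atmdev send: validate the sdu, build a TPD (one iovec per fragment when
 * USE_SCATTERGATHER is enabled, chaining extra TPDs if a packet needs more
 * than TPD_MAXIOV slots), DMA-map the data and enqueue it on the TPDRQ.
 */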
2712 static int
2713 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2715 unsigned long flags;
2716 struct he_dev *he_dev = HE_DEV(vcc->dev);
2717 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2718 struct he_tpd *tpd;
2719 #ifdef USE_SCATTERGATHER
2720 int i, slot = 0;
2721 #endif
2723 #define HE_TPD_BUFSIZE 0xffff
2725 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2727 if ((skb->len > HE_TPD_BUFSIZE) ||
2728 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2729 hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2730 if (vcc->pop)
2731 vcc->pop(vcc, skb);
2732 else
2733 dev_kfree_skb_any(skb);
2734 atomic_inc(&vcc->stats->tx_err);
2735 return -EINVAL;
2738 #ifndef USE_SCATTERGATHER
2739 if (skb_shinfo(skb)->nr_frags) {
2740 hprintk("no scatter/gather support\n");
2741 if (vcc->pop)
2742 vcc->pop(vcc, skb);
2743 else
2744 dev_kfree_skb_any(skb);
2745 atomic_inc(&vcc->stats->tx_err);
2746 return -EINVAL;
2748 #endif
2749 spin_lock_irqsave(&he_dev->global_lock, flags);
2751 tpd = __alloc_tpd(he_dev);
2752 if (tpd == NULL) {
2753 if (vcc->pop)
2754 vcc->pop(vcc, skb);
2755 else
2756 dev_kfree_skb_any(skb);
2757 atomic_inc(&vcc->stats->tx_err);
2758 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2759 return -ENOMEM;
2762 if (vcc->qos.aal == ATM_AAL5)
2763 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2764 else {
2765 char *pti_clp = (void *) (skb->data + 3);
2766 int clp, pti;
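/* raw (AAL0) cells arrive as 52-byte sdus: a 4-byte header (no HEC)
 * followed by the 48-byte payload. The PTI/CLP bits live in the fourth
 * header byte; copy them into the TPD status, then strip the header so
 * only the payload is handed to the adapter. */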
2768 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2769 clp = (*pti_clp & ATM_HDR_CLP);
2770 tpd->status |= TPD_CELLTYPE(pti);
2771 if (clp)
2772 tpd->status |= TPD_CLP;
2774 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2777 #ifdef USE_SCATTERGATHER
2778 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2779 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2780 tpd->iovec[slot].len = skb->len - skb->data_len;
2781 ++slot;
2783 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2784 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2786 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2787 tpd->vcc = vcc;
2788 tpd->skb = NULL; /* not the last fragment
2789 so don't ->push() yet */
2790 wmb();
2792 __enqueue_tpd(he_dev, tpd, cid);
2793 tpd = __alloc_tpd(he_dev);
2794 if (tpd == NULL) {
2795 if (vcc->pop)
2796 vcc->pop(vcc, skb);
2797 else
2798 dev_kfree_skb_any(skb);
2799 atomic_inc(&vcc->stats->tx_err);
2800 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2801 return -ENOMEM;
2803 tpd->status |= TPD_USERCELL;
2804 slot = 0;
2807 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2808 (void *) page_address(frag->page) + frag->page_offset,
2809 frag->size, PCI_DMA_TODEVICE);
2810 tpd->iovec[slot].len = frag->size;
2811 ++slot;
2815 tpd->iovec[slot - 1].len |= TPD_LST;
2816 #else
2817 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2818 tpd->length0 = skb->len | TPD_LST;
2819 #endif
2820 tpd->status |= TPD_INT;
2822 tpd->vcc = vcc;
2823 tpd->skb = skb;
2824 wmb();
2825 ATM_SKB(skb)->vcc = vcc;
2827 __enqueue_tpd(he_dev, tpd, cid);
2828 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2830 atomic_inc(&vcc->stats->tx);
2832 return 0;
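/*
 * atmdev ioctl: HE_GET_REG lets a privileged user peek PCI, RCM, TCM or
 * mailbox registers; anything else is passed through to the PHY driver
 * when one is attached.
 */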
2835 static int
2836 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2838 unsigned long flags;
2839 struct he_dev *he_dev = HE_DEV(atm_dev);
2840 struct he_ioctl_reg reg;
2841 int err = 0;
2843 switch (cmd) {
2844 case HE_GET_REG:
2845 if (!capable(CAP_NET_ADMIN))
2846 return -EPERM;
2848 if (copy_from_user(&reg, arg,
2849 sizeof(struct he_ioctl_reg)))
2850 return -EFAULT;
2852 spin_lock_irqsave(&he_dev->global_lock, flags);
2853 switch (reg.type) {
2854 case HE_REGTYPE_PCI:
2855 reg.val = he_readl(he_dev, reg.addr);
2856 break;
2857 case HE_REGTYPE_RCM:
2858 reg.val =
2859 he_readl_rcm(he_dev, reg.addr);
2860 break;
2861 case HE_REGTYPE_TCM:
2862 reg.val =
2863 he_readl_tcm(he_dev, reg.addr);
2864 break;
2865 case HE_REGTYPE_MBOX:
2866 reg.val =
2867 he_readl_mbox(he_dev, reg.addr);
2868 break;
2869 default:
2870 err = -EINVAL;
2871 break;
2873 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2874 if (err == 0)
2875 if (copy_to_user(arg, &reg,
2876 sizeof(struct he_ioctl_reg)))
2877 return -EFAULT;
2878 break;
2879 default:
2880 #ifdef CONFIG_ATM_HE_USE_SUNI
2881 if (atm_dev->phy && atm_dev->phy->ioctl)
2882 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2883 #else /* CONFIG_ATM_HE_USE_SUNI */
2884 err = -EINVAL;
2885 #endif /* CONFIG_ATM_HE_USE_SUNI */
2886 break;
2889 return err;
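/*
 * SUNI framer register accessors: framer registers sit word-spaced behind
 * the FRAMER aperture, hence the addr*4 scaling on each access.
 */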
2892 static void
2893 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2895 unsigned long flags;
2896 struct he_dev *he_dev = HE_DEV(atm_dev);
2898 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2900 spin_lock_irqsave(&he_dev->global_lock, flags);
2901 he_writel(he_dev, val, FRAMER + (addr*4));
2902 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2903 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2907 static unsigned char
2908 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2910 unsigned long flags;
2911 struct he_dev *he_dev = HE_DEV(atm_dev);
2912 unsigned reg;
2914 spin_lock_irqsave(&he_dev->global_lock, flags);
2915 reg = he_readl(he_dev, FRAMER + (addr*4));
2916 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2918 HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
2919 return reg;
2922 static int
2923 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2925 unsigned long flags;
2926 struct he_dev *he_dev = HE_DEV(dev);
2927 int left, i;
2928 #ifdef notdef
2929 struct he_rbrq *rbrq_tail;
2930 struct he_tpdrq *tpdrq_head;
2931 int rbpl_head, rbpl_tail, inuse;
2932 #endif
2933 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2936 left = *pos;
2937 if (!left--)
2938 return sprintf(page, "%s\n", version);
2940 if (!left--)
2941 return sprintf(page, "%s%s\n\n",
2942 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2944 if (!left--)
2945 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2947 spin_lock_irqsave(&he_dev->global_lock, flags);
2948 mcc += he_readl(he_dev, MCC);
2949 oec += he_readl(he_dev, OEC);
2950 dcc += he_readl(he_dev, DCC);
2951 cec += he_readl(he_dev, CEC);
2952 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2954 if (!left--)
2955 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2956 mcc, oec, dcc, cec);
2958 if (!left--)
2959 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2960 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2962 if (!left--)
2963 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2964 CONFIG_TPDRQ_SIZE);
2966 if (!left--)
2967 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2968 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2970 if (!left--)
2971 return sprintf(page, "tbrq_size = %d peak = %d\n",
2972 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2975 #ifdef notdef
2976 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2977 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2979 inuse = rbpl_head - rbpl_tail;
2980 if (inuse < 0)
2981 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2982 inuse /= sizeof(struct he_rbp);
2984 if (!left--)
2985 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2986 CONFIG_RBPL_SIZE, inuse);
2987 #endif
2989 if (!left--)
2990 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2992 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2993 if (!left--)
2994 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2995 he_dev->cs_stper[i].pcr,
2996 he_dev->cs_stper[i].inuse);
2998 if (!left--)
2999 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
3000 he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
3002 return 0;
3005 /* eeprom routines -- see 4.7 */
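/*
 * The serial EEPROM is bit-banged through the low bits of HOST_CNTL: a
 * READ opcode is clocked out from readtab[], the 8-bit address is shifted
 * out msb first using clocktab[] transitions, and the result is clocked
 * back in one bit per clock cycle via ID_DOUT.
 */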
3007 u8
3008 read_prom_byte(struct he_dev *he_dev, int addr)
3010 u32 val = 0, tmp_read = 0;
3011 int i, j = 0;
3012 u8 byte_read = 0;
3014 val = readl(he_dev->membase + HOST_CNTL);
3015 val &= 0xFFFFE0FF;
3017 /* Turn on write enable */
3018 val |= 0x800;
3019 he_writel(he_dev, val, HOST_CNTL);
3021 /* Send READ instruction */
3022 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
3023 he_writel(he_dev, val | readtab[i], HOST_CNTL);
3024 udelay(EEPROM_DELAY);
3027 /* Next, we need to send the byte address to read from */
3028 for (i = 7; i >= 0; i--) {
3029 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3030 udelay(EEPROM_DELAY);
3031 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3032 udelay(EEPROM_DELAY);
3035 j = 0;
3037 val &= 0xFFFFF7FF; /* Turn off write enable */
3038 he_writel(he_dev, val, HOST_CNTL);
3040 /* Now, we can read data from the EEPROM by clocking it in */
3041 for (i = 7; i >= 0; i--) {
3042 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3043 udelay(EEPROM_DELAY);
3044 tmp_read = he_readl(he_dev, HOST_CNTL);
3045 byte_read |= (unsigned char)
3046 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3047 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3048 udelay(EEPROM_DELAY);
3051 he_writel(he_dev, val | ID_CS, HOST_CNTL);
3052 udelay(EEPROM_DELAY);
3054 return byte_read;
3057 MODULE_LICENSE("GPL");
3058 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3059 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3060 module_param(disable64, bool, 0);
3061 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3062 module_param(nvpibits, short, 0);
3063 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3064 module_param(nvcibits, short, 0);
3065 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3066 module_param(rx_skb_reserve, short, 0);
3067 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3068 module_param(irq_coalesce, bool, 0);
3069 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3070 module_param(sdh, bool, 0);
3071 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
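/*
 * Example (hypothetical values): the parameters above are set at module
 * load time, e.g.
 *
 *	modprobe he nvcibits=10 rx_skb_reserve=32 irq_coalesce=0
 */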
3073 static struct pci_device_id he_pci_tbl[] = {
3074 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3075 0, 0, 0 },
3076 { 0, }
3077 };
3079 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3081 static struct pci_driver he_driver = {
3082 .name = "he",
3083 .probe = he_init_one,
3084 .remove = __devexit_p(he_remove_one),
3085 .id_table = he_pci_tbl,
3086 };
3088 static int __init he_init(void)
3090 return pci_register_driver(&he_driver);
3093 static void __exit he_cleanup(void)
3095 pci_unregister_driver(&he_driver);
3098 module_init(he_init);
3099 module_exit(he_cleanup);