ia64/linux-2.6.18-xen.hg

view drivers/net/b44.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
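
A minimal user-space sketch of the retry policy described above (the
names here are illustrative stand-ins, not the driver's real
interfaces; the actual code re-arms the balloon timer and issues the
hypervisor's populate-physmap operation):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the hypervisor call: under host memory pressure it may
 * grant anywhere from 0 up to 'want' pages. */
static long try_increase_reservation(long want)
{
	return rand() % (want + 1);
}

int main(void)
{
	long current_pages = 1000, target = 1400;

	srand(42);
	while (current_pages < target) {
		long want = target - current_pages;
		long got = try_increase_reservation(want);

		/* Keep whatever was granted, even on partial success,
		 * rather than returning it and recording a hard limit. */
		current_pages += got;
		printf("wanted %ld, got %ld, now %ld/%ld\n",
		       want, got, current_pages, target);
		/* In the driver: re-arm the timer and retry later,
		 * exactly as when decreasing the reservation. */
	}
	return 0;
}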

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
line source
1 /* b44.c: Broadcom 4400 device driver.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
6 *
7 * Distribute under GPL.
8 */
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
25 #include <asm/io.h>
26 #include <asm/irq.h>
28 #include "b44.h"
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.01"
33 #define DRV_MODULE_RELDATE "Jun 16, 2006"
35 #define B44_DEF_MSG_ENABLE \
36 (NETIF_MSG_DRV | \
37 NETIF_MSG_PROBE | \
38 NETIF_MSG_LINK | \
39 NETIF_MSG_TIMER | \
40 NETIF_MSG_IFDOWN | \
41 NETIF_MSG_IFUP | \
42 NETIF_MSG_RX_ERR | \
43 NETIF_MSG_TX_ERR)
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
47 */
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
57 B44_RX_RING_SIZE)
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
61 B44_TX_RING_SIZE)
62 #define B44_DMA_MASK 0x3fffffff
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
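
The TX ring macros above implement power-of-two ring arithmetic. The
following stand-alone user-space sketch (hypothetical, not part of the
driver) mirrors TX_BUFFS_AVAIL and NEXT_TX to show how the free-slot
count behaves as producer and consumer advance:

#include <stdio.h>

#define RING_SIZE 512			/* must stay a power of two */
#define RING_GAP(pending)	(RING_SIZE - (pending))
#define NEXT(n)			(((n) + 1) & (RING_SIZE - 1))

/* Same arithmetic as TX_BUFFS_AVAIL: free slots when at most
 * 'pending' descriptors may be outstanding. */
static unsigned bufs_avail(unsigned cons, unsigned prod, unsigned pending)
{
	return (cons <= prod) ? cons + pending - prod
			      : cons - prod - RING_GAP(pending);
}

int main(void)
{
	unsigned prod = 0, cons = 0, pending = RING_SIZE - 1;
	int i;

	printf("empty ring: %u free\n", bufs_avail(cons, prod, pending));
	for (i = 0; i < 5; i++)		/* driver queues five packets */
		prod = NEXT(prod);
	printf("after 5 tx: %u free\n", bufs_avail(cons, prod, pending));
	cons = NEXT(NEXT(cons));	/* chip completes two of them */
	printf("after 2 completions: %u free\n",
	       bufs_avail(cons, prod, pending));
	return 0;
}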
72 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
73 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 /* b44 internal pattern match filter info */
79 #define B44_PATTERN_BASE 0x400
80 #define B44_PATTERN_SIZE 0x80
81 #define B44_PMASK_BASE 0x600
82 #define B44_PMASK_SIZE 0x10
83 #define B44_MAX_PATTERNS 16
84 #define B44_ETHIPV6UDP_HLEN 62
85 #define B44_ETHIPV4UDP_HLEN 42
87 static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
91 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96 module_param(b44_debug, int, 0);
97 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
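/* Example: since b44_debug is a NETIF_MSG_* bitmap fed to
 * netif_msg_init(), loading the module with (hypothetical value)
 * "b44_debug=0x0007" would enable only the DRV (0x0001), PROBE
 * (0x0002) and LINK (0x0004) message classes instead of the
 * B44_DEF_MSG_ENABLE default. */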
99 static struct pci_device_id b44_pci_tbl[] = {
100 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
102 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
106 { } /* terminate list with empty entry */
107 };
109 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
111 static void b44_halt(struct b44 *);
112 static void b44_init_rings(struct b44 *);
113 static void b44_init_hw(struct b44 *, int);
115 static int dma_desc_align_mask;
116 static int dma_desc_sync_size;
118 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
119 #define _B44(x...) # x,
120 B44_STAT_REG_DECLARE
121 #undef _B44
122 };
124 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
125 dma_addr_t dma_base,
126 unsigned long offset,
127 enum dma_data_direction dir)
128 {
129 dma_sync_single_range_for_device(&pdev->dev, dma_base,
130 offset & dma_desc_align_mask,
131 dma_desc_sync_size, dir);
132 }
134 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
135 dma_addr_t dma_base,
136 unsigned long offset,
137 enum dma_data_direction dir)
138 {
139 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
140 offset & dma_desc_align_mask,
141 dma_desc_sync_size, dir);
142 }
144 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
145 {
146 return readl(bp->regs + reg);
147 }
149 static inline void bw32(const struct b44 *bp,
150 unsigned long reg, unsigned long val)
151 {
152 writel(val, bp->regs + reg);
153 }
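/* Poll 'reg' every 10us for up to 'timeout' iterations until 'bit' is
 * cleared (clear != 0) or set (clear == 0).  Returns 0 on success, or
 * -ENODEV with a log message if the bit never reaches the desired
 * state. */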
155 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
156 u32 bit, unsigned long timeout, const int clear)
157 {
158 unsigned long i;
160 for (i = 0; i < timeout; i++) {
161 u32 val = br32(bp, reg);
163 if (clear && !(val & bit))
164 break;
165 if (!clear && (val & bit))
166 break;
167 udelay(10);
168 }
169 if (i == timeout) {
170 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
171 "%lx to %s.\n",
172 bp->dev->name,
173 bit, reg,
174 (clear ? "clear" : "set"));
175 return -ENODEV;
176 }
177 return 0;
178 }
180 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
181 * buzz words used on this company's website :-)
182 *
183 * All of these routines must be invoked with bp->lock held and
184 * interrupts disabled.
185 */
187 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
188 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
190 static u32 ssb_get_core_rev(struct b44 *bp)
191 {
192 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
193 }
195 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
196 {
197 u32 bar_orig, pci_rev, val;
199 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
200 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
201 pci_rev = ssb_get_core_rev(bp);
203 val = br32(bp, B44_SBINTVEC);
204 val |= cores;
205 bw32(bp, B44_SBINTVEC, val);
207 val = br32(bp, SSB_PCI_TRANS_2);
208 val |= SSB_PCI_PREF | SSB_PCI_BURST;
209 bw32(bp, SSB_PCI_TRANS_2, val);
211 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
213 return pci_rev;
214 }
216 static void ssb_core_disable(struct b44 *bp)
217 {
218 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
219 return;
221 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
222 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
223 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
224 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
225 SBTMSLOW_REJECT | SBTMSLOW_RESET));
226 br32(bp, B44_SBTMSLOW);
227 udelay(1);
228 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
229 br32(bp, B44_SBTMSLOW);
230 udelay(1);
231 }
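/* Note the recurring idiom above and below: each bw32() to SBTMSLOW is
 * followed by a br32() of the same register and a udelay(1).  The read
 * back forces the posted PCI write out to the device before the delay,
 * so the core really sees each state transition in order. */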
233 static void ssb_core_reset(struct b44 *bp)
234 {
235 u32 val;
237 ssb_core_disable(bp);
238 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
239 br32(bp, B44_SBTMSLOW);
240 udelay(1);
242 /* Clear SERR if set, this is a hw bug workaround. */
243 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
244 bw32(bp, B44_SBTMSHIGH, 0);
246 val = br32(bp, B44_SBIMSTATE);
247 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
248 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
250 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
251 br32(bp, B44_SBTMSLOW);
252 udelay(1);
254 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
255 br32(bp, B44_SBTMSLOW);
256 udelay(1);
257 }
259 static int ssb_core_unit(struct b44 *bp)
260 {
261 #if 0
262 u32 val = br32(bp, B44_SBADMATCH0);
263 u32 base, type;
265 type = val & SBADMATCH0_TYPE_MASK;
266 switch (type) {
267 case 0:
268 base = val & SBADMATCH0_BS0_MASK;
269 break;
271 case 1:
272 base = val & SBADMATCH0_BS1_MASK;
273 break;
275 case 2:
276 default:
277 base = val & SBADMATCH0_BS2_MASK;
278 break;
279 };
280 #endif
281 return 0;
282 }
284 static int ssb_is_core_up(struct b44 *bp)
285 {
286 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
287 == SBTMSLOW_CLOCK);
288 }
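/* Write one 6-byte MAC address into the receive CAM at 'index': bytes
 * 2-5 are packed into CAM_DATA_LO, bytes 0-1 plus the VALID flag into
 * CAM_DATA_HI, then a write command is issued through CAM_CTRL and the
 * BUSY bit is polled until the entry is stored. */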
290 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
291 {
292 u32 val;
294 val = ((u32) data[2]) << 24;
295 val |= ((u32) data[3]) << 16;
296 val |= ((u32) data[4]) << 8;
297 val |= ((u32) data[5]) << 0;
298 bw32(bp, B44_CAM_DATA_LO, val);
299 val = (CAM_DATA_HI_VALID |
300 (((u32) data[0]) << 8) |
301 (((u32) data[1]) << 0));
302 bw32(bp, B44_CAM_DATA_HI, val);
303 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
304 (index << CAM_CTRL_INDEX_SHIFT)));
305 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
306 }
308 static inline void __b44_disable_ints(struct b44 *bp)
309 {
310 bw32(bp, B44_IMASK, 0);
311 }
313 static void b44_disable_ints(struct b44 *bp)
314 {
315 __b44_disable_ints(bp);
317 /* Flush posted writes. */
318 br32(bp, B44_IMASK);
319 }
321 static void b44_enable_ints(struct b44 *bp)
322 {
323 bw32(bp, B44_IMASK, bp->imask);
324 }
326 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
327 {
328 int err;
330 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
331 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
332 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
333 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
334 (reg << MDIO_DATA_RA_SHIFT) |
335 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
336 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
337 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
339 return err;
340 }
342 static int b44_writephy(struct b44 *bp, int reg, u32 val)
343 {
344 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
345 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
346 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
347 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
348 (reg << MDIO_DATA_RA_SHIFT) |
349 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
350 (val & MDIO_DATA_DATA)));
351 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
352 }
354 /* miilib interface */
355 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
356 * due to code existing before miilib use was added to this driver.
357 * Someone should remove this artificial driver limitation in
358 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
359 */
360 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
361 {
362 u32 val;
363 struct b44 *bp = netdev_priv(dev);
364 int rc = b44_readphy(bp, location, &val);
365 if (rc)
366 return 0xffffffff;
367 return val;
368 }
370 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
371 int val)
372 {
373 struct b44 *bp = netdev_priv(dev);
374 b44_writephy(bp, location, val);
375 }
377 static int b44_phy_reset(struct b44 *bp)
378 {
379 u32 val;
380 int err;
382 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
383 if (err)
384 return err;
385 udelay(100);
386 err = b44_readphy(bp, MII_BMCR, &val);
387 if (!err) {
388 if (val & BMCR_RESET) {
389 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
390 bp->dev->name);
391 err = -ENODEV;
392 }
393 }
395 return err;
396 }
398 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
399 {
400 u32 val;
402 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
403 bp->flags |= pause_flags;
405 val = br32(bp, B44_RXCONFIG);
406 if (pause_flags & B44_FLAG_RX_PAUSE)
407 val |= RXCONFIG_FLOW;
408 else
409 val &= ~RXCONFIG_FLOW;
410 bw32(bp, B44_RXCONFIG, val);
412 val = br32(bp, B44_MAC_FLOW);
413 if (pause_flags & B44_FLAG_TX_PAUSE)
414 val |= (MAC_FLOW_PAUSE_ENAB |
415 (0xc0 & MAC_FLOW_RX_HI_WATER));
416 else
417 val &= ~MAC_FLOW_PAUSE_ENAB;
418 bw32(bp, B44_MAC_FLOW, val);
419 }
421 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
422 {
423 u32 pause_enab = 0;
425 /* The driver supports only rx pause by default because
426 the b44 mac tx pause mechanism generates excessive
427 pause frames.
428 Use ethtool to turn on b44 tx pause if necessary.
429 */
430 if ((local & ADVERTISE_PAUSE_CAP) &&
431 (local & ADVERTISE_PAUSE_ASYM)){
432 if ((remote & LPA_PAUSE_ASYM) &&
433 !(remote & LPA_PAUSE_CAP))
434 pause_enab |= B44_FLAG_RX_PAUSE;
435 }
437 __b44_set_flow_ctrl(bp, pause_enab);
438 }
440 static int b44_setup_phy(struct b44 *bp)
441 {
442 u32 val;
443 int err;
445 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
446 goto out;
447 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
448 val & MII_ALEDCTRL_ALLMSK)) != 0)
449 goto out;
450 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
451 goto out;
452 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
453 val | MII_TLEDCTRL_ENABLE)) != 0)
454 goto out;
456 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
457 u32 adv = ADVERTISE_CSMA;
459 if (bp->flags & B44_FLAG_ADV_10HALF)
460 adv |= ADVERTISE_10HALF;
461 if (bp->flags & B44_FLAG_ADV_10FULL)
462 adv |= ADVERTISE_10FULL;
463 if (bp->flags & B44_FLAG_ADV_100HALF)
464 adv |= ADVERTISE_100HALF;
465 if (bp->flags & B44_FLAG_ADV_100FULL)
466 adv |= ADVERTISE_100FULL;
468 if (bp->flags & B44_FLAG_PAUSE_AUTO)
469 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
471 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
472 goto out;
473 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
474 BMCR_ANRESTART))) != 0)
475 goto out;
476 } else {
477 u32 bmcr;
479 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
480 goto out;
481 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
482 if (bp->flags & B44_FLAG_100_BASE_T)
483 bmcr |= BMCR_SPEED100;
484 if (bp->flags & B44_FLAG_FULL_DUPLEX)
485 bmcr |= BMCR_FULLDPLX;
486 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
487 goto out;
489 /* Since we will not be negotiating there is no safe way
490 * to determine if the link partner supports flow control
491 * or not. So just disable it completely in this case.
492 */
493 b44_set_flow_ctrl(bp, 0, 0);
494 }
496 out:
497 return err;
498 }
500 static void b44_stats_update(struct b44 *bp)
501 {
502 unsigned long reg;
503 u32 *val;
505 val = &bp->hw_stats.tx_good_octets;
506 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
507 *val++ += br32(bp, reg);
508 }
510 /* Pad */
511 reg += 8*4UL;
513 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
514 *val++ += br32(bp, reg);
515 }
516 }
518 static void b44_link_report(struct b44 *bp)
519 {
520 if (!netif_carrier_ok(bp->dev)) {
521 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
522 } else {
523 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
524 bp->dev->name,
525 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
526 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
528 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
529 "%s for RX.\n",
530 bp->dev->name,
531 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
532 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
533 }
534 }
536 static void b44_check_phy(struct b44 *bp)
537 {
538 u32 bmsr, aux;
540 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
541 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
542 (bmsr != 0xffff)) {
543 if (aux & MII_AUXCTRL_SPEED)
544 bp->flags |= B44_FLAG_100_BASE_T;
545 else
546 bp->flags &= ~B44_FLAG_100_BASE_T;
547 if (aux & MII_AUXCTRL_DUPLEX)
548 bp->flags |= B44_FLAG_FULL_DUPLEX;
549 else
550 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
552 if (!netif_carrier_ok(bp->dev) &&
553 (bmsr & BMSR_LSTATUS)) {
554 u32 val = br32(bp, B44_TX_CTRL);
555 u32 local_adv, remote_adv;
557 if (bp->flags & B44_FLAG_FULL_DUPLEX)
558 val |= TX_CTRL_DUPLEX;
559 else
560 val &= ~TX_CTRL_DUPLEX;
561 bw32(bp, B44_TX_CTRL, val);
563 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
564 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
565 !b44_readphy(bp, MII_LPA, &remote_adv))
566 b44_set_flow_ctrl(bp, local_adv, remote_adv);
568 /* Link now up */
569 netif_carrier_on(bp->dev);
570 b44_link_report(bp);
571 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
572 /* Link now down */
573 netif_carrier_off(bp->dev);
574 b44_link_report(bp);
575 }
577 if (bmsr & BMSR_RFAULT)
578 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
579 bp->dev->name);
580 if (bmsr & BMSR_JCD)
581 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
582 bp->dev->name);
583 }
584 }
586 static void b44_timer(unsigned long __opaque)
587 {
588 struct b44 *bp = (struct b44 *) __opaque;
590 spin_lock_irq(&bp->lock);
592 b44_check_phy(bp);
594 b44_stats_update(bp);
596 spin_unlock_irq(&bp->lock);
598 bp->timer.expires = jiffies + HZ;
599 add_timer(&bp->timer);
600 }
602 static void b44_tx(struct b44 *bp)
603 {
604 u32 cur, cons;
606 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
607 cur /= sizeof(struct dma_desc);
609 /* XXX needs updating when NETIF_F_SG is supported */
610 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
611 struct ring_info *rp = &bp->tx_buffers[cons];
612 struct sk_buff *skb = rp->skb;
614 BUG_ON(skb == NULL);
616 pci_unmap_single(bp->pdev,
617 pci_unmap_addr(rp, mapping),
618 skb->len,
619 PCI_DMA_TODEVICE);
620 rp->skb = NULL;
621 dev_kfree_skb_irq(skb);
622 }
624 bp->tx_cons = cons;
625 if (netif_queue_stopped(bp->dev) &&
626 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
627 netif_wake_queue(bp->dev);
629 bw32(bp, B44_GPTIMER, 0);
630 }
632 /* Works like this. This chip writes a 'struct rx_header" 30 bytes
633 * before the DMA address you give it. So we allocate 30 more bytes
634 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
635 * point the chip at 30 bytes past where the rx_header will go.
636 */
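/* Resulting layout of each RX buffer (bp->rx_offset == 30):
 *
 *	buffer + 0  : struct rx_header, written back by the chip
 *	buffer + 30 : packet data; this is the address placed in the
 *	              descriptor (mapping + bp->rx_offset)
 */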
637 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
638 {
639 struct dma_desc *dp;
640 struct ring_info *src_map, *map;
641 struct rx_header *rh;
642 struct sk_buff *skb;
643 dma_addr_t mapping;
644 int dest_idx;
645 u32 ctrl;
647 src_map = NULL;
648 if (src_idx >= 0)
649 src_map = &bp->rx_buffers[src_idx];
650 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
651 map = &bp->rx_buffers[dest_idx];
652 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
653 if (skb == NULL)
654 return -ENOMEM;
656 mapping = pci_map_single(bp->pdev, skb->data,
657 RX_PKT_BUF_SZ,
658 PCI_DMA_FROMDEVICE);
660 /* Hardware bug work-around, the chip is unable to do PCI DMA
661 to/from anything above 1GB :-( */
662 if (dma_mapping_error(mapping) ||
663 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
664 /* Sigh... */
665 if (!dma_mapping_error(mapping))
666 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
667 dev_kfree_skb_any(skb);
668 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
669 if (skb == NULL)
670 return -ENOMEM;
671 mapping = pci_map_single(bp->pdev, skb->data,
672 RX_PKT_BUF_SZ,
673 PCI_DMA_FROMDEVICE);
674 if (dma_mapping_error(mapping) ||
675 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
676 if (!dma_mapping_error(mapping))
677 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
678 dev_kfree_skb_any(skb);
679 return -ENOMEM;
680 }
681 }
683 skb->dev = bp->dev;
684 skb_reserve(skb, bp->rx_offset);
686 rh = (struct rx_header *)
687 (skb->data - bp->rx_offset);
688 rh->len = 0;
689 rh->flags = 0;
691 map->skb = skb;
692 pci_unmap_addr_set(map, mapping, mapping);
694 if (src_map != NULL)
695 src_map->skb = NULL;
697 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
698 if (dest_idx == (B44_RX_RING_SIZE - 1))
699 ctrl |= DESC_CTRL_EOT;
701 dp = &bp->rx_ring[dest_idx];
702 dp->ctrl = cpu_to_le32(ctrl);
703 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
705 if (bp->flags & B44_FLAG_RX_RING_HACK)
706 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
707 dest_idx * sizeof(dp),
708 DMA_BIDIRECTIONAL);
710 return RX_PKT_BUF_SZ;
711 }
713 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
714 {
715 struct dma_desc *src_desc, *dest_desc;
716 struct ring_info *src_map, *dest_map;
717 struct rx_header *rh;
718 int dest_idx;
719 u32 ctrl;
721 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
722 dest_desc = &bp->rx_ring[dest_idx];
723 dest_map = &bp->rx_buffers[dest_idx];
724 src_desc = &bp->rx_ring[src_idx];
725 src_map = &bp->rx_buffers[src_idx];
727 dest_map->skb = src_map->skb;
728 rh = (struct rx_header *) src_map->skb->data;
729 rh->len = 0;
730 rh->flags = 0;
731 pci_unmap_addr_set(dest_map, mapping,
732 pci_unmap_addr(src_map, mapping));
734 if (bp->flags & B44_FLAG_RX_RING_HACK)
735 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
736 src_idx * sizeof(src_desc),
737 DMA_BIDIRECTIONAL);
739 ctrl = src_desc->ctrl;
740 if (dest_idx == (B44_RX_RING_SIZE - 1))
741 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
742 else
743 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
745 dest_desc->ctrl = ctrl;
746 dest_desc->addr = src_desc->addr;
748 src_map->skb = NULL;
750 if (bp->flags & B44_FLAG_RX_RING_HACK)
751 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
752 dest_idx * sizeof(dest_desc),
753 DMA_BIDIRECTIONAL);
755 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
756 RX_PKT_BUF_SZ,
757 PCI_DMA_FROMDEVICE);
758 }
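/* RX strategy used below: packets longer than RX_COPY_THRESHOLD keep
 * their original buffer and a fresh skb is allocated into the ring
 * slot ("buffer flip"), while short packets are copied into a small
 * new skb so the original buffer can be recycled in place. */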
760 static int b44_rx(struct b44 *bp, int budget)
761 {
762 int received;
763 u32 cons, prod;
765 received = 0;
766 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
767 prod /= sizeof(struct dma_desc);
768 cons = bp->rx_cons;
770 while (cons != prod && budget > 0) {
771 struct ring_info *rp = &bp->rx_buffers[cons];
772 struct sk_buff *skb = rp->skb;
773 dma_addr_t map = pci_unmap_addr(rp, mapping);
774 struct rx_header *rh;
775 u16 len;
777 pci_dma_sync_single_for_cpu(bp->pdev, map,
778 RX_PKT_BUF_SZ,
779 PCI_DMA_FROMDEVICE);
780 rh = (struct rx_header *) skb->data;
781 len = le16_to_cpu(rh->len);
782 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
783 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
784 drop_it:
785 b44_recycle_rx(bp, cons, bp->rx_prod);
786 drop_it_no_recycle:
787 bp->stats.rx_dropped++;
788 goto next_pkt;
789 }
791 if (len == 0) {
792 int i = 0;
794 do {
795 udelay(2);
796 barrier();
797 len = le16_to_cpu(rh->len);
798 } while (len == 0 && i++ < 5);
799 if (len == 0)
800 goto drop_it;
801 }
803 /* Omit CRC. */
804 len -= 4;
806 if (len > RX_COPY_THRESHOLD) {
807 int skb_size;
808 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
809 if (skb_size < 0)
810 goto drop_it;
811 pci_unmap_single(bp->pdev, map,
812 skb_size, PCI_DMA_FROMDEVICE);
813 /* Leave out rx_header */
814 skb_put(skb, len+bp->rx_offset);
815 skb_pull(skb,bp->rx_offset);
816 } else {
817 struct sk_buff *copy_skb;
819 b44_recycle_rx(bp, cons, bp->rx_prod);
820 copy_skb = dev_alloc_skb(len + 2);
821 if (copy_skb == NULL)
822 goto drop_it_no_recycle;
824 copy_skb->dev = bp->dev;
825 skb_reserve(copy_skb, 2);
826 skb_put(copy_skb, len);
827 /* DMA sync done above, copy just the actual packet */
828 memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
830 skb = copy_skb;
831 }
832 skb->ip_summed = CHECKSUM_NONE;
833 skb->protocol = eth_type_trans(skb, bp->dev);
834 netif_receive_skb(skb);
835 bp->dev->last_rx = jiffies;
836 received++;
837 budget--;
838 next_pkt:
839 bp->rx_prod = (bp->rx_prod + 1) &
840 (B44_RX_RING_SIZE - 1);
841 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
842 }
844 bp->rx_cons = cons;
845 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
847 return received;
848 }
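/* NAPI poll callback: reaps TX completions, processes up to 'budget'
 * RX packets, and resets the chip on fatal error interrupts.  Only
 * when all work is done does it remove itself from the poll list and
 * re-enable interrupts; returning 1 asks to be polled again. */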
850 static int b44_poll(struct net_device *netdev, int *budget)
851 {
852 struct b44 *bp = netdev_priv(netdev);
853 int done;
855 spin_lock_irq(&bp->lock);
857 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
858 /* spin_lock(&bp->tx_lock); */
859 b44_tx(bp);
860 /* spin_unlock(&bp->tx_lock); */
861 }
862 spin_unlock_irq(&bp->lock);
864 done = 1;
865 if (bp->istat & ISTAT_RX) {
866 int orig_budget = *budget;
867 int work_done;
869 if (orig_budget > netdev->quota)
870 orig_budget = netdev->quota;
872 work_done = b44_rx(bp, orig_budget);
874 *budget -= work_done;
875 netdev->quota -= work_done;
877 if (work_done >= orig_budget)
878 done = 0;
879 }
881 if (bp->istat & ISTAT_ERRORS) {
882 spin_lock_irq(&bp->lock);
883 b44_halt(bp);
884 b44_init_rings(bp);
885 b44_init_hw(bp, 1);
886 netif_wake_queue(bp->dev);
887 spin_unlock_irq(&bp->lock);
888 done = 1;
889 }
891 if (done) {
892 netif_rx_complete(netdev);
893 b44_enable_ints(bp);
894 }
896 return (done ? 0 : 1);
897 }
899 static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
900 {
901 struct net_device *dev = dev_id;
902 struct b44 *bp = netdev_priv(dev);
903 u32 istat, imask;
904 int handled = 0;
906 spin_lock(&bp->lock);
908 istat = br32(bp, B44_ISTAT);
909 imask = br32(bp, B44_IMASK);
911 /* ??? What the fuck is the purpose of the interrupt mask
912 * ??? register if we have to mask it out by hand anyways?
913 */
914 istat &= imask;
915 if (istat) {
916 handled = 1;
918 if (unlikely(!netif_running(dev))) {
919 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
920 goto irq_ack;
921 }
923 if (netif_rx_schedule_prep(dev)) {
924 /* NOTE: These writes are posted by the readback of
925 * the ISTAT register below.
926 */
927 bp->istat = istat;
928 __b44_disable_ints(bp);
929 __netif_rx_schedule(dev);
930 } else {
931 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
932 dev->name);
933 }
935 irq_ack:
936 bw32(bp, B44_ISTAT, istat);
937 br32(bp, B44_ISTAT);
938 }
939 spin_unlock(&bp->lock);
940 return IRQ_RETVAL(handled);
941 }
943 static void b44_tx_timeout(struct net_device *dev)
944 {
945 struct b44 *bp = netdev_priv(dev);
947 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
948 dev->name);
950 spin_lock_irq(&bp->lock);
952 b44_halt(bp);
953 b44_init_rings(bp);
954 b44_init_hw(bp, 1);
956 spin_unlock_irq(&bp->lock);
958 b44_enable_ints(bp);
960 netif_wake_queue(dev);
961 }
963 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
964 {
965 struct b44 *bp = netdev_priv(dev);
966 struct sk_buff *bounce_skb;
967 int rc = NETDEV_TX_OK;
968 dma_addr_t mapping;
969 u32 len, entry, ctrl;
971 len = skb->len;
972 spin_lock_irq(&bp->lock);
974 /* This is a hard error, log it. */
975 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
976 netif_stop_queue(dev);
977 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
978 dev->name);
979 goto err_out;
980 }
982 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
983 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
984 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
985 if (!dma_mapping_error(mapping))
986 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
988 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
989 GFP_ATOMIC|GFP_DMA);
990 if (!bounce_skb)
991 goto err_out;
993 mapping = pci_map_single(bp->pdev, bounce_skb->data,
994 len, PCI_DMA_TODEVICE);
995 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
996 if (!dma_mapping_error(mapping))
997 pci_unmap_single(bp->pdev, mapping,
998 len, PCI_DMA_TODEVICE);
999 dev_kfree_skb_any(bounce_skb);
1000 goto err_out;
1001 }
1003 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
1004 dev_kfree_skb_any(skb);
1005 skb = bounce_skb;
1006 }
1008 entry = bp->tx_prod;
1009 bp->tx_buffers[entry].skb = skb;
1010 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1012 ctrl = (len & DESC_CTRL_LEN);
1013 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1014 if (entry == (B44_TX_RING_SIZE - 1))
1015 ctrl |= DESC_CTRL_EOT;
1017 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1018 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1020 if (bp->flags & B44_FLAG_TX_RING_HACK)
1021 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1022 entry * sizeof(bp->tx_ring[0]),
1023 DMA_TO_DEVICE);
1025 entry = NEXT_TX(entry);
1027 bp->tx_prod = entry;
1029 wmb();
1031 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1032 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1033 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1034 if (bp->flags & B44_FLAG_REORDER_BUG)
1035 br32(bp, B44_DMATX_PTR);
1037 if (TX_BUFFS_AVAIL(bp) < 1)
1038 netif_stop_queue(dev);
1040 dev->trans_start = jiffies;
1042 out_unlock:
1043 spin_unlock_irq(&bp->lock);
1045 return rc;
1047 err_out:
1048 rc = NETDEV_TX_BUSY;
1049 goto out_unlock;
1050 }
1052 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1053 {
1054 struct b44 *bp = netdev_priv(dev);
1056 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1057 return -EINVAL;
1059 if (!netif_running(dev)) {
1060 /* We'll just catch it later when the
1061 * device is up'd.
1062 */
1063 dev->mtu = new_mtu;
1064 return 0;
1065 }
1067 spin_lock_irq(&bp->lock);
1068 b44_halt(bp);
1069 dev->mtu = new_mtu;
1070 b44_init_rings(bp);
1071 b44_init_hw(bp, 1);
1072 spin_unlock_irq(&bp->lock);
1074 b44_enable_ints(bp);
1076 return 0;
1077 }
1079 /* Free up pending packets in all rx/tx rings.
1081 * The chip has been shut down and the driver detached from
1082 * the networking, so no interrupts or new tx packets will
1083 * end up in the driver. bp->lock is not held and we are not
1084 * in an interrupt context and thus may sleep.
1085 */
1086 static void b44_free_rings(struct b44 *bp)
1087 {
1088 struct ring_info *rp;
1089 int i;
1091 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1092 rp = &bp->rx_buffers[i];
1094 if (rp->skb == NULL)
1095 continue;
1096 pci_unmap_single(bp->pdev,
1097 pci_unmap_addr(rp, mapping),
1098 RX_PKT_BUF_SZ,
1099 PCI_DMA_FROMDEVICE);
1100 dev_kfree_skb_any(rp->skb);
1101 rp->skb = NULL;
1102 }
1104 /* XXX needs changes once NETIF_F_SG is set... */
1105 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1106 rp = &bp->tx_buffers[i];
1108 if (rp->skb == NULL)
1109 continue;
1110 pci_unmap_single(bp->pdev,
1111 pci_unmap_addr(rp, mapping),
1112 rp->skb->len,
1113 PCI_DMA_TODEVICE);
1114 dev_kfree_skb_any(rp->skb);
1115 rp->skb = NULL;
1116 }
1117 }
1119 /* Initialize tx/rx rings for packet processing.
1121 * The chip has been shut down and the driver detached from
1122 * the networking, so no interrupts or new tx packets will
1123 * end up in the driver.
1124 */
1125 static void b44_init_rings(struct b44 *bp)
1126 {
1127 int i;
1129 b44_free_rings(bp);
1131 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1132 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1134 if (bp->flags & B44_FLAG_RX_RING_HACK)
1135 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1136 DMA_TABLE_BYTES,
1137 PCI_DMA_BIDIRECTIONAL);
1139 if (bp->flags & B44_FLAG_TX_RING_HACK)
1140 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1141 DMA_TABLE_BYTES,
1142 PCI_DMA_TODEVICE);
1144 for (i = 0; i < bp->rx_pending; i++) {
1145 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1146 break;
1147 }
1148 }
1150 /*
1151 * Must not be invoked with interrupt sources disabled and
1152 * the hardware shutdown down.
1153 */
1154 static void b44_free_consistent(struct b44 *bp)
1155 {
1156 kfree(bp->rx_buffers);
1157 bp->rx_buffers = NULL;
1158 kfree(bp->tx_buffers);
1159 bp->tx_buffers = NULL;
1160 if (bp->rx_ring) {
1161 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1162 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1163 DMA_TABLE_BYTES,
1164 DMA_BIDIRECTIONAL);
1165 kfree(bp->rx_ring);
1166 } else
1167 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1168 bp->rx_ring, bp->rx_ring_dma);
1169 bp->rx_ring = NULL;
1170 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1171 }
1172 if (bp->tx_ring) {
1173 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1174 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1175 DMA_TABLE_BYTES,
1176 DMA_TO_DEVICE);
1177 kfree(bp->tx_ring);
1178 } else
1179 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1180 bp->tx_ring, bp->tx_ring_dma);
1181 bp->tx_ring = NULL;
1182 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1183 }
1184 }
1186 /*
1187 * Must not be invoked with interrupt sources disabled and
1188 * the hardware shutdown down. Can sleep.
1189 */
1190 static int b44_alloc_consistent(struct b44 *bp)
1191 {
1192 int size;
1194 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1195 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1196 if (!bp->rx_buffers)
1197 goto out_err;
1199 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1200 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1201 if (!bp->tx_buffers)
1202 goto out_err;
1204 size = DMA_TABLE_BYTES;
1205 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1206 if (!bp->rx_ring) {
1207 /* Allocation may have failed due to pci_alloc_consistent
1208 insisting on use of GFP_DMA, which is more restrictive
1209 than necessary... */
1210 struct dma_desc *rx_ring;
1211 dma_addr_t rx_ring_dma;
1213 rx_ring = kzalloc(size, GFP_KERNEL);
1214 if (!rx_ring)
1215 goto out_err;
1217 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1218 DMA_TABLE_BYTES,
1219 DMA_BIDIRECTIONAL);
1221 if (dma_mapping_error(rx_ring_dma) ||
1222 rx_ring_dma + size > B44_DMA_MASK) {
1223 kfree(rx_ring);
1224 goto out_err;
1225 }
1227 bp->rx_ring = rx_ring;
1228 bp->rx_ring_dma = rx_ring_dma;
1229 bp->flags |= B44_FLAG_RX_RING_HACK;
1230 }
1232 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1233 if (!bp->tx_ring) {
1234 /* Allocation may have failed due to pci_alloc_consistent
1235 insisting on use of GFP_DMA, which is more restrictive
1236 than necessary... */
1237 struct dma_desc *tx_ring;
1238 dma_addr_t tx_ring_dma;
1240 tx_ring = kzalloc(size, GFP_KERNEL);
1241 if (!tx_ring)
1242 goto out_err;
1244 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1245 DMA_TABLE_BYTES,
1246 DMA_TO_DEVICE);
1248 if (dma_mapping_error(tx_ring_dma) ||
1249 tx_ring_dma + size > B44_DMA_MASK) {
1250 kfree(tx_ring);
1251 goto out_err;
1252 }
1254 bp->tx_ring = tx_ring;
1255 bp->tx_ring_dma = tx_ring_dma;
1256 bp->flags |= B44_FLAG_TX_RING_HACK;
1257 }
1259 return 0;
1261 out_err:
1262 b44_free_consistent(bp);
1263 return -ENOMEM;
1264 }
1266 /* bp->lock is held. */
1267 static void b44_clear_stats(struct b44 *bp)
1268 {
1269 unsigned long reg;
1271 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1272 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1273 br32(bp, reg);
1274 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1275 br32(bp, reg);
1276 }
1278 /* bp->lock is held. */
1279 static void b44_chip_reset(struct b44 *bp)
1280 {
1281 if (ssb_is_core_up(bp)) {
1282 bw32(bp, B44_RCV_LAZY, 0);
1283 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1284 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
1285 bw32(bp, B44_DMATX_CTRL, 0);
1286 bp->tx_prod = bp->tx_cons = 0;
1287 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1288 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1289 100, 0);
1290 }
1291 bw32(bp, B44_DMARX_CTRL, 0);
1292 bp->rx_prod = bp->rx_cons = 0;
1293 } else {
1294 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1295 SBINTVEC_ENET0 :
1296 SBINTVEC_ENET1));
1297 }
1299 ssb_core_reset(bp);
1301 b44_clear_stats(bp);
1303 /* Make PHY accessible. */
1304 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1305 (0x0d & MDIO_CTRL_MAXF_MASK)));
1306 br32(bp, B44_MDIO_CTRL);
1308 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1309 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1310 br32(bp, B44_ENET_CTRL);
1311 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1312 } else {
1313 u32 val = br32(bp, B44_DEVCTRL);
1315 if (val & DEVCTRL_EPR) {
1316 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1317 br32(bp, B44_DEVCTRL);
1318 udelay(100);
1319 }
1320 bp->flags |= B44_FLAG_INTERNAL_PHY;
1321 }
1322 }
1324 /* bp->lock is held. */
1325 static void b44_halt(struct b44 *bp)
1326 {
1327 b44_disable_ints(bp);
1328 b44_chip_reset(bp);
1329 }
1331 /* bp->lock is held. */
1332 static void __b44_set_mac_addr(struct b44 *bp)
1333 {
1334 bw32(bp, B44_CAM_CTRL, 0);
1335 if (!(bp->dev->flags & IFF_PROMISC)) {
1336 u32 val;
1338 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1339 val = br32(bp, B44_CAM_CTRL);
1340 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1341 }
1342 }
1344 static int b44_set_mac_addr(struct net_device *dev, void *p)
1345 {
1346 struct b44 *bp = netdev_priv(dev);
1347 struct sockaddr *addr = p;
1349 if (netif_running(dev))
1350 return -EBUSY;
1352 if (!is_valid_ether_addr(addr->sa_data))
1353 return -EINVAL;
1355 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1357 spin_lock_irq(&bp->lock);
1358 __b44_set_mac_addr(bp);
1359 spin_unlock_irq(&bp->lock);
1361 return 0;
1362 }
1364 /* Called at device open time to get the chip ready for
1365 * packet processing. Invoked with bp->lock held.
1366 */
1367 static void __b44_set_rx_mode(struct net_device *);
1368 static void b44_init_hw(struct b44 *bp, int full_reset)
1369 {
1370 u32 val;
1372 b44_chip_reset(bp);
1373 if (full_reset) {
1374 b44_phy_reset(bp);
1375 b44_setup_phy(bp);
1376 }
1378 /* Enable CRC32, set proper LED modes and power on PHY */
1379 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1380 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1382 /* This sets the MAC address too. */
1383 __b44_set_rx_mode(bp->dev);
1385 /* MTU + eth header + possible VLAN tag + struct rx_header */
1386 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1387 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1389 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1390 if (full_reset) {
1391 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1392 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1393 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1394 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1395 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1397 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1398 bp->rx_prod = bp->rx_pending;
1400 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1401 } else {
1402 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1403 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1404 }
1406 val = br32(bp, B44_ENET_CTRL);
1407 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1408 }
1410 static int b44_open(struct net_device *dev)
1411 {
1412 struct b44 *bp = netdev_priv(dev);
1413 int err;
1415 err = b44_alloc_consistent(bp);
1416 if (err)
1417 goto out;
1419 b44_init_rings(bp);
1420 b44_init_hw(bp, 1);
1422 b44_check_phy(bp);
1424 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1425 if (unlikely(err < 0)) {
1426 b44_chip_reset(bp);
1427 b44_free_rings(bp);
1428 b44_free_consistent(bp);
1429 goto out;
1430 }
1432 init_timer(&bp->timer);
1433 bp->timer.expires = jiffies + HZ;
1434 bp->timer.data = (unsigned long) bp;
1435 bp->timer.function = b44_timer;
1436 add_timer(&bp->timer);
1438 b44_enable_ints(bp);
1439 netif_start_queue(dev);
1440 out:
1441 return err;
1442 }
1444 #if 0
1445 /*static*/ void b44_dump_state(struct b44 *bp)
1446 {
1447 u32 val32, val32_2, val32_3, val32_4, val32_5;
1448 u16 val16;
1450 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1451 printk("DEBUG: PCI status [%04x] \n", val16);
1454 #endif
1456 #ifdef CONFIG_NET_POLL_CONTROLLER
1457 /*
1458 * Polling receive - used by netconsole and other diagnostic tools
1459 * to allow network i/o with interrupts disabled.
1460 */
1461 static void b44_poll_controller(struct net_device *dev)
1462 {
1463 disable_irq(dev->irq);
1464 b44_interrupt(dev->irq, dev, NULL);
1465 enable_irq(dev->irq);
1466 }
1467 #endif
1469 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1470 {
1471 u32 i;
1472 u32 *pattern = (u32 *) pp;
1474 for (i = 0; i < bytes; i += sizeof(u32)) {
1475 bw32(bp, B44_FILT_ADDR, table_offset + i);
1476 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1477 }
1478 }
1480 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1481 {
1482 int magicsync = 6;
1483 int k, j, len = offset;
1484 int ethaddr_bytes = ETH_ALEN;
1486 memset(ppattern + offset, 0xff, magicsync);
1487 for (j = 0; j < magicsync; j++)
1488 set_bit(len++, (unsigned long *) pmask);
1490 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1491 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1492 ethaddr_bytes = ETH_ALEN;
1493 else
1494 ethaddr_bytes = B44_PATTERN_SIZE - len;
1495 if (ethaddr_bytes <=0)
1496 break;
1497 for (k = 0; k< ethaddr_bytes; k++) {
1498 ppattern[offset + magicsync +
1499 (j * ETH_ALEN) + k] = macaddr[k];
1500 len++;
1501 set_bit(len, (unsigned long *) pmask);
1502 }
1503 }
1504 return len - 1;
1505 }
1507 /* Setup magic packet patterns in the b44 WOL
1508 * pattern matching filter.
1509 */
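/* A Wake-on-LAN "magic packet" is six 0xff sync bytes followed by the
 * station MAC address repeated sixteen times; b44_magic_pattern()
 * above builds exactly that byte pattern (and its significance mask)
 * at the given payload offset, so the same helper serves the raw
 * ethernet, IPv4/UDP and IPv6/UDP encapsulations set up below. */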
1510 static void b44_setup_pseudo_magicp(struct b44 *bp)
1511 {
1513 u32 val;
1514 int plen0, plen1, plen2;
1515 u8 *pwol_pattern;
1516 u8 pwol_mask[B44_PMASK_SIZE];
1518 pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1519 if (!pwol_pattern) {
1520 printk(KERN_ERR PFX "Memory not available for WOL\n");
1521 return;
1522 }
1524 /* Ipv4 magic packet pattern - pattern 0.*/
1525 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1526 memset(pwol_mask, 0, B44_PMASK_SIZE);
1527 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1528 B44_ETHIPV4UDP_HLEN);
1530 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1531 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1533 /* Raw ethernet II magic packet pattern - pattern 1 */
1534 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1535 memset(pwol_mask, 0, B44_PMASK_SIZE);
1536 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1537 ETH_HLEN);
1539 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1540 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1541 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1542 B44_PMASK_BASE + B44_PMASK_SIZE);
1544 /* Ipv6 magic packet pattern - pattern 2 */
1545 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1546 memset(pwol_mask, 0, B44_PMASK_SIZE);
1547 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1548 B44_ETHIPV6UDP_HLEN);
1550 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1551 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1552 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1553 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1555 kfree(pwol_pattern);
1557 /* set these pattern's lengths: one less than each real length */
1558 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1559 bw32(bp, B44_WKUP_LEN, val);
1561 /* enable wakeup pattern matching */
1562 val = br32(bp, B44_DEVCTRL);
1563 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1564 }
1567 static void b44_setup_wol(struct b44 *bp)
1568 {
1569 u32 val;
1570 u16 pmval;
1572 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1574 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1576 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1578 val = bp->dev->dev_addr[2] << 24 |
1579 bp->dev->dev_addr[3] << 16 |
1580 bp->dev->dev_addr[4] << 8 |
1581 bp->dev->dev_addr[5];
1582 bw32(bp, B44_ADDR_LO, val);
1584 val = bp->dev->dev_addr[0] << 8 |
1585 bp->dev->dev_addr[1];
1586 bw32(bp, B44_ADDR_HI, val);
1588 val = br32(bp, B44_DEVCTRL);
1589 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1591 } else {
1592 b44_setup_pseudo_magicp(bp);
1593 }
1595 val = br32(bp, B44_SBTMSLOW);
1596 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1598 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1599 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1600 }
1603 static int b44_close(struct net_device *dev)
1604 {
1605 struct b44 *bp = netdev_priv(dev);
1607 netif_stop_queue(dev);
1609 netif_poll_disable(dev);
1611 del_timer_sync(&bp->timer);
1613 spin_lock_irq(&bp->lock);
1615 #if 0
1616 b44_dump_state(bp);
1617 #endif
1618 b44_halt(bp);
1619 b44_free_rings(bp);
1620 netif_carrier_off(dev);
1622 spin_unlock_irq(&bp->lock);
1624 free_irq(dev->irq, dev);
1626 netif_poll_enable(dev);
1628 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1629 b44_init_hw(bp, 0);
1630 b44_setup_wol(bp);
1631 }
1633 b44_free_consistent(bp);
1635 return 0;
1636 }
1638 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1639 {
1640 struct b44 *bp = netdev_priv(dev);
1641 struct net_device_stats *nstat = &bp->stats;
1642 struct b44_hw_stats *hwstat = &bp->hw_stats;
1644 /* Convert HW stats into netdevice stats. */
1645 nstat->rx_packets = hwstat->rx_pkts;
1646 nstat->tx_packets = hwstat->tx_pkts;
1647 nstat->rx_bytes = hwstat->rx_octets;
1648 nstat->tx_bytes = hwstat->tx_octets;
1649 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1650 hwstat->tx_oversize_pkts +
1651 hwstat->tx_underruns +
1652 hwstat->tx_excessive_cols +
1653 hwstat->tx_late_cols);
1654 nstat->multicast = hwstat->tx_multicast_pkts;
1655 nstat->collisions = hwstat->tx_total_cols;
1657 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1658 hwstat->rx_undersize);
1659 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1660 nstat->rx_frame_errors = hwstat->rx_align_errs;
1661 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1662 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1663 hwstat->rx_oversize_pkts +
1664 hwstat->rx_missed_pkts +
1665 hwstat->rx_crc_align_errs +
1666 hwstat->rx_undersize +
1667 hwstat->rx_crc_errs +
1668 hwstat->rx_align_errs +
1669 hwstat->rx_symbol_errs);
1671 nstat->tx_aborted_errors = hwstat->tx_underruns;
1672 #if 0
1673 /* Carrier lost counter seems to be broken for some devices */
1674 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1675 #endif
1677 return nstat;
1678 }
1680 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1681 {
1682 struct dev_mc_list *mclist;
1683 int i, num_ents;
1685 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1686 mclist = dev->mc_list;
1687 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1688 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1689 }
1690 return i+1;
1691 }
1693 static void __b44_set_rx_mode(struct net_device *dev)
1694 {
1695 struct b44 *bp = netdev_priv(dev);
1696 u32 val;
1698 val = br32(bp, B44_RXCONFIG);
1699 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1700 if (dev->flags & IFF_PROMISC) {
1701 val |= RXCONFIG_PROMISC;
1702 bw32(bp, B44_RXCONFIG, val);
1703 } else {
1704 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1705 int i = 0;
1707 __b44_set_mac_addr(bp);
1709 if (dev->flags & IFF_ALLMULTI)
1710 val |= RXCONFIG_ALLMULTI;
1711 else
1712 i = __b44_load_mcast(bp, dev);
1714 for (; i < 64; i++) {
1715 __b44_cam_write(bp, zero, i);
1716 }
1717 bw32(bp, B44_RXCONFIG, val);
1718 val = br32(bp, B44_CAM_CTRL);
1719 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1720 }
1721 }
1723 static void b44_set_rx_mode(struct net_device *dev)
1724 {
1725 struct b44 *bp = netdev_priv(dev);
1727 spin_lock_irq(&bp->lock);
1728 __b44_set_rx_mode(dev);
1729 spin_unlock_irq(&bp->lock);
1730 }
1732 static u32 b44_get_msglevel(struct net_device *dev)
1733 {
1734 struct b44 *bp = netdev_priv(dev);
1735 return bp->msg_enable;
1736 }
1738 static void b44_set_msglevel(struct net_device *dev, u32 value)
1739 {
1740 struct b44 *bp = netdev_priv(dev);
1741 bp->msg_enable = value;
1742 }
1744 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1745 {
1746 struct b44 *bp = netdev_priv(dev);
1747 struct pci_dev *pci_dev = bp->pdev;
1749 strcpy (info->driver, DRV_MODULE_NAME);
1750 strcpy (info->version, DRV_MODULE_VERSION);
1751 strcpy (info->bus_info, pci_name(pci_dev));
1752 }
1754 static int b44_nway_reset(struct net_device *dev)
1755 {
1756 struct b44 *bp = netdev_priv(dev);
1757 u32 bmcr;
1758 int r;
1760 spin_lock_irq(&bp->lock);
1761 b44_readphy(bp, MII_BMCR, &bmcr);
1762 b44_readphy(bp, MII_BMCR, &bmcr);
1763 r = -EINVAL;
1764 if (bmcr & BMCR_ANENABLE) {
1765 b44_writephy(bp, MII_BMCR,
1766 bmcr | BMCR_ANRESTART);
1767 r = 0;
1768 }
1769 spin_unlock_irq(&bp->lock);
1771 return r;
1772 }
1774 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1775 {
1776 struct b44 *bp = netdev_priv(dev);
1778 cmd->supported = (SUPPORTED_Autoneg);
1779 cmd->supported |= (SUPPORTED_100baseT_Half |
1780 SUPPORTED_100baseT_Full |
1781 SUPPORTED_10baseT_Half |
1782 SUPPORTED_10baseT_Full |
1783 SUPPORTED_MII);
1785 cmd->advertising = 0;
1786 if (bp->flags & B44_FLAG_ADV_10HALF)
1787 cmd->advertising |= ADVERTISED_10baseT_Half;
1788 if (bp->flags & B44_FLAG_ADV_10FULL)
1789 cmd->advertising |= ADVERTISED_10baseT_Full;
1790 if (bp->flags & B44_FLAG_ADV_100HALF)
1791 cmd->advertising |= ADVERTISED_100baseT_Half;
1792 if (bp->flags & B44_FLAG_ADV_100FULL)
1793 cmd->advertising |= ADVERTISED_100baseT_Full;
1794 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1795 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1796 SPEED_100 : SPEED_10;
1797 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1798 DUPLEX_FULL : DUPLEX_HALF;
1799 cmd->port = 0;
1800 cmd->phy_address = bp->phy_addr;
1801 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1802 XCVR_INTERNAL : XCVR_EXTERNAL;
1803 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1804 AUTONEG_DISABLE : AUTONEG_ENABLE;
1805 if (cmd->autoneg == AUTONEG_ENABLE)
1806 cmd->advertising |= ADVERTISED_Autoneg;
1807 if (!netif_running(dev)){
1808 cmd->speed = 0;
1809 cmd->duplex = 0xff;
1810 }
1811 cmd->maxtxpkt = 0;
1812 cmd->maxrxpkt = 0;
1813 return 0;
1814 }
1816 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1817 {
1818 struct b44 *bp = netdev_priv(dev);
1820 /* We do not support gigabit. */
1821 if (cmd->autoneg == AUTONEG_ENABLE) {
1822 if (cmd->advertising &
1823 (ADVERTISED_1000baseT_Half |
1824 ADVERTISED_1000baseT_Full))
1825 return -EINVAL;
1826 } else if ((cmd->speed != SPEED_100 &&
1827 cmd->speed != SPEED_10) ||
1828 (cmd->duplex != DUPLEX_HALF &&
1829 cmd->duplex != DUPLEX_FULL)) {
1830 return -EINVAL;
1831 }
1833 spin_lock_irq(&bp->lock);
1835 if (cmd->autoneg == AUTONEG_ENABLE) {
1836 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1837 B44_FLAG_100_BASE_T |
1838 B44_FLAG_FULL_DUPLEX |
1839 B44_FLAG_ADV_10HALF |
1840 B44_FLAG_ADV_10FULL |
1841 B44_FLAG_ADV_100HALF |
1842 B44_FLAG_ADV_100FULL);
1843 if (cmd->advertising == 0) {
1844 bp->flags |= (B44_FLAG_ADV_10HALF |
1845 B44_FLAG_ADV_10FULL |
1846 B44_FLAG_ADV_100HALF |
1847 B44_FLAG_ADV_100FULL);
1848 } else {
1849 if (cmd->advertising & ADVERTISED_10baseT_Half)
1850 bp->flags |= B44_FLAG_ADV_10HALF;
1851 if (cmd->advertising & ADVERTISED_10baseT_Full)
1852 bp->flags |= B44_FLAG_ADV_10FULL;
1853 if (cmd->advertising & ADVERTISED_100baseT_Half)
1854 bp->flags |= B44_FLAG_ADV_100HALF;
1855 if (cmd->advertising & ADVERTISED_100baseT_Full)
1856 bp->flags |= B44_FLAG_ADV_100FULL;
1857 }
1858 } else {
1859 bp->flags |= B44_FLAG_FORCE_LINK;
1860 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1861 if (cmd->speed == SPEED_100)
1862 bp->flags |= B44_FLAG_100_BASE_T;
1863 if (cmd->duplex == DUPLEX_FULL)
1864 bp->flags |= B44_FLAG_FULL_DUPLEX;
1865 }
1867 if (netif_running(dev))
1868 b44_setup_phy(bp);
1870 spin_unlock_irq(&bp->lock);
1872 return 0;
1873 }
1875 static void b44_get_ringparam(struct net_device *dev,
1876 struct ethtool_ringparam *ering)
1877 {
1878 struct b44 *bp = netdev_priv(dev);
1880 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1881 ering->rx_pending = bp->rx_pending;
1883 /* XXX ethtool lacks a tx_max_pending, oops... */
1884 }
1886 static int b44_set_ringparam(struct net_device *dev,
1887 struct ethtool_ringparam *ering)
1888 {
1889 struct b44 *bp = netdev_priv(dev);
1891 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1892 (ering->rx_mini_pending != 0) ||
1893 (ering->rx_jumbo_pending != 0) ||
1894 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1895 return -EINVAL;
1897 spin_lock_irq(&bp->lock);
1899 bp->rx_pending = ering->rx_pending;
1900 bp->tx_pending = ering->tx_pending;
1902 b44_halt(bp);
1903 b44_init_rings(bp);
1904 b44_init_hw(bp, 1);
1905 netif_wake_queue(bp->dev);
1906 spin_unlock_irq(&bp->lock);
1908 b44_enable_ints(bp);
1910 return 0;
1911 }
1913 static void b44_get_pauseparam(struct net_device *dev,
1914 struct ethtool_pauseparam *epause)
1915 {
1916 struct b44 *bp = netdev_priv(dev);
1918 epause->autoneg =
1919 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1920 epause->rx_pause =
1921 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1922 epause->tx_pause =
1923 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1924 }
1926 static int b44_set_pauseparam(struct net_device *dev,
1927 struct ethtool_pauseparam *epause)
1928 {
1929 struct b44 *bp = netdev_priv(dev);
1931 spin_lock_irq(&bp->lock);
1932 if (epause->autoneg)
1933 bp->flags |= B44_FLAG_PAUSE_AUTO;
1934 else
1935 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1936 if (epause->rx_pause)
1937 bp->flags |= B44_FLAG_RX_PAUSE;
1938 else
1939 bp->flags &= ~B44_FLAG_RX_PAUSE;
1940 if (epause->tx_pause)
1941 bp->flags |= B44_FLAG_TX_PAUSE;
1942 else
1943 bp->flags &= ~B44_FLAG_TX_PAUSE;
1944 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1945 b44_halt(bp);
1946 b44_init_rings(bp);
1947 b44_init_hw(bp, 1);
1948 } else {
1949 __b44_set_flow_ctrl(bp, bp->flags);
1950 }
1951 spin_unlock_irq(&bp->lock);
1953 b44_enable_ints(bp);
1955 return 0;
1956 }
1958 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1959 {
1960 switch(stringset) {
1961 case ETH_SS_STATS:
1962 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1963 break;
1964 }
1965 }
1967 static int b44_get_stats_count(struct net_device *dev)
1968 {
1969 return ARRAY_SIZE(b44_gstrings);
1970 }
1972 static void b44_get_ethtool_stats(struct net_device *dev,
1973 struct ethtool_stats *stats, u64 *data)
1974 {
1975 struct b44 *bp = netdev_priv(dev);
1976 u32 *val = &bp->hw_stats.tx_good_octets;
1977 u32 i;
1979 spin_lock_irq(&bp->lock);
1981 b44_stats_update(bp);
1983 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1984 *data++ = *val++;
1986 spin_unlock_irq(&bp->lock);
1987 }
1989 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1990 {
1991 struct b44 *bp = netdev_priv(dev);
1993 wol->supported = WAKE_MAGIC;
1994 if (bp->flags & B44_FLAG_WOL_ENABLE)
1995 wol->wolopts = WAKE_MAGIC;
1996 else
1997 wol->wolopts = 0;
1998 memset(&wol->sopass, 0, sizeof(wol->sopass));
1999 }
2001 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2002 {
2003 struct b44 *bp = netdev_priv(dev);
2005 spin_lock_irq(&bp->lock);
2006 if (wol->wolopts & WAKE_MAGIC)
2007 bp->flags |= B44_FLAG_WOL_ENABLE;
2008 else
2009 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2010 spin_unlock_irq(&bp->lock);
2012 return 0;
2013 }
2015 static struct ethtool_ops b44_ethtool_ops = {
2016 .get_drvinfo = b44_get_drvinfo,
2017 .get_settings = b44_get_settings,
2018 .set_settings = b44_set_settings,
2019 .nway_reset = b44_nway_reset,
2020 .get_link = ethtool_op_get_link,
2021 .get_wol = b44_get_wol,
2022 .set_wol = b44_set_wol,
2023 .get_ringparam = b44_get_ringparam,
2024 .set_ringparam = b44_set_ringparam,
2025 .get_pauseparam = b44_get_pauseparam,
2026 .set_pauseparam = b44_set_pauseparam,
2027 .get_msglevel = b44_get_msglevel,
2028 .set_msglevel = b44_set_msglevel,
2029 .get_strings = b44_get_strings,
2030 .get_stats_count = b44_get_stats_count,
2031 .get_ethtool_stats = b44_get_ethtool_stats,
2032 .get_perm_addr = ethtool_op_get_perm_addr,
2033 };
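/* These hooks back the ethtool ioctl: e.g. "ethtool eth0" lands in
 * b44_get_settings(), and "ethtool -s eth0 wol g" (g = magic packet)
 * reaches b44_set_wol() and sets B44_FLAG_WOL_ENABLE, which
 * b44_close() and b44_suspend() check before arming the wakeup logic.
 * (The interface name here is illustrative.) */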
2035 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2036 {
2037 struct mii_ioctl_data *data = if_mii(ifr);
2038 struct b44 *bp = netdev_priv(dev);
2039 int err = -EINVAL;
2041 if (!netif_running(dev))
2042 goto out;
2044 spin_lock_irq(&bp->lock);
2045 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2046 spin_unlock_irq(&bp->lock);
2047 out:
2048 return err;
2049 }
2051 /* Read 128-bytes of EEPROM. */
2052 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2053 {
2054 long i;
2055 u16 *ptr = (u16 *) data;
2057 for (i = 0; i < 128; i += 2)
2058 ptr[i / 2] = readw(bp->regs + 4096 + i);
2060 return 0;
2061 }
2063 static int __devinit b44_get_invariants(struct b44 *bp)
2064 {
2065 u8 eeprom[128];
2066 int err;
2068 err = b44_read_eeprom(bp, &eeprom[0]);
2069 if (err)
2070 goto out;
2072 bp->dev->dev_addr[0] = eeprom[79];
2073 bp->dev->dev_addr[1] = eeprom[78];
2074 bp->dev->dev_addr[2] = eeprom[81];
2075 bp->dev->dev_addr[3] = eeprom[80];
2076 bp->dev->dev_addr[4] = eeprom[83];
2077 bp->dev->dev_addr[5] = eeprom[82];
2079 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2080 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2081 return -EINVAL;
2082 }
2084 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2086 bp->phy_addr = eeprom[90] & 0x1f;
2088 /* With this, plus the rx_header prepended to the data by the
2089 * hardware, we'll land the ethernet header on a 2-byte boundary.
2090 */
2091 bp->rx_offset = 30;
2093 bp->imask = IMASK_DEF;
2095 bp->core_unit = ssb_core_unit(bp);
2096 bp->dma_offset = SB_PCI_DMA;
2098 /* XXX - really required?
2099 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2100 */
2102 if (ssb_get_core_rev(bp) >= 7)
2103 bp->flags |= B44_FLAG_B0_ANDLATER;
2105 out:
2106 return err;
2107 }
2109 static int __devinit b44_init_one(struct pci_dev *pdev,
2110 const struct pci_device_id *ent)
2111 {
2112 static int b44_version_printed = 0;
2113 unsigned long b44reg_base, b44reg_len;
2114 struct net_device *dev;
2115 struct b44 *bp;
2116 int err, i;
2118 if (b44_version_printed++ == 0)
2119 printk(KERN_INFO "%s", version);
2121 err = pci_enable_device(pdev);
2122 if (err) {
2123 dev_err(&pdev->dev, "Cannot enable PCI device, "
2124 "aborting.\n");
2125 return err;
2126 }
2128 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2129 dev_err(&pdev->dev,
2130 "Cannot find proper PCI device "
2131 "base address, aborting.\n");
2132 err = -ENODEV;
2133 goto err_out_disable_pdev;
2134 }
2136 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2137 if (err) {
2138 dev_err(&pdev->dev,
2139 "Cannot obtain PCI resources, aborting.\n");
2140 goto err_out_disable_pdev;
2141 }
2143 pci_set_master(pdev);
2145 err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2146 if (err) {
2147 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2148 goto err_out_free_res;
2149 }
2151 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2152 if (err) {
2153 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2154 goto err_out_free_res;
2155 }
2157 b44reg_base = pci_resource_start(pdev, 0);
2158 b44reg_len = pci_resource_len(pdev, 0);
2160 dev = alloc_etherdev(sizeof(*bp));
2161 if (!dev) {
2162 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2163 err = -ENOMEM;
2164 goto err_out_free_res;
2165 }
2167 SET_MODULE_OWNER(dev);
2168 SET_NETDEV_DEV(dev,&pdev->dev);
2170 /* No interesting netdevice features in this card... */
2171 dev->features |= 0;
2173 bp = netdev_priv(dev);
2174 bp->pdev = pdev;
2175 bp->dev = dev;
2177 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2179 spin_lock_init(&bp->lock);
2181 bp->regs = ioremap(b44reg_base, b44reg_len);
2182 if (bp->regs == 0UL) {
2183 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2184 err = -ENOMEM;
2185 goto err_out_free_dev;
2186 }
2188 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2189 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2191 dev->open = b44_open;
2192 dev->stop = b44_close;
2193 dev->hard_start_xmit = b44_start_xmit;
2194 dev->get_stats = b44_get_stats;
2195 dev->set_multicast_list = b44_set_rx_mode;
2196 dev->set_mac_address = b44_set_mac_addr;
2197 dev->do_ioctl = b44_ioctl;
2198 dev->tx_timeout = b44_tx_timeout;
2199 dev->poll = b44_poll;
2200 dev->weight = 64;
2201 dev->watchdog_timeo = B44_TX_TIMEOUT;
2202 #ifdef CONFIG_NET_POLL_CONTROLLER
2203 dev->poll_controller = b44_poll_controller;
2204 #endif
2205 dev->change_mtu = b44_change_mtu;
2206 dev->irq = pdev->irq;
2207 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2209 netif_carrier_off(dev);
2211 err = b44_get_invariants(bp);
2212 if (err) {
2213 dev_err(&pdev->dev,
2214 "Problem fetching invariants of chip, aborting.\n");
2215 goto err_out_iounmap;
2216 }
2218 bp->mii_if.dev = dev;
2219 bp->mii_if.mdio_read = b44_mii_read;
2220 bp->mii_if.mdio_write = b44_mii_write;
2221 bp->mii_if.phy_id = bp->phy_addr;
2222 bp->mii_if.phy_id_mask = 0x1f;
2223 bp->mii_if.reg_num_mask = 0x1f;
2225 /* By default, advertise all speed/duplex settings. */
2226 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2227 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2229 /* By default, auto-negotiate PAUSE. */
2230 bp->flags |= B44_FLAG_PAUSE_AUTO;
2232 err = register_netdev(dev);
2233 if (err) {
2234 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2235 goto err_out_iounmap;
2236 }
2238 pci_set_drvdata(pdev, dev);
2240 pci_save_state(bp->pdev);
2242 /* Chip reset provides power to the b44 MAC & PCI cores, which
2243 * is necessary for MAC register access.
2244 */
2245 b44_chip_reset(bp);
2247 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2248 for (i = 0; i < 6; i++)
2249 printk("%2.2x%c", dev->dev_addr[i],
2250 i == 5 ? '\n' : ':');
2252 return 0;
2254 err_out_iounmap:
2255 iounmap(bp->regs);
2257 err_out_free_dev:
2258 free_netdev(dev);
2260 err_out_free_res:
2261 pci_release_regions(pdev);
2263 err_out_disable_pdev:
2264 pci_disable_device(pdev);
2265 pci_set_drvdata(pdev, NULL);
2266 return err;
2267 }
2269 static void __devexit b44_remove_one(struct pci_dev *pdev)
2270 {
2271 struct net_device *dev = pci_get_drvdata(pdev);
2272 struct b44 *bp = netdev_priv(dev);
2274 unregister_netdev(dev);
2275 iounmap(bp->regs);
2276 free_netdev(dev);
2277 pci_release_regions(pdev);
2278 pci_disable_device(pdev);
2279 pci_set_drvdata(pdev, NULL);
2280 }
2282 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2283 {
2284 struct net_device *dev = pci_get_drvdata(pdev);
2285 struct b44 *bp = netdev_priv(dev);
2287 if (!netif_running(dev))
2288 return 0;
2290 del_timer_sync(&bp->timer);
2292 spin_lock_irq(&bp->lock);
2294 b44_halt(bp);
2295 netif_carrier_off(bp->dev);
2296 netif_device_detach(bp->dev);
2297 b44_free_rings(bp);
2299 spin_unlock_irq(&bp->lock);
2301 free_irq(dev->irq, dev);
2302 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2303 b44_init_hw(bp, 0);
2304 b44_setup_wol(bp);
2305 }
2306 pci_disable_device(pdev);
2307 return 0;
2308 }
2310 static int b44_resume(struct pci_dev *pdev)
2311 {
2312 struct net_device *dev = pci_get_drvdata(pdev);
2313 struct b44 *bp = netdev_priv(dev);
2315 pci_restore_state(pdev);
2316 pci_enable_device(pdev);
2317 pci_set_master(pdev);
2319 if (!netif_running(dev))
2320 return 0;
2322 if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
2323 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2325 spin_lock_irq(&bp->lock);
2327 b44_init_rings(bp);
2328 b44_init_hw(bp, 1);
2329 netif_device_attach(bp->dev);
2330 spin_unlock_irq(&bp->lock);
2332 bp->timer.expires = jiffies + HZ;
2333 add_timer(&bp->timer);
2335 b44_enable_ints(bp);
2336 netif_wake_queue(dev);
2337 return 0;
2338 }
2340 static struct pci_driver b44_driver = {
2341 .name = DRV_MODULE_NAME,
2342 .id_table = b44_pci_tbl,
2343 .probe = b44_init_one,
2344 .remove = __devexit_p(b44_remove_one),
2345 .suspend = b44_suspend,
2346 .resume = b44_resume,
2347 };
2349 static int __init b44_init(void)
2350 {
2351 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2353 /* Setup parameters for syncing RX/TX DMA descriptors */
2354 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2355 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2357 return pci_module_init(&b44_driver);
2358 }
2360 static void __exit b44_cleanup(void)
2361 {
2362 pci_unregister_driver(&b44_driver);
2363 }
2365 module_init(b44_init);
2366 module_exit(b44_cleanup);