ia64/linux-2.6.18-xen.hg

annotate drivers/net/bmac.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, causing temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), then we may as well keep
those pages rather than returning them to Xen.
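
As a rough illustration of the retry behaviour described above, the
logic amounts to something like the minimal C sketch below. This is
not the code in this changeset: alloc_and_populate() and
schedule_balloon_retry() are hypothetical helpers standing in for the
real reservation hypercall and retry timer.

    /*
     * Illustrative sketch only -- hypothetical helper names, not the
     * actual balloon driver code.
     */
    extern long alloc_and_populate(unsigned long nr_pages); /* pages granted */
    extern void schedule_balloon_retry(void);               /* run again later */

    static unsigned long current_pages;

    static void balloon_up(unsigned long wanted)
    {
            long got = alloc_and_populate(wanted);  /* may be fewer than wanted */

            if (got > 0)
                    current_pages += got;           /* keep a partial grant */

            if (got < 0 || (unsigned long)got < wanted)
                    schedule_balloon_retry();       /* no hard limit; retry on a timer */
    }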

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
rev   line source
ian@0 1 /*
ian@0 2 * Network device driver for the BMAC ethernet controller on
ian@0 3 * Apple Powermacs. Assumes it's under a DBDMA controller.
ian@0 4 *
ian@0 5 * Copyright (C) 1998 Randy Gobbel.
ian@0 6 *
ian@0 7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
ian@0 8 * dynamic procfs inode.
ian@0 9 */
ian@0 10 #include <linux/module.h>
ian@0 11 #include <linux/kernel.h>
ian@0 12 #include <linux/netdevice.h>
ian@0 13 #include <linux/etherdevice.h>
ian@0 14 #include <linux/delay.h>
ian@0 15 #include <linux/string.h>
ian@0 16 #include <linux/timer.h>
ian@0 17 #include <linux/proc_fs.h>
ian@0 18 #include <linux/init.h>
ian@0 19 #include <linux/spinlock.h>
ian@0 20 #include <linux/crc32.h>
ian@0 21 #include <asm/prom.h>
ian@0 22 #include <asm/dbdma.h>
ian@0 23 #include <asm/io.h>
ian@0 24 #include <asm/page.h>
ian@0 25 #include <asm/pgtable.h>
ian@0 26 #include <asm/machdep.h>
ian@0 27 #include <asm/pmac_feature.h>
ian@0 28 #include <asm/macio.h>
ian@0 29 #include <asm/irq.h>
ian@0 30
ian@0 31 #include "bmac.h"
ian@0 32
ian@0 33 #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
ian@0 34 #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
ian@0 35
ian@0 36 /*
ian@0 37 * CRC polynomial - used in working out multicast filter bits.
ian@0 38 */
ian@0 39 #define ENET_CRCPOLY 0x04c11db7
ian@0 40
ian@0 41 /* switch to use multicast code lifted from sunhme driver */
ian@0 42 #define SUNHME_MULTICAST
ian@0 43
ian@0 44 #define N_RX_RING 64
ian@0 45 #define N_TX_RING 32
ian@0 46 #define MAX_TX_ACTIVE 1
ian@0 47 #define ETHERCRC 4
ian@0 48 #define ETHERMINPACKET 64
ian@0 49 #define ETHERMTU 1500
ian@0 50 #define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
ian@0 51 #define TX_TIMEOUT HZ /* 1 second */
ian@0 52
ian@0 53 /* Bits in transmit DMA status */
ian@0 54 #define TX_DMA_ERR 0x80
ian@0 55
ian@0 56 #define XXDEBUG(args)
ian@0 57
ian@0 58 struct bmac_data {
ian@0 59 /* volatile struct bmac *bmac; */
ian@0 60 struct sk_buff_head *queue;
ian@0 61 volatile struct dbdma_regs __iomem *tx_dma;
ian@0 62 int tx_dma_intr;
ian@0 63 volatile struct dbdma_regs __iomem *rx_dma;
ian@0 64 int rx_dma_intr;
ian@0 65 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
ian@0 66 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
ian@0 67 struct macio_dev *mdev;
ian@0 68 int is_bmac_plus;
ian@0 69 struct sk_buff *rx_bufs[N_RX_RING];
ian@0 70 int rx_fill;
ian@0 71 int rx_empty;
ian@0 72 struct sk_buff *tx_bufs[N_TX_RING];
ian@0 73 int tx_fill;
ian@0 74 int tx_empty;
ian@0 75 unsigned char tx_fullup;
ian@0 76 struct net_device_stats stats;
ian@0 77 struct timer_list tx_timeout;
ian@0 78 int timeout_active;
ian@0 79 int sleeping;
ian@0 80 int opened;
ian@0 81 unsigned short hash_use_count[64];
ian@0 82 unsigned short hash_table_mask[4];
ian@0 83 spinlock_t lock;
ian@0 84 };
ian@0 85
ian@0 86 #if 0 /* Move that to ethtool */
ian@0 87
ian@0 88 typedef struct bmac_reg_entry {
ian@0 89 char *name;
ian@0 90 unsigned short reg_offset;
ian@0 91 } bmac_reg_entry_t;
ian@0 92
ian@0 93 #define N_REG_ENTRIES 31
ian@0 94
ian@0 95 static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
ian@0 96 {"MEMADD", MEMADD},
ian@0 97 {"MEMDATAHI", MEMDATAHI},
ian@0 98 {"MEMDATALO", MEMDATALO},
ian@0 99 {"TXPNTR", TXPNTR},
ian@0 100 {"RXPNTR", RXPNTR},
ian@0 101 {"IPG1", IPG1},
ian@0 102 {"IPG2", IPG2},
ian@0 103 {"ALIMIT", ALIMIT},
ian@0 104 {"SLOT", SLOT},
ian@0 105 {"PALEN", PALEN},
ian@0 106 {"PAPAT", PAPAT},
ian@0 107 {"TXSFD", TXSFD},
ian@0 108 {"JAM", JAM},
ian@0 109 {"TXCFG", TXCFG},
ian@0 110 {"TXMAX", TXMAX},
ian@0 111 {"TXMIN", TXMIN},
ian@0 112 {"PAREG", PAREG},
ian@0 113 {"DCNT", DCNT},
ian@0 114 {"NCCNT", NCCNT},
ian@0 115 {"NTCNT", NTCNT},
ian@0 116 {"EXCNT", EXCNT},
ian@0 117 {"LTCNT", LTCNT},
ian@0 118 {"TXSM", TXSM},
ian@0 119 {"RXCFG", RXCFG},
ian@0 120 {"RXMAX", RXMAX},
ian@0 121 {"RXMIN", RXMIN},
ian@0 122 {"FRCNT", FRCNT},
ian@0 123 {"AECNT", AECNT},
ian@0 124 {"FECNT", FECNT},
ian@0 125 {"RXSM", RXSM},
ian@0 126 {"RXCV", RXCV}
ian@0 127 };
ian@0 128
ian@0 129 #endif
ian@0 130
ian@0 131 static unsigned char *bmac_emergency_rxbuf;
ian@0 132
ian@0 133 /*
ian@0 134 * Number of bytes of private data per BMAC: allow enough for
ian@0 135 * the rx and tx dma commands plus a branch dma command each,
ian@0 136 * and another 16 bytes to allow us to align the dma command
ian@0 137 * buffers on a 16 byte boundary.
ian@0 138 */
ian@0 139 #define PRIV_BYTES (sizeof(struct bmac_data) \
ian@0 140 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
ian@0 141 + sizeof(struct sk_buff_head))
ian@0 142
ian@0 143 static unsigned char bitrev(unsigned char b);
ian@0 144 static int bmac_open(struct net_device *dev);
ian@0 145 static int bmac_close(struct net_device *dev);
ian@0 146 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
ian@0 147 static struct net_device_stats *bmac_stats(struct net_device *dev);
ian@0 148 static void bmac_set_multicast(struct net_device *dev);
ian@0 149 static void bmac_reset_and_enable(struct net_device *dev);
ian@0 150 static void bmac_start_chip(struct net_device *dev);
ian@0 151 static void bmac_init_chip(struct net_device *dev);
ian@0 152 static void bmac_init_registers(struct net_device *dev);
ian@0 153 static void bmac_enable_and_reset_chip(struct net_device *dev);
ian@0 154 static int bmac_set_address(struct net_device *dev, void *addr);
ian@0 155 static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
ian@0 156 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
ian@0 157 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
ian@0 158 static void bmac_set_timeout(struct net_device *dev);
ian@0 159 static void bmac_tx_timeout(unsigned long data);
ian@0 160 static int bmac_output(struct sk_buff *skb, struct net_device *dev);
ian@0 161 static void bmac_start(struct net_device *dev);
ian@0 162
ian@0 163 #define DBDMA_SET(x) ( ((x) | (x) << 16) )
ian@0 164 #define DBDMA_CLEAR(x) ( (x) << 16)
ian@0 165
ian@0 166 static inline void
ian@0 167 dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
ian@0 168 {
ian@0 169 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
ian@0 170 return;
ian@0 171 }
ian@0 172
ian@0 173 static inline unsigned long
ian@0 174 dbdma_ld32(volatile __u32 __iomem *a)
ian@0 175 {
ian@0 176 __u32 swap;
ian@0 177 __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
ian@0 178 return swap;
ian@0 179 }
ian@0 180
ian@0 181 static void
ian@0 182 dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
ian@0 183 {
ian@0 184 dbdma_st32(&dmap->control,
ian@0 185 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
ian@0 186 eieio();
ian@0 187 }
ian@0 188
ian@0 189 static void
ian@0 190 dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
ian@0 191 {
ian@0 192 dbdma_st32(&dmap->control,
ian@0 193 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
ian@0 194 eieio();
ian@0 195 while (dbdma_ld32(&dmap->status) & RUN)
ian@0 196 eieio();
ian@0 197 }
ian@0 198
ian@0 199 static void
ian@0 200 dbdma_setcmd(volatile struct dbdma_cmd *cp,
ian@0 201 unsigned short cmd, unsigned count, unsigned long addr,
ian@0 202 unsigned long cmd_dep)
ian@0 203 {
ian@0 204 out_le16(&cp->command, cmd);
ian@0 205 out_le16(&cp->req_count, count);
ian@0 206 out_le32(&cp->phy_addr, addr);
ian@0 207 out_le32(&cp->cmd_dep, cmd_dep);
ian@0 208 out_le16(&cp->xfer_status, 0);
ian@0 209 out_le16(&cp->res_count, 0);
ian@0 210 }
ian@0 211
ian@0 212 static inline
ian@0 213 void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
ian@0 214 {
ian@0 215 out_le16((void __iomem *)dev->base_addr + reg_offset, data);
ian@0 216 }
ian@0 217
ian@0 218
ian@0 219 static inline
ian@0 220 unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
ian@0 221 {
ian@0 222 return in_le16((void __iomem *)dev->base_addr + reg_offset);
ian@0 223 }
ian@0 224
ian@0 225 static void
ian@0 226 bmac_enable_and_reset_chip(struct net_device *dev)
ian@0 227 {
ian@0 228 struct bmac_data *bp = netdev_priv(dev);
ian@0 229 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
ian@0 230 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
ian@0 231
ian@0 232 if (rd)
ian@0 233 dbdma_reset(rd);
ian@0 234 if (td)
ian@0 235 dbdma_reset(td);
ian@0 236
ian@0 237 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
ian@0 238 }
ian@0 239
ian@0 240 #define MIFDELAY udelay(10)
ian@0 241
ian@0 242 static unsigned int
ian@0 243 bmac_mif_readbits(struct net_device *dev, int nb)
ian@0 244 {
ian@0 245 unsigned int val = 0;
ian@0 246
ian@0 247 while (--nb >= 0) {
ian@0 248 bmwrite(dev, MIFCSR, 0);
ian@0 249 MIFDELAY;
ian@0 250 if (bmread(dev, MIFCSR) & 8)
ian@0 251 val |= 1 << nb;
ian@0 252 bmwrite(dev, MIFCSR, 1);
ian@0 253 MIFDELAY;
ian@0 254 }
ian@0 255 bmwrite(dev, MIFCSR, 0);
ian@0 256 MIFDELAY;
ian@0 257 bmwrite(dev, MIFCSR, 1);
ian@0 258 MIFDELAY;
ian@0 259 return val;
ian@0 260 }
ian@0 261
ian@0 262 static void
ian@0 263 bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
ian@0 264 {
ian@0 265 int b;
ian@0 266
ian@0 267 while (--nb >= 0) {
ian@0 268 b = (val & (1 << nb))? 6: 4;
ian@0 269 bmwrite(dev, MIFCSR, b);
ian@0 270 MIFDELAY;
ian@0 271 bmwrite(dev, MIFCSR, b|1);
ian@0 272 MIFDELAY;
ian@0 273 }
ian@0 274 }
ian@0 275
ian@0 276 static unsigned int
ian@0 277 bmac_mif_read(struct net_device *dev, unsigned int addr)
ian@0 278 {
ian@0 279 unsigned int val;
ian@0 280
ian@0 281 bmwrite(dev, MIFCSR, 4);
ian@0 282 MIFDELAY;
ian@0 283 bmac_mif_writebits(dev, ~0U, 32);
ian@0 284 bmac_mif_writebits(dev, 6, 4);
ian@0 285 bmac_mif_writebits(dev, addr, 10);
ian@0 286 bmwrite(dev, MIFCSR, 2);
ian@0 287 MIFDELAY;
ian@0 288 bmwrite(dev, MIFCSR, 1);
ian@0 289 MIFDELAY;
ian@0 290 val = bmac_mif_readbits(dev, 17);
ian@0 291 bmwrite(dev, MIFCSR, 4);
ian@0 292 MIFDELAY;
ian@0 293 return val;
ian@0 294 }
ian@0 295
ian@0 296 static void
ian@0 297 bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
ian@0 298 {
ian@0 299 bmwrite(dev, MIFCSR, 4);
ian@0 300 MIFDELAY;
ian@0 301 bmac_mif_writebits(dev, ~0U, 32);
ian@0 302 bmac_mif_writebits(dev, 5, 4);
ian@0 303 bmac_mif_writebits(dev, addr, 10);
ian@0 304 bmac_mif_writebits(dev, 2, 2);
ian@0 305 bmac_mif_writebits(dev, val, 16);
ian@0 306 bmac_mif_writebits(dev, 3, 2);
ian@0 307 }
ian@0 308
ian@0 309 static void
ian@0 310 bmac_init_registers(struct net_device *dev)
ian@0 311 {
ian@0 312 struct bmac_data *bp = netdev_priv(dev);
ian@0 313 volatile unsigned short regValue;
ian@0 314 unsigned short *pWord16;
ian@0 315 int i;
ian@0 316
ian@0 317 /* XXDEBUG(("bmac: enter init_registers\n")); */
ian@0 318
ian@0 319 bmwrite(dev, RXRST, RxResetValue);
ian@0 320 bmwrite(dev, TXRST, TxResetBit);
ian@0 321
ian@0 322 i = 100;
ian@0 323 do {
ian@0 324 --i;
ian@0 325 udelay(10000);
ian@0 326 regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
ian@0 327 } while ((regValue & TxResetBit) && i > 0);
ian@0 328
ian@0 329 if (!bp->is_bmac_plus) {
ian@0 330 regValue = bmread(dev, XCVRIF);
ian@0 331 regValue |= ClkBit | SerialMode | COLActiveLow;
ian@0 332 bmwrite(dev, XCVRIF, regValue);
ian@0 333 udelay(10000);
ian@0 334 }
ian@0 335
ian@0 336 bmwrite(dev, RSEED, (unsigned short)0x1968);
ian@0 337
ian@0 338 regValue = bmread(dev, XIFC);
ian@0 339 regValue |= TxOutputEnable;
ian@0 340 bmwrite(dev, XIFC, regValue);
ian@0 341
ian@0 342 bmread(dev, PAREG);
ian@0 343
ian@0 344 /* set collision counters to 0 */
ian@0 345 bmwrite(dev, NCCNT, 0);
ian@0 346 bmwrite(dev, NTCNT, 0);
ian@0 347 bmwrite(dev, EXCNT, 0);
ian@0 348 bmwrite(dev, LTCNT, 0);
ian@0 349
ian@0 350 /* set rx counters to 0 */
ian@0 351 bmwrite(dev, FRCNT, 0);
ian@0 352 bmwrite(dev, LECNT, 0);
ian@0 353 bmwrite(dev, AECNT, 0);
ian@0 354 bmwrite(dev, FECNT, 0);
ian@0 355 bmwrite(dev, RXCV, 0);
ian@0 356
ian@0 357 /* set tx fifo information */
ian@0 358 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
ian@0 359
ian@0 360 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
ian@0 361 bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
ian@0 362
ian@0 363 /* set rx fifo information */
ian@0 364 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
ian@0 365 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
ian@0 366
ian@0 367 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
ian@0 368 bmread(dev, STATUS); /* read it just to clear it */
ian@0 369
ian@0 370 /* zero out the chip Hash Filter registers */
ian@0 371 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
ian@0 372 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
ian@0 373 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
ian@0 374 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
ian@0 375 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
ian@0 376
ian@0 377 pWord16 = (unsigned short *)dev->dev_addr;
ian@0 378 bmwrite(dev, MADD0, *pWord16++);
ian@0 379 bmwrite(dev, MADD1, *pWord16++);
ian@0 380 bmwrite(dev, MADD2, *pWord16);
ian@0 381
ian@0 382 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
ian@0 383
ian@0 384 bmwrite(dev, INTDISABLE, EnableNormal);
ian@0 385
ian@0 386 return;
ian@0 387 }
ian@0 388
ian@0 389 #if 0
ian@0 390 static void
ian@0 391 bmac_disable_interrupts(struct net_device *dev)
ian@0 392 {
ian@0 393 bmwrite(dev, INTDISABLE, DisableAll);
ian@0 394 }
ian@0 395
ian@0 396 static void
ian@0 397 bmac_enable_interrupts(struct net_device *dev)
ian@0 398 {
ian@0 399 bmwrite(dev, INTDISABLE, EnableNormal);
ian@0 400 }
ian@0 401 #endif
ian@0 402
ian@0 403
ian@0 404 static void
ian@0 405 bmac_start_chip(struct net_device *dev)
ian@0 406 {
ian@0 407 struct bmac_data *bp = netdev_priv(dev);
ian@0 408 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
ian@0 409 unsigned short oldConfig;
ian@0 410
ian@0 411 /* enable rx dma channel */
ian@0 412 dbdma_continue(rd);
ian@0 413
ian@0 414 oldConfig = bmread(dev, TXCFG);
ian@0 415 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
ian@0 416
ian@0 417 /* turn on rx plus any other bits already on (promiscuous possibly) */
ian@0 418 oldConfig = bmread(dev, RXCFG);
ian@0 419 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
ian@0 420 udelay(20000);
ian@0 421 }
ian@0 422
ian@0 423 static void
ian@0 424 bmac_init_phy(struct net_device *dev)
ian@0 425 {
ian@0 426 unsigned int addr;
ian@0 427 struct bmac_data *bp = netdev_priv(dev);
ian@0 428
ian@0 429 printk(KERN_DEBUG "phy registers:");
ian@0 430 for (addr = 0; addr < 32; ++addr) {
ian@0 431 if ((addr & 7) == 0)
ian@0 432 printk("\n" KERN_DEBUG);
ian@0 433 printk(" %.4x", bmac_mif_read(dev, addr));
ian@0 434 }
ian@0 435 printk("\n");
ian@0 436 if (bp->is_bmac_plus) {
ian@0 437 unsigned int capable, ctrl;
ian@0 438
ian@0 439 ctrl = bmac_mif_read(dev, 0);
ian@0 440 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
ian@0 441 if (bmac_mif_read(dev, 4) != capable
ian@0 442 || (ctrl & 0x1000) == 0) {
ian@0 443 bmac_mif_write(dev, 4, capable);
ian@0 444 bmac_mif_write(dev, 0, 0x1200);
ian@0 445 } else
ian@0 446 bmac_mif_write(dev, 0, 0x1000);
ian@0 447 }
ian@0 448 }
ian@0 449
ian@0 450 static void bmac_init_chip(struct net_device *dev)
ian@0 451 {
ian@0 452 bmac_init_phy(dev);
ian@0 453 bmac_init_registers(dev);
ian@0 454 }
ian@0 455
ian@0 456 #ifdef CONFIG_PM
ian@0 457 static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
ian@0 458 {
ian@0 459 struct net_device* dev = macio_get_drvdata(mdev);
ian@0 460 struct bmac_data *bp = netdev_priv(dev);
ian@0 461 unsigned long flags;
ian@0 462 unsigned short config;
ian@0 463 int i;
ian@0 464
ian@0 465 netif_device_detach(dev);
ian@0 466 /* prolly should wait for dma to finish & turn off the chip */
ian@0 467 spin_lock_irqsave(&bp->lock, flags);
ian@0 468 if (bp->timeout_active) {
ian@0 469 del_timer(&bp->tx_timeout);
ian@0 470 bp->timeout_active = 0;
ian@0 471 }
ian@0 472 disable_irq(dev->irq);
ian@0 473 disable_irq(bp->tx_dma_intr);
ian@0 474 disable_irq(bp->rx_dma_intr);
ian@0 475 bp->sleeping = 1;
ian@0 476 spin_unlock_irqrestore(&bp->lock, flags);
ian@0 477 if (bp->opened) {
ian@0 478 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
ian@0 479 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
ian@0 480
ian@0 481 config = bmread(dev, RXCFG);
ian@0 482 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
ian@0 483 config = bmread(dev, TXCFG);
ian@0 484 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
ian@0 485 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
ian@0 486 /* disable rx and tx dma */
ian@0 487 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
ian@0 488 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
ian@0 489 /* free some skb's */
ian@0 490 for (i=0; i<N_RX_RING; i++) {
ian@0 491 if (bp->rx_bufs[i] != NULL) {
ian@0 492 dev_kfree_skb(bp->rx_bufs[i]);
ian@0 493 bp->rx_bufs[i] = NULL;
ian@0 494 }
ian@0 495 }
ian@0 496 for (i = 0; i<N_TX_RING; i++) {
ian@0 497 if (bp->tx_bufs[i] != NULL) {
ian@0 498 dev_kfree_skb(bp->tx_bufs[i]);
ian@0 499 bp->tx_bufs[i] = NULL;
ian@0 500 }
ian@0 501 }
ian@0 502 }
ian@0 503 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
ian@0 504 return 0;
ian@0 505 }
ian@0 506
ian@0 507 static int bmac_resume(struct macio_dev *mdev)
ian@0 508 {
ian@0 509 struct net_device* dev = macio_get_drvdata(mdev);
ian@0 510 struct bmac_data *bp = netdev_priv(dev);
ian@0 511
ian@0 512 /* see if this is enough */
ian@0 513 if (bp->opened)
ian@0 514 bmac_reset_and_enable(dev);
ian@0 515
ian@0 516 enable_irq(dev->irq);
ian@0 517 enable_irq(bp->tx_dma_intr);
ian@0 518 enable_irq(bp->rx_dma_intr);
ian@0 519 netif_device_attach(dev);
ian@0 520
ian@0 521 return 0;
ian@0 522 }
ian@0 523 #endif /* CONFIG_PM */
ian@0 524
ian@0 525 static int bmac_set_address(struct net_device *dev, void *addr)
ian@0 526 {
ian@0 527 struct bmac_data *bp = netdev_priv(dev);
ian@0 528 unsigned char *p = addr;
ian@0 529 unsigned short *pWord16;
ian@0 530 unsigned long flags;
ian@0 531 int i;
ian@0 532
ian@0 533 XXDEBUG(("bmac: enter set_address\n"));
ian@0 534 spin_lock_irqsave(&bp->lock, flags);
ian@0 535
ian@0 536 for (i = 0; i < 6; ++i) {
ian@0 537 dev->dev_addr[i] = p[i];
ian@0 538 }
ian@0 539 /* load up the hardware address */
ian@0 540 pWord16 = (unsigned short *)dev->dev_addr;
ian@0 541 bmwrite(dev, MADD0, *pWord16++);
ian@0 542 bmwrite(dev, MADD1, *pWord16++);
ian@0 543 bmwrite(dev, MADD2, *pWord16);
ian@0 544
ian@0 545 spin_unlock_irqrestore(&bp->lock, flags);
ian@0 546 XXDEBUG(("bmac: exit set_address\n"));
ian@0 547 return 0;
ian@0 548 }
ian@0 549
ian@0 550 static inline void bmac_set_timeout(struct net_device *dev)
ian@0 551 {
ian@0 552 struct bmac_data *bp = netdev_priv(dev);
ian@0 553 unsigned long flags;
ian@0 554
ian@0 555 spin_lock_irqsave(&bp->lock, flags);
ian@0 556 if (bp->timeout_active)
ian@0 557 del_timer(&bp->tx_timeout);
ian@0 558 bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
ian@0 559 bp->tx_timeout.function = bmac_tx_timeout;
ian@0 560 bp->tx_timeout.data = (unsigned long) dev;
ian@0 561 add_timer(&bp->tx_timeout);
ian@0 562 bp->timeout_active = 1;
ian@0 563 spin_unlock_irqrestore(&bp->lock, flags);
ian@0 564 }
ian@0 565
ian@0 566 static void
ian@0 567 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
ian@0 568 {
ian@0 569 void *vaddr;
ian@0 570 unsigned long baddr;
ian@0 571 unsigned long len;
ian@0 572
ian@0 573 len = skb->len;
ian@0 574 vaddr = skb->data;
ian@0 575 baddr = virt_to_bus(vaddr);
ian@0 576
ian@0 577 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
ian@0 578 }
ian@0 579
ian@0 580 static void
ian@0 581 bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
ian@0 582 {
ian@0 583 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
ian@0 584
ian@0 585 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
ian@0 586 virt_to_bus(addr), 0);
ian@0 587 }
ian@0 588
ian@0 589 /* Bit-reverse one byte of an ethernet hardware address. */
ian@0 590 static unsigned char
ian@0 591 bitrev(unsigned char b)
ian@0 592 {
ian@0 593 int d = 0, i;
ian@0 594
ian@0 595 for (i = 0; i < 8; ++i, b >>= 1)
ian@0 596 d = (d << 1) | (b & 1);
ian@0 597 return d;
ian@0 598 }
ian@0 599
ian@0 600
ian@0 601 static void
ian@0 602 bmac_init_tx_ring(struct bmac_data *bp)
ian@0 603 {
ian@0 604 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
ian@0 605
ian@0 606 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
ian@0 607
ian@0 608 bp->tx_empty = 0;
ian@0 609 bp->tx_fill = 0;
ian@0 610 bp->tx_fullup = 0;
ian@0 611
ian@0 612 /* put a branch at the end of the tx command list */
ian@0 613 dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
ian@0 614 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
ian@0 615
ian@0 616 /* reset tx dma */
ian@0 617 dbdma_reset(td);
ian@0 618 out_le32(&td->wait_sel, 0x00200020);
ian@0 619 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
ian@0 620 }
ian@0 621
ian@0 622 static int
ian@0 623 bmac_init_rx_ring(struct bmac_data *bp)
ian@0 624 {
ian@0 625 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
ian@0 626 int i;
ian@0 627 struct sk_buff *skb;
ian@0 628
ian@0 629 /* initialize list of sk_buffs for receiving and set up recv dma */
ian@0 630 memset((char *)bp->rx_cmds, 0,
ian@0 631 (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
ian@0 632 for (i = 0; i < N_RX_RING; i++) {
ian@0 633 if ((skb = bp->rx_bufs[i]) == NULL) {
ian@0 634 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
ian@0 635 if (skb != NULL)
ian@0 636 skb_reserve(skb, 2);
ian@0 637 }
ian@0 638 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
ian@0 639 }
ian@0 640
ian@0 641 bp->rx_empty = 0;
ian@0 642 bp->rx_fill = i;
ian@0 643
ian@0 644 /* Put a branch back to the beginning of the receive command list */
ian@0 645 dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
ian@0 646 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
ian@0 647
ian@0 648 /* start rx dma */
ian@0 649 dbdma_reset(rd);
ian@0 650 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
ian@0 651
ian@0 652 return 1;
ian@0 653 }
ian@0 654
ian@0 655
ian@0 656 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
ian@0 657 {
ian@0 658 struct bmac_data *bp = netdev_priv(dev);
ian@0 659 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
ian@0 660 int i;
ian@0 661
ian@0 662 /* see if there's a free slot in the tx ring */
ian@0 663 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
ian@0 664 /* bp->tx_empty, bp->tx_fill)); */
ian@0 665 i = bp->tx_fill + 1;
ian@0 666 if (i >= N_TX_RING)
ian@0 667 i = 0;
ian@0 668 if (i == bp->tx_empty) {
ian@0 669 netif_stop_queue(dev);
ian@0 670 bp->tx_fullup = 1;
ian@0 671 XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
ian@0 672 return -1; /* can't take it at the moment */
ian@0 673 }
ian@0 674
ian@0 675 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
ian@0 676
ian@0 677 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
ian@0 678
ian@0 679 bp->tx_bufs[bp->tx_fill] = skb;
ian@0 680 bp->tx_fill = i;
ian@0 681
ian@0 682 bp->stats.tx_bytes += skb->len;
ian@0 683
ian@0 684 dbdma_continue(td);
ian@0 685
ian@0 686 return 0;
ian@0 687 }
ian@0 688
ian@0 689 static int rxintcount;
ian@0 690
ian@0 691 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
ian@0 692 {
ian@0 693 struct net_device *dev = (struct net_device *) dev_id;
ian@0 694 struct bmac_data *bp = netdev_priv(dev);
ian@0 695 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
ian@0 696 volatile struct dbdma_cmd *cp;
ian@0 697 int i, nb, stat;
ian@0 698 struct sk_buff *skb;
ian@0 699 unsigned int residual;
ian@0 700 int last;
ian@0 701 unsigned long flags;
ian@0 702
ian@0 703 spin_lock_irqsave(&bp->lock, flags);
ian@0 704
ian@0 705 if (++rxintcount < 10) {
ian@0 706 XXDEBUG(("bmac_rxdma_intr\n"));
ian@0 707 }
ian@0 708
ian@0 709 last = -1;
ian@0 710 i = bp->rx_empty;
ian@0 711
ian@0 712 while (1) {
ian@0 713 cp = &bp->rx_cmds[i];
ian@0 714 stat = ld_le16(&cp->xfer_status);
ian@0 715 residual = ld_le16(&cp->res_count);
ian@0 716 if ((stat & ACTIVE) == 0)
ian@0 717 break;
ian@0 718 nb = RX_BUFLEN - residual - 2;
ian@0 719 if (nb < (ETHERMINPACKET - ETHERCRC)) {
ian@0 720 skb = NULL;
ian@0 721 bp->stats.rx_length_errors++;
ian@0 722 bp->stats.rx_errors++;
ian@0 723 } else {
ian@0 724 skb = bp->rx_bufs[i];
ian@0 725 bp->rx_bufs[i] = NULL;
ian@0 726 }
ian@0 727 if (skb != NULL) {
ian@0 728 nb -= ETHERCRC;
ian@0 729 skb_put(skb, nb);
ian@0 730 skb->dev = dev;
ian@0 731 skb->protocol = eth_type_trans(skb, dev);
ian@0 732 netif_rx(skb);
ian@0 733 dev->last_rx = jiffies;
ian@0 734 ++bp->stats.rx_packets;
ian@0 735 bp->stats.rx_bytes += nb;
ian@0 736 } else {
ian@0 737 ++bp->stats.rx_dropped;
ian@0 738 }
ian@0 739 dev->last_rx = jiffies;
ian@0 740 if ((skb = bp->rx_bufs[i]) == NULL) {
ian@0 741 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
ian@0 742 if (skb != NULL)
ian@0 743 skb_reserve(bp->rx_bufs[i], 2);
ian@0 744 }
ian@0 745 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
ian@0 746 st_le16(&cp->res_count, 0);
ian@0 747 st_le16(&cp->xfer_status, 0);
ian@0 748 last = i;
ian@0 749 if (++i >= N_RX_RING) i = 0;
ian@0 750 }
ian@0 751
ian@0 752 if (last != -1) {
ian@0 753 bp->rx_fill = last;
ian@0 754 bp->rx_empty = i;
ian@0 755 }
ian@0 756
ian@0 757 dbdma_continue(rd);
ian@0 758 spin_unlock_irqrestore(&bp->lock, flags);
ian@0 759
ian@0 760 if (rxintcount < 10) {
ian@0 761 XXDEBUG(("bmac_rxdma_intr done\n"));
ian@0 762 }
ian@0 763 return IRQ_HANDLED;
ian@0 764 }
ian@0 765
ian@0 766 static int txintcount;
ian@0 767
ian@0 768 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
ian@0 769 {
ian@0 770 struct net_device *dev = (struct net_device *) dev_id;
ian@0 771 struct bmac_data *bp = netdev_priv(dev);
ian@0 772 volatile struct dbdma_cmd *cp;
ian@0 773 int stat;
ian@0 774 unsigned long flags;
ian@0 775
ian@0 776 spin_lock_irqsave(&bp->lock, flags);
ian@0 777
ian@0 778 if (txintcount++ < 10) {
ian@0 779 XXDEBUG(("bmac_txdma_intr\n"));
ian@0 780 }
ian@0 781
ian@0 782 /* del_timer(&bp->tx_timeout); */
ian@0 783 /* bp->timeout_active = 0; */
ian@0 784
ian@0 785 while (1) {
ian@0 786 cp = &bp->tx_cmds[bp->tx_empty];
ian@0 787 stat = ld_le16(&cp->xfer_status);
ian@0 788 if (txintcount < 10) {
ian@0 789 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
ian@0 790 }
ian@0 791 if (!(stat & ACTIVE)) {
ian@0 792 /*
ian@0 793 * status field might not have been filled by DBDMA
ian@0 794 */
ian@0 795 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
ian@0 796 break;
ian@0 797 }
ian@0 798
ian@0 799 if (bp->tx_bufs[bp->tx_empty]) {
ian@0 800 ++bp->stats.tx_packets;
ian@0 801 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
ian@0 802 }
ian@0 803 bp->tx_bufs[bp->tx_empty] = NULL;
ian@0 804 bp->tx_fullup = 0;
ian@0 805 netif_wake_queue(dev);
ian@0 806 if (++bp->tx_empty >= N_TX_RING)
ian@0 807 bp->tx_empty = 0;
ian@0 808 if (bp->tx_empty == bp->tx_fill)
ian@0 809 break;
ian@0 810 }
ian@0 811
ian@0 812 spin_unlock_irqrestore(&bp->lock, flags);
ian@0 813
ian@0 814 if (txintcount < 10) {
ian@0 815 XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
ian@0 816 }
ian@0 817
ian@0 818 bmac_start(dev);
ian@0 819 return IRQ_HANDLED;
ian@0 820 }
ian@0 821
ian@0 822 static struct net_device_stats *bmac_stats(struct net_device *dev)
ian@0 823 {
ian@0 824 struct bmac_data *p = netdev_priv(dev);
ian@0 825
ian@0 826 return &p->stats;
ian@0 827 }
ian@0 828
ian@0 829 #ifndef SUNHME_MULTICAST
ian@0 830 /* Real fast bit-reversal algorithm, 6-bit values */
ian@0 831 static int reverse6[64] = {
ian@0 832 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
ian@0 833 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
ian@0 834 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
ian@0 835 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
ian@0 836 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
ian@0 837 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
ian@0 838 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
ian@0 839 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
ian@0 840 };
ian@0 841
ian@0 842 static unsigned int
ian@0 843 crc416(unsigned int curval, unsigned short nxtval)
ian@0 844 {
ian@0 845 register unsigned int counter, cur = curval, next = nxtval;
ian@0 846 register int high_crc_set, low_data_set;
ian@0 847
ian@0 848 /* Swap bytes */
ian@0 849 next = ((next & 0x00FF) << 8) | (next >> 8);
ian@0 850
ian@0 851 /* Compute bit-by-bit */
ian@0 852 for (counter = 0; counter < 16; ++counter) {
ian@0 853 /* is high CRC bit set? */
ian@0 854 if ((cur & 0x80000000) == 0) high_crc_set = 0;
ian@0 855 else high_crc_set = 1;
ian@0 856
ian@0 857 cur = cur << 1;
ian@0 858
ian@0 859 if ((next & 0x0001) == 0) low_data_set = 0;
ian@0 860 else low_data_set = 1;
ian@0 861
ian@0 862 next = next >> 1;
ian@0 863
ian@0 864 /* do the XOR */
ian@0 865 if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
ian@0 866 }
ian@0 867 return cur;
ian@0 868 }
ian@0 869
ian@0 870 static unsigned int
ian@0 871 bmac_crc(unsigned short *address)
ian@0 872 {
ian@0 873 unsigned int newcrc;
ian@0 874
ian@0 875 XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
ian@0 876 newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
ian@0 877 newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
ian@0 878 newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
ian@0 879
ian@0 880 return(newcrc);
ian@0 881 }
ian@0 882
ian@0 883 /*
ian@0 884 * Add requested mcast addr to BMac's hash table filter.
ian@0 885 *
ian@0 886 */
ian@0 887
ian@0 888 static void
ian@0 889 bmac_addhash(struct bmac_data *bp, unsigned char *addr)
ian@0 890 {
ian@0 891 unsigned int crc;
ian@0 892 unsigned short mask;
ian@0 893
ian@0 894 if (!(*addr)) return;
ian@0 895 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
ian@0 896 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
ian@0 897 if (bp->hash_use_count[crc]++) return; /* This bit is already set */
ian@0 898 mask = crc % 16;
ian@0 899 mask = (unsigned char)1 << mask;
ian@0 900 bp->hash_table_mask[crc/16] |= mask;
ian@0 901 }
ian@0 902
ian@0 903 static void
ian@0 904 bmac_removehash(struct bmac_data *bp, unsigned char *addr)
ian@0 905 {
ian@0 906 unsigned int crc;
ian@0 907 unsigned char mask;
ian@0 908
ian@0 909 /* Now, delete the address from the filter copy, as indicated */
ian@0 910 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
ian@0 911 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
ian@0 912 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
ian@0 913 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
ian@0 914 mask = crc % 16;
ian@0 915 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
ian@0 916 bp->hash_table_mask[crc/16] &= mask;
ian@0 917 }
ian@0 918
ian@0 919 /*
ian@0 920 * Sync the adapter with the software copy of the multicast mask
ian@0 921 * (logical address filter).
ian@0 922 */
ian@0 923
ian@0 924 static void
ian@0 925 bmac_rx_off(struct net_device *dev)
ian@0 926 {
ian@0 927 unsigned short rx_cfg;
ian@0 928
ian@0 929 rx_cfg = bmread(dev, RXCFG);
ian@0 930 rx_cfg &= ~RxMACEnable;
ian@0 931 bmwrite(dev, RXCFG, rx_cfg);
ian@0 932 do {
ian@0 933 rx_cfg = bmread(dev, RXCFG);
ian@0 934 } while (rx_cfg & RxMACEnable);
ian@0 935 }
ian@0 936
ian@0 937 unsigned short
ian@0 938 bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
ian@0 939 {
ian@0 940 unsigned short rx_cfg;
ian@0 941
ian@0 942 rx_cfg = bmread(dev, RXCFG);
ian@0 943 rx_cfg |= RxMACEnable;
ian@0 944 if (hash_enable) rx_cfg |= RxHashFilterEnable;
ian@0 945 else rx_cfg &= ~RxHashFilterEnable;
ian@0 946 if (promisc_enable) rx_cfg |= RxPromiscEnable;
ian@0 947 else rx_cfg &= ~RxPromiscEnable;
ian@0 948 bmwrite(dev, RXRST, RxResetValue);
ian@0 949 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
ian@0 950 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
ian@0 951 bmwrite(dev, RXCFG, rx_cfg );
ian@0 952 return rx_cfg;
ian@0 953 }
ian@0 954
ian@0 955 static void
ian@0 956 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
ian@0 957 {
ian@0 958 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
ian@0 959 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
ian@0 960 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
ian@0 961 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
ian@0 962 }
ian@0 963
ian@0 964 #if 0
ian@0 965 static void
ian@0 966 bmac_add_multi(struct net_device *dev,
ian@0 967 struct bmac_data *bp, unsigned char *addr)
ian@0 968 {
ian@0 969 /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
ian@0 970 bmac_addhash(bp, addr);
ian@0 971 bmac_rx_off(dev);
ian@0 972 bmac_update_hash_table_mask(dev, bp);
ian@0 973 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
ian@0 974 /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
ian@0 975 }
ian@0 976
ian@0 977 static void
ian@0 978 bmac_remove_multi(struct net_device *dev,
ian@0 979 struct bmac_data *bp, unsigned char *addr)
ian@0 980 {
ian@0 981 bmac_removehash(bp, addr);
ian@0 982 bmac_rx_off(dev);
ian@0 983 bmac_update_hash_table_mask(dev, bp);
ian@0 984 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
ian@0 985 }
ian@0 986 #endif
ian@0 987
ian@0 988 /* Set or clear the multicast filter for this adaptor.
ian@0 989 num_addrs == -1 Promiscuous mode, receive all packets
ian@0 990 num_addrs == 0 Normal mode, clear multicast list
ian@0 991 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
ian@0 992 best-effort filtering.
ian@0 993 */
ian@0 994 static void bmac_set_multicast(struct net_device *dev)
ian@0 995 {
ian@0 996 struct dev_mc_list *dmi;
ian@0 997 struct bmac_data *bp = netdev_priv(dev);
ian@0 998 int num_addrs = dev->mc_count;
ian@0 999 unsigned short rx_cfg;
ian@0 1000 int i;
ian@0 1001
ian@0 1002 if (bp->sleeping)
ian@0 1003 return;
ian@0 1004
ian@0 1005 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
ian@0 1006
ian@0 1007 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
ian@0 1008 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
ian@0 1009 bmac_update_hash_table_mask(dev, bp);
ian@0 1010 rx_cfg = bmac_rx_on(dev, 1, 0);
ian@0 1011 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
ian@0 1012 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
ian@0 1013 rx_cfg = bmread(dev, RXCFG);
ian@0 1014 rx_cfg |= RxPromiscEnable;
ian@0 1015 bmwrite(dev, RXCFG, rx_cfg);
ian@0 1016 rx_cfg = bmac_rx_on(dev, 0, 1);
ian@0 1017 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
ian@0 1018 } else {
ian@0 1019 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
ian@0 1020 for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
ian@0 1021 if (num_addrs == 0) {
ian@0 1022 rx_cfg = bmac_rx_on(dev, 0, 0);
ian@0 1023 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
ian@0 1024 } else {
ian@0 1025 for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
ian@0 1026 bmac_addhash(bp, dmi->dmi_addr);
ian@0 1027 bmac_update_hash_table_mask(dev, bp);
ian@0 1028 rx_cfg = bmac_rx_on(dev, 1, 0);
ian@0 1029 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
ian@0 1030 }
ian@0 1031 }
ian@0 1032 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
ian@0 1033 }
ian@0 1034 #else /* ifdef SUNHME_MULTICAST */
ian@0 1035
ian@0 1036 /* The version of set_multicast below was lifted from sunhme.c */
ian@0 1037
ian@0 1038 static void bmac_set_multicast(struct net_device *dev)
ian@0 1039 {
ian@0 1040 struct dev_mc_list *dmi = dev->mc_list;
ian@0 1041 char *addrs;
ian@0 1042 int i;
ian@0 1043 unsigned short rx_cfg;
ian@0 1044 u32 crc;
ian@0 1045
ian@0 1046 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
ian@0 1047 bmwrite(dev, BHASH0, 0xffff);
ian@0 1048 bmwrite(dev, BHASH1, 0xffff);
ian@0 1049 bmwrite(dev, BHASH2, 0xffff);
ian@0 1050 bmwrite(dev, BHASH3, 0xffff);
ian@0 1051 } else if(dev->flags & IFF_PROMISC) {
ian@0 1052 rx_cfg = bmread(dev, RXCFG);
ian@0 1053 rx_cfg |= RxPromiscEnable;
ian@0 1054 bmwrite(dev, RXCFG, rx_cfg);
ian@0 1055 } else {
ian@0 1056 u16 hash_table[4];
ian@0 1057
ian@0 1058 rx_cfg = bmread(dev, RXCFG);
ian@0 1059 rx_cfg &= ~RxPromiscEnable;
ian@0 1060 bmwrite(dev, RXCFG, rx_cfg);
ian@0 1061
ian@0 1062 for(i = 0; i < 4; i++) hash_table[i] = 0;
ian@0 1063
ian@0 1064 for(i = 0; i < dev->mc_count; i++) {
ian@0 1065 addrs = dmi->dmi_addr;
ian@0 1066 dmi = dmi->next;
ian@0 1067
ian@0 1068 if(!(*addrs & 1))
ian@0 1069 continue;
ian@0 1070
ian@0 1071 crc = ether_crc_le(6, addrs);
ian@0 1072 crc >>= 26;
ian@0 1073 hash_table[crc >> 4] |= 1 << (crc & 0xf);
ian@0 1074 }
ian@0 1075 bmwrite(dev, BHASH0, hash_table[0]);
ian@0 1076 bmwrite(dev, BHASH1, hash_table[1]);
ian@0 1077 bmwrite(dev, BHASH2, hash_table[2]);
ian@0 1078 bmwrite(dev, BHASH3, hash_table[3]);
ian@0 1079 }
ian@0 1080 }
ian@0 1081 #endif /* SUNHME_MULTICAST */
ian@0 1082
ian@0 1083 static int miscintcount;
ian@0 1084
ian@0 1085 static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
ian@0 1086 {
ian@0 1087 struct net_device *dev = (struct net_device *) dev_id;
ian@0 1088 struct bmac_data *bp = netdev_priv(dev);
ian@0 1089 unsigned int status = bmread(dev, STATUS);
ian@0 1090 if (miscintcount++ < 10) {
ian@0 1091 XXDEBUG(("bmac_misc_intr\n"));
ian@0 1092 }
ian@0 1093 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
ian@0 1094 /* bmac_txdma_intr_inner(irq, dev_id, regs); */
ian@0 1095 /* if (status & FrameReceived) bp->stats.rx_dropped++; */
ian@0 1096 if (status & RxErrorMask) bp->stats.rx_errors++;
ian@0 1097 if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
ian@0 1098 if (status & RxLenCntExp) bp->stats.rx_length_errors++;
ian@0 1099 if (status & RxOverFlow) bp->stats.rx_over_errors++;
ian@0 1100 if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;
ian@0 1101
ian@0 1102 /* if (status & FrameSent) bp->stats.tx_dropped++; */
ian@0 1103 if (status & TxErrorMask) bp->stats.tx_errors++;
ian@0 1104 if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
ian@0 1105 if (status & TxNormalCollExp) bp->stats.collisions++;
ian@0 1106 return IRQ_HANDLED;
ian@0 1107 }
ian@0 1108
ian@0 1109 /*
ian@0 1110 * Procedure for reading EEPROM
ian@0 1111 */
ian@0 1112 #define SROMAddressLength 5
ian@0 1113 #define DataInOn 0x0008
ian@0 1114 #define DataInOff 0x0000
ian@0 1115 #define Clk 0x0002
ian@0 1116 #define ChipSelect 0x0001
ian@0 1117 #define SDIShiftCount 3
ian@0 1118 #define SD0ShiftCount 2
ian@0 1119 #define DelayValue 1000 /* number of microseconds */
ian@0 1120 #define SROMStartOffset 10 /* this is in words */
ian@0 1121 #define SROMReadCount 3 /* number of words to read from SROM */
ian@0 1122 #define SROMAddressBits 6
ian@0 1123 #define EnetAddressOffset 20
ian@0 1124
ian@0 1125 static unsigned char
ian@0 1126 bmac_clock_out_bit(struct net_device *dev)
ian@0 1127 {
ian@0 1128 unsigned short data;
ian@0 1129 unsigned short val;
ian@0 1130
ian@0 1131 bmwrite(dev, SROMCSR, ChipSelect | Clk);
ian@0 1132 udelay(DelayValue);
ian@0 1133
ian@0 1134 data = bmread(dev, SROMCSR);
ian@0 1135 udelay(DelayValue);
ian@0 1136 val = (data >> SD0ShiftCount) & 1;
ian@0 1137
ian@0 1138 bmwrite(dev, SROMCSR, ChipSelect);
ian@0 1139 udelay(DelayValue);
ian@0 1140
ian@0 1141 return val;
ian@0 1142 }
ian@0 1143
ian@0 1144 static void
ian@0 1145 bmac_clock_in_bit(struct net_device *dev, unsigned int val)
ian@0 1146 {
ian@0 1147 unsigned short data;
ian@0 1148
ian@0 1149 if (val != 0 && val != 1) return;
ian@0 1150
ian@0 1151 data = (val << SDIShiftCount);
ian@0 1152 bmwrite(dev, SROMCSR, data | ChipSelect );
ian@0 1153 udelay(DelayValue);
ian@0 1154
ian@0 1155 bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
ian@0 1156 udelay(DelayValue);
ian@0 1157
ian@0 1158 bmwrite(dev, SROMCSR, data | ChipSelect);
ian@0 1159 udelay(DelayValue);
ian@0 1160 }
ian@0 1161
ian@0 1162 static void
ian@0 1163 reset_and_select_srom(struct net_device *dev)
ian@0 1164 {
ian@0 1165 /* first reset */
ian@0 1166 bmwrite(dev, SROMCSR, 0);
ian@0 1167 udelay(DelayValue);
ian@0 1168
ian@0 1169 /* send it the read command (110) */
ian@0 1170 bmac_clock_in_bit(dev, 1);
ian@0 1171 bmac_clock_in_bit(dev, 1);
ian@0 1172 bmac_clock_in_bit(dev, 0);
ian@0 1173 }
ian@0 1174
ian@0 1175 static unsigned short
ian@0 1176 read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
ian@0 1177 {
ian@0 1178 unsigned short data, val;
ian@0 1179 int i;
ian@0 1180
ian@0 1181 /* send out the address we want to read from */
ian@0 1182 for (i = 0; i < addr_len; i++) {
ian@0 1183 val = addr >> (addr_len-i-1);
ian@0 1184 bmac_clock_in_bit(dev, val & 1);
ian@0 1185 }
ian@0 1186
ian@0 1187 /* Now read in the 16-bit data */
ian@0 1188 data = 0;
ian@0 1189 for (i = 0; i < 16; i++) {
ian@0 1190 val = bmac_clock_out_bit(dev);
ian@0 1191 data <<= 1;
ian@0 1192 data |= val;
ian@0 1193 }
ian@0 1194 bmwrite(dev, SROMCSR, 0);
ian@0 1195
ian@0 1196 return data;
ian@0 1197 }
ian@0 1198
ian@0 1199 /*
ian@0 1200 * It looks like Cogent and SMC use different methods for calculating
ian@0 1201 * checksums. What a pain..
ian@0 1202 */
ian@0 1203
ian@0 1204 static int
ian@0 1205 bmac_verify_checksum(struct net_device *dev)
ian@0 1206 {
ian@0 1207 unsigned short data, storedCS;
ian@0 1208
ian@0 1209 reset_and_select_srom(dev);
ian@0 1210 data = read_srom(dev, 3, SROMAddressBits);
ian@0 1211 storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
ian@0 1212
ian@0 1213 return 0;
ian@0 1214 }
ian@0 1215
ian@0 1216
ian@0 1217 static void
ian@0 1218 bmac_get_station_address(struct net_device *dev, unsigned char *ea)
ian@0 1219 {
ian@0 1220 int i;
ian@0 1221 unsigned short data;
ian@0 1222
ian@0 1223 for (i = 0; i < 6; i++)
ian@0 1224 {
ian@0 1225 reset_and_select_srom(dev);
ian@0 1226 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
ian@0 1227 ea[2*i] = bitrev(data & 0x0ff);
ian@0 1228 ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
ian@0 1229 }
ian@0 1230 }
ian@0 1231
ian@0 1232 static void bmac_reset_and_enable(struct net_device *dev)
ian@0 1233 {
ian@0 1234 struct bmac_data *bp = netdev_priv(dev);
ian@0 1235 unsigned long flags;
ian@0 1236 struct sk_buff *skb;
ian@0 1237 unsigned char *data;
ian@0 1238
ian@0 1239 spin_lock_irqsave(&bp->lock, flags);
ian@0 1240 bmac_enable_and_reset_chip(dev);
ian@0 1241 bmac_init_tx_ring(bp);
ian@0 1242 bmac_init_rx_ring(bp);
ian@0 1243 bmac_init_chip(dev);
ian@0 1244 bmac_start_chip(dev);
ian@0 1245 bmwrite(dev, INTDISABLE, EnableNormal);
ian@0 1246 bp->sleeping = 0;
ian@0 1247
ian@0 1248 /*
ian@0 1249 * It seems that the bmac can't receive until it's transmitted
ian@0 1250 * a packet. So we give it a dummy packet to transmit.
ian@0 1251 */
ian@0 1252 skb = dev_alloc_skb(ETHERMINPACKET);
ian@0 1253 if (skb != NULL) {
ian@0 1254 data = skb_put(skb, ETHERMINPACKET);
ian@0 1255 memset(data, 0, ETHERMINPACKET);
ian@0 1256 memcpy(data, dev->dev_addr, 6);
ian@0 1257 memcpy(data+6, dev->dev_addr, 6);
ian@0 1258 bmac_transmit_packet(skb, dev);
ian@0 1259 }
ian@0 1260 spin_unlock_irqrestore(&bp->lock, flags);
ian@0 1261 }
ian@0 1262
ian@0 1263 static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
ian@0 1264 {
ian@0 1265 int j, rev, ret;
ian@0 1266 struct bmac_data *bp;
ian@0 1267 unsigned char *addr;
ian@0 1268 struct net_device *dev;
ian@0 1269 int is_bmac_plus = ((int)match->data) != 0;
ian@0 1270
ian@0 1271 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
ian@0 1272 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
ian@0 1273 return -ENODEV;
ian@0 1274 }
ian@0 1275 addr = get_property(macio_get_of_node(mdev), "mac-address", NULL);
ian@0 1276 if (addr == NULL) {
ian@0 1277 addr = get_property(macio_get_of_node(mdev), "local-mac-address", NULL);
ian@0 1278 if (addr == NULL) {
ian@0 1279 printk(KERN_ERR "BMAC: Can't get mac-address\n");
ian@0 1280 return -ENODEV;
ian@0 1281 }
ian@0 1282 }
ian@0 1283
ian@0 1284 dev = alloc_etherdev(PRIV_BYTES);
ian@0 1285 if (!dev) {
ian@0 1286 printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
ian@0 1287 return -ENOMEM;
ian@0 1288 }
ian@0 1289
ian@0 1290 bp = netdev_priv(dev);
ian@0 1291 SET_MODULE_OWNER(dev);
ian@0 1292 SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
ian@0 1293 macio_set_drvdata(mdev, dev);
ian@0 1294
ian@0 1295 bp->mdev = mdev;
ian@0 1296 spin_lock_init(&bp->lock);
ian@0 1297
ian@0 1298 if (macio_request_resources(mdev, "bmac")) {
ian@0 1299 printk(KERN_ERR "BMAC: can't request IO resource !\n");
ian@0 1300 goto out_free;
ian@0 1301 }
ian@0 1302
ian@0 1303 dev->base_addr = (unsigned long)
ian@0 1304 ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
ian@0 1305 if (dev->base_addr == 0)
ian@0 1306 goto out_release;
ian@0 1307
ian@0 1308 dev->irq = macio_irq(mdev, 0);
ian@0 1309
ian@0 1310 bmac_enable_and_reset_chip(dev);
ian@0 1311 bmwrite(dev, INTDISABLE, DisableAll);
ian@0 1312
ian@0 1313 rev = addr[0] == 0 && addr[1] == 0xA0;
ian@0 1314 for (j = 0; j < 6; ++j)
ian@0 1315 dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
ian@0 1316
ian@0 1317 /* Enable chip without interrupts for now */
ian@0 1318 bmac_enable_and_reset_chip(dev);
ian@0 1319 bmwrite(dev, INTDISABLE, DisableAll);
ian@0 1320
ian@0 1321 dev->open = bmac_open;
ian@0 1322 dev->stop = bmac_close;
ian@0 1323 dev->hard_start_xmit = bmac_output;
ian@0 1324 dev->get_stats = bmac_stats;
ian@0 1325 dev->set_multicast_list = bmac_set_multicast;
ian@0 1326 dev->set_mac_address = bmac_set_address;
ian@0 1327
ian@0 1328 bmac_get_station_address(dev, addr);
ian@0 1329 if (bmac_verify_checksum(dev) != 0)
ian@0 1330 goto err_out_iounmap;
ian@0 1331
ian@0 1332 bp->is_bmac_plus = is_bmac_plus;
ian@0 1333 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
ian@0 1334 if (!bp->tx_dma)
ian@0 1335 goto err_out_iounmap;
ian@0 1336 bp->tx_dma_intr = macio_irq(mdev, 1);
ian@0 1337 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
ian@0 1338 if (!bp->rx_dma)
ian@0 1339 goto err_out_iounmap_tx;
ian@0 1340 bp->rx_dma_intr = macio_irq(mdev, 2);
ian@0 1341
ian@0 1342 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
ian@0 1343 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
ian@0 1344
ian@0 1345 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
ian@0 1346 skb_queue_head_init(bp->queue);
ian@0 1347
ian@0 1348 init_timer(&bp->tx_timeout);
ian@0 1349
ian@0 1350 ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
ian@0 1351 if (ret) {
ian@0 1352 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
ian@0 1353 goto err_out_iounmap_rx;
ian@0 1354 }
ian@0 1355 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
ian@0 1356 if (ret) {
ian@0 1357 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
ian@0 1358 goto err_out_irq0;
ian@0 1359 }
ian@0 1360 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
ian@0 1361 if (ret) {
ian@0 1362 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
ian@0 1363 goto err_out_irq1;
ian@0 1364 }
ian@0 1365
ian@0 1366 /* Mask chip interrupts and disable chip, will be
ian@0 1367 * re-enabled on open()
ian@0 1368 */
ian@0 1369 disable_irq(dev->irq);
ian@0 1370 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
ian@0 1371
ian@0 1372 if (register_netdev(dev) != 0) {
ian@0 1373 printk(KERN_ERR "BMAC: Ethernet registration failed\n");
ian@0 1374 goto err_out_irq2;
ian@0 1375 }
ian@0 1376
ian@0 1377 printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
ian@0 1378 for (j = 0; j < 6; ++j)
ian@0 1379 printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
ian@0 1380 XXDEBUG((", base_addr=%#0lx", dev->base_addr));
ian@0 1381 printk("\n");
ian@0 1382
ian@0 1383 return 0;
ian@0 1384
ian@0 1385 err_out_irq2:
ian@0 1386 free_irq(bp->rx_dma_intr, dev);
ian@0 1387 err_out_irq1:
ian@0 1388 free_irq(bp->tx_dma_intr, dev);
ian@0 1389 err_out_irq0:
ian@0 1390 free_irq(dev->irq, dev);
ian@0 1391 err_out_iounmap_rx:
ian@0 1392 iounmap(bp->rx_dma);
ian@0 1393 err_out_iounmap_tx:
ian@0 1394 iounmap(bp->tx_dma);
ian@0 1395 err_out_iounmap:
ian@0 1396 iounmap((void __iomem *)dev->base_addr);
ian@0 1397 out_release:
ian@0 1398 macio_release_resources(mdev);
ian@0 1399 out_free:
ian@0 1400 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
ian@0 1401 free_netdev(dev);
ian@0 1402
ian@0 1403 return -ENODEV;
ian@0 1404 }
ian@0 1405
ian@0 1406 static int bmac_open(struct net_device *dev)
ian@0 1407 {
ian@0 1408 struct bmac_data *bp = netdev_priv(dev);
ian@0 1409 /* XXDEBUG(("bmac: enter open\n")); */
ian@0 1410 /* reset the chip */
ian@0 1411 bp->opened = 1;
ian@0 1412 bmac_reset_and_enable(dev);
ian@0 1413 enable_irq(dev->irq);
ian@0 1414 return 0;
ian@0 1415 }
ian@0 1416
ian@0 1417 static int bmac_close(struct net_device *dev)
ian@0 1418 {
ian@0 1419 struct bmac_data *bp = netdev_priv(dev);
ian@0 1420 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
ian@0 1421 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
ian@0 1422 unsigned short config;
ian@0 1423 int i;
ian@0 1424
ian@0 1425 bp->sleeping = 1;
ian@0 1426
ian@0 1427 /* disable rx and tx */
ian@0 1428 config = bmread(dev, RXCFG);
ian@0 1429 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
ian@0 1430
ian@0 1431 config = bmread(dev, TXCFG);
ian@0 1432 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
ian@0 1433
ian@0 1434 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
ian@0 1435
ian@0 1436 /* disable rx and tx dma */
ian@0 1437 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
ian@0 1438 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
ian@0 1439
ian@0 1440 /* free some skb's */
ian@0 1441 XXDEBUG(("bmac: free rx bufs\n"));
ian@0 1442 for (i=0; i<N_RX_RING; i++) {
ian@0 1443 if (bp->rx_bufs[i] != NULL) {
ian@0 1444 dev_kfree_skb(bp->rx_bufs[i]);
ian@0 1445 bp->rx_bufs[i] = NULL;
ian@0 1446 }
ian@0 1447 }
ian@0 1448 XXDEBUG(("bmac: free tx bufs\n"));
ian@0 1449 for (i = 0; i<N_TX_RING; i++) {
ian@0 1450 if (bp->tx_bufs[i] != NULL) {
ian@0 1451 dev_kfree_skb(bp->tx_bufs[i]);
ian@0 1452 bp->tx_bufs[i] = NULL;
ian@0 1453 }
ian@0 1454 }
ian@0 1455 XXDEBUG(("bmac: all bufs freed\n"));
ian@0 1456
ian@0 1457 bp->opened = 0;
ian@0 1458 disable_irq(dev->irq);
ian@0 1459 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
ian@0 1460
ian@0 1461 return 0;
ian@0 1462 }
ian@0 1463
ian@0 1464 static void
ian@0 1465 bmac_start(struct net_device *dev)
ian@0 1466 {
ian@0 1467 struct bmac_data *bp = netdev_priv(dev);
ian@0 1468 int i;
ian@0 1469 struct sk_buff *skb;
ian@0 1470 unsigned long flags;
ian@0 1471
ian@0 1472 if (bp->sleeping)
ian@0 1473 return;
ian@0 1474
ian@0 1475 spin_lock_irqsave(&bp->lock, flags);
ian@0 1476 while (1) {
ian@0 1477 i = bp->tx_fill + 1;
ian@0 1478 if (i >= N_TX_RING)
ian@0 1479 i = 0;
ian@0 1480 if (i == bp->tx_empty)
ian@0 1481 break;
ian@0 1482 skb = skb_dequeue(bp->queue);
ian@0 1483 if (skb == NULL)
ian@0 1484 break;
ian@0 1485 bmac_transmit_packet(skb, dev);
ian@0 1486 }
ian@0 1487 spin_unlock_irqrestore(&bp->lock, flags);
ian@0 1488 }
ian@0 1489
ian@0 1490 static int
ian@0 1491 bmac_output(struct sk_buff *skb, struct net_device *dev)
ian@0 1492 {
ian@0 1493 struct bmac_data *bp = netdev_priv(dev);
ian@0 1494 skb_queue_tail(bp->queue, skb);
ian@0 1495 bmac_start(dev);
ian@0 1496 return 0;
ian@0 1497 }
ian@0 1498
ian@0 1499 static void bmac_tx_timeout(unsigned long data)
ian@0 1500 {
ian@0 1501 struct net_device *dev = (struct net_device *) data;
ian@0 1502 struct bmac_data *bp = netdev_priv(dev);
ian@0 1503 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
ian@0 1504 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
ian@0 1505 volatile struct dbdma_cmd *cp;
ian@0 1506 unsigned long flags;
ian@0 1507 unsigned short config, oldConfig;
ian@0 1508 int i;
ian@0 1509
ian@0 1510 XXDEBUG(("bmac: tx_timeout called\n"));
ian@0 1511 spin_lock_irqsave(&bp->lock, flags);
ian@0 1512 bp->timeout_active = 0;
ian@0 1513
ian@0 1514 /* update various counters */
ian@0 1515 /* bmac_handle_misc_intrs(bp, 0); */
ian@0 1516
ian@0 1517 cp = &bp->tx_cmds[bp->tx_empty];
ian@0 1518 /* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
ian@0 1519 /* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
ian@0 1520 /* mb->pr, mb->xmtfs, mb->fifofc)); */
ian@0 1521
ian@0 1522 /* turn off both tx and rx and reset the chip */
ian@0 1523 config = bmread(dev, RXCFG);
ian@0 1524 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
ian@0 1525 config = bmread(dev, TXCFG);
ian@0 1526 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
ian@0 1527 out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
ian@0 1528 printk(KERN_ERR "bmac: transmit timeout - resetting\n");
ian@0 1529 bmac_enable_and_reset_chip(dev);
ian@0 1530
ian@0 1531 /* restart rx dma */
ian@0 1532 cp = bus_to_virt(ld_le32(&rd->cmdptr));
ian@0 1533 out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
ian@0 1534 out_le16(&cp->xfer_status, 0);
ian@0 1535 out_le32(&rd->cmdptr, virt_to_bus(cp));
ian@0 1536 out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
ian@0 1537
ian@0 1538 /* fix up the transmit side */
ian@0 1539 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
ian@0 1540 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
ian@0 1541 i = bp->tx_empty;
ian@0 1542 ++bp->stats.tx_errors;
ian@0 1543 if (i != bp->tx_fill) {
ian@0 1544 dev_kfree_skb(bp->tx_bufs[i]);
ian@0 1545 bp->tx_bufs[i] = NULL;
ian@0 1546 if (++i >= N_TX_RING) i = 0;
ian@0 1547 bp->tx_empty = i;
ian@0 1548 }
ian@0 1549 bp->tx_fullup = 0;
ian@0 1550 netif_wake_queue(dev);
ian@0 1551 if (i != bp->tx_fill) {
ian@0 1552 cp = &bp->tx_cmds[i];
ian@0 1553 out_le16(&cp->xfer_status, 0);
ian@0 1554 out_le16(&cp->command, OUTPUT_LAST);
ian@0 1555 out_le32(&td->cmdptr, virt_to_bus(cp));
ian@0 1556 out_le32(&td->control, DBDMA_SET(RUN));
ian@0 1557 /* bmac_set_timeout(dev); */
ian@0 1558 XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
ian@0 1559 }
ian@0 1560
ian@0 1561 /* turn it back on */
ian@0 1562 oldConfig = bmread(dev, RXCFG);
ian@0 1563 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
ian@0 1564 oldConfig = bmread(dev, TXCFG);
ian@0 1565 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
ian@0 1566
ian@0 1567 spin_unlock_irqrestore(&bp->lock, flags);
ian@0 1568 }
ian@0 1569
ian@0 1570 #if 0
ian@0 1571 static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
ian@0 1572 {
ian@0 1573 int i,*ip;
ian@0 1574
ian@0 1575 for (i=0;i< count;i++) {
ian@0 1576 ip = (int*)(cp+i);
ian@0 1577
ian@0 1578 printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
ian@0 1579 ld_le32(ip+0),
ian@0 1580 ld_le32(ip+1),
ian@0 1581 ld_le32(ip+2),
ian@0 1582 ld_le32(ip+3));
ian@0 1583 }
ian@0 1584
ian@0 1585 }
ian@0 1586 #endif
ian@0 1587
ian@0 1588 #if 0
ian@0 1589 static int
ian@0 1590 bmac_proc_info(char *buffer, char **start, off_t offset, int length)
ian@0 1591 {
ian@0 1592 int len = 0;
ian@0 1593 off_t pos = 0;
ian@0 1594 off_t begin = 0;
ian@0 1595 int i;
ian@0 1596
ian@0 1597 if (bmac_devs == NULL)
ian@0 1598 return (-ENOSYS);
ian@0 1599
ian@0 1600 len += sprintf(buffer, "BMAC counters & registers\n");
ian@0 1601
ian@0 1602 for (i = 0; i<N_REG_ENTRIES; i++) {
ian@0 1603 len += sprintf(buffer + len, "%s: %#08x\n",
ian@0 1604 reg_entries[i].name,
ian@0 1605 bmread(bmac_devs, reg_entries[i].reg_offset));
ian@0 1606 pos = begin + len;
ian@0 1607
ian@0 1608 if (pos < offset) {
ian@0 1609 len = 0;
ian@0 1610 begin = pos;
ian@0 1611 }
ian@0 1612
ian@0 1613 if (pos > offset+length) break;
ian@0 1614 }
ian@0 1615
ian@0 1616 *start = buffer + (offset - begin);
ian@0 1617 len -= (offset - begin);
ian@0 1618
ian@0 1619 if (len > length) len = length;
ian@0 1620
ian@0 1621 return len;
ian@0 1622 }
ian@0 1623 #endif
ian@0 1624
ian@0 1625 static int __devexit bmac_remove(struct macio_dev *mdev)
ian@0 1626 {
ian@0 1627 struct net_device *dev = macio_get_drvdata(mdev);
ian@0 1628 struct bmac_data *bp = netdev_priv(dev);
ian@0 1629
ian@0 1630 unregister_netdev(dev);
ian@0 1631
ian@0 1632 free_irq(dev->irq, dev);
ian@0 1633 free_irq(bp->tx_dma_intr, dev);
ian@0 1634 free_irq(bp->rx_dma_intr, dev);
ian@0 1635
ian@0 1636 iounmap((void __iomem *)dev->base_addr);
ian@0 1637 iounmap(bp->tx_dma);
ian@0 1638 iounmap(bp->rx_dma);
ian@0 1639
ian@0 1640 macio_release_resources(mdev);
ian@0 1641
ian@0 1642 free_netdev(dev);
ian@0 1643
ian@0 1644 return 0;
ian@0 1645 }
ian@0 1646
ian@0 1647 static struct of_device_id bmac_match[] =
ian@0 1648 {
ian@0 1649 {
ian@0 1650 .name = "bmac",
ian@0 1651 .data = (void *)0,
ian@0 1652 },
ian@0 1653 {
ian@0 1654 .type = "network",
ian@0 1655 .compatible = "bmac+",
ian@0 1656 .data = (void *)1,
ian@0 1657 },
ian@0 1658 {},
ian@0 1659 };
ian@0 1660 MODULE_DEVICE_TABLE (of, bmac_match);
ian@0 1661
ian@0 1662 static struct macio_driver bmac_driver =
ian@0 1663 {
ian@0 1664 .name = "bmac",
ian@0 1665 .match_table = bmac_match,
ian@0 1666 .probe = bmac_probe,
ian@0 1667 .remove = bmac_remove,
ian@0 1668 #ifdef CONFIG_PM
ian@0 1669 .suspend = bmac_suspend,
ian@0 1670 .resume = bmac_resume,
ian@0 1671 #endif
ian@0 1672 };
ian@0 1673
ian@0 1674
ian@0 1675 static int __init bmac_init(void)
ian@0 1676 {
ian@0 1677 if (bmac_emergency_rxbuf == NULL) {
ian@0 1678 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
ian@0 1679 if (bmac_emergency_rxbuf == NULL) {
ian@0 1680 printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
ian@0 1681 return -ENOMEM;
ian@0 1682 }
ian@0 1683 }
ian@0 1684
ian@0 1685 return macio_register_driver(&bmac_driver);
ian@0 1686 }
ian@0 1687
ian@0 1688 static void __exit bmac_exit(void)
ian@0 1689 {
ian@0 1690 macio_unregister_driver(&bmac_driver);
ian@0 1691
ian@0 1692 kfree(bmac_emergency_rxbuf);
ian@0 1693 bmac_emergency_rxbuf = NULL;
ian@0 1694 }
ian@0 1695
ian@0 1696 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
ian@0 1697 MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
ian@0 1698 MODULE_LICENSE("GPL");
ian@0 1699
ian@0 1700 module_init(bmac_init);
ian@0 1701 module_exit(bmac_exit);