ia64/linux-2.6.18-xen.hg

view drivers/net/sb1250-mac.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
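
In rough outline (an illustrative sketch only, not the actual patch; the
function and variable names below are placeholders), the resulting balloon
worker behaves like this:

    /* Sketch of the retry logic described above -- placeholder names. */
    static void balloon_worker(void)
    {
        long credit = target_pages - current_pages;

        if (credit > 0) {
            long got = increase_reservation(credit);

            current_pages += got;      /* keep any partial success */
            if (got < credit)          /* host busy: retry on a timer */
                mod_timer(&balloon_timer, jiffies + HZ);
        } else if (credit < 0) {
            decrease_reservation(-credit);
        }
    }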

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /*
2 * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 *
19 * This driver is designed for the Broadcom SiByte SOC built-in
20 * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
21 */
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/timer.h>
26 #include <linux/errno.h>
27 #include <linux/ioport.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/init.h>
34 #include <linux/bitops.h>
35 #include <asm/processor.h> /* Processor type for cache alignment. */
36 #include <asm/io.h>
37 #include <asm/cache.h>
39 /* This is only here until the firmware is ready. In that case,
40 the firmware leaves the ethernet address in the register for us. */
41 #ifdef CONFIG_SIBYTE_STANDALONE
42 #define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
43 #define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
44 #define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
45 #define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
46 #endif
49 /* These identify the driver base version and may not be removed. */
50 #if 0
51 static char version1[] __devinitdata =
52 "sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
53 #endif
56 /* Operational parameters that usually are not changed. */
58 #define CONFIG_SBMAC_COALESCE
60 #define MAX_UNITS 4 /* More are supported, limit only on options */
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (2*HZ)
66 MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
67 MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
69 /* A few user-configurable values which may be modified when a driver
70 module is loaded. */
72 /* 1 normal messages, 0 quiet .. 7 verbose. */
73 static int debug = 1;
74 module_param(debug, int, S_IRUGO);
75 MODULE_PARM_DESC(debug, "Debug messages");
77 /* mii status msgs */
78 static int noisy_mii = 1;
79 module_param(noisy_mii, int, S_IRUGO);
80 MODULE_PARM_DESC(noisy_mii, "MII status messages");
82 /* Used to pass the media type, etc.
83 Both 'options[]' and 'full_duplex[]' should exist for driver
84 interoperability.
85 The media type is usually passed in 'options[]'.
86 */
87 #ifdef MODULE
88 static int options[MAX_UNITS] = {-1, -1, -1, -1};
89 module_param_array(options, int, NULL, S_IRUGO);
90 MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS));
92 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1};
93 module_param_array(full_duplex, int, NULL, S_IRUGO);
94 MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS));
95 #endif
97 #ifdef CONFIG_SBMAC_COALESCE
98 static int int_pktcnt = 0;
99 module_param(int_pktcnt, int, S_IRUGO);
100 MODULE_PARM_DESC(int_pktcnt, "Packet count");
102 static int int_timeout = 0;
103 module_param(int_timeout, int, S_IRUGO);
104 MODULE_PARM_DESC(int_timeout, "Timeout value");
105 #endif
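/* Usage example (module name assumed): "modprobe sb1250-mac debug=2 int_pktcnt=8
 * int_timeout=100" raises the log verbosity and sets the interrupt-coalescing
 * packet count and timeout declared above. */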
107 #include <asm/sibyte/sb1250.h>
108 #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
109 #include <asm/sibyte/bcm1480_regs.h>
110 #include <asm/sibyte/bcm1480_int.h>
111 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
112 #include <asm/sibyte/sb1250_regs.h>
113 #include <asm/sibyte/sb1250_int.h>
114 #else
115 #error invalid SiByte MAC configuration
116 #endif
117 #include <asm/sibyte/sb1250_scd.h>
118 #include <asm/sibyte/sb1250_mac.h>
119 #include <asm/sibyte/sb1250_dma.h>
121 #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
122 #define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2))
123 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
124 #define UNIT_INT(n) (K_INT_MAC_0 + (n))
125 #else
126 #error invalid SiByte MAC configuration
127 #endif
129 /**********************************************************************
130 * Simple types
131 ********************************************************************* */
134 typedef enum { sbmac_speed_auto, sbmac_speed_10,
135 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
137 typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
138 sbmac_duplex_full } sbmac_duplex_t;
140 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
141 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
143 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
144 sbmac_state_broken } sbmac_state_t;
147 /**********************************************************************
148 * Macros
149 ********************************************************************* */
152 #define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
153 (d)->sbdma_dscrtable : (d)->f+1)
156 #define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
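/* Example: assuming the SB1's 32-byte cache lines (SMP_CACHE_BYTES == 32),
 * NUMCACHEBLKS(1518) = (1518 + 31) / 32 = 48 cache blocks for a full frame. */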
158 #define SBMAC_MAX_TXDESCR 32
159 #define SBMAC_MAX_RXDESCR 32
161 #define ETHER_ALIGN 2
162 #define ETHER_ADDR_LEN 6
163 #define ENET_PACKET_SIZE 1518
164 /*#define ENET_PACKET_SIZE 9216 */
166 /**********************************************************************
167 * DMA Descriptor structure
168 ********************************************************************* */
170 typedef struct sbdmadscr_s {
171 uint64_t dscr_a;
172 uint64_t dscr_b;
173 } sbdmadscr_t;
175 typedef unsigned long paddr_t;
177 /**********************************************************************
178 * DMA Controller structure
179 ********************************************************************* */
181 typedef struct sbmacdma_s {
183 /*
184 * This stuff is used to identify the channel and the registers
185 * associated with it.
186 */
188 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
189 int sbdma_channel; /* channel number */
190 int sbdma_txdir; /* direction (1=transmit) */
191 int sbdma_maxdescr; /* total # of descriptors in ring */
192 #ifdef CONFIG_SBMAC_COALESCE
193 int sbdma_int_pktcnt; /* # descriptors rx/tx before interrupt*/
194 int sbdma_int_timeout; /* # usec rx/tx interrupt */
195 #endif
197 volatile void __iomem *sbdma_config0; /* DMA config register 0 */
198 volatile void __iomem *sbdma_config1; /* DMA config register 1 */
199 volatile void __iomem *sbdma_dscrbase; /* Descriptor base address */
200 volatile void __iomem *sbdma_dscrcnt; /* Descriptor count register */
201 volatile void __iomem *sbdma_curdscr; /* current descriptor address */
203 /*
204 * This stuff is for maintenance of the ring
205 */
207 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
208 sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */
210 struct sk_buff **sbdma_ctxtable; /* context table, one per descr */
212 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
213 sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */
214 sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */
215 } sbmacdma_t;
218 /**********************************************************************
219 * Ethernet softc structure
220 ********************************************************************* */
222 struct sbmac_softc {
224 /*
225 * Linux-specific things
226 */
228 struct net_device *sbm_dev; /* pointer to linux device */
229 spinlock_t sbm_lock; /* spin lock */
230 struct timer_list sbm_timer; /* for monitoring MII */
231 struct net_device_stats sbm_stats;
232 int sbm_devflags; /* current device flags */
234 int sbm_phy_oldbmsr;
235 int sbm_phy_oldanlpar;
236 int sbm_phy_oldk1stsr;
237 int sbm_phy_oldlinkstat;
238 int sbm_buffersize;
240 unsigned char sbm_phys[2];
242 /*
243 * Controller-specific things
244 */
246 volatile void __iomem *sbm_base; /* MAC's base address */
247 sbmac_state_t sbm_state; /* current state */
249 volatile void __iomem *sbm_macenable; /* MAC Enable Register */
250 volatile void __iomem *sbm_maccfg; /* MAC Configuration Register */
251 volatile void __iomem *sbm_fifocfg; /* FIFO configuration register */
252 volatile void __iomem *sbm_framecfg; /* Frame configuration register */
253 volatile void __iomem *sbm_rxfilter; /* receive filter register */
254 volatile void __iomem *sbm_isr; /* Interrupt status register */
255 volatile void __iomem *sbm_imr; /* Interrupt mask register */
256 volatile void __iomem *sbm_mdio; /* MDIO register */
258 sbmac_speed_t sbm_speed; /* current speed */
259 sbmac_duplex_t sbm_duplex; /* current duplex */
260 sbmac_fc_t sbm_fc; /* current flow control setting */
262 unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
264 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
265 sbmacdma_t sbm_rxdma;
266 int rx_hw_checksum;
267 int sbe_idx;
268 };
271 /**********************************************************************
272 * Externs
273 ********************************************************************* */
275 /**********************************************************************
276 * Prototypes
277 ********************************************************************* */
279 static void sbdma_initctx(sbmacdma_t *d,
280 struct sbmac_softc *s,
281 int chan,
282 int txrx,
283 int maxdescr);
284 static void sbdma_channel_start(sbmacdma_t *d, int rxtx);
285 static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m);
286 static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m);
287 static void sbdma_emptyring(sbmacdma_t *d);
288 static void sbdma_fillring(sbmacdma_t *d);
289 static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d);
290 static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d);
291 static int sbmac_initctx(struct sbmac_softc *s);
292 static void sbmac_channel_start(struct sbmac_softc *s);
293 static void sbmac_channel_stop(struct sbmac_softc *s);
294 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,sbmac_state_t);
295 static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff);
296 static uint64_t sbmac_addr2reg(unsigned char *ptr);
297 static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs);
298 static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
299 static void sbmac_setmulti(struct sbmac_softc *sc);
300 static int sbmac_init(struct net_device *dev, int idx);
301 static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed);
302 static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc);
304 static int sbmac_open(struct net_device *dev);
305 static void sbmac_timer(unsigned long data);
306 static void sbmac_tx_timeout (struct net_device *dev);
307 static struct net_device_stats *sbmac_get_stats(struct net_device *dev);
308 static void sbmac_set_rx_mode(struct net_device *dev);
309 static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
310 static int sbmac_close(struct net_device *dev);
311 static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
312 static int sbmac_mii_probe(struct net_device *dev);
314 static void sbmac_mii_sync(struct sbmac_softc *s);
315 static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt);
316 static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx);
317 static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
318 unsigned int regval);
321 /**********************************************************************
322 * Globals
323 ********************************************************************* */
325 static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
328 /**********************************************************************
329 * MDIO constants
330 ********************************************************************* */
332 #define MII_COMMAND_START 0x01
333 #define MII_COMMAND_READ 0x02
334 #define MII_COMMAND_WRITE 0x01
335 #define MII_COMMAND_ACK 0x02
337 #define BMCR_RESET 0x8000
338 #define BMCR_LOOPBACK 0x4000
339 #define BMCR_SPEED0 0x2000
340 #define BMCR_ANENABLE 0x1000
341 #define BMCR_POWERDOWN 0x0800
342 #define BMCR_ISOLATE 0x0400
343 #define BMCR_RESTARTAN 0x0200
344 #define BMCR_DUPLEX 0x0100
345 #define BMCR_COLTEST 0x0080
346 #define BMCR_SPEED1 0x0040
347 #define BMCR_SPEED1000 BMCR_SPEED1
348 #define BMCR_SPEED100 BMCR_SPEED0
349 #define BMCR_SPEED10 0
351 #define BMSR_100BT4 0x8000
352 #define BMSR_100BT_FDX 0x4000
353 #define BMSR_100BT_HDX 0x2000
354 #define BMSR_10BT_FDX 0x1000
355 #define BMSR_10BT_HDX 0x0800
356 #define BMSR_100BT2_FDX 0x0400
357 #define BMSR_100BT2_HDX 0x0200
358 #define BMSR_1000BT_XSR 0x0100
359 #define BMSR_PRESUP 0x0040
360 #define BMSR_ANCOMPLT 0x0020
361 #define BMSR_REMFAULT 0x0010
362 #define BMSR_AUTONEG 0x0008
363 #define BMSR_LINKSTAT 0x0004
364 #define BMSR_JABDETECT 0x0002
365 #define BMSR_EXTCAPAB 0x0001
367 #define PHYIDR1 0x2000
368 #define PHYIDR2 0x5C60
370 #define ANAR_NP 0x8000
371 #define ANAR_RF 0x2000
372 #define ANAR_ASYPAUSE 0x0800
373 #define ANAR_PAUSE 0x0400
374 #define ANAR_T4 0x0200
375 #define ANAR_TXFD 0x0100
376 #define ANAR_TXHD 0x0080
377 #define ANAR_10FD 0x0040
378 #define ANAR_10HD 0x0020
379 #define ANAR_PSB 0x0001
381 #define ANLPAR_NP 0x8000
382 #define ANLPAR_ACK 0x4000
383 #define ANLPAR_RF 0x2000
384 #define ANLPAR_ASYPAUSE 0x0800
385 #define ANLPAR_PAUSE 0x0400
386 #define ANLPAR_T4 0x0200
387 #define ANLPAR_TXFD 0x0100
388 #define ANLPAR_TXHD 0x0080
389 #define ANLPAR_10FD 0x0040
390 #define ANLPAR_10HD 0x0020
391 #define ANLPAR_PSB 0x0001 /* 802.3 */
393 #define ANER_PDF 0x0010
394 #define ANER_LPNPABLE 0x0008
395 #define ANER_NPABLE 0x0004
396 #define ANER_PAGERX 0x0002
397 #define ANER_LPANABLE 0x0001
399 #define ANNPTR_NP 0x8000
400 #define ANNPTR_MP 0x2000
401 #define ANNPTR_ACK2 0x1000
402 #define ANNPTR_TOGTX 0x0800
403 #define ANNPTR_CODE 0x0008
405 #define ANNPRR_NP 0x8000
406 #define ANNPRR_MP 0x2000
407 #define ANNPRR_ACK3 0x1000
408 #define ANNPRR_TOGTX 0x0800
409 #define ANNPRR_CODE 0x0008
411 #define K1TCR_TESTMODE 0x0000
412 #define K1TCR_MSMCE 0x1000
413 #define K1TCR_MSCV 0x0800
414 #define K1TCR_RPTR 0x0400
415 #define K1TCR_1000BT_FDX 0x200
416 #define K1TCR_1000BT_HDX 0x100
418 #define K1STSR_MSMCFLT 0x8000
419 #define K1STSR_MSCFGRES 0x4000
420 #define K1STSR_LRSTAT 0x2000
421 #define K1STSR_RRSTAT 0x1000
422 #define K1STSR_LP1KFD 0x0800
423 #define K1STSR_LP1KHD 0x0400
424 #define K1STSR_LPASMDIR 0x0200
426 #define K1SCR_1KX_FDX 0x8000
427 #define K1SCR_1KX_HDX 0x4000
428 #define K1SCR_1KT_FDX 0x2000
429 #define K1SCR_1KT_HDX 0x1000
431 #define STRAP_PHY1 0x0800
432 #define STRAP_NCMODE 0x0400
433 #define STRAP_MANMSCFG 0x0200
434 #define STRAP_ANENABLE 0x0100
435 #define STRAP_MSVAL 0x0080
436 #define STRAP_1KHDXADV 0x0010
437 #define STRAP_1KFDXADV 0x0008
438 #define STRAP_100ADV 0x0004
439 #define STRAP_SPEEDSEL 0x0000
440 #define STRAP_SPEED100 0x0001
442 #define PHYSUP_SPEED1000 0x10
443 #define PHYSUP_SPEED100 0x08
444 #define PHYSUP_SPEED10 0x00
445 #define PHYSUP_LINKUP 0x04
446 #define PHYSUP_FDX 0x02
448 #define MII_BMCR 0x00 /* Basic mode control register (rw) */
449 #define MII_BMSR 0x01 /* Basic mode status register (ro) */
450 #define MII_PHYIDR1 0x02
451 #define MII_PHYIDR2 0x03
453 #define MII_K1STSR 0x0A /* 1K Status Register (ro) */
454 #define MII_ANLPAR 0x05 /* Autonegotiation link partner abilities (rw) */
457 #define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */
459 #define ENABLE 1
460 #define DISABLE 0
462 /**********************************************************************
463 * SBMAC_MII_SYNC(s)
464 *
465 * Synchronize with the MII - send a pattern of bits to the MII
466 * that will guarantee that it is ready to accept a command.
467 *
468 * Input parameters:
469 * s - sbmac structure
470 *
471 * Return value:
472 * nothing
473 ********************************************************************* */
475 static void sbmac_mii_sync(struct sbmac_softc *s)
476 {
477 int cnt;
478 uint64_t bits;
479 int mac_mdio_genc;
481 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
483 bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
485 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
487 for (cnt = 0; cnt < 32; cnt++) {
488 __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
489 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
490 }
491 }
493 /**********************************************************************
494 * SBMAC_MII_SENDDATA(s,data,bitcnt)
495 *
496 * Send some bits to the MII. The bits to be sent are right-
497 * justified in the 'data' parameter.
498 *
499 * Input parameters:
500 * s - sbmac structure
501 * data - data to send
502 * bitcnt - number of bits to send
503 ********************************************************************* */
505 static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt)
506 {
507 int i;
508 uint64_t bits;
509 unsigned int curmask;
510 int mac_mdio_genc;
512 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
514 bits = M_MAC_MDIO_DIR_OUTPUT;
515 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
517 curmask = 1 << (bitcnt - 1);
519 for (i = 0; i < bitcnt; i++) {
520 if (data & curmask)
521 bits |= M_MAC_MDIO_OUT;
522 else bits &= ~M_MAC_MDIO_OUT;
523 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
524 __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
525 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
526 curmask >>= 1;
527 }
528 }
532 /**********************************************************************
533 * SBMAC_MII_READ(s,phyaddr,regidx)
534 *
535 * Read a PHY register.
536 *
537 * Input parameters:
538 * s - sbmac structure
539 * phyaddr - PHY's address
540 regidx - index of register to read
541 *
542 * Return value:
543 * value read, or 0 if an error occurred.
544 ********************************************************************* */
546 static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
547 {
548 int idx;
549 int error;
550 int regval;
551 int mac_mdio_genc;
553 /*
554 * Synchronize ourselves so that the PHY knows the next
555 * thing coming down is a command
556 */
558 sbmac_mii_sync(s);
560 /*
561 * Send the data to the PHY. The sequence is
562 * a "start" command (2 bits)
563 * a "read" command (2 bits)
564 * the PHY addr (5 bits)
565 * the register index (5 bits)
566 */
568 sbmac_mii_senddata(s,MII_COMMAND_START, 2);
569 sbmac_mii_senddata(s,MII_COMMAND_READ, 2);
570 sbmac_mii_senddata(s,phyaddr, 5);
571 sbmac_mii_senddata(s,regidx, 5);
573 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
575 /*
576 * Switch the port around without a clock transition.
577 */
578 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
580 /*
581 * Send out a clock pulse to signal we want the status
582 */
584 __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
585 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
587 /*
588 * If an error occurred, the PHY will signal '1' back
589 */
590 error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN;
592 /*
593 * Issue an 'idle' clock pulse, but keep the direction
594 * the same.
595 */
596 __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
597 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
599 regval = 0;
601 for (idx = 0; idx < 16; idx++) {
602 regval <<= 1;
604 if (error == 0) {
605 if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN)
606 regval |= 1;
607 }
609 __raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
610 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
611 }
613 /* Switch back to output */
614 __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
616 if (error == 0)
617 return regval;
618 return 0;
619 }
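/* The read above follows the usual bit-banged MDIO sequence: sync preamble,
 * start (01), read opcode (10), 5-bit PHY address, 5-bit register index, a
 * turnaround while the pin is switched to input, then 16 data bits clocked
 * in MSB first. */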
622 /**********************************************************************
623 * SBMAC_MII_WRITE(s,phyaddr,regidx,regval)
624 *
625 * Write a value to a PHY register.
626 *
627 * Input parameters:
628 * s - sbmac structure
629 * phyaddr - PHY to use
630 * regidx - register within the PHY
631 * regval - data to write to register
632 *
633 * Return value:
634 * nothing
635 ********************************************************************* */
637 static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
638 unsigned int regval)
639 {
640 int mac_mdio_genc;
642 sbmac_mii_sync(s);
644 sbmac_mii_senddata(s,MII_COMMAND_START,2);
645 sbmac_mii_senddata(s,MII_COMMAND_WRITE,2);
646 sbmac_mii_senddata(s,phyaddr, 5);
647 sbmac_mii_senddata(s,regidx, 5);
648 sbmac_mii_senddata(s,MII_COMMAND_ACK,2);
649 sbmac_mii_senddata(s,regval,16);
651 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
653 __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
654 }
658 /**********************************************************************
659 * SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
660 *
661 * Initialize a DMA channel context. Since there are potentially
662 * eight DMA channels per MAC, it's nice to do this in a standard
663 * way.
664 *
665 * Input parameters:
666 * d - sbmacdma_t structure (DMA channel context)
667 * s - sbmac_softc structure (pointer to a MAC)
668 * chan - channel number (0..1 right now)
669 * txrx - Identifies DMA_TX or DMA_RX for channel direction
670 * maxdescr - number of descriptors
671 *
672 * Return value:
673 * nothing
674 ********************************************************************* */
676 static void sbdma_initctx(sbmacdma_t *d,
677 struct sbmac_softc *s,
678 int chan,
679 int txrx,
680 int maxdescr)
681 {
682 /*
683 * Save away interesting stuff in the structure
684 */
686 d->sbdma_eth = s;
687 d->sbdma_channel = chan;
688 d->sbdma_txdir = txrx;
690 #if 0
691 /* RMON clearing */
692 s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
693 #endif
695 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)));
696 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)));
697 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)));
698 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)));
699 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)));
700 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)));
701 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)));
702 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)));
703 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)));
704 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)));
705 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)));
706 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)));
707 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)));
708 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)));
709 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)));
710 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)));
711 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)));
712 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)));
713 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)));
714 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)));
715 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)));
717 /*
718 * initialize register pointers
719 */
721 d->sbdma_config0 =
722 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
723 d->sbdma_config1 =
724 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
725 d->sbdma_dscrbase =
726 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
727 d->sbdma_dscrcnt =
728 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
729 d->sbdma_curdscr =
730 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
732 /*
733 * Allocate memory for the ring
734 */
736 d->sbdma_maxdescr = maxdescr;
738 d->sbdma_dscrtable = (sbdmadscr_t *)
739 kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);
741 /*
742 * The descriptor table must be aligned to at least 16 bytes or the
743 * MAC will corrupt it.
744 */
745 d->sbdma_dscrtable = (sbdmadscr_t *)
746 ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t));
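/* sizeof(sbdmadscr_t) is 16 bytes (two uint64_t), so allocating maxdescr+1
 * entries leaves enough slack for the ALIGN() above; e.g. 32 descriptors ->
 * 33 * 16 = 528 bytes allocated, rounded up to a 16-byte boundary. */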
748 memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t));
750 d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
752 d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
754 /*
755 * And context table
756 */
758 d->sbdma_ctxtable = (struct sk_buff **)
759 kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL);
761 memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *));
763 #ifdef CONFIG_SBMAC_COALESCE
764 /*
765 * Setup Rx/Tx DMA coalescing defaults
766 */
768 if ( int_pktcnt ) {
769 d->sbdma_int_pktcnt = int_pktcnt;
770 } else {
771 d->sbdma_int_pktcnt = 1;
772 }
774 if ( int_timeout ) {
775 d->sbdma_int_timeout = int_timeout;
776 } else {
777 d->sbdma_int_timeout = 0;
778 }
779 #endif
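/* With the module parameters left at 0, the defaults chosen above amount to
 * an interrupt per packet (pktcnt = 1) and no interrupt timeout. */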
781 }
783 /**********************************************************************
784 * SBDMA_CHANNEL_START(d)
785 *
786 * Initialize the hardware registers for a DMA channel.
787 *
788 * Input parameters:
789 * d - DMA channel to init (context must be previously init'd)
790 * rxtx - DMA_RX or DMA_TX depending on what type of channel
791 *
792 * Return value:
793 * nothing
794 ********************************************************************* */
796 static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
797 {
798 /*
799 * Turn on the DMA channel
800 */
802 #ifdef CONFIG_SBMAC_COALESCE
803 __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
804 0, d->sbdma_config1);
805 __raw_writeq(M_DMA_EOP_INT_EN |
806 V_DMA_RINGSZ(d->sbdma_maxdescr) |
807 V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
808 0, d->sbdma_config0);
809 #else
810 __raw_writeq(0, d->sbdma_config1);
811 __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
812 0, d->sbdma_config0);
813 #endif
815 __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
817 /*
818 * Initialize ring pointers
819 */
821 d->sbdma_addptr = d->sbdma_dscrtable;
822 d->sbdma_remptr = d->sbdma_dscrtable;
823 }
825 /**********************************************************************
826 * SBDMA_CHANNEL_STOP(d)
827 *
828 * Shut down a DMA channel and clear its hardware registers.
829 *
830 * Input parameters:
831 * d - DMA channel to stop (context must be previously init'd)
832 *
833 * Return value:
834 * nothing
835 ********************************************************************* */
837 static void sbdma_channel_stop(sbmacdma_t *d)
838 {
839 /*
840 * Turn off the DMA channel
841 */
843 __raw_writeq(0, d->sbdma_config1);
845 __raw_writeq(0, d->sbdma_dscrbase);
847 __raw_writeq(0, d->sbdma_config0);
849 /*
850 * Zero ring pointers
851 */
853 d->sbdma_addptr = NULL;
854 d->sbdma_remptr = NULL;
855 }
857 static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
858 {
859 unsigned long addr;
860 unsigned long newaddr;
862 addr = (unsigned long) skb->data;
864 newaddr = (addr + power2 - 1) & ~(power2 - 1);
866 skb_reserve(skb,newaddr-addr+offset);
867 }
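/* On the receive path this is called with power2 = SMP_CACHE_BYTES and
 * offset = ETHER_ALIGN (2), so skb->data lands 2 bytes past a cache-line
 * boundary and the IP header that follows the 14-byte Ethernet header is
 * 4-byte aligned. */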
870 /**********************************************************************
871 * SBDMA_ADD_RCVBUFFER(d,sb)
872 *
873 * Add a buffer to the specified DMA channel. For receive channels,
874 * this queues a buffer for inbound packets.
875 *
876 * Input parameters:
877 * d - DMA channel descriptor
878 * sb - sk_buff to add, or NULL if we should allocate one
879 *
880 * Return value:
881 * 0 if buffer added successfully
882 * -ENOSPC or -ENOBUFS if buffer could not be added (ring full or no memory)
883 ********************************************************************* */
886 static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
887 {
888 sbdmadscr_t *dsc;
889 sbdmadscr_t *nextdsc;
890 struct sk_buff *sb_new = NULL;
891 int pktsize = ENET_PACKET_SIZE;
893 /* get pointer to our current place in the ring */
895 dsc = d->sbdma_addptr;
896 nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
898 /*
899 * figure out if the ring is full - if the next descriptor
900 * is the same as the one that we're going to remove from
901 * the ring, the ring is full
902 */
904 if (nextdsc == d->sbdma_remptr) {
905 return -ENOSPC;
906 }
908 /*
909 * Allocate a sk_buff if we don't already have one.
910 * If we do have an sk_buff, reset it so that it's empty.
911 *
912 * Note: sk_buffs don't seem to be guaranteed to have any sort
913 * of alignment when they are allocated. Therefore, allocate enough
914 * extra space to make sure that:
915 *
916 * 1. the data does not start in the middle of a cache line.
917 * 2. The data does not end in the middle of a cache line
918 * 3. The buffer can be aligned such that the IP addresses are
919 * naturally aligned.
920 *
921 * Remember, the SOC's MAC writes whole cache lines at a time,
922 * without reading the old contents first. So, if the sk_buff's
923 * data portion starts in the middle of a cache line, the SOC
924 * DMA will trash the beginning (and ending) portions.
925 */
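/* Worked example, assuming 32-byte cache lines: the dev_alloc_skb() below
 * asks for 1518 + 2*32 + 2 = 1584 bytes -- a full frame plus a cache line of
 * slack at each end plus the 2-byte alignment offset. */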
927 if (sb == NULL) {
928 sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
929 if (sb_new == NULL) {
930 printk(KERN_INFO "%s: sk_buff allocation failed\n",
931 d->sbdma_eth->sbm_dev->name);
932 return -ENOBUFS;
933 }
935 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
937 /* mark skbuff owned by our device */
938 sb_new->dev = d->sbdma_eth->sbm_dev;
939 }
940 else {
941 sb_new = sb;
942 /*
943 * nothing special to reinit buffer, it's already aligned
944 * and sb->data already points to a good place.
945 */
946 }
948 /*
949 * fill in the descriptor
950 */
952 #ifdef CONFIG_SBMAC_COALESCE
953 /*
954 * Do not interrupt per DMA transfer.
955 */
956 dsc->dscr_a = virt_to_phys(sb_new->data) |
957 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
958 #else
959 dsc->dscr_a = virt_to_phys(sb_new->data) |
960 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
961 M_DMA_DSCRA_INTERRUPT;
962 #endif
964 /* receiving: no options */
965 dsc->dscr_b = 0;
967 /*
968 * fill in the context
969 */
971 d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
973 /*
974 * point at next packet
975 */
977 d->sbdma_addptr = nextdsc;
979 /*
980 * Give the buffer to the DMA engine.
981 */
983 __raw_writeq(1, d->sbdma_dscrcnt);
985 return 0; /* we did it */
986 }
988 /**********************************************************************
989 * SBDMA_ADD_TXBUFFER(d,sb)
990 *
991 * Add a transmit buffer to the specified DMA channel, causing a
992 * transmit to start.
993 *
994 * Input parameters:
995 * d - DMA channel descriptor
996 * sb - sk_buff to add
997 *
998 * Return value:
999 * 0 transmit queued successfully
1000 * otherwise error code
1001 ********************************************************************* */
1004 static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
1006 sbdmadscr_t *dsc;
1007 sbdmadscr_t *nextdsc;
1008 uint64_t phys;
1009 uint64_t ncb;
1010 int length;
1012 /* get pointer to our current place in the ring */
1014 dsc = d->sbdma_addptr;
1015 nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
1017 /*
1018 * figure out if the ring is full - if the next descriptor
1019 * is the same as the one that we're going to remove from
1020 * the ring, the ring is full
1021 */
1023 if (nextdsc == d->sbdma_remptr) {
1024 return -ENOSPC;
1027 /*
1028 * Under Linux, it's not necessary to copy/coalesce buffers
1029 * like it is on NetBSD. We think they're all contiguous,
1030 * but that may not be true for GBE.
1031 */
1033 length = sb->len;
1035 /*
1036 * fill in the descriptor. Note that the number of cache
1037 * blocks in the descriptor is the number of blocks
1038 * *spanned*, so we need to add in the offset (if any)
1039 * while doing the calculation.
1040 */
1042 phys = virt_to_phys(sb->data);
1043 ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
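/* Worked example: a 100-byte packet whose data starts 8 bytes into a 32-byte
 * cache line spans NUMCACHEBLKS(100 + 8) = (108 + 31) / 32 = 4 cache blocks. */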
1045 dsc->dscr_a = phys |
1046 V_DMA_DSCRA_A_SIZE(ncb) |
1047 #ifndef CONFIG_SBMAC_COALESCE
1048 M_DMA_DSCRA_INTERRUPT |
1049 #endif
1050 M_DMA_ETHTX_SOP;
1052 /* transmitting: set outbound options and length */
1054 dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
1055 V_DMA_DSCRB_PKT_SIZE(length);
1057 /*
1058 * fill in the context
1059 */
1061 d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
1063 /*
1064 * point at next packet
1065 */
1067 d->sbdma_addptr = nextdsc;
1069 /*
1070 * Give the buffer to the DMA engine.
1071 */
1073 __raw_writeq(1, d->sbdma_dscrcnt);
1075 return 0; /* we did it */
1081 /**********************************************************************
1082 * SBDMA_EMPTYRING(d)
1084 * Free all allocated sk_buffs on the specified DMA channel;
1086 * Input parameters:
1087 * d - DMA channel
1089 * Return value:
1090 * nothing
1091 ********************************************************************* */
1093 static void sbdma_emptyring(sbmacdma_t *d)
1095 int idx;
1096 struct sk_buff *sb;
1098 for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
1099 sb = d->sbdma_ctxtable[idx];
1100 if (sb) {
1101 dev_kfree_skb(sb);
1102 d->sbdma_ctxtable[idx] = NULL;
1108 /**********************************************************************
1109 * SBDMA_FILLRING(d)
1111 * Fill the specified DMA channel (must be receive channel)
1112 * with sk_buffs
1114 * Input parameters:
1115 * d - DMA channel
1117 * Return value:
1118 * nothing
1119 ********************************************************************* */
1121 static void sbdma_fillring(sbmacdma_t *d)
1123 int idx;
1125 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
1126 if (sbdma_add_rcvbuffer(d,NULL) != 0)
1127 break;
1132 /**********************************************************************
1133 * SBDMA_RX_PROCESS(sc,d)
1135 * Process "completed" receive buffers on the specified DMA channel.
1136 * Note that this isn't really ideal for priority channels, since
1137 * it processes all of the packets on a given channel before
1138 * returning.
1140 * Input parameters:
1141 * sc - softc structure
1142 * d - DMA channel context
1144 * Return value:
1145 * nothing
1146 ********************************************************************* */
1148 static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1150 int curidx;
1151 int hwidx;
1152 sbdmadscr_t *dsc;
1153 struct sk_buff *sb;
1154 int len;
1156 for (;;) {
1157 /*
1158 * figure out where we are (as an index) and where
1159 * the hardware is (also as an index)
1161 * This could be done faster if (for example) the
1162 * descriptor table was page-aligned and contiguous in
1163 * both virtual and physical memory -- you could then
1164 * just compare the low-order bits of the virtual address
1165 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
1166 */
1168 curidx = d->sbdma_remptr - d->sbdma_dscrtable;
1169 hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1170 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
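/* Each descriptor is 16 bytes, so dividing the offset of the hardware's
 * current descriptor address by sizeof(sbdmadscr_t) yields an index directly
 * comparable with curidx. */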
1172 /*
1173 * If they're the same, that means we've processed all
1174 * of the descriptors up to (but not including) the one that
1175 * the hardware is working on right now.
1176 */
1178 if (curidx == hwidx)
1179 break;
1181 /*
1182 * Otherwise, get the packet's sk_buff ptr back
1183 */
1185 dsc = &(d->sbdma_dscrtable[curidx]);
1186 sb = d->sbdma_ctxtable[curidx];
1187 d->sbdma_ctxtable[curidx] = NULL;
1189 len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
1191 /*
1192 * Check packet status. If good, process it.
1193 * If not, silently drop it and put it back on the
1194 * receive ring.
1195 */
1197 if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) {
1199 /*
1200 * Add a new buffer to replace the old one. If we fail
1201 * to allocate a buffer, we're going to drop this
1202 * packet and put it right back on the receive ring.
1203 */
1205 if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) {
1206 sc->sbm_stats.rx_dropped++;
1207 sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
1208 } else {
1209 /*
1210 * Set length into the packet
1211 */
1212 skb_put(sb,len);
1214 /*
1215 * Buffer has been replaced on the
1216 * receive ring. Pass the buffer to
1217 * the kernel
1218 */
1219 sc->sbm_stats.rx_bytes += len;
1220 sc->sbm_stats.rx_packets++;
1221 sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
1222 /* Check hw IPv4/TCP checksum if supported */
1223 if (sc->rx_hw_checksum == ENABLE) {
1224 if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
1225 !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
1226 sb->ip_summed = CHECKSUM_UNNECESSARY;
1227 /* don't need to set sb->csum */
1228 } else {
1229 sb->ip_summed = CHECKSUM_NONE;
1233 netif_rx(sb);
1235 } else {
1236 /*
1237 * Packet was mangled somehow. Just drop it and
1238 * put it back on the receive ring.
1239 */
1240 sc->sbm_stats.rx_errors++;
1241 sbdma_add_rcvbuffer(d,sb);
1245 /*
1246 * .. and advance to the next buffer.
1247 */
1249 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1256 /**********************************************************************
1257 * SBDMA_TX_PROCESS(sc,d)
1259 * Process "completed" transmit buffers on the specified DMA channel.
1260 * This is normally called within the interrupt service routine.
1261 * Note that this isn't really ideal for priority channels, since
1262 * it processes all of the packets on a given channel before
1263 * returning.
1265 * Input parameters:
1266 * sc - softc structure
1267 * d - DMA channel context
1269 * Return value:
1270 * nothing
1271 ********************************************************************* */
1273 static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1275 int curidx;
1276 int hwidx;
1277 sbdmadscr_t *dsc;
1278 struct sk_buff *sb;
1279 unsigned long flags;
1281 spin_lock_irqsave(&(sc->sbm_lock), flags);
1283 for (;;) {
1284 /*
1285 * figure out where we are (as an index) and where
1286 * the hardware is (also as an index)
1288 * This could be done faster if (for example) the
1289 * descriptor table was page-aligned and contiguous in
1290 * both virtual and physical memory -- you could then
1291 * just compare the low-order bits of the virtual address
1292 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
1293 */
1295 curidx = d->sbdma_remptr - d->sbdma_dscrtable;
1296 hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1297 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1299 /*
1300 * If they're the same, that means we've processed all
1301 * of the descriptors up to (but not including) the one that
1302 * the hardware is working on right now.
1303 */
1305 if (curidx == hwidx)
1306 break;
1308 /*
1309 * Otherwise, get the packet's sk_buff ptr back
1310 */
1312 dsc = &(d->sbdma_dscrtable[curidx]);
1313 sb = d->sbdma_ctxtable[curidx];
1314 d->sbdma_ctxtable[curidx] = NULL;
1316 /*
1317 * Stats
1318 */
1320 sc->sbm_stats.tx_bytes += sb->len;
1321 sc->sbm_stats.tx_packets++;
1323 /*
1324 * for transmits, we just free buffers.
1325 */
1327 dev_kfree_skb_irq(sb);
1329 /*
1330 * .. and advance to the next buffer.
1331 */
1333 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1337 /*
1338 * Decide if we should wake up the protocol or not.
1339 * Other drivers seem to do this when we reach a low
1340 * watermark on the transmit queue.
1341 */
1343 netif_wake_queue(d->sbdma_eth->sbm_dev);
1345 spin_unlock_irqrestore(&(sc->sbm_lock), flags);
1351 /**********************************************************************
1352 * SBMAC_INITCTX(s)
1354 * Initialize an Ethernet context structure - this is called
1355 * once per MAC on the 1250. Memory is allocated here, so don't
1356 * call it again from inside the ioctl routines that bring the
1357 * interface up/down
1359 * Input parameters:
1360 * s - sbmac context structure
1362 * Return value:
1363 * 0
1364 ********************************************************************* */
1366 static int sbmac_initctx(struct sbmac_softc *s)
1369 /*
1370 * figure out the addresses of some ports
1371 */
1373 s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
1374 s->sbm_maccfg = s->sbm_base + R_MAC_CFG;
1375 s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG;
1376 s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG;
1377 s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG;
1378 s->sbm_isr = s->sbm_base + R_MAC_STATUS;
1379 s->sbm_imr = s->sbm_base + R_MAC_INT_MASK;
1380 s->sbm_mdio = s->sbm_base + R_MAC_MDIO;
1382 s->sbm_phys[0] = 1;
1383 s->sbm_phys[1] = 0;
1385 s->sbm_phy_oldbmsr = 0;
1386 s->sbm_phy_oldanlpar = 0;
1387 s->sbm_phy_oldk1stsr = 0;
1388 s->sbm_phy_oldlinkstat = 0;
1390 /*
1391 * Initialize the DMA channels. Right now, only one per MAC is used
1392 * Note: Only do this _once_, as it allocates memory from the kernel!
1393 */
1395 sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
1396 sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
1398 /*
1399 * initial state is OFF
1400 */
1402 s->sbm_state = sbmac_state_off;
1404 /*
1405 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1406 */
1408 s->sbm_speed = sbmac_speed_10;
1409 s->sbm_duplex = sbmac_duplex_half;
1410 s->sbm_fc = sbmac_fc_disabled;
1412 return 0;
1416 static void sbdma_uninitctx(struct sbmacdma_s *d)
1418 if (d->sbdma_dscrtable) {
1419 kfree(d->sbdma_dscrtable);
1420 d->sbdma_dscrtable = NULL;
1423 if (d->sbdma_ctxtable) {
1424 kfree(d->sbdma_ctxtable);
1425 d->sbdma_ctxtable = NULL;
1430 static void sbmac_uninitctx(struct sbmac_softc *sc)
1432 sbdma_uninitctx(&(sc->sbm_txdma));
1433 sbdma_uninitctx(&(sc->sbm_rxdma));
1437 /**********************************************************************
1438 * SBMAC_CHANNEL_START(s)
1440 * Start packet processing on this MAC.
1442 * Input parameters:
1443 * s - sbmac structure
1445 * Return value:
1446 * nothing
1447 ********************************************************************* */
1449 static void sbmac_channel_start(struct sbmac_softc *s)
1451 uint64_t reg;
1452 volatile void __iomem *port;
1453 uint64_t cfg,fifo,framecfg;
1454 int idx, th_value;
1456 /*
1457 * Don't do this if running
1458 */
1460 if (s->sbm_state == sbmac_state_on)
1461 return;
1463 /*
1464 * Bring the controller out of reset, but leave it off.
1465 */
1467 __raw_writeq(0, s->sbm_macenable);
1469 /*
1470 * Ignore all received packets
1471 */
1473 __raw_writeq(0, s->sbm_rxfilter);
1475 /*
1476 * Calculate values for various control registers.
1477 */
1479 cfg = M_MAC_RETRY_EN |
1480 M_MAC_TX_HOLD_SOP_EN |
1481 V_MAC_TX_PAUSE_CNT_16K |
1482 M_MAC_AP_STAT_EN |
1483 M_MAC_FAST_SYNC |
1484 M_MAC_SS_EN |
1485 0;
1487 /*
1488 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars
1489 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
1490 * Use a larger RD_THRSH for gigabit
1491 */
1492 if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
1493 th_value = 28;
1494 else
1495 th_value = 64;
1497 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
1498 ((s->sbm_speed == sbmac_speed_1000)
1499 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
1500 V_MAC_TX_RL_THRSH(4) |
1501 V_MAC_RX_PL_THRSH(4) |
1502 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1503 V_MAC_RX_PL_THRSH(4) |
1504 V_MAC_RX_RL_THRSH(8) |
1505 0;
1507 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1508 V_MAC_MAX_FRAMESZ_DEFAULT |
1509 V_MAC_BACKOFF_SEL(1);
1511 /*
1512 * Clear out the hash address map
1513 */
1515 port = s->sbm_base + R_MAC_HASH_BASE;
1516 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1517 __raw_writeq(0, port);
1518 port += sizeof(uint64_t);
1521 /*
1522 * Clear out the exact-match table
1523 */
1525 port = s->sbm_base + R_MAC_ADDR_BASE;
1526 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1527 __raw_writeq(0, port);
1528 port += sizeof(uint64_t);
1531 /*
1532 * Clear out the DMA Channel mapping table registers
1533 */
1535 port = s->sbm_base + R_MAC_CHUP0_BASE;
1536 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1537 __raw_writeq(0, port);
1538 port += sizeof(uint64_t);
1542 port = s->sbm_base + R_MAC_CHLO0_BASE;
1543 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1544 __raw_writeq(0, port);
1545 port += sizeof(uint64_t);
1548 /*
1549 * Program the hardware address. It goes into the hardware-address
1550 * register as well as the first filter register.
1551 */
1553 reg = sbmac_addr2reg(s->sbm_hwaddr);
1555 port = s->sbm_base + R_MAC_ADDR_BASE;
1556 __raw_writeq(reg, port);
1557 port = s->sbm_base + R_MAC_ETHERNET_ADDR;
1559 #ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
1560 /*
1561 * Pass1 SOCs do not receive packets addressed to the
1562 * destination address in the R_MAC_ETHERNET_ADDR register.
1563 * Set the value to zero.
1564 */
1565 __raw_writeq(0, port);
1566 #else
1567 __raw_writeq(reg, port);
1568 #endif
1570 /*
1571 * Set the receive filter for no packets, and write values
1572 * to the various config registers
1573 */
1575 __raw_writeq(0, s->sbm_rxfilter);
1576 __raw_writeq(0, s->sbm_imr);
1577 __raw_writeq(framecfg, s->sbm_framecfg);
1578 __raw_writeq(fifo, s->sbm_fifocfg);
1579 __raw_writeq(cfg, s->sbm_maccfg);
1581 /*
1582 * Initialize DMA channels (rings should be ok now)
1583 */
1585 sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
1586 sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
1588 /*
1589 * Configure the speed, duplex, and flow control
1590 */
1592 sbmac_set_speed(s,s->sbm_speed);
1593 sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
1595 /*
1596 * Fill the receive ring
1597 */
1599 sbdma_fillring(&(s->sbm_rxdma));
1601 /*
1602 * Turn on the rest of the bits in the enable register
1603 */
1605 #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
1606 __raw_writeq(M_MAC_RXDMA_EN0 |
1607 M_MAC_TXDMA_EN0, s->sbm_macenable);
1608 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
1609 __raw_writeq(M_MAC_RXDMA_EN0 |
1610 M_MAC_TXDMA_EN0 |
1611 M_MAC_RX_ENABLE |
1612 M_MAC_TX_ENABLE, s->sbm_macenable);
1613 #else
1614 #error invalid SiByte MAC configuration
1615 #endif
1617 #ifdef CONFIG_SBMAC_COALESCE
1618 /*
1619 * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0
1620 */
1621 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
1622 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
1623 #else
1624 /*
1625 * Accept any kind of interrupt on TX and RX DMA channel 0
1626 */
1627 __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1628 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
1629 #endif
1631 /*
1632 * Enable receiving unicasts and broadcasts
1633 */
1635 __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
1637 /*
1638 * we're running now.
1639 */
1641 s->sbm_state = sbmac_state_on;
1643 /*
1644 * Program multicast addresses
1645 */
1647 sbmac_setmulti(s);
1649 /*
1650 * If channel was in promiscuous mode before, turn that on
1651 */
1653 if (s->sbm_devflags & IFF_PROMISC) {
1654 sbmac_promiscuous_mode(s,1);
1660 /**********************************************************************
1661 * SBMAC_CHANNEL_STOP(s)
1663 * Stop packet processing on this MAC.
1665 * Input parameters:
1666 * s - sbmac structure
1668 * Return value:
1669 * nothing
1670 ********************************************************************* */
1672 static void sbmac_channel_stop(struct sbmac_softc *s)
1674 /* don't do this if already stopped */
1676 if (s->sbm_state == sbmac_state_off)
1677 return;
1679 /* don't accept any packets, disable all interrupts */
1681 __raw_writeq(0, s->sbm_rxfilter);
1682 __raw_writeq(0, s->sbm_imr);
1684 /* Turn off ticker */
1686 /* XXX */
1688 /* turn off receiver and transmitter */
1690 __raw_writeq(0, s->sbm_macenable);
1692 /* We're stopped now. */
1694 s->sbm_state = sbmac_state_off;
1696 /*
1697 * Stop DMA channels (rings should be ok now)
1698 */
1700 sbdma_channel_stop(&(s->sbm_rxdma));
1701 sbdma_channel_stop(&(s->sbm_txdma));
1703 /* Empty the receive and transmit rings */
1705 sbdma_emptyring(&(s->sbm_rxdma));
1706 sbdma_emptyring(&(s->sbm_txdma));
1710 /**********************************************************************
1711 * SBMAC_SET_CHANNEL_STATE(state)
1713 * Set the channel's state ON or OFF
1715 * Input parameters:
1716 * state - new state
1718 * Return value:
1719 * old state
1720 ********************************************************************* */
1721 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
1722 sbmac_state_t state)
1724 sbmac_state_t oldstate = sc->sbm_state;
1726 /*
1727 * If same as previous state, return
1728 */
1730 if (state == oldstate) {
1731 return oldstate;
1734 /*
1735 * If new state is ON, turn channel on
1736 */
1738 if (state == sbmac_state_on) {
1739 sbmac_channel_start(sc);
1741 else {
1742 sbmac_channel_stop(sc);
1745 /*
1746 * Return previous state
1747 */
1749 return oldstate;
1753 /**********************************************************************
1754 * SBMAC_PROMISCUOUS_MODE(sc,onoff)
1756 * Turn on or off promiscuous mode
1758 * Input parameters:
1759 * sc - softc
1760 * onoff - 1 to turn on, 0 to turn off
1762 * Return value:
1763 * nothing
1764 ********************************************************************* */
1766 static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
1768 uint64_t reg;
1770 if (sc->sbm_state != sbmac_state_on)
1771 return;
1773 if (onoff) {
1774 reg = __raw_readq(sc->sbm_rxfilter);
1775 reg |= M_MAC_ALLPKT_EN;
1776 __raw_writeq(reg, sc->sbm_rxfilter);
1778 else {
1779 reg = __raw_readq(sc->sbm_rxfilter);
1780 reg &= ~M_MAC_ALLPKT_EN;
1781 __raw_writeq(reg, sc->sbm_rxfilter);
1785 /**********************************************************************
1786 * SBMAC_SETIPHDR_OFFSET(sc,onoff)
1788 * Set the iphdr offset to 15, assuming Ethernet encapsulation
1790 * Input parameters:
1791 * sc - softc
1793 * Return value:
1794 * nothing
1795 ********************************************************************* */
1797 static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
1799 uint64_t reg;
1801 /* Hard code the offset to 15 for now */
1802 reg = __raw_readq(sc->sbm_rxfilter);
1803 reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
1804 __raw_writeq(reg, sc->sbm_rxfilter);
1806 /* BCM1250 pass1 didn't have hardware checksum. Everything
1807 later does. */
1808 if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
1809 sc->rx_hw_checksum = DISABLE;
1810 } else {
1811 sc->rx_hw_checksum = ENABLE;
1816 /**********************************************************************
1817 * SBMAC_ADDR2REG(ptr)
1819 * Convert six bytes into the 64-bit register value that
1820 * we typically write into the SBMAC's address/mcast registers
1822 * Input parameters:
1823 * ptr - pointer to 6 bytes
1825 * Return value:
1826 * register value
1827 ********************************************************************* */
1829 static uint64_t sbmac_addr2reg(unsigned char *ptr)
1831 uint64_t reg = 0;
1833 ptr += 6;
1835 reg |= (uint64_t) *(--ptr);
1836 reg <<= 8;
1837 reg |= (uint64_t) *(--ptr);
1838 reg <<= 8;
1839 reg |= (uint64_t) *(--ptr);
1840 reg <<= 8;
1841 reg |= (uint64_t) *(--ptr);
1842 reg <<= 8;
1843 reg |= (uint64_t) *(--ptr);
1844 reg <<= 8;
1845 reg |= (uint64_t) *(--ptr);
1847 return reg;
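/* Example: the first byte of the MAC address ends up in the least significant
 * byte of the register, so 40:00:00:00:01:00 (SBMAC_ETH0_HWADDR above) becomes
 * 0x0000000100000040. */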
1851 /**********************************************************************
1852 * SBMAC_SET_SPEED(s,speed)
1854 * Configure LAN speed for the specified MAC.
1855 * Warning: must be called when MAC is off!
1857 * Input parameters:
1858 * s - sbmac structure
1859 * speed - speed to set MAC to (see sbmac_speed_t enum)
1861 * Return value:
1862 * 1 if successful
1863 * 0 indicates invalid parameters
1864 ********************************************************************* */
1866 static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
1868 uint64_t cfg;
1869 uint64_t framecfg;
1871 /*
1872 * Save new current values
1873 */
1875 s->sbm_speed = speed;
1877 if (s->sbm_state == sbmac_state_on)
1878 return 0; /* save for next restart */
1880 /*
1881 * Read current register values
1882 */
1884 cfg = __raw_readq(s->sbm_maccfg);
1885 framecfg = __raw_readq(s->sbm_framecfg);
1887 /*
1888 * Mask out the stuff we want to change
1889 */
1891 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1892 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1893 M_MAC_SLOT_SIZE);
1895 /*
1896 * Now add in the new bits
1897 */
1899 switch (speed) {
1900 case sbmac_speed_10:
1901 framecfg |= V_MAC_IFG_RX_10 |
1902 V_MAC_IFG_TX_10 |
1903 K_MAC_IFG_THRSH_10 |
1904 V_MAC_SLOT_SIZE_10;
1905 cfg |= V_MAC_SPEED_SEL_10MBPS;
1906 break;
1908 case sbmac_speed_100:
1909 framecfg |= V_MAC_IFG_RX_100 |
1910 V_MAC_IFG_TX_100 |
1911 V_MAC_IFG_THRSH_100 |
1912 V_MAC_SLOT_SIZE_100;
1913 cfg |= V_MAC_SPEED_SEL_100MBPS ;
1914 break;
1916 case sbmac_speed_1000:
1917 framecfg |= V_MAC_IFG_RX_1000 |
1918 V_MAC_IFG_TX_1000 |
1919 V_MAC_IFG_THRSH_1000 |
1920 V_MAC_SLOT_SIZE_1000;
1921 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1922 break;
1924 case sbmac_speed_auto: /* XXX not implemented */
1925 /* fall through */
1926 default:
1927 return 0;
1930 /*
1931 * Send the bits back to the hardware
1932 */
1934 __raw_writeq(framecfg, s->sbm_framecfg);
1935 __raw_writeq(cfg, s->sbm_maccfg);
1937 return 1;
1940 /**********************************************************************
1941 * SBMAC_SET_DUPLEX(s,duplex,fc)
1943 * Set Ethernet duplex and flow control options for this MAC
1944 * Warning: must be called when MAC is off!
1946 * Input parameters:
1947 * s - sbmac structure
1948 * duplex - duplex setting (see sbmac_duplex_t)
1949 * fc - flow control setting (see sbmac_fc_t)
1951 * Return value:
1952 * 1 if ok
1953 * 0 if an invalid parameter combination was specified
1954 ********************************************************************* */
1956 static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc)
1958 uint64_t cfg;
1960 /*
1961 * Save new current values
1962 */
1964 s->sbm_duplex = duplex;
1965 s->sbm_fc = fc;
1967 if (s->sbm_state == sbmac_state_on)
1968 return 0; /* save for next restart */
1970 /*
1971 * Read current register values
1972 */
1974 cfg = __raw_readq(s->sbm_maccfg);
1976 /*
1977 * Mask off the stuff we're about to change
1978 */
1980 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1983 switch (duplex) {
1984 case sbmac_duplex_half:
1985 switch (fc) {
1986 case sbmac_fc_disabled:
1987 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1988 break;
1990 case sbmac_fc_collision:
1991 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
1992 break;
1994 case sbmac_fc_carrier:
1995 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
1996 break;
1998 case sbmac_fc_auto: /* XXX not implemented */
1999 /* fall through */
2000 case sbmac_fc_frame: /* not valid in half duplex */
2001 default: /* invalid selection */
2002 return 0;
2004 break;
2006 case sbmac_duplex_full:
2007 switch (fc) {
2008 case sbmac_fc_disabled:
2009 cfg |= V_MAC_FC_CMD_DISABLED;
2010 break;
2012 case sbmac_fc_frame:
2013 cfg |= V_MAC_FC_CMD_ENABLED;
2014 break;
2016 case sbmac_fc_collision: /* not valid in full duplex */
2017 case sbmac_fc_carrier: /* not valid in full duplex */
2018 case sbmac_fc_auto: /* XXX not implemented */
2019 /* fall through */
2020 default:
2021 return 0;
2023 break;
2024 case sbmac_duplex_auto:
2025 /* XXX not implemented */
2026 break;
2029 /*
2030 * Send the bits back to the hardware
2031 */
2033 __raw_writeq(cfg, s->sbm_maccfg);
2035 return 1;
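/*
 * Illustrative usage (not called from anywhere in this file): both
 * setters above only touch the hardware while the MAC is off; if the
 * channel is running they merely record the value for the next
 * restart.  A hypothetical caller forcing 100/full with frame-based
 * flow control on a live port would therefore do something like
 *
 *	sbmac_set_channel_state(sc, sbmac_state_off);
 *	sbmac_set_speed(sc, sbmac_speed_100);
 *	sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame);
 *	sbmac_set_channel_state(sc, sbmac_state_on);
 *
 * sbmac_mii_poll()/sbmac_timer() below follow the same pattern:
 * record the negotiated values, then restart the channel so they
 * take effect.
 */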
2041 /**********************************************************************
2042 * SBMAC_INTR()
2044 * Interrupt handler for MAC interrupts
2046 * Input parameters:
2047 * MAC structure
2049 * Return value:
2050 * nothing
2051 ********************************************************************* */
2052 static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
2054 struct net_device *dev = (struct net_device *) dev_instance;
2055 struct sbmac_softc *sc = netdev_priv(dev);
2056 uint64_t isr;
2057 int handled = 0;
2059 for (;;) {
2061 /*
2062 * Read the ISR (this clears the bits in the real
2063 * register, except for counter addr)
2064 */
2066 isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
2068 if (isr == 0)
2069 break;
2071 handled = 1;
2073 /*
2074 * Transmits on channel 0
2075 */
2077 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
2078 sbdma_tx_process(sc,&(sc->sbm_txdma));
2081 /*
2082 * Receives on channel 0
2083 */
2085 /*
2086 * It's important to test all the bits (or at least the
2087 * EOP_SEEN bit) when deciding to run the RX process,
2088 * particularly when interrupt coalescing is in use, so that
2089 * we handle the following case:
2091 * If you have some packets waiting (have been received
2092 * but no interrupt) and get a TX interrupt before
2093 * the RX timer or counter expires, reading the ISR
2094 * above will clear the timer and counter, and you
2095 * won't get another interrupt until a packet shows
2096 * up to start the timer again. Testing
2097 * EOP_SEEN here takes care of this case.
2098 * (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0)
2099 */
2102 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
2103 sbdma_rx_process(sc,&(sc->sbm_rxdma));
2106 return IRQ_RETVAL(handled);
2110 /**********************************************************************
2111 * SBMAC_START_TX(skb,dev)
2113 * Start output on the specified interface. Basically, we
2114 * queue as many buffers as we can until the ring fills up, or
2115 * we run off the end of the queue, whichever comes first.
2117 * Input parameters:
2120 * Return value:
2121 * nothing
2122 ********************************************************************* */
2123 static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2125 struct sbmac_softc *sc = netdev_priv(dev);
2127 /* lock eth irq */
2128 spin_lock_irq (&sc->sbm_lock);
2130 /*
2131 * Put the buffer on the transmit ring. If we
2132 * don't have room, stop the queue.
2133 */
2135 if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
2136 /* XXX save skb that we could not send */
2137 netif_stop_queue(dev);
2138 spin_unlock_irq(&sc->sbm_lock);
2140 return 1;
2143 dev->trans_start = jiffies;
2145 spin_unlock_irq (&sc->sbm_lock);
2147 return 0;
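/*
 * Note on the return values above: in this kernel's hard_start_xmit
 * convention, returning 1 (after netif_stop_queue()) tells the core
 * the skb was not consumed and should be requeued -- effectively
 * NETDEV_TX_BUSY -- while 0 means the packet is now owned by the
 * transmit ring.
 */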
2150 /**********************************************************************
2151 * SBMAC_SETMULTI(sc)
2153 * Reprogram the multicast table into the hardware, given
2154 * the list of multicasts associated with the interface
2155 * structure.
2157 * Input parameters:
2158 * sc - softc
2160 * Return value:
2161 * nothing
2162 ********************************************************************* */
2164 static void sbmac_setmulti(struct sbmac_softc *sc)
2166 uint64_t reg;
2167 volatile void __iomem *port;
2168 int idx;
2169 struct dev_mc_list *mclist;
2170 struct net_device *dev = sc->sbm_dev;
2172 /*
2173 * Clear out entire multicast table. We do this by nuking
2174 * the entire hash table and all the direct matches except
2175 * the first one, which is used for our station address
2176 */
2178 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
2179 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
2180 __raw_writeq(0, port);
2183 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
2184 port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
2185 __raw_writeq(0, port);
2188 /*
2189 * Clear the filter to say we don't want any multicasts.
2190 */
2192 reg = __raw_readq(sc->sbm_rxfilter);
2193 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
2194 __raw_writeq(reg, sc->sbm_rxfilter);
2196 if (dev->flags & IFF_ALLMULTI) {
2197 /*
2198 * Enable ALL multicasts. Do this by inverting the
2199 * multicast enable bit.
2200 */
2201 reg = __raw_readq(sc->sbm_rxfilter);
2202 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
2203 __raw_writeq(reg, sc->sbm_rxfilter);
2204 return;
2208 /*
2209 * Program new multicast entries. For now, only use the
2210 * perfect filter. In the future we'll need to use the
2211 * hash filter if the perfect filter overflows
2212 */
2214 /* XXX only using perfect filter for now, need to use hash
2215 * XXX if the table overflows */
2217 idx = 1; /* skip station address */
2218 mclist = dev->mc_list;
2219 while (mclist && (idx < MAC_ADDR_COUNT)) {
2220 reg = sbmac_addr2reg(mclist->dmi_addr);
2221 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
2222 __raw_writeq(reg, port);
2223 idx++;
2224 mclist = mclist->next;
2227 /*
2228 * Enable the "accept multicast bits" if we programmed at least one
2229 * multicast.
2230 */
2232 if (idx > 1) {
2233 reg = __raw_readq(sc->sbm_rxfilter);
2234 reg |= M_MAC_MCAST_EN;
2235 __raw_writeq(reg, sc->sbm_rxfilter);
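/*
 * Note: because entry 0 of the exact-match table holds the station
 * address, at most MAC_ADDR_COUNT-1 multicast addresses fit in the
 * perfect filter; anything beyond that is silently dropped until the
 * hash-filter support mentioned in the XXX above is implemented.
 */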
2241 #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2242 /**********************************************************************
2243 * SBMAC_PARSE_XDIGIT(str)
2245 * Parse a hex digit, returning its value
2247 * Input parameters:
2248 * str - character
2250 * Return value:
2251 * hex value, or -1 if invalid
2252 ********************************************************************* */
2254 static int sbmac_parse_xdigit(char str)
2256 int digit;
2258 if ((str >= '0') && (str <= '9'))
2259 digit = str - '0';
2260 else if ((str >= 'a') && (str <= 'f'))
2261 digit = str - 'a' + 10;
2262 else if ((str >= 'A') && (str <= 'F'))
2263 digit = str - 'A' + 10;
2264 else
2265 return -1;
2267 return digit;
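/*
 * Example: sbmac_parse_xdigit('7') == 7, sbmac_parse_xdigit('b') ==
 * sbmac_parse_xdigit('B') == 11, and any non-hex character such as
 * 'g' yields -1.
 */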
2270 /**********************************************************************
2271 * SBMAC_PARSE_HWADDR(str,hwaddr)
2273 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2274 * Ethernet address.
2276 * Input parameters:
2277 * str - string
2278 * hwaddr - pointer to hardware address
2280 * Return value:
2281 * 0 if ok, else -1
2282 ********************************************************************* */
2284 static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
2286 int digit1,digit2;
2287 int idx = 6;
2289 while (*str && (idx > 0)) {
2290 digit1 = sbmac_parse_xdigit(*str);
2291 if (digit1 < 0)
2292 return -1;
2293 str++;
2294 if (!*str)
2295 return -1;
2297 if ((*str == ':') || (*str == '-')) {
2298 digit2 = digit1;
2299 digit1 = 0;
2301 else {
2302 digit2 = sbmac_parse_xdigit(*str);
2303 if (digit2 < 0)
2304 return -1;
2305 str++;
2308 *hwaddr++ = (digit1 << 4) | digit2;
2309 idx--;
2311 if (*str == '-')
2312 str++;
2313 if (*str == ':')
2314 str++;
2316 return 0;
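/*
 * Example: "00:11:22:33:44:55" fills hwaddr with
 * { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }.  Single-digit groups are
 * also accepted ("0:1:2:3:4:5" gives { 0x00, 0x01, 0x02, 0x03, 0x04,
 * 0x05 }), and either ':' or '-' may separate the bytes.
 */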
2318 #endif
2320 static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
2322 if (new_mtu > ENET_PACKET_SIZE)
2323 return -EINVAL;
2324 _dev->mtu = new_mtu;
2325 printk(KERN_INFO "%s: changing the MTU to %d\n", _dev->name, new_mtu);
2326 return 0;
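/*
 * This hook is reached via dev_set_mtu() when userspace issues
 * SIOCSIFMTU (e.g. "ifconfig eth0 mtu 1500"); the only check applied
 * here is the ENET_PACKET_SIZE upper bound.
 */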
2329 /**********************************************************************
2330 * SBMAC_INIT(dev)
2332 * Attach routine - init hardware and hook ourselves into linux
2334 * Input parameters:
2335 * dev - net_device structure
2337 * Return value:
2338 * status
2339 ********************************************************************* */
2341 static int sbmac_init(struct net_device *dev, int idx)
2343 struct sbmac_softc *sc;
2344 unsigned char *eaddr;
2345 uint64_t ea_reg;
2346 int i;
2347 int err;
2349 sc = netdev_priv(dev);
2351 /* Determine controller base address */
2353 sc->sbm_base = IOADDR(dev->base_addr);
2354 sc->sbm_dev = dev;
2355 sc->sbe_idx = idx;
2357 eaddr = sc->sbm_hwaddr;
2359 /*
2360 * Read the ethernet address. The firmware left this programmed
2361 * for us in the ethernet address register for each mac.
2362 */
2364 ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
2365 __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
2366 for (i = 0; i < 6; i++) {
2367 eaddr[i] = (uint8_t) (ea_reg & 0xFF);
2368 ea_reg >>= 8;
2371 for (i = 0; i < 6; i++) {
2372 dev->dev_addr[i] = eaddr[i];
2376 /*
2377 * Init packet size
2378 */
2380 sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
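/*
 * The receive buffer is a maximum-size frame plus two cache lines of
 * slack and ETHER_ALIGN bytes -- presumably so the DMA code can
 * cache-align the buffer start and still offset the packet so the IP
 * header ends up naturally aligned.
 */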
2382 /*
2383 * Initialize context (get pointers to registers and stuff), then
2384 * allocate the memory for the descriptor tables.
2385 */
2387 sbmac_initctx(sc);
2389 /*
2390 * Set up Linux device callbacks
2391 */
2393 spin_lock_init(&(sc->sbm_lock));
2395 dev->open = sbmac_open;
2396 dev->hard_start_xmit = sbmac_start_tx;
2397 dev->stop = sbmac_close;
2398 dev->get_stats = sbmac_get_stats;
2399 dev->set_multicast_list = sbmac_set_rx_mode;
2400 dev->do_ioctl = sbmac_mii_ioctl;
2401 dev->tx_timeout = sbmac_tx_timeout;
2402 dev->watchdog_timeo = TX_TIMEOUT;
2404 dev->change_mtu = sb1250_change_mtu;
2406 /* This is needed on PASS2 parts for the Rx H/W checksum feature */
2407 sbmac_set_iphdr_offset(sc);
2409 err = register_netdev(dev);
2410 if (err)
2411 goto out_uninit;
2413 if (sc->rx_hw_checksum == ENABLE) {
2414 printk(KERN_INFO "%s: enabling TCP rcv checksum\n",
2415 sc->sbm_dev->name);
2418 /*
2419 * Display Ethernet address (this is called during the config
2420 * process so we need to finish off the config message that
2421 * was being displayed)
2422 */
2423 printk(KERN_INFO
2424 "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
2425 dev->name, dev->base_addr,
2426 eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]);
2429 return 0;
2431 out_uninit:
2432 sbmac_uninitctx(sc);
2434 return err;
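/**********************************************************************
 * SBMAC_OPEN(dev)
 *
 * Bring the interface up: clear any stale interrupt status, claim
 * the MAC's (shared) interrupt line, probe for a PHY, turn the
 * channel on and start the link-status polling timer.
 *
 * Input parameters:
 * dev - net_device structure
 *
 * Return value:
 * 0 if ok, else a negative errno
 ********************************************************************* */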
2438 static int sbmac_open(struct net_device *dev)
2440 struct sbmac_softc *sc = netdev_priv(dev);
2442 if (debug > 1) {
2443 printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
2446 /*
2447 * map/route interrupt (clear status first, in case something
2448 * weird is pending; we haven't initialized the mac registers
2449 * yet)
2450 */
2452 __raw_readq(sc->sbm_isr);
2453 if (request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev))
2454 return -EBUSY;
2456 /*
2457 * Probe phy address
2458 */
2460 if (sbmac_mii_probe(dev) == -1) {
2461 printk(KERN_ERR "%s: failed to probe PHY.\n", dev->name);
free_irq(dev->irq, dev); /* don't leak the interrupt we just requested */
2462 return -EINVAL;
2465 /*
2466 * Configure default speed
2467 */
2469 sbmac_mii_poll(sc,noisy_mii);
2471 /*
2472 * Turn on the channel
2473 */
2475 sbmac_set_channel_state(sc,sbmac_state_on);
2477 /*
2478 * XXX Station address is in dev->dev_addr
2479 */
2481 if (dev->if_port == 0)
2482 dev->if_port = 0;
2484 netif_start_queue(dev);
2486 sbmac_set_rx_mode(dev);
2488 /* Set the timer to check for link beat. */
2489 init_timer(&sc->sbm_timer);
2490 sc->sbm_timer.expires = jiffies + 2 * HZ/100;
2491 sc->sbm_timer.data = (unsigned long)dev;
2492 sc->sbm_timer.function = &sbmac_timer;
2493 add_timer(&sc->sbm_timer);
2495 return 0;
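/**********************************************************************
 * SBMAC_MII_PROBE(dev)
 *
 * Scan MII addresses 1-30 and record the first PHY whose BMSR reads
 * nonzero in sbm_phys[0], logging its vendor and part number.
 *
 * Input parameters:
 * dev - net_device structure
 *
 * Return value:
 * PHY address if found, -1 if no PHY responded
 ********************************************************************* */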
2498 static int sbmac_mii_probe(struct net_device *dev)
2500 int i;
2501 struct sbmac_softc *s = netdev_priv(dev);
2502 u16 bmsr, id1, id2;
2503 u32 vendor, device;
2505 for (i=1; i<31; i++) {
2506 bmsr = sbmac_mii_read(s, i, MII_BMSR);
2507 if (bmsr != 0) {
2508 s->sbm_phys[0] = i;
2509 id1 = sbmac_mii_read(s, i, MII_PHYIDR1);
2510 id2 = sbmac_mii_read(s, i, MII_PHYIDR2);
2511 vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f); /* 22-bit OUI-derived ID */
2512 device = (id2 >> 4) & 0x3f; /* 6-bit model number */
2514 printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n",
2515 dev->name, i, vendor, device);
2516 return i;
2519 return -1;
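/**********************************************************************
 * SBMAC_MII_POLL(s,noisy)
 *
 * Read the PHY status and autonegotiation registers and update the
 * softc's speed, duplex and flow-control settings to match whatever
 * the link partner advertised.
 *
 * Input parameters:
 * s - sbmac structure
 * noisy - nonzero to log the negotiated link mode
 *
 * Return value:
 * 1 if the link parameters changed, 0 otherwise
 ********************************************************************* */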
2523 static int sbmac_mii_poll(struct sbmac_softc *s,int noisy)
2525 int bmsr,bmcr,k1stsr,anlpar;
2526 int chg;
2527 char buffer[100];
2528 char *p = buffer;
2530 /* Read the mode status and mode control registers. */
2531 bmsr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMSR);
2532 bmcr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMCR);
2534 /* get the link partner status */
2535 anlpar = sbmac_mii_read(s,s->sbm_phys[0],MII_ANLPAR);
2537 /* if supported, read the 1000baseT register */
2538 if (bmsr & BMSR_1000BT_XSR) {
2539 k1stsr = sbmac_mii_read(s,s->sbm_phys[0],MII_K1STSR);
2541 else {
2542 k1stsr = 0;
2545 chg = 0;
2547 if ((bmsr & BMSR_LINKSTAT) == 0) {
2548 /*
2549 * If link status is down, clear out old info so that when
2550 * it comes back up it will force us to reconfigure speed
2551 */
2552 s->sbm_phy_oldbmsr = 0;
2553 s->sbm_phy_oldanlpar = 0;
2554 s->sbm_phy_oldk1stsr = 0;
2555 return 0;
2558 if ((s->sbm_phy_oldbmsr != bmsr) ||
2559 (s->sbm_phy_oldanlpar != anlpar) ||
2560 (s->sbm_phy_oldk1stsr != k1stsr)) {
2561 if (debug > 1) {
2562 printk(KERN_DEBUG "%s: bmsr:%x/%x anlpar:%x/%x k1stsr:%x/%x\n",
2563 s->sbm_dev->name,
2564 s->sbm_phy_oldbmsr,bmsr,
2565 s->sbm_phy_oldanlpar,anlpar,
2566 s->sbm_phy_oldk1stsr,k1stsr);
2568 s->sbm_phy_oldbmsr = bmsr;
2569 s->sbm_phy_oldanlpar = anlpar;
2570 s->sbm_phy_oldk1stsr = k1stsr;
2571 chg = 1;
2574 if (chg == 0)
2575 return 0;
2577 p += sprintf(p,"Link speed: ");
2579 if (k1stsr & K1STSR_LP1KFD) {
2580 s->sbm_speed = sbmac_speed_1000;
2581 s->sbm_duplex = sbmac_duplex_full;
2582 s->sbm_fc = sbmac_fc_frame;
2583 p += sprintf(p,"1000BaseT FDX");
2585 else if (k1stsr & K1STSR_LP1KHD) {
2586 s->sbm_speed = sbmac_speed_1000;
2587 s->sbm_duplex = sbmac_duplex_half;
2588 s->sbm_fc = sbmac_fc_disabled;
2589 p += sprintf(p,"1000BaseT HDX");
2591 else if (anlpar & ANLPAR_TXFD) {
2592 s->sbm_speed = sbmac_speed_100;
2593 s->sbm_duplex = sbmac_duplex_full;
2594 s->sbm_fc = (anlpar & ANLPAR_PAUSE) ? sbmac_fc_frame : sbmac_fc_disabled;
2595 p += sprintf(p,"100BaseT FDX");
2597 else if (anlpar & ANLPAR_TXHD) {
2598 s->sbm_speed = sbmac_speed_100;
2599 s->sbm_duplex = sbmac_duplex_half;
2600 s->sbm_fc = sbmac_fc_disabled;
2601 p += sprintf(p,"100BaseT HDX");
2603 else if (anlpar & ANLPAR_10FD) {
2604 s->sbm_speed = sbmac_speed_10;
2605 s->sbm_duplex = sbmac_duplex_full;
2606 s->sbm_fc = sbmac_fc_frame;
2607 p += sprintf(p,"10BaseT FDX");
2609 else if (anlpar & ANLPAR_10HD) {
2610 s->sbm_speed = sbmac_speed_10;
2611 s->sbm_duplex = sbmac_duplex_half;
2612 s->sbm_fc = sbmac_fc_collision;
2613 p += sprintf(p,"10BaseT HDX");
2615 else {
2616 p += sprintf(p,"Unknown");
2619 if (noisy) {
2620 printk(KERN_INFO "%s: %s\n",s->sbm_dev->name,buffer);
2623 return 1;
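/**********************************************************************
 * SBMAC_TIMER(data)
 *
 * Periodic link monitor: mirrors the PHY "link established" bit into
 * the carrier state and, when autonegotiation settles on a new
 * speed/duplex, restarts the channel so the MAC gets reprogrammed.
 * Rearms itself once a second.
 *
 * Input parameters:
 * data - net_device pointer cast to unsigned long
 *
 * Return value:
 * nothing
 ********************************************************************* */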
2627 static void sbmac_timer(unsigned long data)
2629 struct net_device *dev = (struct net_device *)data;
2630 struct sbmac_softc *sc = netdev_priv(dev);
2631 int next_tick = HZ;
2632 int mii_status;
2634 spin_lock_irq (&sc->sbm_lock);
2636 /* make IFF_RUNNING follow the MII status bit "Link established" */
2637 mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR);
2639 if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) {
2640 sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT;
2641 if (mii_status & BMSR_LINKSTAT) {
2642 netif_carrier_on(dev);
2644 else {
2645 netif_carrier_off(dev);
2649 /*
2650 * Poll the PHY to see what speed we should be running at
2651 */
2653 if (sbmac_mii_poll(sc,noisy_mii)) {
2654 if (sc->sbm_state != sbmac_state_off) {
2655 /*
2656 * something changed, restart the channel
2657 */
2658 if (debug > 1) {
2659 printk("%s: restarting channel because speed changed\n",
2660 sc->sbm_dev->name);
2662 sbmac_channel_stop(sc);
2663 sbmac_channel_start(sc);
2667 spin_unlock_irq (&sc->sbm_lock);
2669 sc->sbm_timer.expires = jiffies + next_tick;
2670 add_timer(&sc->sbm_timer);
2674 static void sbmac_tx_timeout (struct net_device *dev)
2676 struct sbmac_softc *sc = netdev_priv(dev);
2678 spin_lock_irq (&sc->sbm_lock);
2681 dev->trans_start = jiffies;
2682 sc->sbm_stats.tx_errors++;
2684 spin_unlock_irq (&sc->sbm_lock);
2686 printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
2692 static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
2694 struct sbmac_softc *sc = netdev_priv(dev);
2695 unsigned long flags;
2697 spin_lock_irqsave(&sc->sbm_lock, flags);
2699 /* XXX update other stats here */
2701 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2703 return &sc->sbm_stats;
2708 static void sbmac_set_rx_mode(struct net_device *dev)
2710 unsigned long flags;
2711 int msg_flag = 0;
2712 struct sbmac_softc *sc = netdev_priv(dev);
2714 spin_lock_irqsave(&sc->sbm_lock, flags);
2715 if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
2716 /*
2717 * Promiscuous changed.
2718 */
2720 if (dev->flags & IFF_PROMISC) {
2721 /* Unconditionally log net taps. */
2722 msg_flag = 1;
2723 sbmac_promiscuous_mode(sc,1);
2725 else {
2726 msg_flag = 2;
2727 sbmac_promiscuous_mode(sc,0);
2730 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2732 if (msg_flag) {
2733 printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n",
2734 dev->name,(msg_flag==1)?"en":"dis");
2737 /*
2738 * Program the multicasts. Do this every time.
2739 */
2741 sbmac_setmulti(sc);
2745 static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2747 struct sbmac_softc *sc = netdev_priv(dev);
2748 u16 *data = (u16 *)&rq->ifr_ifru;
2749 unsigned long flags;
2750 int retval;
2752 spin_lock_irqsave(&sc->sbm_lock, flags);
2753 retval = 0;
2755 switch(cmd) {
2756 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
2757 data[0] = sc->sbm_phys[0] & 0x1f;
2758 /* Fall Through */
2759 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
2760 data[3] = sbmac_mii_read(sc, data[0] & 0x1f, data[1] & 0x1f);
2761 break;
2762 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
2763 if (!capable(CAP_NET_ADMIN)) {
2764 retval = -EPERM;
2765 break;
2767 if (debug > 1) {
2768 printk(KERN_DEBUG "%s: sbmac_mii_ioctl: write %02X %02X %02X\n",dev->name,
2769 data[0],data[1],data[2]);
2771 sbmac_mii_write(sc, data[0] & 0x1f, data[1] & 0x1f, data[2]);
2772 break;
2773 default:
2774 retval = -EOPNOTSUPP;
2777 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2778 return retval;
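/**********************************************************************
 * SBMAC_CLOSE(dev)
 *
 * Shut the interface down: turn the channel off, kill the link
 * timer, stop the queue, release the interrupt line and empty both
 * DMA rings.
 *
 * Input parameters:
 * dev - net_device structure
 *
 * Return value:
 * 0
 ********************************************************************* */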
2781 static int sbmac_close(struct net_device *dev)
2783 struct sbmac_softc *sc = netdev_priv(dev);
2784 unsigned long flags;
2785 int irq;
2787 sbmac_set_channel_state(sc,sbmac_state_off);
2789 del_timer_sync(&sc->sbm_timer);
2791 spin_lock_irqsave(&sc->sbm_lock, flags);
2793 netif_stop_queue(dev);
2795 if (debug > 1) {
2796 printk(KERN_DEBUG "%s: Shutting down ethercard\n",dev->name);
2799 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2801 irq = dev->irq;
2802 synchronize_irq(irq);
2803 free_irq(irq, dev);
2805 sbdma_emptyring(&(sc->sbm_txdma));
2806 sbdma_emptyring(&(sc->sbm_rxdma));
2808 return 0;
2813 #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2814 static void
2815 sbmac_setup_hwaddr(int chan,char *addr)
2817 uint8_t eaddr[6];
2818 uint64_t val;
2819 unsigned long port;
2821 port = A_MAC_CHANNEL_BASE(chan);
2822 sbmac_parse_hwaddr(addr,eaddr);
2823 val = sbmac_addr2reg(eaddr);
2824 __raw_writeq(val, IOADDR(port+R_MAC_ETHERNET_ADDR));
2825 val = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
2827 #endif
2829 static struct net_device *dev_sbmac[MAX_UNITS];
2831 static int __init
2832 sbmac_init_module(void)
2834 int idx;
2835 struct net_device *dev;
2836 unsigned long port;
2837 int chip_max_units;
2839 /* Set the number of available units based on the SOC type. */
2840 switch (soc_type) {
2841 case K_SYS_SOC_TYPE_BCM1250:
2842 case K_SYS_SOC_TYPE_BCM1250_ALT:
2843 chip_max_units = 3;
2844 break;
2845 case K_SYS_SOC_TYPE_BCM1120:
2846 case K_SYS_SOC_TYPE_BCM1125:
2847 case K_SYS_SOC_TYPE_BCM1125H:
2848 case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
2849 chip_max_units = 2;
2850 break;
2851 case K_SYS_SOC_TYPE_BCM1x55:
2852 case K_SYS_SOC_TYPE_BCM1x80:
2853 chip_max_units = 4;
2854 break;
2855 default:
2856 chip_max_units = 0;
2857 break;
2859 if (chip_max_units > MAX_UNITS)
2860 chip_max_units = MAX_UNITS;
2862 /*
2863 * For bringup when not using the firmware, we can pre-fill
2864 * the MAC addresses using the SBMAC_ETHn_HWADDR strings
2865 * defined in this file (or maybe from the config file?)
2866 */
2867 #ifdef SBMAC_ETH0_HWADDR
2868 if (chip_max_units > 0)
2869 sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR);
2870 #endif
2871 #ifdef SBMAC_ETH1_HWADDR
2872 if (chip_max_units > 1)
2873 sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR);
2874 #endif
2875 #ifdef SBMAC_ETH2_HWADDR
2876 if (chip_max_units > 2)
2877 sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR);
2878 #endif
2879 #ifdef SBMAC_ETH3_HWADDR
2880 if (chip_max_units > 3)
2881 sbmac_setup_hwaddr(3,SBMAC_ETH3_HWADDR);
2882 #endif
2884 /*
2885 * Walk through the Ethernet controllers and find
2886 * those that have their MAC addresses set.
2887 */
2888 for (idx = 0; idx < chip_max_units; idx++) {
2890 /*
2891 * This is the base address of the MAC.
2892 */
2894 port = A_MAC_CHANNEL_BASE(idx);
2896 /*
2897 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
2898 * value for us by the firmware if we're going to use this MAC.
2899 * If we find a zero, skip this MAC.
2900 */
2902 sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
2903 if (sbmac_orig_hwaddr[idx] == 0) {
2904 printk(KERN_DEBUG "sbmac: not configuring MAC at "
2905 "%lx\n", port);
2906 continue;
2909 /*
2910 * Okay, cool. Initialize this MAC.
2911 */
2913 dev = alloc_etherdev(sizeof(struct sbmac_softc));
2914 if (!dev)
2915 return -ENOMEM; /* no memory for another net_device */
2917 printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
2919 dev->irq = UNIT_INT(idx);
2920 dev->base_addr = port;
2921 dev->mem_end = 0;
2922 if (sbmac_init(dev, idx)) {
2923 port = A_MAC_CHANNEL_BASE(idx);
2924 __raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR));
2925 free_netdev(dev);
2926 continue;
2928 dev_sbmac[idx] = dev;
2930 return 0;
2934 static void __exit
2935 sbmac_cleanup_module(void)
2937 struct net_device *dev;
2938 int idx;
2940 for (idx = 0; idx < MAX_UNITS; idx++) {
2941 struct sbmac_softc *sc;
2942 dev = dev_sbmac[idx];
2943 if (!dev)
2944 continue;
2946 sc = netdev_priv(dev);
2947 unregister_netdev(dev);
2948 sbmac_uninitctx(sc);
2949 free_netdev(dev);
2953 module_init(sbmac_init_module);
2954 module_exit(sbmac_cleanup_module);