ia64/linux-2.6.18-xen.hg

view drivers/net/skge.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, causing temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we only partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
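
(A rough illustration of the retry-on-timer behaviour described in the
changeset message above, written against the modern workqueue API. The
helper names and the one-second retry delay are assumptions made for
this sketch only; the actual balloon driver change is not part of the
file shown below.)

#include <linux/workqueue.h>

/*
 * Illustrative sketch only -- not the actual patch. The helpers
 * current_target(), current_pages, increase_reservation() and
 * decrease_reservation() are assumed to exist and to report the
 * number of pages actually transferred.
 */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

static void balloon_process(struct work_struct *work)
{
	long credit = current_target() - current_pages;

	if (credit > 0 && increase_reservation(credit) < credit)
		/*
		 * The host may simply be under temporary memory pressure:
		 * keep whatever pages we did get and retry later instead
		 * of recording a "hard limit".
		 */
		schedule_delayed_work(&balloon_worker, HZ);
	else if (credit < 0 && decrease_reservation(-credit) < -credit)
		schedule_delayed_work(&balloon_worker, HZ);
}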
line source
1 /*
2 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
3 * Ethernet adapters. Based on earlier sk98lin, e100 and
4 * FreeBSD if_sk drivers.
5 *
6 * This driver intentionally does not support all the features
7 * of the original driver such as link fail-over and link management because
8 * those should be done at higher levels.
9 *
10 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
27 #include <linux/in.h>
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/pci.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/delay.h>
38 #include <linux/crc32.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/mii.h>
41 #include <asm/irq.h>
43 #include "skge.h"
45 #define DRV_NAME "skge"
46 #define DRV_VERSION "1.6"
47 #define PFX DRV_NAME " "
49 #define DEFAULT_TX_RING_SIZE 128
50 #define DEFAULT_RX_RING_SIZE 512
51 #define MAX_TX_RING_SIZE 1024
52 #define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
53 #define MAX_RX_RING_SIZE 4096
54 #define RX_COPY_THRESHOLD 128
55 #define RX_BUF_SIZE 1536
56 #define PHY_RETRIES 1000
57 #define ETH_JUMBO_MTU 9000
58 #define TX_WATCHDOG (5 * HZ)
59 #define NAPI_WEIGHT 64
60 #define BLINK_MS 250
62 MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
63 MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
64 MODULE_LICENSE("GPL");
65 MODULE_VERSION(DRV_VERSION);
67 static const u32 default_msg
68 = NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK
69 | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN;
71 static int debug = -1; /* defaults above */
72 module_param(debug, int, 0);
73 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
75 static const struct pci_device_id skge_id_table[] = {
76 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
77 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
79 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
81 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
82 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
83 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
84 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
85 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
86 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
87 { 0 }
88 };
89 MODULE_DEVICE_TABLE(pci, skge_id_table);
91 static int skge_up(struct net_device *dev);
92 static int skge_down(struct net_device *dev);
93 static void skge_phy_reset(struct skge_port *skge);
94 static void skge_tx_clean(struct skge_port *skge);
95 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
96 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
97 static void genesis_get_stats(struct skge_port *skge, u64 *data);
98 static void yukon_get_stats(struct skge_port *skge, u64 *data);
99 static void yukon_init(struct skge_hw *hw, int port);
100 static void genesis_mac_init(struct skge_hw *hw, int port);
101 static void genesis_link_up(struct skge_port *skge);
103 /* Avoid conditionals by using array */
104 static const int txqaddr[] = { Q_XA1, Q_XA2 };
105 static const int rxqaddr[] = { Q_R1, Q_R2 };
106 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
107 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
109 static int skge_get_regs_len(struct net_device *dev)
110 {
111 return 0x4000;
112 }
114 /*
115 * Returns copy of whole control register region
116 * Note: skip RAM address register because accessing it will
117 * cause bus hangs!
118 */
119 static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
120 void *p)
121 {
122 const struct skge_port *skge = netdev_priv(dev);
123 const void __iomem *io = skge->hw->regs;
125 regs->version = 1;
126 memset(p, 0, regs->len);
127 memcpy_fromio(p, io, B3_RAM_ADDR);
129 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
130 regs->len - B3_RI_WTO_R1);
131 }
133 /* Wake-on-LAN is only supported on Yukon chips with rev 1 or above */
134 static int wol_supported(const struct skge_hw *hw)
135 {
136 return !((hw->chip_id == CHIP_ID_GENESIS ||
137 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
138 }
140 static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
141 {
142 struct skge_port *skge = netdev_priv(dev);
144 wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0;
145 wol->wolopts = skge->wol ? WAKE_MAGIC : 0;
146 }
148 static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
149 {
150 struct skge_port *skge = netdev_priv(dev);
151 struct skge_hw *hw = skge->hw;
153 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
154 return -EOPNOTSUPP;
156 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
157 return -EOPNOTSUPP;
159 skge->wol = wol->wolopts == WAKE_MAGIC;
161 if (skge->wol) {
162 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
164 skge_write16(hw, WOL_CTRL_STAT,
165 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
166 WOL_CTL_ENA_MAGIC_PKT_UNIT);
167 } else
168 skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
170 return 0;
171 }
173 /* Determine supported/advertised modes based on hardware.
174 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
175 */
176 static u32 skge_supported_modes(const struct skge_hw *hw)
177 {
178 u32 supported;
180 if (hw->copper) {
181 supported = SUPPORTED_10baseT_Half
182 | SUPPORTED_10baseT_Full
183 | SUPPORTED_100baseT_Half
184 | SUPPORTED_100baseT_Full
185 | SUPPORTED_1000baseT_Half
186 | SUPPORTED_1000baseT_Full
187 | SUPPORTED_Autoneg| SUPPORTED_TP;
189 if (hw->chip_id == CHIP_ID_GENESIS)
190 supported &= ~(SUPPORTED_10baseT_Half
191 | SUPPORTED_10baseT_Full
192 | SUPPORTED_100baseT_Half
193 | SUPPORTED_100baseT_Full);
195 else if (hw->chip_id == CHIP_ID_YUKON)
196 supported &= ~SUPPORTED_1000baseT_Half;
197 } else
198 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
199 | SUPPORTED_Autoneg;
201 return supported;
202 }
204 static int skge_get_settings(struct net_device *dev,
205 struct ethtool_cmd *ecmd)
206 {
207 struct skge_port *skge = netdev_priv(dev);
208 struct skge_hw *hw = skge->hw;
210 ecmd->transceiver = XCVR_INTERNAL;
211 ecmd->supported = skge_supported_modes(hw);
213 if (hw->copper) {
214 ecmd->port = PORT_TP;
215 ecmd->phy_address = hw->phy_addr;
216 } else
217 ecmd->port = PORT_FIBRE;
219 ecmd->advertising = skge->advertising;
220 ecmd->autoneg = skge->autoneg;
221 ecmd->speed = skge->speed;
222 ecmd->duplex = skge->duplex;
223 return 0;
224 }
226 static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
227 {
228 struct skge_port *skge = netdev_priv(dev);
229 const struct skge_hw *hw = skge->hw;
230 u32 supported = skge_supported_modes(hw);
232 if (ecmd->autoneg == AUTONEG_ENABLE) {
233 ecmd->advertising = supported;
234 skge->duplex = -1;
235 skge->speed = -1;
236 } else {
237 u32 setting;
239 switch (ecmd->speed) {
240 case SPEED_1000:
241 if (ecmd->duplex == DUPLEX_FULL)
242 setting = SUPPORTED_1000baseT_Full;
243 else if (ecmd->duplex == DUPLEX_HALF)
244 setting = SUPPORTED_1000baseT_Half;
245 else
246 return -EINVAL;
247 break;
248 case SPEED_100:
249 if (ecmd->duplex == DUPLEX_FULL)
250 setting = SUPPORTED_100baseT_Full;
251 else if (ecmd->duplex == DUPLEX_HALF)
252 setting = SUPPORTED_100baseT_Half;
253 else
254 return -EINVAL;
255 break;
257 case SPEED_10:
258 if (ecmd->duplex == DUPLEX_FULL)
259 setting = SUPPORTED_10baseT_Full;
260 else if (ecmd->duplex == DUPLEX_HALF)
261 setting = SUPPORTED_10baseT_Half;
262 else
263 return -EINVAL;
264 break;
265 default:
266 return -EINVAL;
267 }
269 if ((setting & supported) == 0)
270 return -EINVAL;
272 skge->speed = ecmd->speed;
273 skge->duplex = ecmd->duplex;
274 }
276 skge->autoneg = ecmd->autoneg;
277 skge->advertising = ecmd->advertising;
279 if (netif_running(dev))
280 skge_phy_reset(skge);
282 return (0);
283 }
285 static void skge_get_drvinfo(struct net_device *dev,
286 struct ethtool_drvinfo *info)
287 {
288 struct skge_port *skge = netdev_priv(dev);
290 strcpy(info->driver, DRV_NAME);
291 strcpy(info->version, DRV_VERSION);
292 strcpy(info->fw_version, "N/A");
293 strcpy(info->bus_info, pci_name(skge->hw->pdev));
294 }
296 static const struct skge_stat {
297 char name[ETH_GSTRING_LEN];
298 u16 xmac_offset;
299 u16 gma_offset;
300 } skge_stats[] = {
301 { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI },
302 { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI },
304 { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK },
305 { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK },
306 { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK },
307 { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK },
308 { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK },
309 { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK },
310 { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE },
311 { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE },
313 { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL },
314 { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL },
315 { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL },
316 { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL },
317 { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
318 { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },
320 { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
321 { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
322 { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG },
323 { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
324 { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
325 };
327 static int skge_get_stats_count(struct net_device *dev)
328 {
329 return ARRAY_SIZE(skge_stats);
330 }
332 static void skge_get_ethtool_stats(struct net_device *dev,
333 struct ethtool_stats *stats, u64 *data)
334 {
335 struct skge_port *skge = netdev_priv(dev);
337 if (skge->hw->chip_id == CHIP_ID_GENESIS)
338 genesis_get_stats(skge, data);
339 else
340 yukon_get_stats(skge, data);
341 }
343 /* Use hardware MIB variables for critical path statistics and
344 * transmit feedback not reported at interrupt.
345 * Other errors are accounted for in interrupt handler.
346 */
347 static struct net_device_stats *skge_get_stats(struct net_device *dev)
348 {
349 struct skge_port *skge = netdev_priv(dev);
350 u64 data[ARRAY_SIZE(skge_stats)];
352 if (skge->hw->chip_id == CHIP_ID_GENESIS)
353 genesis_get_stats(skge, data);
354 else
355 yukon_get_stats(skge, data);
357 skge->net_stats.tx_bytes = data[0];
358 skge->net_stats.rx_bytes = data[1];
359 skge->net_stats.tx_packets = data[2] + data[4] + data[6];
360 skge->net_stats.rx_packets = data[3] + data[5] + data[7];
361 skge->net_stats.multicast = data[3] + data[5];
362 skge->net_stats.collisions = data[10];
363 skge->net_stats.tx_aborted_errors = data[12];
365 return &skge->net_stats;
366 }
368 static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
369 {
370 int i;
372 switch (stringset) {
373 case ETH_SS_STATS:
374 for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
375 memcpy(data + i * ETH_GSTRING_LEN,
376 skge_stats[i].name, ETH_GSTRING_LEN);
377 break;
378 }
379 }
381 static void skge_get_ring_param(struct net_device *dev,
382 struct ethtool_ringparam *p)
383 {
384 struct skge_port *skge = netdev_priv(dev);
386 p->rx_max_pending = MAX_RX_RING_SIZE;
387 p->tx_max_pending = MAX_TX_RING_SIZE;
388 p->rx_mini_max_pending = 0;
389 p->rx_jumbo_max_pending = 0;
391 p->rx_pending = skge->rx_ring.count;
392 p->tx_pending = skge->tx_ring.count;
393 p->rx_mini_pending = 0;
394 p->rx_jumbo_pending = 0;
395 }
397 static int skge_set_ring_param(struct net_device *dev,
398 struct ethtool_ringparam *p)
399 {
400 struct skge_port *skge = netdev_priv(dev);
401 int err;
403 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
404 p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
405 return -EINVAL;
407 skge->rx_ring.count = p->rx_pending;
408 skge->tx_ring.count = p->tx_pending;
410 if (netif_running(dev)) {
411 skge_down(dev);
412 err = skge_up(dev);
413 if (err)
414 dev_close(dev);
415 }
417 return 0;
418 }
420 static u32 skge_get_msglevel(struct net_device *netdev)
421 {
422 struct skge_port *skge = netdev_priv(netdev);
423 return skge->msg_enable;
424 }
426 static void skge_set_msglevel(struct net_device *netdev, u32 value)
427 {
428 struct skge_port *skge = netdev_priv(netdev);
429 skge->msg_enable = value;
430 }
432 static int skge_nway_reset(struct net_device *dev)
433 {
434 struct skge_port *skge = netdev_priv(dev);
436 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
437 return -EINVAL;
439 skge_phy_reset(skge);
440 return 0;
441 }
443 static int skge_set_sg(struct net_device *dev, u32 data)
444 {
445 struct skge_port *skge = netdev_priv(dev);
446 struct skge_hw *hw = skge->hw;
448 if (hw->chip_id == CHIP_ID_GENESIS && data)
449 return -EOPNOTSUPP;
450 return ethtool_op_set_sg(dev, data);
451 }
453 static int skge_set_tx_csum(struct net_device *dev, u32 data)
454 {
455 struct skge_port *skge = netdev_priv(dev);
456 struct skge_hw *hw = skge->hw;
458 if (hw->chip_id == CHIP_ID_GENESIS && data)
459 return -EOPNOTSUPP;
461 return ethtool_op_set_tx_csum(dev, data);
462 }
464 static u32 skge_get_rx_csum(struct net_device *dev)
465 {
466 struct skge_port *skge = netdev_priv(dev);
468 return skge->rx_csum;
469 }
471 /* Only Yukon supports checksum offload. */
472 static int skge_set_rx_csum(struct net_device *dev, u32 data)
473 {
474 struct skge_port *skge = netdev_priv(dev);
476 if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
477 return -EOPNOTSUPP;
479 skge->rx_csum = data;
480 return 0;
481 }
483 static void skge_get_pauseparam(struct net_device *dev,
484 struct ethtool_pauseparam *ecmd)
485 {
486 struct skge_port *skge = netdev_priv(dev);
488 ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
489 || (skge->flow_control == FLOW_MODE_SYMMETRIC);
490 ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
491 || (skge->flow_control == FLOW_MODE_SYMMETRIC);
493 ecmd->autoneg = skge->autoneg;
494 }
496 static int skge_set_pauseparam(struct net_device *dev,
497 struct ethtool_pauseparam *ecmd)
498 {
499 struct skge_port *skge = netdev_priv(dev);
501 skge->autoneg = ecmd->autoneg;
502 if (ecmd->rx_pause && ecmd->tx_pause)
503 skge->flow_control = FLOW_MODE_SYMMETRIC;
504 else if (ecmd->rx_pause && !ecmd->tx_pause)
505 skge->flow_control = FLOW_MODE_REM_SEND;
506 else if (!ecmd->rx_pause && ecmd->tx_pause)
507 skge->flow_control = FLOW_MODE_LOC_SEND;
508 else
509 skge->flow_control = FLOW_MODE_NONE;
511 if (netif_running(dev))
512 skge_phy_reset(skge);
513 return 0;
514 }
516 /* Chip internal frequency for clock calculations */
517 static inline u32 hwkhz(const struct skge_hw *hw)
518 {
519 return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
520 }
522 /* Chip HZ to microseconds */
523 static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
524 {
525 return (ticks * 1000) / hwkhz(hw);
526 }
528 /* Microseconds to chip HZ */
529 static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
530 {
531 return hwkhz(hw) * usec / 1000;
532 }
534 static int skge_get_coalesce(struct net_device *dev,
535 struct ethtool_coalesce *ecmd)
536 {
537 struct skge_port *skge = netdev_priv(dev);
538 struct skge_hw *hw = skge->hw;
539 int port = skge->port;
541 ecmd->rx_coalesce_usecs = 0;
542 ecmd->tx_coalesce_usecs = 0;
544 if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
545 u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
546 u32 msk = skge_read32(hw, B2_IRQM_MSK);
548 if (msk & rxirqmask[port])
549 ecmd->rx_coalesce_usecs = delay;
550 if (msk & txirqmask[port])
551 ecmd->tx_coalesce_usecs = delay;
552 }
554 return 0;
555 }
557 /* Note: interrupt timer is per board, but can turn on/off per port */
558 static int skge_set_coalesce(struct net_device *dev,
559 struct ethtool_coalesce *ecmd)
560 {
561 struct skge_port *skge = netdev_priv(dev);
562 struct skge_hw *hw = skge->hw;
563 int port = skge->port;
564 u32 msk = skge_read32(hw, B2_IRQM_MSK);
565 u32 delay = 25;
567 if (ecmd->rx_coalesce_usecs == 0)
568 msk &= ~rxirqmask[port];
569 else if (ecmd->rx_coalesce_usecs < 25 ||
570 ecmd->rx_coalesce_usecs > 33333)
571 return -EINVAL;
572 else {
573 msk |= rxirqmask[port];
574 delay = ecmd->rx_coalesce_usecs;
575 }
577 if (ecmd->tx_coalesce_usecs == 0)
578 msk &= ~txirqmask[port];
579 else if (ecmd->tx_coalesce_usecs < 25 ||
580 ecmd->tx_coalesce_usecs > 33333)
581 return -EINVAL;
582 else {
583 msk |= txirqmask[port];
584 delay = min(delay, ecmd->rx_coalesce_usecs);
585 }
587 skge_write32(hw, B2_IRQM_MSK, msk);
588 if (msk == 0)
589 skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
590 else {
591 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
592 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
593 }
594 return 0;
595 }
597 enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
598 static void skge_led(struct skge_port *skge, enum led_mode mode)
599 {
600 struct skge_hw *hw = skge->hw;
601 int port = skge->port;
603 mutex_lock(&hw->phy_mutex);
604 if (hw->chip_id == CHIP_ID_GENESIS) {
605 switch (mode) {
606 case LED_MODE_OFF:
607 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
608 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
609 skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
610 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
611 break;
613 case LED_MODE_ON:
614 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
615 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
617 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
618 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
620 break;
622 case LED_MODE_TST:
623 skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
624 skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
625 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
627 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
628 break;
629 }
630 } else {
631 switch (mode) {
632 case LED_MODE_OFF:
633 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
634 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
635 PHY_M_LED_MO_DUP(MO_LED_OFF) |
636 PHY_M_LED_MO_10(MO_LED_OFF) |
637 PHY_M_LED_MO_100(MO_LED_OFF) |
638 PHY_M_LED_MO_1000(MO_LED_OFF) |
639 PHY_M_LED_MO_RX(MO_LED_OFF));
640 break;
641 case LED_MODE_ON:
642 gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
643 PHY_M_LED_PULS_DUR(PULS_170MS) |
644 PHY_M_LED_BLINK_RT(BLINK_84MS) |
645 PHY_M_LEDC_TX_CTRL |
646 PHY_M_LEDC_DP_CTRL);
648 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
649 PHY_M_LED_MO_RX(MO_LED_OFF) |
650 (skge->speed == SPEED_100 ?
651 PHY_M_LED_MO_100(MO_LED_ON) : 0));
652 break;
653 case LED_MODE_TST:
654 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
655 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
656 PHY_M_LED_MO_DUP(MO_LED_ON) |
657 PHY_M_LED_MO_10(MO_LED_ON) |
658 PHY_M_LED_MO_100(MO_LED_ON) |
659 PHY_M_LED_MO_1000(MO_LED_ON) |
660 PHY_M_LED_MO_RX(MO_LED_ON));
661 }
662 }
663 mutex_unlock(&hw->phy_mutex);
664 }
666 /* blink LEDs for finding board */
667 static int skge_phys_id(struct net_device *dev, u32 data)
668 {
669 struct skge_port *skge = netdev_priv(dev);
670 unsigned long ms;
671 enum led_mode mode = LED_MODE_TST;
673 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
674 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000;
675 else
676 ms = data * 1000;
678 while (ms > 0) {
679 skge_led(skge, mode);
680 mode ^= LED_MODE_TST;
682 if (msleep_interruptible(BLINK_MS))
683 break;
684 ms -= BLINK_MS;
685 }
687 /* back to regular LED state */
688 skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
690 return 0;
691 }
693 static struct ethtool_ops skge_ethtool_ops = {
694 .get_settings = skge_get_settings,
695 .set_settings = skge_set_settings,
696 .get_drvinfo = skge_get_drvinfo,
697 .get_regs_len = skge_get_regs_len,
698 .get_regs = skge_get_regs,
699 .get_wol = skge_get_wol,
700 .set_wol = skge_set_wol,
701 .get_msglevel = skge_get_msglevel,
702 .set_msglevel = skge_set_msglevel,
703 .nway_reset = skge_nway_reset,
704 .get_link = ethtool_op_get_link,
705 .get_ringparam = skge_get_ring_param,
706 .set_ringparam = skge_set_ring_param,
707 .get_pauseparam = skge_get_pauseparam,
708 .set_pauseparam = skge_set_pauseparam,
709 .get_coalesce = skge_get_coalesce,
710 .set_coalesce = skge_set_coalesce,
711 .get_sg = ethtool_op_get_sg,
712 .set_sg = skge_set_sg,
713 .get_tx_csum = ethtool_op_get_tx_csum,
714 .set_tx_csum = skge_set_tx_csum,
715 .get_rx_csum = skge_get_rx_csum,
716 .set_rx_csum = skge_set_rx_csum,
717 .get_strings = skge_get_strings,
718 .phys_id = skge_phys_id,
719 .get_stats_count = skge_get_stats_count,
720 .get_ethtool_stats = skge_get_ethtool_stats,
721 .get_perm_addr = ethtool_op_get_perm_addr,
722 };
724 /*
725 * Allocate ring elements and chain them together
726 * One-to-one association of board descriptors with ring elements
727 */
728 static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
729 {
730 struct skge_tx_desc *d;
731 struct skge_element *e;
732 int i;
734 ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
735 if (!ring->start)
736 return -ENOMEM;
738 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
739 e->desc = d;
740 if (i == ring->count - 1) {
741 e->next = ring->start;
742 d->next_offset = base;
743 } else {
744 e->next = e + 1;
745 d->next_offset = base + (i+1) * sizeof(*d);
746 }
747 }
748 ring->to_use = ring->to_clean = ring->start;
750 return 0;
751 }
753 /* Allocate and setup a new buffer for receiving */
754 static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
755 struct sk_buff *skb, unsigned int bufsize)
756 {
757 struct skge_rx_desc *rd = e->desc;
758 u64 map;
760 map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
761 PCI_DMA_FROMDEVICE);
763 rd->dma_lo = map;
764 rd->dma_hi = map >> 32;
765 e->skb = skb;
766 rd->csum1_start = ETH_HLEN;
767 rd->csum2_start = ETH_HLEN;
768 rd->csum1 = 0;
769 rd->csum2 = 0;
771 wmb();
773 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
774 pci_unmap_addr_set(e, mapaddr, map);
775 pci_unmap_len_set(e, maplen, bufsize);
776 }
778 /* Resume receiving using existing skb,
779 * Note: DMA address is not changed by chip.
780 * MTU not changed while receiver active.
781 */
782 static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
783 {
784 struct skge_rx_desc *rd = e->desc;
786 rd->csum2 = 0;
787 rd->csum2_start = ETH_HLEN;
789 wmb();
791 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
792 }
795 /* Free all buffers in receive ring, assumes receiver stopped */
796 static void skge_rx_clean(struct skge_port *skge)
797 {
798 struct skge_hw *hw = skge->hw;
799 struct skge_ring *ring = &skge->rx_ring;
800 struct skge_element *e;
802 e = ring->start;
803 do {
804 struct skge_rx_desc *rd = e->desc;
805 rd->control = 0;
806 if (e->skb) {
807 pci_unmap_single(hw->pdev,
808 pci_unmap_addr(e, mapaddr),
809 pci_unmap_len(e, maplen),
810 PCI_DMA_FROMDEVICE);
811 dev_kfree_skb(e->skb);
812 e->skb = NULL;
813 }
814 } while ((e = e->next) != ring->start);
815 }
818 /* Allocate buffers for receive ring
819 * For receive: to_clean is next received frame.
820 */
821 static int skge_rx_fill(struct skge_port *skge)
822 {
823 struct skge_ring *ring = &skge->rx_ring;
824 struct skge_element *e;
826 e = ring->start;
827 do {
828 struct sk_buff *skb;
830 skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
831 if (!skb)
832 return -ENOMEM;
834 skb_reserve(skb, NET_IP_ALIGN);
835 skge_rx_setup(skge, e, skb, skge->rx_buf_size);
836 } while ( (e = e->next) != ring->start);
838 ring->to_clean = ring->start;
839 return 0;
840 }
842 static void skge_link_up(struct skge_port *skge)
843 {
844 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
845 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
847 netif_carrier_on(skge->netdev);
848 netif_wake_queue(skge->netdev);
850 if (netif_msg_link(skge))
851 printk(KERN_INFO PFX
852 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
853 skge->netdev->name, skge->speed,
854 skge->duplex == DUPLEX_FULL ? "full" : "half",
855 (skge->flow_control == FLOW_MODE_NONE) ? "none" :
856 (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
857 (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
858 (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
859 "unknown");
860 }
862 static void skge_link_down(struct skge_port *skge)
863 {
864 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
865 netif_carrier_off(skge->netdev);
866 netif_stop_queue(skge->netdev);
868 if (netif_msg_link(skge))
869 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
870 }
872 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
873 {
874 int i;
876 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
877 *val = xm_read16(hw, port, XM_PHY_DATA);
879 for (i = 0; i < PHY_RETRIES; i++) {
880 if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
881 goto ready;
882 udelay(1);
883 }
885 return -ETIMEDOUT;
886 ready:
887 *val = xm_read16(hw, port, XM_PHY_DATA);
889 return 0;
890 }
892 static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
893 {
894 u16 v = 0;
895 if (__xm_phy_read(hw, port, reg, &v))
896 printk(KERN_WARNING PFX "%s: phy read timed out\n",
897 hw->dev[port]->name);
898 return v;
899 }
901 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
902 {
903 int i;
905 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
906 for (i = 0; i < PHY_RETRIES; i++) {
907 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
908 goto ready;
909 udelay(1);
910 }
911 return -EIO;
913 ready:
914 xm_write16(hw, port, XM_PHY_DATA, val);
915 for (i = 0; i < PHY_RETRIES; i++) {
916 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
917 return 0;
918 udelay(1);
919 }
920 return -ETIMEDOUT;
921 }
923 static void genesis_init(struct skge_hw *hw)
924 {
925 /* set blink source counter */
926 skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
927 skge_write8(hw, B2_BSC_CTRL, BSC_START);
929 /* configure mac arbiter */
930 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
932 /* configure mac arbiter timeout values */
933 skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
934 skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
935 skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
936 skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
938 skge_write8(hw, B3_MA_RCINI_RX1, 0);
939 skge_write8(hw, B3_MA_RCINI_RX2, 0);
940 skge_write8(hw, B3_MA_RCINI_TX1, 0);
941 skge_write8(hw, B3_MA_RCINI_TX2, 0);
943 /* configure packet arbiter timeout */
944 skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
945 skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
946 skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
947 skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
948 skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
949 }
951 static void genesis_reset(struct skge_hw *hw, int port)
952 {
953 const u8 zero[8] = { 0 };
955 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
957 /* reset the statistics module */
958 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
959 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
960 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
961 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
962 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
964 /* disable Broadcom PHY IRQ */
965 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
967 xm_outhash(hw, port, XM_HSM, zero);
968 }
971 /* Convert mode to MII values */
972 static const u16 phy_pause_map[] = {
973 [FLOW_MODE_NONE] = 0,
974 [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
975 [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
976 [FLOW_MODE_REM_SEND] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
977 };
980 /* Check status of Broadcom phy link */
981 static void bcom_check_link(struct skge_hw *hw, int port)
982 {
983 struct net_device *dev = hw->dev[port];
984 struct skge_port *skge = netdev_priv(dev);
985 u16 status;
987 /* read twice because of latch */
988 (void) xm_phy_read(hw, port, PHY_BCOM_STAT);
989 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
991 if ((status & PHY_ST_LSYNC) == 0) {
992 u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
993 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
994 xm_write16(hw, port, XM_MMU_CMD, cmd);
995 /* dummy read to ensure writing */
996 (void) xm_read16(hw, port, XM_MMU_CMD);
998 if (netif_carrier_ok(dev))
999 skge_link_down(skge);
1000 } else {
1001 if (skge->autoneg == AUTONEG_ENABLE &&
1002 (status & PHY_ST_AN_OVER)) {
1003 u16 lpa = xm_phy_read(hw, port, PHY_BCOM_AUNE_LP);
1004 u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1006 if (lpa & PHY_B_AN_RF) {
1007 printk(KERN_NOTICE PFX "%s: remote fault\n",
1008 dev->name);
1009 return;
1012 /* Check Duplex mismatch */
1013 switch (aux & PHY_B_AS_AN_RES_MSK) {
1014 case PHY_B_RES_1000FD:
1015 skge->duplex = DUPLEX_FULL;
1016 break;
1017 case PHY_B_RES_1000HD:
1018 skge->duplex = DUPLEX_HALF;
1019 break;
1020 default:
1021 printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
1022 dev->name);
1023 return;
1027 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1028 switch (aux & PHY_B_AS_PAUSE_MSK) {
1029 case PHY_B_AS_PAUSE_MSK:
1030 skge->flow_control = FLOW_MODE_SYMMETRIC;
1031 break;
1032 case PHY_B_AS_PRR:
1033 skge->flow_control = FLOW_MODE_REM_SEND;
1034 break;
1035 case PHY_B_AS_PRT:
1036 skge->flow_control = FLOW_MODE_LOC_SEND;
1037 break;
1038 default:
1039 skge->flow_control = FLOW_MODE_NONE;
1042 skge->speed = SPEED_1000;
1045 if (!netif_carrier_ok(dev))
1046 genesis_link_up(skge);
1050 /* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
1051 * PHY on for 100 or 10 Mbit operation
1052 */
1053 static void bcom_phy_init(struct skge_port *skge, int jumbo)
1055 struct skge_hw *hw = skge->hw;
1056 int port = skge->port;
1057 int i;
1058 u16 id1, r, ext, ctl;
1060 /* magic workaround patterns for Broadcom */
1061 static const struct {
1062 u16 reg;
1063 u16 val;
1064 } A1hack[] = {
1065 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
1066 { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
1067 { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
1068 { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
1069 }, C0hack[] = {
1070 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
1071 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1072 };
1074 /* read Id from external PHY (all have the same address) */
1075 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1077 /* Optimize MDIO transfer by suppressing preamble. */
1078 r = xm_read16(hw, port, XM_MMU_CMD);
1079 r |= XM_MMU_NO_PRE;
1080 xm_write16(hw, port, XM_MMU_CMD,r);
1082 switch (id1) {
1083 case PHY_BCOM_ID1_C0:
1084 /*
1085 * Workaround BCOM Errata for the C0 type.
1086 * Write magic patterns to reserved registers.
1087 */
1088 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1089 xm_phy_write(hw, port,
1090 C0hack[i].reg, C0hack[i].val);
1092 break;
1093 case PHY_BCOM_ID1_A1:
1094 /*
1095 * Workaround BCOM Errata for the A1 type.
1096 * Write magic patterns to reserved registers.
1097 */
1098 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1099 xm_phy_write(hw, port,
1100 A1hack[i].reg, A1hack[i].val);
1101 break;
1104 /*
1105 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1106 * Disable Power Management after reset.
1107 */
1108 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1109 r |= PHY_B_AC_DIS_PM;
1110 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
1112 /* Dummy read */
1113 xm_read16(hw, port, XM_ISRC);
1115 ext = PHY_B_PEC_EN_LTR; /* enable tx led */
1116 ctl = PHY_CT_SP1000; /* always 1000mbit */
1118 if (skge->autoneg == AUTONEG_ENABLE) {
1119 /*
1120 * Workaround BCOM Errata #1 for the C5 type.
1121 * 1000Base-T Link Acquisition Failure in Slave Mode
1122 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1123 */
1124 u16 adv = PHY_B_1000C_RD;
1125 if (skge->advertising & ADVERTISED_1000baseT_Half)
1126 adv |= PHY_B_1000C_AHD;
1127 if (skge->advertising & ADVERTISED_1000baseT_Full)
1128 adv |= PHY_B_1000C_AFD;
1129 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
1131 ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1132 } else {
1133 if (skge->duplex == DUPLEX_FULL)
1134 ctl |= PHY_CT_DUP_MD;
1135 /* Force to slave */
1136 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
1139 /* Set autonegotiation pause parameters */
1140 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
1141 phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
1143 /* Handle Jumbo frames */
1144 if (jumbo) {
1145 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1146 PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
1148 ext |= PHY_B_PEC_HIGH_LA;
1152 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
1153 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
1155 /* Use link status change interrupt */
1156 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1158 bcom_check_link(hw, port);
1161 static void genesis_mac_init(struct skge_hw *hw, int port)
1163 struct net_device *dev = hw->dev[port];
1164 struct skge_port *skge = netdev_priv(dev);
1165 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1166 int i;
1167 u32 r;
1168 const u8 zero[6] = { 0 };
1170 for (i = 0; i < 10; i++) {
1171 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
1172 MFF_SET_MAC_RST);
1173 if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
1174 goto reset_ok;
1175 udelay(1);
1178 printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
1180 reset_ok:
1181 /* Unreset the XMAC. */
1182 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1184 /*
1185 * Perform additional initialization for external PHYs,
1186 * namely for the 1000baseTX cards that use the XMAC's
1187 * GMII mode.
1188 */
1189 /* Take external Phy out of reset */
1190 r = skge_read32(hw, B2_GP_IO);
1191 if (port == 0)
1192 r |= GP_DIR_0|GP_IO_0;
1193 else
1194 r |= GP_DIR_2|GP_IO_2;
1196 skge_write32(hw, B2_GP_IO, r);
1199 /* Enable GMII interface */
1200 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1202 bcom_phy_init(skge, jumbo);
1204 /* Set Station Address */
1205 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
1207 /* We don't use match addresses so clear */
1208 for (i = 1; i < 16; i++)
1209 xm_outaddr(hw, port, XM_EXM(i), zero);
1211 /* Clear MIB counters */
1212 xm_write16(hw, port, XM_STAT_CMD,
1213 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1214 /* Clear two times according to Errata #3 */
1215 xm_write16(hw, port, XM_STAT_CMD,
1216 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1218 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1219 xm_write16(hw, port, XM_RX_HI_WM, 1450);
1221 /* We don't need the FCS appended to the packet. */
1222 r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
1223 if (jumbo)
1224 r |= XM_RX_BIG_PK_OK;
1226 if (skge->duplex == DUPLEX_HALF) {
1227 /*
1228 * If in manual half duplex mode, the other side might be in
1229 * full duplex mode, so ignore the absence of a carrier extension
1230 * on received frames
1231 */
1232 r |= XM_RX_DIS_CEXT;
1234 xm_write16(hw, port, XM_RX_CMD, r);
1237 /* We want short frames padded to 60 bytes. */
1238 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
1240 /*
1241 * Bump up the transmit threshold. This helps hold off transmit
1242 * underruns when we're blasting traffic from both ports at once.
1243 */
1244 xm_write16(hw, port, XM_TX_THR, 512);
1246 /*
1247 * Enable the reception of all error frames. This is
1248 * a necessary evil due to the design of the XMAC. The
1249 * XMAC's receive FIFO is only 8K in size, however jumbo
1250 * frames can be up to 9000 bytes in length. When bad
1251 * frame filtering is enabled, the XMAC's RX FIFO operates
1252 * in 'store and forward' mode. For this to work, the
1253 * entire frame has to fit into the FIFO, but that means
1254 * that jumbo frames larger than 8192 bytes will be
1255 * truncated. Disabling all bad frame filtering causes
1256 * the RX FIFO to operate in streaming mode, in which
1257 * case the XMAC will start transferring frames out of the
1258 * RX FIFO as soon as the FIFO threshold is reached.
1259 */
1260 xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
1263 /*
1264 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1265 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1266 * and 'Octets Rx OK Hi Cnt Ov'.
1267 */
1268 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
1270 /*
1271 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1272 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1273 * and 'Octets Tx OK Hi Cnt Ov'.
1274 */
1275 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
1277 /* Configure MAC arbiter */
1278 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
1280 /* configure timeout values */
1281 skge_write8(hw, B3_MA_TOINI_RX1, 72);
1282 skge_write8(hw, B3_MA_TOINI_RX2, 72);
1283 skge_write8(hw, B3_MA_TOINI_TX1, 72);
1284 skge_write8(hw, B3_MA_TOINI_TX2, 72);
1286 skge_write8(hw, B3_MA_RCINI_RX1, 0);
1287 skge_write8(hw, B3_MA_RCINI_RX2, 0);
1288 skge_write8(hw, B3_MA_RCINI_TX1, 0);
1289 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1291 /* Configure Rx MAC FIFO */
1292 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1293 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1294 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
1296 /* Configure Tx MAC FIFO */
1297 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1298 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1299 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
1301 if (jumbo) {
1302 /* Enable frame flushing if jumbo frames used */
1303 skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
1304 } else {
1305 /* enable timeout timers when using normal (non-jumbo) frames */
1306 skge_write16(hw, B3_PA_CTRL,
1307 (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
1311 static void genesis_stop(struct skge_port *skge)
1313 struct skge_hw *hw = skge->hw;
1314 int port = skge->port;
1315 u32 reg;
1317 genesis_reset(hw, port);
1319 /* Clear Tx packet arbiter timeout IRQ */
1320 skge_write16(hw, B3_PA_CTRL,
1321 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
1323 /*
1324 * If the transfer sticks at the MAC the STOP command will not
1325 * terminate if we don't flush the XMAC's transmit FIFO !
1326 */
1327 xm_write32(hw, port, XM_MODE,
1328 xm_read32(hw, port, XM_MODE)|XM_MD_FTF);
1331 /* Reset the MAC */
1332 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
1334 /* For external PHYs there must be special handling */
1335 reg = skge_read32(hw, B2_GP_IO);
1336 if (port == 0) {
1337 reg |= GP_DIR_0;
1338 reg &= ~GP_IO_0;
1339 } else {
1340 reg |= GP_DIR_2;
1341 reg &= ~GP_IO_2;
1343 skge_write32(hw, B2_GP_IO, reg);
1344 skge_read32(hw, B2_GP_IO);
1346 xm_write16(hw, port, XM_MMU_CMD,
1347 xm_read16(hw, port, XM_MMU_CMD)
1348 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1350 xm_read16(hw, port, XM_MMU_CMD);
1354 static void genesis_get_stats(struct skge_port *skge, u64 *data)
1356 struct skge_hw *hw = skge->hw;
1357 int port = skge->port;
1358 int i;
1359 unsigned long timeout = jiffies + HZ;
1361 xm_write16(hw, port,
1362 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
1364 /* wait for update to complete */
1365 while (xm_read16(hw, port, XM_STAT_CMD)
1366 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
1367 if (time_after(jiffies, timeout))
1368 break;
1369 udelay(10);
1372 /* special case for 64 bit octet counter */
1373 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
1374 | xm_read32(hw, port, XM_TXO_OK_LO);
1375 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
1376 | xm_read32(hw, port, XM_RXO_OK_LO);
1378 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1379 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
1382 static void genesis_mac_intr(struct skge_hw *hw, int port)
1384 struct skge_port *skge = netdev_priv(hw->dev[port]);
1385 u16 status = xm_read16(hw, port, XM_ISRC);
1387 if (netif_msg_intr(skge))
1388 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1389 skge->netdev->name, status);
1391 if (status & XM_IS_TXF_UR) {
1392 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1393 ++skge->net_stats.tx_fifo_errors;
1395 if (status & XM_IS_RXF_OV) {
1396 xm_write32(hw, port, XM_MODE, XM_MD_FRF);
1397 ++skge->net_stats.rx_fifo_errors;
1401 static void genesis_link_up(struct skge_port *skge)
1403 struct skge_hw *hw = skge->hw;
1404 int port = skge->port;
1405 u16 cmd;
1406 u32 mode, msk;
1408 cmd = xm_read16(hw, port, XM_MMU_CMD);
1410 /*
1411 * Enabling pause frame reception is required for 1000BT
1412 * because the XMAC is not reset when the link goes down
1413 */
1414 if (skge->flow_control == FLOW_MODE_NONE ||
1415 skge->flow_control == FLOW_MODE_LOC_SEND)
1416 /* Disable Pause Frame Reception */
1417 cmd |= XM_MMU_IGN_PF;
1418 else
1419 /* Enable Pause Frame Reception */
1420 cmd &= ~XM_MMU_IGN_PF;
1422 xm_write16(hw, port, XM_MMU_CMD, cmd);
1424 mode = xm_read32(hw, port, XM_MODE);
1425 if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
1426 skge->flow_control == FLOW_MODE_LOC_SEND) {
1427 /*
1428 * Configure Pause Frame Generation
1429 * Use internal and external Pause Frame Generation.
1430 * Sending pause frames is edge triggered.
1431 * Send a Pause frame with the maximum pause time if an
1432 * internal or external FIFO full condition occurs.
1433 * Send a zero pause time frame to re-start transmission.
1434 */
1435 /* XM_PAUSE_DA = '010000C28001' (default) */
1436 /* XM_MAC_PTIME = 0xffff (maximum) */
1437 /* remember this value is defined in big endian (!) */
1438 xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1440 mode |= XM_PAUSE_MODE;
1441 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1442 } else {
1443 /*
1444 * Disabling pause frame generation is required for 1000BT
1445 * because the XMAC is not reset when the link goes down
1446 */
1447 /* Disable Pause Mode in Mode Register */
1448 mode &= ~XM_PAUSE_MODE;
1450 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1453 xm_write32(hw, port, XM_MODE, mode);
1455 msk = XM_DEF_MSK;
1456 /* disable GP0 interrupt bit for external Phy */
1457 msk |= XM_IS_INP_ASS;
1459 xm_write16(hw, port, XM_IMSK, msk);
1460 xm_read16(hw, port, XM_ISRC);
1462 /* get MMU Command Reg. */
1463 cmd = xm_read16(hw, port, XM_MMU_CMD);
1464 if (skge->duplex == DUPLEX_FULL)
1465 cmd |= XM_MMU_GMII_FD;
1467 /*
1468 * Workaround BCOM Errata (#10523) for all BCom Phys
1469 * Enable Power Management after link up
1470 */
1471 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1472 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1473 & ~PHY_B_AC_DIS_PM);
1474 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1476 /* enable Rx/Tx */
1477 xm_write16(hw, port, XM_MMU_CMD,
1478 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1479 skge_link_up(skge);
1483 static inline void bcom_phy_intr(struct skge_port *skge)
1485 struct skge_hw *hw = skge->hw;
1486 int port = skge->port;
1487 u16 isrc;
1489 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1490 if (netif_msg_intr(skge))
1491 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
1492 skge->netdev->name, isrc);
1494 if (isrc & PHY_B_IS_PSE)
1495 printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
1496 hw->dev[port]->name);
1498 /* Workaround BCom Errata:
1499 * enable and disable loopback mode if "NO HCD" occurs.
1500 */
1501 if (isrc & PHY_B_IS_NO_HDCL) {
1502 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
1503 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1504 ctrl | PHY_CT_LOOP);
1505 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1506 ctrl & ~PHY_CT_LOOP);
1509 if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1510 bcom_check_link(hw, port);
1514 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1516 int i;
1518 gma_write16(hw, port, GM_SMI_DATA, val);
1519 gma_write16(hw, port, GM_SMI_CTRL,
1520 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1521 for (i = 0; i < PHY_RETRIES; i++) {
1522 udelay(1);
1524 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1525 return 0;
1528 printk(KERN_WARNING PFX "%s: phy write timeout\n",
1529 hw->dev[port]->name);
1530 return -EIO;
1533 static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1535 int i;
1537 gma_write16(hw, port, GM_SMI_CTRL,
1538 GM_SMI_CT_PHY_AD(hw->phy_addr)
1539 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1541 for (i = 0; i < PHY_RETRIES; i++) {
1542 udelay(1);
1543 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1544 goto ready;
1547 return -ETIMEDOUT;
1548 ready:
1549 *val = gma_read16(hw, port, GM_SMI_DATA);
1550 return 0;
1553 static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1555 u16 v = 0;
1556 if (__gm_phy_read(hw, port, reg, &v))
1557 printk(KERN_WARNING PFX "%s: phy read timeout\n",
1558 hw->dev[port]->name);
1559 return v;
1562 /* Marvell Phy Initialization */
1563 static void yukon_init(struct skge_hw *hw, int port)
1565 struct skge_port *skge = netdev_priv(hw->dev[port]);
1566 u16 ctrl, ct1000, adv;
1568 if (skge->autoneg == AUTONEG_ENABLE) {
1569 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1571 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1572 PHY_M_EC_MAC_S_MSK);
1573 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
1575 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1577 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
1580 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1581 if (skge->autoneg == AUTONEG_DISABLE)
1582 ctrl &= ~PHY_CT_ANE;
1584 ctrl |= PHY_CT_RESET;
1585 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1587 ctrl = 0;
1588 ct1000 = 0;
1589 adv = PHY_AN_CSMA;
1591 if (skge->autoneg == AUTONEG_ENABLE) {
1592 if (hw->copper) {
1593 if (skge->advertising & ADVERTISED_1000baseT_Full)
1594 ct1000 |= PHY_M_1000C_AFD;
1595 if (skge->advertising & ADVERTISED_1000baseT_Half)
1596 ct1000 |= PHY_M_1000C_AHD;
1597 if (skge->advertising & ADVERTISED_100baseT_Full)
1598 adv |= PHY_M_AN_100_FD;
1599 if (skge->advertising & ADVERTISED_100baseT_Half)
1600 adv |= PHY_M_AN_100_HD;
1601 if (skge->advertising & ADVERTISED_10baseT_Full)
1602 adv |= PHY_M_AN_10_FD;
1603 if (skge->advertising & ADVERTISED_10baseT_Half)
1604 adv |= PHY_M_AN_10_HD;
1605 } else /* special defines for FIBER (88E1011S only) */
1606 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
1608 /* Set Flow-control capabilities */
1609 adv |= phy_pause_map[skge->flow_control];
1611 /* Restart Auto-negotiation */
1612 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1613 } else {
1614 /* forced speed/duplex settings */
1615 ct1000 = PHY_M_1000C_MSE;
1617 if (skge->duplex == DUPLEX_FULL)
1618 ctrl |= PHY_CT_DUP_MD;
1620 switch (skge->speed) {
1621 case SPEED_1000:
1622 ctrl |= PHY_CT_SP1000;
1623 break;
1624 case SPEED_100:
1625 ctrl |= PHY_CT_SP100;
1626 break;
1629 ctrl |= PHY_CT_RESET;
1632 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
1634 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
1635 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1637 /* Enable phy interrupt on autonegotiation complete (or link up) */
1638 if (skge->autoneg == AUTONEG_ENABLE)
1639 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
1640 else
1641 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
1644 static void yukon_reset(struct skge_hw *hw, int port)
1646 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1647 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1648 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
1649 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
1650 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
1652 gma_write16(hw, port, GM_RX_CTRL,
1653 gma_read16(hw, port, GM_RX_CTRL)
1654 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1657 /* Apparently, early versions of Yukon-Lite had wrong chip_id? */
1658 static int is_yukon_lite_a0(struct skge_hw *hw)
1660 u32 reg;
1661 int ret;
1663 if (hw->chip_id != CHIP_ID_YUKON)
1664 return 0;
1666 reg = skge_read32(hw, B2_FAR);
1667 skge_write8(hw, B2_FAR + 3, 0xff);
1668 ret = (skge_read8(hw, B2_FAR + 3) != 0);
1669 skge_write32(hw, B2_FAR, reg);
1670 return ret;
1673 static void yukon_mac_init(struct skge_hw *hw, int port)
1675 struct skge_port *skge = netdev_priv(hw->dev[port]);
1676 int i;
1677 u32 reg;
1678 const u8 *addr = hw->dev[port]->dev_addr;
1680 /* WA code for COMA mode -- set PHY reset */
1681 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1682 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1683 reg = skge_read32(hw, B2_GP_IO);
1684 reg |= GP_DIR_9 | GP_IO_9;
1685 skge_write32(hw, B2_GP_IO, reg);
1688 /* hard reset */
1689 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1690 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1692 /* WA code for COMA mode -- clear PHY reset */
1693 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1694 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1695 reg = skge_read32(hw, B2_GP_IO);
1696 reg |= GP_DIR_9;
1697 reg &= ~GP_IO_9;
1698 skge_write32(hw, B2_GP_IO, reg);
1701 /* Set hardware config mode */
1702 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
1703 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
1704 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1706 /* Clear GMC reset */
1707 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1708 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1709 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1711 if (skge->autoneg == AUTONEG_DISABLE) {
1712 reg = GM_GPCR_AU_ALL_DIS;
1713 gma_write16(hw, port, GM_GP_CTRL,
1714 gma_read16(hw, port, GM_GP_CTRL) | reg);
1716 switch (skge->speed) {
1717 case SPEED_1000:
1718 reg &= ~GM_GPCR_SPEED_100;
1719 reg |= GM_GPCR_SPEED_1000;
1720 break;
1721 case SPEED_100:
1722 reg &= ~GM_GPCR_SPEED_1000;
1723 reg |= GM_GPCR_SPEED_100;
1724 break;
1725 case SPEED_10:
1726 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
1727 break;
1730 if (skge->duplex == DUPLEX_FULL)
1731 reg |= GM_GPCR_DUP_FULL;
1732 } else
1733 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1735 switch (skge->flow_control) {
1736 case FLOW_MODE_NONE:
1737 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1738 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1739 break;
1740 case FLOW_MODE_LOC_SEND:
1741 /* disable Rx flow-control */
1742 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1745 gma_write16(hw, port, GM_GP_CTRL, reg);
1746 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
1748 yukon_init(hw, port);
1750 /* MIB clear */
1751 reg = gma_read16(hw, port, GM_PHY_ADDR);
1752 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
1754 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
1755 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1756 gma_write16(hw, port, GM_PHY_ADDR, reg);
1758 /* transmit control */
1759 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
1761 /* receive control reg: unicast + multicast + no FCS */
1762 gma_write16(hw, port, GM_RX_CTRL,
1763 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
1765 /* transmit flow control */
1766 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
1768 /* transmit parameter */
1769 gma_write16(hw, port, GM_TX_PARAM,
1770 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
1771 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
1772 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
1774 /* serial mode register */
1775 reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
1776 if (hw->dev[port]->mtu > 1500)
1777 reg |= GM_SMOD_JUMBO_ENA;
1779 gma_write16(hw, port, GM_SERIAL_MODE, reg);
1781 /* physical address: used for pause frames */
1782 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
1783 /* virtual address for data */
1784 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
1786 /* enable interrupt mask for counter overflows */
1787 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
1788 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
1789 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
1791 /* Initialize Mac Fifo */
1793 /* Configure Rx MAC FIFO */
1794 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1795 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1797 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
1798 if (is_yukon_lite_a0(hw))
1799 reg &= ~GMF_RX_F_FL_ON;
1801 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1802 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1803 /*
1804 * Because Pause Packet Truncation in the GMAC is not working,
1805 * we have to increase the Flush Threshold to 64 bytes in order
1806 * to flush pause packets from the Rx FIFO on Yukon-1.
1807 */
1808 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
1810 /* Configure Tx MAC FIFO */
1811 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1812 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
1815 /* Go into power down mode */
1816 static void yukon_suspend(struct skge_hw *hw, int port)
1818 u16 ctrl;
1820 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
1821 ctrl |= PHY_M_PC_POL_R_DIS;
1822 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
1824 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1825 ctrl |= PHY_CT_RESET;
1826 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1828 /* switch IEEE compatible power down mode on */
1829 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1830 ctrl |= PHY_CT_PDOWN;
1831 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1834 static void yukon_stop(struct skge_port *skge)
1836 struct skge_hw *hw = skge->hw;
1837 int port = skge->port;
1839 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
1840 yukon_reset(hw, port);
1842 gma_write16(hw, port, GM_GP_CTRL,
1843 gma_read16(hw, port, GM_GP_CTRL)
1844 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
1845 gma_read16(hw, port, GM_GP_CTRL);
1847 yukon_suspend(hw, port);
1849 /* set GPHY Control reset */
1850 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1851 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1854 static void yukon_get_stats(struct skge_port *skge, u64 *data)
1856 struct skge_hw *hw = skge->hw;
1857 int port = skge->port;
1858 int i;
1860 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
1861 | gma_read32(hw, port, GM_TXO_OK_LO);
1862 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
1863 | gma_read32(hw, port, GM_RXO_OK_LO);
1865 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1866 data[i] = gma_read32(hw, port,
1867 skge_stats[i].gma_offset);
1870 static void yukon_mac_intr(struct skge_hw *hw, int port)
1872 struct net_device *dev = hw->dev[port];
1873 struct skge_port *skge = netdev_priv(dev);
1874 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
1876 if (netif_msg_intr(skge))
1877 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1878 dev->name, status);
1880 if (status & GM_IS_RX_FF_OR) {
1881 ++skge->net_stats.rx_fifo_errors;
1882 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
1885 if (status & GM_IS_TX_FF_UR) {
1886 ++skge->net_stats.tx_fifo_errors;
1887 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
1892 static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
1894 switch (aux & PHY_M_PS_SPEED_MSK) {
1895 case PHY_M_PS_SPEED_1000:
1896 return SPEED_1000;
1897 case PHY_M_PS_SPEED_100:
1898 return SPEED_100;
1899 default:
1900 return SPEED_10;
1904 static void yukon_link_up(struct skge_port *skge)
1906 struct skge_hw *hw = skge->hw;
1907 int port = skge->port;
1908 u16 reg;
1910 /* Enable Transmit FIFO Underrun */
1911 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1913 reg = gma_read16(hw, port, GM_GP_CTRL);
1914 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1915 reg |= GM_GPCR_DUP_FULL;
1917 /* enable Rx/Tx */
1918 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1919 gma_write16(hw, port, GM_GP_CTRL, reg);
1921 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
1922 skge_link_up(skge);
1925 static void yukon_link_down(struct skge_port *skge)
1927 struct skge_hw *hw = skge->hw;
1928 int port = skge->port;
1929 u16 ctrl;
1931 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1933 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1934 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1935 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1937 if (skge->flow_control == FLOW_MODE_REM_SEND) {
1938 /* restore Asymmetric Pause bit */
1939 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
1940 gm_phy_read(hw, port,
1941 PHY_MARV_AUNE_ADV)
1942 | PHY_M_AN_ASP);
1946 yukon_reset(hw, port);
1947 skge_link_down(skge);
1949 yukon_init(hw, port);
1952 static void yukon_phy_intr(struct skge_port *skge)
1954 struct skge_hw *hw = skge->hw;
1955 int port = skge->port;
1956 const char *reason = NULL;
1957 u16 istatus, phystat;
1959 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1960 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1962 if (netif_msg_intr(skge))
1963 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
1964 skge->netdev->name, istatus, phystat);
1966 if (istatus & PHY_M_IS_AN_COMPL) {
1967 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
1968 & PHY_M_AN_RF) {
1969 reason = "remote fault";
1970 goto failed;
1973 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
1974 reason = "master/slave fault";
1975 goto failed;
1978 if (!(phystat & PHY_M_PS_SPDUP_RES)) {
1979 reason = "speed/duplex";
1980 goto failed;
1983 skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
1984 ? DUPLEX_FULL : DUPLEX_HALF;
1985 skge->speed = yukon_speed(hw, phystat);
1987 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1988 switch (phystat & PHY_M_PS_PAUSE_MSK) {
1989 case PHY_M_PS_PAUSE_MSK:
1990 skge->flow_control = FLOW_MODE_SYMMETRIC;
1991 break;
1992 case PHY_M_PS_RX_P_EN:
1993 skge->flow_control = FLOW_MODE_REM_SEND;
1994 break;
1995 case PHY_M_PS_TX_P_EN:
1996 skge->flow_control = FLOW_MODE_LOC_SEND;
1997 break;
1998 default:
1999 skge->flow_control = FLOW_MODE_NONE;
2002 if (skge->flow_control == FLOW_MODE_NONE ||
2003 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2004 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2005 else
2006 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2007 yukon_link_up(skge);
2008 return;
2011 if (istatus & PHY_M_IS_LSP_CHANGE)
2012 skge->speed = yukon_speed(hw, phystat);
2014 if (istatus & PHY_M_IS_DUP_CHANGE)
2015 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
2016 if (istatus & PHY_M_IS_LST_CHANGE) {
2017 if (phystat & PHY_M_PS_LINK_UP)
2018 yukon_link_up(skge);
2019 else
2020 yukon_link_down(skge);
2022 return;
2023 failed:
2024 printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
2025 skge->netdev->name, reason);
2027 /* XXX restart autonegotiation? */
2030 static void skge_phy_reset(struct skge_port *skge)
2032 struct skge_hw *hw = skge->hw;
2033 int port = skge->port;
2035 netif_stop_queue(skge->netdev);
2036 netif_carrier_off(skge->netdev);
2038 mutex_lock(&hw->phy_mutex);
2039 if (hw->chip_id == CHIP_ID_GENESIS) {
2040 genesis_reset(hw, port);
2041 genesis_mac_init(hw, port);
2042 } else {
2043 yukon_reset(hw, port);
2044 yukon_init(hw, port);
2046 mutex_unlock(&hw->phy_mutex);
2049 /* Basic MII support */
2050 static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2052 struct mii_ioctl_data *data = if_mii(ifr);
2053 struct skge_port *skge = netdev_priv(dev);
2054 struct skge_hw *hw = skge->hw;
2055 int err = -EOPNOTSUPP;
2057 if (!netif_running(dev))
2058 return -ENODEV; /* Phy still in reset */
2060 switch(cmd) {
2061 case SIOCGMIIPHY:
2062 data->phy_id = hw->phy_addr;
2064 /* fallthru */
2065 case SIOCGMIIREG: {
2066 u16 val = 0;
2067 mutex_lock(&hw->phy_mutex);
2068 if (hw->chip_id == CHIP_ID_GENESIS)
2069 err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
2070 else
2071 err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
2072 mutex_unlock(&hw->phy_mutex);
2073 data->val_out = val;
2074 break;
2077 case SIOCSMIIREG:
2078 if (!capable(CAP_NET_ADMIN))
2079 return -EPERM;
2081 mutex_lock(&hw->phy_mutex);
2082 if (hw->chip_id == CHIP_ID_GENESIS)
2083 err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
2084 data->val_in);
2085 else
2086 err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
2087 data->val_in);
2088 mutex_unlock(&hw->phy_mutex);
2089 break;
2091 return err;
2094 static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
2096 u32 end;
2098 start /= 8;
2099 len /= 8;
2100 end = start + len - 1;
2102 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
2103 skge_write32(hw, RB_ADDR(q, RB_START), start);
2104 skge_write32(hw, RB_ADDR(q, RB_WP), start);
2105 skge_write32(hw, RB_ADDR(q, RB_RP), start);
2106 skge_write32(hw, RB_ADDR(q, RB_END), end);
2108 if (q == Q_R1 || q == Q_R2) {
2109 /* Set thresholds on receive queues */
2110 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
2111 start + (2*len)/3);
2112 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
2113 start + (len/3));
2114 } else {
2115 /* Enable store & forward on Tx queues because
2116 * the Tx FIFO is only 4K on Genesis and 1K on Yukon
2117 */
2118 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
2121 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
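/*
 * Editor's illustration (not part of the driver): for a single-port Yukon
 * that reports 128 KB of packet RAM (ram_size = 0x20000 in skge_reset()),
 * skge_up() below computes chunk = 0x20000 / ((1 + 1) * 2) = 0x8000 and
 * calls skge_ramset() with start = 0 for the receive queue and
 * start = 0x8000 for the transmit queue, each with len = 0x8000 bytes.
 * Start and length are divided by 8, presumably because the RAM buffer
 * registers count in 8-byte units, so the receive queue ends up with
 * start = 0, len = 0x1000, end = 0xfff, and its pause thresholds land at
 * RB_RX_UTPP = 2 * 0x1000 / 3 = 0xaaa and RB_RX_LTPP = 0x1000 / 3 = 0x555.
 */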
2124 /* Setup Bus Memory Interface */
2125 static void skge_qset(struct skge_port *skge, u16 q,
2126 const struct skge_element *e)
2128 struct skge_hw *hw = skge->hw;
2129 u32 watermark = 0x600;
2130 u64 base = skge->dma + (e->desc - skge->mem);
2132 /* optimization to reduce window on 32-bit/33 MHz */
2133 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
2134 watermark /= 2;
2136 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
2137 skge_write32(hw, Q_ADDR(q, Q_F), watermark);
2138 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
2139 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
2142 static int skge_up(struct net_device *dev)
2144 struct skge_port *skge = netdev_priv(dev);
2145 struct skge_hw *hw = skge->hw;
2146 int port = skge->port;
2147 u32 chunk, ram_addr;
2148 size_t rx_size, tx_size;
2149 int err;
2151 if (netif_msg_ifup(skge))
2152 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2154 if (dev->mtu > RX_BUF_SIZE)
2155 skge->rx_buf_size = dev->mtu + ETH_HLEN;
2156 else
2157 skge->rx_buf_size = RX_BUF_SIZE;
2160 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2161 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2162 skge->mem_size = tx_size + rx_size;
2163 skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
2164 if (!skge->mem)
2165 return -ENOMEM;
2167 BUG_ON(skge->dma & 7);
2169 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
2170 printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
2171 err = -EINVAL;
2172 goto free_pci_mem;
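/*
 * Editor's note (illustrative, not from the original sources): the test
 * above catches a descriptor area that straddles a 4 GB boundary by
 * comparing the upper 32 bits of the start address and of the address just
 * past the area.  For example, dma = 0xfffff000 with mem_size = 0x2000
 * gives high words 0x0 and 0x1, so the allocation is rejected.  This is
 * presumably required because skge_qset() programs the queue's upper DMA
 * address (Q_DA_H) only once per ring, so every descriptor must share the
 * same upper 32 bits.
 */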
2175 memset(skge->mem, 0, skge->mem_size);
2177 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
2178 if (err)
2179 goto free_pci_mem;
2181 err = skge_rx_fill(skge);
2182 if (err)
2183 goto free_rx_ring;
2185 err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
2186 skge->dma + rx_size);
2187 if (err)
2188 goto free_rx_ring;
2190 /* Initialize MAC */
2191 mutex_lock(&hw->phy_mutex);
2192 if (hw->chip_id == CHIP_ID_GENESIS)
2193 genesis_mac_init(hw, port);
2194 else
2195 yukon_mac_init(hw, port);
2196 mutex_unlock(&hw->phy_mutex);
2198 /* Configure RAMbuffers */
2199 chunk = hw->ram_size / ((hw->ports + 1)*2);
2200 ram_addr = hw->ram_offset + 2 * chunk * port;
2202 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
2203 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
2205 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
2206 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
2207 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
2209 /* Start receiver BMU */
2210 wmb();
2211 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2212 skge_led(skge, LED_MODE_ON);
2214 netif_poll_enable(dev);
2215 return 0;
2217 free_rx_ring:
2218 skge_rx_clean(skge);
2219 kfree(skge->rx_ring.start);
2220 free_pci_mem:
2221 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2222 skge->mem = NULL;
2224 return err;
2227 static int skge_down(struct net_device *dev)
2229 struct skge_port *skge = netdev_priv(dev);
2230 struct skge_hw *hw = skge->hw;
2231 int port = skge->port;
2233 if (skge->mem == NULL)
2234 return 0;
2236 if (netif_msg_ifdown(skge))
2237 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
2239 netif_stop_queue(dev);
2241 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
2242 if (hw->chip_id == CHIP_ID_GENESIS)
2243 genesis_stop(skge);
2244 else
2245 yukon_stop(skge);
2247 /* Stop transmitter */
2248 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2249 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2250 RB_RST_SET|RB_DIS_OP_MD);
2253 /* Disable Force Sync bit and Enable Alloc bit */
2254 skge_write8(hw, SK_REG(port, TXA_CTRL),
2255 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2257 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
2258 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2259 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
2261 /* Reset PCI FIFO */
2262 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
2263 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2265 /* Reset the RAM Buffer async Tx queue */
2266 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
2267 /* stop receiver */
2268 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
2269 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
2270 RB_RST_SET|RB_DIS_OP_MD);
2271 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2273 if (hw->chip_id == CHIP_ID_GENESIS) {
2274 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2275 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
2276 } else {
2277 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2278 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2281 skge_led(skge, LED_MODE_OFF);
2283 netif_poll_disable(dev);
2284 skge_tx_clean(skge);
2285 skge_rx_clean(skge);
2287 kfree(skge->rx_ring.start);
2288 kfree(skge->tx_ring.start);
2289 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2290 skge->mem = NULL;
2291 return 0;
2294 static inline int skge_avail(const struct skge_ring *ring)
2296 return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
2297 + (ring->to_clean - ring->to_use) - 1;
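/*
 * Worked example (editor's illustration, not part of the driver): with
 * count = 8, to_clean at slot 2 and to_use at slot 5, to_clean is not past
 * to_use, so the result is 8 + (2 - 5) - 1 = 4 free descriptors.  The
 * trailing "- 1" keeps one slot permanently unused, presumably so that a
 * completely full ring (to_use about to catch to_clean) can be told apart
 * from an empty one, where to_use == to_clean.
 */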
2300 static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2302 struct skge_port *skge = netdev_priv(dev);
2303 struct skge_hw *hw = skge->hw;
2304 struct skge_element *e;
2305 struct skge_tx_desc *td;
2306 int i;
2307 u32 control, len;
2308 u64 map;
2309 unsigned long flags;
2311 if (skb_padto(skb, ETH_ZLEN))
2312 return NETDEV_TX_OK;
2314 if (!spin_trylock_irqsave(&skge->tx_lock, flags))
2315 /* Collision - tell upper layer to requeue */
2316 return NETDEV_TX_LOCKED;
2318 if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
2319 if (!netif_queue_stopped(dev)) {
2320 netif_stop_queue(dev);
2322 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
2323 dev->name);
2325 spin_unlock_irqrestore(&skge->tx_lock, flags);
2326 return NETDEV_TX_BUSY;
2329 e = skge->tx_ring.to_use;
2330 td = e->desc;
2331 BUG_ON(td->control & BMU_OWN);
2332 e->skb = skb;
2333 len = skb_headlen(skb);
2334 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2335 pci_unmap_addr_set(e, mapaddr, map);
2336 pci_unmap_len_set(e, maplen, len);
2338 td->dma_lo = map;
2339 td->dma_hi = map >> 32;
2341 if (skb->ip_summed == CHECKSUM_HW) {
2342 int offset = skb->h.raw - skb->data;
2344 /* This seems backwards, but it is what the sk98lin
2345 * driver does. Looks like the hardware is wrong?
2346 */
2347 if (skb->h.ipiph->protocol == IPPROTO_UDP
2348 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2349 control = BMU_TCP_CHECK;
2350 else
2351 control = BMU_UDP_CHECK;
2353 td->csum_offs = 0;
2354 td->csum_start = offset;
2355 td->csum_write = offset + skb->csum;
2356 } else
2357 control = BMU_CHECK;
2359 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
2360 control |= BMU_EOF| BMU_IRQ_EOF;
2361 else {
2362 struct skge_tx_desc *tf = td;
2364 control |= BMU_STFWD;
2365 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2366 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2368 map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
2369 frag->size, PCI_DMA_TODEVICE);
2371 e = e->next;
2372 e->skb = skb;
2373 tf = e->desc;
2374 BUG_ON(tf->control & BMU_OWN);
2376 tf->dma_lo = map;
2377 tf->dma_hi = (u64) map >> 32;
2378 pci_unmap_addr_set(e, mapaddr, map);
2379 pci_unmap_len_set(e, maplen, frag->size);
2381 tf->control = BMU_OWN | BMU_SW | control | frag->size;
2383 tf->control |= BMU_EOF | BMU_IRQ_EOF;
2385 /* Make sure all the descriptors are written */
2386 wmb();
2387 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
2388 wmb();
2390 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
2392 if (unlikely(netif_msg_tx_queued(skge)))
2393 printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
2394 dev->name, e - skge->tx_ring.start, skb->len);
2396 skge->tx_ring.to_use = e->next;
2397 if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
2398 pr_debug("%s: transmit queue full\n", dev->name);
2399 netif_stop_queue(dev);
2402 spin_unlock_irqrestore(&skge->tx_lock, flags);
2404 dev->trans_start = jiffies;
2406 return NETDEV_TX_OK;
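/*
 * Editor's note on the hand-off in skge_xmit_frame() above (illustrative
 * summary, not from the original sources): fragment descriptors are marked
 * BMU_OWN as they are filled in, but the first descriptor's control word
 * (BMU_OWN | BMU_SW | BMU_STF | ...) is written only after the wmb() that
 * follows the fragment loop.  Since the BMU processes descriptors in ring
 * order, it cannot reach the fragment descriptors before the head
 * descriptor is handed over, so it never sees a partially built chain; the
 * second wmb() orders that final write before CSR_START kicks the transmit
 * BMU.
 */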
2410 /* Free resources associated with this ring element */
2411 static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
2412 u32 control)
2414 struct pci_dev *pdev = skge->hw->pdev;
2416 BUG_ON(!e->skb);
2418 /* skb header vs. fragment */
2419 if (control & BMU_STF)
2420 pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
2421 pci_unmap_len(e, maplen),
2422 PCI_DMA_TODEVICE);
2423 else
2424 pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
2425 pci_unmap_len(e, maplen),
2426 PCI_DMA_TODEVICE);
2428 if (control & BMU_EOF) {
2429 if (unlikely(netif_msg_tx_done(skge)))
2430 printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
2431 skge->netdev->name, e - skge->tx_ring.start);
2433 dev_kfree_skb_any(e->skb);
2435 e->skb = NULL;
2438 /* Free all buffers in transmit ring */
2439 static void skge_tx_clean(struct skge_port *skge)
2441 struct skge_element *e;
2442 unsigned long flags;
2444 spin_lock_irqsave(&skge->tx_lock, flags);
2445 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
2446 struct skge_tx_desc *td = e->desc;
2447 skge_tx_free(skge, e, td->control);
2448 td->control = 0;
2451 skge->tx_ring.to_clean = e;
2452 netif_wake_queue(skge->netdev);
2453 spin_unlock_irqrestore(&skge->tx_lock, flags);
2456 static void skge_tx_timeout(struct net_device *dev)
2458 struct skge_port *skge = netdev_priv(dev);
2460 if (netif_msg_timer(skge))
2461 printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
2463 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2464 skge_tx_clean(skge);
2467 static int skge_change_mtu(struct net_device *dev, int new_mtu)
2469 int err;
2471 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2472 return -EINVAL;
2474 if (!netif_running(dev)) {
2475 dev->mtu = new_mtu;
2476 return 0;
2479 skge_down(dev);
2481 dev->mtu = new_mtu;
2483 err = skge_up(dev);
2484 if (err)
2485 dev_close(dev);
2487 return err;
2490 static void genesis_set_multicast(struct net_device *dev)
2492 struct skge_port *skge = netdev_priv(dev);
2493 struct skge_hw *hw = skge->hw;
2494 int port = skge->port;
2495 int i, count = dev->mc_count;
2496 struct dev_mc_list *list = dev->mc_list;
2497 u32 mode;
2498 u8 filter[8];
2500 mode = xm_read32(hw, port, XM_MODE);
2501 mode |= XM_MD_ENA_HASH;
2502 if (dev->flags & IFF_PROMISC)
2503 mode |= XM_MD_ENA_PROM;
2504 else
2505 mode &= ~XM_MD_ENA_PROM;
2507 if (dev->flags & IFF_ALLMULTI)
2508 memset(filter, 0xff, sizeof(filter));
2509 else {
2510 memset(filter, 0, sizeof(filter));
2511 for (i = 0; list && i < count; i++, list = list->next) {
2512 u32 crc, bit;
2513 crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
2514 bit = ~crc & 0x3f;
2515 filter[bit/8] |= 1 << (bit%8);
2519 xm_write32(hw, port, XM_MODE, mode);
2520 xm_outhash(hw, port, XM_HSM, filter);
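/*
 * Worked example of the hash filter above (editor's illustration): for a
 * multicast address whose little-endian CRC-32 happens to be 0x6b8ce923,
 * bit = ~crc & 0x3f = 0x1c = 28, so filter[28 / 8] = filter[3] gets bit
 * 28 % 8 = 4 set.  Only the low six bits of the inverted CRC are used,
 * selecting one of the 64 hash buckets programmed into XM_HSM.
 */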
2523 static void yukon_set_multicast(struct net_device *dev)
2525 struct skge_port *skge = netdev_priv(dev);
2526 struct skge_hw *hw = skge->hw;
2527 int port = skge->port;
2528 struct dev_mc_list *list = dev->mc_list;
2529 u16 reg;
2530 u8 filter[8];
2532 memset(filter, 0, sizeof(filter));
2534 reg = gma_read16(hw, port, GM_RX_CTRL);
2535 reg |= GM_RXCR_UCF_ENA;
2537 if (dev->flags & IFF_PROMISC) /* promiscuous */
2538 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2539 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
2540 memset(filter, 0xff, sizeof(filter));
2541 else if (dev->mc_count == 0) /* no multicast */
2542 reg &= ~GM_RXCR_MCF_ENA;
2543 else {
2544 int i;
2545 reg |= GM_RXCR_MCF_ENA;
2547 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2548 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2549 filter[bit/8] |= 1 << (bit%8);
2554 gma_write16(hw, port, GM_MC_ADDR_H1,
2555 (u16)filter[0] | ((u16)filter[1] << 8));
2556 gma_write16(hw, port, GM_MC_ADDR_H2,
2557 (u16)filter[2] | ((u16)filter[3] << 8));
2558 gma_write16(hw, port, GM_MC_ADDR_H3,
2559 (u16)filter[4] | ((u16)filter[5] << 8));
2560 gma_write16(hw, port, GM_MC_ADDR_H4,
2561 (u16)filter[6] | ((u16)filter[7] << 8));
2563 gma_write16(hw, port, GM_RX_CTRL, reg);
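/*
 * Editor's illustration of the register packing above: the 64-bit hash
 * filter is written as four little-endian 16-bit halves, so with only
 * filter[0] = 0x01 and filter[1] = 0x80 set, GM_MC_ADDR_H1 receives 0x8001
 * and H2..H4 receive 0.  With IFF_PROMISC both unicast and multicast
 * filtering are turned off instead, and with IFF_ALLMULTI every filter
 * byte is 0xff so all 64 hash buckets match.
 */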
2566 static inline u16 phy_length(const struct skge_hw *hw, u32 status)
2568 if (hw->chip_id == CHIP_ID_GENESIS)
2569 return status >> XMR_FS_LEN_SHIFT;
2570 else
2571 return status >> GMR_FS_LEN_SHIFT;
2574 static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2576 if (hw->chip_id == CHIP_ID_GENESIS)
2577 return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
2578 else
2579 return (status & GMR_FS_ANY_ERR) ||
2580 (status & GMR_FS_RX_OK) == 0;
2584 /* Get receive buffer from descriptor.
2585 * Handles copy of small buffers and reallocation failures
2586 */
2587 static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2588 struct skge_element *e,
2589 u32 control, u32 status, u16 csum)
2591 struct sk_buff *skb;
2592 u16 len = control & BMU_BBC;
2594 if (unlikely(netif_msg_rx_status(skge)))
2595 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2596 skge->netdev->name, e - skge->rx_ring.start,
2597 status, len);
2599 if (len > skge->rx_buf_size)
2600 goto error;
2602 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2603 goto error;
2605 if (bad_phy_status(skge->hw, status))
2606 goto error;
2608 if (phy_length(skge->hw, status) != len)
2609 goto error;
2611 if (len < RX_COPY_THRESHOLD) {
2612 skb = alloc_skb(len + 2, GFP_ATOMIC);
2613 if (!skb)
2614 goto resubmit;
2616 skb_reserve(skb, 2);
2617 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2618 pci_unmap_addr(e, mapaddr),
2619 len, PCI_DMA_FROMDEVICE);
2620 memcpy(skb->data, e->skb->data, len);
2621 pci_dma_sync_single_for_device(skge->hw->pdev,
2622 pci_unmap_addr(e, mapaddr),
2623 len, PCI_DMA_FROMDEVICE);
2624 skge_rx_reuse(e, skge->rx_buf_size);
2625 } else {
2626 struct sk_buff *nskb;
2627 nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
2628 if (!nskb)
2629 goto resubmit;
2631 skb_reserve(nskb, NET_IP_ALIGN);
2632 pci_unmap_single(skge->hw->pdev,
2633 pci_unmap_addr(e, mapaddr),
2634 pci_unmap_len(e, maplen),
2635 PCI_DMA_FROMDEVICE);
2636 skb = e->skb;
2637 prefetch(skb->data);
2638 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
2641 skb_put(skb, len);
2642 skb->dev = skge->netdev;
2643 if (skge->rx_csum) {
2644 skb->csum = csum;
2645 skb->ip_summed = CHECKSUM_HW;
2648 skb->protocol = eth_type_trans(skb, skge->netdev);
2650 return skb;
2651 error:
2653 if (netif_msg_rx_err(skge))
2654 printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
2655 skge->netdev->name, e - skge->rx_ring.start,
2656 control, status);
2658 if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2659 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2660 skge->net_stats.rx_length_errors++;
2661 if (status & XMR_FS_FRA_ERR)
2662 skge->net_stats.rx_frame_errors++;
2663 if (status & XMR_FS_FCS_ERR)
2664 skge->net_stats.rx_crc_errors++;
2665 } else {
2666 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2667 skge->net_stats.rx_length_errors++;
2668 if (status & GMR_FS_FRAGMENT)
2669 skge->net_stats.rx_frame_errors++;
2670 if (status & GMR_FS_CRC_ERR)
2671 skge->net_stats.rx_crc_errors++;
2674 resubmit:
2675 skge_rx_reuse(e, skge->rx_buf_size);
2676 return NULL;
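/*
 * Editor's summary of the buffer strategy above (not from the original
 * sources): frames shorter than RX_COPY_THRESHOLD (128 bytes) are copied
 * into a small, freshly allocated skb and the original DMA buffer is put
 * straight back on the ring with skge_rx_reuse(); larger frames hand the
 * mapped buffer up the stack and a replacement buffer is attached to the
 * slot with skge_rx_setup().  If either allocation fails, the frame is
 * dropped by resubmitting the old buffer, so the ring never loses buffers
 * under memory pressure.
 */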
2679 /* Free all buffers in Tx ring which are no longer owned by device */
2680 static void skge_txirq(struct net_device *dev)
2682 struct skge_port *skge = netdev_priv(dev);
2683 struct skge_ring *ring = &skge->tx_ring;
2684 struct skge_element *e;
2686 rmb();
2688 spin_lock(&skge->tx_lock);
2689 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2690 struct skge_tx_desc *td = e->desc;
2692 if (td->control & BMU_OWN)
2693 break;
2695 skge_tx_free(skge, e, td->control);
2697 skge->tx_ring.to_clean = e;
2699 if (netif_queue_stopped(skge->netdev)
2700 && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
2701 netif_wake_queue(skge->netdev);
2703 spin_unlock(&skge->tx_lock);
2706 static int skge_poll(struct net_device *dev, int *budget)
2708 struct skge_port *skge = netdev_priv(dev);
2709 struct skge_hw *hw = skge->hw;
2710 struct skge_ring *ring = &skge->rx_ring;
2711 struct skge_element *e;
2712 int to_do = min(dev->quota, *budget);
2713 int work_done = 0;
2715 for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
2716 struct skge_rx_desc *rd = e->desc;
2717 struct sk_buff *skb;
2718 u32 control;
2720 rmb();
2721 control = rd->control;
2722 if (control & BMU_OWN)
2723 break;
2725 skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
2726 if (likely(skb)) {
2727 dev->last_rx = jiffies;
2728 netif_receive_skb(skb);
2730 ++work_done;
2733 ring->to_clean = e;
2735 /* restart receiver */
2736 wmb();
2737 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
2739 *budget -= work_done;
2740 dev->quota -= work_done;
2742 if (work_done >= to_do)
2743 return 1; /* not done */
2745 netif_rx_complete(dev);
2747 spin_lock_irq(&hw->hw_lock);
2748 hw->intr_mask |= rxirqmask[skge->port];
2749 skge_write32(hw, B0_IMSK, hw->intr_mask);
2750 mmiowb();
2751 spin_unlock_irq(&hw->hw_lock);
2753 return 0;
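/*
 * Editor's note (illustrative): skge_poll() follows the dev->poll()/weight
 * NAPI interface used by this kernel: returning 1 tells the core that more
 * receive work remains and the poll routine should be rescheduled, while
 * returning 0 after netif_rx_complete() means this port's receive interrupt
 * has been re-enabled (its rxirqmask bit is put back into intr_mask under
 * hw_lock), so further packets are signalled by interrupt again.
 */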
2756 /* Parity errors seem to happen when Genesis is connected to a switch
2757 * with no other ports present. Heartbeat error??
2758 */
2759 static void skge_mac_parity(struct skge_hw *hw, int port)
2761 struct net_device *dev = hw->dev[port];
2763 if (dev) {
2764 struct skge_port *skge = netdev_priv(dev);
2765 ++skge->net_stats.tx_heartbeat_errors;
2768 if (hw->chip_id == CHIP_ID_GENESIS)
2769 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
2770 MFF_CLR_PERR);
2771 else
2772 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
2773 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
2774 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
2775 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
2778 static void skge_mac_intr(struct skge_hw *hw, int port)
2780 if (hw->chip_id == CHIP_ID_GENESIS)
2781 genesis_mac_intr(hw, port);
2782 else
2783 yukon_mac_intr(hw, port);
2786 /* Handle device specific framing and timeout interrupts */
2787 static void skge_error_irq(struct skge_hw *hw)
2789 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2791 if (hw->chip_id == CHIP_ID_GENESIS) {
2792 /* clear xmac errors */
2793 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
2794 skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
2795 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
2796 skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
2797 } else {
2798 /* Timestamp (unused) overflow */
2799 if (hwstatus & IS_IRQ_TIST_OV)
2800 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2803 if (hwstatus & IS_RAM_RD_PAR) {
2804 printk(KERN_ERR PFX "RAM read data parity error\n");
2805 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
2808 if (hwstatus & IS_RAM_WR_PAR) {
2809 printk(KERN_ERR PFX "RAM write data parity error\n");
2810 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
2813 if (hwstatus & IS_M1_PAR_ERR)
2814 skge_mac_parity(hw, 0);
2816 if (hwstatus & IS_M2_PAR_ERR)
2817 skge_mac_parity(hw, 1);
2819 if (hwstatus & IS_R1_PAR_ERR) {
2820 printk(KERN_ERR PFX "%s: receive queue parity error\n",
2821 hw->dev[0]->name);
2822 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
2825 if (hwstatus & IS_R2_PAR_ERR) {
2826 printk(KERN_ERR PFX "%s: receive queue parity error\n",
2827 hw->dev[1]->name);
2828 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
2831 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
2832 u16 pci_status, pci_cmd;
2834 pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
2835 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
2837 printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
2838 pci_name(hw->pdev), pci_cmd, pci_status);
2840 /* Write the error bits back to clear them. */
2841 pci_status &= PCI_STATUS_ERROR_BITS;
2842 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2843 pci_write_config_word(hw->pdev, PCI_COMMAND,
2844 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
2845 pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
2846 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2848 /* if error still set then just ignore it */
2849 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2850 if (hwstatus & IS_IRQ_STAT) {
2851 printk(KERN_INFO PFX "unable to clear error (so ignoring it)\n");
2852 hw->intr_mask &= ~IS_HW_ERR;
2857 /*
2858 * Interrupts from the PHY are handled in a work queue
2859 * because accessing PHY registers requires a spin wait, which might
2860 * cause excess interrupt latency.
2861 */
2862 static void skge_extirq(void *arg)
2864 struct skge_hw *hw = arg;
2865 int port;
2867 mutex_lock(&hw->phy_mutex);
2868 for (port = 0; port < hw->ports; port++) {
2869 struct net_device *dev = hw->dev[port];
2870 struct skge_port *skge = netdev_priv(dev);
2872 if (netif_running(dev)) {
2873 if (hw->chip_id != CHIP_ID_GENESIS)
2874 yukon_phy_intr(skge);
2875 else
2876 bcom_phy_intr(skge);
2879 mutex_unlock(&hw->phy_mutex);
2881 spin_lock_irq(&hw->hw_lock);
2882 hw->intr_mask |= IS_EXT_REG;
2883 skge_write32(hw, B0_IMSK, hw->intr_mask);
2884 spin_unlock_irq(&hw->hw_lock);
2887 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2889 struct skge_hw *hw = dev_id;
2890 u32 status;
2892 /* Reading this register masks IRQ */
2893 status = skge_read32(hw, B0_SP_ISRC);
2894 if (status == 0)
2895 return IRQ_NONE;
2897 spin_lock(&hw->hw_lock);
2898 status &= hw->intr_mask;
2899 if (status & IS_EXT_REG) {
2900 hw->intr_mask &= ~IS_EXT_REG;
2901 schedule_work(&hw->phy_work);
2904 if (status & IS_XA1_F) {
2905 skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
2906 skge_txirq(hw->dev[0]);
2909 if (status & IS_R1_F) {
2910 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
2911 hw->intr_mask &= ~IS_R1_F;
2912 netif_rx_schedule(hw->dev[0]);
2915 if (status & IS_PA_TO_TX1)
2916 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
2918 if (status & IS_PA_TO_RX1) {
2919 struct skge_port *skge = netdev_priv(hw->dev[0]);
2921 ++skge->net_stats.rx_over_errors;
2922 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
2926 if (status & IS_MAC1)
2927 skge_mac_intr(hw, 0);
2929 if (hw->dev[1]) {
2930 if (status & IS_XA2_F) {
2931 skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
2932 skge_txirq(hw->dev[1]);
2935 if (status & IS_R2_F) {
2936 skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
2937 hw->intr_mask &= ~IS_R2_F;
2938 netif_rx_schedule(hw->dev[1]);
2941 if (status & IS_PA_TO_RX2) {
2942 struct skge_port *skge = netdev_priv(hw->dev[1]);
2943 ++skge->net_stats.rx_over_errors;
2944 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
2947 if (status & IS_PA_TO_TX2)
2948 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
2950 if (status & IS_MAC2)
2951 skge_mac_intr(hw, 1);
2954 if (status & IS_HW_ERR)
2955 skge_error_irq(hw);
2957 skge_write32(hw, B0_IMSK, hw->intr_mask);
2958 spin_unlock(&hw->hw_lock);
2960 return IRQ_HANDLED;
2963 #ifdef CONFIG_NET_POLL_CONTROLLER
2964 static void skge_netpoll(struct net_device *dev)
2966 struct skge_port *skge = netdev_priv(dev);
2968 disable_irq(dev->irq);
2969 skge_intr(dev->irq, skge->hw, NULL);
2970 enable_irq(dev->irq);
2972 #endif
2974 static int skge_set_mac_address(struct net_device *dev, void *p)
2976 struct skge_port *skge = netdev_priv(dev);
2977 struct skge_hw *hw = skge->hw;
2978 unsigned port = skge->port;
2979 const struct sockaddr *addr = p;
2981 if (!is_valid_ether_addr(addr->sa_data))
2982 return -EADDRNOTAVAIL;
2984 mutex_lock(&hw->phy_mutex);
2985 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2986 memcpy_toio(hw->regs + B2_MAC_1 + port*8,
2987 dev->dev_addr, ETH_ALEN);
2988 memcpy_toio(hw->regs + B2_MAC_2 + port*8,
2989 dev->dev_addr, ETH_ALEN);
2991 if (hw->chip_id == CHIP_ID_GENESIS)
2992 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
2993 else {
2994 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
2995 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
2997 mutex_unlock(&hw->phy_mutex);
2999 return 0;
3002 static const struct {
3003 u8 id;
3004 const char *name;
3005 } skge_chips[] = {
3006 { CHIP_ID_GENESIS, "Genesis" },
3007 { CHIP_ID_YUKON, "Yukon" },
3008 { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
3009 { CHIP_ID_YUKON_LP, "Yukon-LP"},
3010 };
3012 static const char *skge_board_name(const struct skge_hw *hw)
3014 int i;
3015 static char buf[16];
3017 for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
3018 if (skge_chips[i].id == hw->chip_id)
3019 return skge_chips[i].name;
3021 snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
3022 return buf;
3026 /*
3027 * Setup the board data structure, but don't bring up
3028 * the port(s)
3029 */
3030 static int skge_reset(struct skge_hw *hw)
3032 u32 reg;
3033 u16 ctst, pci_status;
3034 u8 t8, mac_cfg, pmd_type, phy_type;
3035 int i;
3037 ctst = skge_read16(hw, B0_CTST);
3039 /* do a SW reset */
3040 skge_write8(hw, B0_CTST, CS_RST_SET);
3041 skge_write8(hw, B0_CTST, CS_RST_CLR);
3043 /* clear PCI errors, if any */
3044 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3045 skge_write8(hw, B2_TST_CTRL2, 0);
3047 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
3048 pci_write_config_word(hw->pdev, PCI_STATUS,
3049 pci_status | PCI_STATUS_ERROR_BITS);
3050 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3051 skge_write8(hw, B0_CTST, CS_MRST_CLR);
3053 /* restore CLK_RUN bits (for Yukon-Lite) */
3054 skge_write16(hw, B0_CTST,
3055 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
3057 hw->chip_id = skge_read8(hw, B2_CHIP_ID);
3058 phy_type = skge_read8(hw, B2_E_1) & 0xf;
3059 pmd_type = skge_read8(hw, B2_PMD_TYP);
3060 hw->copper = (pmd_type == 'T' || pmd_type == '1');
3062 switch (hw->chip_id) {
3063 case CHIP_ID_GENESIS:
3064 switch (phy_type) {
3065 case SK_PHY_BCOM:
3066 hw->phy_addr = PHY_ADDR_BCOM;
3067 break;
3068 default:
3069 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
3070 pci_name(hw->pdev), phy_type);
3071 return -EOPNOTSUPP;
3073 break;
3075 case CHIP_ID_YUKON:
3076 case CHIP_ID_YUKON_LITE:
3077 case CHIP_ID_YUKON_LP:
3078 if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
3079 hw->copper = 1;
3081 hw->phy_addr = PHY_ADDR_MARV;
3082 break;
3084 default:
3085 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
3086 pci_name(hw->pdev), hw->chip_id);
3087 return -EOPNOTSUPP;
3090 mac_cfg = skge_read8(hw, B2_MAC_CFG);
3091 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
3092 hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
3094 /* read the adapter's RAM size */
3095 t8 = skge_read8(hw, B2_E_0);
3096 if (hw->chip_id == CHIP_ID_GENESIS) {
3097 if (t8 == 3) {
3098 /* special case: 4 x 64k x 36, offset = 0x80000 */
3099 hw->ram_size = 0x100000;
3100 hw->ram_offset = 0x80000;
3101 } else
3102 hw->ram_size = t8 * 512;
3104 else if (t8 == 0)
3105 hw->ram_size = 0x20000;
3106 else
3107 hw->ram_size = t8 * 4096;
3109 spin_lock_init(&hw->hw_lock);
3110 hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
3111 if (hw->ports > 1)
3112 hw->intr_mask |= IS_PORT_2;
3114 if (hw->chip_id == CHIP_ID_GENESIS)
3115 genesis_init(hw);
3116 else {
3117 /* switch power to VCC (workaround for VAUX problem) */
3118 skge_write8(hw, B0_POWER_CTRL,
3119 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
3121 /* avoid boards with stuck Hardware error bits */
3122 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
3123 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
3124 printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
3125 hw->intr_mask &= ~IS_HW_ERR;
3128 /* Clear PHY COMA */
3129 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3130 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
3131 reg &= ~PCI_PHY_COMA;
3132 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
3133 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3136 for (i = 0; i < hw->ports; i++) {
3137 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3138 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3142 /* turn off hardware timer (unused) */
3143 skge_write8(hw, B2_TI_CTRL, TIM_STOP);
3144 skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
3145 skge_write8(hw, B0_LED, LED_STAT_ON);
3147 /* enable the Tx Arbiters */
3148 for (i = 0; i < hw->ports; i++)
3149 skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3151 /* Initialize ram interface */
3152 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
3154 skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
3155 skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
3156 skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
3157 skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
3158 skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
3159 skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
3160 skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
3161 skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
3162 skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
3163 skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
3164 skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
3165 skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
3167 skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
3169 /* Set interrupt moderation for Transmit only;
3170 * Receive interrupts are avoided by NAPI
3171 */
3172 skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
3173 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
3174 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
3176 skge_write32(hw, B0_IMSK, hw->intr_mask);
3178 mutex_lock(&hw->phy_mutex);
3179 for (i = 0; i < hw->ports; i++) {
3180 if (hw->chip_id == CHIP_ID_GENESIS)
3181 genesis_reset(hw, i);
3182 else
3183 yukon_reset(hw, i);
3185 mutex_unlock(&hw->phy_mutex);
3187 return 0;
3190 /* Initialize network device */
3191 static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3192 int highmem)
3194 struct skge_port *skge;
3195 struct net_device *dev = alloc_etherdev(sizeof(*skge));
3197 if (!dev) {
3198 printk(KERN_ERR "skge etherdev alloc failed\n");
3199 return NULL;
3202 SET_MODULE_OWNER(dev);
3203 SET_NETDEV_DEV(dev, &hw->pdev->dev);
3204 dev->open = skge_up;
3205 dev->stop = skge_down;
3206 dev->do_ioctl = skge_ioctl;
3207 dev->hard_start_xmit = skge_xmit_frame;
3208 dev->get_stats = skge_get_stats;
3209 if (hw->chip_id == CHIP_ID_GENESIS)
3210 dev->set_multicast_list = genesis_set_multicast;
3211 else
3212 dev->set_multicast_list = yukon_set_multicast;
3214 dev->set_mac_address = skge_set_mac_address;
3215 dev->change_mtu = skge_change_mtu;
3216 SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
3217 dev->tx_timeout = skge_tx_timeout;
3218 dev->watchdog_timeo = TX_WATCHDOG;
3219 dev->poll = skge_poll;
3220 dev->weight = NAPI_WEIGHT;
3221 #ifdef CONFIG_NET_POLL_CONTROLLER
3222 dev->poll_controller = skge_netpoll;
3223 #endif
3224 dev->irq = hw->pdev->irq;
3225 dev->features = NETIF_F_LLTX;
3226 if (highmem)
3227 dev->features |= NETIF_F_HIGHDMA;
3229 skge = netdev_priv(dev);
3230 skge->netdev = dev;
3231 skge->hw = hw;
3232 skge->msg_enable = netif_msg_init(debug, default_msg);
3233 skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
3234 skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
3236 /* Auto speed and flow control */
3237 skge->autoneg = AUTONEG_ENABLE;
3238 skge->flow_control = FLOW_MODE_SYMMETRIC;
3239 skge->duplex = -1;
3240 skge->speed = -1;
3241 skge->advertising = skge_supported_modes(hw);
3243 hw->dev[port] = dev;
3245 skge->port = port;
3247 spin_lock_init(&skge->tx_lock);
3249 if (hw->chip_id != CHIP_ID_GENESIS) {
3250 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3251 skge->rx_csum = 1;
3254 /* read the mac address */
3255 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3256 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3258 /* device is off until link detection */
3259 netif_carrier_off(dev);
3260 netif_stop_queue(dev);
3262 return dev;
3265 static void __devinit skge_show_addr(struct net_device *dev)
3267 const struct skge_port *skge = netdev_priv(dev);
3269 if (netif_msg_probe(skge))
3270 printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
3271 dev->name,
3272 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
3273 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
3276 static int __devinit skge_probe(struct pci_dev *pdev,
3277 const struct pci_device_id *ent)
3279 struct net_device *dev, *dev1;
3280 struct skge_hw *hw;
3281 int err, using_dac = 0;
3283 err = pci_enable_device(pdev);
3284 if (err) {
3285 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3286 pci_name(pdev));
3287 goto err_out;
3290 err = pci_request_regions(pdev, DRV_NAME);
3291 if (err) {
3292 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3293 pci_name(pdev));
3294 goto err_out_disable_pdev;
3297 pci_set_master(pdev);
3299 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3300 using_dac = 1;
3301 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3302 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3303 using_dac = 0;
3304 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3307 if (err) {
3308 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3309 pci_name(pdev));
3310 goto err_out_free_regions;
3313 #ifdef __BIG_ENDIAN
3314 /* byte swap descriptors in hardware */
3316 u32 reg;
3318 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
3319 reg |= PCI_REV_DESC;
3320 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
3322 #endif
3324 err = -ENOMEM;
3325 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3326 if (!hw) {
3327 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
3328 pci_name(pdev));
3329 goto err_out_free_regions;
3332 hw->pdev = pdev;
3333 mutex_init(&hw->phy_mutex);
3334 INIT_WORK(&hw->phy_work, skge_extirq, hw);
3336 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3337 if (!hw->regs) {
3338 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3339 pci_name(pdev));
3340 goto err_out_free_hw;
3343 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw);
3344 if (err) {
3345 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3346 pci_name(pdev), pdev->irq);
3347 goto err_out_iounmap;
3349 pci_set_drvdata(pdev, hw);
3351 err = skge_reset(hw);
3352 if (err)
3353 goto err_out_free_irq;
3355 printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
3356 (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
3357 skge_board_name(hw), hw->chip_rev);
3359 if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
3360 goto err_out_led_off;
3362 if (!is_valid_ether_addr(dev->dev_addr)) {
3363 printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
3364 pci_name(pdev));
3365 err = -EIO;
3366 goto err_out_free_netdev;
3370 err = register_netdev(dev);
3371 if (err) {
3372 printk(KERN_ERR PFX "%s: cannot register net device\n",
3373 pci_name(pdev));
3374 goto err_out_free_netdev;
3377 skge_show_addr(dev);
3379 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
3380 if (register_netdev(dev1) == 0)
3381 skge_show_addr(dev1);
3382 else {
3383 /* Failure to register second port need not be fatal */
3384 printk(KERN_WARNING PFX "register of second port failed\n");
3385 hw->dev[1] = NULL;
3386 free_netdev(dev1);
3390 return 0;
3392 err_out_free_netdev:
3393 free_netdev(dev);
3394 err_out_led_off:
3395 skge_write16(hw, B0_LED, LED_STAT_OFF);
3396 err_out_free_irq:
3397 free_irq(pdev->irq, hw);
3398 err_out_iounmap:
3399 iounmap(hw->regs);
3400 err_out_free_hw:
3401 kfree(hw);
3402 err_out_free_regions:
3403 pci_release_regions(pdev);
3404 err_out_disable_pdev:
3405 pci_disable_device(pdev);
3406 pci_set_drvdata(pdev, NULL);
3407 err_out:
3408 return err;
3411 static void __devexit skge_remove(struct pci_dev *pdev)
3413 struct skge_hw *hw = pci_get_drvdata(pdev);
3414 struct net_device *dev0, *dev1;
3416 if (!hw)
3417 return;
3419 if ((dev1 = hw->dev[1]))
3420 unregister_netdev(dev1);
3421 dev0 = hw->dev[0];
3422 unregister_netdev(dev0);
3424 spin_lock_irq(&hw->hw_lock);
3425 hw->intr_mask = 0;
3426 skge_write32(hw, B0_IMSK, 0);
3427 spin_unlock_irq(&hw->hw_lock);
3429 skge_write16(hw, B0_LED, LED_STAT_OFF);
3430 skge_write8(hw, B0_CTST, CS_RST_SET);
3432 flush_scheduled_work();
3434 free_irq(pdev->irq, hw);
3435 pci_release_regions(pdev);
3436 pci_disable_device(pdev);
3437 if (dev1)
3438 free_netdev(dev1);
3439 free_netdev(dev0);
3441 iounmap(hw->regs);
3442 kfree(hw);
3443 pci_set_drvdata(pdev, NULL);
3446 #ifdef CONFIG_PM
3447 static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
3449 struct skge_hw *hw = pci_get_drvdata(pdev);
3450 int i, wol = 0;
3452 for (i = 0; i < 2; i++) {
3453 struct net_device *dev = hw->dev[i];
3455 if (dev) {
3456 struct skge_port *skge = netdev_priv(dev);
3457 if (netif_running(dev)) {
3458 netif_carrier_off(dev);
3459 if (skge->wol)
3460 netif_stop_queue(dev);
3461 else
3462 skge_down(dev);
3464 netif_device_detach(dev);
3465 wol |= skge->wol;
3469 pci_save_state(pdev);
3470 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
3471 pci_disable_device(pdev);
3472 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3474 return 0;
3477 static int skge_resume(struct pci_dev *pdev)
3479 struct skge_hw *hw = pci_get_drvdata(pdev);
3480 int i;
3482 pci_set_power_state(pdev, PCI_D0);
3483 pci_restore_state(pdev);
3484 pci_enable_wake(pdev, PCI_D0, 0);
3486 skge_reset(hw);
3488 for (i = 0; i < 2; i++) {
3489 struct net_device *dev = hw->dev[i];
3490 if (dev) {
3491 netif_device_attach(dev);
3492 if (netif_running(dev) && skge_up(dev))
3493 dev_close(dev);
3496 return 0;
3498 #endif
3500 static struct pci_driver skge_driver = {
3501 .name = DRV_NAME,
3502 .id_table = skge_id_table,
3503 .probe = skge_probe,
3504 .remove = __devexit_p(skge_remove),
3505 #ifdef CONFIG_PM
3506 .suspend = skge_suspend,
3507 .resume = skge_resume,
3508 #endif
3509 };
3511 static int __init skge_init_module(void)
3513 return pci_module_init(&skge_driver);
3516 static void __exit skge_cleanup_module(void)
3518 pci_unregister_driver(&skge_driver);
3521 module_init(skge_init_module);
3522 module_exit(skge_cleanup_module);