ia64/linux-2.6.18-xen.hg

drivers/net/sky2.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
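The retry behaviour can be pictured with a minimal C sketch (illustration only; the helper names current_pages, increase_reservation and schedule_retry are made up, not the driver's actual symbols):

/* Sketch: balloon up towards the target, keeping partially granted
 * pages and retrying on a timer instead of recording a hard limit. */
static void balloon_up_towards(unsigned long target)
{
	while (current_pages < target) {
		long got = increase_reservation(target - current_pages);
		if (got <= 0) {
			/* host may be under temporary memory pressure:
			 * keep what we already got and try again later */
			schedule_retry();
			break;
		}
		current_pages += got;	/* keep partial successes */
	}
}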

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author: Keir Fraser <keir.fraser@citrix.com>
date: Fri Jun 05 14:01:20 2009 +0100
parents: 3e8752eb6d9c
1 /*
2 * New driver for Marvell Yukon 2 chipset.
3 * Based on earlier sk98lin, and skge driver.
4 *
5 * This driver intentionally does not support all the features
6 * of the original driver such as link fail-over and link management because
7 * those should be done at higher levels.
8 *
9 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
26 #include <linux/crc32.h>
27 #include <linux/kernel.h>
28 #include <linux/version.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/etherdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/pci.h>
35 #include <linux/ip.h>
36 #include <linux/tcp.h>
37 #include <linux/in.h>
38 #include <linux/delay.h>
39 #include <linux/workqueue.h>
40 #include <linux/if_vlan.h>
41 #include <linux/prefetch.h>
42 #include <linux/mii.h>
44 #include <asm/irq.h>
46 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
47 #define SKY2_VLAN_TAG_USED 1
48 #endif
50 #include "sky2.h"
52 #define DRV_NAME "sky2"
53 #define DRV_VERSION "1.5"
54 #define PFX DRV_NAME " "
56 /*
57 * The Yukon II chipset takes 64 bit command blocks (called list elements)
58 * that are organized into three (receive, transmit, status) different rings
59 * similar to Tigon3. A transmit can require several elements;
60 * a receive requires one (or two if using 64 bit dma).
61 */
63 #define RX_LE_SIZE 512
64 #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
65 #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
66 #define RX_DEF_PENDING RX_MAX_PENDING
67 #define RX_SKB_ALIGN 8
68 #define RX_BUF_WRITE 16
70 #define TX_RING_SIZE 512
71 #define TX_DEF_PENDING (TX_RING_SIZE - 1)
72 #define TX_MIN_PENDING 64
73 #define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
75 #define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
76 #define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
77 #define ETH_JUMBO_MTU 9000
78 #define TX_WATCHDOG (5 * HZ)
79 #define NAPI_WEIGHT 64
80 #define PHY_RETRIES 1000
82 #define RING_NEXT(x,s) (((x)+1) & ((s)-1))
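/* RING_NEXT assumes the ring size s is a power of two, so the mask
 * replaces a modulo: e.g. RING_NEXT(511, TX_RING_SIZE) == (511 + 1) & 511
 * == 0, wrapping back to the first slot. */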
84 static const u32 default_msg =
85 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
86 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
87 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
89 static int debug = -1; /* defaults above */
90 module_param(debug, int, 0);
91 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
93 static int copybreak __read_mostly = 256;
94 module_param(copybreak, int, 0);
95 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
97 static int disable_msi = 0;
98 module_param(disable_msi, int, 0);
99 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
101 static int idle_timeout = 100;
102 module_param(idle_timeout, int, 0);
103 MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)");
105 static const struct pci_device_id sky2_id_table[] = {
106 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
107 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
109 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
112 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
113 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
114 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
115 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
116 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
117 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
118 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
119 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
120 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
121 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) },
122 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
123 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
124 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
125 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
126 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) },
127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) },
128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) },
129 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) },
130 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) },
131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) },
132 { 0 }
133 };
135 MODULE_DEVICE_TABLE(pci, sky2_id_table);
137 /* Avoid conditionals by using array */
138 static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
139 static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
140 static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
142 /* This driver supports yukon2 chipset only */
143 static const char *yukon2_name[] = {
144 "XL", /* 0xb3 */
145 "EC Ultra", /* 0xb4 */
146 "UNKNOWN", /* 0xb5 */
147 "EC", /* 0xb6 */
148 "FE", /* 0xb7 */
149 };
151 /* Access to external PHY */
152 static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
153 {
154 int i;
156 gma_write16(hw, port, GM_SMI_DATA, val);
157 gma_write16(hw, port, GM_SMI_CTRL,
158 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
160 for (i = 0; i < PHY_RETRIES; i++) {
161 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
162 return 0;
163 udelay(1);
164 }
166 printk(KERN_WARNING PFX "%s: phy write timeout\n", hw->dev[port]->name);
167 return -ETIMEDOUT;
168 }
170 static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
171 {
172 int i;
174 gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
175 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
177 for (i = 0; i < PHY_RETRIES; i++) {
178 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) {
179 *val = gma_read16(hw, port, GM_SMI_DATA);
180 return 0;
181 }
183 udelay(1);
184 }
186 return -ETIMEDOUT;
187 }
189 static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
190 {
191 u16 v;
193 if (__gm_phy_read(hw, port, reg, &v) != 0)
194 printk(KERN_WARNING PFX "%s: phy read timeout\n", hw->dev[port]->name);
195 return v;
196 }
198 static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
199 {
200 u16 power_control;
201 u32 reg1;
202 int vaux;
204 pr_debug("sky2_set_power_state %d\n", state);
205 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
207 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
208 vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
209 (power_control & PCI_PM_CAP_PME_D3cold);
211 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
213 power_control |= PCI_PM_CTRL_PME_STATUS;
214 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
216 switch (state) {
217 case PCI_D0:
218 /* switch power to VCC (WA for VAUX problem) */
219 sky2_write8(hw, B0_POWER_CTRL,
220 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
222 /* disable Core Clock Division, */
223 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
225 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
226 /* enable bits are inverted */
227 sky2_write8(hw, B2_Y2_CLK_GATE,
228 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
229 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
230 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
231 else
232 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
234 /* Turn off phy power saving */
235 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
236 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
238 /* looks like this XL is back asswards .. */
239 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
240 reg1 |= PCI_Y2_PHY1_COMA;
241 if (hw->ports > 1)
242 reg1 |= PCI_Y2_PHY2_COMA;
243 }
244 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
245 udelay(100);
247 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
248 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
249 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
250 reg1 &= P_ASPM_CONTROL_MSK;
251 sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
252 sky2_pci_write32(hw, PCI_DEV_REG5, 0);
253 }
255 break;
257 case PCI_D3hot:
258 case PCI_D3cold:
259 /* Turn on phy power saving */
260 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
261 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
262 reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
263 else
264 reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
265 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
266 udelay(100);
268 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
269 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
270 else
271 /* enable bits are inverted */
272 sky2_write8(hw, B2_Y2_CLK_GATE,
273 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
274 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
275 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
277 /* switch power to VAUX */
278 if (vaux && state != PCI_D3cold)
279 sky2_write8(hw, B0_POWER_CTRL,
280 (PC_VAUX_ENA | PC_VCC_ENA |
281 PC_VAUX_ON | PC_VCC_OFF));
282 break;
283 default:
284 printk(KERN_ERR PFX "Unknown power state %d\n", state);
285 }
287 sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
288 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
289 }
291 static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
292 {
293 u16 reg;
295 /* disable all GMAC IRQ's */
296 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
297 /* disable PHY IRQs */
298 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
300 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
301 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
302 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
303 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
305 reg = gma_read16(hw, port, GM_RX_CTRL);
306 reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
307 gma_write16(hw, port, GM_RX_CTRL, reg);
308 }
310 static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
311 {
312 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
313 u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
315 if (sky2->autoneg == AUTONEG_ENABLE &&
316 !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
317 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
319 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
320 PHY_M_EC_MAC_S_MSK);
321 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
323 if (hw->chip_id == CHIP_ID_YUKON_EC)
324 ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
325 else
326 ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
328 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
329 }
331 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
332 if (hw->copper) {
333 if (hw->chip_id == CHIP_ID_YUKON_FE) {
334 /* enable automatic crossover */
335 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
336 } else {
337 /* disable energy detect */
338 ctrl &= ~PHY_M_PC_EN_DET_MSK;
340 /* enable automatic crossover */
341 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
343 if (sky2->autoneg == AUTONEG_ENABLE &&
344 (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
345 ctrl &= ~PHY_M_PC_DSC_MSK;
346 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
347 }
348 }
349 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
350 } else {
351 /* workaround for deviation #4.88 (CRC errors) */
352 /* disable Automatic Crossover */
354 ctrl &= ~PHY_M_PC_MDIX_MSK;
355 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
357 if (hw->chip_id == CHIP_ID_YUKON_XL) {
358 /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
359 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
360 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
361 ctrl &= ~PHY_M_MAC_MD_MSK;
362 ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
363 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
365 /* select page 1 to access Fiber registers */
366 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
367 }
368 }
370 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
371 if (sky2->autoneg == AUTONEG_DISABLE)
372 ctrl &= ~PHY_CT_ANE;
373 else
374 ctrl |= PHY_CT_ANE;
376 ctrl |= PHY_CT_RESET;
377 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
379 ctrl = 0;
380 ct1000 = 0;
381 adv = PHY_AN_CSMA;
383 if (sky2->autoneg == AUTONEG_ENABLE) {
384 if (hw->copper) {
385 if (sky2->advertising & ADVERTISED_1000baseT_Full)
386 ct1000 |= PHY_M_1000C_AFD;
387 if (sky2->advertising & ADVERTISED_1000baseT_Half)
388 ct1000 |= PHY_M_1000C_AHD;
389 if (sky2->advertising & ADVERTISED_100baseT_Full)
390 adv |= PHY_M_AN_100_FD;
391 if (sky2->advertising & ADVERTISED_100baseT_Half)
392 adv |= PHY_M_AN_100_HD;
393 if (sky2->advertising & ADVERTISED_10baseT_Full)
394 adv |= PHY_M_AN_10_FD;
395 if (sky2->advertising & ADVERTISED_10baseT_Half)
396 adv |= PHY_M_AN_10_HD;
397 } else /* special defines for FIBER (88E1011S only) */
398 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
400 /* Set Flow-control capabilities */
401 if (sky2->tx_pause && sky2->rx_pause)
402 adv |= PHY_AN_PAUSE_CAP; /* symmetric */
403 else if (sky2->rx_pause && !sky2->tx_pause)
404 adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
405 else if (!sky2->rx_pause && sky2->tx_pause)
406 adv |= PHY_AN_PAUSE_ASYM; /* local */
408 /* Restart Auto-negotiation */
409 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
410 } else {
411 /* forced speed/duplex settings */
412 ct1000 = PHY_M_1000C_MSE;
414 if (sky2->duplex == DUPLEX_FULL)
415 ctrl |= PHY_CT_DUP_MD;
417 switch (sky2->speed) {
418 case SPEED_1000:
419 ctrl |= PHY_CT_SP1000;
420 break;
421 case SPEED_100:
422 ctrl |= PHY_CT_SP100;
423 break;
424 }
426 ctrl |= PHY_CT_RESET;
427 }
429 if (hw->chip_id != CHIP_ID_YUKON_FE)
430 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
432 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
433 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
435 /* Setup Phy LED's */
436 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
437 ledover = 0;
439 switch (hw->chip_id) {
440 case CHIP_ID_YUKON_FE:
441 /* on 88E3082 these bits are at 11..9 (shifted left) */
442 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
444 ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
446 /* delete ACT LED control bits */
447 ctrl &= ~PHY_M_FELP_LED1_MSK;
448 /* change ACT LED control to blink mode */
449 ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
450 gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
451 break;
453 case CHIP_ID_YUKON_XL:
454 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
456 /* select page 3 to access LED control register */
457 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
459 /* set LED Function Control register */
460 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
461 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
462 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
463 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
464 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
466 /* set Polarity Control register */
467 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
468 (PHY_M_POLC_LS1_P_MIX(4) |
469 PHY_M_POLC_IS0_P_MIX(4) |
470 PHY_M_POLC_LOS_CTRL(2) |
471 PHY_M_POLC_INIT_CTRL(2) |
472 PHY_M_POLC_STA1_CTRL(2) |
473 PHY_M_POLC_STA0_CTRL(2)));
475 /* restore page register */
476 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
477 break;
478 case CHIP_ID_YUKON_EC_U:
479 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
481 /* select page 3 to access LED control register */
482 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
484 /* set LED Function Control register */
485 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
486 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
487 PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
488 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
489 PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
491 /* set Blink Rate in LED Timer Control Register */
492 gm_phy_write(hw, port, PHY_MARV_INT_MASK,
493 ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
494 /* restore page register */
495 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
496 break;
498 default:
499 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
500 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
501 /* turn off the Rx LED (LED_RX) */
502 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
503 }
505 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
506 /* apply fixes in PHY AFE */
507 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
508 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
510 /* increase differential signal amplitude in 10BASE-T */
511 gm_phy_write(hw, port, 0x18, 0xaa99);
512 gm_phy_write(hw, port, 0x17, 0x2011);
514 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
515 gm_phy_write(hw, port, 0x18, 0xa204);
516 gm_phy_write(hw, port, 0x17, 0x2002);
518 /* set page register to 0 */
519 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
520 } else {
521 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
523 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
524 /* turn on 100 Mbps LED (LED_LINK100) */
525 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
526 }
528 if (ledover)
529 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
531 }
532 /* Enable phy interrupt on auto-negotiation complete (or link up) */
533 if (sky2->autoneg == AUTONEG_ENABLE)
534 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
535 else
536 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
537 }
539 /* Force a renegotiation */
540 static void sky2_phy_reinit(struct sky2_port *sky2)
541 {
542 spin_lock_bh(&sky2->phy_lock);
543 sky2_phy_init(sky2->hw, sky2->port);
544 spin_unlock_bh(&sky2->phy_lock);
545 }
547 static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
548 {
549 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
550 u16 reg;
551 int i;
552 const u8 *addr = hw->dev[port]->dev_addr;
554 sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
555 sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR|GPC_ENA_PAUSE);
557 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
559 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
560 /* WA DEV_472 -- looks like crossed wires on port 2 */
561 /* clear GMAC 1 Control reset */
562 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
563 do {
564 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
565 sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
566 } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
567 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
568 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
569 }
571 if (sky2->autoneg == AUTONEG_DISABLE) {
572 reg = gma_read16(hw, port, GM_GP_CTRL);
573 reg |= GM_GPCR_AU_ALL_DIS;
574 gma_write16(hw, port, GM_GP_CTRL, reg);
575 gma_read16(hw, port, GM_GP_CTRL);
577 switch (sky2->speed) {
578 case SPEED_1000:
579 reg &= ~GM_GPCR_SPEED_100;
580 reg |= GM_GPCR_SPEED_1000;
581 break;
582 case SPEED_100:
583 reg &= ~GM_GPCR_SPEED_1000;
584 reg |= GM_GPCR_SPEED_100;
585 break;
586 case SPEED_10:
587 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
588 break;
589 }
591 if (sky2->duplex == DUPLEX_FULL)
592 reg |= GM_GPCR_DUP_FULL;
594 /* turn off pause in 10/100mbps half duplex */
595 else if (sky2->speed != SPEED_1000 &&
596 hw->chip_id != CHIP_ID_YUKON_EC_U)
597 sky2->tx_pause = sky2->rx_pause = 0;
598 } else
599 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
601 if (!sky2->tx_pause && !sky2->rx_pause) {
602 sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
603 reg |=
604 GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
605 } else if (sky2->tx_pause && !sky2->rx_pause) {
606 /* disable Rx flow-control */
607 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
608 }
610 gma_write16(hw, port, GM_GP_CTRL, reg);
612 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
614 spin_lock_bh(&sky2->phy_lock);
615 sky2_phy_init(hw, port);
616 spin_unlock_bh(&sky2->phy_lock);
618 /* MIB clear */
619 reg = gma_read16(hw, port, GM_PHY_ADDR);
620 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
622 for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
623 gma_read16(hw, port, i);
624 gma_write16(hw, port, GM_PHY_ADDR, reg);
626 /* transmit control */
627 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
629 /* receive control reg: unicast + multicast + no FCS */
630 gma_write16(hw, port, GM_RX_CTRL,
631 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
633 /* transmit flow control */
634 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
636 /* transmit parameter */
637 gma_write16(hw, port, GM_TX_PARAM,
638 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
639 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
640 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
641 TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
643 /* serial mode register */
644 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
645 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
647 if (hw->dev[port]->mtu > ETH_DATA_LEN)
648 reg |= GM_SMOD_JUMBO_ENA;
650 gma_write16(hw, port, GM_SERIAL_MODE, reg);
652 /* virtual address for data */
653 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
655 /* physical address: used for pause frames */
656 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
658 /* ignore counter overflows */
659 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
660 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
661 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
663 /* Configure Rx MAC FIFO */
664 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
665 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
666 GMF_OPER_ON | GMF_RX_F_FL_ON);
668 /* Flush Rx MAC FIFO on any flow control or error */
669 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
671 /* Set threshold to 0xa (64 bytes)
672 * ASF disabled so no need to do WA dev #4.30
673 */
674 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
676 /* Configure Tx MAC FIFO */
677 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
678 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
680 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
681 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8);
682 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
683 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
684 /* set Tx GMAC FIFO Almost Empty Threshold */
685 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
686 /* Disable Store & Forward mode for TX */
687 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
688 }
689 }
691 }
693 /* Assign Ram Buffer allocation in units of 64bit (8 bytes) */
694 static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 end)
695 {
696 pr_debug(PFX "q %d %#x %#x\n", q, start, end);
698 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
699 sky2_write32(hw, RB_ADDR(q, RB_START), start);
700 sky2_write32(hw, RB_ADDR(q, RB_END), end);
701 sky2_write32(hw, RB_ADDR(q, RB_WP), start);
702 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
704 if (q == Q_R1 || q == Q_R2) {
705 u32 space = end - start + 1;
706 u32 tp = space - space/4;
708 /* On receive queues, set the thresholds
709 * give receiver priority when > 3/4 full
710 * send pause when down to 2K
711 */
712 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
713 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
715 tp = space - 2048/8;
716 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
717 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
718 } else {
719 /* Enable store & forward on Tx queues because
720 * Tx FIFO is only 1K on Yukon
721 */
722 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
723 }
725 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
726 sky2_read8(hw, RB_ADDR(q, RB_CTRL));
727 }
729 /* Setup Bus Memory Interface */
730 static void sky2_qset(struct sky2_hw *hw, u16 q)
731 {
732 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
733 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
734 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
735 sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
736 }
738 /* Setup prefetch unit registers. This is the interface between
739 * hardware and driver list elements
740 */
741 static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
742 u64 addr, u32 last)
743 {
744 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
745 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
746 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
747 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
748 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
749 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
751 sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
752 }
754 static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
755 {
756 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
758 sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
759 return le;
760 }
762 /* Update chip's next pointer */
763 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
764 {
765 wmb();
766 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
767 mmiowb();
768 }
771 static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
772 {
773 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
774 sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
775 return le;
776 }
778 /* Return high part of DMA address (could be 32 or 64 bit) */
779 static inline u32 high32(dma_addr_t a)
780 {
781 return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
782 }
784 /* Build description to hardware about buffer */
785 static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
786 {
787 struct sky2_rx_le *le;
788 u32 hi = high32(map);
789 u16 len = sky2->rx_bufsize;
791 if (sky2->rx_addr64 != hi) {
792 le = sky2_next_rx(sky2);
793 le->addr = cpu_to_le32(hi);
794 le->ctrl = 0;
795 le->opcode = OP_ADDR64 | HW_OWNER;
796 sky2->rx_addr64 = high32(map + len);
797 }
799 le = sky2_next_rx(sky2);
800 le->addr = cpu_to_le32((u32) map);
801 le->length = cpu_to_le16(len);
802 le->ctrl = 0;
803 le->opcode = OP_PACKET | HW_OWNER;
804 }
807 /* Tell chip where to start receive checksum.
808 * Actually has two checksums, but set both same to avoid possible byte
809 * order problems.
810 */
811 static void rx_set_checksum(struct sky2_port *sky2)
812 {
813 struct sky2_rx_le *le;
815 le = sky2_next_rx(sky2);
816 le->addr = (ETH_HLEN << 16) | ETH_HLEN;
817 le->ctrl = 0;
818 le->opcode = OP_TCPSTART | HW_OWNER;
820 sky2_write32(sky2->hw,
821 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
822 sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
824 }
826 /*
827 * The RX Stop command will not work for Yukon-2 if the BMU does not
828 * reach the end of packet and since we can't make sure that we have
829 * incoming data, we must reset the BMU while it is not doing a DMA
830 * transfer. Since it is possible that the RX path is still active,
831 * the RX RAM buffer will be stopped first, so any possible incoming
832 * data will not trigger a DMA. After the RAM buffer is stopped, the
833 * BMU is polled until any DMA in progress is ended and only then it
834 * will be reset.
835 */
836 static void sky2_rx_stop(struct sky2_port *sky2)
837 {
838 struct sky2_hw *hw = sky2->hw;
839 unsigned rxq = rxqaddr[sky2->port];
840 int i;
842 /* disable the RAM Buffer receive queue */
843 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
845 for (i = 0; i < 0xffff; i++)
846 if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
847 == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
848 goto stopped;
850 printk(KERN_WARNING PFX "%s: receiver stop failed\n",
851 sky2->netdev->name);
852 stopped:
853 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
855 /* reset the Rx prefetch unit */
856 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
857 }
859 /* Clean out receive buffer area, assumes receiver hardware stopped */
860 static void sky2_rx_clean(struct sky2_port *sky2)
861 {
862 unsigned i;
864 memset(sky2->rx_le, 0, RX_LE_BYTES);
865 for (i = 0; i < sky2->rx_pending; i++) {
866 struct ring_info *re = sky2->rx_ring + i;
868 if (re->skb) {
869 pci_unmap_single(sky2->hw->pdev,
870 re->mapaddr, sky2->rx_bufsize,
871 PCI_DMA_FROMDEVICE);
872 kfree_skb(re->skb);
873 re->skb = NULL;
874 }
875 }
876 }
878 /* Basic MII support */
879 static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
880 {
881 struct mii_ioctl_data *data = if_mii(ifr);
882 struct sky2_port *sky2 = netdev_priv(dev);
883 struct sky2_hw *hw = sky2->hw;
884 int err = -EOPNOTSUPP;
886 if (!netif_running(dev))
887 return -ENODEV; /* Phy still in reset */
889 switch (cmd) {
890 case SIOCGMIIPHY:
891 data->phy_id = PHY_ADDR_MARV;
893 /* fallthru */
894 case SIOCGMIIREG: {
895 u16 val = 0;
897 spin_lock_bh(&sky2->phy_lock);
898 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
899 spin_unlock_bh(&sky2->phy_lock);
901 data->val_out = val;
902 break;
903 }
905 case SIOCSMIIREG:
906 if (!capable(CAP_NET_ADMIN))
907 return -EPERM;
909 spin_lock_bh(&sky2->phy_lock);
910 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
911 data->val_in);
912 spin_unlock_bh(&sky2->phy_lock);
913 break;
914 }
915 return err;
916 }
918 #ifdef SKY2_VLAN_TAG_USED
919 static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
920 {
921 struct sky2_port *sky2 = netdev_priv(dev);
922 struct sky2_hw *hw = sky2->hw;
923 u16 port = sky2->port;
925 spin_lock_bh(&sky2->tx_lock);
927 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
928 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
929 sky2->vlgrp = grp;
931 spin_unlock_bh(&sky2->tx_lock);
932 }
934 static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
935 {
936 struct sky2_port *sky2 = netdev_priv(dev);
937 struct sky2_hw *hw = sky2->hw;
938 u16 port = sky2->port;
940 spin_lock_bh(&sky2->tx_lock);
942 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
943 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
944 if (sky2->vlgrp)
945 sky2->vlgrp->vlan_devices[vid] = NULL;
947 spin_unlock_bh(&sky2->tx_lock);
948 }
949 #endif
951 /*
952 * It appears the hardware has a bug in the FIFO logic that
953 * causes it to hang if the FIFO gets overrun and the receive buffer
954 * is not aligned. Also, alloc_skb() won't align properly if slab
955 * debugging is enabled.
956 */
957 static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
958 {
959 struct sk_buff *skb;
961 skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
962 if (likely(skb)) {
963 unsigned long p = (unsigned long) skb->data;
964 skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
965 }
967 return skb;
968 }
970 /*
971 * Allocate and setup receiver buffer pool.
972 * In case of 64 bit dma, there are 2X as many list elements
973 * available as ring entries
974 * and we need to reserve one list element so we don't wrap around.
975 */
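/* Illustration: under 64 bit DMA a single buffer may need two list
 * elements (an OP_ADDR64 element plus the OP_PACKET element), which is
 * why RX_MAX_PENDING above is RX_LE_SIZE/2 - 2. */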
976 static int sky2_rx_start(struct sky2_port *sky2)
977 {
978 struct sky2_hw *hw = sky2->hw;
979 unsigned rxq = rxqaddr[sky2->port];
980 int i;
981 unsigned thresh;
983 sky2->rx_put = sky2->rx_next = 0;
984 sky2_qset(hw, rxq);
986 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
987 /* MAC Rx RAM Read is controlled by hardware */
988 sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
989 }
991 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
993 rx_set_checksum(sky2);
994 for (i = 0; i < sky2->rx_pending; i++) {
995 struct ring_info *re = sky2->rx_ring + i;
997 re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
998 if (!re->skb)
999 goto nomem;
1001 re->mapaddr = pci_map_single(hw->pdev, re->skb->data,
1002 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
1003 sky2_rx_add(sky2, re->mapaddr);
1004 }
1007 /*
1008 * The receiver hangs if it receives frames larger than the
1009 * packet buffer. As a workaround, truncate oversize frames, but
1010 * the register is limited to 9 bits, so if you do frames > 2052
1011 * you better get the MTU right!
1012 */
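/* Worked example: for the default 1500 byte MTU, rx_bufsize is
 * ALIGN(1500 + ETH_HLEN + VLAN_HLEN, 8) + 8 == 1528, so thresh is
 * (1528 - 8) / 4 == 380 and fits in the 9 bit register; a 9000 byte
 * jumbo MTU gives 2256 > 0x1ff, so truncation is disabled instead. */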
1013 thresh = (sky2->rx_bufsize - 8) / sizeof(u32);
1014 if (thresh > 0x1ff)
1015 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1016 else {
1017 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1018 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1019 }
1022 /* Tell chip about available buffers */
1023 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
1024 return 0;
1025 nomem:
1026 sky2_rx_clean(sky2);
1027 return -ENOMEM;
1028 }
1030 /* Bring up network interface. */
1031 static int sky2_up(struct net_device *dev)
1032 {
1033 struct sky2_port *sky2 = netdev_priv(dev);
1034 struct sky2_hw *hw = sky2->hw;
1035 unsigned port = sky2->port;
1036 u32 ramsize, rxspace, imask;
1037 int cap, err = -ENOMEM;
1038 struct net_device *otherdev = hw->dev[sky2->port^1];
1040 /*
1041 * On dual port PCI-X card, there is a problem where status
1042 * can be received out of order due to split transactions
1043 */
1044 if (otherdev && netif_running(otherdev) &&
1045 (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
1046 struct sky2_port *osky2 = netdev_priv(otherdev);
1047 u16 cmd;
1049 cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
1050 cmd &= ~PCI_X_CMD_MAX_SPLIT;
1051 sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
1053 sky2->rx_csum = 0;
1054 osky2->rx_csum = 0;
1055 }
1057 if (netif_msg_ifup(sky2))
1058 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
1060 /* must be power of 2 */
1061 sky2->tx_le = pci_alloc_consistent(hw->pdev,
1062 TX_RING_SIZE *
1063 sizeof(struct sky2_tx_le),
1064 &sky2->tx_le_map);
1065 if (!sky2->tx_le)
1066 goto err_out;
1068 sky2->tx_ring = kcalloc(TX_RING_SIZE, sizeof(struct tx_ring_info),
1069 GFP_KERNEL);
1070 if (!sky2->tx_ring)
1071 goto err_out;
1072 sky2->tx_prod = sky2->tx_cons = 0;
1074 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
1075 &sky2->rx_le_map);
1076 if (!sky2->rx_le)
1077 goto err_out;
1078 memset(sky2->rx_le, 0, RX_LE_BYTES);
1080 sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct ring_info),
1081 GFP_KERNEL);
1082 if (!sky2->rx_ring)
1083 goto err_out;
1085 sky2_mac_init(hw, port);
1087 /* Determine available ram buffer space in qwords. */
1088 ramsize = sky2_read8(hw, B2_E_0) * 4096/8;
1090 if (ramsize > 6*1024/8)
1091 rxspace = ramsize - (ramsize + 2) / 3;
1092 else
1093 rxspace = ramsize / 2;
1095 sky2_ramset(hw, rxqaddr[port], 0, rxspace-1);
1096 sky2_ramset(hw, txqaddr[port], rxspace, ramsize-1);
1098 /* Make sure SyncQ is disabled */
1099 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
1100 RB_RST_SET);
1102 sky2_qset(hw, txqaddr[port]);
1104 /* Set almost empty threshold */
1105 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
1106 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
1108 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1109 TX_RING_SIZE - 1);
1111 err = sky2_rx_start(sky2);
1112 if (err)
1113 goto err_out;
1115 /* Enable interrupts from phy/mac for port */
1116 imask = sky2_read32(hw, B0_IMSK);
1117 imask |= portirq_msk[port];
1118 sky2_write32(hw, B0_IMSK, imask);
1120 return 0;
1122 err_out:
1123 if (sky2->rx_le) {
1124 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1125 sky2->rx_le, sky2->rx_le_map);
1126 sky2->rx_le = NULL;
1127 }
1128 if (sky2->tx_le) {
1129 pci_free_consistent(hw->pdev,
1130 TX_RING_SIZE * sizeof(struct sky2_tx_le),
1131 sky2->tx_le, sky2->tx_le_map);
1132 sky2->tx_le = NULL;
1133 }
1134 kfree(sky2->tx_ring);
1135 kfree(sky2->rx_ring);
1137 sky2->tx_ring = NULL;
1138 sky2->rx_ring = NULL;
1139 return err;
1140 }
1142 /* Modular subtraction in ring */
1143 static inline int tx_dist(unsigned tail, unsigned head)
1144 {
1145 return (head - tail) & (TX_RING_SIZE - 1);
1146 }
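/* Example: with TX_RING_SIZE == 512, tx_dist(510, 2) == (2 - 510) & 511
 * == 4: the masked subtraction stays correct across the wrap. */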
1148 /* Number of list elements available for next tx */
1149 static inline int tx_avail(const struct sky2_port *sky2)
1150 {
1151 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
1152 }
1154 /* Estimate of number of transmit list elements required */
1155 static unsigned tx_le_req(const struct sk_buff *skb)
1156 {
1157 unsigned count;
1159 count = sizeof(dma_addr_t) / sizeof(u32);
1160 count += skb_shinfo(skb)->nr_frags * count;
1162 if (skb_is_gso(skb))
1163 ++count;
1165 if (skb->ip_summed == CHECKSUM_HW)
1166 ++count;
1168 return count;
1169 }
1171 /*
1172 * Put one packet in ring for transmit.
1173 * A single packet can generate multiple list elements, and
1174 * the number of ring elements will probably be less than the number
1175 * of list elements used.
1177 * No BH disabling for tx_lock here (like tg3)
1178 */
1179 static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1180 {
1181 struct sky2_port *sky2 = netdev_priv(dev);
1182 struct sky2_hw *hw = sky2->hw;
1183 struct sky2_tx_le *le = NULL;
1184 struct tx_ring_info *re;
1185 unsigned i, len;
1186 int avail;
1187 dma_addr_t mapping;
1188 u32 addr64;
1189 u16 mss;
1190 u8 ctrl;
1192 /* No BH disabling for tx_lock here. We are running in BH disabled
1193 * context and TX reclaim runs via poll inside of a software
1194 * interrupt, and no related locks in IRQ processing.
1195 */
1196 if (!spin_trylock(&sky2->tx_lock))
1197 return NETDEV_TX_LOCKED;
1199 if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
1200 /* There is a known but harmless race with lockless tx
1201 * and netif_stop_queue.
1202 */
1203 if (!netif_queue_stopped(dev)) {
1204 netif_stop_queue(dev);
1205 if (net_ratelimit())
1206 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
1207 dev->name);
1208 }
1209 spin_unlock(&sky2->tx_lock);
1211 return NETDEV_TX_BUSY;
1212 }
1214 if (unlikely(netif_msg_tx_queued(sky2)))
1215 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
1216 dev->name, sky2->tx_prod, skb->len);
1218 len = skb_headlen(skb);
1219 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
1220 addr64 = high32(mapping);
1222 re = sky2->tx_ring + sky2->tx_prod;
1224 /* Send high bits if changed or crosses boundary */
1225 if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
1226 le = get_tx_le(sky2);
1227 le->tx.addr = cpu_to_le32(addr64);
1228 le->ctrl = 0;
1229 le->opcode = OP_ADDR64 | HW_OWNER;
1230 sky2->tx_addr64 = high32(mapping + len);
1231 }
1233 /* Check for TCP Segmentation Offload */
1234 mss = skb_shinfo(skb)->gso_size;
1235 if (mss != 0) {
1236 /* just drop the packet if non-linear expansion fails */
1237 if (skb_header_cloned(skb) &&
1238 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
1239 dev_kfree_skb(skb);
1240 goto out_unlock;
1241 }
1243 mss += ((skb->h.th->doff - 5) * 4); /* TCP options */
1244 mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
1245 mss += ETH_HLEN;
1246 }
1248 if (mss != sky2->tx_last_mss) {
1249 le = get_tx_le(sky2);
1250 le->tx.tso.size = cpu_to_le16(mss);
1251 le->tx.tso.rsvd = 0;
1252 le->opcode = OP_LRGLEN | HW_OWNER;
1253 le->ctrl = 0;
1254 sky2->tx_last_mss = mss;
1255 }
1257 ctrl = 0;
1258 #ifdef SKY2_VLAN_TAG_USED
1259 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1260 if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
1261 if (!le) {
1262 le = get_tx_le(sky2);
1263 le->tx.addr = 0;
1264 le->opcode = OP_VLAN|HW_OWNER;
1265 le->ctrl = 0;
1266 } else
1267 le->opcode |= OP_VLAN;
1268 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1269 ctrl |= INS_VLAN;
1270 }
1271 #endif
1273 /* Handle TCP checksum offload */
1274 if (skb->ip_summed == CHECKSUM_HW) {
1275 u16 hdr = skb->h.raw - skb->data;
1276 u16 offset = hdr + skb->csum;
1278 ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1279 if (skb->nh.iph->protocol == IPPROTO_UDP)
1280 ctrl |= UDPTCP;
1282 le = get_tx_le(sky2);
1283 le->tx.csum.start = cpu_to_le16(hdr);
1284 le->tx.csum.offset = cpu_to_le16(offset);
1285 le->length = 0; /* initial checksum value */
1286 le->ctrl = 1; /* one packet */
1287 le->opcode = OP_TCPLISW | HW_OWNER;
1288 }
1290 le = get_tx_le(sky2);
1291 le->tx.addr = cpu_to_le32((u32) mapping);
1292 le->length = cpu_to_le16(len);
1293 le->ctrl = ctrl;
1294 le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1296 /* Record the transmit mapping info */
1297 re->skb = skb;
1298 pci_unmap_addr_set(re, mapaddr, mapping);
1300 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1301 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1302 struct tx_ring_info *fre;
1304 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1305 frag->size, PCI_DMA_TODEVICE);
1306 addr64 = high32(mapping);
1307 if (addr64 != sky2->tx_addr64) {
1308 le = get_tx_le(sky2);
1309 le->tx.addr = cpu_to_le32(addr64);
1310 le->ctrl = 0;
1311 le->opcode = OP_ADDR64 | HW_OWNER;
1312 sky2->tx_addr64 = addr64;
1313 }
1315 le = get_tx_le(sky2);
1316 le->tx.addr = cpu_to_le32((u32) mapping);
1317 le->length = cpu_to_le16(frag->size);
1318 le->ctrl = ctrl;
1319 le->opcode = OP_BUFFER | HW_OWNER;
1321 fre = sky2->tx_ring
1322 + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
1323 pci_unmap_addr_set(fre, mapaddr, mapping);
1324 }
1326 re->idx = sky2->tx_prod;
1327 le->ctrl |= EOP;
1329 avail = tx_avail(sky2);
1330 if (mss != 0 || avail < TX_MIN_PENDING) {
1331 le->ctrl |= FRC_STAT;
1332 if (avail <= MAX_SKB_TX_LE)
1333 netif_stop_queue(dev);
1334 }
1336 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1338 out_unlock:
1339 spin_unlock(&sky2->tx_lock);
1341 dev->trans_start = jiffies;
1342 return NETDEV_TX_OK;
1343 }
1345 /*
1346 * Free ring elements starting at tx_cons until "done"
1348 * NB: the hardware will tell us about partial completion of multi-part
1349 * buffers; these are deferred until completion.
1350 */
1351 static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1352 {
1353 struct net_device *dev = sky2->netdev;
1354 struct pci_dev *pdev = sky2->hw->pdev;
1355 u16 nxt, put;
1356 unsigned i;
1358 BUG_ON(done >= TX_RING_SIZE);
1360 if (unlikely(netif_msg_tx_done(sky2)))
1361 printk(KERN_DEBUG "%s: tx done, up to %u\n",
1362 dev->name, done);
1364 for (put = sky2->tx_cons; put != done; put = nxt) {
1365 struct tx_ring_info *re = sky2->tx_ring + put;
1366 struct sk_buff *skb = re->skb;
1368 nxt = re->idx;
1369 BUG_ON(nxt >= TX_RING_SIZE);
1370 prefetch(sky2->tx_ring + nxt);
1372 /* Check for partial status */
1373 if (tx_dist(put, done) < tx_dist(put, nxt))
1374 break;
1376 skb = re->skb;
1377 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
1378 skb_headlen(skb), PCI_DMA_TODEVICE);
1380 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1381 struct tx_ring_info *fre;
1382 fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
1383 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
1384 skb_shinfo(skb)->frags[i].size,
1385 PCI_DMA_TODEVICE);
1386 }
1388 dev_kfree_skb(skb);
1389 }
1391 sky2->tx_cons = put;
1392 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
1393 netif_wake_queue(dev);
1394 }
1396 /* Cleanup all untransmitted buffers, assume transmitter not running */
1397 static void sky2_tx_clean(struct sky2_port *sky2)
1398 {
1399 spin_lock_bh(&sky2->tx_lock);
1400 sky2_tx_complete(sky2, sky2->tx_prod);
1401 spin_unlock_bh(&sky2->tx_lock);
1402 }
1404 /* Network shutdown */
1405 static int sky2_down(struct net_device *dev)
1406 {
1407 struct sky2_port *sky2 = netdev_priv(dev);
1408 struct sky2_hw *hw = sky2->hw;
1409 unsigned port = sky2->port;
1410 u16 ctrl;
1411 u32 imask;
1413 /* Never really got started! */
1414 if (!sky2->tx_le)
1415 return 0;
1417 if (netif_msg_ifdown(sky2))
1418 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1420 /* Stop more packets from being queued */
1421 netif_stop_queue(dev);
1423 /* Disable port IRQ */
1424 imask = sky2_read32(hw, B0_IMSK);
1425 imask &= ~portirq_msk[port];
1426 sky2_write32(hw, B0_IMSK, imask);
1428 sky2_phy_reset(hw, port);
1430 /* Stop transmitter */
1431 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1432 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1434 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1435 RB_RST_SET | RB_DIS_OP_MD);
1437 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1438 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1439 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1441 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1443 /* Workaround shared GMAC reset */
1444 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1445 && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
1446 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1448 /* Disable Force Sync bit and Enable Alloc bit */
1449 sky2_write8(hw, SK_REG(port, TXA_CTRL),
1450 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1452 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1453 sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1454 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1456 /* Reset the PCI FIFO of the async Tx queue */
1457 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1458 BMU_RST_SET | BMU_FIFO_RST);
1460 /* Reset the Tx prefetch units */
1461 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
1462 PREF_UNIT_RST_SET);
1464 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1466 sky2_rx_stop(sky2);
1468 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1469 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1471 /* turn off LED's */
1472 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1474 synchronize_irq(hw->pdev->irq);
1476 sky2_tx_clean(sky2);
1477 sky2_rx_clean(sky2);
1479 pci_free_consistent(hw->pdev, RX_LE_BYTES,
1480 sky2->rx_le, sky2->rx_le_map);
1481 kfree(sky2->rx_ring);
1483 pci_free_consistent(hw->pdev,
1484 TX_RING_SIZE * sizeof(struct sky2_tx_le),
1485 sky2->tx_le, sky2->tx_le_map);
1486 kfree(sky2->tx_ring);
1488 sky2->tx_le = NULL;
1489 sky2->rx_le = NULL;
1491 sky2->rx_ring = NULL;
1492 sky2->tx_ring = NULL;
1494 return 0;
1495 }
1497 static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1498 {
1499 if (!hw->copper)
1500 return SPEED_1000;
1502 if (hw->chip_id == CHIP_ID_YUKON_FE)
1503 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1505 switch (aux & PHY_M_PS_SPEED_MSK) {
1506 case PHY_M_PS_SPEED_1000:
1507 return SPEED_1000;
1508 case PHY_M_PS_SPEED_100:
1509 return SPEED_100;
1510 default:
1511 return SPEED_10;
1512 }
1513 }
1515 static void sky2_link_up(struct sky2_port *sky2)
1516 {
1517 struct sky2_hw *hw = sky2->hw;
1518 unsigned port = sky2->port;
1519 u16 reg;
1521 /* Enable Transmit FIFO Underrun */
1522 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1524 reg = gma_read16(hw, port, GM_GP_CTRL);
1525 if (sky2->autoneg == AUTONEG_DISABLE) {
1526 reg |= GM_GPCR_AU_ALL_DIS;
1528 /* Is write/read necessary? Copied from sky2_mac_init */
1529 gma_write16(hw, port, GM_GP_CTRL, reg);
1530 gma_read16(hw, port, GM_GP_CTRL);
1532 switch (sky2->speed) {
1533 case SPEED_1000:
1534 reg &= ~GM_GPCR_SPEED_100;
1535 reg |= GM_GPCR_SPEED_1000;
1536 break;
1537 case SPEED_100:
1538 reg &= ~GM_GPCR_SPEED_1000;
1539 reg |= GM_GPCR_SPEED_100;
1540 break;
1541 case SPEED_10:
1542 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
1543 break;
1544 }
1545 } else
1546 reg &= ~GM_GPCR_AU_ALL_DIS;
1548 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
1549 reg |= GM_GPCR_DUP_FULL;
1551 /* enable Rx/Tx */
1552 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1553 gma_write16(hw, port, GM_GP_CTRL, reg);
1554 gma_read16(hw, port, GM_GP_CTRL);
1556 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1558 netif_carrier_on(sky2->netdev);
1559 netif_wake_queue(sky2->netdev);
1561 /* Turn on link LED */
1562 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1563 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1565 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
1566 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1567 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
1569 switch(sky2->speed) {
1570 case SPEED_10:
1571 led |= PHY_M_LEDC_INIT_CTRL(7);
1572 break;
1574 case SPEED_100:
1575 led |= PHY_M_LEDC_STA1_CTRL(7);
1576 break;
1578 case SPEED_1000:
1579 led |= PHY_M_LEDC_STA0_CTRL(7);
1580 break;
1581 }
1583 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
1584 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led);
1585 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
1586 }
1588 if (netif_msg_link(sky2))
1589 printk(KERN_INFO PFX
1590 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
1591 sky2->netdev->name, sky2->speed,
1592 sky2->duplex == DUPLEX_FULL ? "full" : "half",
1593 (sky2->tx_pause && sky2->rx_pause) ? "both" :
1594 sky2->tx_pause ? "tx" : sky2->rx_pause ? "rx" : "none");
1595 }
1597 static void sky2_link_down(struct sky2_port *sky2)
1598 {
1599 struct sky2_hw *hw = sky2->hw;
1600 unsigned port = sky2->port;
1601 u16 reg;
1603 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1605 reg = gma_read16(hw, port, GM_GP_CTRL);
1606 reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1607 gma_write16(hw, port, GM_GP_CTRL, reg);
1608 gma_read16(hw, port, GM_GP_CTRL); /* PCI post */
1610 if (sky2->rx_pause && !sky2->tx_pause) {
1611 /* restore Asymmetric Pause bit */
1612 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
1613 gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
1614 | PHY_M_AN_ASP);
1615 }
1617 netif_carrier_off(sky2->netdev);
1618 netif_stop_queue(sky2->netdev);
1620 /* Turn on link LED */
1621 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
1623 if (netif_msg_link(sky2))
1624 printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
1625 sky2_phy_init(hw, port);
1626 }
1628 static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1629 {
1630 struct sky2_hw *hw = sky2->hw;
1631 unsigned port = sky2->port;
1632 u16 lpa;
1634 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1636 if (lpa & PHY_M_AN_RF) {
1637 printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
1638 return -1;
1639 }
1641 if (hw->chip_id != CHIP_ID_YUKON_FE &&
1642 gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
1643 printk(KERN_ERR PFX "%s: master/slave fault",
1644 sky2->netdev->name);
1645 return -1;
1646 }
1648 if (!(aux & PHY_M_PS_SPDUP_RES)) {
1649 printk(KERN_ERR PFX "%s: speed/duplex mismatch",
1650 sky2->netdev->name);
1651 return -1;
1652 }
1654 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1656 sky2->speed = sky2_phy_speed(hw, aux);
1658 /* Pause bits are offset (9..8) */
1659 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
1660 aux >>= 6;
1662 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
1663 sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;
1665 if ((sky2->tx_pause || sky2->rx_pause)
1666 && !(sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
1667 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1668 else
1669 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1671 return 0;
1672 }
1674 /* Interrupt from PHY */
1675 static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
1676 {
1677 struct net_device *dev = hw->dev[port];
1678 struct sky2_port *sky2 = netdev_priv(dev);
1679 u16 istatus, phystat;
1681 if (!netif_running(dev))
1682 return;
1684 spin_lock(&sky2->phy_lock);
1685 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1686 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1688 if (netif_msg_intr(sky2))
1689 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
1690 sky2->netdev->name, istatus, phystat);
1692 if (istatus & PHY_M_IS_AN_COMPL) {
1693 if (sky2_autoneg_done(sky2, phystat) == 0)
1694 sky2_link_up(sky2);
1695 goto out;
1696 }
1698 if (istatus & PHY_M_IS_LSP_CHANGE)
1699 sky2->speed = sky2_phy_speed(hw, phystat);
1701 if (istatus & PHY_M_IS_DUP_CHANGE)
1702 sky2->duplex =
1703 (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1705 if (istatus & PHY_M_IS_LST_CHANGE) {
1706 if (phystat & PHY_M_PS_LINK_UP)
1707 sky2_link_up(sky2);
1708 else
1709 sky2_link_down(sky2);
1710 }
1711 out:
1712 spin_unlock(&sky2->phy_lock);
1713 }
1716 /* Transmit timeout is only called if we are running, carrier is up
1717 * and tx queue is full (stopped).
1718 */
1719 static void sky2_tx_timeout(struct net_device *dev)
1720 {
1721 struct sky2_port *sky2 = netdev_priv(dev);
1722 struct sky2_hw *hw = sky2->hw;
1723 unsigned txq = txqaddr[sky2->port];
1724 u16 report, done;
1726 if (netif_msg_timer(sky2))
1727 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1729 report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1730 done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
1732 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
1733 dev->name,
1734 sky2->tx_cons, sky2->tx_prod, report, done);
1736 if (report != done) {
1737 printk(KERN_INFO PFX "status burst pending (irq moderation?)\n");
1739 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1740 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1741 } else if (report != sky2->tx_cons) {
1742 printk(KERN_INFO PFX "status report lost?\n");
1744 spin_lock_bh(&sky2->tx_lock);
1745 sky2_tx_complete(sky2, report);
1746 spin_unlock_bh(&sky2->tx_lock);
1747 } else {
1748 printk(KERN_INFO PFX "hardware hung? flushing\n");
1750 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
1751 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1753 sky2_tx_clean(sky2);
1755 sky2_qset(hw, txq);
1756 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1757 }
1758 }
1761 /* Want receive buffer size to be multiple of 64 bits
1762 * and include room for vlan and truncation
1763 */
1764 static inline unsigned sky2_buf_size(int mtu)
1765 {
1766 return ALIGN(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
1767 }
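/* Example: sky2_buf_size(9000) == ALIGN(9018, 8) + 8 == 9032, a multiple
 * of 8 with 8 spare bytes for the truncation threshold logic. */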
1769 static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1770 {
1771 struct sky2_port *sky2 = netdev_priv(dev);
1772 struct sky2_hw *hw = sky2->hw;
1773 int err;
1774 u16 ctl, mode;
1775 u32 imask;
1777 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1778 return -EINVAL;
1780 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
1781 return -EINVAL;
1783 if (!netif_running(dev)) {
1784 dev->mtu = new_mtu;
1785 return 0;
1786 }
1788 imask = sky2_read32(hw, B0_IMSK);
1789 sky2_write32(hw, B0_IMSK, 0);
1791 dev->trans_start = jiffies; /* prevent tx timeout */
1792 netif_stop_queue(dev);
1793 netif_poll_disable(hw->dev[0]);
1795 synchronize_irq(hw->pdev->irq);
1797 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
1798 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
1799 sky2_rx_stop(sky2);
1800 sky2_rx_clean(sky2);
1802 dev->mtu = new_mtu;
1803 sky2->rx_bufsize = sky2_buf_size(new_mtu);
1804 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
1805 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
1807 if (dev->mtu > ETH_DATA_LEN)
1808 mode |= GM_SMOD_JUMBO_ENA;
1810 gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);
1812 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
1814 err = sky2_rx_start(sky2);
1815 sky2_write32(hw, B0_IMSK, imask);
1817 if (err)
1818 dev_close(dev);
1819 else {
1820 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
1822 netif_poll_enable(hw->dev[0]);
1823 netif_wake_queue(dev);
1824 }
1826 return err;
1827 }
1829 /*
1830 * Receive one packet.
1831 * For small packets or errors, just reuse existing skb.
1832 * For larger packets, get new buffer.
1833 */
1834 static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1835 u16 length, u32 status)
1836 {
1837 struct ring_info *re = sky2->rx_ring + sky2->rx_next;
1838 struct sk_buff *skb = NULL;
1840 if (unlikely(netif_msg_rx_status(sky2)))
1841 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
1842 sky2->netdev->name, sky2->rx_next, status, length);
1844 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
1845 prefetch(sky2->rx_ring + sky2->rx_next);
1847 if (status & GMR_FS_ANY_ERR)
1848 goto error;
1850 if (!(status & GMR_FS_RX_OK))
1851 goto resubmit;
1853 if (length > sky2->netdev->mtu + ETH_HLEN)
1854 goto oversize;
1856 if (length < copybreak) {
1857 skb = alloc_skb(length + 2, GFP_ATOMIC);
1858 if (!skb)
1859 goto resubmit;
1861 skb_reserve(skb, 2);
1862 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
1863 length, PCI_DMA_FROMDEVICE);
1864 memcpy(skb->data, re->skb->data, length);
1865 skb->ip_summed = re->skb->ip_summed;
1866 skb->csum = re->skb->csum;
1867 pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
1868 length, PCI_DMA_FROMDEVICE);
1869 } else {
1870 struct sk_buff *nskb;
1872 nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
1873 if (!nskb)
1874 goto resubmit;
1876 skb = re->skb;
1877 re->skb = nskb;
1878 pci_unmap_single(sky2->hw->pdev, re->mapaddr,
1879 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
1880 prefetch(skb->data);
1882 re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
1883 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
1886 skb_put(skb, length);
1887 resubmit:
1888 re->skb->ip_summed = CHECKSUM_NONE;
1889 sky2_rx_add(sky2, re->mapaddr);
1891 return skb;
1893 oversize:
1894 ++sky2->net_stats.rx_over_errors;
1895 goto resubmit;
1897 error:
1898 ++sky2->net_stats.rx_errors;
1900 if (netif_msg_rx_err(sky2) && net_ratelimit())
1901 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1902 sky2->netdev->name, status, length);
1904 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
1905 sky2->net_stats.rx_length_errors++;
1906 if (status & GMR_FS_FRAGMENT)
1907 sky2->net_stats.rx_frame_errors++;
1908 if (status & GMR_FS_CRC_ERR)
1909 sky2->net_stats.rx_crc_errors++;
1910 if (status & GMR_FS_RX_FF_OV)
1911 sky2->net_stats.rx_fifo_errors++;
1913 goto resubmit;
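/* Note the copybreak trade-off above: frames shorter than copybreak
 * are copied into a small freshly allocated skb so the original DMA
 * buffer can be resubmitted untouched, while longer frames keep their
 * full-size buffer and only a replacement buffer is mapped.  The
 * skb_reserve(skb, 2) keeps the IP header longword-aligned after the
 * 14-byte Ethernet header.
 */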
1916 /* Transmit complete */
1917 static inline void sky2_tx_done(struct net_device *dev, u16 last)
1919 struct sky2_port *sky2 = netdev_priv(dev);
1921 if (netif_running(dev)) {
1922 spin_lock(&sky2->tx_lock);
1923 sky2_tx_complete(sky2, last);
1924 spin_unlock(&sky2->tx_lock);
1928 /* Is status ring empty or is there more to do? */
1929 static inline int sky2_more_work(const struct sky2_hw *hw)
1930 {
1931 return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
1932 }
1934 /* Process status response ring */
1935 static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1937 struct sky2_port *sky2;
1938 int work_done = 0;
1939 unsigned buf_write[2] = { 0, 0 };
1940 u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1942 rmb();
1944 while (hw->st_idx != hwidx) {
1945 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1946 struct net_device *dev;
1947 struct sk_buff *skb;
1948 u32 status;
1949 u16 length;
1951 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
1953 BUG_ON(le->link >= 2);
1954 dev = hw->dev[le->link];
1956 sky2 = netdev_priv(dev);
1957 length = le->length;
1958 status = le->status;
1960 switch (le->opcode & ~HW_OWNER) {
1961 case OP_RXSTAT:
1962 skb = sky2_receive(sky2, length, status);
1963 if (!skb)
1964 break;
1966 skb->dev = dev;
1967 skb->protocol = eth_type_trans(skb, dev);
1968 dev->last_rx = jiffies;
1970 #ifdef SKY2_VLAN_TAG_USED
1971 if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
1972 vlan_hwaccel_receive_skb(skb,
1973 sky2->vlgrp,
1974 be16_to_cpu(sky2->rx_tag));
1975 } else
1976 #endif
1977 netif_receive_skb(skb);
1979 /* Update receiver after 16 frames */
1980 if (++buf_write[le->link] == RX_BUF_WRITE) {
1981 sky2_put_idx(hw, rxqaddr[le->link],
1982 sky2->rx_put);
1983 buf_write[le->link] = 0;
1986 /* Stop after net poll weight */
1987 if (++work_done >= to_do)
1988 goto exit_loop;
1989 break;
1991 #ifdef SKY2_VLAN_TAG_USED
1992 case OP_RXVLAN:
1993 sky2->rx_tag = length;
1994 break;
1996 case OP_RXCHKSVLAN:
1997 sky2->rx_tag = length;
1998 /* fall through */
1999 #endif
2000 case OP_RXCHKS:
2001 skb = sky2->rx_ring[sky2->rx_next].skb;
2002 skb->ip_summed = CHECKSUM_HW;
2003 skb->csum = le16_to_cpu(status);
2004 break;
2006 case OP_TXINDEXLE:
2007 /* TX index reports status for both ports */
2008 BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
2009 sky2_tx_done(hw->dev[0], status & 0xfff);
2010 if (hw->dev[1])
2011 sky2_tx_done(hw->dev[1],
2012 ((status >> 24) & 0xff)
2013 | (u16)(length & 0xf) << 8);
2014 break;
2016 default:
2017 if (net_ratelimit())
2018 printk(KERN_WARNING PFX
2019 "unknown status opcode 0x%x\n", le->opcode);
2020 goto exit_loop;
2024 exit_loop:
2025 if (buf_write[0]) {
2026 sky2 = netdev_priv(hw->dev[0]);
2027 sky2_put_idx(hw, Q_R1, sky2->rx_put);
2030 if (buf_write[1]) {
2031 sky2 = netdev_priv(hw->dev[1]);
2032 sky2_put_idx(hw, Q_R2, sky2->rx_put);
2035 return work_done;
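/* Layout of the OP_TXINDEXLE decode above: bits 11:0 of 'status' carry
 * the port 0 transmit index, and the port 1 index is reassembled from
 * bits 31:24 of 'status' (forming its low byte) and bits 3:0 of
 * 'length' (forming bits 11:8), so a single status element can report
 * transmit completions for both ports at once.
 */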
2038 static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
2040 struct net_device *dev = hw->dev[port];
2042 if (net_ratelimit())
2043 printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
2044 dev->name, status);
2046 if (status & Y2_IS_PAR_RD1) {
2047 if (net_ratelimit())
2048 printk(KERN_ERR PFX "%s: ram data read parity error\n",
2049 dev->name);
2050 /* Clear IRQ */
2051 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
2054 if (status & Y2_IS_PAR_WR1) {
2055 if (net_ratelimit())
2056 printk(KERN_ERR PFX "%s: ram data write parity error\n",
2057 dev->name);
2059 sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
2062 if (status & Y2_IS_PAR_MAC1) {
2063 if (net_ratelimit())
2064 printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
2065 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
2068 if (status & Y2_IS_PAR_RX1) {
2069 if (net_ratelimit())
2070 printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
2071 sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
2074 if (status & Y2_IS_TCP_TXA1) {
2075 if (net_ratelimit())
2076 printk(KERN_ERR PFX "%s: TCP segmentation error\n",
2077 dev->name);
2078 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
2082 static void sky2_hw_intr(struct sky2_hw *hw)
2084 u32 status = sky2_read32(hw, B0_HWE_ISRC);
2086 if (status & Y2_IS_TIST_OV)
2087 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2089 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2090 u16 pci_err;
2092 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2093 if (net_ratelimit())
2094 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
2095 pci_name(hw->pdev), pci_err);
2097 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2098 sky2_pci_write16(hw, PCI_STATUS,
2099 pci_err | PCI_STATUS_ERROR_BITS);
2100 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2103 if (status & Y2_IS_PCI_EXP) {
2104 /* PCI-Express uncorrectable error occurred */
2105 u32 pex_err;
2107 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
2109 if (net_ratelimit())
2110 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
2111 pci_name(hw->pdev), pex_err);
2113 /* clear the interrupt */
2114 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2115 sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
2116 0xffffffffUL);
2117 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2119 if (pex_err & PEX_FATAL_ERRORS) {
2120 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
2121 hwmsk &= ~Y2_IS_PCI_EXP;
2122 sky2_write32(hw, B0_HWE_IMSK, hwmsk);
2126 if (status & Y2_HWE_L1_MASK)
2127 sky2_hw_error(hw, 0, status);
2128 status >>= 8;
2129 if (status & Y2_HWE_L1_MASK)
2130 sky2_hw_error(hw, 1, status);
2133 static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2135 struct net_device *dev = hw->dev[port];
2136 struct sky2_port *sky2 = netdev_priv(dev);
2137 u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2139 if (netif_msg_intr(sky2))
2140 printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
2141 dev->name, status);
2143 if (status & GM_IS_RX_FF_OR) {
2144 ++sky2->net_stats.rx_fifo_errors;
2145 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2148 if (status & GM_IS_TX_FF_UR) {
2149 ++sky2->net_stats.tx_fifo_errors;
2150 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2154 /* This should never happen: it is a fatal situation */
2155 static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2156 const char *rxtx, u32 mask)
2158 struct net_device *dev = hw->dev[port];
2159 struct sky2_port *sky2 = netdev_priv(dev);
2160 u32 imask;
2162 printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
2163 dev ? dev->name : "<not registered>", rxtx);
2165 imask = sky2_read32(hw, B0_IMSK);
2166 imask &= ~mask;
2167 sky2_write32(hw, B0_IMSK, imask);
2169 if (dev) {
2170 spin_lock(&sky2->phy_lock);
2171 sky2_link_down(sky2);
2172 spin_unlock(&sky2->phy_lock);
2176 /* If idle then force a fake soft NAPI poll once a second
2177 * to work around cases where an edge-triggered interrupt is shared.
2178 */
2179 static inline void sky2_idle_start(struct sky2_hw *hw)
2181 if (idle_timeout > 0)
2182 mod_timer(&hw->idle_timer,
2183 jiffies + msecs_to_jiffies(idle_timeout));
2186 static void sky2_idle(unsigned long arg)
2188 struct sky2_hw *hw = (struct sky2_hw *) arg;
2189 struct net_device *dev = hw->dev[0];
2191 if (__netif_rx_schedule_prep(dev))
2192 __netif_rx_schedule(dev);
2194 mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
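/* Scheduling dev[0] into NAPI here is the "fake soft poll": sky2_poll()
 * runs exactly as if an interrupt had fired, so events missed because a
 * shared edge-triggered IRQ never re-asserted are still handled within
 * one idle_timeout period.
 */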
2198 static int sky2_poll(struct net_device *dev0, int *budget)
2200 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
2201 int work_limit = min(dev0->quota, *budget);
2202 int work_done = 0;
2203 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2205 if (status & Y2_IS_HW_ERR)
2206 sky2_hw_intr(hw);
2208 if (status & Y2_IS_IRQ_PHY1)
2209 sky2_phy_intr(hw, 0);
2211 if (status & Y2_IS_IRQ_PHY2)
2212 sky2_phy_intr(hw, 1);
2214 if (status & Y2_IS_IRQ_MAC1)
2215 sky2_mac_intr(hw, 0);
2217 if (status & Y2_IS_IRQ_MAC2)
2218 sky2_mac_intr(hw, 1);
2220 if (status & Y2_IS_CHK_RX1)
2221 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
2223 if (status & Y2_IS_CHK_RX2)
2224 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
2226 if (status & Y2_IS_CHK_TXA1)
2227 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
2229 if (status & Y2_IS_CHK_TXA2)
2230 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
2232 work_done = sky2_status_intr(hw, work_limit);
2233 *budget -= work_done;
2234 dev0->quota -= work_done;
2236 if (status & Y2_IS_STAT_BMU)
2237 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2239 if (sky2_more_work(hw))
2240 return 1;
2242 netif_rx_complete(dev0);
2244 sky2_read32(hw, B0_Y2_SP_LISR);
2245 return 0;
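/* This follows the usual NAPI completion pattern: returning 1 keeps the
 * device on the poll list, while returning 0 after netif_rx_complete()
 * allows interrupts again; the trailing read of B0_Y2_SP_LISR pairs
 * with the masking read of B0_Y2_SP_ISRC2 in sky2_intr() so the chip
 * can raise the next interrupt.
 */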
2248 static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2250 struct sky2_hw *hw = dev_id;
2251 struct net_device *dev0 = hw->dev[0];
2252 u32 status;
2254 /* Reading this register masks interrupts as a side effect */
2255 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
2256 if (status == 0 || status == ~0)
2257 return IRQ_NONE;
2259 prefetch(&hw->st_le[hw->st_idx]);
2260 if (likely(__netif_rx_schedule_prep(dev0)))
2261 __netif_rx_schedule(dev0);
2263 return IRQ_HANDLED;
2266 #ifdef CONFIG_NET_POLL_CONTROLLER
2267 static void sky2_netpoll(struct net_device *dev)
2269 struct sky2_port *sky2 = netdev_priv(dev);
2270 struct net_device *dev0 = sky2->hw->dev[0];
2272 if (netif_running(dev) && __netif_rx_schedule_prep(dev0))
2273 __netif_rx_schedule(dev0);
2275 #endif
2277 /* Chip internal frequency for clock calculations */
2278 static inline u32 sky2_mhz(const struct sky2_hw *hw)
2279 {
2280 switch (hw->chip_id) {
2281 case CHIP_ID_YUKON_EC:
2282 case CHIP_ID_YUKON_EC_U:
2283 return 125; /* 125 MHz */
2284 case CHIP_ID_YUKON_FE:
2285 return 100; /* 100 MHz */
2286 default: /* YUKON_XL */
2287 return 156; /* 156 MHz */
2288 }
2289 }
2291 static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
2292 {
2293 return sky2_mhz(hw) * us;
2294 }
2296 static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2297 {
2298 return clk / sky2_mhz(hw);
2299 }
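/* For example, on a 125 MHz Yukon EC the 1000 us transmit timer
 * programmed in sky2_reset() becomes sky2_us2clk(hw, 1000) =
 * 125 * 1000 = 125000 clock ticks; sky2_clk2us() inverts the
 * conversion when sky2_get_coalesce() reports the values back.
 */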
2302 static int sky2_reset(struct sky2_hw *hw)
2304 u16 status;
2305 u8 t8, pmd_type;
2306 int i;
2308 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2310 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
2311 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
2312 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
2313 pci_name(hw->pdev), hw->chip_id);
2314 return -EOPNOTSUPP;
2317 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2319 /* This rev is really old, and requires untested workarounds */
2320 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
2321 printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n",
2322 pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
2323 hw->chip_id, hw->chip_rev);
2324 return -EOPNOTSUPP;
2327 /* disable ASF */
2328 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2329 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2330 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2333 /* do a SW reset */
2334 sky2_write8(hw, B0_CTST, CS_RST_SET);
2335 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2337 /* clear PCI errors, if any */
2338 status = sky2_pci_read16(hw, PCI_STATUS);
2340 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2341 sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);
2344 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2346 /* clear any PEX errors */
2347 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
2348 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2351 pmd_type = sky2_read8(hw, B2_PMD_TYP);
2352 hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
2354 hw->ports = 1;
2355 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2356 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2357 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2358 ++hw->ports;
2361 sky2_set_power_state(hw, PCI_D0);
2363 for (i = 0; i < hw->ports; i++) {
2364 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2365 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2368 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2370 /* Clear I2C IRQ noise */
2371 sky2_write32(hw, B2_I2C_IRQ, 1);
2373 /* turn off hardware timer (unused) */
2374 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
2375 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2377 sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
2379 /* Turn off descriptor polling */
2380 sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
2382 /* Turn off receive timestamp */
2383 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
2384 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2386 /* enable the Tx Arbiters */
2387 for (i = 0; i < hw->ports; i++)
2388 sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2390 /* Initialize ram interface */
2391 for (i = 0; i < hw->ports; i++) {
2392 sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
2394 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
2395 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
2396 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
2397 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
2398 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
2399 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
2400 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
2401 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
2402 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
2403 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
2404 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
2405 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
2408 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
2410 for (i = 0; i < hw->ports; i++)
2411 sky2_phy_reset(hw, i);
2413 memset(hw->st_le, 0, STATUS_LE_BYTES);
2414 hw->st_idx = 0;
2416 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
2417 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
2419 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
2420 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
2422 /* Set the list last index */
2423 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2425 sky2_write16(hw, STAT_TX_IDX_TH, 10);
2426 sky2_write8(hw, STAT_FIFO_WM, 16);
2428 /* set Status-FIFO ISR watermark */
2429 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2430 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2431 else
2432 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2434 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2435 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2436 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
2438 /* enable status unit */
2439 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
2441 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2442 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2443 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2445 return 0;
2448 static u32 sky2_supported_modes(const struct sky2_hw *hw)
2450 u32 modes;
2451 if (hw->copper) {
2452 modes = SUPPORTED_10baseT_Half
2453 | SUPPORTED_10baseT_Full
2454 | SUPPORTED_100baseT_Half
2455 | SUPPORTED_100baseT_Full
2456 | SUPPORTED_Autoneg | SUPPORTED_TP;
2458 if (hw->chip_id != CHIP_ID_YUKON_FE)
2459 modes |= SUPPORTED_1000baseT_Half
2460 | SUPPORTED_1000baseT_Full;
2461 } else
2462 modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
2463 | SUPPORTED_Autoneg;
2464 return modes;
2467 static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2469 struct sky2_port *sky2 = netdev_priv(dev);
2470 struct sky2_hw *hw = sky2->hw;
2472 ecmd->transceiver = XCVR_INTERNAL;
2473 ecmd->supported = sky2_supported_modes(hw);
2474 ecmd->phy_address = PHY_ADDR_MARV;
2475 if (hw->copper) {
2476 ecmd->supported = SUPPORTED_10baseT_Half
2477 | SUPPORTED_10baseT_Full
2478 | SUPPORTED_100baseT_Half
2479 | SUPPORTED_100baseT_Full
2480 | SUPPORTED_1000baseT_Half
2481 | SUPPORTED_1000baseT_Full
2482 | SUPPORTED_Autoneg | SUPPORTED_TP;
2483 ecmd->port = PORT_TP;
2484 } else
2485 ecmd->port = PORT_FIBRE;
2487 ecmd->advertising = sky2->advertising;
2488 ecmd->autoneg = sky2->autoneg;
2489 ecmd->speed = sky2->speed;
2490 ecmd->duplex = sky2->duplex;
2491 return 0;
2494 static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2496 struct sky2_port *sky2 = netdev_priv(dev);
2497 const struct sky2_hw *hw = sky2->hw;
2498 u32 supported = sky2_supported_modes(hw);
2500 if (ecmd->autoneg == AUTONEG_ENABLE) {
2501 ecmd->advertising = supported;
2502 sky2->duplex = -1;
2503 sky2->speed = -1;
2504 } else {
2505 u32 setting;
2507 switch (ecmd->speed) {
2508 case SPEED_1000:
2509 if (ecmd->duplex == DUPLEX_FULL)
2510 setting = SUPPORTED_1000baseT_Full;
2511 else if (ecmd->duplex == DUPLEX_HALF)
2512 setting = SUPPORTED_1000baseT_Half;
2513 else
2514 return -EINVAL;
2515 break;
2516 case SPEED_100:
2517 if (ecmd->duplex == DUPLEX_FULL)
2518 setting = SUPPORTED_100baseT_Full;
2519 else if (ecmd->duplex == DUPLEX_HALF)
2520 setting = SUPPORTED_100baseT_Half;
2521 else
2522 return -EINVAL;
2523 break;
2525 case SPEED_10:
2526 if (ecmd->duplex == DUPLEX_FULL)
2527 setting = SUPPORTED_10baseT_Full;
2528 else if (ecmd->duplex == DUPLEX_HALF)
2529 setting = SUPPORTED_10baseT_Half;
2530 else
2531 return -EINVAL;
2532 break;
2533 default:
2534 return -EINVAL;
2537 if ((setting & supported) == 0)
2538 return -EINVAL;
2540 sky2->speed = ecmd->speed;
2541 sky2->duplex = ecmd->duplex;
2544 sky2->autoneg = ecmd->autoneg;
2545 sky2->advertising = ecmd->advertising;
2547 if (netif_running(dev))
2548 sky2_phy_reinit(sky2);
2550 return 0;
2553 static void sky2_get_drvinfo(struct net_device *dev,
2554 struct ethtool_drvinfo *info)
2556 struct sky2_port *sky2 = netdev_priv(dev);
2558 strcpy(info->driver, DRV_NAME);
2559 strcpy(info->version, DRV_VERSION);
2560 strcpy(info->fw_version, "N/A");
2561 strcpy(info->bus_info, pci_name(sky2->hw->pdev));
2564 static const struct sky2_stat {
2565 char name[ETH_GSTRING_LEN];
2566 u16 offset;
2567 } sky2_stats[] = {
2568 { "tx_bytes", GM_TXO_OK_HI },
2569 { "rx_bytes", GM_RXO_OK_HI },
2570 { "tx_broadcast", GM_TXF_BC_OK },
2571 { "rx_broadcast", GM_RXF_BC_OK },
2572 { "tx_multicast", GM_TXF_MC_OK },
2573 { "rx_multicast", GM_RXF_MC_OK },
2574 { "tx_unicast", GM_TXF_UC_OK },
2575 { "rx_unicast", GM_RXF_UC_OK },
2576 { "tx_mac_pause", GM_TXF_MPAUSE },
2577 { "rx_mac_pause", GM_RXF_MPAUSE },
2578 { "collisions", GM_TXF_COL },
2579 { "late_collision", GM_TXF_LAT_COL },
2580 { "aborted", GM_TXF_ABO_COL },
2581 { "single_collisions", GM_TXF_SNG_COL },
2582 { "multi_collisions", GM_TXF_MUL_COL },
2584 { "rx_short", GM_RXF_SHT },
2585 { "rx_runt", GM_RXE_FRAG },
2586 { "rx_64_byte_packets", GM_RXF_64B },
2587 { "rx_65_to_127_byte_packets", GM_RXF_127B },
2588 { "rx_128_to_255_byte_packets", GM_RXF_255B },
2589 { "rx_256_to_511_byte_packets", GM_RXF_511B },
2590 { "rx_512_to_1023_byte_packets", GM_RXF_1023B },
2591 { "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
2592 { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
2593 { "rx_too_long", GM_RXF_LNG_ERR },
2594 { "rx_fifo_overflow", GM_RXE_FIFO_OV },
2595 { "rx_jabber", GM_RXF_JAB_PKT },
2596 { "rx_fcs_error", GM_RXF_FCS_ERR },
2598 { "tx_64_byte_packets", GM_TXF_64B },
2599 { "tx_65_to_127_byte_packets", GM_TXF_127B },
2600 { "tx_128_to_255_byte_packets", GM_TXF_255B },
2601 { "tx_256_to_511_byte_packets", GM_TXF_511B },
2602 { "tx_512_to_1023_byte_packets", GM_TXF_1023B },
2603 { "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
2604 { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
2605 { "tx_fifo_underrun", GM_TXE_FIFO_UR },
2606 };
2608 static u32 sky2_get_rx_csum(struct net_device *dev)
2610 struct sky2_port *sky2 = netdev_priv(dev);
2612 return sky2->rx_csum;
2615 static int sky2_set_rx_csum(struct net_device *dev, u32 data)
2617 struct sky2_port *sky2 = netdev_priv(dev);
2619 sky2->rx_csum = data;
2621 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2622 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
2624 return 0;
2627 static u32 sky2_get_msglevel(struct net_device *netdev)
2629 struct sky2_port *sky2 = netdev_priv(netdev);
2630 return sky2->msg_enable;
2633 static int sky2_nway_reset(struct net_device *dev)
2635 struct sky2_port *sky2 = netdev_priv(dev);
2637 if (sky2->autoneg != AUTONEG_ENABLE)
2638 return -EINVAL;
2640 sky2_phy_reinit(sky2);
2642 return 0;
2645 static void sky2_phy_stats(struct sky2_port *sky2, u64 *data, unsigned count)
2647 struct sky2_hw *hw = sky2->hw;
2648 unsigned port = sky2->port;
2649 int i;
2651 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2652 | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
2653 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2654 | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
2656 for (i = 2; i < count; i++)
2657 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
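/* The octet counters are kept as 64-bit MIB values in the GMAC, so
 * their high and low 32-bit halves are read separately and recombined
 * above; the remaining sky2_stats[] entries are plain 32-bit counter
 * reads.
 */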
2660 static void sky2_set_msglevel(struct net_device *netdev, u32 value)
2662 struct sky2_port *sky2 = netdev_priv(netdev);
2663 sky2->msg_enable = value;
2666 static int sky2_get_stats_count(struct net_device *dev)
2668 return ARRAY_SIZE(sky2_stats);
2671 static void sky2_get_ethtool_stats(struct net_device *dev,
2672 struct ethtool_stats *stats, u64 *data)
2674 struct sky2_port *sky2 = netdev_priv(dev);
2676 sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
2679 static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2681 int i;
2683 switch (stringset) {
2684 case ETH_SS_STATS:
2685 for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
2686 memcpy(data + i * ETH_GSTRING_LEN,
2687 sky2_stats[i].name, ETH_GSTRING_LEN);
2688 break;
2692 /* Use the hardware MIB counters for critical-path statistics and
2693 * for transmit feedback that is not reported at interrupt time.
2694 * Other errors are accounted for in the interrupt handler.
2695 */
2696 static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2698 struct sky2_port *sky2 = netdev_priv(dev);
2699 u64 data[13];
2701 sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
2703 sky2->net_stats.tx_bytes = data[0];
2704 sky2->net_stats.rx_bytes = data[1];
2705 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2706 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2707 sky2->net_stats.multicast = data[3] + data[5];
2708 sky2->net_stats.collisions = data[10];
2709 sky2->net_stats.tx_aborted_errors = data[12];
2711 return &sky2->net_stats;
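/* The data[] indices follow the sky2_stats[] order: 0/1 are the octet
 * counters, 2..7 the broadcast/multicast/unicast frame counts (tx in
 * the even slots, rx in the odd ones), 10 is "collisions" and 12 is
 * "aborted", which is why 13 entries suffice here.
 */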
2714 static int sky2_set_mac_address(struct net_device *dev, void *p)
2716 struct sky2_port *sky2 = netdev_priv(dev);
2717 struct sky2_hw *hw = sky2->hw;
2718 unsigned port = sky2->port;
2719 const struct sockaddr *addr = p;
2721 if (!is_valid_ether_addr(addr->sa_data))
2722 return -EADDRNOTAVAIL;
2724 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2725 memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
2726 dev->dev_addr, ETH_ALEN);
2727 memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
2728 dev->dev_addr, ETH_ALEN);
2730 /* virtual address for data */
2731 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
2733 /* physical address: used for pause frames */
2734 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
2736 return 0;
2739 static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
2740 {
2741 u32 bit;
2743 bit = ether_crc(ETH_ALEN, addr) & 63;
2744 filter[bit >> 3] |= 1 << (bit & 7);
2745 }
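/* For example, the pause address 01:80:c2:00:00:01 used below hashes
 * via ether_crc() to a 32-bit CRC whose low six bits select one of the
 * 64 filter bits: bit >> 3 picks the byte and bit & 7 the bit within
 * it, matching the GM_MC_ADDR_H1..H4 register layout written by
 * sky2_set_multicast().
 */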
2747 static void sky2_set_multicast(struct net_device *dev)
2749 struct sky2_port *sky2 = netdev_priv(dev);
2750 struct sky2_hw *hw = sky2->hw;
2751 unsigned port = sky2->port;
2752 struct dev_mc_list *list = dev->mc_list;
2753 u16 reg;
2754 u8 filter[8];
2755 static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
2757 memset(filter, 0, sizeof(filter));
2759 reg = gma_read16(hw, port, GM_RX_CTRL);
2760 reg |= GM_RXCR_UCF_ENA;
2762 if (dev->flags & IFF_PROMISC) /* promiscuous */
2763 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2764 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 16) /* all multicast */
2765 memset(filter, 0xff, sizeof(filter));
2766 else if (dev->mc_count == 0 && !sky2->rx_pause)
2767 reg &= ~GM_RXCR_MCF_ENA;
2768 else {
2769 int i;
2770 reg |= GM_RXCR_MCF_ENA;
2772 if (sky2->rx_pause)
2773 sky2_add_filter(filter, pause_mc_addr);
2775 for (i = 0; list && i < dev->mc_count; i++, list = list->next)
2776 sky2_add_filter(filter, list->dmi_addr);
2779 gma_write16(hw, port, GM_MC_ADDR_H1,
2780 (u16) filter[0] | ((u16) filter[1] << 8));
2781 gma_write16(hw, port, GM_MC_ADDR_H2,
2782 (u16) filter[2] | ((u16) filter[3] << 8));
2783 gma_write16(hw, port, GM_MC_ADDR_H3,
2784 (u16) filter[4] | ((u16) filter[5] << 8));
2785 gma_write16(hw, port, GM_MC_ADDR_H4,
2786 (u16) filter[6] | ((u16) filter[7] << 8));
2788 gma_write16(hw, port, GM_RX_CTRL, reg);
2791 /* A single global state suffices because blinking is controlled by
2792 * ethtool, which always runs under the RTNL mutex.
2793 */
2794 static void sky2_led(struct sky2_hw *hw, unsigned port, int on)
2796 u16 pg;
2798 switch (hw->chip_id) {
2799 case CHIP_ID_YUKON_XL:
2800 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2801 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2802 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
2803 on ? (PHY_M_LEDC_LOS_CTRL(1) |
2804 PHY_M_LEDC_INIT_CTRL(7) |
2805 PHY_M_LEDC_STA1_CTRL(7) |
2806 PHY_M_LEDC_STA0_CTRL(7))
2807 : 0);
2809 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2810 break;
2812 default:
2813 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
2814 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
2815 on ? PHY_M_LED_MO_DUP(MO_LED_ON) |
2816 PHY_M_LED_MO_10(MO_LED_ON) |
2817 PHY_M_LED_MO_100(MO_LED_ON) |
2818 PHY_M_LED_MO_1000(MO_LED_ON) |
2819 PHY_M_LED_MO_RX(MO_LED_ON)
2820 : PHY_M_LED_MO_DUP(MO_LED_OFF) |
2821 PHY_M_LED_MO_10(MO_LED_OFF) |
2822 PHY_M_LED_MO_100(MO_LED_OFF) |
2823 PHY_M_LED_MO_1000(MO_LED_OFF) |
2824 PHY_M_LED_MO_RX(MO_LED_OFF));
2830 /* Blink LEDs to help locate the board */
2830 static int sky2_phys_id(struct net_device *dev, u32 data)
2832 struct sky2_port *sky2 = netdev_priv(dev);
2833 struct sky2_hw *hw = sky2->hw;
2834 unsigned port = sky2->port;
2835 u16 ledctrl, ledover = 0;
2836 long ms;
2837 int interrupted;
2838 int onoff = 1;
2840 if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
2841 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
2842 else
2843 ms = data * 1000;
2845 /* save initial values */
2846 spin_lock_bh(&sky2->phy_lock);
2847 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2848 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2849 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2850 ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
2851 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2852 } else {
2853 ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
2854 ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
2857 interrupted = 0;
2858 while (!interrupted && ms > 0) {
2859 sky2_led(hw, port, onoff);
2860 onoff = !onoff;
2862 spin_unlock_bh(&sky2->phy_lock);
2863 interrupted = msleep_interruptible(250);
2864 spin_lock_bh(&sky2->phy_lock);
2866 ms -= 250;
2869 /* resume regularly scheduled programming */
2870 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2871 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2872 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2873 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
2874 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2875 } else {
2876 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
2877 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
2879 spin_unlock_bh(&sky2->phy_lock);
2881 return 0;
2884 static void sky2_get_pauseparam(struct net_device *dev,
2885 struct ethtool_pauseparam *ecmd)
2887 struct sky2_port *sky2 = netdev_priv(dev);
2889 ecmd->tx_pause = sky2->tx_pause;
2890 ecmd->rx_pause = sky2->rx_pause;
2891 ecmd->autoneg = sky2->autoneg;
2894 static int sky2_set_pauseparam(struct net_device *dev,
2895 struct ethtool_pauseparam *ecmd)
2897 struct sky2_port *sky2 = netdev_priv(dev);
2898 int err = 0;
2900 sky2->autoneg = ecmd->autoneg;
2901 sky2->tx_pause = ecmd->tx_pause != 0;
2902 sky2->rx_pause = ecmd->rx_pause != 0;
2904 sky2_phy_reinit(sky2);
2906 return err;
2909 static int sky2_get_coalesce(struct net_device *dev,
2910 struct ethtool_coalesce *ecmd)
2912 struct sky2_port *sky2 = netdev_priv(dev);
2913 struct sky2_hw *hw = sky2->hw;
2915 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
2916 ecmd->tx_coalesce_usecs = 0;
2917 else {
2918 u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
2919 ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
2921 ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
2923 if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
2924 ecmd->rx_coalesce_usecs = 0;
2925 else {
2926 u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
2927 ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
2929 ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
2931 if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
2932 ecmd->rx_coalesce_usecs_irq = 0;
2933 else {
2934 u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
2935 ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
2938 ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
2940 return 0;
2943 /* Note: this affects both ports */
2944 static int sky2_set_coalesce(struct net_device *dev,
2945 struct ethtool_coalesce *ecmd)
2947 struct sky2_port *sky2 = netdev_priv(dev);
2948 struct sky2_hw *hw = sky2->hw;
2949 const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
2951 if (ecmd->tx_coalesce_usecs > tmax ||
2952 ecmd->rx_coalesce_usecs > tmax ||
2953 ecmd->rx_coalesce_usecs_irq > tmax)
2954 return -EINVAL;
2956 if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
2957 return -EINVAL;
2958 if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
2959 return -EINVAL;
2960 if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
2961 return -EINVAL;
2963 if (ecmd->tx_coalesce_usecs == 0)
2964 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
2965 else {
2966 sky2_write32(hw, STAT_TX_TIMER_INI,
2967 sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
2968 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2970 sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
2972 if (ecmd->rx_coalesce_usecs == 0)
2973 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
2974 else {
2975 sky2_write32(hw, STAT_LEV_TIMER_INI,
2976 sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
2977 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2979 sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
2981 if (ecmd->rx_coalesce_usecs_irq == 0)
2982 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
2983 else {
2984 sky2_write32(hw, STAT_ISR_TIMER_INI,
2985 sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
2986 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2988 sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
2989 return 0;
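/* A value of 0 for any of the usec parameters stops the corresponding
 * hardware timer outright instead of programming a zero interval, so
 * e.g. "ethtool -C ethX tx-usecs 0" disables transmit coalescing
 * entirely.
 */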
2992 static void sky2_get_ringparam(struct net_device *dev,
2993 struct ethtool_ringparam *ering)
2995 struct sky2_port *sky2 = netdev_priv(dev);
2997 ering->rx_max_pending = RX_MAX_PENDING;
2998 ering->rx_mini_max_pending = 0;
2999 ering->rx_jumbo_max_pending = 0;
3000 ering->tx_max_pending = TX_RING_SIZE - 1;
3002 ering->rx_pending = sky2->rx_pending;
3003 ering->rx_mini_pending = 0;
3004 ering->rx_jumbo_pending = 0;
3005 ering->tx_pending = sky2->tx_pending;
3008 static int sky2_set_ringparam(struct net_device *dev,
3009 struct ethtool_ringparam *ering)
3011 struct sky2_port *sky2 = netdev_priv(dev);
3012 int err = 0;
3014 if (ering->rx_pending > RX_MAX_PENDING ||
3015 ering->rx_pending < 8 ||
3016 ering->tx_pending < MAX_SKB_TX_LE ||
3017 ering->tx_pending > TX_RING_SIZE - 1)
3018 return -EINVAL;
3020 if (netif_running(dev))
3021 sky2_down(dev);
3023 sky2->rx_pending = ering->rx_pending;
3024 sky2->tx_pending = ering->tx_pending;
3026 if (netif_running(dev)) {
3027 err = sky2_up(dev);
3028 if (err)
3029 dev_close(dev);
3030 else
3031 sky2_set_multicast(dev);
3034 return err;
3037 static int sky2_get_regs_len(struct net_device *dev)
3039 return 0x4000;
3042 /*
3043 * Returns a copy of the control register region.
3044 * Note: access to the RAM address register set will cause timeouts.
3045 */
3046 static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3047 void *p)
3049 const struct sky2_port *sky2 = netdev_priv(dev);
3050 const void __iomem *io = sky2->hw->regs;
3052 BUG_ON(regs->len < B3_RI_WTO_R1);
3053 regs->version = 1;
3054 memset(p, 0, regs->len);
3056 memcpy_fromio(p, io, B3_RAM_ADDR);
3058 memcpy_fromio(p + B3_RI_WTO_R1,
3059 io + B3_RI_WTO_R1,
3060 regs->len - B3_RI_WTO_R1);
3063 static struct ethtool_ops sky2_ethtool_ops = {
3064 .get_settings = sky2_get_settings,
3065 .set_settings = sky2_set_settings,
3066 .get_drvinfo = sky2_get_drvinfo,
3067 .get_msglevel = sky2_get_msglevel,
3068 .set_msglevel = sky2_set_msglevel,
3069 .nway_reset = sky2_nway_reset,
3070 .get_regs_len = sky2_get_regs_len,
3071 .get_regs = sky2_get_regs,
3072 .get_link = ethtool_op_get_link,
3073 .get_sg = ethtool_op_get_sg,
3074 .set_sg = ethtool_op_set_sg,
3075 .get_tx_csum = ethtool_op_get_tx_csum,
3076 .set_tx_csum = ethtool_op_set_tx_csum,
3077 .get_tso = ethtool_op_get_tso,
3078 .set_tso = ethtool_op_set_tso,
3079 .get_rx_csum = sky2_get_rx_csum,
3080 .set_rx_csum = sky2_set_rx_csum,
3081 .get_strings = sky2_get_strings,
3082 .get_coalesce = sky2_get_coalesce,
3083 .set_coalesce = sky2_set_coalesce,
3084 .get_ringparam = sky2_get_ringparam,
3085 .set_ringparam = sky2_set_ringparam,
3086 .get_pauseparam = sky2_get_pauseparam,
3087 .set_pauseparam = sky2_set_pauseparam,
3088 .phys_id = sky2_phys_id,
3089 .get_stats_count = sky2_get_stats_count,
3090 .get_ethtool_stats = sky2_get_ethtool_stats,
3091 .get_perm_addr = ethtool_op_get_perm_addr,
3092 };
3094 /* Initialize network device */
3095 static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3096 unsigned port, int highmem)
3098 struct sky2_port *sky2;
3099 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
3101 if (!dev) {
3102 printk(KERN_ERR "sky2 etherdev alloc failed\n");
3103 return NULL;
3106 SET_MODULE_OWNER(dev);
3107 SET_NETDEV_DEV(dev, &hw->pdev->dev);
3108 dev->irq = hw->pdev->irq;
3109 dev->open = sky2_up;
3110 dev->stop = sky2_down;
3111 dev->do_ioctl = sky2_ioctl;
3112 dev->hard_start_xmit = sky2_xmit_frame;
3113 dev->get_stats = sky2_get_stats;
3114 dev->set_multicast_list = sky2_set_multicast;
3115 dev->set_mac_address = sky2_set_mac_address;
3116 dev->change_mtu = sky2_change_mtu;
3117 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
3118 dev->tx_timeout = sky2_tx_timeout;
3119 dev->watchdog_timeo = TX_WATCHDOG;
3120 if (port == 0)
3121 dev->poll = sky2_poll;
3122 dev->weight = NAPI_WEIGHT;
3123 #ifdef CONFIG_NET_POLL_CONTROLLER
3124 dev->poll_controller = sky2_netpoll;
3125 #endif
3127 sky2 = netdev_priv(dev);
3128 sky2->netdev = dev;
3129 sky2->hw = hw;
3130 sky2->msg_enable = netif_msg_init(debug, default_msg);
3132 spin_lock_init(&sky2->tx_lock);
3133 /* Auto speed and flow control */
3134 sky2->autoneg = AUTONEG_ENABLE;
3135 sky2->tx_pause = 1;
3136 sky2->rx_pause = 1;
3137 sky2->duplex = -1;
3138 sky2->speed = -1;
3139 sky2->advertising = sky2_supported_modes(hw);
3140 sky2->rx_csum = 1;
3142 spin_lock_init(&sky2->phy_lock);
3143 sky2->tx_pending = TX_DEF_PENDING;
3144 sky2->rx_pending = RX_DEF_PENDING;
3145 sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN);
3147 hw->dev[port] = dev;
3149 sky2->port = port;
3151 dev->features |= NETIF_F_LLTX;
3152 if (hw->chip_id != CHIP_ID_YUKON_EC_U)
3153 dev->features |= NETIF_F_TSO;
3154 if (highmem)
3155 dev->features |= NETIF_F_HIGHDMA;
3156 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3158 #ifdef SKY2_VLAN_TAG_USED
3159 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3160 dev->vlan_rx_register = sky2_vlan_rx_register;
3161 dev->vlan_rx_kill_vid = sky2_vlan_rx_kill_vid;
3162 #endif
3164 /* read the mac address */
3165 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
3166 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3168 /* device is off until link detection */
3169 netif_carrier_off(dev);
3170 netif_stop_queue(dev);
3172 return dev;
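/* Only port 0 gets a ->poll handler: both ports share one status ring,
 * so all NAPI work is funnelled through hw->dev[0] no matter which
 * port's traffic generated the events (see sky2_status_intr()).
 */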
3175 static void __devinit sky2_show_addr(struct net_device *dev)
3177 const struct sky2_port *sky2 = netdev_priv(dev);
3179 if (netif_msg_probe(sky2))
3180 printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
3181 dev->name,
3182 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
3183 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
3186 /* Handle software interrupt used during MSI test */
3187 static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id,
3188 struct pt_regs *regs)
3190 struct sky2_hw *hw = dev_id;
3191 u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
3193 if (status == 0)
3194 return IRQ_NONE;
3196 if (status & Y2_IS_IRQ_SW) {
3197 hw->msi_detected = 1;
3198 wake_up(&hw->msi_wait);
3199 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
3201 sky2_write32(hw, B0_Y2_SP_ICR, 2);
3203 return IRQ_HANDLED;
3206 /* Test the interrupt path by forcing a software IRQ */
3207 static int __devinit sky2_test_msi(struct sky2_hw *hw)
3209 struct pci_dev *pdev = hw->pdev;
3210 int err;
3212 init_waitqueue_head(&hw->msi_wait);
3214 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
3216 err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);
3217 if (err) {
3218 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3219 pci_name(pdev), pdev->irq);
3220 return err;
3223 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
3224 sky2_read8(hw, B0_CTST);
3226 wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
3228 if (!hw->msi_detected) {
3229 /* MSI test failed, go back to INTx mode */
3230 printk(KERN_INFO PFX "%s: No interrupt generated using MSI, "
3231 "switching to INTx mode.\n",
3232 pci_name(pdev));
3234 err = -EOPNOTSUPP;
3235 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
3238 sky2_write32(hw, B0_IMSK, 0);
3239 sky2_read32(hw, B0_IMSK);
3241 free_irq(pdev->irq, hw);
3243 return err;
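/* This exercises the whole interrupt path once: a software IRQ is
 * forced via B0_CTST, and if sky2_test_intr() has not set msi_detected
 * within HZ/10 the caller falls back to legacy INTx signalling rather
 * than risk silently broken MSI delivery on some bridges.
 */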
3246 static int __devinit sky2_probe(struct pci_dev *pdev,
3247 const struct pci_device_id *ent)
3249 struct net_device *dev, *dev1 = NULL;
3250 struct sky2_hw *hw;
3251 int err, pm_cap, using_dac = 0;
3253 err = pci_enable_device(pdev);
3254 if (err) {
3255 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3256 pci_name(pdev));
3257 goto err_out;
3260 err = pci_request_regions(pdev, DRV_NAME);
3261 if (err) {
3262 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3263 pci_name(pdev));
3264 goto err_out;
3267 pci_set_master(pdev);
3269 /* Find power-management capability. */
3270 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3271 if (pm_cap == 0) {
3272 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
3273 "aborting.\n");
3274 err = -EIO;
3275 goto err_out_free_regions;
3278 if (sizeof(dma_addr_t) > sizeof(u32) &&
3279 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3280 using_dac = 1;
3281 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3282 if (err < 0) {
3283 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
3284 "for consistent allocations\n", pci_name(pdev));
3285 goto err_out_free_regions;
3288 } else {
3289 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3290 if (err) {
3291 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3292 pci_name(pdev));
3293 goto err_out_free_regions;
3297 err = -ENOMEM;
3298 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3299 if (!hw) {
3300 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
3301 pci_name(pdev));
3302 goto err_out_free_regions;
3305 hw->pdev = pdev;
3307 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3308 if (!hw->regs) {
3309 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3310 pci_name(pdev));
3311 goto err_out_free_hw;
3313 hw->pm_cap = pm_cap;
3315 #ifdef __BIG_ENDIAN
3316 /* byte swap descriptors in hardware */
3318 u32 reg;
3320 reg = sky2_pci_read32(hw, PCI_DEV_REG2);
3321 reg |= PCI_REV_DESC;
3322 sky2_pci_write32(hw, PCI_DEV_REG2, reg);
3324 #endif
3326 /* ring for status responses */
3327 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
3328 &hw->st_dma);
3329 if (!hw->st_le)
3330 goto err_out_iounmap;
3332 err = sky2_reset(hw);
3333 if (err)
3334 goto err_out_iounmap;
3336 printk(KERN_INFO PFX "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
3337 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
3338 pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
3339 hw->chip_id, hw->chip_rev);
3341 dev = sky2_init_netdev(hw, 0, using_dac);
3342 if (!dev)
3343 goto err_out_free_pci;
3345 err = register_netdev(dev);
3346 if (err) {
3347 printk(KERN_ERR PFX "%s: cannot register net device\n",
3348 pci_name(pdev));
3349 goto err_out_free_netdev;
3352 sky2_show_addr(dev);
3354 if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
3355 if (register_netdev(dev1) == 0)
3356 sky2_show_addr(dev1);
3357 else {
3358 /* Failure to register second port need not be fatal */
3359 printk(KERN_WARNING PFX
3360 "register of second port failed\n");
3361 hw->dev[1] = NULL;
3362 free_netdev(dev1);
3366 if (!disable_msi && pci_enable_msi(pdev) == 0) {
3367 err = sky2_test_msi(hw);
3368 if (err == -EOPNOTSUPP)
3369 pci_disable_msi(pdev);
3370 else if (err)
3371 goto err_out_unregister;
3374 err = request_irq(pdev->irq, sky2_intr, IRQF_SHARED, DRV_NAME, hw);
3375 if (err) {
3376 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3377 pci_name(pdev), pdev->irq);
3378 goto err_out_unregister;
3381 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3383 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
3384 sky2_idle_start(hw);
3386 pci_set_drvdata(pdev, hw);
3388 return 0;
3390 err_out_unregister:
3391 pci_disable_msi(pdev);
3392 if (dev1) {
3393 unregister_netdev(dev1);
3394 free_netdev(dev1);
3396 unregister_netdev(dev);
3397 err_out_free_netdev:
3398 free_netdev(dev);
3399 err_out_free_pci:
3400 sky2_write8(hw, B0_CTST, CS_RST_SET);
3401 pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3402 err_out_iounmap:
3403 iounmap(hw->regs);
3404 err_out_free_hw:
3405 kfree(hw);
3406 err_out_free_regions:
3407 pci_release_regions(pdev);
3408 pci_disable_device(pdev);
3409 err_out:
3410 return err;
3413 static void __devexit sky2_remove(struct pci_dev *pdev)
3415 struct sky2_hw *hw = pci_get_drvdata(pdev);
3416 struct net_device *dev0, *dev1;
3418 if (!hw)
3419 return;
3421 del_timer_sync(&hw->idle_timer);
3423 sky2_write32(hw, B0_IMSK, 0);
3424 synchronize_irq(hw->pdev->irq);
3426 dev0 = hw->dev[0];
3427 dev1 = hw->dev[1];
3428 if (dev1)
3429 unregister_netdev(dev1);
3430 unregister_netdev(dev0);
3432 sky2_set_power_state(hw, PCI_D3hot);
3433 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
3434 sky2_write8(hw, B0_CTST, CS_RST_SET);
3435 sky2_read8(hw, B0_CTST);
3437 free_irq(pdev->irq, hw);
3438 pci_disable_msi(pdev);
3439 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3440 pci_release_regions(pdev);
3441 pci_disable_device(pdev);
3443 if (dev1)
3444 free_netdev(dev1);
3445 free_netdev(dev0);
3446 iounmap(hw->regs);
3447 kfree(hw);
3449 pci_set_drvdata(pdev, NULL);
3452 #ifdef CONFIG_PM
3453 static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
3455 struct sky2_hw *hw = pci_get_drvdata(pdev);
3456 int i;
3457 pci_power_t pstate = pci_choose_state(pdev, state);
3459 if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
3460 return -EINVAL;
3462 del_timer_sync(&hw->idle_timer);
3463 netif_poll_disable(hw->dev[0]);
3465 for (i = 0; i < hw->ports; i++) {
3466 struct net_device *dev = hw->dev[i];
3468 if (netif_running(dev)) {
3469 sky2_down(dev);
3470 netif_device_detach(dev);
3474 sky2_write32(hw, B0_IMSK, 0);
3475 pci_save_state(pdev);
3476 sky2_set_power_state(hw, pstate);
3477 return 0;
3480 static int sky2_resume(struct pci_dev *pdev)
3482 struct sky2_hw *hw = pci_get_drvdata(pdev);
3483 int i, err;
3485 pci_restore_state(pdev);
3486 pci_enable_wake(pdev, PCI_D0, 0);
3487 sky2_set_power_state(hw, PCI_D0);
3489 err = sky2_reset(hw);
3490 if (err)
3491 goto out;
3493 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3495 for (i = 0; i < hw->ports; i++) {
3496 struct net_device *dev = hw->dev[i];
3497 if (netif_running(dev)) {
3498 netif_device_attach(dev);
3500 err = sky2_up(dev);
3501 if (err) {
3502 printk(KERN_ERR PFX "%s: could not up: %d\n",
3503 dev->name, err);
3504 dev_close(dev);
3505 goto out;
3510 netif_poll_enable(hw->dev[0]);
3511 sky2_idle_start(hw);
3512 out:
3513 return err;
3515 #endif
3517 static struct pci_driver sky2_driver = {
3518 .name = DRV_NAME,
3519 .id_table = sky2_id_table,
3520 .probe = sky2_probe,
3521 .remove = __devexit_p(sky2_remove),
3522 #ifdef CONFIG_PM
3523 .suspend = sky2_suspend,
3524 .resume = sky2_resume,
3525 #endif
3526 };
3528 static int __init sky2_init_module(void)
3530 return pci_register_driver(&sky2_driver);
3533 static void __exit sky2_cleanup_module(void)
3535 pci_unregister_driver(&sky2_driver);
3538 module_init(sky2_init_module);
3539 module_exit(sky2_cleanup_module);
3541 MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
3542 MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
3543 MODULE_LICENSE("GPL");
3544 MODULE_VERSION(DRV_VERSION);