* notice is accompanying it.
*/
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < 0x020612)
+#include <linux/config.h>
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020500)
+#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#endif
#include <linux/module.h>
+#if (LINUX_VERSION_CODE >= 0x20600)
#include <linux/moduleparam.h>
+#endif
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#if (LINUX_VERSION_CODE >= 0x20600)
#include <linux/workqueue.h>
+#endif
#include <linux/prefetch.h>
+#if (LINUX_VERSION_CODE >= 0x020600)
#include <linux/dma-mapping.h>
+#endif
+#include <linux/bitops.h>
#include <net/checksum.h>
#include <net/ip.h>
#define TG3_VLAN_TAG_USED 0
#endif
+#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
+#else
+#define TG3_TSO_SUPPORT 0
+#endif
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.94"
-#define DRV_MODULE_RELDATE "August 14, 2008"
+#define DRV_MODULE_VERSION "3.92n"
+#define DRV_MODULE_RELDATE "September 29, 2008"
+
+/* The driver optimizes the hot rx code path by merging a mandatory rx double
+ * copy check with the normal double copy rx threshold check. On those
+ * architectures where the mandatory double copy is not needed, we can optimize
+ * further by saving a device structure dereference and hardcoding the double
+ * copy threshold in place.
+ */
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ #define TG3_RX_COPY_THRESH(tp) RX_COPY_THRESHOLD
+#else
+ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
+#endif
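+/* For illustration (hypothetical values): with NET_IP_ALIGN == 0 the
+ * hot-path test "len > TG3_RX_COPY_THRESH(tp)" compiles down to a
+ * compare against the RX_COPY_THRESHOLD constant with no load from *tp;
+ * on other architectures it reads the tunable tp->rx_copy_thresh.
+ */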
+
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
-#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
-#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
+#define RX_PKT_BUF_SZ (1536 + 64)
+#define RX_JUMBO_PKT_BUF_SZ (9046 + 64)
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
+#define TG3_RAW_IP_ALIGN 2
+
/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
MODULE_VERSION(DRV_MODULE_VERSION);
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
+#if (LINUX_VERSION_CODE >= 0x20600)
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+#endif
static struct pci_device_id tg3_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761SE)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
static inline void tg3_netif_stop(struct tg3 *tp)
{
tp->dev->trans_start = jiffies; /* prevent tx timeout */
+#ifdef TG3_NAPI
napi_disable(&tp->napi);
+#else
+ netif_poll_disable(tp->dev);
+#endif
netif_tx_disable(tp->dev);
}
* so long as all callers are assured to have free tx slots
* (such as after tg3_init_hw)
*/
+#ifdef TG3_NAPI
napi_enable(&tp->napi);
+#else
+ netif_poll_enable(tp->dev);
+#endif
tp->hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
}
return 0;
}
-static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
-{
- struct tg3 *tp = (struct tg3 *)bp->priv;
- u32 val;
-
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
- return -EAGAIN;
-
- if (tg3_readphy(tp, reg, &val))
- return -EIO;
-
- return val;
-}
-
-static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
-{
- struct tg3 *tp = (struct tg3 *)bp->priv;
-
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
- return -EAGAIN;
-
- if (tg3_writephy(tp, reg, val))
- return -EIO;
-
- return 0;
-}
-
-static int tg3_mdio_reset(struct mii_bus *bp)
-{
- return 0;
-}
-
-static void tg3_mdio_config(struct tg3 *tp)
-{
- u32 val;
-
- if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
- PHY_INTERFACE_MODE_RGMII)
- return;
-
- val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
- MAC_PHYCFG1_RGMII_SND_STAT_EN);
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
- val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
- val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
- }
- tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
-
- val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
- val |= MAC_PHYCFG2_INBAND_ENABLE;
- tw32(MAC_PHYCFG2, val);
-
- val = tr32(MAC_EXT_RGMII_MODE);
- val &= ~(MAC_RGMII_MODE_RX_INT_B |
- MAC_RGMII_MODE_RX_QUALITY |
- MAC_RGMII_MODE_RX_ACTIVITY |
- MAC_RGMII_MODE_RX_ENG_DET |
- MAC_RGMII_MODE_TX_ENABLE |
- MAC_RGMII_MODE_TX_LOWPWR |
- MAC_RGMII_MODE_TX_RESET);
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
- val |= MAC_RGMII_MODE_RX_INT_B |
- MAC_RGMII_MODE_RX_QUALITY |
- MAC_RGMII_MODE_RX_ACTIVITY |
- MAC_RGMII_MODE_RX_ENG_DET;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
- val |= MAC_RGMII_MODE_TX_ENABLE |
- MAC_RGMII_MODE_TX_LOWPWR |
- MAC_RGMII_MODE_TX_RESET;
- }
- tw32(MAC_EXT_RGMII_MODE, val);
-}
-
-static void tg3_mdio_start(struct tg3 *tp)
-{
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
- mutex_lock(&tp->mdio_bus.mdio_lock);
- tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
- mutex_unlock(&tp->mdio_bus.mdio_lock);
- }
-
- tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
- tw32_f(MAC_MI_MODE, tp->mi_mode);
- udelay(80);
-
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
- tg3_mdio_config(tp);
-}
-
-static void tg3_mdio_stop(struct tg3 *tp)
-{
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
- mutex_lock(&tp->mdio_bus.mdio_lock);
- tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
- mutex_unlock(&tp->mdio_bus.mdio_lock);
- }
-}
-
-static int tg3_mdio_init(struct tg3 *tp)
-{
- int i;
- u32 reg;
- struct phy_device *phydev;
- struct mii_bus *mdio_bus = &tp->mdio_bus;
-
- tg3_mdio_start(tp);
-
- if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
- (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
- return 0;
-
- memset(mdio_bus, 0, sizeof(*mdio_bus));
-
- mdio_bus->name = "tg3 mdio bus";
- snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
- (tp->pdev->bus->number << 8) | tp->pdev->devfn);
- mdio_bus->priv = tp;
- mdio_bus->dev = &tp->pdev->dev;
- mdio_bus->read = &tg3_mdio_read;
- mdio_bus->write = &tg3_mdio_write;
- mdio_bus->reset = &tg3_mdio_reset;
- mdio_bus->phy_mask = ~(1 << PHY_ADDR);
- mdio_bus->irq = &tp->mdio_irq[0];
-
- for (i = 0; i < PHY_MAX_ADDR; i++)
- mdio_bus->irq[i] = PHY_POLL;
-
- /* The bus registration will look for all the PHYs on the mdio bus.
- * Unfortunately, it does not ensure the PHY is powered up before
- * accessing the PHY ID registers. A chip reset is the
- * quickest way to bring the device back to an operational state..
- */
- if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
- tg3_bmcr_reset(tp);
-
- i = mdiobus_register(mdio_bus);
- if (i) {
- printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
- tp->dev->name, i);
- return i;
- }
-
- tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
-
- phydev = tp->mdio_bus.phy_map[PHY_ADDR];
-
- if (!phydev || !phydev->drv)
- return -1;
-
- switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
- case TG3_PHY_ID_BCM57780:
- phydev->interface = PHY_INTERFACE_MODE_GMII;
- phydev->dev_flags = PHY_BRCM_WIRESPEED_ENABLE;
- break;
- case TG3_PHY_ID_BCM50610:
- phydev->interface = PHY_INTERFACE_MODE_RGMII;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
- phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
- break;
- case TG3_PHY_ID_BCMAC131:
- phydev->interface = PHY_INTERFACE_MODE_MII;
- break;
- }
-
- tg3_mdio_config(tp);
-
- return 0;
-}
-
-static void tg3_mdio_fini(struct tg3 *tp)
-{
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
- tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
- mdiobus_unregister(&tp->mdio_bus);
- tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
- }
-}
-
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
u32 old_rx_mode = tp->rx_mode;
u32 old_tx_mode = tp->tx_mode;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
- autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
- else
- autoneg = tp->link_config.autoneg;
+ autoneg = tp->link_config.autoneg;
if (autoneg == AUTONEG_ENABLE &&
(tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
tw32_f(MAC_TX_MODE, tp->tx_mode);
}
-static void tg3_adjust_link(struct net_device *dev)
-{
- u8 oldflowctrl, linkmesg = 0;
- u32 mac_mode, lcl_adv, rmt_adv;
- struct tg3 *tp = netdev_priv(dev);
- struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];
-
- spin_lock(&tp->lock);
-
- mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
- MAC_MODE_HALF_DUPLEX);
-
- oldflowctrl = tp->link_config.active_flowctrl;
-
- if (phydev->link) {
- lcl_adv = 0;
- rmt_adv = 0;
-
- if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
- mac_mode |= MAC_MODE_PORT_MODE_MII;
- else
- mac_mode |= MAC_MODE_PORT_MODE_GMII;
-
- if (phydev->duplex == DUPLEX_HALF)
- mac_mode |= MAC_MODE_HALF_DUPLEX;
- else {
- lcl_adv = tg3_advert_flowctrl_1000T(
- tp->link_config.flowctrl);
-
- if (phydev->pause)
- rmt_adv = LPA_PAUSE_CAP;
- if (phydev->asym_pause)
- rmt_adv |= LPA_PAUSE_ASYM;
- }
-
- tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
- } else
- mac_mode |= MAC_MODE_PORT_MODE_GMII;
-
- if (mac_mode != tp->mac_mode) {
- tp->mac_mode = mac_mode;
- tw32_f(MAC_MODE, tp->mac_mode);
- udelay(40);
- }
-
- if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
- tw32(MAC_TX_LENGTHS,
- ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
- (6 << TX_LENGTHS_IPG_SHIFT) |
- (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
- else
- tw32(MAC_TX_LENGTHS,
- ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
- (6 << TX_LENGTHS_IPG_SHIFT) |
- (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
-
- if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
- (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
- phydev->speed != tp->link_config.active_speed ||
- phydev->duplex != tp->link_config.active_duplex ||
- oldflowctrl != tp->link_config.active_flowctrl)
- linkmesg = 1;
-
- tp->link_config.active_speed = phydev->speed;
- tp->link_config.active_duplex = phydev->duplex;
-
- spin_unlock(&tp->lock);
-
- if (linkmesg)
- tg3_link_report(tp);
-}
-
-static int tg3_phy_init(struct tg3 *tp)
+static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
- struct phy_device *phydev;
-
- if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
- return 0;
-
- /* Bring the PHY back to a known state. */
- tg3_bmcr_reset(tp);
-
- phydev = tp->mdio_bus.phy_map[PHY_ADDR];
-
- /* Attach the MAC to the PHY. */
- phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
- phydev->dev_flags, phydev->interface);
- if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
- return PTR_ERR(phydev);
- }
-
- tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
-
- /* Mask with MAC supported features. */
- phydev->supported &= (PHY_GBIT_FEATURES |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
-
- phydev->advertising = phydev->supported;
-
- printk(KERN_INFO
- "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
- tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
-
- return 0;
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
-static void tg3_phy_start(struct tg3 *tp)
+static void tg3_phy_toggle_apd(struct tg3 *tp, int enable)
{
- struct phy_device *phydev;
+ u32 reg;
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
return;
- phydev = tp->mdio_bus.phy_map[PHY_ADDR];
+ reg = MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_SCR5_SEL |
+ MII_TG3_MISC_SHDW_SCR5_LPED |
+ MII_TG3_MISC_SHDW_SCR5_DLPTLM |
+ MII_TG3_MISC_SHDW_SCR5_SDTL |
+ MII_TG3_MISC_SHDW_SCR5_C125OE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+ reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
- if (tp->link_config.phy_is_low_power) {
- tp->link_config.phy_is_low_power = 0;
- phydev->speed = tp->link_config.orig_speed;
- phydev->duplex = tp->link_config.orig_duplex;
- phydev->autoneg = tp->link_config.orig_autoneg;
- phydev->advertising = tp->link_config.orig_advertising;
- }
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
- phy_start(phydev);
- phy_start_aneg(phydev);
-}
+ reg = MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_APD_SEL |
+ MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+ if (enable)
+ reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
-static void tg3_phy_stop(struct tg3 *tp)
-{
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
- return;
-
- phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
-}
-
-static void tg3_phy_fini(struct tg3 *tp)
-{
- if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
- phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
- tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
- }
-}
-
-static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
-{
- tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
- tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
tw32(TG3_CPMU_CTRL, cpmuctrl);
}
- if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
u32 val;
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
udelay(40);
tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
}
-
- /* Disable GPHY autopowerdown. */
- tg3_writephy(tp, MII_TG3_MISC_SHDW,
- MII_TG3_MISC_SHDW_WREN |
- MII_TG3_MISC_SHDW_APD_SEL |
- MII_TG3_MISC_SHDW_APD_WKTM_84MS);
}
tg3_phy_apply_otp(tp);
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, 1);
+ else
+ tg3_phy_toggle_apd(tp, 0);
+
out:
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
udelay(40);
return;
- } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
+ } else {
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_FORCE_LED_OFF);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
return;
- if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
val |= CPMU_LSPD_1000MB_MACCLK_12_5;
tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
+/* tp->lock is held. */
+static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
+{
+ u32 addr_high, addr_low;
+ int i;
+
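+ /* The hardware splits the station address big-endian across two
+ * registers: bytes 0-1 in the high word, bytes 2-5 in the low word.
+ */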
+ addr_high = ((tp->dev->dev_addr[0] << 8) |
+ tp->dev->dev_addr[1]);
+ addr_low = ((tp->dev->dev_addr[2] << 24) |
+ (tp->dev->dev_addr[3] << 16) |
+ (tp->dev->dev_addr[4] << 8) |
+ (tp->dev->dev_addr[5] << 0));
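+ /* Mirror the address into all four perfect-match slots; slot 1 can be
+ * skipped so an address already programmed there is preserved.
+ */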
+ for (i = 0; i < 4; i++) {
+ if (i == 1 && skip_mac_1)
+ continue;
+ tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
+ tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ for (i = 0; i < 12; i++) {
+ tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
+ tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
+ }
+ }
+
+ addr_high = (tp->dev->dev_addr[0] +
+ tp->dev->dev_addr[1] +
+ tp->dev->dev_addr[2] +
+ tp->dev->dev_addr[3] +
+ tp->dev->dev_addr[4] +
+ tp->dev->dev_addr[5]) &
+ TX_BACKOFF_SEED_MASK;
+ tw32(MAC_TX_BACKOFF_SEED, addr_high);
+}
+
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
u32 misc_host_ctrl;
+ u16 power_control, power_caps;
+ int pm = tp->pm_cap;
/* Make sure register accesses (indirect or otherwise)
* will function correctly.
TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl);
+ pci_read_config_word(tp->pdev,
+ pm + PCI_PM_CTRL,
+ &power_control);
+ power_control |= PCI_PM_CTRL_PME_STATUS;
+ power_control &= ~(PCI_PM_CTRL_STATE_MASK);
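+ /* Older kernels lack pci_pme_capable()/pci_enable_wake(), so program
+ * the PCI PM control/status register directly: clear the state bits,
+ * acknowledge any pending PME, then OR in the target D-state below.
+ */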
switch (state) {
case PCI_D0:
- pci_enable_wake(tp->pdev, state, false);
- pci_set_power_state(tp->pdev, PCI_D0);
+ power_control |= 0;
+ pci_write_config_word(tp->pdev,
+ pm + PCI_PM_CTRL,
+ power_control);
+ udelay(100); /* Delay after power state change */
/* Switch out of Vaux if it is a NIC */
if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
return 0;
case PCI_D1:
+ power_control |= 1;
+ break;
+
case PCI_D2:
+ power_control |= 2;
+ break;
+
case PCI_D3hot:
+ power_control |= 3;
break;
default:
- printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
- tp->dev->name, state);
+ printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
+ "requested.\n",
+ tp->dev->name, state);
return -EINVAL;
}
+
+ /* Restore the CLKREQ setting. */
+ if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
+ u16 lnkctl;
+
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ &lnkctl);
+ lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ lnkctl);
+ }
+
+ power_control |= PCI_PM_CTRL_PME_ENABLE;
+
misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
tw32(TG3PCI_MISC_HOST_CTRL,
misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
- !tp->link_config.phy_is_low_power) {
- struct phy_device *phydev;
- u32 advertising;
-
- phydev = tp->mdio_bus.phy_map[PHY_ADDR];
-
- tp->link_config.phy_is_low_power = 1;
-
- tp->link_config.orig_speed = phydev->speed;
- tp->link_config.orig_duplex = phydev->duplex;
- tp->link_config.orig_autoneg = phydev->autoneg;
- tp->link_config.orig_advertising = phydev->advertising;
-
- advertising = ADVERTISED_TP |
- ADVERTISED_Pause |
- ADVERTISED_Autoneg |
- ADVERTISED_10baseT_Half;
-
- if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
- if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
- advertising |=
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full;
- else
- advertising |= ADVERTISED_10baseT_Full;
- }
-
- phydev->advertising = advertising;
-
- phy_start_aneg(phydev);
- }
- } else {
- if (tp->link_config.phy_is_low_power == 0) {
- tp->link_config.phy_is_low_power = 1;
- tp->link_config.orig_speed = tp->link_config.speed;
- tp->link_config.orig_duplex = tp->link_config.duplex;
- tp->link_config.orig_autoneg = tp->link_config.autoneg;
- }
+ if (tp->link_config.phy_is_low_power == 0) {
+ tp->link_config.phy_is_low_power = 1;
+ tp->link_config.orig_speed = tp->link_config.speed;
+ tp->link_config.orig_duplex = tp->link_config.duplex;
+ tp->link_config.orig_autoneg = tp->link_config.autoneg;
+ }
- if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
- tp->link_config.speed = SPEED_10;
- tp->link_config.duplex = DUPLEX_HALF;
- tp->link_config.autoneg = AUTONEG_ENABLE;
- tg3_setup_phy(tp, 0);
- }
+ if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
+ tp->link_config.speed = SPEED_10;
+ tp->link_config.duplex = DUPLEX_HALF;
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tg3_setup_phy(tp, 0);
}
+ __tg3_set_mac_addr(tp, 0);
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
u32 val;
tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 1000);
+#else
msleep(1);
+#endif
}
}
if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
WOL_DRV_WOL |
WOL_SET_MAGIC_PKT);
+ pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
+
if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
u32 mac_mode;
if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
- if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
- udelay(40);
- }
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
+ udelay(40);
if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
mac_mode = MAC_MODE_PORT_MODE_GMII;
if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
tw32(MAC_LED_CTRL, tp->led_ctrl);
- if (pci_pme_capable(tp->pdev, state) &&
- (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
+ if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
+ (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))) {
mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
+ if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
+ (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
+ mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
+ }
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
mac_mode |= tp->mac_mode &
(MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
if (mac_mode & MAC_MODE_APE_TX_EN)
tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
- if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
- pci_enable_wake(tp->pdev, state, true);
-
/* Finally, set the new power state. */
- pci_set_power_state(tp->pdev, state);
+ pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
+ udelay(100); /* Delay after power state change */
return 0;
}
NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
}
+ /* Prevent send BD corruption. */
+ if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
+ u16 oldlnkctl, newlnkctl;
+
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ &oldlnkctl);
+ if (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_10)
+ newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ else
+ newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
+ if (newlnkctl != oldlnkctl)
+ pci_write_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ newlnkctl);
+ }
+
if (current_link_up != netif_carrier_ok(tp->dev)) {
if (current_link_up)
netif_carrier_on(tp->dev);
err = tg3_setup_copper_phy(tp, force_reset);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
u32 val, scale;
val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size);
+ skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
if (skb == NULL)
return -ENOMEM;
skb_reserve(skb, tp->rx_offset);
mapping = pci_map_single(tp->pdev, skb->data,
- skb_size - tp->rx_offset,
+ skb_size,
PCI_DMA_FROMDEVICE);
map->skb = skb;
src_map->skb = NULL;
}
-#if TG3_VLAN_TAG_USED
-static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
-{
- return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
-}
-#endif
-
/* The RX ring scheme is composed of multiple rings which post fresh
* buffers to the chip, and one special ring the chip uses to report
* status back to the host.
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ bool hw_vlan __maybe_unused = false;
+ u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
- if (len > RX_COPY_THRESHOLD
- && tp->rx_offset == 2
- /* rx_offset != 2 iff this is a 5701 card running
- * in PCI-X mode [see tg3_get_invariants()] */
- ) {
+ if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
skb_size = tg3_alloc_rx_skb(tp, opaque_key,
goto drop_it;
pci_unmap_single(tp->pdev, dma_addr,
- skb_size - tp->rx_offset,
+ skb_size,
PCI_DMA_FROMDEVICE);
skb_put(skb, len);
tg3_recycle_rx(tp, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len + 2);
+ copy_skb = netdev_alloc_skb(tp->dev, len + TG3_RAW_IP_ALIGN + VLAN_HLEN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, 2);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
skb->ip_summed = CHECKSUM_NONE;
skb->protocol = eth_type_trans(skb, tp->dev);
+
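+ /* Drop frames longer than the configured MTU unless they carry an
+ * 802.1Q tag, which legitimately adds four bytes.
+ */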
+ if ((len > (tp->dev->mtu + ETH_HLEN)) &&
+ (ntohs(skb->protocol) != ETH_P_8021Q)) {
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+
+ if (desc->type_flags & RXD_FLAG_VLAN &&
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
+ vtag = desc->err_vlan & RXD_VLAN_MASK;
#if TG3_VLAN_TAG_USED
- if (tp->vlgrp != NULL &&
- desc->type_flags & RXD_FLAG_VLAN) {
- tg3_vlan_rx(tp, skb,
- desc->err_vlan & RXD_VLAN_MASK);
- } else
+ if (tp->vlgrp)
+ hw_vlan = true;
+ else
#endif
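+ /* No VLAN acceleration group registered: splice the 802.1Q
+ * header back into the packet data so the tag is not lost.
+ */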
+ {
+ struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+ __skb_push(skb, VLAN_HLEN);
+
+ memmove(ve, skb->data + VLAN_HLEN, ETH_ALEN * 2);
+ ve->h_vlan_proto = htons(ETH_P_8021Q);
+ ve->h_vlan_TCI = htons(vtag);
+ }
+ }
+
+ if (hw_vlan)
+ vlan_hwaccel_receive_skb(skb, tp->vlgrp, vtag);
+ else
netif_receive_skb(skb);
tp->dev->last_rx = jiffies;
tp->rx_rcb_ptr = sw_idx;
tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Refill RX ring(s). */
if (work_mask & RXD_OPAQUE_RING_STD) {
sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
return received;
}
+#ifdef TG3_NAPI
+
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
struct tg3_hw_status *sblk = tp->hw_status;
sblk->status = SD_STATUS_UPDATED |
(sblk->status & ~SD_STATUS_LINK_CHG);
spin_lock(&tp->lock);
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- tw32_f(MAC_STATUS,
- (MAC_STATUS_SYNC_CHANGED |
- MAC_STATUS_CFG_CHANGED |
- MAC_STATUS_MI_COMPLETION |
- MAC_STATUS_LNKSTATE_CHANGED));
- udelay(40);
- } else
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, 0);
spin_unlock(&tp->lock);
}
}
return work_done;
}
+#else
+
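+/* Old-style NAPI poll entry point (used when TG3_NAPI is not defined):
+ * the stack hands us *budget and netdev->quota; we consume up to that
+ * many packets, decrement both, and return zero only once all work is
+ * done and netif_rx_complete() has taken us off the poll list.
+ */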
+static int tg3_poll(struct net_device *netdev, int *budget)
+{
+ struct tg3 *tp = netdev_priv(netdev);
+ struct tg3_hw_status *sblk = tp->hw_status;
+ int done;
+
+ /* handle link change and other phy events */
+ if (!(tp->tg3_flags &
+ (TG3_FLAG_USE_LINKCHG_REG |
+ TG3_FLAG_POLL_SERDES))) {
+ if (sblk->status & SD_STATUS_LINK_CHG) {
+ sblk->status = SD_STATUS_UPDATED |
+ (sblk->status & ~SD_STATUS_LINK_CHG);
+ spin_lock(&tp->lock);
+ tg3_setup_phy(tp, 0);
+ spin_unlock(&tp->lock);
+ }
+ }
+
+ /* run TX completion thread */
+ if (sblk->idx[0].tx_consumer != tp->tx_cons) {
+ tg3_tx(tp);
+ if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
+ netif_rx_complete(netdev);
+ schedule_work(&tp->reset_task);
+ return 0;
+ }
+ }
+
+ /* run RX thread, within the bounds set by NAPI.
+ * All RX "locking" is done by ensuring outside
+ * code synchronizes with dev->poll()
+ */
+ if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
+ int orig_budget = *budget;
+ int work_done;
+
+ if (orig_budget > netdev->quota)
+ orig_budget = netdev->quota;
+
+ work_done = tg3_rx(tp, orig_budget);
+
+ *budget -= work_done;
+ netdev->quota -= work_done;
+ }
+
+ if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+ tp->last_tag = sblk->status_tag;
+ rmb();
+ } else
+ sblk->status &= ~SD_STATUS_UPDATED;
+
+ /* if no more work, tell net stack and NIC we're done */
+ done = !tg3_has_work(tp);
+ if (done) {
+ netif_rx_complete(netdev);
+ tg3_restart_ints(tp);
+ }
+
+ return (done ? 0 : 1);
+}
+
+#endif /* TG3_NAPI */
+
static void tg3_irq_quiesce(struct tg3 *tp)
{
BUG_ON(tp->irq_sync);
tp->irq_sync = 1;
smp_mb();
+#if (LINUX_VERSION_CODE >= 0x2051c)
synchronize_irq(tp->pdev->irq);
+#else
+ synchronize_irq();
+#endif
}
static inline int tg3_irq_sync(struct tg3 *tp)
/* One-shot MSI handler - Chip automatically disables interrupt
* after sending MSI so driver doesn't have to do it.
*/
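+/* Kernels before 2.6.19 pass a struct pt_regs pointer to interrupt
+ * handlers; the version checks around each ISR pick the matching
+ * prototype.
+ */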
+#if (LINUX_VERSION_CODE < 0x20613)
+static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
+#else
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
if (likely(!tg3_irq_sync(tp)))
+#ifdef TG3_NAPI
netif_rx_schedule(dev, &tp->napi);
+#else
+ netif_rx_schedule(dev); /* schedule NAPI poll */
+#endif
return IRQ_HANDLED;
}
* flush status block and interrupt mailbox. PCI ordering rules
* guarantee that MSI will arrive after the status block.
*/
+#if (LINUX_VERSION_CODE < 0x20613)
+static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
+#else
static irqreturn_t tg3_msi(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (likely(!tg3_irq_sync(tp)))
+#ifdef TG3_NAPI
netif_rx_schedule(dev, &tp->napi);
+#else
+ netif_rx_schedule(dev); /* schedule NAPI poll */
+#endif
return IRQ_RETVAL(1);
}
+#if (LINUX_VERSION_CODE < 0x20613)
+static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+#else
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
sblk->status &= ~SD_STATUS_UPDATED;
if (likely(tg3_has_work(tp))) {
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+#ifdef TG3_NAPI
netif_rx_schedule(dev, &tp->napi);
+#else
+ netif_rx_schedule(dev); /* schedule NAPI poll */
+#endif
} else {
/* No work, shared interrupt perhaps? re-enable
* interrupts, and flush that PCI write
return IRQ_RETVAL(handled);
}
+#if (LINUX_VERSION_CODE < 0x20613)
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+#else
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (tg3_irq_sync(tp))
goto out;
+#ifdef TG3_NAPI
if (netif_rx_schedule_prep(dev, &tp->napi)) {
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
/* Update last_tag to mark that this status has been
tp->last_tag = sblk->status_tag;
__netif_rx_schedule(dev, &tp->napi);
}
+#else
+ if (netif_rx_schedule_prep(dev)) {
+ prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+ /* Update last_tag to mark that this status has been
+ * seen. Because interrupt may be shared, we may be
+ * racing with tg3_poll(), so only update last_tag
+ * if tg3_poll() is not scheduled.
+ */
+ tp->last_tag = sblk->status_tag;
+ __netif_rx_schedule(dev);
+ }
+#endif
out:
return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
+#if (LINUX_VERSION_CODE < 0x020613)
+static irqreturn_t tg3_test_isr(int irq, void *dev_id, struct pt_regs *regs)
+#else
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
tp->irq_sync = 0;
+#ifdef TG3_NAPI
napi_enable(&tp->napi);
+#else
+ netif_poll_enable(tp->dev);
+#endif
dev_close(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
+#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void tg3_poll_controller(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
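+ /* On Red Hat netdump kernels this may run with interrupts disabled
+ * and no poll scheduled, so service the IRQ and, if we were placed
+ * on the poll list, run the poll loop inline with a fixed budget.
+ */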
+#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x20600)
+ if (netdump_mode) {
+ tg3_interrupt(tp->pdev->irq, dev, NULL);
+ if (dev->poll_list.prev) {
+ int budget = 64;
+
+ tg3_poll(dev, &budget);
+ }
+ }
+ else
+#endif
+#if (LINUX_VERSION_CODE < 0x020613)
+ tg3_interrupt(tp->pdev->irq, dev, NULL);
+#else
tg3_interrupt(tp->pdev->irq, dev);
+#endif
}
#endif
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
static void tg3_reset_task(struct work_struct *work)
+#else
+static void tg3_reset_task(void *_data)
+#endif
{
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
struct tg3 *tp = container_of(work, struct tg3, reset_task);
- int err;
+#else
+ struct tg3 *tp = _data;
+#endif
unsigned int restart_timer;
tg3_full_lock(tp, 0);
tg3_full_unlock(tp);
- tg3_phy_stop(tp);
-
tg3_netif_stop(tp);
tg3_full_lock(tp, 1);
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- err = tg3_init_hw(tp, 1);
- if (err)
+ if (tg3_init_hw(tp, 1))
goto out;
tg3_netif_start(tp);
out:
tg3_full_unlock(tp);
-
- if (!err)
- tg3_phy_start(tp);
}
static void tg3_dump_short_state(struct tg3 *tp)
entry = tp->tx_prod;
base_flags = 0;
+#if TG3_TSO_SUPPORT != 0
mss = 0;
if ((mss = skb_shinfo(skb)->gso_size) != 0) {
int tcp_opt_len, ip_tcp_len;
goto out_unlock;
}
+#ifndef BCM_NO_TSO6
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
- else {
+ else
+#endif
+ {
struct iphdr *iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
}
else if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#else
+ mss = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#endif
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
}
}
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
netif_wake_queue(tp->dev);
}
+#if TG3_TSO_SUPPORT != 0
out_unlock:
+#endif
mmiowb();
dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
+#if TG3_TSO_SUPPORT != 0
+#ifndef NETIF_F_GSO
+
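+/* Kernels without NETIF_F_GSO lack the generic segmentation helpers,
+ * so minimal local copies follow: skb_segment() splits an oversized
+ * skb into gso_size chunks, tcp_tso_segment() rewrites each segment's
+ * TCP header (sequence number, checksum, FIN/PSH/CWR bits), and
+ * inet_gso_segment() re-stamps the IP id, total length and header
+ * checksum.
+ */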
+struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = NULL;
+ struct sk_buff *tail = NULL;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int doffset = skb->data - skb->mac.raw;
+ unsigned int offset = doffset;
+ unsigned int headroom;
+ unsigned int len;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ int err = -ENOMEM;
+ int i = 0;
+ int pos;
+
+ __skb_push(skb, doffset);
+ headroom = skb_headroom(skb);
+ pos = skb_headlen(skb);
+
+ do {
+ struct sk_buff *nskb;
+ skb_frag_t *frag;
+ int hsize;
+ int k;
+ int size;
+
+ len = skb->len - offset;
+ if (len > mss)
+ len = mss;
+
+ hsize = skb_headlen(skb) - offset;
+ if (hsize < 0)
+ hsize = 0;
+ if (hsize > len)
+ hsize = len;
+
+ nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
+ if (unlikely(!nskb))
+ goto err;
+
+ if (segs)
+ tail->next = nskb;
+ else
+ segs = nskb;
+ tail = nskb;
+
+ nskb->dev = skb->dev;
+ nskb->priority = skb->priority;
+ nskb->protocol = skb->protocol;
+ nskb->dst = dst_clone(skb->dst);
+ memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
+ nskb->pkt_type = skb->pkt_type;
+ nskb->mac_len = skb->mac_len;
+
+ skb_reserve(nskb, headroom);
+ nskb->mac.raw = nskb->data;
+ nskb->nh.raw = nskb->data + skb->mac_len;
+ nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
+ memcpy(skb_put(nskb, doffset), skb->data, doffset);
+
+ frag = skb_shinfo(nskb)->frags;
+ k = 0;
+
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum = skb->csum;
+ memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
+
+ while (pos < offset + len) {
+ BUG_ON(i >= nfrags);
+
+ *frag = skb_shinfo(skb)->frags[i];
+ get_page(frag->page);
+ size = frag->size;
+
+ if (pos < offset) {
+ frag->page_offset += offset - pos;
+ frag->size -= offset - pos;
+ }
+
+ k++;
+
+ if (pos + size <= offset + len) {
+ i++;
+ pos += size;
+ } else {
+ frag->size -= pos + size - (offset + len);
+ break;
+ }
+
+ frag++;
+ }
+
+ skb_shinfo(nskb)->nr_frags = k;
+ nskb->data_len = len - hsize;
+ nskb->len += nskb->data_len;
+ nskb->truesize += nskb->data_len;
+ } while ((offset += len) < skb->len);
+
+ return segs;
+
+err:
+ while ((skb = segs)) {
+ segs = skb->next;
+ kfree_skb(skb);
+ }
+ return ERR_PTR(err);
+}
+
+static struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct tcphdr *th;
+ unsigned thlen;
+ unsigned int seq;
+ u32 delta;
+ unsigned int oldlen;
+ unsigned int len;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ goto out;
+
+ th = skb->h.th;
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ goto out;
+
+ if (!pskb_may_pull(skb, thlen))
+ goto out;
+
+ oldlen = (u16)~skb->len;
+ __skb_pull(skb, thlen);
+
+ segs = skb_segment(skb, features);
+ if (IS_ERR(segs))
+ goto out;
+
+ len = skb_shinfo(skb)->gso_size;
+ delta = htonl(oldlen + (thlen + len));
+
+ skb = segs;
+ th = skb->h.th;
+ seq = ntohl(th->seq);
+
+ do {
+ th->fin = th->psh = 0;
+
+ th->check = ~csum_fold((u32)((u32)th->check +
+ (u32)delta));
+ seq += len;
+ skb = skb->next;
+ th = skb->h.th;
+
+ th->seq = htonl(seq);
+ th->cwr = 0;
+ } while (skb->next);
+
+ delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
+ th->check = ~csum_fold((u32)((u32)th->check +
+ (u32)delta));
+out:
+ return segs;
+}
+
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct iphdr *iph;
+ int ihl;
+ int id;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ goto out;
+
+ iph = skb->nh.iph;
+ ihl = iph->ihl * 4;
+ if (ihl < sizeof(*iph))
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, ihl)))
+ goto out;
+
+ skb->h.raw = __skb_pull(skb, ihl);
+ iph = skb->nh.iph;
+ id = ntohs(iph->id);
+ segs = ERR_PTR(-EPROTONOSUPPORT);
+
+ segs = tcp_tso_segment(skb, features);
+
+ if (!segs || IS_ERR(segs))
+ goto out;
+
+ skb = segs;
+ do {
+ iph = skb->nh.iph;
+ iph->id = htons(id++);
+ iph->tot_len = htons(skb->len - skb->mac_len);
+ iph->check = 0;
+ iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
+ } while ((skb = skb->next));
+
+out:
+ return segs;
+}
+
+static struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+
+ skb->mac.raw = skb->data;
+ skb->mac_len = skb->nh.raw - skb->data;
+ __skb_pull(skb, skb->mac_len);
+
+ segs = inet_gso_segment(skb, features);
+
+ __skb_push(skb, skb->data - skb->mac.raw);
+ return segs;
+}
+
+#endif
+
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
/* Use GSO to workaround a rare TSO bug that may be triggered when the
return NETDEV_TX_OK;
}
+#endif
+
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
* support TG3_FLG2_HW_TSO_1 or firmware TSO only.
*/
base_flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#if TG3_TSO_SUPPORT != 0
mss = 0;
- if ((mss = skb_shinfo(skb)->gso_size) != 0) {
+ if (((mss = skb_shinfo(skb)->gso_size) != 0) &&
+ (skb_shinfo(skb)->gso_segs > 1)) {
struct iphdr *iph;
int tcp_opt_len, ip_tcp_len, hdr_len;
}
}
}
+#else
+ mss = 0;
+#endif
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
entry = start;
}
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
if (new_mtu > ETH_DATA_LEN) {
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
+#if TG3_TSO_SUPPORT != 0
ethtool_op_set_tso(dev, 0);
+#endif
}
else
tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
return 0;
}
- tg3_phy_stop(tp);
-
tg3_netif_stop(tp);
tg3_full_lock(tp, 1);
tg3_full_unlock(tp);
- if (!err)
- tg3_phy_start(tp);
-
return err;
}
continue;
pci_unmap_single(tp->pdev,
pci_unmap_addr(rxp, mapping),
- tp->rx_pkt_buf_sz - tp->rx_offset,
+ tp->rx_pkt_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(rxp->skb);
rxp->skb = NULL;
continue;
pci_unmap_single(tp->pdev,
pci_unmap_addr(rxp, mapping),
- RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
+ RX_JUMBO_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(rxp->skb);
rxp->skb = NULL;
struct tg3_rx_buffer_desc *rxd;
rxd = &tp->rx_std[i];
- rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
+ rxd->idx_len = (tp->rx_pkt_buf_sz - 64)
<< RXD_LEN_SHIFT;
rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
rxd->opaque = (RXD_OPAQUE_RING_STD |
struct tg3_rx_buffer_desc *rxd;
rxd = &tp->rx_jumbo[i];
- rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
+ rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - 64)
<< RXD_LEN_SHIFT;
rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
RXD_FLAG_JUMBO;
event = APE_EVENT_STATUS_STATE_START;
break;
case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+
event = APE_EVENT_STATUS_STATE_UNLOAD;
break;
case RESET_KIND_SUSPEND:
}
/* Make sure PCI-X relaxed ordering bit is clear. */
- if (tp->pcix_cap) {
+ if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
u16 pcix_cmd;
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
tg3_nvram_lock(tp);
- tg3_mdio_stop(tp);
-
tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
/* No matching tg3_nvram_unlock() after this because
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tw32(GRC_FASTBOOT_PC, 0);
/*
}
tp->last_tag = 0;
smp_mb();
+#if (LINUX_VERSION_CODE >= 0x2051c)
synchronize_irq(tp->pdev->irq);
+#else
+ synchronize_irq();
+#endif
/* do the reset */
val = GRC_MISC_CFG_CORECLK_RESET;
} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
tw32_f(MAC_MODE, tp->mac_mode);
- } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
if (tp->mac_mode & MAC_MODE_APE_TX_EN)
tp->mac_mode |= MAC_MODE_TDE_ENABLE;
tw32_f(MAC_MODE, 0);
udelay(40);
- tg3_mdio_start(tp);
-
tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
err = tg3_poll_fw(tp);
return 0;
}
+#if TG3_TSO_SUPPORT != 0
#define TG3_TSO_FW_RELEASE_MAJOR 0x1
#define TG3_TSO_FW_RELASE_MINOR 0x6
return 0;
}
-
-/* tp->lock is held. */
-static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
-{
- u32 addr_high, addr_low;
- int i;
-
- addr_high = ((tp->dev->dev_addr[0] << 8) |
- tp->dev->dev_addr[1]);
- addr_low = ((tp->dev->dev_addr[2] << 24) |
- (tp->dev->dev_addr[3] << 16) |
- (tp->dev->dev_addr[4] << 8) |
- (tp->dev->dev_addr[5] << 0));
- for (i = 0; i < 4; i++) {
- if (i == 1 && skip_mac_1)
- continue;
- tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
- }
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
- for (i = 0; i < 12; i++) {
- tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
- }
- }
-
- addr_high = (tp->dev->dev_addr[0] +
- tp->dev->dev_addr[1] +
- tp->dev->dev_addr[2] +
- tp->dev->dev_addr[3] +
- tp->dev->dev_addr[4] +
- tp->dev->dev_addr[5]) &
- TX_BACKOFF_SEED_MASK;
- tw32(MAC_TX_BACKOFF_SEED, addr_high);
-}
+#endif /* TG3_TSO_SUPPORT != 0 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
tg3_abort_hw(tp, 1);
}
- if (reset_phy &&
- !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
+ if (reset_phy)
tg3_phy_reset(tp);
err = tg3_chip_reset(tp);
tg3_write_sig_legacy(tp, RESET_KIND_INIT);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
val = tr32(TG3_CPMU_CTRL);
val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
tw32(TG3_CPMU_CTRL, val);
return err;
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
*/
tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
}
+#if TG3_TSO_SUPPORT != 0
else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
int fw_len;
tw32(BUFMGR_MB_POOL_SIZE,
NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
}
+#endif
if (tp->dev->mtu <= ETH_DATA_LEN) {
tw32(BUFMGR_MB_RDMA_LOW_WATER,
__tg3_set_mac_addr(tp, 0);
/* MTU + ethernet header + FCS + optional VLAN tag */
- tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
+ tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
/* The slot time is changed by tg3_setup_phy if we
* run at gigabit with half duplex.
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+#if TG3_TSO_SUPPORT != 0
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
rdmac_mode |= (1 << 27);
+#endif
/* Receive/send statistics. */
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
udelay(10);
}
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
tp->mac_mode = 0;
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
val |= WDMAC_MODE_STATUS_TAG_FIX;
tw32_f(WDMAC_MODE, val);
tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
+#if TG3_TSO_SUPPORT != 0
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
+#endif
tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
return err;
}
+#if TG3_TSO_SUPPORT != 0
if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
err = tg3_load_tso_firmware(tp);
if (err)
return err;
}
+#endif
tp->tx_mode = TX_MODE_ENABLE;
tw32_f(MAC_TX_MODE, tp->tx_mode);
tp->rx_mode = RX_MODE_ENABLE;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
tw32_f(MAC_RX_MODE, tp->rx_mode);
udelay(10);
+ if (tp->link_config.phy_is_low_power) {
+ tp->link_config.phy_is_low_power = 0;
+ tp->link_config.speed = tp->link_config.orig_speed;
+ tp->link_config.duplex = tp->link_config.orig_duplex;
+ tp->link_config.autoneg = tp->link_config.orig_autoneg;
+ }
+
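+ /* Stop the MAC from auto-polling the PHY before the MII accesses
+ * below; the 80us delay lets the mode change settle.
+ */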
+ tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+
tw32(MAC_LED_CTRL, tp->led_ctrl);
tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
}
- if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
- if (tp->link_config.phy_is_low_power) {
- tp->link_config.phy_is_low_power = 0;
- tp->link_config.speed = tp->link_config.orig_speed;
- tp->link_config.duplex = tp->link_config.orig_duplex;
- tp->link_config.autoneg = tp->link_config.orig_autoneg;
- }
-
- err = tg3_setup_phy(tp, 0);
- if (err)
- return err;
+ err = tg3_setup_phy(tp, 0);
+ if (err)
+ return err;
- if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
- u32 tmp;
+ if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
+ u32 tmp;
- /* Clear CRC stats. */
- if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
- tg3_writephy(tp, MII_TG3_TEST1,
- tmp | MII_TG3_TEST1_CRC_EN);
- tg3_readphy(tp, 0x14, &tmp);
- }
+ /* Clear CRC stats. */
+ if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
+ tg3_writephy(tp, MII_TG3_TEST1,
+ tmp | MII_TG3_TEST1_CRC_EN);
+ tg3_readphy(tp, 0x14, &tmp);
}
}
*/
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
+ int err;
+
+ /* Force the chip into D0. */
+ err = tg3_set_power_state(tp, PCI_D0);
+ if (err)
+ goto out;
+
tg3_switch_clocks(tp);
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
- return tg3_reset_hw(tp, reset_phy);
+ err = tg3_reset_hw(tp, reset_phy);
+
+out:
+ return err;
}
#define TG3_STAT_ADD32(PSTAT, REG) \
static int tg3_request_irq(struct tg3 *tp)
{
+#if (LINUX_VERSION_CODE < 0x020613)
+ irqreturn_t (*fn)(int, void *, struct pt_regs *);
+#else
irq_handler_t fn;
+#endif
unsigned long flags;
struct net_device *dev = tp->dev;
break;
}
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+#else
msleep(10);
+#endif
}
tg3_disable_ints(tp);
tp->dev->name);
free_irq(tp->pdev->irq, dev);
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
netif_carrier_off(tp->dev);
+ tg3_full_lock(tp, 0);
+
err = tg3_set_power_state(tp, PCI_D0);
- if (err)
+ if (err) {
+ tg3_full_unlock(tp);
return err;
-
- tg3_full_lock(tp, 0);
+ }
tg3_disable_ints(tp);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
if (err)
return err;
+#ifdef CONFIG_PCI_MSI
if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
} else if (pci_enable_msi(tp->pdev) == 0) {
u32 msi_mode;
+#ifndef BCM_HAS_INTX_MSI_WORKAROUND
+ /* Hardware bug - MSI won't work if INTX disabled. */
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+#if (LINUX_VERSION_CODE < 0x2060e)
+ tg3_enable_intx(tp->pdev);
+#else
+ pci_intx(tp->pdev, 1);
+#endif
+#endif
+
msi_mode = tr32(MSGINT_MODE);
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
}
}
+#endif
err = tg3_request_irq(tp);
if (err) {
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_free_consistent(tp);
return err;
}
+#ifdef TG3_NAPI
napi_enable(&tp->napi);
+#endif
tg3_full_lock(tp, 0);
tg3_full_unlock(tp);
if (err) {
+#ifdef TG3_NAPI
napi_disable(&tp->napi);
+#endif
free_irq(tp->pdev->irq, dev);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_free_consistent(tp);
tg3_full_lock(tp, 0);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
+#ifdef TG3_NAPI
napi_disable(&tp->napi);
+#endif
return err;
}
}
}
- tg3_phy_start(tp);
-
tg3_full_lock(tp, 0);
add_timer(&tp->timer);
{
struct tg3 *tp = netdev_priv(dev);
+#ifdef TG3_NAPI
napi_disable(&tp->napi);
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20616)
cancel_work_sync(&tp->reset_task);
+#else
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+#endif
netif_stop_queue(dev);
free_irq(tp->pdev->irq, dev);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
return ret;
}
-static inline u64 get_estat64(tg3_stat64_t *val)
-{
- return ((u64)val->high << 32) | ((u64)val->low);
-}
-
static unsigned long calc_crc_errors(struct tg3 *tp)
{
struct tg3_hw_stats *hw_stats = tp->hw_stats;
#define ESTAT_ADD(member) \
estats->member = old_estats->member + \
- get_estat64(&hw_stats->member)
+ get_stat64(&hw_stats->member)
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
tg3_full_unlock(tp);
}
+#if (LINUX_VERSION_CODE >= 0x20418)
static int tg3_get_eeprom_len(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
return tp->nvram_size;
}
+#endif
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
+#ifdef ETHTOOL_GEEPROM
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
}
return 0;
}
+#endif
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+#ifdef ETHTOOL_SEEPROM
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
return ret;
}
+#endif
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tg3 *tp = netdev_priv(dev);
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
- return -EAGAIN;
- return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
- }
-
cmd->supported = (SUPPORTED_Autoneg);
if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
- return -EAGAIN;
- return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
- }
+{
+ struct tg3 *tp = netdev_priv(dev);
if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
/* These are the only valid advertisement bits allowed. */
{
struct tg3 *tp = netdev_priv(dev);
- if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
- device_can_wakeup(&tp->pdev->dev))
+ if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
wol->supported = WAKE_MAGIC;
else
wol->supported = 0;
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct tg3 *tp = netdev_priv(dev);
- struct device *dp = &tp->pdev->dev;
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
if ((wol->wolopts & WAKE_MAGIC) &&
- !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
+ !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
return -EINVAL;
spin_lock_bh(&tp->lock);
- if (wol->wolopts & WAKE_MAGIC) {
+ if (wol->wolopts & WAKE_MAGIC)
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, true);
- } else {
+ else
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, false);
- }
spin_unlock_bh(&tp->lock);
return 0;
tp->msg_enable = value;
}
+#if TG3_TSO_SUPPORT != 0
static int tg3_set_tso(struct net_device *dev, u32 value)
{
struct tg3 *tp = netdev_priv(dev);
dev->features |= NETIF_F_TSO6;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))
dev->features |= NETIF_F_TSO_ECN;
} else
dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
}
return ethtool_op_set_tso(dev, value);
}
+#endif
static int tg3_nway_reset(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
+ u32 bmcr;
int r;
if (!netif_running(dev))
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
return -EINVAL;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
- return -EAGAIN;
- r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
- } else {
- u32 bmcr;
-
- spin_lock_bh(&tp->lock);
- r = -EINVAL;
- tg3_readphy(tp, MII_BMCR, &bmcr);
- if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
- ((bmcr & BMCR_ANENABLE) ||
- (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
- tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
- BMCR_ANENABLE);
- r = 0;
- }
- spin_unlock_bh(&tp->lock);
+ spin_lock_bh(&tp->lock);
+ r = -EINVAL;
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
+ ((bmcr & BMCR_ANENABLE) ||
+ (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
+ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
+ BMCR_ANENABLE);
+ r = 0;
}
+ spin_unlock_bh(&tp->lock);
return r;
}
return -EINVAL;
if (netif_running(dev)) {
- tg3_phy_stop(tp);
tg3_netif_stop(tp);
irq_sync = 1;
}
tg3_full_unlock(tp);
- if (irq_sync && !err)
- tg3_phy_start(tp);
-
return err;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
struct tg3 *tp = netdev_priv(dev);
- int err = 0;
-
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
- return -EAGAIN;
-
- if (epause->autoneg) {
- u32 newadv;
- struct phy_device *phydev;
-
- phydev = tp->mdio_bus.phy_map[PHY_ADDR];
-
- if (epause->rx_pause) {
- if (epause->tx_pause)
- newadv = ADVERTISED_Pause;
- else
- newadv = ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
- } else if (epause->tx_pause) {
- newadv = ADVERTISED_Asym_Pause;
- } else
- newadv = 0;
-
- if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
- u32 oldadv = phydev->advertising &
- (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- if (oldadv != newadv) {
- phydev->advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- phydev->advertising |= newadv;
- err = phy_start_aneg(phydev);
- }
- } else {
- tp->link_config.advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- tp->link_config.advertising |= newadv;
- }
- } else {
- if (epause->rx_pause)
- tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
- else
- tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
-
- if (epause->tx_pause)
- tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
- else
- tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
-
- if (netif_running(dev))
- tg3_setup_flow_control(tp, 0, 0);
- }
- } else {
- int irq_sync = 0;
-
- if (netif_running(dev)) {
- tg3_netif_stop(tp);
- irq_sync = 1;
- }
+ int irq_sync = 0, err = 0;
- tg3_full_lock(tp, irq_sync);
+ if (netif_running(dev)) {
+ tg3_netif_stop(tp);
+ irq_sync = 1;
+ }
- if (epause->autoneg)
- tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
- else
- tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
- if (epause->rx_pause)
- tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
- else
- tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
- if (epause->tx_pause)
- tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
- else
- tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
+ tg3_full_lock(tp, irq_sync);
- if (netif_running(dev)) {
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, 1);
- if (!err)
- tg3_netif_start(tp);
- }
+ if (epause->autoneg)
+ tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
+ if (epause->rx_pause)
+ tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
+ else
+ tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
+ if (epause->tx_pause)
+ tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
+ else
+ tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
- tg3_full_unlock(tp);
+ if (netif_running(dev)) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
+ tg3_full_unlock(tp);
+
return err;
}
return 0;
}
+#ifdef BCM_HAS_SET_TX_CSUM
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
struct tg3 *tp = netdev_priv(dev);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
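+ /* Use the kernel's IPv6-aware csum helper when available; otherwise
+ * fall back to the generic hw-csum helper or the local equivalent.
+ */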
+#if defined(BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM)
ethtool_op_set_tx_ipv6_csum(dev, data);
+#elif defined(BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM)
+ ethtool_op_set_tx_hw_csum(dev, data);
+#else
+ tg3_set_tx_hw_csum(dev, data);
+#endif
else
ethtool_op_set_tx_csum(dev, data);
return 0;
}
+#endif
static int tg3_get_sset_count (struct net_device *dev, int sset)
{
}
}
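+/* get_sset_count() replaced the separate get_stats_count() and
+ * self_test_count() ethtool hooks in 2.6.24 (per the version checks
+ * here and in the ethtool_ops table below); keep the old entry points
+ * for earlier kernels.
+ */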
+#if (LINUX_VERSION_CODE < 0x020618)
+static int tg3_get_stats_count (struct net_device *dev)
+{
+ return tg3_get_sset_count(dev, ETH_SS_STATS);
+}
+
+static int tg3_get_test_count (struct net_device *dev)
+{
+ return tg3_get_sset_count(dev, ETH_SS_TEST);
+}
+#endif
+
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
LED_CTRL_TRAFFIC_OVERRIDE);
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 2))
+#else
if (msleep_interruptible(500))
+#endif
break;
}
tw32(MAC_LED_CTRL, tp->led_ctrl);
}
#define TG3_SERDES_TIMEOUT_SEC 2
-#define TG3_COPPER_TIMEOUT_SEC 6
+#define TG3_COPPER_TIMEOUT_SEC 7
static int tg3_test_link(struct tg3 *tp)
{
if (netif_carrier_ok(tp->dev))
return 0;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ))
+#else
if (msleep_interruptible(1000))
+#endif
break;
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
mem_tbl = mem_tbl_5755;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
tp->tx_prod++;
num_pkts++;
+ /* Make sure the TX BD writes above are ordered before the producer
+ * index mailbox write below.
+ */
+ wmb();
+
tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
tp->tx_prod);
tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
if (err)
return TG3_LOOPBACK_FAILED;
+ /* Turn off gphy autopowerdown. */
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, 0);
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
int i;
u32 status;
err |= TG3_MAC_LOOPBACK_FAILED;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
tw32(TG3_CPMU_CTRL, cpmuctrl);
/* Release the mutex */
tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
}
- if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
- !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
+ if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
err |= TG3_PHY_LOOPBACK_FAILED;
}
+ /* Re-enable gphy autopowerdown. */
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, 1);
+
return err;
}
data[1] = 1;
}
if (etest->flags & ETH_TEST_FL_OFFLINE) {
- int err, err2 = 0, irq_sync = 0;
+ int err, irq_sync = 0;
if (netif_running(dev)) {
- tg3_phy_stop(tp);
tg3_netif_stop(tp);
irq_sync = 1;
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- err2 = tg3_restart_hw(tp, 1);
- if (!err2)
+ if (!tg3_restart_hw(tp, 1))
tg3_netif_start(tp);
}
tg3_full_unlock(tp);
-
- if (irq_sync && !err2)
- tg3_phy_start(tp);
}
if (tp->link_config.phy_is_low_power)
tg3_set_power_state(tp, PCI_D3hot);
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
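+ /* Kernels before 2.6.7 (per the check below) have no if_mii();
+ * cast the ifreq union directly there.
+ */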
+#if (LINUX_VERSION_CODE >= 0x020607)
struct mii_ioctl_data *data = if_mii(ifr);
+#else
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
+#endif
struct tg3 *tp = netdev_priv(dev);
int err;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
- return -EAGAIN;
- return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
- }
-
switch(cmd) {
case SIOCGMIIPHY:
data->phy_id = PHY_ADDR;
tg3_full_unlock(tp);
}
+
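+/* Unregister a VLAN id: quiesce the device, clear the vlan group entry
+ * under the full lock, then restart. Counterpart to tg3_vlan_rx_register().
+ */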
+static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (netif_running(dev))
+ tg3_netif_stop(tp);
+
+ tg3_full_lock(tp, 0);
+ vlan_group_set_device(tp->vlgrp, vid, NULL);
+ tg3_full_unlock(tp);
+
+ if (netif_running(dev))
+ tg3_netif_start(tp);
+}
#endif
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
return 0;
}
-static const struct ethtool_ops tg3_ethtool_ops = {
+static struct ethtool_ops tg3_ethtool_ops = {
.get_settings = tg3_get_settings,
.set_settings = tg3_set_settings,
.get_drvinfo = tg3_get_drvinfo,
.set_msglevel = tg3_set_msglevel,
.nway_reset = tg3_nway_reset,
.get_link = ethtool_op_get_link,
+#if (LINUX_VERSION_CODE >= 0x20418)
.get_eeprom_len = tg3_get_eeprom_len,
+#endif
+#ifdef ETHTOOL_GEEPROM
.get_eeprom = tg3_get_eeprom,
+#endif
+#ifdef ETHTOOL_SEEPROM
.set_eeprom = tg3_set_eeprom,
+#endif
.get_ringparam = tg3_get_ringparam,
.set_ringparam = tg3_set_ringparam,
.get_pauseparam = tg3_get_pauseparam,
.set_pauseparam = tg3_set_pauseparam,
.get_rx_csum = tg3_get_rx_csum,
.set_rx_csum = tg3_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+#ifdef BCM_HAS_SET_TX_CSUM
.set_tx_csum = tg3_set_tx_csum,
+#endif
+ .get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
+#if TG3_TSO_SUPPORT != 0
+ .get_tso = ethtool_op_get_tso,
.set_tso = tg3_set_tso,
+#endif
+#if (LINUX_VERSION_CODE < 0x20618)
+ .self_test_count = tg3_get_test_count,
+#endif
.self_test = tg3_self_test,
.get_strings = tg3_get_strings,
.phys_id = tg3_phys_id,
+#if (LINUX_VERSION_CODE < 0x20618)
+ .get_stats_count = tg3_get_stats_count,
+#endif
.get_ethtool_stats = tg3_get_ethtool_stats,
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
+#if (LINUX_VERSION_CODE >= 0x20618)
.get_sset_count = tg3_get_sset_count,
+#endif
+#if defined(ETHTOOL_GPERMADDR) && (LINUX_VERSION_CODE < 0x020617)
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
(EEPROM_DEFAULT_CLOCK_PERIOD <<
EEPROM_ADDR_CLKPERD_SHIFT)));
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1); /* at least one jiffy; HZ / 1000 is 0 when HZ < 1000 */
+#else
msleep(1);
+#endif
/* Enable seeprom accesses. */
tw32_f(GRC_LOCAL_CTRL,
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
tg3_get_5755_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
tg3_get_5787_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tg3_get_5761_nvram_info(tp);
if (tmp & EEPROM_ADDR_COMPLETE)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+#else
msleep(1);
+#endif
}
if (!(tmp & EEPROM_ADDR_COMPLETE))
return -EBUSY;
if (val & EEPROM_ADDR_COMPLETE)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+#else
msleep(1);
+#endif
}
if (!(val & EEPROM_ADDR_COMPLETE)) {
rc = -EBUSY;
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
(tp->nvram_jedecnum == JEDEC_ST) &&
(nvram_cmd & NVRAM_CMD_FIRST)) {
pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+#else
msleep(1);
+#endif
/* Make sure register accesses (indirect or otherwise)
* will function correctly.
if (val & VCPU_CFGSHDW_ASPM_DBNC)
tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
- (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
+ (val & VCPU_CFGSHDW_WOL_MAGPKT))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(&tp->pdev->dev, true);
- }
return;
}
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
u32 nic_cfg, led_cfg;
- u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+ u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
int eeprom_phy_serdes = 0;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
(ver > 0) && (ver < 0x100))
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
- tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
-
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
eeprom_phy_serdes = 1;
!(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
- if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
- (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
+ if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
+     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(&tp->pdev->dev, true);
- }
if (cfg2 & (1 << 17))
tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
if (cfg2 & (1 << 18))
tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
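+ /* Enable PHY auto power-down only where NVRAM requests it and the
+ * chip supports it (5784 past rev AX here).
+ */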
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX &&
+ (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
+ tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
+
if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
u32 cfg3;
if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
}
-
- if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
- tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
- if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
- tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
- if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
- tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
}
}
u32 hw_phy_id, hw_phy_id_masked;
int err;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
- return tg3_phy_init(tp);
-
/* Reading the PHY ID register can conflict with ASF
* firmware access to the PHY hardware.
*/
PCI_VPD_ADDR, &tmp16);
if (tmp16 & 0x8000)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+#else
msleep(1);
+#endif
}
if (!(tmp16 & 0x8000))
goto out_not_found;
return 1;
}
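+/* Decode the firmware version of a self-boot (format 1) NVRAM image
+ * into tp->fw_ver as "sb vMAJ.MIN", plus a trailing build letter when
+ * a build number is present (1..26 map to 'a'..'z').
+ */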
+static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
+{
+ u32 offset, major, minor, build;
+
+ tp->fw_ver[0] = 's';
+ tp->fw_ver[1] = 'b';
+ tp->fw_ver[2] = '\0';
+
+ if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
+ return;
+
+ switch (val & TG3_EEPROM_SB_REVISION_MASK) {
+ case TG3_EEPROM_SB_REVISION_0:
+ offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_2:
+ offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_3:
+ offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (tg3_nvram_read_swab(tp, offset, &val))
+ return;
+
+ build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
+ TG3_EEPROM_SB_EDH_BLD_SHFT;
+ major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
+ TG3_EEPROM_SB_EDH_MAJ_SHFT;
+ minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
+
+ if (minor > 99 || build > 26)
+ return;
+
+ snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
+
+ if (build > 0) {
+ tp->fw_ver[8] = 'a' + build - 1;
+ tp->fw_ver[9] = '\0';
+ }
+}
+
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
u32 val, offset, start;
if (tg3_nvram_read_swab(tp, 0, &val))
return;
- if (val != TG3_EEPROM_MAGIC)
+ if (val != TG3_EEPROM_MAGIC) {
+ if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
+ tg3_read_sb_ver(tp, val);
+
return;
+ }
if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
tg3_nvram_read_swab(tp, 0x4, &start))
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
+#if (LINUX_VERSION_CODE >= 0x2060a)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
PCI_DEVICE_ID_VIA_8385_0) },
{ },
};
+#endif
u32 misc_ctrl_reg;
u32 cacheline_sz_reg;
u32 pci_state_reg, grc_misc_cfg;
u32 val;
u16 pci_cmd;
- int err, pcie_cap;
+ int err;
/* Force memory write invalidate off. If we leave it on,
* then on 5700_BX chips we have to enable a workaround.
pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
&prod_id_asic_rev);
- tp->pci_chip_rev_id = prod_id_asic_rev;
+ tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
}
/* Wrong chip ID in 5752 A0. This code can be removed later
continue;
}
if (pci_id->rev != PCI_ANY_ID) {
- if (bridge->revision > pci_id->rev)
+ u8 rev;
+
+ pci_read_config_byte(bridge, PCI_REVISION_ID,
+ &rev);
+ if (rev > pci_id->rev)
continue;
}
if (bridge->subordinate &&
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
}
}
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS) ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
- pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap != 0) {
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+
+ tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
+ if (tp->pcie_cap != 0) {
+ u16 lnkctl;
+
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
pcie_set_readrq(tp->pdev, 4096);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u16 lnkctl;
-
- pci_read_config_word(tp->pdev,
- pcie_cap + PCI_EXP_LNKCTL,
- &lnkctl);
- if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ &lnkctl);
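+ /* With CLKREQ enabled on the link, the 5906 cannot use HW TSO and
+ * the 5784/5761 need the CLKREQ bug workaround.
+ */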
+ if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
}
+ } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+ if (!tp->pcix_cap) {
+ printk(KERN_ERR PFX "Cannot find PCI-X "
+ "capability, aborting.\n");
+ return -EIO;
+ }
+
+ if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
+ tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
}
/* If we have an AMD 762 or VIA K8T800 chipset, write
* every mailbox register write to force the writes to be
* posted to the chip in order.
*/
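+ /* pci_dev_present() is only available from 2.6.10 (per the guard
+ * below); on older kernels probe each write-reordering bridge with
+ * pci_find_device() instead.
+ */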
+#if (LINUX_VERSION_CODE < 0x2060a)
+ if ((pci_find_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL) ||
+ pci_find_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_8131_BRIDGE, NULL) ||
+ pci_find_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8385_0, NULL)) &&
+#else
if (pci_dev_present(write_reorder_chipsets) &&
+#endif
!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
cacheline_sz_reg);
}
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
- tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
- if (!tp->pcix_cap) {
- printk(KERN_ERR PFX "Cannot find PCI-X "
- "capability, aborting.\n");
- return -EIO;
- }
- }
-
- pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
- &pci_state_reg);
-
- if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
- tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ /* 5700 BX chips need to have their TX producer index
+ * mailboxes written twice to work around a bug.
+ */
+ tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
- /* If this is a 5700 BX chipset, and we are in PCI-X
- * mode, enable register write workaround.
+ /* If we are in PCI-X mode, enable register write workaround.
*
* The workaround is to use indirect register accesses
* for all chip writes not to mailbox registers.
*/
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
u32 pm_reg;
tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
}
}
- /* 5700 BX chips need to have their TX producer index mailboxes
- * written twice to workaround a bug.
- */
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
- tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
-
if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
- tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
- }
-
/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
* GPIO1 driven high will bring 5700's external PHY out of reset.
* It is also used as eeprom write protect on LOMs.
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
}
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
- tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
-
- err = tg3_mdio_init(tp);
- if (err)
- return err;
+ /* Initialize MAC MI mode, polling disabled. */
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
}
/* Preserve the APE MAC_MODE bits */
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tp->mac_mode = tr32(MAC_MODE) |
MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
pci_name(tp->pdev), err);
/* ... but do not return immediately ... */
- tg3_mdio_fini(tp);
}
tg3_read_partno(tp);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tp->dev->hard_start_xmit = tg3_start_xmit;
else
tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
- tp->rx_offset = 2;
+ tp->rx_offset = NET_IP_ALIGN + VLAN_HLEN;
+ tp->rx_copy_thresh = RX_COPY_THRESHOLD;
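+ /* The 5701 in PCI-X mode cannot DMA to 2-byte aligned rx buffers, so
+ * drop the IP alignment; without efficient unaligned access, max out
+ * the copy threshold so every rx packet is double-copied into an
+ * aligned buffer.
+ */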
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
- tp->rx_offset = 0;
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+ tp->rx_offset -= NET_IP_ALIGN;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
tp->rx_std_max_post = TG3_RX_RING_SIZE;
#endif
return -EINVAL;
}
+#ifdef ETHTOOL_GPERMADDR
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+#endif
return 0;
}
}
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_16) {
+#if (LINUX_VERSION_CODE >= 0x2060a)
static struct pci_device_id dma_wait_state_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
{ },
};
+#endif
/* DMA test passed without adjusting DMA boundary,
* now look for chipsets that are known to expose the
* DMA bug without failing the test.
*/
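+ /* Same pci_dev_present() availability split as in tg3_get_invariants(). */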
- if (pci_dev_present(dma_wait_state_chipsets)) {
+#if (LINUX_VERSION_CODE < 0x2060a)
+ if (pci_find_device(PCI_VENDOR_ID_APPLE,
+ PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL))
+#else
+ if (pci_dev_present(dma_wait_state_chipsets))
+#endif
+ {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
}
case PHY_ID_BCM5756: return "5722/5756";
case PHY_ID_BCM5906: return "5906";
case PHY_ID_BCM5761: return "5761";
- case PHY_ID_BCM57780: return "57780";
case PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
const struct pci_device_id *ent)
{
static int tg3_version_printed = 0;
- resource_size_t tg3reg_base;
unsigned long tg3reg_len;
struct net_device *dev;
struct tg3 *tp;
goto err_out_free_res;
}
- tg3reg_base = pci_resource_start(pdev, 0);
- tg3reg_len = pci_resource_len(pdev, 0);
-
dev = alloc_etherdev(sizeof(*tp));
if (!dev) {
printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
goto err_out_free_res;
}
+ SET_MODULE_OWNER(dev);
+#if (LINUX_VERSION_CODE >= 0x20419)
SET_NETDEV_DEV(dev, &pdev->dev);
+#endif
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = tg3_vlan_rx_register;
+ dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif
tp = netdev_priv(dev);
#endif
spin_lock_init(&tp->lock);
spin_lock_init(&tp->indirect_lock);
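+ /* INIT_WORK() lost its data argument in the 2.6.20 workqueue rework;
+ * the macros tested here arrived with the new API, so their presence
+ * selects the right form.
+ */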
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
INIT_WORK(&tp->reset_task, tg3_reset_task);
+#else
+ INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+#endif
- tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
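+ /* Record BAR 0 in the legacy dev->mem_start/mem_end fields and map
+ * the register block from there.
+ */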
+ dev->mem_start = pci_resource_start(pdev, 0);
+ tg3reg_len = pci_resource_len(pdev, 0);
+ dev->mem_end = dev->mem_start + tg3reg_len;
+
+ tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
if (!tp->regs) {
printk(KERN_ERR PFX "Cannot map device registers, "
"aborting.\n");
dev->set_mac_address = tg3_set_mac_addr;
dev->do_ioctl = tg3_ioctl;
dev->tx_timeout = tg3_tx_timeout;
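+ /* netif_napi_add() only exists from 2.6.24; older kernels use the
+ * dev->poll/weight interface (TG3_NAPI is presumably defined by the
+ * compat layer when the new API is available).
+ */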
+#ifdef TG3_NAPI
netif_napi_add(dev, &tp->napi, tg3_poll, 64);
+#else
+ dev->poll = tg3_poll;
+ dev->weight = 64;
+#endif
dev->ethtool_ops = &tg3_ethtool_ops;
dev->watchdog_timeo = TG3_TX_TIMEOUT;
dev->change_mtu = tg3_change_mtu;
dev->irq = pdev->irq;
-#ifdef CONFIG_NET_POLL_CONTROLLER
+#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
dev->poll_controller = tg3_poll_controller;
#endif
tg3_init_bufmgr_config(tp);
+#if TG3_TSO_SUPPORT != 0
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
}
dev->features |= NETIF_F_TSO6;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))
dev->features |= NETIF_F_TSO_ECN;
}
+#endif
if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ resource_size_t tg3reg_base;
+
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
printk(KERN_ERR PFX "Cannot find proper PCI device "
"base address for APE, aborting.\n");
* checksumming.
*/
if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
+#ifdef NETIF_F_IPV6_CSUM
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ dev->features |= NETIF_F_IPV6_CSUM;
+#else
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
- dev->features |= NETIF_F_IPV6_CSUM;
-
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ dev->features |= NETIF_F_HW_CSUM;
+ else
+ dev->features |= NETIF_F_IP_CSUM;
+ dev->features |= NETIF_F_SG;
+#endif
tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
}
err_out_free_dev:
+#if (LINUX_VERSION_CODE >= 0x20418)
free_netdev(dev);
+#else
+ kfree(dev);
+#endif
err_out_free_res:
pci_release_regions(pdev);
if (dev) {
struct tg3 *tp = netdev_priv(dev);
+#if (LINUX_VERSION_CODE >= 0x20600)
flush_scheduled_work();
-
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- tg3_phy_fini(tp);
- tg3_mdio_fini(tp);
- }
-
+#endif
unregister_netdev(dev);
if (tp->aperegs) {
iounmap(tp->aperegs);
iounmap(tp->regs);
tp->regs = NULL;
}
+#if (LINUX_VERSION_CODE >= 0x20418)
free_netdev(dev);
+#else
+ kfree(dev);
+#endif
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
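+/* Before 2.6.11 the suspend hook took a raw u32 power state rather
+ * than pm_message_t.
+ */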
+#if (LINUX_VERSION_CODE < 0x2060b)
+static int tg3_suspend(struct pci_dev *pdev, u32 state)
+#else
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
+#endif
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
- pci_power_t target_state;
int err;
/* PCI register 4 needs to be saved whether netif_running() or not.
* MSI address and data need to be saved if using MSI and
* netif_running().
*/
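+ /* pci_save_state() took an explicit buffer argument before 2.6.10. */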
+#if (LINUX_VERSION_CODE < 0x2060a)
+ pci_save_state(pdev, tp->pci_cfg_state);
+#else
pci_save_state(pdev);
+#endif
if (!netif_running(dev))
return 0;
+#if (LINUX_VERSION_CODE >= 0x20600)
flush_scheduled_work();
- tg3_phy_stop(tp);
+#endif
tg3_netif_stop(tp);
del_timer_sync(&tp->timer);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
tg3_full_unlock(tp);
- target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
-
- err = tg3_set_power_state(tp, target_state);
+#if (LINUX_VERSION_CODE < 0x2060b)
+ err = tg3_set_power_state(tp, state);
+#else
+ err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
+#endif
if (err) {
- int err2;
-
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- err2 = tg3_restart_hw(tp, 1);
- if (err2)
+ if (tg3_restart_hw(tp, 1))
goto out;
tp->timer.expires = jiffies + tp->timer_offset;
out:
tg3_full_unlock(tp);
-
- if (!err2)
- tg3_phy_start(tp);
}
return err;
struct tg3 *tp = netdev_priv(dev);
int err;
+#if (LINUX_VERSION_CODE < 0x2060a)
+ pci_restore_state(tp->pdev, tp->pci_cfg_state);
+#else
pci_restore_state(tp->pdev);
+#endif
if (!netif_running(dev))
return 0;
if (err)
return err;
+#ifndef BCM_HAS_INTX_MSI_WORKAROUND
+ /* Hardware bug - MSI won't work if INTX disabled. */
+ if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
+ (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
+#if (LINUX_VERSION_CODE < 0x2060e)
+ tg3_enable_intx(tp->pdev);
+#else
+ pci_intx(tp->pdev, 1);
+#endif
+#endif
+
netif_device_attach(dev);
tg3_full_lock(tp, 0);
out:
tg3_full_unlock(tp);
- if (!err)
- tg3_phy_start(tp);
-
return err;
}
static int __init tg3_init(void)
{
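+ /* pci_module_init() is the older registration entry point; newer
+ * kernels use pci_register_driver() directly.
+ */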
+#if (LINUX_VERSION_CODE < 0x020613)
+ return pci_module_init(&tg3_driver);
+#else
return pci_register_driver(&tg3_driver);
+#endif
}
static void __exit tg3_cleanup(void)