* notice is accompanying it.
*/
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < 0x020612)
+#include <linux/config.h>
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020500)
+#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#endif
#include <linux/module.h>
+#if (LINUX_VERSION_CODE >= 0x20600)
#include <linux/moduleparam.h>
+#endif
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#if (LINUX_VERSION_CODE >= 0x20600)
#include <linux/workqueue.h>
+#endif
#include <linux/prefetch.h>
+#if (LINUX_VERSION_CODE >= 0x020600)
#include <linux/dma-mapping.h>
+#endif
+#include <linux/bitops.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-#include "tg3_compat.h"
-
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#define TG3_VLAN_TAG_USED 0
#endif
+#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
+#else
+#define TG3_TSO_SUPPORT 0
+#endif
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.86"
-#define DRV_MODULE_RELDATE "November 9, 2007"
+#define DRV_MODULE_VERSION "3.92l"
+#define DRV_MODULE_RELDATE "September 26, 2008"
+
+/* The driver optimizes the hot rx code path by merging a mandatory rx double
+ * copy check with the normal double copy rx threshold check. On those
+ * architectures where the mandatory double copy is not needed, we can optimize
+ * further by saving a device structure dereference and hardcoding the double
+ * copy threshold in place.
+ */
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ #define TG3_RX_COPY_THRESH(tp) RX_COPY_THRESHOLD
+#else
+ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
+#endif
+
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
-#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
-#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
+#define RX_PKT_BUF_SZ (1536 + 64)
+#define RX_JUMBO_PKT_BUF_SZ (9046 + 64)
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
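+/* Headroom reserved in copied rx buffers; with a 14-byte Ethernet
+ * header this leaves the IP header 4-byte aligned.
+ */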
+#define TG3_RAW_IP_ALIGN 2
+
/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
MODULE_VERSION(DRV_MODULE_VERSION);
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
+#if (LINUX_VERSION_CODE >= 0x20600)
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+#endif
static struct pci_device_id tg3_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761SE)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
return 0;
switch (locknum) {
+ case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
break;
default:
return;
switch (locknum) {
+ case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
break;
default:
static inline void tg3_netif_stop(struct tg3 *tp)
{
tp->dev->trans_start = jiffies; /* prevent tx timeout */
+#ifdef TG3_NAPI
+ napi_disable(&tp->napi);
+#else
netif_poll_disable(tp->dev);
+#endif
netif_tx_disable(tp->dev);
}
* so long as all callers are assured to have free tx slots
* (such as after tg3_init_hw)
*/
+#ifdef TG3_NAPI
+ napi_enable(&tp->napi);
+#else
netif_poll_enable(tp->dev);
+#endif
tp->hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
}
return ret;
}
+static int tg3_bmcr_reset(struct tg3 *tp)
+{
+ u32 phy_control;
+ int limit, err;
+
+ /* OK, reset it, and poll the BMCR_RESET bit until it
+ * clears or we time out.
+ */
+ phy_control = BMCR_RESET;
+ err = tg3_writephy(tp, MII_BMCR, phy_control);
+ if (err != 0)
+ return -EBUSY;
+
+ limit = 5000;
+ while (limit--) {
+ err = tg3_readphy(tp, MII_BMCR, &phy_control);
+ if (err != 0)
+ return -EBUSY;
+
+ if ((phy_control & BMCR_RESET) == 0) {
+ udelay(40);
+ break;
+ }
+ udelay(10);
+ }
+ if (limit <= 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* tp->lock is held. */
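+/* Ring the GRC doorbell so the firmware's RX CPU notices a new driver
+ * event, and record the time for tg3_wait_for_event_ack().
+ */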
+static inline void tg3_generate_fw_event(struct tg3 *tp)
+{
+ u32 val;
+
+ val = tr32(GRC_RX_CPU_EVENT);
+ val |= GRC_RX_CPU_DRIVER_EVENT;
+ tw32_f(GRC_RX_CPU_EVENT, val);
+
+ tp->last_event_jiffies = jiffies;
+}
+
+#define TG3_FW_EVENT_TIMEOUT_USEC 2500
+
+/* tp->lock is held. */
+static void tg3_wait_for_event_ack(struct tg3 *tp)
+{
+ int i;
+ unsigned int delay_cnt;
+ long time_remain;
+
+ /* If enough time has passed, no wait is necessary. */
+ time_remain = (long)(tp->last_event_jiffies + 1 +
+ usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
+ (long)jiffies;
+ if (time_remain < 0)
+ return;
+
+ /* Check if we can shorten the wait time. */
+ delay_cnt = jiffies_to_usecs(time_remain);
+ if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
+ delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
+ delay_cnt = (delay_cnt >> 3) + 1;
+
+ for (i = 0; i < delay_cnt; i++) {
+ if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
+ break;
+ udelay(8);
+ }
+}
+
+/* tp->lock is held. */
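+/* Push the current PHY state (BMCR/BMSR, local and link partner
+ * advertisements, 1000BASE-T control/status, PHY address) to the
+ * management firmware mailbox as a link update.
+ */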
+static void tg3_ump_link_report(struct tg3 *tp)
+{
+ u32 reg;
+ u32 val;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ return;
+
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
+
+ val = 0;
+ if (!tg3_readphy(tp, MII_BMCR, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_BMSR, &reg))
+ val |= (reg & 0xffff);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
+
+ val = 0;
+ if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_LPA, &reg))
+ val |= (reg & 0xffff);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
+
+ val = 0;
+ if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
+ if (!tg3_readphy(tp, MII_CTRL1000, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_STAT1000, &reg))
+ val |= (reg & 0xffff);
+ }
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
+
+ if (!tg3_readphy(tp, MII_PHYADDR, &reg))
+ val = reg << 16;
+ else
+ val = 0;
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
+
+ tg3_generate_fw_event(tp);
+}
+
+static void tg3_link_report(struct tg3 *tp)
+{
+ if (!netif_carrier_ok(tp->dev)) {
+ if (netif_msg_link(tp))
+ printk(KERN_INFO PFX "%s: Link is down.\n",
+ tp->dev->name);
+ tg3_ump_link_report(tp);
+ } else if (netif_msg_link(tp)) {
+ printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
+ tp->dev->name,
+ (tp->link_config.active_speed == SPEED_1000 ?
+ 1000 :
+ (tp->link_config.active_speed == SPEED_100 ?
+ 100 : 10)),
+ (tp->link_config.active_duplex == DUPLEX_FULL ?
+ "full" : "half"));
+
+ printk(KERN_INFO PFX
+ "%s: Flow control is %s for TX and %s for RX.\n",
+ tp->dev->name,
+ (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
+ "on" : "off",
+ (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
+ "on" : "off");
+ tg3_ump_link_report(tp);
+ }
+}
+
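+/* Map the requested TG3_FLOW_CTRL_{TX,RX} bits onto 1000BASE-T pause
+ * advertisement bits (ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM).
+ */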
+static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
+{
+ u16 miireg;
+
+ if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
+ miireg = ADVERTISE_PAUSE_CAP;
+ else if (flow_ctrl & TG3_FLOW_CTRL_TX)
+ miireg = ADVERTISE_PAUSE_ASYM;
+ else if (flow_ctrl & TG3_FLOW_CTRL_RX)
+ miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ else
+ miireg = 0;
+
+ return miireg;
+}
+
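+/* Same mapping as above, onto the 1000BASE-X pause advertisement bits. */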
+static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
+{
+ u16 miireg;
+
+ if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
+ miireg = ADVERTISE_1000XPAUSE;
+ else if (flow_ctrl & TG3_FLOW_CTRL_TX)
+ miireg = ADVERTISE_1000XPSE_ASYM;
+ else if (flow_ctrl & TG3_FLOW_CTRL_RX)
+ miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+ else
+ miireg = 0;
+
+ return miireg;
+}
+
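+/* Resolve the pause configuration from the local and link partner
+ * advertisements, per the IEEE 802.3 Annex 28B priority resolution.
+ */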
+static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
+{
+ u8 cap = 0;
+
+ if (lcladv & ADVERTISE_PAUSE_CAP) {
+ if (lcladv & ADVERTISE_PAUSE_ASYM) {
+ if (rmtadv & LPA_PAUSE_CAP)
+ cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
+ else if (rmtadv & LPA_PAUSE_ASYM)
+ cap = TG3_FLOW_CTRL_RX;
+ } else {
+ if (rmtadv & LPA_PAUSE_CAP)
+ cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
+ }
+ } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
+ if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
+ cap = TG3_FLOW_CTRL_TX;
+ }
+
+ return cap;
+}
+
+static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
+{
+ u8 cap = 0;
+
+ if (lcladv & ADVERTISE_1000XPAUSE) {
+ if (lcladv & ADVERTISE_1000XPSE_ASYM) {
+ if (rmtadv & LPA_1000XPAUSE)
+ cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
+ else if (rmtadv & LPA_1000XPAUSE_ASYM)
+ cap = TG3_FLOW_CTRL_RX;
+ } else {
+ if (rmtadv & LPA_1000XPAUSE)
+ cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
+ }
+ } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
+ if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+ cap = TG3_FLOW_CTRL_TX;
+ }
+
+ return cap;
+}
+
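+/* Apply the negotiated (or forced) flow control settings to the MAC,
+ * rewriting MAC_RX_MODE/MAC_TX_MODE only when an enable bit changes.
+ */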
+static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
+{
+ u8 autoneg;
+ u8 flowctrl = 0;
+ u32 old_rx_mode = tp->rx_mode;
+ u32 old_tx_mode = tp->tx_mode;
+
+ autoneg = tp->link_config.autoneg;
+
+ if (autoneg == AUTONEG_ENABLE &&
+ (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
+ if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
+ flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
+ else
+ flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
+ } else
+ flowctrl = tp->link_config.flowctrl;
+
+ tp->link_config.active_flowctrl = flowctrl;
+
+ if (flowctrl & TG3_FLOW_CTRL_RX)
+ tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
+ else
+ tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
+
+ if (old_rx_mode != tp->rx_mode)
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+
+ if (flowctrl & TG3_FLOW_CTRL_TX)
+ tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
+ else
+ tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
+
+ if (old_tx_mode != tp->tx_mode)
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+}
+
+static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+{
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+}
+
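+/* Enable or disable the PHY's auto power-down (APD) feature via the
+ * misc shadow registers (5705 and newer parts only).
+ */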
+static void tg3_phy_toggle_apd(struct tg3 *tp, int enable)
+{
+ u32 reg;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ return;
+
+ reg = MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_SCR5_SEL |
+ MII_TG3_MISC_SHDW_SCR5_LPED |
+ MII_TG3_MISC_SHDW_SCR5_DLPTLM |
+ MII_TG3_MISC_SHDW_SCR5_SDTL |
+ MII_TG3_MISC_SHDW_SCR5_C125OE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+ reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
+
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+
+ reg = MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_APD_SEL |
+ MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+ if (enable)
+ reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
+
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+}
+
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
u32 phy;
(val | (1 << 15) | (1 << 4)));
}
-static int tg3_bmcr_reset(struct tg3 *tp)
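+/* Program per-chip analog tuning values, read from the one-time
+ * programmable (OTP) area and cached in tp->phy_otp, into the PHY DSP.
+ */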
+static void tg3_phy_apply_otp(struct tg3 *tp)
{
- u32 phy_control;
- int limit, err;
+ u32 otp, phy;
- /* OK, reset it, and poll the BMCR_RESET bit until it
- * clears or we time out.
- */
- phy_control = BMCR_RESET;
- err = tg3_writephy(tp, MII_BMCR, phy_control);
- if (err != 0)
- return -EBUSY;
+ if (!tp->phy_otp)
+ return;
- limit = 5000;
- while (limit--) {
- err = tg3_readphy(tp, MII_BMCR, &phy_control);
- if (err != 0)
- return -EBUSY;
+ otp = tp->phy_otp;
- if ((phy_control & BMCR_RESET) == 0) {
- udelay(40);
- break;
- }
- udelay(10);
- }
- if (limit <= 0)
- return -EBUSY;
+ /* Enable SM_DSP clock and tx 6dB coding. */
+ phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
- return 0;
+ phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+ phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
+
+ phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
+ ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
+
+ phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
+ phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
+ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
+
+ phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
+
+ phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
+
+ phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
+ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+
+ /* Turn off SM_DSP clock. */
+ phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static int tg3_wait_macro_done(struct tg3 *tp)
return err;
}
-static void tg3_link_report(struct tg3 *);
-
/* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero.
*/
static int tg3_phy_reset(struct tg3 *tp)
{
+ u32 cpmuctrl;
u32 phy_status;
int err;
goto out;
}
+ cpmuctrl = 0;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ cpmuctrl = tr32(TG3_CPMU_CTRL);
+ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
+ tw32(TG3_CPMU_CTRL,
+ cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
+ }
+
err = tg3_bmcr_reset(tp);
if (err)
return err;
- if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
+ u32 phy;
+
+ phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
+
+ tw32(TG3_CPMU_CTRL, cpmuctrl);
+ }
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
u32 val;
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
udelay(40);
tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
}
-
- /* Disable GPHY autopowerdown. */
- tg3_writephy(tp, MII_TG3_MISC_SHDW,
- MII_TG3_MISC_SHDW_WREN |
- MII_TG3_MISC_SHDW_APD_SEL |
- MII_TG3_MISC_SHDW_APD_WKTM_84MS);
}
+ tg3_phy_apply_otp(tp);
+
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, 1);
+ else
+ tg3_phy_toggle_apd(tp, 0);
+
out:
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
GRC_LCLCTRL_GPIO_OUTPUT0 |
GRC_LCLCTRL_GPIO_OUTPUT1),
100);
+ } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
+ /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
+ u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 |
+ tp->grc_local_ctrl;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
+
+ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
} else {
u32 no_gpio2;
u32 grc_local_ctrl = 0;
(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
return;
- if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
val |= CPMU_LSPD_1000MB_MACCLK_12_5;
"requested.\n",
tp->dev->name, state);
return -EINVAL;
- };
+ }
+
+ /* Restore the CLKREQ setting. */
+ if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
+ u16 lnkctl;
+
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ &lnkctl);
+ lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ lnkctl);
+ }
power_control |= PCI_PM_CTRL_PME_ENABLE;
tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 1000);
+#else
msleep(1);
+#endif
}
}
if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
tw32(MAC_LED_CTRL, tp->led_ctrl);
if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
- (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
+ (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))) {
mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
+ if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
+ (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
+ mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ mac_mode |= tp->mac_mode &
+ (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
+ if (mac_mode & MAC_MODE_APE_TX_EN)
+ mac_mode |= MAC_MODE_TDE_ENABLE;
+ }
tw32_f(MAC_MODE, mac_mode);
udelay(100);
return 0;
}
-static void tg3_link_report(struct tg3 *tp)
-{
- if (!netif_carrier_ok(tp->dev)) {
- if (netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: Link is down.\n",
- tp->dev->name);
- } else if (netif_msg_link(tp)) {
- printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
- tp->dev->name,
- (tp->link_config.active_speed == SPEED_1000 ?
- 1000 :
- (tp->link_config.active_speed == SPEED_100 ?
- 100 : 10)),
- (tp->link_config.active_duplex == DUPLEX_FULL ?
- "full" : "half"));
-
- printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
- "%s for RX.\n",
- tp->dev->name,
- (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
- (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
- }
-}
-
-static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
-{
- u32 new_tg3_flags = 0;
- u32 old_rx_mode = tp->rx_mode;
- u32 old_tx_mode = tp->tx_mode;
-
- if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
-
- /* Convert 1000BaseX flow control bits to 1000BaseT
- * bits before resolving flow control.
- */
- if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
- local_adv &= ~(ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
- remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
-
- if (local_adv & ADVERTISE_1000XPAUSE)
- local_adv |= ADVERTISE_PAUSE_CAP;
- if (local_adv & ADVERTISE_1000XPSE_ASYM)
- local_adv |= ADVERTISE_PAUSE_ASYM;
- if (remote_adv & LPA_1000XPAUSE)
- remote_adv |= LPA_PAUSE_CAP;
- if (remote_adv & LPA_1000XPAUSE_ASYM)
- remote_adv |= LPA_PAUSE_ASYM;
- }
-
- if (local_adv & ADVERTISE_PAUSE_CAP) {
- if (local_adv & ADVERTISE_PAUSE_ASYM) {
- if (remote_adv & LPA_PAUSE_CAP)
- new_tg3_flags |=
- (TG3_FLAG_RX_PAUSE |
- TG3_FLAG_TX_PAUSE);
- else if (remote_adv & LPA_PAUSE_ASYM)
- new_tg3_flags |=
- (TG3_FLAG_RX_PAUSE);
- } else {
- if (remote_adv & LPA_PAUSE_CAP)
- new_tg3_flags |=
- (TG3_FLAG_RX_PAUSE |
- TG3_FLAG_TX_PAUSE);
- }
- } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
- if ((remote_adv & LPA_PAUSE_CAP) &&
- (remote_adv & LPA_PAUSE_ASYM))
- new_tg3_flags |= TG3_FLAG_TX_PAUSE;
- }
-
- tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
- tp->tg3_flags |= new_tg3_flags;
- } else {
- new_tg3_flags = tp->tg3_flags;
- }
-
- if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
- tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
- else
- tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
-
- if (old_rx_mode != tp->rx_mode) {
- tw32_f(MAC_RX_MODE, tp->rx_mode);
- }
-
- if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
- tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
- else
- tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
-
- if (old_tx_mode != tp->tx_mode) {
- tw32_f(MAC_TX_MODE, tp->tx_mode);
- }
-}
-
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
switch (val & MII_TG3_AUX_STAT_SPDMASK) {
*speed = SPEED_INVALID;
*duplex = DUPLEX_INVALID;
break;
- };
+ }
}
static void tg3_phy_copper_begin(struct tg3 *tp)
~(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full);
- new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
+ new_adv = ADVERTISE_CSMA;
if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
new_adv |= ADVERTISE_10HALF;
if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
new_adv |= ADVERTISE_100HALF;
if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
new_adv |= ADVERTISE_100FULL;
+
+ new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+
tg3_writephy(tp, MII_ADVERTISE, new_adv);
if (tp->link_config.advertising &
tg3_writephy(tp, MII_TG3_CTRL, 0);
}
} else {
+ new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+ new_adv |= ADVERTISE_CSMA;
+
/* Asking for a specific link mode. */
if (tp->link_config.speed == SPEED_1000) {
- new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
tg3_writephy(tp, MII_ADVERTISE, new_adv);
if (tp->link_config.duplex == DUPLEX_FULL)
tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
new_adv |= (MII_TG3_CTRL_AS_MASTER |
MII_TG3_CTRL_ENABLE_AS_MASTER);
- tg3_writephy(tp, MII_TG3_CTRL, new_adv);
} else {
- tg3_writephy(tp, MII_TG3_CTRL, 0);
-
- new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
if (tp->link_config.speed == SPEED_100) {
if (tp->link_config.duplex == DUPLEX_FULL)
new_adv |= ADVERTISE_100FULL;
new_adv |= ADVERTISE_10HALF;
}
tg3_writephy(tp, MII_ADVERTISE, new_adv);
+
+ new_adv = 0;
}
+
+ tg3_writephy(tp, MII_TG3_CTRL, new_adv);
}
if (tp->link_config.autoneg == AUTONEG_DISABLE &&
case SPEED_1000:
bmcr |= TG3_BMCR_SPEED1000;
break;
- };
+ }
if (tp->link_config.duplex == DUPLEX_FULL)
bmcr |= BMCR_FULLDPLX;
return 1;
}
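+/* Verify that the pause bits in MII_ADVERTISE match what
+ * link_config.flowctrl requests.  Returns 1 if the link may stay up
+ * (reading the partner ability into *rmtadv on full duplex); returns 0
+ * to force renegotiation when a full-duplex link came up with stale
+ * pause advertisements.
+ */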
+static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+{
+ u32 curadv, reqadv;
+
+ if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+ return 1;
+
+ curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+
+ if (tp->link_config.active_duplex == DUPLEX_FULL) {
+ if (curadv != reqadv)
+ return 0;
+
+ if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
+ tg3_readphy(tp, MII_LPA, rmtadv);
+ } else {
+ /* Reprogram the advertisement register, even if it
+ * does not affect the current link. If the link
+ * gets renegotiated in the future, we can save an
+ * additional renegotiation cycle by advertising
+ * it correctly in the first place.
+ */
+ if (curadv != reqadv) {
+ *lcladv &= ~(ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+ tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
+ }
+ }
+
+ return 1;
+}
+
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
int current_link_up;
u32 bmsr, dummy;
+ u32 lcl_adv, rmt_adv;
u16 current_speed;
u8 current_duplex;
int i, err;
MAC_STATUS_LNKSTATE_CHANGED));
udelay(40);
- tp->mi_mode = MAC_MI_MODE_BASE;
- tw32_f(MAC_MI_MODE, tp->mi_mode);
- udelay(80);
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
udelay(10);
}
- if (tp->link_config.autoneg == AUTONEG_ENABLE) {
- if (bmcr & BMCR_ANENABLE) {
- current_link_up = 1;
+ lcl_adv = 0;
+ rmt_adv = 0;
- /* Force autoneg restart if we are exiting
- * low power mode.
- */
- if (!tg3_copper_is_advertising_all(tp,
- tp->link_config.advertising))
- current_link_up = 0;
- } else {
- current_link_up = 0;
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ if ((bmcr & BMCR_ANENABLE) &&
+ tg3_copper_is_advertising_all(tp,
+ tp->link_config.advertising)) {
+ if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
+ &rmt_adv))
+ current_link_up = 1;
}
} else {
if (!(bmcr & BMCR_ANENABLE) &&
tp->link_config.speed == current_speed &&
- tp->link_config.duplex == current_duplex) {
+ tp->link_config.duplex == current_duplex &&
+ tp->link_config.flowctrl ==
+ tp->link_config.active_flowctrl) {
current_link_up = 1;
- } else {
- current_link_up = 0;
}
}
- tp->link_config.active_speed = current_speed;
- tp->link_config.active_duplex = current_duplex;
+ if (current_link_up == 1 &&
+ tp->link_config.active_duplex == DUPLEX_FULL)
+ tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
}
- if (current_link_up == 1 &&
- (tp->link_config.active_duplex == DUPLEX_FULL) &&
- (tp->link_config.autoneg == AUTONEG_ENABLE)) {
- u32 local_adv, remote_adv;
-
- if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
- local_adv = 0;
- local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-
- if (tg3_readphy(tp, MII_LPA, &remote_adv))
- remote_adv = 0;
-
- remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
-
- /* If we are not advertising full pause capability,
- * something is wrong. Bring the link down and reconfigure.
- */
- if (local_adv != ADVERTISE_PAUSE_CAP) {
- current_link_up = 0;
- } else {
- tg3_setup_flow_control(tp, local_adv, remote_adv);
- }
- }
relink:
if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
u32 tmp;
NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
}
+ /* Prevent send BD corruption. */
+ if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
+ u16 oldlnkctl, newlnkctl;
+
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ &oldlnkctl);
+ if (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_10)
+ newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ else
+ newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
+ if (newlnkctl != oldlnkctl)
+ pci_write_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ newlnkctl);
+ }
+
if (current_link_up != netif_carrier_ok(tp->dev)) {
if (current_link_up)
netif_carrier_on(tp->dev);
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
struct tg3_fiber_aneginfo *ap)
{
+ u16 flowctrl;
unsigned long delta;
u32 rx_cfg_reg;
int ret;
case ANEG_STATE_ABILITY_DETECT_INIT:
ap->flags &= ~(MR_TOGGLE_TX);
- ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
+ ap->txconfig = ANEG_CFG_FD;
+ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ if (flowctrl & ADVERTISE_1000XPAUSE)
+ ap->txconfig |= ANEG_CFG_PS1;
+ if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+ ap->txconfig |= ANEG_CFG_PS2;
tw32(MAC_TX_AUTO_NEG, ap->txconfig);
tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
tw32_f(MAC_MODE, tp->mac_mode);
default:
ret = ANEG_FAILED;
break;
- };
+ }
return ret;
}
-static int fiber_autoneg(struct tg3 *tp, u32 *flags)
+static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
int res = 0;
struct tg3_fiber_aneginfo aninfo;
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
- *flags = aninfo.flags;
+ *txflags = aninfo.txconfig;
+ *rxflags = aninfo.flags;
if (status == ANEG_DONE &&
(aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
+ u16 flowctrl;
u32 sg_dig_ctrl, sg_dig_status;
u32 serdes_cfg, expected_sg_dig_ctrl;
int workaround, port_a;
sg_dig_ctrl = tr32(SG_DIG_CTRL);
if (tp->link_config.autoneg != AUTONEG_ENABLE) {
- if (sg_dig_ctrl & (1 << 31)) {
+ if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
if (workaround) {
u32 val = serdes_cfg;
val |= 0x4010000;
tw32_f(MAC_SERDES_CFG, val);
}
- tw32_f(SG_DIG_CTRL, 0x01388400);
+
+ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
}
if (mac_status & MAC_STATUS_PCS_SYNCED) {
tg3_setup_flow_control(tp, 0, 0);
}
/* Want auto-negotiation. */
- expected_sg_dig_ctrl = 0x81388400;
-
- /* Pause capability */
- expected_sg_dig_ctrl |= (1 << 11);
+ expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
- /* Asymettric pause */
- expected_sg_dig_ctrl |= (1 << 12);
+ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ if (flowctrl & ADVERTISE_1000XPAUSE)
+ expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
+ if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+ expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
if (sg_dig_ctrl != expected_sg_dig_ctrl) {
if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
restart_autoneg:
if (workaround)
tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
- tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
+ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
udelay(5);
tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
sg_dig_status = tr32(SG_DIG_STATUS);
mac_status = tr32(MAC_STATUS);
- if ((sg_dig_status & (1 << 1)) &&
+ if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
(mac_status & MAC_STATUS_PCS_SYNCED)) {
- u32 local_adv, remote_adv;
+ u32 local_adv = 0, remote_adv = 0;
+
+ if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
+ local_adv |= ADVERTISE_1000XPAUSE;
+ if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
+ local_adv |= ADVERTISE_1000XPSE_ASYM;
- local_adv = ADVERTISE_PAUSE_CAP;
- remote_adv = 0;
- if (sg_dig_status & (1 << 19))
- remote_adv |= LPA_PAUSE_CAP;
- if (sg_dig_status & (1 << 20))
- remote_adv |= LPA_PAUSE_ASYM;
+ if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
+ remote_adv |= LPA_1000XPAUSE;
+ if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE_ASYM;
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
tp->serdes_counter = 0;
tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
- } else if (!(sg_dig_status & (1 << 1))) {
+ } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
if (tp->serdes_counter)
tp->serdes_counter--;
else {
tw32_f(MAC_SERDES_CFG, val);
}
- tw32_f(SG_DIG_CTRL, 0x01388400);
+ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
udelay(40);
/* Link parallel detection - link is up */
goto out;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
- u32 flags;
+ u32 txflags, rxflags;
int i;
- if (fiber_autoneg(tp, &flags)) {
- u32 local_adv, remote_adv;
+ if (fiber_autoneg(tp, &txflags, &rxflags)) {
+ u32 local_adv = 0, remote_adv = 0;
- local_adv = ADVERTISE_PAUSE_CAP;
- remote_adv = 0;
- if (flags & MR_LP_ADV_SYM_PAUSE)
- remote_adv |= LPA_PAUSE_CAP;
- if (flags & MR_LP_ADV_ASYM_PAUSE)
- remote_adv |= LPA_PAUSE_ASYM;
+ if (txflags & ANEG_CFG_PS1)
+ local_adv |= ADVERTISE_1000XPAUSE;
+ if (txflags & ANEG_CFG_PS2)
+ local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+ if (rxflags & MR_LP_ADV_SYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE;
+ if (rxflags & MR_LP_ADV_ASYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE_ASYM;
tg3_setup_flow_control(tp, local_adv, remote_adv);
!(mac_status & MAC_STATUS_RCVD_CFG))
current_link_up = 1;
} else {
+ tg3_setup_flow_control(tp, 0, 0);
+
/* Forcing 1000FD link up. */
current_link_up = 1;
int current_link_up;
int i;
- orig_pause_cfg =
- (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
- TG3_FLAG_TX_PAUSE));
+ orig_pause_cfg = tp->link_config.active_flowctrl;
orig_active_speed = tp->link_config.active_speed;
orig_active_duplex = tp->link_config.active_duplex;
netif_carrier_off(tp->dev);
tg3_link_report(tp);
} else {
- u32 now_pause_cfg =
- tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
- TG3_FLAG_TX_PAUSE);
+ u32 now_pause_cfg = tp->link_config.active_flowctrl;
if (orig_pause_cfg != now_pause_cfg ||
orig_active_speed != tp->link_config.active_speed ||
orig_active_duplex != tp->link_config.active_duplex)
u32 bmsr, bmcr;
u16 current_speed;
u8 current_duplex;
+ u32 local_adv, remote_adv;
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
tw32_f(MAC_MODE, tp->mac_mode);
ADVERTISE_1000XPSE_ASYM |
ADVERTISE_SLCT);
- /* Always advertise symmetric PAUSE just like copper */
- new_adv |= ADVERTISE_1000XPAUSE;
+ new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
new_adv |= ADVERTISE_1000XHALF;
else
current_duplex = DUPLEX_HALF;
+ local_adv = 0;
+ remote_adv = 0;
+
if (bmcr & BMCR_ANENABLE) {
- u32 local_adv, remote_adv, common;
+ u32 common;
err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
err |= tg3_readphy(tp, MII_LPA, &remote_adv);
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
-
- tg3_setup_flow_control(tp, local_adv,
- remote_adv);
}
else
current_link_up = 0;
}
}
+ if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+
tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
if (tp->link_config.active_duplex == DUPLEX_HALF)
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
err = tg3_setup_copper_phy(tp, force_reset);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
u32 val, scale;
val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
default:
return -EINVAL;
- };
+ }
/* Do not overwrite any of the map or rp information
* until we are sure we can commit to a new buffer.
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size);
+ skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
if (skb == NULL)
return -ENOMEM;
skb_reserve(skb, tp->rx_offset);
mapping = pci_map_single(tp->pdev, skb->data,
- skb_size - tp->rx_offset,
+ skb_size,
PCI_DMA_FROMDEVICE);
map->skb = skb;
default:
return;
- };
+ }
dest_map->skb = src_map->skb;
pci_unmap_addr_set(dest_map, mapping,
src_map->skb = NULL;
}
-#if TG3_VLAN_TAG_USED
-static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
-{
- return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
-}
-#endif
-
/* The RX ring scheme is composed of multiple rings which post fresh
* buffers to the chip, and one special ring the chip uses to report
* status back to the host.
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ bool hw_vlan __maybe_unused = false;
+ u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
- if (len > RX_COPY_THRESHOLD
- && tp->rx_offset == 2
- /* rx_offset != 2 iff this is a 5701 card running
- * in PCI-X mode [see tg3_get_invariants()] */
- ) {
+ if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
skb_size = tg3_alloc_rx_skb(tp, opaque_key,
goto drop_it;
pci_unmap_single(tp->pdev, dma_addr,
- skb_size - tp->rx_offset,
+ skb_size,
PCI_DMA_FROMDEVICE);
skb_put(skb, len);
tg3_recycle_rx(tp, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len + 2);
+ copy_skb = netdev_alloc_skb(tp->dev, len + TG3_RAW_IP_ALIGN + VLAN_HLEN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, 2);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
skb->ip_summed = CHECKSUM_NONE;
skb->protocol = eth_type_trans(skb, tp->dev);
+
+ if ((len > (tp->dev->mtu + ETH_HLEN)) &&
+ (ntohs(skb->protocol) != ETH_P_8021Q)) {
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+
+ if (desc->type_flags & RXD_FLAG_VLAN &&
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
+ vtag = desc->err_vlan & RXD_VLAN_MASK;
#if TG3_VLAN_TAG_USED
- if (tp->vlgrp != NULL &&
- desc->type_flags & RXD_FLAG_VLAN) {
- tg3_vlan_rx(tp, skb,
- desc->err_vlan & RXD_VLAN_MASK);
- } else
+ if (tp->vlgrp)
+ hw_vlan = true;
+ else
#endif
+ {
+ struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+ __skb_push(skb, VLAN_HLEN);
+
+ memmove(ve, skb->data + VLAN_HLEN, ETH_ALEN * 2);
+ ve->h_vlan_proto = htons(ETH_P_8021Q);
+ ve->h_vlan_TCI = htons(vtag);
+ }
+ }
+
+ if (hw_vlan)
+ vlan_hwaccel_receive_skb(skb, tp->vlgrp, vtag);
+ else
netif_receive_skb(skb);
tp->dev->last_rx = jiffies;
tp->rx_rcb_ptr = sw_idx;
tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Refill RX ring(s). */
if (work_mask & RXD_OPAQUE_RING_STD) {
sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
return received;
}
+#ifdef TG3_NAPI
+
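+/* One pass of NAPI-deferred work: handle link change events, reclaim
+ * completed TX descriptors, then receive packets within the remaining
+ * budget.
+ */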
+static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
+{
+ struct tg3_hw_status *sblk = tp->hw_status;
+
+ /* handle link change and other phy events */
+ if (!(tp->tg3_flags &
+ (TG3_FLAG_USE_LINKCHG_REG |
+ TG3_FLAG_POLL_SERDES))) {
+ if (sblk->status & SD_STATUS_LINK_CHG) {
+ sblk->status = SD_STATUS_UPDATED |
+ (sblk->status & ~SD_STATUS_LINK_CHG);
+ spin_lock(&tp->lock);
+ tg3_setup_phy(tp, 0);
+ spin_unlock(&tp->lock);
+ }
+ }
+
+ /* run TX completion thread */
+ if (sblk->idx[0].tx_consumer != tp->tx_cons) {
+ tg3_tx(tp);
+ if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+ return work_done;
+ }
+
+ /* run RX thread, within the bounds set by NAPI.
+ * All RX "locking" is done by ensuring outside
+ * code synchronizes with tg3->napi.poll()
+ */
+ if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+ work_done += tg3_rx(tp, budget - work_done);
+
+ return work_done;
+}
+
+static int tg3_poll(struct napi_struct *napi, int budget)
+{
+ struct tg3 *tp = container_of(napi, struct tg3, napi);
+ int work_done = 0;
+ struct tg3_hw_status *sblk = tp->hw_status;
+
+ while (1) {
+ work_done = tg3_poll_work(tp, work_done, budget);
+
+ if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+ goto tx_recovery;
+
+ if (unlikely(work_done >= budget))
+ break;
+
+ if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+ /* tp->last_tag is used in tg3_restart_ints() below
+ * to tell the hw how much work has been processed,
+ * so we must read it before checking for more work.
+ */
+ tp->last_tag = sblk->status_tag;
+ rmb();
+ } else
+ sblk->status &= ~SD_STATUS_UPDATED;
+
+ if (likely(!tg3_has_work(tp))) {
+ netif_rx_complete(tp->dev, napi);
+ tg3_restart_ints(tp);
+ break;
+ }
+ }
+
+ return work_done;
+
+tx_recovery:
+ /* work_done is guaranteed to be less than budget. */
+ netif_rx_complete(tp->dev, napi);
+ schedule_work(&tp->reset_task);
+ return work_done;
+}
+
+#else
+
static int tg3_poll(struct net_device *netdev, int *budget)
{
struct tg3 *tp = netdev_priv(netdev);
return (done ? 0 : 1);
}
+#endif /* TG3_NAPI */
+
static void tg3_irq_quiesce(struct tg3 *tp)
{
BUG_ON(tp->irq_sync);
tp->irq_sync = 1;
smp_mb();
+#if (LINUX_VERSION_CODE >= 0x2051c)
synchronize_irq(tp->pdev->irq);
+#else
+ synchronize_irq();
+#endif
}
static inline int tg3_irq_sync(struct tg3 *tp)
/* One-shot MSI handler - Chip automatically disables interrupt
* after sending MSI so driver doesn't have to do it.
*/
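+/* 2.6.19 removed the pt_regs argument from interrupt handlers, hence
+ * the dual prototypes on each ISR below.
+ */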
+#if (LINUX_VERSION_CODE < 0x20613)
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
+#else
+static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
if (likely(!tg3_irq_sync(tp)))
+#ifdef TG3_NAPI
+ netif_rx_schedule(dev, &tp->napi);
+#else
netif_rx_schedule(dev); /* schedule NAPI poll */
+#endif
return IRQ_HANDLED;
}
* flush status block and interrupt mailbox. PCI ordering rules
* guarantee that MSI will arrive after the status block.
*/
+#if (LINUX_VERSION_CODE < 0x20613)
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
+#else
+static irqreturn_t tg3_msi(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (likely(!tg3_irq_sync(tp)))
+#ifdef TG3_NAPI
+ netif_rx_schedule(dev, &tp->napi);
+#else
netif_rx_schedule(dev); /* schedule NAPI poll */
+#endif
return IRQ_RETVAL(1);
}
+#if (LINUX_VERSION_CODE < 0x20613)
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+#else
+static irqreturn_t tg3_interrupt(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
sblk->status &= ~SD_STATUS_UPDATED;
if (likely(tg3_has_work(tp))) {
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+#ifdef TG3_NAPI
+ netif_rx_schedule(dev, &tp->napi);
+#else
netif_rx_schedule(dev); /* schedule NAPI poll */
+#endif
} else {
/* No work, shared interrupt perhaps? re-enable
* interrupts, and flush that PCI write
return IRQ_RETVAL(handled);
}
+#if (LINUX_VERSION_CODE < 0x20613)
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+#else
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (tg3_irq_sync(tp))
goto out;
+#ifdef TG3_NAPI
+ if (netif_rx_schedule_prep(dev, &tp->napi)) {
+ prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+ /* Update last_tag to mark that this status has been
+ * seen. Because interrupt may be shared, we may be
+ * racing with tg3_poll(), so only update last_tag
+ * if tg3_poll() is not scheduled.
+ */
+ tp->last_tag = sblk->status_tag;
+ __netif_rx_schedule(dev, &tp->napi);
+ }
+#else
if (netif_rx_schedule_prep(dev)) {
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
/* Update last_tag to mark that this status has been
tp->last_tag = sblk->status_tag;
__netif_rx_schedule(dev);
}
+#endif
out:
return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
-static irqreturn_t tg3_test_isr(int irq, void *dev_id,
- struct pt_regs *regs)
+#if (LINUX_VERSION_CODE < 0x020613)
+static irqreturn_t tg3_test_isr(int irq, void *dev_id, struct pt_regs *regs)
+#else
+static irqreturn_t tg3_test_isr(int irq, void *dev_id)
+#endif
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
* Invoked with tp->lock held.
*/
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
int err;
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
tp->irq_sync = 0;
+#ifdef TG3_NAPI
+ napi_enable(&tp->napi);
+#else
netif_poll_enable(tp->dev);
+#endif
dev_close(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
+#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void tg3_poll_controller(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
+#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x20600)
+ if (netdump_mode) {
+ tg3_interrupt(tp->pdev->irq, dev, NULL);
+ if (dev->poll_list.prev) {
+ int budget = 64;
+
+ tg3_poll(dev, &budget);
+ }
+ }
+ else
+#endif
+#if (LINUX_VERSION_CODE < 0x020613)
tg3_interrupt(tp->pdev->irq, dev, NULL);
+#else
+ tg3_interrupt(tp->pdev->irq, dev);
+#endif
}
#endif
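+/* 2.6.20 changed work callbacks to take a struct work_struct *;
+ * INIT_DELAYED_WORK_DEFERRABLE/INIT_WORK_NAR serve as markers for the
+ * new API here.
+ */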
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
+static void tg3_reset_task(struct work_struct *work)
+#else
static void tg3_reset_task(void *_data)
+#endif
{
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
+ struct tg3 *tp = container_of(work, struct tg3, reset_task);
+#else
struct tg3 *tp = _data;
+#endif
unsigned int restart_timer;
tg3_full_lock(tp, 0);
- tp->tg3_dist_flags |= TG3_DIST_FLAG_IN_RESET_TASK;
if (!netif_running(tp->dev)) {
- tp->tg3_dist_flags &= ~TG3_DIST_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
return;
}
mod_timer(&tp->timer, jiffies + 1);
out:
- tp->tg3_dist_flags &= ~TG3_DIST_FLAG_IN_RESET_TASK;
-
tg3_full_unlock(tp);
}
u32 last_plus_one, u32 *start,
u32 base_flags, u32 mss)
{
- struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
+ struct sk_buff *new_skb;
dma_addr_t new_addr = 0;
u32 entry = *start;
int i, ret = 0;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ else {
+ int more_headroom = 4 - ((unsigned long)skb->data & 3);
+
+ new_skb = skb_copy_expand(skb,
+ skb_headroom(skb) + more_headroom,
+ skb_tailroom(skb), GFP_ATOMIC);
+ }
+
if (!new_skb) {
ret = -1;
} else {
len = skb_headlen(skb);
/* We are running in BH disabled context with netif_tx_lock
- * and TX reclaim runs via tp->poll inside of a software
+ * and TX reclaim runs via tp->napi.poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
entry = tp->tx_prod;
base_flags = 0;
+#if TG3_TSO_SUPPORT != 0
mss = 0;
if ((mss = skb_shinfo(skb)->gso_size) != 0) {
int tcp_opt_len, ip_tcp_len;
goto out_unlock;
}
+#ifndef BCM_NO_TSO6
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
- else {
+ else
+#endif
+ {
struct iphdr *iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
tcp_hdr(skb)->check = 0;
}
- else if (skb->ip_summed == CHECKSUM_HW)
+ else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#else
+ mss = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#endif
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
}
}
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
netif_wake_queue(tp->dev);
}
+#if TG3_TSO_SUPPORT != 0
out_unlock:
+#endif
mmiowb();
dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
+#if TG3_TSO_SUPPORT != 0
+#ifndef NETIF_F_GSO
+
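+/* Minimal local backport of the kernel GSO segmentation path
+ * (skb_segment/tcp_tso_segment/inet_gso_segment/skb_gso_segment) for
+ * kernels predating NETIF_F_GSO; used only by the TSO bug workaround
+ * below.
+ */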
+struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = NULL;
+ struct sk_buff *tail = NULL;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int doffset = skb->data - skb->mac.raw;
+ unsigned int offset = doffset;
+ unsigned int headroom;
+ unsigned int len;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ int err = -ENOMEM;
+ int i = 0;
+ int pos;
+
+ __skb_push(skb, doffset);
+ headroom = skb_headroom(skb);
+ pos = skb_headlen(skb);
+
+ do {
+ struct sk_buff *nskb;
+ skb_frag_t *frag;
+ int hsize;
+ int k;
+ int size;
+
+ len = skb->len - offset;
+ if (len > mss)
+ len = mss;
+
+ hsize = skb_headlen(skb) - offset;
+ if (hsize < 0)
+ hsize = 0;
+ if (hsize > len)
+ hsize = len;
+
+ nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
+ if (unlikely(!nskb))
+ goto err;
+
+ if (segs)
+ tail->next = nskb;
+ else
+ segs = nskb;
+ tail = nskb;
+
+ nskb->dev = skb->dev;
+ nskb->priority = skb->priority;
+ nskb->protocol = skb->protocol;
+ nskb->dst = dst_clone(skb->dst);
+ memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
+ nskb->pkt_type = skb->pkt_type;
+ nskb->mac_len = skb->mac_len;
+
+ skb_reserve(nskb, headroom);
+ nskb->mac.raw = nskb->data;
+ nskb->nh.raw = nskb->data + skb->mac_len;
+ nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
+ memcpy(skb_put(nskb, doffset), skb->data, doffset);
+
+ frag = skb_shinfo(nskb)->frags;
+ k = 0;
+
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum = skb->csum;
+ memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
+
+ while (pos < offset + len) {
+ BUG_ON(i >= nfrags);
+
+ *frag = skb_shinfo(skb)->frags[i];
+ get_page(frag->page);
+ size = frag->size;
+
+ if (pos < offset) {
+ frag->page_offset += offset - pos;
+ frag->size -= offset - pos;
+ }
+
+ k++;
+
+ if (pos + size <= offset + len) {
+ i++;
+ pos += size;
+ } else {
+ frag->size -= pos + size - (offset + len);
+ break;
+ }
+
+ frag++;
+ }
+
+ skb_shinfo(nskb)->nr_frags = k;
+ nskb->data_len = len - hsize;
+ nskb->len += nskb->data_len;
+ nskb->truesize += nskb->data_len;
+ } while ((offset += len) < skb->len);
+
+ return segs;
+
+err:
+ while ((skb = segs)) {
+ segs = skb->next;
+ kfree_skb(skb);
+ }
+ return ERR_PTR(err);
+}
+
+static struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct tcphdr *th;
+ unsigned thlen;
+ unsigned int seq;
+ u32 delta;
+ unsigned int oldlen;
+ unsigned int len;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ goto out;
+
+ th = skb->h.th;
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ goto out;
+
+ if (!pskb_may_pull(skb, thlen))
+ goto out;
+
+ oldlen = (u16)~skb->len;
+ __skb_pull(skb, thlen);
+
+ segs = skb_segment(skb, features);
+ if (IS_ERR(segs))
+ goto out;
+
+ len = skb_shinfo(skb)->gso_size;
+ delta = htonl(oldlen + (thlen + len));
+
+ skb = segs;
+ th = skb->h.th;
+ seq = ntohl(th->seq);
+
+ do {
+ th->fin = th->psh = 0;
+
+ th->check = ~csum_fold((u32)((u32)th->check +
+ (u32)delta));
+ seq += len;
+ skb = skb->next;
+ th = skb->h.th;
+
+ th->seq = htonl(seq);
+ th->cwr = 0;
+ } while (skb->next);
+
+ delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
+ th->check = ~csum_fold((u32)((u32)th->check +
+ (u32)delta));
+out:
+ return segs;
+}
+
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct iphdr *iph;
+ int ihl;
+ int id;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ goto out;
+
+ iph = skb->nh.iph;
+ ihl = iph->ihl * 4;
+ if (ihl < sizeof(*iph))
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, ihl)))
+ goto out;
+
+ skb->h.raw = __skb_pull(skb, ihl);
+ iph = skb->nh.iph;
+ id = ntohs(iph->id);
+ segs = ERR_PTR(-EPROTONOSUPPORT);
+
+ segs = tcp_tso_segment(skb, features);
+
+ if (!segs || IS_ERR(segs))
+ goto out;
+
+ skb = segs;
+ do {
+ iph = skb->nh.iph;
+ iph->id = htons(id++);
+ iph->tot_len = htons(skb->len - skb->mac_len);
+ iph->check = 0;
+ iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
+ } while ((skb = skb->next));
+
+out:
+ return segs;
+}
+
+static struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+
+ skb->mac.raw = skb->data;
+ skb->mac_len = skb->nh.raw - skb->data;
+ __skb_pull(skb, skb->mac_len);
+
+ segs = inet_gso_segment(skb, features);
+
+ __skb_push(skb, skb->data - skb->mac.raw);
+ return segs;
+}
+
+#endif
+
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
/* Use GSO to workaround a rare TSO bug that may be triggered when the
}
segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
- if (unlikely(IS_ERR(segs)))
+ if (IS_ERR(segs))
goto tg3_tso_bug_end;
do {
return NETDEV_TX_OK;
}
+#endif
+
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
* support TG3_FLG2_HW_TSO_1 or firmware TSO only.
*/
len = skb_headlen(skb);
/* We are running in BH disabled context with netif_tx_lock
- * and TX reclaim runs via tp->poll inside of a software
+ * and TX reclaim runs via tp->napi.poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
entry = tp->tx_prod;
base_flags = 0;
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#if TG3_TSO_SUPPORT != 0
mss = 0;
- if ((mss = skb_shinfo(skb)->gso_size) != 0) {
+ if (((mss = skb_shinfo(skb)->gso_size) != 0) &&
+ (skb_shinfo(skb)->gso_segs > 1)) {
struct iphdr *iph;
int tcp_opt_len, ip_tcp_len, hdr_len;
}
}
}
+#else
+ mss = 0;
+#endif
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
would_hit_hwbug = 0;
- if (tg3_4g_overflow_test(mapping, len))
+ if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
+ would_hit_hwbug = 1;
+ else if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
tg3_set_txd(tp, entry, mapping, len, base_flags,
entry = start;
}
+ /* Some platforms need to sync memory here */
+ wmb();
+
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
if (new_mtu > ETH_DATA_LEN) {
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
+#if TG3_TSO_SUPPORT != 0
ethtool_op_set_tso(dev, 0);
+#endif
}
else
tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
continue;
pci_unmap_single(tp->pdev,
pci_unmap_addr(rxp, mapping),
- tp->rx_pkt_buf_sz - tp->rx_offset,
+ tp->rx_pkt_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(rxp->skb);
rxp->skb = NULL;
continue;
pci_unmap_single(tp->pdev,
pci_unmap_addr(rxp, mapping),
- RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
+ RX_JUMBO_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(rxp->skb);
rxp->skb = NULL;
struct tg3_rx_buffer_desc *rxd;
rxd = &tp->rx_std[i];
- rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
+ rxd->idx_len = (tp->rx_pkt_buf_sz - 64)
<< RXD_LEN_SHIFT;
rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
rxd->opaque = (RXD_OPAQUE_RING_STD |
struct tg3_rx_buffer_desc *rxd;
rxd = &tp->rx_jumbo[i];
- rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
+ rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - 64)
<< RXD_LEN_SHIFT;
rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
RXD_FLAG_JUMBO;
default:
break;
- };
+ }
}
val = tr32(ofs);
return;
apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
- if (apedata != APE_FW_STATUS_READY)
+ if (!(apedata & APE_FW_STATUS_READY))
return;
/* Wait for up to 1 millisecond for APE to service previous event. */
event = APE_EVENT_STATUS_STATE_START;
break;
case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+
event = APE_EVENT_STATUS_STATE_UNLOAD;
break;
case RESET_KIND_SUSPEND:
default:
break;
- };
+ }
}
if (kind == RESET_KIND_INIT ||
kind == RESET_KIND_SUSPEND)
tg3_ape_driver_state_change(tp, kind);
-
}
/* tp->lock is held. */
default:
break;
- };
+ }
}
if (kind == RESET_KIND_SHUTDOWN)
default:
break;
- };
+ }
}
}
}
/* Make sure PCI-X relaxed ordering bit is clear. */
- if (tp->pcix_cap) {
+ if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
u16 pcix_cmd;
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
tg3_nvram_lock(tp);
+ tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
+
/* No matching tg3_nvram_unlock() after this because
* chip reset below will undo the nvram lock.
*/
}
tp->last_tag = 0;
smp_mb();
+#if (LINUX_VERSION_CODE >= 0x2051c)
synchronize_irq(tp->pdev->irq);
+#else
+ synchronize_irq();
+#endif
/* do the reset */
val = GRC_MISC_CFG_CORECLK_RESET;
} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
tw32_f(MAC_MODE, tp->mac_mode);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
+ if (tp->mac_mode & MAC_MODE_APE_TX_EN)
+ tp->mac_mode |= MAC_MODE_TDE_ENABLE;
+ tw32_f(MAC_MODE, tp->mac_mode);
} else
tw32_f(MAC_MODE, 0);
udelay(40);
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
+
err = tg3_poll_fw(tp);
if (err)
return err;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
+ tp->last_event_jiffies = jiffies;
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
}
{
if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
- u32 val;
- int i;
+ /* Wait for RX cpu to ACK the previous event. */
+ tg3_wait_for_event_ack(tp);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
- val = tr32(GRC_RX_CPU_EVENT);
- val |= (1 << 14);
- tw32(GRC_RX_CPU_EVENT, val);
- /* Wait for RX cpu to ACK the event. */
- for (i = 0; i < 100; i++) {
- if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
- break;
- udelay(1);
- }
+ tg3_generate_fw_event(tp);
+
+ /* Wait for RX cpu to ACK this event. */
+ tg3_wait_for_event_ack(tp);
}
}
return 0;
}
+#if TG3_TSO_SUPPORT != 0
#define TG3_TSO_FW_RELEASE_MAJOR 0x1
#define TG3_TSO_FW_RELASE_MINOR 0x6
return 0;
}
+#endif /* TG3_TSO_SUPPORT != 0 */
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
tg3_write_sig_legacy(tp, RESET_KIND_INIT);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
val = tr32(TG3_CPMU_CTRL);
val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
tw32(TG3_CPMU_CTRL, val);
tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
}
+#if TG3_TSO_SUPPORT != 0
else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
int fw_len;
tw32(BUFMGR_MB_POOL_SIZE,
NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
}
+#endif
if (tp->dev->mtu <= ETH_DATA_LEN) {
tw32(BUFMGR_MB_RDMA_LOW_WATER,
__tg3_set_mac_addr(tp, 0);
/* MTU + ethernet header + FCS + optional VLAN tag */
- tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
+ tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
/* The slot time is changed by tg3_setup_phy if we
* run at gigabit with half duplex.
if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+#if TG3_TSO_SUPPORT != 0
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
rdmac_mode |= (1 << 27);
+#endif
/* Receive/send statistics. */
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
udelay(10);
}
- tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ else
+ tp->mac_mode = 0;
+ tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
- val |= (1 << 29);
+ val |= WDMAC_MODE_STATUS_TAG_FIX;
tw32_f(WDMAC_MODE, val);
udelay(40);
tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
+#if TG3_TSO_SUPPORT != 0
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
+#endif
tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
return err;
}
+#if TG3_TSO_SUPPORT != 0
if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
err = tg3_load_tso_firmware(tp);
if (err)
return err;
}
+#endif
tp->tx_mode = TX_MODE_ENABLE;
tw32_f(MAC_TX_MODE, tp->tx_mode);
tp->rx_mode = RX_MODE_ENABLE;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
tp->link_config.autoneg = tp->link_config.orig_autoneg;
}
- tp->mi_mode = MAC_MI_MODE_BASE;
+ tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
default:
break;
- };
+ }
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
/* Write our heartbeat update interval to APE. */
* resets.
*/
if (!--tp->asf_counter) {
- if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
- u32 val;
+ if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
+ tg3_wait_for_event_ack(tp);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
FWCMD_NICDRV_ALIVE3);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
/* 5 seconds timeout */
tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
- val = tr32(GRC_RX_CPU_EVENT);
- val |= (1 << 14);
- tw32(GRC_RX_CPU_EVENT, val);
+
+ tg3_generate_fw_event(tp);
}
tp->asf_counter = tp->asf_multiplier;
}
static int tg3_request_irq(struct tg3 *tp)
{
+#if (LINUX_VERSION_CODE < 0x020613)
irqreturn_t (*fn)(int, void *, struct pt_regs *);
+#else
+ irq_handler_t fn;
+#endif
unsigned long flags;
struct net_device *dev = tp->dev;
break;
}
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 100);
+#else
msleep(10);
+#endif
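These pre-2.6.7 fallbacks emulate msleep() with schedule_timeout(), which counts jiffies rather than milliseconds, so each compat branch converts with the N ms = N * HZ / 1000 rule (HZ / 100 here for 10 ms, HZ / 1000 and HZ / 2 in later hunks). A quick userspace check of that arithmetic at an arbitrary HZ:

    #include <stdio.h>

    #define HZ 250	/* example tick rate; kernels of this era used 100, 250, or 1000 */

    static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
    {
    	/* Round up so short sleeps never truncate to zero jiffies. */
    	return ((unsigned long)ms * HZ + 999) / 1000;
    }

    int main(void)
    {
    	printf("10 ms  -> %lu jiffies at HZ=%d\n", msecs_to_jiffies_sketch(10), HZ);
    	printf("500 ms -> %lu jiffies at HZ=%d\n", msecs_to_jiffies_sketch(500), HZ);
    	return 0;
    }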
}
tg3_disable_ints(tp);
tp->dev->name);
free_irq(tp->pdev->irq, dev);
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
if (err)
return err;
+#ifdef CONFIG_PCI_MSI
if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
} else if (pci_enable_msi(tp->pdev) == 0) {
u32 msi_mode;
+#ifndef BCM_HAS_INTX_MSI_WORKAROUND
/* Hardware bug - MSI won't work if INTX disabled. */
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+#if (LINUX_VERSION_CODE < 0x2060e)
+ tg3_enable_intx(tp->pdev);
+#else
pci_intx(tp->pdev, 1);
+#endif
+#endif
msi_mode = tr32(MSGINT_MODE);
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
}
}
+#endif
err = tg3_request_irq(tp);
if (err) {
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_free_consistent(tp);
return err;
}
+#ifdef TG3_NAPI
+ napi_enable(&tp->napi);
+#endif
+
tg3_full_lock(tp, 0);
err = tg3_init_hw(tp, 1);
tg3_full_unlock(tp);
if (err) {
+#ifdef TG3_NAPI
+ napi_disable(&tp->napi);
+#endif
free_irq(tp->pdev->irq, dev);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_free_consistent(tp);
tg3_full_lock(tp, 0);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
+#ifdef TG3_NAPI
+ napi_disable(&tp->napi);
+#endif
+
return err;
}
{
struct tg3 *tp = netdev_priv(dev);
- /* Calling flush_scheduled_work() may deadlock because
- * linkwatch_event() may be on the workqueue and it will try to get
- * the rtnl_lock which we are holding.
- */
- while (tp->tg3_dist_flags & TG3_DIST_FLAG_IN_RESET_TASK)
- msleep(1);
+#ifdef TG3_NAPI
+ napi_disable(&tp->napi);
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20616)
+ cancel_work_sync(&tp->reset_task);
+#else
+ while (tp->tg3_dist_flags & TG3_DIST_FLAG_IN_RESET_TASK) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+#endif
netif_stop_queue(dev);
free_irq(tp->pdev->irq, dev);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+#ifdef CONFIG_PCI_MSI
pci_disable_msi(tp->pdev);
+#endif
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
tg3_full_unlock(tp);
}
+#if (LINUX_VERSION_CODE >= 0x20418)
static int tg3_get_eeprom_len(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
return tp->nvram_size;
}
+#endif
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
+static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
+#ifdef ETHTOOL_GEEPROM
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
int ret;
u8 *pd;
- u32 i, offset, len, val, b_offset, b_count;
+ u32 i, offset, len, b_offset, b_count;
+ __le32 val;
if (tp->link_config.phy_is_low_power)
return -EAGAIN;
/* i.e. offset=1 len=2 */
b_count = len;
}
- ret = tg3_nvram_read(tp, offset-b_offset, &val);
+ ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
if (ret)
return ret;
- val = cpu_to_le32(val);
memcpy(data, ((char*)&val) + b_offset, b_count);
len -= b_count;
offset += b_count;
/* read bytes upto the last 4 byte boundary */
pd = &data[eeprom->len];
for (i = 0; i < (len - (len & 3)); i += 4) {
- ret = tg3_nvram_read(tp, offset + i, &val);
+ ret = tg3_nvram_read_le(tp, offset + i, &val);
if (ret) {
eeprom->len += i;
return ret;
}
- val = cpu_to_le32(val);
memcpy(pd + i, &val, 4);
}
eeprom->len += i;
pd = &data[eeprom->len];
b_count = len & 3;
b_offset = offset + len - b_count;
- ret = tg3_nvram_read(tp, b_offset, &val);
+ ret = tg3_nvram_read_le(tp, b_offset, &val);
if (ret)
return ret;
- val = cpu_to_le32(val);
- memcpy(pd, ((char*)&val), b_count);
+ memcpy(pd, &val, b_count);
eeprom->len += b_count;
}
return 0;
}
+#endif
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+#ifdef ETHTOOL_SEEPROM
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
int ret;
- u32 offset, len, b_offset, odd_len, start, end;
+ u32 offset, len, b_offset, odd_len;
u8 *buf;
+ __le32 start, end;
if (tp->link_config.phy_is_low_power)
return -EAGAIN;
if ((b_offset = (offset & 3))) {
/* adjustments to start on required 4 byte boundary */
- ret = tg3_nvram_read(tp, offset-b_offset, &start);
+ ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
if (ret)
return ret;
- start = cpu_to_le32(start);
len += b_offset;
offset &= ~3;
if (len < 4)
/* adjustments to end on required 4 byte boundary */
odd_len = 1;
len = (len + 3) & ~3;
- ret = tg3_nvram_read(tp, offset+len-4, &end);
+ ret = tg3_nvram_read_le(tp, offset+len-4, &end);
if (ret)
return ret;
- end = cpu_to_le32(end);
}
buf = data;
return ret;
}
+#endif
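tg3_set_eeprom() above widens an unaligned (offset, len) request to 4-byte boundaries, pre-reading the word containing the start (and, for an odd tail, the word containing the end) so the untouched bytes survive the write. The alignment arithmetic on its own, with example numbers:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int offset = 5, len = 6;	/* user asks for bytes 5..10 */
    	unsigned int b_offset = offset & 3;	/* 1: start is unaligned */

    	len += b_offset;			/* widen left: 7 */
    	offset &= ~3u;				/* 4 */
    	if (len & 3)				/* tail unaligned too */
    		len = (len + 3) & ~3u;		/* widen right: 8 */

    	printf("aligned write: offset=%u len=%u (bytes %u..%u)\n",
    	       offset, len, offset, offset + len - 1);
    	return 0;
    }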
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct tg3 *tp = netdev_priv(dev);
+ struct tg3 *tp = netdev_priv(dev);
cmd->supported = (SUPPORTED_Autoneg);
(cmd->speed == SPEED_1000))
return -EINVAL;
else if ((cmd->speed == SPEED_1000) &&
- (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
+ (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
return -EINVAL;
tg3_full_lock(tp, 0);
tp->link_config.advertising = 0;
tp->link_config.speed = cmd->speed;
tp->link_config.duplex = cmd->duplex;
- }
+ }
tp->link_config.orig_speed = tp->link_config.speed;
tp->link_config.orig_duplex = tp->link_config.duplex;
tp->msg_enable = value;
}
+#if TG3_TSO_SUPPORT != 0
static int tg3_set_tso(struct net_device *dev, u32 value)
{
struct tg3 *tp = netdev_priv(dev);
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
if (value) {
dev->features |= NETIF_F_TSO6;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))
dev->features |= NETIF_F_TSO_ECN;
} else
dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
}
return ethtool_op_set_tso(dev, value);
}
+#endif
static int tg3_nway_reset(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
- epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
- epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
+
+ if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
+ epause->rx_pause = 1;
+ else
+ epause->rx_pause = 0;
+
+ if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
+ epause->tx_pause = 1;
+ else
+ epause->tx_pause = 0;
}
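The get_pauseparam rewrite above reports the negotiated state (link_config.active_flowctrl) instead of global feature flags, while set_pauseparam below stores the user's request in link_config.flowctrl. A sketch of that requested-versus-active split; the TG3_FLOW_CTRL_* bit values are assumptions, as they are not defined in this excerpt:

    #include <stdint.h>
    #include <stdio.h>

    #define TG3_FLOW_CTRL_TX	0x01	/* assumed value */
    #define TG3_FLOW_CTRL_RX	0x02	/* assumed value */

    struct link_config {
    	uint8_t flowctrl;		/* what the user requested */
    	uint8_t active_flowctrl;	/* what autoneg resolved */
    };

    int main(void)
    {
    	struct link_config lc = {
    		.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX,
    		.active_flowctrl = TG3_FLOW_CTRL_RX,	/* partner rejected TX pause */
    	};

    	/* ethtool get_pauseparam now reports the active state: */
    	printf("rx_pause=%d tx_pause=%d\n",
    	       !!(lc.active_flowctrl & TG3_FLOW_CTRL_RX),
    	       !!(lc.active_flowctrl & TG3_FLOW_CTRL_TX));
    	return 0;
    }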
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
else
tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
if (epause->rx_pause)
- tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
+ tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
else
- tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
+ tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
if (epause->tx_pause)
- tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
+ tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
else
- tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
+ tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
return 0;
}
+#ifdef BCM_HAS_SET_TX_CSUM
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
struct tg3 *tp = netdev_priv(dev);
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+#if defined(BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM)
+ ethtool_op_set_tx_ipv6_csum(dev, data);
+#elif defined(BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM)
ethtool_op_set_tx_hw_csum(dev, data);
+#else
+ tg3_set_tx_hw_csum(dev, data);
+#endif
else
ethtool_op_set_tx_csum(dev, data);
return 0;
}
+#endif
+
+static int tg3_get_sset_count (struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return TG3_NUM_TEST;
+ case ETH_SS_STATS:
+ return TG3_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#if (LINUX_VERSION_CODE < 0x020618)
static int tg3_get_stats_count (struct net_device *dev)
{
- return TG3_NUM_STATS;
+ return tg3_get_sset_count(dev, ETH_SS_STATS);
}
static int tg3_get_test_count (struct net_device *dev)
{
- return TG3_NUM_TEST;
+ return tg3_get_sset_count(dev, ETH_SS_TEST);
}
+#endif
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
return -EAGAIN;
if (data == 0)
- data = 2;
+ data = UINT_MAX / 2;
for (i = 0; i < (data * 2); i++) {
if ((i % 2) == 0)
tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
LED_CTRL_TRAFFIC_OVERRIDE);
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 2))
+#else
if (msleep_interruptible(500))
+#endif
break;
}
tw32(MAC_LED_CTRL, tp->led_ctrl);
static int tg3_test_nvram(struct tg3 *tp)
{
- u32 *buf, csum, magic;
+ u32 csum, magic;
+ __le32 *buf;
int i, j, k, err = 0, size;
if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
err = -EIO;
for (i = 0, j = 0; i < size; i += 4, j++) {
- u32 val;
-
- if ((err = tg3_nvram_read(tp, i, &val)) != 0)
+ if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
break;
- buf[j] = cpu_to_le32(val);
}
if (i < size)
goto out;
/* Selfboot format */
- if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
+ magic = swab32(le32_to_cpu(buf[0]));
+ if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
TG3_EEPROM_MAGIC_FW) {
u8 *buf8 = (u8 *) buf, csum8 = 0;
- if ((cpu_to_be32(buf[0]) & TG3_EEPROM_SB_REVISION_MASK) ==
+ if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
TG3_EEPROM_SB_REVISION_2) {
/* For rev 2, the csum doesn't include the MBA. */
for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
goto out;
}
- if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
+ if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
TG3_EEPROM_MAGIC_HW) {
u8 data[NVRAM_SELFBOOT_DATA_SIZE];
u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
/* Bootstrap checksum at offset 0x10 */
csum = calc_crc((unsigned char *) buf, 0x10);
- if(csum != cpu_to_le32(buf[0x10/4]))
+ if(csum != le32_to_cpu(buf[0x10/4]))
goto out;
/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
- if (csum != cpu_to_le32(buf[0xfc/4]))
+ if (csum != le32_to_cpu(buf[0xfc/4]))
goto out;
err = 0;
}
#define TG3_SERDES_TIMEOUT_SEC 2
-#define TG3_COPPER_TIMEOUT_SEC 6
+#define TG3_COPPER_TIMEOUT_SEC 7
static int tg3_test_link(struct tg3 *tp)
{
if (netif_carrier_ok(tp->dev))
return 0;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ))
+#else
if (msleep_interruptible(1000))
+#endif
break;
}
tp->tx_prod++;
num_pkts++;
+ /* Some platforms need to sync memory here */
+ wmb();
+
tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
tp->tx_prod);
tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
if (err)
return TG3_LOOPBACK_FAILED;
- if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ /* Turn off gphy autopowerdown. */
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, 0);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
int i;
u32 status;
if (status != CPMU_MUTEX_GNT_DRIVER)
return TG3_LOOPBACK_FAILED;
- /* Turn off power management based on link speed. */
+ /* Turn off link-based power management. */
cpmuctrl = tr32(TG3_CPMU_CTRL);
tw32(TG3_CPMU_CTRL,
cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED;
- if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
tw32(TG3_CPMU_CTRL, cpmuctrl);
/* Release the mutex */
err |= TG3_PHY_LOOPBACK_FAILED;
}
+ /* Re-enable gphy autopowerdown. */
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, 1);
+
return err;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+#if (LINUX_VERSION_CODE >= 0x020607)
struct mii_ioctl_data *data = if_mii(ifr);
+#else
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
+#endif
struct tg3 *tp = netdev_priv(dev);
int err;
tg3_netif_stop(tp);
tg3_full_lock(tp, 0);
- if (tp->vlgrp)
- tp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(tp->vlgrp, vid, NULL);
tg3_full_unlock(tp);
if (netif_running(dev))
.set_msglevel = tg3_set_msglevel,
.nway_reset = tg3_nway_reset,
.get_link = ethtool_op_get_link,
+#if (LINUX_VERSION_CODE >= 0x20418)
.get_eeprom_len = tg3_get_eeprom_len,
+#endif
+#ifdef ETHTOOL_GEEPROM
.get_eeprom = tg3_get_eeprom,
+#endif
+#ifdef ETHTOOL_SEEPROM
.set_eeprom = tg3_set_eeprom,
+#endif
.get_ringparam = tg3_get_ringparam,
.set_ringparam = tg3_set_ringparam,
.get_pauseparam = tg3_get_pauseparam,
.get_rx_csum = tg3_get_rx_csum,
.set_rx_csum = tg3_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
+#ifdef BCM_HAS_SET_TX_CSUM
.set_tx_csum = tg3_set_tx_csum,
+#endif
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
+#if TG3_TSO_SUPPORT != 0
.get_tso = ethtool_op_get_tso,
.set_tso = tg3_set_tso,
+#endif
+#if (LINUX_VERSION_CODE < 0x20618)
.self_test_count = tg3_get_test_count,
+#endif
.self_test = tg3_self_test,
.get_strings = tg3_get_strings,
.phys_id = tg3_phys_id,
+#if (LINUX_VERSION_CODE < 0x20618)
.get_stats_count = tg3_get_stats_count,
+#endif
.get_ethtool_stats = tg3_get_ethtool_stats,
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
+#if (LINUX_VERSION_CODE >= 0x20618)
+ .get_sset_count = tg3_get_sset_count,
+#endif
+#if defined(ETHTOOL_GPERMADDR) && (LINUX_VERSION_CODE < 0x020617)
.get_perm_addr = ethtool_op_get_perm_addr,
+#endif
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
return;
}
}
- tp->nvram_size = 0x80000;
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
tp->nvram_pagesize = 264;
if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
- tp->nvram_size = (protect ? 0x3e200 : 0x80000);
+ tp->nvram_size = (protect ? 0x3e200 :
+ TG3_NVRAM_SIZE_512KB);
else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
- tp->nvram_size = (protect ? 0x1f200 : 0x40000);
+ tp->nvram_size = (protect ? 0x1f200 :
+ TG3_NVRAM_SIZE_256KB);
else
- tp->nvram_size = (protect ? 0x1f200 : 0x20000);
+ tp->nvram_size = (protect ? 0x1f200 :
+ TG3_NVRAM_SIZE_128KB);
break;
case FLASH_5752VENDOR_ST_M45PE10:
case FLASH_5752VENDOR_ST_M45PE20:
tp->tg3_flags2 |= TG3_FLG2_FLASH;
tp->nvram_pagesize = 256;
if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
- tp->nvram_size = (protect ? 0x10000 : 0x20000);
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_64KB :
+ TG3_NVRAM_SIZE_128KB);
else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
- tp->nvram_size = (protect ? 0x10000 : 0x40000);
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_64KB :
+ TG3_NVRAM_SIZE_256KB);
else
- tp->nvram_size = (protect ? 0x20000 : 0x80000);
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_128KB :
+ TG3_NVRAM_SIZE_512KB);
break;
}
}
case FLASH_5761VENDOR_ATMEL_MDB161D:
case FLASH_5761VENDOR_ST_A_M45PE16:
case FLASH_5761VENDOR_ST_M_M45PE16:
- tp->nvram_size = 0x100000;
+ tp->nvram_size = TG3_NVRAM_SIZE_2MB;
break;
case FLASH_5761VENDOR_ATMEL_ADB081D:
case FLASH_5761VENDOR_ATMEL_MDB081D:
case FLASH_5761VENDOR_ST_A_M45PE80:
case FLASH_5761VENDOR_ST_M_M45PE80:
- tp->nvram_size = 0x80000;
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
case FLASH_5761VENDOR_ATMEL_ADB041D:
case FLASH_5761VENDOR_ATMEL_MDB041D:
case FLASH_5761VENDOR_ST_A_M45PE40:
case FLASH_5761VENDOR_ST_M_M45PE40:
- tp->nvram_size = 0x40000;
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
break;
case FLASH_5761VENDOR_ATMEL_ADB021D:
case FLASH_5761VENDOR_ATMEL_MDB021D:
case FLASH_5761VENDOR_ST_A_M45PE20:
case FLASH_5761VENDOR_ST_M_M45PE20:
- tp->nvram_size = 0x20000;
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
break;
}
}
(EEPROM_DEFAULT_CLOCK_PERIOD <<
EEPROM_ADDR_CLKPERD_SHIFT)));
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 1000);
+#else
msleep(1);
+#endif
/* Enable seeprom accesses. */
tw32_f(GRC_LOCAL_CTRL,
if (tmp & EEPROM_ADDR_COMPLETE)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 1000);
+#else
msleep(1);
+#endif
}
if (!(tmp & EEPROM_ADDR_COMPLETE))
return -EBUSY;
return ret;
}
+static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
+{
+ u32 v;
+ int res = tg3_nvram_read(tp, offset, &v);
+ if (!res)
+ *val = cpu_to_le32(v);
+ return res;
+}
+
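tg3_nvram_read_le() above folds the cpu_to_le32() conversion into the read, so callers like tg3_get_eeprom() can memcpy the result byte-for-byte. A userspace sketch of why that is endian-safe; cpu_to_le32() is re-implemented portably here since the kernel header is not available:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Portable stand-in for the kernel's cpu_to_le32(). */
    static uint32_t my_cpu_to_le32(uint32_t v)
    {
    	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, v >> 24 };
    	uint32_t le;

    	memcpy(&le, b, sizeof(le));
    	return le;
    }

    int main(void)
    {
    	uint32_t le = my_cpu_to_le32(0x12345678);
    	uint8_t out[4];

    	/* The pattern tg3_get_eeprom now uses: convert once, then
    	 * memcpy raw bytes into the caller's buffer. The bytes come
    	 * out 78 56 34 12 on any host. */
    	memcpy(out, &le, sizeof(out));
    	printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
    	return 0;
    }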
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
{
int err;
u32 val;
for (i = 0; i < len; i += 4) {
- u32 addr, data;
+ u32 addr;
+ __le32 data;
addr = offset + i;
memcpy(&data, buf + i, 4);
- tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
+ tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
val = tr32(GRC_EEPROM_ADDR);
tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
if (val & EEPROM_ADDR_COMPLETE)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 1000);
+#else
msleep(1);
+#endif
}
if (!(val & EEPROM_ADDR_COMPLETE)) {
rc = -EBUSY;
phy_addr = offset & ~pagemask;
for (j = 0; j < pagesize; j += 4) {
- if ((ret = tg3_nvram_read(tp, phy_addr + j,
- (u32 *) (tmp + j))))
+ if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
+ (__le32 *) (tmp + j))))
break;
}
if (ret)
break;
for (j = 0; j < pagesize; j += 4) {
- u32 data;
+ __be32 data;
- data = *((u32 *) (tmp + j));
- tw32(NVRAM_WRDATA, cpu_to_be32(data));
+ data = *((__be32 *) (tmp + j));
+ /* swab32(le32_to_cpu(data)), actually */
+ tw32(NVRAM_WRDATA, be32_to_cpu(data));
tw32(NVRAM_ADDR, phy_addr + j);
int i, ret = 0;
for (i = 0; i < len; i += 4, offset += 4) {
- u32 data, page_off, phy_addr, nvram_cmd;
+ u32 page_off, phy_addr, nvram_cmd;
+ __be32 data;
memcpy(&data, buf + i, 4);
- tw32(NVRAM_WRDATA, cpu_to_be32(data));
+ tw32(NVRAM_WRDATA, be32_to_cpu(data));
page_off = offset % tp->nvram_pagesize;
pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ / 1000);
+#else
msleep(1);
+#endif
/* Make sure register accesses (indirect or otherwise)
* will function correctly.
LED_CTRL_MODE_PHY_2);
break;
- };
+ }
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
tp->led_ctrl = LED_CTRL_MODE_PHY_2;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
- tp->led_ctrl = LED_CTRL_MODE_MAC;
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
if (cfg2 & (1 << 18))
tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX &&
+ (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
+ tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
+
if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
u32 cfg3;
}
}
+static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
+{
+ int i;
+ u32 val;
+
+ tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
+ tw32(OTP_CTRL, cmd);
+
+ /* Wait for up to 1 ms for command to execute. */
+ for (i = 0; i < 100; i++) {
+ val = tr32(OTP_STATUS);
+ if (val & OTP_STATUS_CMD_DONE)
+ break;
+ udelay(10);
+ }
+
+ return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
+}
+
+/* Read the gphy configuration from the OTP region of the chip. The gphy
+ * configuration is a 32-bit value that straddles the alignment boundary.
+ * We do two 32-bit reads and then shift and merge the results.
+ */
+static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
+{
+ u32 bhalf_otp, thalf_otp;
+
+ tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
+ return 0;
+
+ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+ return 0;
+
+ thalf_otp = tr32(OTP_READ_DATA);
+
+ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+ return 0;
+
+ bhalf_otp = tr32(OTP_READ_DATA);
+
+ return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
+}
+
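tg3_read_otp_phycfg() above pulls the two aligned words at OTP_ADDRESS_MAGIC1/MAGIC2 and splices the straddling 32-bit value out of their inner halves. A worked userspace example of the final merge expression; the word values are made up, with the interesting halves chosen so the result equals TG3_OTP_DEFAULT from later in this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* The 32-bit gphy config straddles two aligned OTP words: its top
     * half sits in the low 16 bits of the word read at MAGIC1, its
     * bottom half in the high 16 bits of the word read at MAGIC2. */
    static uint32_t merge_otp_halves(uint32_t thalf, uint32_t bhalf)
    {
    	return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
    }

    int main(void)
    {
    	/* prints 0x286c1640, i.e. TG3_OTP_DEFAULT */
    	printf("0x%08x\n", merge_otp_halves(0xdead286c, 0x1640beef));
    	return 0;
    }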
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
u32 hw_phy_id_1, hw_phy_id_2;
vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
for (i = 0; i < 256; i += 4) {
u32 tmp, j = 0;
+ __le32 v;
u16 tmp16;
pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
PCI_VPD_ADDR, &tmp16);
if (tmp16 & 0x8000)
break;
+#if (LINUX_VERSION_CODE < 0x20607)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+#else
msleep(1);
+#endif
}
if (!(tmp16 & 0x8000))
goto out_not_found;
pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
&tmp);
- tmp = cpu_to_le32(tmp);
- memcpy(&vpd_data[i], &tmp, 4);
+ v = cpu_to_le32(tmp);
+ memcpy(&vpd_data[i], &v, 4);
}
}
return 1;
}
+static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
+{
+ u32 offset, major, minor, build;
+
+ tp->fw_ver[0] = 's';
+ tp->fw_ver[1] = 'b';
+ tp->fw_ver[2] = '\0';
+
+ if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
+ return;
+
+ switch (val & TG3_EEPROM_SB_REVISION_MASK) {
+ case TG3_EEPROM_SB_REVISION_0:
+ offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_2:
+ offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_3:
+ offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (tg3_nvram_read_swab(tp, offset, &val))
+ return;
+
+ build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
+ TG3_EEPROM_SB_EDH_BLD_SHFT;
+ major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
+ TG3_EEPROM_SB_EDH_MAJ_SHFT;
+ minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
+
+ if (minor > 99 || build > 26)
+ return;
+
+ snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
+
+ if (build > 0) {
+ tp->fw_ver[8] = 'a' + build - 1;
+ tp->fw_ver[9] = '\0';
+ }
+}
+
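tg3_read_sb_ver() above unpacks the selfboot EDH word into major/minor/build fields and renders strings like "sb v1.02c", where build 3 maps to the letter 'c'. A standalone decoder using the TG3_EEPROM_SB_EDH_* masks defined later in this patch, fed a hypothetical EDH word:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TG3_EEPROM_SB_EDH_MAJ_MASK	0x00000700
    #define TG3_EEPROM_SB_EDH_MAJ_SHFT	8
    #define TG3_EEPROM_SB_EDH_MIN_MASK	0x000000ff
    #define TG3_EEPROM_SB_EDH_BLD_MASK	0x0000f800
    #define TG3_EEPROM_SB_EDH_BLD_SHFT	11

    int main(void)
    {
    	uint32_t val = 0x00001902;	/* hypothetical EDH word */
    	unsigned int build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> TG3_EEPROM_SB_EDH_BLD_SHFT;
    	unsigned int major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> TG3_EEPROM_SB_EDH_MAJ_SHFT;
    	unsigned int minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
    	char ver[16] = "sb";

    	snprintf(ver + 2, sizeof(ver) - 2, " v%u.%02u", major, minor);
    	if (build > 0 && build <= 26) {
    		size_t n = strlen(ver);

    		ver[n] = 'a' + build - 1;	/* build 3 -> 'c' */
    		ver[n + 1] = '\0';
    	}
    	printf("%s\n", ver);			/* "sb v1.02c" */
    	return 0;
    }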
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
u32 val, offset, start;
if (tg3_nvram_read_swab(tp, 0, &val))
return;
- if (val != TG3_EEPROM_MAGIC)
+ if (val != TG3_EEPROM_MAGIC) {
+ if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
+ tg3_read_sb_ver(tp, val);
+
return;
+ }
if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
tg3_nvram_read_swab(tp, 0x4, &start))
offset = offset + ver_offset - start;
for (i = 0; i < 16; i += 4) {
- if (tg3_nvram_read(tp, offset + i, &val))
+ __le32 v;
+ if (tg3_nvram_read_le(tp, offset + i, &v))
return;
- val = le32_to_cpu(val);
- memcpy(tp->fw_ver + i, &val, 4);
+ memcpy(tp->fw_ver + i, &v, 4);
}
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
tp->fw_ver[bcnt++] = ' ';
for (i = 0; i < 4; i++) {
- if (tg3_nvram_read(tp, offset, &val))
+ __le32 v;
+ if (tg3_nvram_read_le(tp, offset, &v))
return;
- val = le32_to_cpu(val);
- offset += sizeof(val);
+ offset += sizeof(v);
- if (bcnt > TG3_VER_SIZE - sizeof(val)) {
- memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
+ if (bcnt > TG3_VER_SIZE - sizeof(v)) {
+ memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
break;
}
- memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
- bcnt += sizeof(val);
+ memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
+ bcnt += sizeof(v);
}
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
+#if (LINUX_VERSION_CODE >= 0x2060a)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
PCI_DEVICE_ID_VIA_8385_0) },
{ },
};
+#endif
u32 misc_ctrl_reg;
u32 cacheline_sz_reg;
u32 pci_state_reg, grc_misc_cfg;
u32 val;
u16 pci_cmd;
- int err, pcie_cap;
+ int err;
/* Force memory write invalidate off. If we leave it on,
* then on 5700_BX chips we have to enable a workaround.
}
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ static struct tg3_dev_id {
+ u32 vendor;
+ u32 device;
+ } bridge_chipsets[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
+ { },
+ };
+ struct tg3_dev_id *pci_id = &bridge_chipsets[0];
+ struct pci_dev *bridge = NULL;
+
+ while (pci_id->vendor != 0) {
+ bridge = pci_get_device(pci_id->vendor,
+ pci_id->device,
+ bridge);
+ if (!bridge) {
+ pci_id++;
+ continue;
+ }
+ if (bridge->subordinate &&
+ (bridge->subordinate->number <=
+ tp->pdev->bus->number) &&
+ (bridge->subordinate->subordinate >=
+ tp->pdev->bus->number)) {
+ tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
+ pci_dev_put(bridge);
+ break;
+ }
+ }
+ }
+
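The new 5701 quirk detection above walks every Intel PXH bridge in the system and checks whether the NIC's bus number falls inside the bridge's forwarding window. The containment test is standard PCI topology arithmetic: a bridge forwards for all buses numbered between its secondary bus and its subordinate bus, inclusive. A small sketch of just that test, with example bus numbers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Field names mirror bridge->subordinate->number and
     * bridge->subordinate->subordinate in the hunk above. */
    struct bus_window {
    	uint8_t secondary;
    	uint8_t subordinate;
    };

    static bool behind_bridge(struct bus_window w, uint8_t dev_bus)
    {
    	return w.secondary <= dev_bus && dev_bus <= w.subordinate;
    }

    int main(void)
    {
    	struct bus_window pxh = { .secondary = 4, .subordinate = 6 };	/* example */

    	printf("bus 5: %d\n", behind_bridge(pxh, 5));	/* 1: DMA bug flag set */
    	printf("bus 9: %d\n", behind_bridge(pxh, 9));	/* 0: unaffected */
    	return 0;
    }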
/* The EPB bridge inside 5714, 5715, and 5780 cannot support
* DMA addresses > 40-bit. This bridge may have other additional
* 57xx devices behind it in some 4-port NIC designs for example.
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+ if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
- pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap != 0) {
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+
+ tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
+ if (tp->pcie_cap != 0) {
+ u16 lnkctl;
+
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
pcie_set_readrq(tp->pdev, 4096);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u16 lnkctl;
-
- pci_read_config_word(tp->pdev,
- pcie_cap + PCI_EXP_LNKCTL,
- &lnkctl);
- if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
+ pci_read_config_word(tp->pdev,
+ tp->pcie_cap + PCI_EXP_LNKCTL,
+ &lnkctl);
+ if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
+ }
+ } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+ if (!tp->pcix_cap) {
+ printk(KERN_ERR PFX "Cannot find PCI-X "
+ "capability, aborting.\n");
+ return -EIO;
}
+
+ if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
+ tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
}
/* If we have an AMD 762 or VIA K8T800 chipset, write
* every mailbox register write to force the writes to be
* posted to the chip in order.
*/
+#if (LINUX_VERSION_CODE < 0x2060a)
+ if ((pci_find_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL) ||
+ pci_find_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_8131_BRIDGE, NULL) ||
+ pci_find_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8385_0, NULL)) &&
+#else
if (pci_dev_present(write_reorder_chipsets) &&
+#endif
!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
cacheline_sz_reg);
}
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
- tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
- if (!tp->pcix_cap) {
- printk(KERN_ERR PFX "Cannot find PCI-X "
- "capability, aborting.\n");
- return -EIO;
- }
- }
-
- pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
- &pci_state_reg);
-
- if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
- tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ /* 5700 BX chips need to have their TX producer index
+ * mailboxes written twice to workaround a bug.
+ */
+ tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
- /* If this is a 5700 BX chipset, and we are in PCI-X
- * mode, enable register write workaround.
+ /* If we are in PCI-X mode, enable register write workaround.
*
* The workaround is to use indirect register accesses
* for all chip writes not to mailbox registers.
*/
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
u32 pm_reg;
tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
}
}
- /* 5700 BX chips need to have their TX producer index mailboxes
- * written twice to workaround a bug.
- */
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
- tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
-
if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
- tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
- }
-
/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
* GPIO1 driven high will bring 5700's external PHY out of reset.
* It is also used as eeprom write protect on LOMs.
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
+ /* Turn off the debug UART. */
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+ if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
+ /* Keep VMain power. */
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OUTPUT0;
+ }
+
/* Force the chip into D0. */
err = tg3_set_power_state(tp, PCI_D0);
if (err) {
tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ tp->phy_otp = tg3_read_otp_phycfg(tp);
+ if (tp->phy_otp == 0)
+ tp->phy_otp = TG3_OTP_DEFAULT;
+ }
+
+ if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
+ tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
+ else
+ tp->mi_mode = MAC_MI_MODE_BASE;
+
tp->coalesce_mode = 0;
if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
tp->misc_host_ctrl);
}
+ /* Preserve the APE MAC_MODE bits */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tp->mac_mode = tr32(MAC_MODE) |
+ MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ else
+ tp->mac_mode = TG3_DEF_MAC_MODE;
+
/* these are limited to 10/100 only */
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
(grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
else
tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
- tp->rx_offset = 2;
+ tp->rx_offset = NET_IP_ALIGN + VLAN_HLEN;
+ tp->rx_copy_thresh = RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
- tp->rx_offset = 0;
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+ tp->rx_offset -= NET_IP_ALIGN;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
tp->rx_std_max_post = TG3_RX_RING_SIZE;
}
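The hunk above drops the magic rx_offset of 2 in favor of NET_IP_ALIGN + VLAN_HLEN, and for the 5701 in PCI-X mode, which cannot DMA to the 2-byte IP-align offset, it removes the pad and (where unaligned loads are costly) forces every packet through the copy path so the stack still sees an aligned IP header. A sketch of the resulting settings, with the macro values assumed for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define NET_IP_ALIGN		2	/* typical; 0 on some architectures */
    #define VLAN_HLEN		4
    #define RX_COPY_THRESHOLD	256	/* as defined later in this patch */

    int main(void)
    {
    	int is_5701_pcix = 1;		/* hypothetical chip */
    	int efficient_unaligned = 0;	/* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
    	uint16_t rx_offset = NET_IP_ALIGN + VLAN_HLEN;
    	uint16_t rx_copy_thresh = RX_COPY_THRESHOLD;

    	if (is_5701_pcix) {
    		/* Receive buffers lose the IP-align pad... */
    		rx_offset -= NET_IP_ALIGN;
    		/* ...and every packet is copied so the header
    		 * ends up aligned (threshold = max u16). */
    		if (!efficient_unaligned)
    			rx_copy_thresh = (uint16_t)~0;
    	}

    	printf("rx_offset=%u copy_thresh=%u\n", rx_offset, rx_copy_thresh);
    	return 0;
    }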
if (!is_valid_ether_addr(&dev->dev_addr[0])) {
-#ifdef CONFIG_SPARC64
+#ifdef CONFIG_SPARC
if (!tg3_get_default_macaddr_sparc(tp))
return 0;
#endif
return -EINVAL;
}
+#ifdef ETHTOOL_GPERMADDR
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+#endif
return 0;
}
val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
break;
- };
+ }
} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
switch (cacheline_size) {
case 16:
val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
break;
- };
+ }
} else {
switch (cacheline_size) {
case 16:
val |= (DMA_RWCTRL_READ_BNDRY_1024 |
DMA_RWCTRL_WRITE_BNDRY_1024);
break;
- };
+ }
}
out:
}
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_16) {
+#if (LINUX_VERSION_CODE >= 0x2060a)
static struct pci_device_id dma_wait_state_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
{ },
};
+#endif
/* DMA test passed without adjusting DMA boundary,
* now look for chipsets that are known to expose the
* DMA bug without failing the test.
*/
- if (pci_dev_present(dma_wait_state_chipsets)) {
+#if (LINUX_VERSION_CODE < 0x2060a)
+ if (pci_find_device(PCI_VENDOR_ID_APPLE,
+ PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL))
+#else
+ if (pci_dev_present(dma_wait_state_chipsets))
+#endif
+ {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
}
case PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
- };
+ }
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
const struct pci_device_id *ent)
{
static int tg3_version_printed = 0;
- unsigned long tg3reg_base, tg3reg_len;
+ unsigned long tg3reg_len;
struct net_device *dev;
struct tg3 *tp;
- int i, err, pm_cap;
+ int err, pm_cap;
char str[40];
u64 dma_mask, persist_dma_mask;
+ DECLARE_MAC_BUF(mac);
if (tg3_version_printed++ == 0)
printk(KERN_INFO "%s", version);
goto err_out_free_res;
}
- tg3reg_base = pci_resource_start(pdev, 0);
- tg3reg_len = pci_resource_len(pdev, 0);
-
dev = alloc_etherdev(sizeof(*tp));
if (!dev) {
printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
}
SET_MODULE_OWNER(dev);
+#if (LINUX_VERSION_CODE >= 0x20419)
SET_NETDEV_DEV(dev, &pdev->dev);
+#endif
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
tp->pdev = pdev;
tp->dev = dev;
tp->pm_cap = pm_cap;
- tp->mac_mode = TG3_DEF_MAC_MODE;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
- tp->mi_mode = MAC_MI_MODE_BASE;
+
if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
else
#endif
spin_lock_init(&tp->lock);
spin_lock_init(&tp->indirect_lock);
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
+ INIT_WORK(&tp->reset_task, tg3_reset_task);
+#else
INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+#endif
+
+ dev->mem_start = pci_resource_start(pdev, 0);
+ tg3reg_len = pci_resource_len(pdev, 0);
+ dev->mem_end = dev->mem_start + tg3reg_len;
- tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
+ tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
if (!tp->regs) {
printk(KERN_ERR PFX "Cannot map device registers, "
"aborting.\n");
dev->set_mac_address = tg3_set_mac_addr;
dev->do_ioctl = tg3_ioctl;
dev->tx_timeout = tg3_tx_timeout;
+#ifdef TG3_NAPI
+ netif_napi_add(dev, &tp->napi, tg3_poll, 64);
+#else
dev->poll = tg3_poll;
- dev->ethtool_ops = &tg3_ethtool_ops;
dev->weight = 64;
+#endif
+ dev->ethtool_ops = &tg3_ethtool_ops;
dev->watchdog_timeo = TG3_TX_TIMEOUT;
dev->change_mtu = tg3_change_mtu;
dev->irq = pdev->irq;
-#ifdef CONFIG_NET_POLL_CONTROLLER
+#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
dev->poll_controller = tg3_poll_controller;
#endif
tg3_init_bufmgr_config(tp);
+#if TG3_TSO_SUPPORT != 0
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
}
if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
dev->features |= NETIF_F_TSO6;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))
dev->features |= NETIF_F_TSO_ECN;
}
+#endif
if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ resource_size_t tg3reg_base;
+
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
printk(KERN_ERR PFX "Cannot find proper PCI device "
"base address for APE, aborting.\n");
tg3reg_len = pci_resource_len(pdev, 2);
tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
- if (tp->aperegs == 0UL) {
+ if (!tp->aperegs) {
printk(KERN_ERR PFX "Cannot map APE registers, "
"aborting.\n");
err = -ENOMEM;
* checksumming.
*/
if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
+#ifdef NETIF_F_IPV6_CSUM
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ dev->features |= NETIF_F_IPV6_CSUM;
+#else
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
else
dev->features |= NETIF_F_IP_CSUM;
dev->features |= NETIF_F_SG;
+#endif
tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
/* flow control autonegotiation is default behavior */
tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
tg3_init_coal(tp);
goto err_out_apeunmap;
}
- printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
+ printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
+ "(%s) %s Ethernet %s\n",
dev->name,
tp->board_part_number,
tp->pci_chip_rev_id,
tg3_bus_string(tp, str),
((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
- "10/100/1000Base-T")));
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i],
- i == 5 ? '\n' : ':');
+ "10/100/1000Base-T")),
+ print_mac(mac, dev->dev_addr));
printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
"MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
}
err_out_free_dev:
+#if (LINUX_VERSION_CODE >= 0x20418)
free_netdev(dev);
+#else
+ kfree(dev);
+#endif
err_out_free_res:
pci_release_regions(pdev);
if (dev) {
struct tg3 *tp = netdev_priv(dev);
+#if (LINUX_VERSION_CODE >= 0x20600)
flush_scheduled_work();
+#endif
unregister_netdev(dev);
if (tp->aperegs) {
iounmap(tp->aperegs);
iounmap(tp->regs);
tp->regs = NULL;
}
+#if (LINUX_VERSION_CODE >= 0x20418)
free_netdev(dev);
+#else
+ kfree(dev);
+#endif
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
+#if (LINUX_VERSION_CODE < 0x2060b)
+static int tg3_suspend(struct pci_dev *pdev, u32 state)
+#else
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
+#endif
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
* MSI address and data need to be saved if using MSI and
* netif_running().
*/
+#if (LINUX_VERSION_CODE < 0x2060a)
+ pci_save_state(pdev, tp->pci_cfg_state);
+#else
pci_save_state(pdev);
+#endif
if (!netif_running(dev))
return 0;
+#if (LINUX_VERSION_CODE >= 0x20600)
flush_scheduled_work();
+#endif
tg3_netif_stop(tp);
del_timer_sync(&tp->timer);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
tg3_full_unlock(tp);
+#if (LINUX_VERSION_CODE < 0x2060b)
+ err = tg3_set_power_state(tp, state);
+#else
err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
+#endif
if (err) {
tg3_full_lock(tp, 0);
struct tg3 *tp = netdev_priv(dev);
int err;
+#if (LINUX_VERSION_CODE < 0x2060a)
+ pci_restore_state(tp->pdev, tp->pci_cfg_state);
+#else
pci_restore_state(tp->pdev);
+#endif
if (!netif_running(dev))
return 0;
if (err)
return err;
+#ifndef BCM_HAS_INTX_MSI_WORKAROUND
/* Hardware bug - MSI won't work if INTX disabled. */
if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
+#if (LINUX_VERSION_CODE < 0x2060e)
+ tg3_enable_intx(tp->pdev);
+#else
pci_intx(tp->pdev, 1);
+#endif
+#endif
netif_device_attach(dev);
static int __init tg3_init(void)
{
+#if (LINUX_VERSION_CODE < 0x020613)
return pci_module_init(&tg3_driver);
+#else
+ return pci_register_driver(&tg3_driver);
+#endif
}
static void __exit tg3_cleanup(void)
#ifndef _T3_H
#define _T3_H
+#if !defined(PCI_DEVICE_ID_TIGON3_5704S_2)
+#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5705F)
+#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5720)
+#define PCI_DEVICE_ID_TIGON3_5720 0x1658
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5721)
+#define PCI_DEVICE_ID_TIGON3_5721 0x1659
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5750)
+#define PCI_DEVICE_ID_TIGON3_5750 0x1676
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5751)
+#define PCI_DEVICE_ID_TIGON3_5751 0x1677
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5750M)
+#define PCI_DEVICE_ID_TIGON3_5750M 0x167c
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5751M)
+#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5751F)
+#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5789)
+#define PCI_DEVICE_ID_TIGON3_5789 0x169d
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5753)
+#define PCI_DEVICE_ID_TIGON3_5753 0x16f7
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5753M)
+#define PCI_DEVICE_ID_TIGON3_5753M 0x16fd
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5753F)
+#define PCI_DEVICE_ID_TIGON3_5753F 0x16fe
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5781)
+#define PCI_DEVICE_ID_TIGON3_5781 0x16dd
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5752)
+#define PCI_DEVICE_ID_TIGON3_5752 0x1600
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5752M)
+#define PCI_DEVICE_ID_TIGON3_5752M 0x1601
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5714)
+#define PCI_DEVICE_ID_TIGON3_5714 0x1668
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5714S)
+#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5780)
+#define PCI_DEVICE_ID_TIGON3_5780 0x166a
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5780S)
+#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5715)
+#define PCI_DEVICE_ID_TIGON3_5715 0x1678
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5715S)
+#define PCI_DEVICE_ID_TIGON3_5715S 0x1679
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5756)
+#define PCI_DEVICE_ID_TIGON3_5756 0x1674
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5754)
+#define PCI_DEVICE_ID_TIGON3_5754 0x167a
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5754M)
+#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5755)
+#define PCI_DEVICE_ID_TIGON3_5755 0x167b
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5755M)
+#define PCI_DEVICE_ID_TIGON3_5755M 0x1673
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5722)
+#define PCI_DEVICE_ID_TIGON3_5722 0x165a
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5786)
+#define PCI_DEVICE_ID_TIGON3_5786 0x169a
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5787M)
+#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5787)
+#define PCI_DEVICE_ID_TIGON3_5787 0x169b
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5787F)
+#define PCI_DEVICE_ID_TIGON3_5787F 0x167f
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5906)
+#define PCI_DEVICE_ID_TIGON3_5906 0x1712
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5906M)
+#define PCI_DEVICE_ID_TIGON3_5906M 0x1713
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5784)
+#define PCI_DEVICE_ID_TIGON3_5784 0x1698
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5764)
+#define PCI_DEVICE_ID_TIGON3_5764 0x1684
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5723)
+#define PCI_DEVICE_ID_TIGON3_5723 0x165b
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5761)
+#define PCI_DEVICE_ID_TIGON3_5761 0x1681
+#endif
+
+#if !defined(PCI_DEVICE_ID_TIGON3_5761E)
+#define PCI_DEVICE_ID_TIGON3_5761E 0x1680
+#endif
+
+#define PCI_DEVICE_ID_TIGON3_5761S 0x1688
+#define PCI_DEVICE_ID_TIGON3_5761SE 0x1689
+
+#if !defined(PCI_DEVICE_ID_APPLE_TIGON3)
+#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645
+#endif
+
+#if !defined(PCI_DEVICE_ID_APPLE_UNI_N_PCI15)
+#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e
+#endif
+
+#if !defined(PCI_DEVICE_ID_VIA_8385_0)
+#define PCI_DEVICE_ID_VIA_8385_0 0x3188
+#endif
+
+#if !defined(PCI_DEVICE_ID_AMD_8131_BRIDGE)
+#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
+#endif
+
+#if !defined(PCI_DEVICE_ID_SERVERWORKS_EPB)
+#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103
+#endif
+
+#if !defined(PCI_VENDOR_ID_ARIMA)
+#define PCI_VENDOR_ID_ARIMA 0x161f
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#if !defined(PCI_VPD_ADDR)
+#define PCI_VPD_ADDR 2
+#define PCI_VPD_DATA 4
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#include "tg3_compat.h"
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK ((u64) 0xffffffffffffffffULL)
+#define DMA_32BIT_MASK ((u64) 0x00000000ffffffffULL)
+#endif
+
+#ifndef DMA_40BIT_MASK
+#define DMA_40BIT_MASK ((u64) 0x000000ffffffffffULL)
+#endif
+
+#ifndef mmiowb
+#define mmiowb()
+#endif
+
+#ifndef PCI_D0
+typedef u32 pm_message_t;
+typedef u32 pci_power_t;
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef IRQ_RETVAL
+typedef void irqreturn_t;
+#define IRQ_RETVAL(x)
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef IRQF_SAMPLE_RANDOM
+#define IRQF_SAMPLE_RANDOM SA_SAMPLE_RANDOM
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020604)
+#define MODULE_VERSION(version)
+#endif
+
+#if (LINUX_VERSION_CODE <= 0x020600)
+#define schedule_work(x) schedule_task(x)
+#define work_struct tq_struct
+#define INIT_WORK(x, y, z) INIT_TQUEUE(x, y, z)
+#endif
+
+#ifndef ADVERTISE_PAUSE
+#define ADVERTISE_PAUSE_CAP 0x0400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+#define ADVERTISE_PAUSE_ASYM 0x0800
+#endif
+#ifndef LPA_PAUSE
+#define LPA_PAUSE_CAP 0x0400
+#endif
+#ifndef LPA_PAUSE_ASYM
+#define LPA_PAUSE_ASYM 0x0800
+#endif
+#ifndef MII_CTRL1000
+#define MII_CTRL1000 0x9
+#endif
+#ifndef MII_STAT1000
+#define MII_STAT1000 0xa
+#endif
+#ifndef BMCR_SPEED1000
+#define BMCR_SPEED1000 0x40
+#endif
+#ifndef ADVERTISE_1000FULL
+#define ADVERTISE_1000FULL 0x0200
+#define ADVERTISE_1000HALF 0x0100
+#endif
+#ifndef ADVERTISE_1000XFULL
+#define ADVERTISE_1000XFULL 0x20
+#define ADVERTISE_1000XHALF 0x40
+#define ADVERTISE_1000XPAUSE 0x80
+#define ADVERTISE_1000XPSE_ASYM 0x100
+#define LPA_1000XFULL 0x20
+#define LPA_1000XHALF 0x40
+#define LPA_1000XPAUSE 0x80
+#define LPA_1000XPAUSE_ASYM 0x100
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x020618)
+#define TG3_NAPI
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020420)
+#define ETH_SS_TEST 0
+#define ETH_SS_STATS 1
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020605)
+#define pci_dma_sync_single_for_cpu(pdev, map, len, dir) \
+ pci_dma_sync_single(pdev, map, len, dir)
+
+#define pci_dma_sync_single_for_device(pdev, map, len, dir)
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020600)
+#define pci_get_device(x, y, z) pci_find_device(x, y, z)
+#define pci_get_slot(x, y) pci_find_slot((x)->number, y)
+#define pci_dev_put(x)
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020547)
+#define pci_set_consistent_dma_mask(pdev, mask) (0)
+#endif
+
+#if (LINUX_VERSION_CODE < 0x020612)
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+ unsigned int length)
+{
+ struct sk_buff *skb = dev_alloc_skb(length);
+ if (skb)
+ skb->dev = dev;
+ return skb;
+}
+#endif
+
+#ifndef NETIF_F_GSO
+static inline void netif_tx_lock(struct net_device *dev)
+{
+ spin_lock(&dev->xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+}
+#endif
+
+#if !defined(HAVE_NETDEV_PRIV) && (LINUX_VERSION_CODE != 0x020603) && (LINUX_VERSION_CODE != 0x020604) && (LINUX_VERSION_CODE != 0x020605)
+static inline void *netdev_priv(struct net_device *dev)
+{
+ return dev->priv;
+}
+#endif
+
+#ifdef OLD_NETIF
+static inline void netif_poll_disable(struct net_device *dev)
+{
+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+ /* No hurry. */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1);
+ }
+}
+
+static inline void netif_poll_enable(struct net_device *dev)
+{
+ clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+static inline void netif_tx_disable(struct net_device *dev)
+{
+ spin_lock_bh(&dev->xmit_lock);
+ netif_stop_queue(dev);
+ spin_unlock_bh(&dev->xmit_lock);
+}
+
+#endif
+
+#if (LINUX_VERSION_CODE < 0x2060c)
+static inline int skb_header_cloned(struct sk_buff *skb) { return 0; }
+#endif
+
+#ifndef VLAN_GROUP_ARRAY_SPLIT_PARTS
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+ struct net_device *dev)
+{
+ if (vg)
+ vg->vlan_devices[vlan_id] = dev;
+}
+#endif
+#if (LINUX_VERSION_CODE < 0x2060e)
+static inline void tg3_enable_intx(struct pci_dev *pdev)
+{
+ u16 pci_command;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ if (pci_command & PCI_COMMAND_INTX_DISABLE)
+ pci_write_config_word(pdev, PCI_COMMAND,
+ pci_command & ~PCI_COMMAND_INTX_DISABLE);
+}
+#endif
+
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do { } while (0)
+#endif
+
+#ifndef NETIF_F_LLTX
+#define NETIF_F_LLTX 0
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
#define TG3_64BIT_REG_HIGH 0x00UL
#define TG3_64BIT_REG_LOW 0x04UL
#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
#define TG3_BDINFO_SIZE 0x10UL
-#define RX_COPY_THRESHOLD 256
+#define RX_COPY_THRESHOLD 256
#define TG3_RX_INTERNAL_RING_SZ_5906 32
#define TG3PCI_IRQ_PIN 0x0000003d
#define TG3PCI_MIN_GNT 0x0000003e
#define TG3PCI_MAX_LAT 0x0000003f
+#ifndef PCI_X_CMD_READ_2K
+#define PCI_X_CMD_READ_2K 0x0008
+#endif
/* 0x40 --> 0x64 unused */
#define TG3PCI_MSI_DATA 0x00000064
/* 0x66 --> 0x68 unused */
#define CHIPREV_ID_5752_A1 0x6001
#define CHIPREV_ID_5714_A2 0x9002
#define CHIPREV_ID_5906_A1 0xc001
-#define CHIPREV_ID_5784_A0 0x5784000
-#define CHIPREV_ID_5784_A1 0x5784001
-#define CHIPREV_ID_5761_A0 0x5761000
-#define CHIPREV_ID_5761_A1 0x5761001
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
#define CHIPREV_5704_BX 0x21
#define CHIPREV_5750_AX 0x40
#define CHIPREV_5750_BX 0x41
+#define CHIPREV_5784_AX 0x57840
+#define CHIPREV_5761_AX 0x57610
#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff)
#define METAL_REV_A0 0x00
#define METAL_REV_A1 0x01
#define MAC_MODE_TDE_ENABLE 0x00200000
#define MAC_MODE_RDE_ENABLE 0x00400000
#define MAC_MODE_FHDE_ENABLE 0x00800000
+#define MAC_MODE_KEEP_FRAME_IN_WOL 0x01000000
+#define MAC_MODE_APE_RX_EN 0x08000000
+#define MAC_MODE_APE_TX_EN 0x10000000
#define MAC_STATUS 0x00000404
#define MAC_STATUS_PCS_SYNCED 0x00000001
#define MAC_STATUS_SIGNAL_DET 0x00000002
#define MAC_MI_MODE_CLK_10MHZ 0x00000001
#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002
#define MAC_MI_MODE_AUTO_POLL 0x00000010
-#define MAC_MI_MODE_CORE_CLK_62MHZ 0x00008000
+#define MAC_MI_MODE_500KHZ_CONST 0x00008000
#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */
#define MAC_AUTO_POLL_STATUS 0x00000458
#define MAC_AUTO_POLL_ERROR 0x00000001
#define SG_DIG_FIBER_MODE 0x00008000
#define SG_DIG_REMOTE_FAULT_MASK 0x00006000
#define SG_DIG_PAUSE_MASK 0x00001800
+#define SG_DIG_PAUSE_CAP 0x00000800
+#define SG_DIG_ASYM_PAUSE 0x00001000
#define SG_DIG_GBIC_ENABLE 0x00000400
#define SG_DIG_CHECK_END_ENABLE 0x00000200
#define SG_DIG_SGMII_AUTONEG_TIMER 0x00000100
#define SG_DIG_AUTONEG_LOW_ENABLE 0x00000004
#define SG_DIG_REMOTE_LOOPBACK 0x00000002
#define SG_DIG_LOOPBACK 0x00000001
+#define SG_DIG_COMMON_SETUP (SG_DIG_CRC16_CLEAR_N | \
+ SG_DIG_LOCAL_DUPLEX_STATUS | \
+ SG_DIG_LOCAL_LINK_STATUS | \
+ (0x2 << SG_DIG_SPEED_STATUS_SHIFT) | \
+ SG_DIG_FIBER_MODE | SG_DIG_GBIC_ENABLE)
#define SG_DIG_STATUS 0x000005b4
#define SG_DIG_CRC16_BUS_MASK 0xffff0000
#define SG_DIG_PARTNER_FAULT_MASK 0x00600000 /* If !MRADV_CRC16_SELECT */
#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200
#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400
#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000
+#define CPMU_CTRL_GPHY_10MB_RXONLY 0x00010000
#define TG3_CPMU_LSPD_10MB_CLK 0x00003604
#define CPMU_LSPD_10MB_MACCLK_MASK 0x001f0000
#define CPMU_LSPD_10MB_MACCLK_6_25 0x00130000
#define TG3_CPMU_LSPD_1000MB_CLK 0x0000360c
#define CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000
+#define CPMU_LSPD_1000MB_MACCLK_30_0 0x00030000
#define CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000
#define CPMU_LSPD_1000MB_MACCLK_MASK 0x001f0000
#define TG3_CPMU_LNK_AWARE_PWRMD 0x00003610
#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
#define WDMAC_MODE_RX_ACCEL 0x00000400
+#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
#define WDMAC_STATUS 0x00004c04
#define WDMAC_STATUS_TGTABORT 0x00000004
#define WDMAC_STATUS_MSTABORT 0x00000008
#define GRC_MISC_CFG_BOARD_ID_5788 0x00010000
#define GRC_MISC_CFG_BOARD_ID_5788M 0x00018000
#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000
+#define GRC_MISC_CFG_BOARD_ID_5754 0x00008000
+#define GRC_MISC_CFG_BOARD_ID_5754M 0x0000c000
#define GRC_MISC_CFG_EPHY_IDDQ 0x00200000
#define GRC_MISC_CFG_KEEP_GPHY_POWER 0x04000000
#define GRC_LOCAL_CTRL 0x00006808
#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000
#define GRC_TIMER 0x0000680c
#define GRC_RX_CPU_EVENT 0x00006810
+#define GRC_RX_CPU_DRIVER_EVENT 0x00004000
#define GRC_RX_TIMER_REF 0x00006814
#define GRC_RX_CPU_SEM 0x00006818
#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c
/* 0x702c unused */
#define NVRAM_ADDR_LOCKOUT 0x00007030
-/* 0x7034 --> 0x7c00 unused */
+/* 0x7034 --> 0x7500 unused */
+
+#define OTP_MODE 0x00007500
+#define OTP_MODE_OTP_THRU_GRC 0x00000001
+#define OTP_CTRL 0x00007504
+#define OTP_CTRL_OTP_PROG_ENABLE 0x00200000
+#define OTP_CTRL_OTP_CMD_READ 0x00000000
+#define OTP_CTRL_OTP_CMD_INIT 0x00000008
+#define OTP_CTRL_OTP_CMD_START 0x00000001
+#define OTP_STATUS 0x00007508
+#define OTP_STATUS_CMD_DONE 0x00000001
+#define OTP_ADDRESS 0x0000750c
+#define OTP_ADDRESS_MAGIC1 0x000000a0
+#define OTP_ADDRESS_MAGIC2 0x00000080
+/* 0x7510 unused */
+
+#define OTP_READ_DATA 0x00007514
+/* 0x7518 --> 0x7c04 unused */
#define PCIE_TRANSACTION_CFG 0x00007c04
#define PCIE_TRANS_CFG_1SHOT_MSI 0x20000000
#define PCIE_PWR_MGMT_THRESH 0x00007d28
#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00
+
+/* OTP bit definitions */
+#define TG3_OTP_AGCTGT_MASK 0x000000e0
+#define TG3_OTP_AGCTGT_SHIFT 1
+#define TG3_OTP_HPFFLTR_MASK 0x00000300
+#define TG3_OTP_HPFFLTR_SHIFT 1
+#define TG3_OTP_HPFOVER_MASK 0x00000400
+#define TG3_OTP_HPFOVER_SHIFT 1
+#define TG3_OTP_LPFDIS_MASK 0x00000800
+#define TG3_OTP_LPFDIS_SHIFT 11
+#define TG3_OTP_VDAC_MASK 0xff000000
+#define TG3_OTP_VDAC_SHIFT 24
+#define TG3_OTP_10BTAMP_MASK 0x0000f000
+#define TG3_OTP_10BTAMP_SHIFT 8
+#define TG3_OTP_ROFF_MASK 0x00e00000
+#define TG3_OTP_ROFF_SHIFT 11
+#define TG3_OTP_RCOFF_MASK 0x001c0000
+#define TG3_OTP_RCOFF_SHIFT 16
+
+#define TG3_OTP_DEFAULT 0x286c1640
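+
+/* Illustrative only (not part of this patch): the PHY setup path is
+ * expected to unpack these one-time-programmable fields with the
+ * mask/shift pairs above, roughly:
+ *
+ *	u32 otp = tp->phy_otp ? tp->phy_otp : TG3_OTP_DEFAULT;
+ *	u32 vdac = (otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT;
+ */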
+
#define TG3_EEPROM_MAGIC 0x669955aa
#define TG3_EEPROM_MAGIC_FW 0xa5000000
#define TG3_EEPROM_MAGIC_FW_MSK 0xff000000
#define TG3_NVM_DIRTYPE_SHIFT 24
#define TG3_NVM_DIRTYPE_ASFINI 1
+#define TG3_EEPROM_SB_F1R0_EDH_OFF 0x10
+#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14
+#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
+#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
+#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
+#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
+#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
+#define TG3_EEPROM_SB_EDH_BLD_MASK 0x0000f800
+#define TG3_EEPROM_SB_EDH_BLD_SHFT 11
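+
+/* How the version fields above decode (a sketch; variable names here are
+ * illustrative, but the masks and shifts follow directly from the defines):
+ *
+ *	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> TG3_EEPROM_SB_EDH_MAJ_SHFT;
+ *	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
+ *	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> TG3_EEPROM_SB_EDH_BLD_SHFT;
+ */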
+
/* 32K Window into NIC internal memory */
#define NIC_SRAM_WIN_BASE 0x00008000
#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004
#define FWCMD_NICDRV_FIX_DMAR 0x00000005
#define FWCMD_NICDRV_FIX_DMAW 0x00000006
+#define FWCMD_NICDRV_LINK_UPDATE 0x0000000c
#define FWCMD_NICDRV_ALIVE2 0x0000000d
#define FWCMD_NICDRV_ALIVE3 0x0000000e
#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c
#define NIC_SRAM_DATA_CFG_2 0x00000d38
+#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400
#define SHASTA_EXT_LED_MODE_MASK 0x00018000
#define SHASTA_EXT_LED_LEGACY 0x00000000
#define SHASTA_EXT_LED_SHARED 0x00008000
#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */
-#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */
#define MII_TG3_EPHY_PTEST 0x17 /* 5906 PHY register */
+#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */
+
+#define MII_TG3_DSP_TAP1 0x0001
+#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
+#define MII_TG3_DSP_AADJ1CH0 0x001f
+#define MII_TG3_DSP_AADJ1CH3 0x601f
+#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
+#define MII_TG3_DSP_EXP8 0x0708
+#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
+#define MII_TG3_DSP_EXP8_AEDW 0x0200
+#define MII_TG3_DSP_EXP75 0x0f75
+#define MII_TG3_DSP_EXP96 0x0f96
+#define MII_TG3_DSP_EXP97 0x0f97
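+
+/* These DSP taps are reached indirectly: write the tap address to
+ * MII_TG3_DSP_ADDRESS, then the value to MII_TG3_DSP_RW_PORT. A minimal
+ * sketch, assuming the driver's tg3_writephy() helper:
+ *
+ *	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_TAP1);
+ *	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, MII_TG3_DSP_TAP1_AGCTGT_DFLT);
+ */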
#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */
#define MII_TG3_AUXCTL_MISC_WREN 0x8000
#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
-#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
+#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
+
+#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
+#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
+#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
#define MII_TG3_AUX_STAT_LPASS 0x0004
#define MII_TG3_ISTAT 0x1a /* IRQ status register */
#define MII_TG3_IMASK 0x1b /* IRQ mask register */
-#define MII_TG3_MISC_SHDW 0x1c
-#define MII_TG3_MISC_SHDW_WREN 0x8000
-#define MII_TG3_MISC_SHDW_APD_SEL 0x2800
-
-#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
-
/* ISTAT/IMASK event bits */
#define MII_TG3_INT_LINKCHG 0x0002
#define MII_TG3_INT_SPEEDCHG 0x0004
#define MII_TG3_INT_DUPLEXCHG 0x0008
#define MII_TG3_INT_ANEG_PAGE_RX 0x0400
+#define MII_TG3_MISC_SHDW 0x1c
+#define MII_TG3_MISC_SHDW_WREN 0x8000
+#define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400
+#define MII_TG3_MISC_SHDW_APD_SEL 0x2800
+
+#define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001
+#define MII_TG3_MISC_SHDW_SCR5_DLLAPD 0x0002
+#define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004
+#define MII_TG3_MISC_SHDW_SCR5_DLPTLM 0x0008
+#define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010
+
+#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
+#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020
+
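+/* An illustrative sequence (again assuming the driver's tg3_writephy()
+ * helper) for enabling PHY auto power-down through the shadow register
+ * block above:
+ *
+ *	tg3_writephy(tp, MII_TG3_MISC_SHDW,
+ *		     MII_TG3_MISC_SHDW_WREN |
+ *		     MII_TG3_MISC_SHDW_APD_SEL |
+ *		     MII_TG3_MISC_SHDW_APD_WKTM_84MS |
+ *		     MII_TG3_MISC_SHDW_APD_ENABLE);
+ */
+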
#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */
#define MII_TG3_EPHY_SHADOW_EN 0x80
#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000
/* APE convenience enumerations. */
+#define TG3_APE_LOCK_GRC 1
#define TG3_APE_LOCK_MEM 4
-#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
/* There are two ways to manage the TX descriptors on the tigon3.
#define TG3_HW_STATUS_SIZE 0x50
struct tg3_hw_status {
- u32 status;
+ volatile u32 status;
#define SD_STATUS_UPDATED 0x00000001
#define SD_STATUS_LINK_CHG 0x00000002
#define SD_STATUS_ERROR 0x00000004
- u32 status_tag;
+ volatile u32 status_tag;
#ifdef __BIG_ENDIAN
- u16 rx_consumer;
- u16 rx_jumbo_consumer;
+ volatile u16 rx_consumer;
+ volatile u16 rx_jumbo_consumer;
#else
- u16 rx_jumbo_consumer;
- u16 rx_consumer;
+ volatile u16 rx_jumbo_consumer;
+ volatile u16 rx_consumer;
#endif
#ifdef __BIG_ENDIAN
- u16 reserved;
- u16 rx_mini_consumer;
+ volatile u16 reserved;
+ volatile u16 rx_mini_consumer;
#else
- u16 rx_mini_consumer;
- u16 reserved;
+ volatile u16 rx_mini_consumer;
+ volatile u16 reserved;
#endif
struct {
#ifdef __BIG_ENDIAN
- u16 tx_consumer;
- u16 rx_producer;
+ volatile u16 tx_consumer;
+ volatile u16 rx_producer;
#else
- u16 rx_producer;
- u16 tx_consumer;
+ volatile u16 rx_producer;
+ volatile u16 tx_consumer;
#endif
} idx[16];
};
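+
+/* The NIC DMA-writes this status block into host memory, which is why the
+ * fields are marked volatile: reads in a poll loop must not be cached by
+ * the compiler. An illustrative (hypothetical) wait on a status update:
+ *
+ *	struct tg3_hw_status *sblk = tp->hw_status;
+ *	while (!(sblk->status & SD_STATUS_UPDATED))
+ *		cpu_relax();
+ *	sblk->status &= ~SD_STATUS_UPDATED;
+ */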
u16 speed;
u8 duplex;
u8 autoneg;
+ u8 flowctrl;
+#define TG3_FLOW_CTRL_TX 0x01
+#define TG3_FLOW_CTRL_RX 0x02
/* Describes what we actually have. */
- u16 active_speed;
+ u8 active_flowctrl;
+
u8 active_duplex;
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
#define AUTONEG_INVALID 0xff
+ u16 active_speed;
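+
+/* Both flow-control bits may be set at once. A hypothetical consumer in
+ * the MAC setup path (tp->rx_mode and RX_MODE_FLOW_CTRL_ENABLE exist
+ * elsewhere in the driver):
+ *
+ *	if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
+ *		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
+ */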
/* When we go in and out of low power mode we need
* to swap with this state.
dma_addr_t tx_desc_mapping;
/* begin "rx thread" cacheline section */
+#ifdef TG3_NAPI
+ struct napi_struct napi;
+#endif
void (*write32_rx_mbox) (struct tg3 *, u32,
u32);
u32 rx_rcb_ptr;
struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
+ union {
unsigned long phy_crc_errors;
+ unsigned long last_event_jiffies;
+ };
- u32 rx_offset;
- /* RHEL5 only */
- u32 tg3_dist_flags;
+ u16 rx_offset;
+ u16 rx_copy_thresh;
u32 tg3_flags;
#define TG3_FLAG_TAGGED_STATUS 0x00000001
#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
#define TG3_FLAG_EEPROM_WRITE_PROT 0x00001000
#define TG3_FLAG_NVRAM 0x00002000
#define TG3_FLAG_NVRAM_BUFFERED 0x00004000
-#define TG3_FLAG_RX_PAUSE 0x00008000
-#define TG3_FLAG_TX_PAUSE 0x00010000
#define TG3_FLAG_PCIX_MODE 0x00020000
#define TG3_FLAG_PCI_HIGH_SPEED 0x00040000
#define TG3_FLAG_PCI_32BIT 0x00080000
#define TG3_FLAG_10_100_ONLY 0x01000000
#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
#define TG3_FLAG_CPMU_PRESENT 0x04000000
-#define TG3_FLAG_IN_RESET_TASK 0x04000000
#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
#define TG3_FLAG_SUPPORT_MSI 0x20000000
u32 tg3_flags3;
#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
#define TG3_FLG3_ENABLE_APE 0x00000002
-#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004
+#define TG3_FLG3_PHY_ENABLE_APD 0x00000004
+#define TG3_FLG3_5701_DMA_BUG 0x00000008
+#define TG3_FLG3_CLKREQ_BUG 0x00000010
struct timer_list timer;
u16 timer_counter;
u8 pci_lat_timer;
u8 pci_hdr_type;
u8 pci_bist;
+#if (LINUX_VERSION_CODE < 0x2060a)
+ u32 pci_cfg_state[64 / sizeof(u32)];
+#endif
int pm_cap;
int msi_cap;
+ union {
int pcix_cap;
+ int pcie_cap;
+ };
/* PHY info */
u32 phy_id;
#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
u32 led_ctrl;
+ u32 phy_otp;
u16 pci_cmd;
char board_part_number[24];
int nvram_lock_cnt;
u32 nvram_size;
+#define TG3_NVRAM_SIZE_64KB 0x00010000
+#define TG3_NVRAM_SIZE_128KB 0x00020000
+#define TG3_NVRAM_SIZE_256KB 0x00040000
+#define TG3_NVRAM_SIZE_512KB 0x00080000
+#define TG3_NVRAM_SIZE_1MB 0x00100000
+#define TG3_NVRAM_SIZE_2MB 0x00200000
+
u32 nvram_pagesize;
u32 nvram_jedecnum;
#define JEDEC_SAIFUN 0x4f
#define JEDEC_SST 0xbf
-#define ATMEL_AT24C64_CHIP_SIZE (64 * 1024)
+#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB
#define ATMEL_AT24C64_PAGE_SIZE (32)
-#define ATMEL_AT24C512_CHIP_SIZE (512 * 1024)
+#define ATMEL_AT24C512_CHIP_SIZE TG3_NVRAM_SIZE_512KB
#define ATMEL_AT24C512_PAGE_SIZE (128)
#define ATMEL_AT45DB0X1B_PAGE_POS 9