obj-$(CONFIG_IXGB) += ixgb.o
-ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o
+CFILES = ixgb_main.c ixgb_hw.c ixgb_ee.c ixgb_param.c \
+ ixgb_ethtool.c kcompat.c
+
+ixgb-objs := $(CFILES:.c=.o)
+
+EXTRA_CFLAGS += -DDRIVER_IXGB
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/capability.h>
#include <net/pkt_sched.h>
#include <linux/list.h>
#include <linux/reboot.h>
+#ifdef NETIF_F_TSO
#include <net/checksum.h>
+#endif
+#ifdef SIOCGMIIPHY
+#include <linux/mii.h>
+#endif
+#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
+#endif
#define BAR_0 0
#define BAR_1 1
#define BAR_5 5
+#include "kcompat.h"
+
struct ixgb_adapter;
+
#include "ixgb_hw.h"
#include "ixgb_ee.h"
#include "ixgb_ids.h"
/* TX/RX descriptor defines */
-#define DEFAULT_TXD 256
-#define MAX_TXD 4096
-#define MIN_TXD 64
+#define DEFAULT_TXD 256
+#define MAX_TXD 4096
+#define MIN_TXD 64
/* hardware cannot reliably support more than 512 descriptors owned by
- * hardware descrioptor cache otherwise an unreliable ring under heavy
+ * hardware descriptor cache, otherwise an unreliable ring under heavy
 * receive load may result */
-/* #define DEFAULT_RXD 1024 */
-/* #define MAX_RXD 4096 */
-#define DEFAULT_RXD 512
-#define MAX_RXD 512
-#define MIN_RXD 64
+
+/* #define DEFAULT_RXD 1024 */
+
+/* #define MAX_RXD 4096 */
+#define DEFAULT_RXD 512
+#define MAX_RXD 512
+#define MIN_RXD 64
/* Supported Rx Buffer Sizes */
#define IXGB_RXBUFFER_2048 2048
struct ixgb_adapter {
struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
struct vlan_group *vlgrp;
+#endif
uint32_t bd_number;
uint32_t rx_buffer_len;
uint32_t part_num;
uint16_t link_duplex;
spinlock_t tx_lock;
atomic_t irq_sem;
- struct work_struct tx_timeout_task;
+ struct work_struct reset_task;
+#ifdef ETHTOOL_PHYS_ID
struct timer_list blink_timer;
unsigned long led_status;
+#endif
/* TX */
struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
uint64_t hw_csum_tx_error;
uint32_t tx_int_delay;
uint32_t tx_timeout_count;
- boolean_t tx_int_delay_enable;
- boolean_t detect_tx_hung;
+ bool tx_int_delay_enable;
+ bool detect_tx_hung;
/* RX */
struct ixgb_desc_ring rx_ring;
+#ifdef CONFIG_IXGB_NAPI
+ struct napi_struct napi;
+#endif
uint64_t hw_csum_rx_error;
uint64_t hw_csum_rx_good;
uint32_t rx_int_delay;
- boolean_t rx_csum;
+ bool rx_csum;
/* OS defined structs */
struct net_device *netdev;
u16 msg_enable;
struct ixgb_hw_stats stats;
uint32_t alloc_rx_buff_failed;
- boolean_t have_msi;
+ bool have_msi;
+ unsigned long flags;
+};
+
+enum ixgb_state_t {
+ /* TBD
+ __IXGB_TESTING,
+ __IXGB_RESETTING,
+ */
+ __IXGB_DOWN
};
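The new flags word and __IXGB_DOWN bit presumably follow the usual kernel pattern of atomic state bits on an unsigned long; the call sites are not in this hunk, so the sketch below is an assumption, and ixgb_device_is_down is a name introduced here purely for illustration:

/* assumed usage: ixgb_down() would set_bit(__IXGB_DOWN, &adapter->flags),
 * ixgb_up() would clear it, and hot paths would test it before touching
 * a stopped device */
static inline bool ixgb_device_is_down(struct ixgb_adapter *adapter)
{
	return test_bit(__IXGB_DOWN, &adapter->flags);
}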
-/* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
+extern const char ixgb_driver_string[];
extern const char ixgb_driver_version[];
+extern int ixgb_up(struct ixgb_adapter *);
+extern void ixgb_down(struct ixgb_adapter *, bool);
+extern void ixgb_reset(struct ixgb_adapter *);
+extern int ixgb_setup_rx_resources(struct ixgb_adapter *);
+extern int ixgb_setup_tx_resources(struct ixgb_adapter *);
+extern void ixgb_free_rx_resources(struct ixgb_adapter *);
+extern void ixgb_free_tx_resources(struct ixgb_adapter *);
+extern void ixgb_update_stats(struct ixgb_adapter *);
+
+extern void ixgb_check_options(struct ixgb_adapter *);
+extern int ethtool_ioctl(struct ifreq *);
+extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+
+
#endif /* _IXGB_H_ */
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
*******************************************************************************/
+
#include "ixgb_hw.h"
#include "ixgb_ee.h"
+
/* Local prototypes */
static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw);
-static void ixgb_shift_out_bits(struct ixgb_hw *hw,
- uint16_t data,
- uint16_t count);
+static void ixgb_shift_out_bits(struct ixgb_hw *hw, uint16_t data,
+ uint16_t count);
static void ixgb_standby_eeprom(struct ixgb_hw *hw);
static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw);
* eecd_reg - EECD's current value
*****************************************************************************/
static void
-ixgb_raise_clock(struct ixgb_hw *hw,
- uint32_t *eecd_reg)
+ixgb_raise_clock(struct ixgb_hw *hw, uint32_t *eecd_reg)
{
- /* Raise the clock input to the EEPROM (by setting the SK bit), and then
- * wait 50 microseconds.
- */
- *eecd_reg = *eecd_reg | IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, *eecd_reg);
- udelay(50);
- return;
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and
+ * then wait 50 microseconds. */
+ *eecd_reg = *eecd_reg | IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ usec_delay(50);
+ return;
}
/******************************************************************************
* eecd_reg - EECD's current value
*****************************************************************************/
static void
-ixgb_lower_clock(struct ixgb_hw *hw,
- uint32_t *eecd_reg)
+ixgb_lower_clock(struct ixgb_hw *hw, uint32_t *eecd_reg)
{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
- */
- *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, *eecd_reg);
- udelay(50);
- return;
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and
+ * then wait 50 microseconds. */
+ *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ usec_delay(50);
+ return;
}
/******************************************************************************
* count - number of bits to shift out
*****************************************************************************/
static void
-ixgb_shift_out_bits(struct ixgb_hw *hw,
- uint16_t data,
- uint16_t count)
+ixgb_shift_out_bits(struct ixgb_hw *hw, uint16_t data, uint16_t count)
{
- uint32_t eecd_reg;
- uint32_t mask;
-
- /* We need to shift "count" bits out to the EEPROM. So, value in the
- * "data" parameter will be shifted out to the EEPROM one bit at a time.
- * In order to do this, "data" must be broken down into bits.
- */
- mask = 0x01 << (count - 1);
- eecd_reg = IXGB_READ_REG(hw, EECD);
- eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
- do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
- */
- eecd_reg &= ~IXGB_EECD_DI;
-
- if(data & mask)
- eecd_reg |= IXGB_EECD_DI;
-
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-
- udelay(50);
-
- ixgb_raise_clock(hw, &eecd_reg);
- ixgb_lower_clock(hw, &eecd_reg);
-
- mask = mask >> 1;
-
- } while(mask);
-
- /* We leave the "DI" bit set to "0" when we leave this routine. */
- eecd_reg &= ~IXGB_EECD_DI;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- return;
+ uint32_t eecd_reg;
+ uint32_t mask;
+
+ /* We need to shift "count" bits out to the EEPROM. So, value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a
+ * time. In order to do this, "data" must be broken down into bits. */
+ mask = 0x01 << (count - 1);
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+ eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK
+ * bit controls the clock input to the EEPROM). A "0" is
+ * shifted out to the EEPROM by setting "DI" to "0" and then
+ * raising and then lowering the clock. */
+ eecd_reg &= ~IXGB_EECD_DI;
+
+ if(data & mask)
+ eecd_reg |= IXGB_EECD_DI;
+
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+
+ usec_delay(50);
+
+ ixgb_raise_clock(hw, &eecd_reg);
+ ixgb_lower_clock(hw, &eecd_reg);
+
+ mask = mask >> 1;
+
+ } while(mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd_reg &= ~IXGB_EECD_DI;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ return;
}
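Every EEPROM transaction is framed this way: an opcode shifted out MSB-first, then an address, then data clocked in or out. A sketch of a word read built from these primitives, mirroring ixgb_read_eeprom defined later in this file (the helper name here is illustrative only):

/* sketch: read one 16-bit word from the 64x16 microwire EEPROM */
static uint16_t ixgb_ee_read_word_sketch(struct ixgb_hw *hw, uint16_t offset)
{
	uint16_t data;

	ixgb_setup_eeprom(hw);                          /* CS high, SK/DI low */
	ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3); /* opcode 0x6 = 110b  */
	ixgb_shift_out_bits(hw, offset, 6);             /* 6-bit word address */
	data = ixgb_shift_in_bits(hw);                  /* clock in 16 bits   */
	ixgb_standby_eeprom(hw);                        /* deselect the part  */
	return data;
}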
/******************************************************************************
static uint16_t
ixgb_shift_in_bits(struct ixgb_hw *hw)
{
- uint32_t eecd_reg;
- uint32_t i;
- uint16_t data;
+ uint32_t eecd_reg;
+ uint32_t i;
+ uint16_t data;
- /* In order to read a register from the EEPROM, we need to shift 16 bits
- * in from the EEPROM. Bits are "shifted in" by raising the clock input to
- * the EEPROM (setting the SK bit), and then reading the value of the "DO"
- * bit. During this "shifting in" process the "DI" bit should always be
- * clear..
- */
+ /* In order to read a register from the EEPROM, we need to shift 16
+ * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+ * input to the EEPROM (setting the SK bit), and then reading the value
+ * of the "DO" bit. During this "shifting in" process the "DI" bit
+ * should always be clear. */
- eecd_reg = IXGB_READ_REG(hw, EECD);
+ eecd_reg = IXGB_READ_REG(hw, EECD);
- eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
- data = 0;
+ eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
+ data = 0;
- for(i = 0; i < 16; i++) {
- data = data << 1;
- ixgb_raise_clock(hw, &eecd_reg);
+ for(i = 0; i < 16; i++) {
+ data = data << 1;
+ ixgb_raise_clock(hw, &eecd_reg);
- eecd_reg = IXGB_READ_REG(hw, EECD);
+ eecd_reg = IXGB_READ_REG(hw, EECD);
- eecd_reg &= ~(IXGB_EECD_DI);
- if(eecd_reg & IXGB_EECD_DO)
- data |= 1;
+ eecd_reg &= ~(IXGB_EECD_DI);
+ if(eecd_reg & IXGB_EECD_DO)
+ data |= 1;
- ixgb_lower_clock(hw, &eecd_reg);
- }
+ ixgb_lower_clock(hw, &eecd_reg);
+ }
- return data;
+ return data;
}
/******************************************************************************
static void
ixgb_setup_eeprom(struct ixgb_hw *hw)
{
- uint32_t eecd_reg;
+ uint32_t eecd_reg;
- eecd_reg = IXGB_READ_REG(hw, EECD);
+ eecd_reg = IXGB_READ_REG(hw, EECD);
- /* Clear SK and DI */
- eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI);
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ /* Clear SK and DI */
+ eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI);
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
- /* Set CS */
- eecd_reg |= IXGB_EECD_CS;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- return;
+ /* Set CS */
+ eecd_reg |= IXGB_EECD_CS;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ return;
}
/******************************************************************************
static void
ixgb_standby_eeprom(struct ixgb_hw *hw)
{
- uint32_t eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- /* Deselct EEPROM */
- eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- udelay(50);
-
- /* Clock high */
- eecd_reg |= IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- udelay(50);
-
- /* Select EEPROM */
- eecd_reg |= IXGB_EECD_CS;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- udelay(50);
-
- /* Clock low */
- eecd_reg &= ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- udelay(50);
- return;
+ uint32_t eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ /* Deselect EEPROM */
+ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ usec_delay(50);
+
+ /* Clock high */
+ eecd_reg |= IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ usec_delay(50);
+
+ /* Select EEPROM */
+ eecd_reg |= IXGB_EECD_CS;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ usec_delay(50);
+
+ /* Clock low */
+ eecd_reg &= ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ usec_delay(50);
+ return;
}
/******************************************************************************
static void
ixgb_clock_eeprom(struct ixgb_hw *hw)
{
- uint32_t eecd_reg;
+ uint32_t eecd_reg;
- eecd_reg = IXGB_READ_REG(hw, EECD);
+ eecd_reg = IXGB_READ_REG(hw, EECD);
- /* Rising edge of clock */
- eecd_reg |= IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- udelay(50);
+ /* Rising edge of clock */
+ eecd_reg |= IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ usec_delay(50);
- /* Falling edge of clock */
- eecd_reg &= ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- udelay(50);
- return;
+ /* Falling edge of clock */
+ eecd_reg &= ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ usec_delay(50);
+ return;
}
/******************************************************************************
static void
ixgb_cleanup_eeprom(struct ixgb_hw *hw)
{
- uint32_t eecd_reg;
+ uint32_t eecd_reg;
- eecd_reg = IXGB_READ_REG(hw, EECD);
+ eecd_reg = IXGB_READ_REG(hw, EECD);
- eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI);
+ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI);
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
- ixgb_clock_eeprom(hw);
- return;
+ ixgb_clock_eeprom(hw);
+ return;
}
/******************************************************************************
static boolean_t
ixgb_wait_eeprom_command(struct ixgb_hw *hw)
{
- uint32_t eecd_reg;
- uint32_t i;
-
- /* Toggle the CS line. This in effect tells to EEPROM to actually execute
- * the command in question.
- */
- ixgb_standby_eeprom(hw);
-
- /* Now read DO repeatedly until is high (equal to '1'). The EEEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
- */
- for(i = 0; i < 200; i++) {
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- if(eecd_reg & IXGB_EECD_DO)
- return (TRUE);
-
- udelay(50);
- }
- ASSERT(0);
- return (FALSE);
+ uint32_t eecd_reg;
+ uint32_t i;
+
+ /* Toggle the CS line. This in effect tells the EEPROM to actually
+ * execute the command in question. */
+ ixgb_standby_eeprom(hw);
+
+ /* Now read DO repeatedly until it is high (equal to '1'). The EEPROM
+ * will signal that the command has been completed by raising the DO
+ * signal. If DO does not go high within 10 milliseconds (200 polls of
+ * 50 usec each, below), then error out. */
+ for(i = 0; i < 200; i++) {
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ if(eecd_reg & IXGB_EECD_DO)
+ return (TRUE);
+
+ usec_delay(50);
+ }
+ ASSERT(0);
+ return (FALSE);
}
/******************************************************************************
* hw - Struct containing variables accessed by shared code
*
* Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
* valid.
*
* Returns:
boolean_t
ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
{
- uint16_t checksum = 0;
- uint16_t i;
+ uint16_t checksum = 0;
+ uint16_t i;
- for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
- checksum += ixgb_read_eeprom(hw, i);
+ for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
+ checksum += (uint16_t)ixgb_read_eeprom(hw, i);
- if(checksum == (uint16_t) EEPROM_SUM)
- return (TRUE);
- else
- return (FALSE);
+ if(checksum == (uint16_t)EEPROM_SUM)
+ return (TRUE);
+ else
+ return (FALSE);
}
/******************************************************************************
void
ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
{
- uint16_t checksum = 0;
- uint16_t i;
+ uint16_t checksum = 0;
+ uint16_t i;
- for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
- checksum += ixgb_read_eeprom(hw, i);
+ for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
+ checksum += ixgb_read_eeprom(hw, i);
- checksum = (uint16_t) EEPROM_SUM - checksum;
+ checksum = (uint16_t)EEPROM_SUM - checksum;
- ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
- return;
+ ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
+ return;
}
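Both routines above rely on the same invariant: the 16-bit words 0..63 (the last being the word at EEPROM_CHECKSUM_REG) must sum to EEPROM_SUM, 0xBABA, modulo 2^16. A self-contained sketch of that relationship, with `ee` standing in for hw->eeprom and the function name introduced here for illustration:

/* sketch: the invariant checked by ixgb_validate_eeprom_checksum and
 * restored by ixgb_update_eeprom_checksum */
static bool ixgb_ee_image_valid(const uint16_t *ee)
{
	uint16_t sum = 0;
	uint16_t i;

	for (i = 0; i < IXGB_EEPROM_SIZE; i++)
		sum += ee[i];                   /* includes the checksum word */
	return sum == (uint16_t)EEPROM_SUM;     /* 0xBABA */
}

The checksum word itself is therefore EEPROM_SUM minus the sum of words 0..62, which is exactly what ixgb_update_eeprom_checksum() stores.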
/******************************************************************************
void
ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- /* Prepare the EEPROM for writing */
- ixgb_setup_eeprom(hw);
+ /* Prepare the EEPROM for writing */
+ ixgb_setup_eeprom(hw);
- /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode
- * plus 4-bit dummy). This puts the EEPROM into write/erase mode.
- */
- ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5);
- ixgb_shift_out_bits(hw, 0, 4);
+ /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit
+ * opcode plus 4-bit dummy). This puts the EEPROM into write/erase
+ * mode. */
+ ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5);
+ ixgb_shift_out_bits(hw, 0, 4);
- /* Prepare the EEPROM */
- ixgb_standby_eeprom(hw);
+ /* Prepare the EEPROM */
+ ixgb_standby_eeprom(hw);
- /* Send the Write command (3-bit opcode + 6-bit addr) */
- ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3);
- ixgb_shift_out_bits(hw, offset, 6);
+ /* Send the Write command (3-bit opcode + 6-bit addr) */
+ ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3);
+ ixgb_shift_out_bits(hw, offset, 6);
- /* Send the data */
- ixgb_shift_out_bits(hw, data, 16);
+ /* Send the data */
+ ixgb_shift_out_bits(hw, data, 16);
- ixgb_wait_eeprom_command(hw);
+ ixgb_wait_eeprom_command(hw);
- /* Recover from write */
- ixgb_standby_eeprom(hw);
+ /* Recover from write */
+ ixgb_standby_eeprom(hw);
- /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit
- * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase
- * mode.
- */
- ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
- ixgb_shift_out_bits(hw, 0, 4);
+ /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit
+ * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase
+ * mode. */
+ ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
+ ixgb_shift_out_bits(hw, 0, 4);
- /* Done with writing */
- ixgb_cleanup_eeprom(hw);
+ /* Done with writing */
+ ixgb_cleanup_eeprom(hw);
- /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
- ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
+ /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
+ ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
- return;
+ return;
}
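Callers that change any word at or below EEPROM_CHECKSUM_REG must rewrite the checksum word afterwards; ixgb_set_eeprom in the ethtool code below does exactly this. A minimal sketch (the helper name is illustrative only):

/* sketch: change one EEPROM word and keep the image self-consistent */
static void ixgb_ee_update_word_sketch(struct ixgb_hw *hw,
				       uint16_t offset, uint16_t new_word)
{
	ixgb_write_eeprom(hw, offset, new_word);
	if (offset <= EEPROM_CHECKSUM_REG)      /* word covered by checksum */
		ixgb_update_eeprom_checksum(hw);
}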
/******************************************************************************
* The 16-bit value read from the eeprom
*****************************************************************************/
uint16_t
-ixgb_read_eeprom(struct ixgb_hw *hw,
- uint16_t offset)
+ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t offset)
{
- uint16_t data;
+ uint16_t data;
- /* Prepare the EEPROM for reading */
- ixgb_setup_eeprom(hw);
+ /* Prepare the EEPROM for reading */
+ ixgb_setup_eeprom(hw);
- /* Send the READ command (opcode + addr) */
- ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
- /*
- * We have a 64 word EEPROM, there are 6 address bits
- */
- ixgb_shift_out_bits(hw, offset, 6);
+ /* Send the READ command (opcode + addr) */
+ ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
+ /*
+ * We have a 64 word EEPROM, there are 6 address bits
+ */
+ ixgb_shift_out_bits(hw, offset, 6);
- /* Read the data */
- data = ixgb_shift_in_bits(hw);
+ /* Read the data */
+ data = ixgb_shift_in_bits(hw);
- /* End this read operation */
- ixgb_standby_eeprom(hw);
+ /* End this read operation */
+ ixgb_standby_eeprom(hw);
- return (data);
+ return (data);
}
/******************************************************************************
boolean_t
ixgb_get_eeprom_data(struct ixgb_hw *hw)
{
- uint16_t i;
+ uint16_t i;
uint16_t checksum = 0;
- struct ixgb_ee_map_type *ee_map;
+ struct ixgb_ee_map_type *ee_map;
- DEBUGFUNC("ixgb_get_eeprom_data");
+ DEBUGFUNC("ixgb_get_eeprom_data");
- ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+ ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGOUT("ixgb_ee: Reading eeprom data\n");
- for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
+ DEBUGOUT("ixgb_ee: Reading eeprom data\n");
+ for(i = 0; i < IXGB_EEPROM_SIZE; i++) {
uint16_t ee_data;
- ee_data = ixgb_read_eeprom(hw, i);
- checksum += ee_data;
- hw->eeprom[i] = cpu_to_le16(ee_data);
- }
-
- if (checksum != (uint16_t) EEPROM_SUM) {
- DEBUGOUT("ixgb_ee: Checksum invalid.\n");
- /* clear the init_ctrl_reg_1 to signify that the cache is
- * invalidated */
- ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
- return (FALSE);
- }
-
- if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
- != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- DEBUGOUT("ixgb_ee: Signature invalid.\n");
- return(FALSE);
- }
-
- return(TRUE);
+
+ ee_data = ixgb_read_eeprom(hw, i);
+ checksum += ee_data;
+ hw->eeprom[i] = le16_to_cpu(ee_data);
+ }
+
+ if(checksum != (uint16_t)EEPROM_SUM) {
+ DEBUGOUT("ixgb_ee: Checksum invalid.\n");
+ /* clear the init_ctrl_reg_1 to signify that the cache is
+ * invalidated */
+ ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
+ return (FALSE);
+ }
+
+ if((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK))
+ != le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) {
+ DEBUGOUT("ixgb_ee: Signature invalid.\n");
+ return (FALSE);
+ }
+
+ return (TRUE);
}
/******************************************************************************
* FALSE: otherwise.
******************************************************************************/
static boolean_t
-ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
+ixgb_check_and_get_eeprom_data(struct ixgb_hw *hw)
{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
- == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- return (TRUE);
- } else {
- return ixgb_get_eeprom_data(hw);
- }
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK))
+ == le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) {
+ return (TRUE);
+ } else {
+ return ixgb_get_eeprom_data(hw);
+ }
}
/******************************************************************************
* Returns:
* Word at indexed offset in eeprom, if valid, 0 otherwise.
******************************************************************************/
-__le16
+uint16_t
ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
{
- if ((index < IXGB_EEPROM_SIZE) &&
- (ixgb_check_and_get_eeprom_data(hw) == TRUE)) {
- return(hw->eeprom[index]);
- }
+ if((index < IXGB_EEPROM_SIZE) &&
+ (ixgb_check_and_get_eeprom_data(hw) == TRUE)) {
+ return (hw->eeprom[index]);
+ }
- return(0);
+ return (0);
}
/******************************************************************************
* Returns: None.
******************************************************************************/
void
-ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
- uint8_t *mac_addr)
+ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr)
{
- int i;
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+ int i;
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGFUNC("ixgb_get_ee_mac_addr");
+ DEBUGFUNC("ixgb_get_ee_mac_addr");
- if (ixgb_check_and_get_eeprom_data(hw) == TRUE) {
- for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
- mac_addr[i] = ee_map->mac_addr[i];
- DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
- }
- }
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE) {
+ for(i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
+ mac_addr[i] = ee_map->mac_addr[i];
+ DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
+ }
+ }
}
uint32_t
ixgb_get_ee_pba_number(struct ixgb_hw *hw)
{
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
- | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
+ | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG]) << 16));
- return(0);
+ return (0);
}
uint16_t
ixgb_get_ee_device_id(struct ixgb_hw *hw)
{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->device_id));
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return (le16_to_cpu(ee_map->device_id));
- return (0);
+ return (0);
}
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#ifndef _IXGB_EE_H_
#define _IXGB_EE_H_
-#define IXGB_EEPROM_SIZE 64 /* Size in words */
+#define IXGB_EEPROM_SIZE 64 /* Size in words */
#define IXGB_ETH_LENGTH_OF_ADDRESS 6
/* EEPROM Commands */
-#define EEPROM_READ_OPCODE 0x6 /* EERPOM read opcode */
-#define EEPROM_WRITE_OPCODE 0x5 /* EERPOM write opcode */
-#define EEPROM_ERASE_OPCODE 0x7 /* EERPOM erase opcode */
-#define EEPROM_EWEN_OPCODE 0x13 /* EERPOM erase/write enable */
-#define EEPROM_EWDS_OPCODE 0x10 /* EERPOM erast/write disable */
+#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
/* EEPROM MAP (Word Offsets) */
#define EEPROM_IA_1_2_REG 0x0000
/* EEPROM structure */
struct ixgb_ee_map_type {
- uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
- __le16 compatibility;
- __le16 reserved1[4];
- __le32 pba_number;
- __le16 init_ctrl_reg_1;
- __le16 subsystem_id;
- __le16 subvendor_id;
- __le16 device_id;
- __le16 vendor_id;
- __le16 init_ctrl_reg_2;
- __le16 oem_reserved[16];
- __le16 swdpins_reg;
- __le16 circuit_ctrl_reg;
- uint8_t d3_power;
- uint8_t d0_power;
- __le16 reserved2[28];
- __le16 checksum;
+ uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
+ uint16_t compatibility;
+ uint16_t reserved1[4];
+ uint32_t pba_number;
+ uint16_t init_ctrl_reg_1;
+ uint16_t subsystem_id;
+ uint16_t subvendor_id;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t init_ctrl_reg_2;
+ uint16_t oem_reserved[16];
+ uint16_t swdpins_reg;
+ uint16_t circuit_ctrl_reg;
+ uint8_t d3_power;
+ uint8_t d0_power;
+ uint16_t reserved2[28];
+ uint16_t checksum;
};
/* EEPROM Functions */
void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t reg, uint16_t data);
-#endif /* IXGB_EE_H */
+#endif /* _IXGB_EE_H_ */
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
*******************************************************************************/
+
/* ethtool support for ixgb */
#include "ixgb.h"
+#ifdef SIOCETHTOOL
#include <asm/uaccess.h>
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
-
#define IXGB_ALL_RAR_ENTRIES 16
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+
+#ifdef ETHTOOL_GSTATS
struct ixgb_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
{"tx_restart_queue", IXGB_STAT(restart_queue) },
{"rx_long_length_errors", IXGB_STAT(stats.roc)},
{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
+#ifdef NETIF_F_TSO
{"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
{"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
+#endif
{"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
{"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
{"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
{"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)}
};
-#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
+#define IXGB_STATS_LEN \
+ (sizeof(ixgb_gstrings_stats) / sizeof(struct ixgb_stats))
+#endif /* ETHTOOL_GSTATS */
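The IXGB_STAT() macro used to populate ixgb_gstrings_stats is not part of this hunk; judging from how sizeof_stat and stat_offset are consumed in ixgb_get_ethtool_stats below, it presumably expands to a size/offset pair, roughly:

/* assumed shape of the macro (definition not shown in this patch): it
 * must supply the field's size and its byte offset within
 * struct ixgb_adapter, matching struct ixgb_stats above; offsetof()
 * comes from <linux/stddef.h> */
#define IXGB_STAT(m) sizeof(((struct ixgb_adapter *)0)->m), \
		     offsetof(struct ixgb_adapter, m)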
static int
ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL;
- if(netif_carrier_ok(adapter->netdev)) {
+ if (netif_carrier_ok(adapter->netdev)) {
ecmd->speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL;
} else {
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- if(ecmd->autoneg == AUTONEG_ENABLE ||
+ if (ecmd->autoneg == AUTONEG_ENABLE ||
ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
return -EINVAL;
-
- if(netif_running(adapter->netdev)) {
+
+ if (netif_running(adapter->netdev)) {
ixgb_down(adapter, TRUE);
ixgb_reset(adapter);
ixgb_up(adapter);
}
static void
-ixgb_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ixgb_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
-
+
pause->autoneg = AUTONEG_DISABLE;
-
- if(hw->fc.type == ixgb_fc_rx_pause)
+
+ if (hw->fc.type == ixgb_fc_rx_pause)
pause->rx_pause = 1;
- else if(hw->fc.type == ixgb_fc_tx_pause)
+ else if (hw->fc.type == ixgb_fc_tx_pause)
pause->tx_pause = 1;
- else if(hw->fc.type == ixgb_fc_full) {
+ else if (hw->fc.type == ixgb_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
}
static int
-ixgb_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ixgb_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
-
- if(pause->autoneg == AUTONEG_ENABLE)
+
+ if (pause->autoneg == AUTONEG_ENABLE)
return -EINVAL;
- if(pause->rx_pause && pause->tx_pause)
+ if (pause->rx_pause && pause->tx_pause)
hw->fc.type = ixgb_fc_full;
- else if(pause->rx_pause && !pause->tx_pause)
+ else if (pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgb_fc_rx_pause;
- else if(!pause->rx_pause && pause->tx_pause)
+ else if (!pause->rx_pause && pause->tx_pause)
hw->fc.type = ixgb_fc_tx_pause;
- else if(!pause->rx_pause && !pause->tx_pause)
+ else if (!pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgb_fc_none;
- if(netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev)) {
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
-
+
return 0;
}
adapter->rx_csum = data;
- if(netif_running(netdev)) {
- ixgb_down(adapter,TRUE);
+ if (netif_running(netdev)) {
+ ixgb_down(adapter, TRUE);
ixgb_up(adapter);
ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
return 0;
}
-
+
static uint32_t
ixgb_get_tx_csum(struct net_device *netdev)
{
static int
ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
{
+
if (data)
netdev->features |= NETIF_F_HW_CSUM;
else
return 0;
}
+#ifdef NETIF_F_TSO
static int
ixgb_set_tso(struct net_device *netdev, uint32_t data)
{
- if(data)
+
+ if (data)
netdev->features |= NETIF_F_TSO;
else
netdev->features &= ~NETIF_F_TSO;
return 0;
-}
+}
+#endif /* NETIF_F_TSO */
static uint32_t
ixgb_get_msglevel(struct net_device *netdev)
}
#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
-static int
+static int
ixgb_get_regs_len(struct net_device *netdev)
{
#define IXGB_REG_DUMP_LEN 136*sizeof(uint32_t)
}
static void
-ixgb_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+ixgb_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
/* General Registers */
- *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
- *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */
- *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */
- *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */
- *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */
+ *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
+ *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */
+ *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */
+ *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */
+ *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */
/* Interrupt */
- *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */
- *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */
- *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */
- *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */
+ *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */
+ *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */
+ *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */
+ *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */
/* Receive */
- *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */
- *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */
- *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */
- *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */
- *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */
- *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */
- *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */
- *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */
- *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */
- *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */
- *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
- *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
+ *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */
+ *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */
+ *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */
+ *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */
+ *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */
+ *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */
+ *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */
+ *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */
+ *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */
+ *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */
+ *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
+ *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
/* there are 16 RAR entries in hardware, we only use 3 */
- for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
+ for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
}
/* Transmit */
- *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */
- *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */
- *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */
- *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */
- *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */
- *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */
- *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */
- *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */
- *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */
- *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */
+ *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */
+ *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */
+ *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */
+ *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */
+ *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */
+ *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */
+ *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */
+ *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */
+ *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */
+ *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */
/* Physical */
- *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */
- *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */
- *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */
- *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */
- *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */
- *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */
- *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */
- *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */
- *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */
- *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */
- *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */
- *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */
- *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */
+ *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */
+ *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */
+ *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */
+ *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */
+ *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */
+ *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */
+ *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */
+ *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */
+ *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */
+ *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */
+ *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */
+ *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */
+ *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */
+
/* Statistics */
- *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */
- *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */
- *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */
- *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */
- *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */
- *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */
- *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */
- *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */
- *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */
- *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */
- *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */
- *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */
- *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */
- *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */
- *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */
- *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */
- *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */
- *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */
- *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */
- *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */
- *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */
- *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */
- *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */
- *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */
- *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */
- *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */
- *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */
- *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */
- *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */
- *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */
- *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */
- *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */
- *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */
- *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */
- *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */
- *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */
- *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */
- *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */
- *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */
- *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */
- *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */
- *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */
- *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */
- *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */
- *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */
- *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */
- *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */
- *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */
- *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */
- *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */
- *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */
- *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */
- *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */
- *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */
- *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */
- *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */
- *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */
- *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */
- *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
- *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
+ *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */
+ *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */
+ *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */
+ *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */
+ *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */
+ *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */
+ *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */
+ *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */
+ *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */
+ *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */
+ *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */
+ *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */
+ *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */
+ *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */
+ *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */
+ *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */
+ *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */
+ *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */
+ *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */
+ *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */
+ *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */
+ *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */
+ *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */
+ *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */
+ *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */
+ *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */
+ *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */
+ *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */
+ *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */
+ *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */
+ *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */
+ *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */
+ *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */
+ *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */
+ *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */
+ *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */
+ *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */
+ *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */
+ *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */
+ *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */
+ *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */
+ *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */
+ *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */
+ *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */
+ *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */
+ *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */
+ *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */
+ *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */
+ *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */
+ *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */
+ *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */
+ *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */
+ *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */
+ *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */
+ *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */
+ *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */
+ *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */
+ *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */
+ *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
+ *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
regs->len = (reg - reg_start) * sizeof(uint32_t);
}
}
static int
-ixgb_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, uint8_t *bytes)
+ixgb_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *bytes)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
- __le16 *eeprom_buff;
+ uint16_t *eeprom_buff;
int i, max_len, first_word, last_word;
int ret_val = 0;
- if(eeprom->len == 0) {
+ if (eeprom->len == 0) {
ret_val = -EINVAL;
goto geeprom_error;
}
max_len = ixgb_get_eeprom_len(netdev);
- if(eeprom->offset > eeprom->offset + eeprom->len) {
+ if (eeprom->offset > eeprom->offset + eeprom->len) {
ret_val = -EINVAL;
goto geeprom_error;
}
- if((eeprom->offset + eeprom->len) > max_len)
+ if ((eeprom->offset + eeprom->len) > max_len)
eeprom->len = (max_len - eeprom->offset);
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(__le16) *
- (last_word - first_word + 1), GFP_KERNEL);
- if(!eeprom_buff)
+ eeprom_buff =
+ kmalloc(sizeof(uint16_t) * (last_word - first_word + 1),
+ GFP_KERNEL);
+ if (!eeprom_buff)
return -ENOMEM;
/* note the eeprom was good because the driver loaded */
- for(i = 0; i <= (last_word - first_word); i++) {
+ for (i = 0; i <= (last_word - first_word); i++) {
eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
}
memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
- eeprom->len);
+ eeprom->len);
kfree(eeprom_buff);
geeprom_error:
}
static int
-ixgb_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, uint8_t *bytes)
+ixgb_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *bytes)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
int max_len, first_word, last_word;
uint16_t i;
- if(eeprom->len == 0)
+ if (eeprom->len == 0)
return -EINVAL;
- if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
max_len = ixgb_get_eeprom_len(netdev);
- if(eeprom->offset > eeprom->offset + eeprom->len)
+ if (eeprom->offset > eeprom->offset + eeprom->len)
return -EINVAL;
- if((eeprom->offset + eeprom->len) > max_len)
+ if ((eeprom->offset + eeprom->len) > max_len)
eeprom->len = (max_len - eeprom->offset);
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if(!eeprom_buff)
+ if (!eeprom_buff)
return -ENOMEM;
ptr = (void *)eeprom_buff;
- if(eeprom->offset & 1) {
+ if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
ptr++;
}
- if((eeprom->offset + eeprom->len) & 1) {
+ if ((eeprom->offset + eeprom->len) & 1) {
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
- eeprom_buff[last_word - first_word]
+ eeprom_buff[last_word - first_word]
= ixgb_read_eeprom(hw, last_word);
}
memcpy(ptr, bytes, eeprom->len);
- for(i = 0; i <= (last_word - first_word); i++)
+
+ for (i = 0; i <= (last_word - first_word); i++)
ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
/* Update the checksum over the first part of the EEPROM if needed */
- if(first_word <= EEPROM_CHECKSUM_REG)
+ if (first_word <= EEPROM_CHECKSUM_REG)
ixgb_update_eeprom_checksum(hw);
kfree(eeprom_buff);
}
static void
-ixgb_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ixgb_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, ixgb_driver_name, 32);
+ strncpy(drvinfo->driver, ixgb_driver_name, 32);
strncpy(drvinfo->version, ixgb_driver_version, 32);
strncpy(drvinfo->fw_version, "N/A", 32);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
}
static void
-ixgb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ixgb_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- ring->rx_max_pending = MAX_RXD;
+ ring->rx_max_pending = MAX_RXD;
ring->tx_max_pending = MAX_TXD;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = rxdr->count;
- ring->tx_pending = txdr->count;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
-static int
-ixgb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static int
+ixgb_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
int err;
tx_old = adapter->tx_ring;
rx_old = adapter->rx_ring;
- if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- if(netif_running(adapter->netdev))
- ixgb_down(adapter,TRUE);
+ if (netif_running(adapter->netdev))
+ ixgb_down(adapter, TRUE);
- rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD);
- rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD);
- rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
+ rx_ring->count = max(ring->rx_pending, (uint32_t)MIN_RXD);
+ rx_ring->count = min(rx_ring->count, (uint32_t)MAX_RXD);
+ rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
- txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD);
- txdr->count = min(txdr->count,(uint32_t)MAX_TXD);
- txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
+ tx_ring->count = max(ring->tx_pending, (uint32_t)MIN_TXD);
+ tx_ring->count = min(tx_ring->count, (uint32_t)MAX_TXD);
+ tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
- if(netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
- if((err = ixgb_setup_rx_resources(adapter)))
+ if ((err = ixgb_setup_rx_resources(adapter)))
goto err_setup_rx;
- if((err = ixgb_setup_tx_resources(adapter)))
+ if ((err = ixgb_setup_tx_resources(adapter)))
goto err_setup_tx;
/* save the new, restore the old in order to free it,
ixgb_free_tx_resources(adapter);
adapter->rx_ring = rx_new;
adapter->tx_ring = tx_new;
- if((err = ixgb_up(adapter)))
+ if ((err = ixgb_up(adapter)))
return err;
ixgb_set_speed_duplex(netdev);
}
{
struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
- if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
+ if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
ixgb_led_off(&adapter->hw);
else
ixgb_led_on(&adapter->hw);
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- if (!data)
- data = INT_MAX;
+ if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+ data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
- if(!adapter->blink_timer.function) {
+ if (!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer);
adapter->blink_timer.function = ixgb_led_blink_callback;
adapter->blink_timer.data = (unsigned long)adapter;
}
mod_timer(&adapter->blink_timer, jiffies);
-
msleep_interruptible(data * 1000);
del_timer_sync(&adapter->blink_timer);
ixgb_led_off(&adapter->hw);
return 0;
}
-static int
+static int
ixgb_get_stats_count(struct net_device *netdev)
{
return IXGB_STATS_LEN;
}
-static void
-ixgb_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, uint64_t *data)
+static void
+ixgb_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
+ uint64_t *data)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
int i;
ixgb_update_stats(adapter);
- for(i = 0; i < IXGB_STATS_LEN; i++) {
- char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
- data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
- sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+ for (i = 0; i < IXGB_STATS_LEN; i++) {
+ char *p = (char *)adapter + ixgb_gstrings_stats[i].stat_offset;
+
+ data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
+ sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
}
}
-static void
+static void
ixgb_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
int i;
- switch(stringset) {
+ switch (stringset) {
case ETH_SS_STATS:
- for(i=0; i < IXGB_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- ixgb_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ for (i = 0; i < IXGB_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ ixgb_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
}
break;
}
}
static struct ethtool_ops ixgb_ethtool_ops = {
- .get_settings = ixgb_get_settings,
- .set_settings = ixgb_set_settings,
- .get_drvinfo = ixgb_get_drvinfo,
- .get_regs_len = ixgb_get_regs_len,
- .get_regs = ixgb_get_regs,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = ixgb_get_eeprom_len,
- .get_eeprom = ixgb_get_eeprom,
- .set_eeprom = ixgb_set_eeprom,
- .get_ringparam = ixgb_get_ringparam,
- .set_ringparam = ixgb_set_ringparam,
- .get_pauseparam = ixgb_get_pauseparam,
- .set_pauseparam = ixgb_set_pauseparam,
- .get_rx_csum = ixgb_get_rx_csum,
- .set_rx_csum = ixgb_set_rx_csum,
- .get_tx_csum = ixgb_get_tx_csum,
- .set_tx_csum = ixgb_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_msglevel = ixgb_get_msglevel,
- .set_msglevel = ixgb_set_msglevel,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ixgb_set_tso,
- .get_strings = ixgb_get_strings,
- .phys_id = ixgb_phys_id,
- .get_stats_count = ixgb_get_stats_count,
- .get_ethtool_stats = ixgb_get_ethtool_stats,
- .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_settings = ixgb_get_settings,
+ .set_settings = ixgb_set_settings,
+ .get_drvinfo = ixgb_get_drvinfo,
+ .get_regs_len = ixgb_get_regs_len,
+ .get_regs = ixgb_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = ixgb_get_eeprom_len,
+ .get_eeprom = ixgb_get_eeprom,
+ .set_eeprom = ixgb_set_eeprom,
+ .get_ringparam = ixgb_get_ringparam,
+ .set_ringparam = ixgb_set_ringparam,
+ .get_pauseparam = ixgb_get_pauseparam,
+ .set_pauseparam = ixgb_set_pauseparam,
+ .get_rx_csum = ixgb_get_rx_csum,
+ .set_rx_csum = ixgb_set_rx_csum,
+ .get_tx_csum = ixgb_get_tx_csum,
+ .set_tx_csum = ixgb_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_msglevel = ixgb_get_msglevel,
+ .set_msglevel = ixgb_set_msglevel,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ixgb_set_tso,
+#endif
+ .get_strings = ixgb_get_strings,
+ .phys_id = ixgb_phys_id,
+ .get_stats_count = ixgb_get_stats_count,
+ .get_ethtool_stats = ixgb_get_ethtool_stats,
+#ifdef ETHTOOL_GPERMADDR
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
}
+#endif /* SIOCETHTOOL */
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
*******************************************************************************/
+
/* ixgb_hw.c
* Shared functions for accessing and configuring the adapter
*/
/* Local function prototypes */
-static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr);
+static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t *mc_addr);
static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value);
static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
-static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
-
-static void ixgb_clear_vfta(struct ixgb_hw *hw);
-
-static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
-
-static uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw,
- uint32_t reg_address,
- uint32_t phy_address,
- uint32_t device_type);
-
-static boolean_t ixgb_setup_fc(struct ixgb_hw *hw);
-
-static boolean_t mac_addr_valid(uint8_t *mac_addr);
+uint32_t ixgb_mac_reset(struct ixgb_hw *hw);
-static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
+uint32_t
+ixgb_mac_reset(struct ixgb_hw *hw)
{
- uint32_t ctrl_reg;
+ uint32_t ctrl_reg;
- ctrl_reg = IXGB_CTRL0_RST |
- IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
- IXGB_CTRL0_SDP2_DIR |
- IXGB_CTRL0_SDP1_DIR |
- IXGB_CTRL0_SDP0_DIR |
- IXGB_CTRL0_SDP3 | /* Initial value 1101 */
- IXGB_CTRL0_SDP2 |
- IXGB_CTRL0_SDP0;
+ ctrl_reg = IXGB_CTRL0_RST |
+ IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
+ IXGB_CTRL0_SDP2_DIR |
+ IXGB_CTRL0_SDP1_DIR |
+ IXGB_CTRL0_SDP0_DIR |
+ IXGB_CTRL0_SDP3 | /* Initial value 1101 */
+ IXGB_CTRL0_SDP2 |
+ IXGB_CTRL0_SDP0;
#ifdef HP_ZX1
- /* Workaround for 82597EX reset errata */
- IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
+ /* Workaround for 82597EX reset errata */
+ IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
#else
- IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
+ IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
#endif
- /* Delay a few ms just to allow the reset to complete */
- msleep(IXGB_DELAY_AFTER_RESET);
- ctrl_reg = IXGB_READ_REG(hw, CTRL0);
+ /* Delay a few ms just to allow the reset to complete */
+ msec_delay(IXGB_DELAY_AFTER_RESET);
+ ctrl_reg = IXGB_READ_REG(hw, CTRL0);
#ifdef DBG
- /* Make sure the self-clearing global reset bit did self clear */
- ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
+ /* Make sure the self-clearing global reset bit did self clear */
+ ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif
- if (hw->phy_type == ixgb_phy_type_txn17401) {
- ixgb_optics_reset(hw);
- }
+ if(hw->phy_type == ixgb_phy_type_txn17401) {
+ ixgb_optics_reset(hw);
+ }
- return ctrl_reg;
+ return ctrl_reg;
}
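/* Reading the comment above: "Initial value 1101" lists SDP3..SDP0 left
 * to right, which matches the mask -- SDP3, SDP2 and SDP0 are driven
 * high, SDP1 low, with all four pins made outputs by the *_DIR bits. */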
/******************************************************************************
boolean_t
ixgb_adapter_stop(struct ixgb_hw *hw)
{
- uint32_t ctrl_reg;
- uint32_t icr_reg;
-
- DEBUGFUNC("ixgb_adapter_stop");
-
- /* If we are stopped or resetting exit gracefully and wait to be
- * started again before accessing the hardware.
- */
- if(hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
- return FALSE;
- }
-
- /* Set the Adapter Stopped flag so other driver functions stop
- * touching the Hardware.
- */
- hw->adapter_stopped = TRUE;
-
- /* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
- IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
-
- /* Disable the Transmit and Receive units. Then delay to allow
- * any pending transactions to complete before we hit the MAC with
- * the global reset.
- */
- IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
- IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
- msleep(IXGB_DELAY_BEFORE_RESET);
-
- /* Issue a global reset to the MAC. This will reset the chip's
- * transmit, receive, DMA, and link units. It will not effect
- * the current PCI configuration. The global reset bit is self-
- * clearing, and should clear within a microsecond.
- */
- DEBUGOUT("Issuing a global reset to MAC\n");
-
- ctrl_reg = ixgb_mac_reset(hw);
-
- /* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
- IXGB_WRITE_REG(hw, IMC, 0xffffffff);
-
- /* Clear any pending interrupt events. */
- icr_reg = IXGB_READ_REG(hw, ICR);
-
- return (ctrl_reg & IXGB_CTRL0_RST);
-}
+ uint32_t ctrl_reg;
+ uint32_t icr_reg;
+
+ DEBUGFUNC("ixgb_adapter_stop");
+
+ /* If we are stopped or resetting exit gracefully and wait to be
+ * started again before accessing the hardware. */
+ if(hw->adapter_stopped) {
+ DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
+ return FALSE;
+ }
+
+ /* Set the Adapter Stopped flag so other driver functions stop touching
+ * the Hardware. */
+ hw->adapter_stopped = TRUE;
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
+
+ /* Disable the Transmit and Receive units. Then delay to allow any
+ * pending transactions to complete before we hit the MAC with the
+ * global reset. */
+ IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
+ IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
+ msec_delay(IXGB_DELAY_BEFORE_RESET);
+ /* Issue a global reset to the MAC. This will reset the chip's
+ * transmit, receive, DMA, and link units. It will not affect the
+ * current PCI configuration. The global reset bit is self-clearing,
+ * and should clear within a microsecond. */
+ DEBUGOUT("Issuing a global reset to MAC\n");
+
+ ctrl_reg = ixgb_mac_reset(hw);
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ IXGB_WRITE_REG(hw, IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr_reg = IXGB_READ_REG(hw, ICR);
+
+ return (boolean_t) (ctrl_reg & IXGB_CTRL0_RST);
+}
/******************************************************************************
* Identifies the vendor of the optics module on the adapter. The SR adapters
static ixgb_xpak_vendor
ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
{
- uint32_t i;
- uint16_t vendor_name[5];
- ixgb_xpak_vendor xpak_vendor;
-
- DEBUGFUNC("ixgb_identify_xpak_vendor");
-
- /* Read the first few bytes of the vendor string from the XPAK NVR
- * registers. These are standard XENPAK/XPAK registers, so all XPAK
- * devices should implement them. */
- for (i = 0; i < 5; i++) {
- vendor_name[i] = ixgb_read_phy_reg(hw,
- MDIO_PMA_PMD_XPAK_VENDOR_NAME
- + i, IXGB_PHY_ADDRESS,
- MDIO_PMA_PMD_DID);
- }
-
- /* Determine the actual vendor */
- if (vendor_name[0] == 'I' &&
- vendor_name[1] == 'N' &&
- vendor_name[2] == 'T' &&
- vendor_name[3] == 'E' && vendor_name[4] == 'L') {
- xpak_vendor = ixgb_xpak_vendor_intel;
- } else {
- xpak_vendor = ixgb_xpak_vendor_infineon;
- }
-
- return (xpak_vendor);
+ uint32_t i;
+ uint16_t vendor_name[5];
+ ixgb_xpak_vendor xpak_vendor;
+
+ DEBUGFUNC("ixgb_identify_xpak_vendor");
+
+ /* Read the first few bytes of the vendor string from the XPAK NVR
+ * registers. These are standard XENPAK/XPAK registers, so all XPAK
+ * devices should implement them. */
+ for(i = 0; i < 5; i++) {
+ vendor_name[i] =
+ ixgb_read_phy_reg(hw, MDIO_PMA_PMD_XPAK_VENDOR_NAME + i,
+ IXGB_PHY_ADDRESS, MDIO_PMA_PMD_DID);
+ }
+
+ /* Determine the actual vendor */
+ if (vendor_name[0] == 'I' &&
+ vendor_name[1] == 'N' &&
+ vendor_name[2] == 'T' &&
+ vendor_name[3] == 'E' &&
+ vendor_name[4] == 'L') {
+ xpak_vendor = ixgb_xpak_vendor_intel;
+ } else {
+ xpak_vendor = ixgb_xpak_vendor_infineon;
+ }
+ return (xpak_vendor);
}
/******************************************************************************
static ixgb_phy_type
ixgb_identify_phy(struct ixgb_hw *hw)
{
- ixgb_phy_type phy_type;
- ixgb_xpak_vendor xpak_vendor;
-
- DEBUGFUNC("ixgb_identify_phy");
-
- /* Infer the transceiver/phy type from the device id */
- switch (hw->device_id) {
- case IXGB_DEVICE_ID_82597EX:
- DEBUGOUT("Identified TXN17401 optics\n");
- phy_type = ixgb_phy_type_txn17401;
- break;
-
- case IXGB_DEVICE_ID_82597EX_SR:
- /* The SR adapters carry two different types of XPAK optics
- * modules; read the vendor identifier to determine the exact
- * type of optics. */
- xpak_vendor = ixgb_identify_xpak_vendor(hw);
- if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
- phy_type = ixgb_phy_type_txn17201;
- } else {
- DEBUGOUT("Identified G6005 optics\n");
- phy_type = ixgb_phy_type_g6005;
- }
- break;
- case IXGB_DEVICE_ID_82597EX_LR:
- DEBUGOUT("Identified G6104 optics\n");
- phy_type = ixgb_phy_type_g6104;
- break;
- case IXGB_DEVICE_ID_82597EX_CX4:
- DEBUGOUT("Identified CX4\n");
- xpak_vendor = ixgb_identify_xpak_vendor(hw);
- if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
- phy_type = ixgb_phy_type_txn17201;
- } else {
- DEBUGOUT("Identified G6005 optics\n");
- phy_type = ixgb_phy_type_g6005;
- }
- break;
- default:
- DEBUGOUT("Unknown physical layer module\n");
- phy_type = ixgb_phy_type_unknown;
- break;
- }
-
- return (phy_type);
+ ixgb_phy_type phy_type;
+ ixgb_xpak_vendor xpak_vendor;
+
+ DEBUGFUNC("ixgb_identify_phy");
+
+ /* Infer the transceiver/phy type from the device id */
+ switch(hw->device_id) {
+ case IXGB_DEVICE_ID_82597EX:
+ DEBUGOUT("Identified TXN17401 optics\n");
+ phy_type = ixgb_phy_type_txn17401;
+ break;
+
+ case IXGB_DEVICE_ID_82597EX_SR:
+ /* The SR adapters carry two different types of XPAK optics
+ * modules; read the vendor identifier to determine the exact
+ * type of optics. */
+ xpak_vendor = ixgb_identify_xpak_vendor(hw);
+ if(xpak_vendor == ixgb_xpak_vendor_intel) {
+ DEBUGOUT("Identified TXN17201 optics\n");
+ phy_type = ixgb_phy_type_txn17201;
+ } else {
+ DEBUGOUT("Identified G6005 optics\n");
+ phy_type = ixgb_phy_type_g6005;
+ }
+ break;
+
+ case IXGB_DEVICE_ID_82597EX_LR:
+ DEBUGOUT("Identified G6104 optics\n");
+ phy_type = ixgb_phy_type_g6104;
+ break;
+
+ case IXGB_DEVICE_ID_82597EX_CX4:
+ DEBUGOUT("Identified CX4\n");
+ xpak_vendor = ixgb_identify_xpak_vendor(hw);
+ if(xpak_vendor == ixgb_xpak_vendor_intel) {
+ DEBUGOUT("Identified TXN17201 optics\n");
+ phy_type = ixgb_phy_type_txn17201;
+ } else {
+ DEBUGOUT("Identified G6005 optics\n");
+ phy_type = ixgb_phy_type_g6005;
+ }
+ break;
+
+ default:
+ DEBUGOUT("Unknown physical layer module\n");
+ phy_type = ixgb_phy_type_unknown;
+ break;
+ }
+
+ return (phy_type);
}
/******************************************************************************
boolean_t
ixgb_init_hw(struct ixgb_hw *hw)
{
- uint32_t i;
- uint32_t ctrl_reg;
- boolean_t status;
+ uint32_t i;
+ uint32_t ctrl_reg;
+ boolean_t status;
- DEBUGFUNC("ixgb_init_hw");
+ DEBUGFUNC("ixgb_init_hw");
- /* Issue a global reset to the MAC. This will reset the chip's
- * transmit, receive, DMA, and link units. It will not effect
- * the current PCI configuration. The global reset bit is self-
- * clearing, and should clear within a microsecond.
- */
- DEBUGOUT("Issuing a global reset to MAC\n");
+ /* Issue a global reset to the MAC. This will reset the chip's
+ * transmit, receive, DMA, and link units. It will not affect the
+ * current PCI configuration. The global reset bit is self-clearing,
+ * and should clear within a microsecond. */
+ DEBUGOUT("Issuing a global reset to MAC\n");
- ctrl_reg = ixgb_mac_reset(hw);
+ ctrl_reg = ixgb_mac_reset(hw);
- DEBUGOUT("Issuing an EE reset to MAC\n");
+ DEBUGOUT("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
- /* Workaround for 82597EX reset errata */
- IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
+ /* Workaround for 82597EX reset errata */
+ IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
#else
- IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
+ IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
#endif
- /* Delay a few ms just to allow the reset to complete */
- msleep(IXGB_DELAY_AFTER_EE_RESET);
+ /* Delay a few ms just to allow the reset to complete */
+ msec_delay(IXGB_DELAY_AFTER_EE_RESET);
- if (ixgb_get_eeprom_data(hw) == FALSE) {
- return(FALSE);
- }
+ if(ixgb_get_eeprom_data(hw) == FALSE) {
+ return (FALSE);
+ }
- /* Use the device id to determine the type of phy/transceiver. */
- hw->device_id = ixgb_get_ee_device_id(hw);
- hw->phy_type = ixgb_identify_phy(hw);
+ /* Use the device id to determine the type of phy/transceiver. */
+ hw->device_id = ixgb_get_ee_device_id(hw);
+ hw->phy_type = ixgb_identify_phy(hw);
- /* Setup the receive addresses.
- * Receive Address Registers (RARs 0 - 15).
- */
- ixgb_init_rx_addrs(hw);
+ /* Setup the receive addresses. Receive Address Registers
+ * (RARs 0 - 15). */
+ ixgb_init_rx_addrs(hw);
- /*
- * Check that a valid MAC address has been set.
- * If it is not valid, we fail hardware init.
- */
- if (!mac_addr_valid(hw->curr_mac_addr)) {
- DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
- return(FALSE);
- }
+ /*
+ * Check that a valid MAC address has been set.
+ * If it is not valid, we fail hardware init.
+ */
+ if(!mac_addr_valid(hw->curr_mac_addr)) {
+ DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
+ return (FALSE);
+ }
- /* tell the routines in this file they can access hardware again */
- hw->adapter_stopped = FALSE;
+ /* tell the routines in this file they can access hardware again */
+ hw->adapter_stopped = FALSE;
- /* Fill in the bus_info structure */
- ixgb_get_bus_info(hw);
+ /* Fill in the bus_info structure */
+ ixgb_get_bus_info(hw);
- /* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
- for(i = 0; i < IXGB_MC_TBL_SIZE; i++)
- IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for(i = 0; i < IXGB_MC_TBL_SIZE; i++)
+ IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
- /* Zero out the VLAN Filter Table Array */
- ixgb_clear_vfta(hw);
+ /* Zero out the VLAN Filter Table Array */
+ ixgb_clear_vfta(hw);
- /* Zero all of the hardware counters */
- ixgb_clear_hw_cntrs(hw);
+ /* Zero all of the hardware counters */
+ ixgb_clear_hw_cntrs(hw);
- /* Call a subroutine to setup flow control. */
- status = ixgb_setup_fc(hw);
+ /* Call a subroutine to setup flow control. */
+ status = ixgb_setup_fc(hw);
- /* 82597EX errata: Call check-for-link in case lane deskew is locked */
- ixgb_check_for_link(hw);
+ /* 82597EX errata: Call check-for-link in case lane deskew is locked */
+ ixgb_check_for_link(hw);
- return (status);
+ return (status);
}
/******************************************************************************
* of the receive addresss registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
*****************************************************************************/
-static void
+void
ixgb_init_rx_addrs(struct ixgb_hw *hw)
{
- uint32_t i;
-
- DEBUGFUNC("ixgb_init_rx_addrs");
-
- /*
- * If the current mac address is valid, assume it is a software override
- * to the permanent address.
- * Otherwise, use the permanent address from the eeprom.
- */
- if (!mac_addr_valid(hw->curr_mac_addr)) {
-
- /* Get the MAC address from the eeprom for later reference */
- ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
-
- DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
- } else {
-
- /* Setup the receive address. */
- DEBUGOUT("Overriding MAC Address in RAR[0]\n");
- DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
-
- ixgb_rar_set(hw, hw->curr_mac_addr, 0);
- }
-
- /* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
- for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
- /* Write high reg first to disable the AV bit first */
- IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
- IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
- }
-
- return;
+ uint32_t i;
+
+ DEBUGFUNC("ixgb_init_rx_addrs");
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if(!mac_addr_valid(hw->curr_mac_addr)) {
+
+ /* Get the MAC address from the eeprom for later reference */
+ ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
+
+ DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ",
+ hw->curr_mac_addr[0], hw->curr_mac_addr[1],
+ hw->curr_mac_addr[2]);
+ DEBUGOUT3("%.2X %.2X %.2X\n", hw->curr_mac_addr[3],
+ hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ } else {
+
+ /* Setup the receive address. */
+ DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+ DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
+ hw->curr_mac_addr[0], hw->curr_mac_addr[1],
+ hw->curr_mac_addr[2]);
+ DEBUGOUT3("%.2X %.2X %.2X\n", hw->curr_mac_addr[3],
+ hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+
+ ixgb_rar_set(hw, hw->curr_mac_addr, 0);
+ }
+
+ /* Zero out the other 15 receive addresses. */
+ DEBUGOUT("Clearing RAR[1-15]\n");
+ for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
+ IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ }
+
+ return;
}
/******************************************************************************
* multicast table.
*****************************************************************************/
void
-ixgb_mc_addr_list_update(struct ixgb_hw *hw,
- uint8_t *mc_addr_list,
- uint32_t mc_addr_count,
- uint32_t pad)
+ixgb_mc_addr_list_update(struct ixgb_hw *hw, uint8_t *mc_addr_list,
+ uint32_t mc_addr_count, uint32_t pad)
{
- uint32_t hash_value;
- uint32_t i;
- uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */
-
- DEBUGFUNC("ixgb_mc_addr_list_update");
-
- /* Set the new number of MC addresses that we are being requested to use. */
- hw->num_mc_addrs = mc_addr_count;
-
- /* Clear RAR[1-15] */
- DEBUGOUT(" Clearing RAR[1-15]\n");
- for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
- IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
- IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
- }
-
- /* Clear the MTA */
- DEBUGOUT(" Clearing MTA\n");
- for(i = 0; i < IXGB_MC_TBL_SIZE; i++) {
- IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
- }
-
- /* Add the new addresses */
- for(i = 0; i < mc_addr_count; i++) {
- DEBUGOUT(" Adding the multicast addresses:\n");
- DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 1],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 2],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 3],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 4],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 5]);
-
- /* Place this multicast address in the RAR if there is room, *
- * else put it in the MTA
- */
- if(rar_used_count < IXGB_RAR_ENTRIES) {
- ixgb_rar_set(hw,
- mc_addr_list +
- (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
- rar_used_count);
- DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
- rar_used_count++;
- } else {
- hash_value = ixgb_hash_mc_addr(hw,
- mc_addr_list +
- (i *
- (IXGB_ETH_LENGTH_OF_ADDRESS
- + pad)));
-
- DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
-
- ixgb_mta_set(hw, hash_value);
- }
- }
-
- DEBUGOUT("MC Update Complete\n");
- return;
+ uint32_t hash_value;
+ uint32_t i;
+ uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */
+
+ DEBUGFUNC("ixgb_mc_addr_list_update");
+
+ /* Set the new number of MC addresses that we are being requested to
+ * use. */
+ hw->num_mc_addrs = mc_addr_count;
+
+ /* Clear RAR[1-15] */
+ DEBUGOUT(" Clearing RAR[1-15]\n");
+ for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
+ /* Write high reg first to disable the AV bit first */
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ }
+
+ /* Clear the MTA */
+ DEBUGOUT(" Clearing MTA\n");
+ for(i = 0; i < IXGB_MC_TBL_SIZE; i++) {
+ IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ }
+
+ /* Add the new addresses */
+ for(i = 0; i < mc_addr_count; i++) {
+ DEBUGOUT(" Adding the multicast addresses:\n");
+ DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) + 1],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) + 2],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) + 3],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) + 4],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) + 5]);
+
+ /* Place this multicast address in the RAR if there is room,
+ * else put it in the MTA */
+ if(rar_used_count < IXGB_RAR_ENTRIES) {
+ ixgb_rar_set(hw,
+ mc_addr_list +
+ (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
+ rar_used_count);
+ DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
+ rar_used_count++;
+ } else {
+ hash_value =
+ ixgb_hash_mc_addr(hw,
+ mc_addr_list +
+ (i *
+ (IXGB_ETH_LENGTH_OF_ADDRESS +
+ pad)));
+
+ DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+
+ ixgb_mta_set(hw, hash_value);
+ }
+ }
+
+ DEBUGOUT("MC Update Complete\n");
+ return;
}
/******************************************************************************
* The hash value
*****************************************************************************/
static uint32_t
-ixgb_hash_mc_addr(struct ixgb_hw *hw,
- uint8_t *mc_addr)
+ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t *mc_addr)
{
- uint32_t hash_value = 0;
-
- DEBUGFUNC("ixgb_hash_mc_addr");
-
- /* The portion of the address that is used for the hash table is
- * determined by the mc_filter_type setting.
- */
- switch (hw->mc_filter_type) {
- /* [0] [1] [2] [3] [4] [5]
- * 01 AA 00 12 34 56
- * LSB MSB - According to H/W docs */
- case 0:
- /* [47:36] i.e. 0x563 for above example address */
- hash_value =
- ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
- break;
- case 1: /* [46:35] i.e. 0xAC6 for above example address */
- hash_value =
- ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
- break;
- case 2: /* [45:34] i.e. 0x5D8 for above example address */
- hash_value =
- ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
- break;
- case 3: /* [43:32] i.e. 0x634 for above example address */
- hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
- break;
- default:
- /* Invalid mc_filter_type, what should we do? */
- DEBUGOUT("MC filter type param set incorrectly\n");
- ASSERT(0);
- break;
- }
-
- hash_value &= 0xFFF;
- return (hash_value);
+ uint32_t hash_value = 0;
+
+ DEBUGFUNC("ixgb_hash_mc_addr");
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting. */
+ switch(hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB - According to H/W docs */
+ case 0:
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value =
+ ((mc_addr[4] >> 4) | (((uint16_t)mc_addr[5]) << 4));
+ break;
+ case 1: /* [46:35] i.e. 0xAC6 for above
+ * example address */
+ hash_value =
+ ((mc_addr[4] >> 3) | (((uint16_t)mc_addr[5]) << 5));
+ break;
+ case 2: /* [45:34] i.e. 0x5D8 for above
+ * example address */
+ hash_value =
+ ((mc_addr[4] >> 2) | (((uint16_t)mc_addr[5]) << 6));
+ break;
+ case 3: /* [43:32] i.e. 0x634 for above
+ * example address */
+ hash_value = ((mc_addr[4]) | (((uint16_t)mc_addr[5]) << 8));
+ break;
+ default:
+ /* Invalid mc_filter_type, what should we do? */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ hash_value &= 0xFFF;
+ return (hash_value);
}
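/* The filter types simply slide a 12-bit window across the top bits of
 * the wire-order address. A minimal standalone check of the two window
 * positions whose example values are easiest to verify (filter types 0
 * and 3, for the example address in the comment above): */
#include <assert.h>
#include <stdint.h>

static uint32_t hash_window(const uint8_t *mc_addr, int filter_type)
{
	uint32_t hash = 0;

	if (filter_type == 0)		/* bits [47:36] */
		hash = (mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4);
	else if (filter_type == 3)	/* bits [43:32] */
		hash = mc_addr[4] | ((uint16_t)mc_addr[5] << 8);
	return hash & 0xFFF;
}

int main(void)
{
	const uint8_t addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

	assert(hash_window(addr, 0) == 0x563);
	assert(hash_window(addr, 3) == 0x634);
	return 0;
}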
/******************************************************************************
* hash_value - Multicast address hash value
*****************************************************************************/
static void
-ixgb_mta_set(struct ixgb_hw *hw,
- uint32_t hash_value)
+ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value)
{
- uint32_t hash_bit, hash_reg;
- uint32_t mta_reg;
-
- /* The MTA is a register array of 128 32-bit registers.
- * It is treated like an array of 4096 bits. We want to set
- * bit BitArray[hash_value]. So we figure out what register
- * the bit is in, read it, OR in the new bit, then write
- * back the new value. The register is determined by the
- * upper 7 bits of the hash value and the bit within that
- * register are determined by the lower 5 bits of the value.
- */
- hash_reg = (hash_value >> 5) & 0x7F;
- hash_bit = hash_value & 0x1F;
-
- mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
-
- mta_reg |= (1 << hash_bit);
-
- IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
-
- return;
+ uint32_t hash_bit, hash_reg;
+ uint32_t mta_reg;
+
+ /* The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[hash_value]. So we figure out what register the bit is in,
+ * read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the hash value and the
+ * bit within that register are determined by the lower 5 bits of the
+ * value. */
+ hash_reg = (hash_value >> 5) & 0x7F;
+ hash_bit = hash_value & 0x1F;
+ mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
+ mta_reg |= (1 << hash_bit);
+ IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
+ return;
}
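/* Worked example of the index split above: for hash value 0x563 the
 * word index is 0x563 >> 5 = 43 of the 128 MTA registers, and the bit
 * within that word is 0x563 & 0x1F = 3. A standalone check: */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t hash_value = 0x563;
	uint32_t hash_reg = (hash_value >> 5) & 0x7F;	/* upper 7 bits */
	uint32_t hash_bit = hash_value & 0x1F;		/* lower 5 bits */

	assert(hash_reg == 43 && hash_bit == 3);
	return 0;
}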
/******************************************************************************
* index - Receive address register to write
*****************************************************************************/
void
-ixgb_rar_set(struct ixgb_hw *hw,
- uint8_t *addr,
- uint32_t index)
+ixgb_rar_set(struct ixgb_hw *hw, uint8_t *addr, uint32_t index)
{
- uint32_t rar_low, rar_high;
+ uint32_t rar_low, rar_high;
- DEBUGFUNC("ixgb_rar_set");
+ DEBUGFUNC("ixgb_rar_set");
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
- */
- rar_low = ((uint32_t) addr[0] |
- ((uint32_t)addr[1] << 8) |
- ((uint32_t)addr[2] << 16) |
- ((uint32_t)addr[3] << 24));
+ /* HW expects these in little endian so we reverse the byte order from
+ * network order (big endian) to little endian */
+ rar_low = ((uint32_t)addr[0] |
+ ((uint32_t)addr[1] << 8) |
+ ((uint32_t)addr[2] << 16) |
+ ((uint32_t)addr[3] << 24));
- rar_high = ((uint32_t) addr[4] |
- ((uint32_t)addr[5] << 8) |
- IXGB_RAH_AV);
+ rar_high = ((uint32_t)addr[4] |
+ ((uint32_t)addr[5] << 8) |
+ IXGB_RAH_AV);
- IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
- IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
- return;
+ IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ return;
}
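/* A standalone sketch of the byte-order swap above, using a made-up
 * address 00:A0:C9:12:34:56: the low register ends up 0x12C9A000 and
 * the high register 0x5634 before IXGB_RAH_AV is OR'd in. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0xA0, 0xC9, 0x12, 0x34, 0x56 };
	uint32_t rar_low, rar_high;

	rar_low = (uint32_t)addr[0] |
		  ((uint32_t)addr[1] << 8) |
		  ((uint32_t)addr[2] << 16) |
		  ((uint32_t)addr[3] << 24);
	rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	assert(rar_low == 0x12C9A000 && rar_high == 0x5634);
	return 0;
}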
/******************************************************************************
* value - Value to write into VLAN filter table
*****************************************************************************/
void
-ixgb_write_vfta(struct ixgb_hw *hw,
- uint32_t offset,
- uint32_t value)
+ixgb_write_vfta(struct ixgb_hw *hw, uint32_t offset, uint32_t value)
{
- IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
- return;
+ IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ return;
}
/******************************************************************************
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
+void
ixgb_clear_vfta(struct ixgb_hw *hw)
{
- uint32_t offset;
+ uint32_t offset;
- for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
- IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
- return;
+ for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
+ IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
+ return;
}
/******************************************************************************
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static boolean_t
+boolean_t
ixgb_setup_fc(struct ixgb_hw *hw)
{
- uint32_t ctrl_reg;
- uint32_t pap_reg = 0; /* by default, assume no pause time */
- boolean_t status = TRUE;
-
- DEBUGFUNC("ixgb_setup_fc");
-
- /* Get the current control reg 0 settings */
- ctrl_reg = IXGB_READ_REG(hw, CTRL0);
-
- /* Clear the Receive Pause Enable and Transmit Pause Enable bits */
- ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
-
- /* The possible values of the "flow_control" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames
- * but we do not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
- * other: Invalid.
- */
- switch (hw->fc.type) {
- case ixgb_fc_none: /* 0 */
- /* Set CMDC bit to disable Rx Flow control */
- ctrl_reg |= (IXGB_CTRL0_CMDC);
- break;
- case ixgb_fc_rx_pause: /* 1 */
- /* RX Flow control is enabled, and TX Flow control is
- * disabled.
- */
- ctrl_reg |= (IXGB_CTRL0_RPE);
- break;
- case ixgb_fc_tx_pause: /* 2 */
- /* TX Flow control is enabled, and RX Flow control is
- * disabled, by a software over-ride.
- */
- ctrl_reg |= (IXGB_CTRL0_TPE);
- pap_reg = hw->fc.pause_time;
- break;
- case ixgb_fc_full: /* 3 */
- /* Flow control (both RX and TX) is enabled by a software
- * over-ride.
- */
- ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
- pap_reg = hw->fc.pause_time;
- break;
- default:
- /* We should never get here. The value should be 0-3. */
- DEBUGOUT("Flow control param set incorrectly\n");
- ASSERT(0);
- break;
- }
-
- /* Write the new settings */
- IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
-
- if (pap_reg != 0) {
- IXGB_WRITE_REG(hw, PAP, pap_reg);
- }
-
- /* Set the flow control receive threshold registers. Normally,
- * these registers will be set to a default threshold that may be
- * adjusted later by the driver's runtime code. However, if the
- * ability to transmit pause frames in not enabled, then these
- * registers will be set to 0.
- */
- if(!(hw->fc.type & ixgb_fc_tx_pause)) {
- IXGB_WRITE_REG(hw, FCRTL, 0);
- IXGB_WRITE_REG(hw, FCRTH, 0);
- } else {
- /* We need to set up the Receive Threshold high and low water
- * marks as well as (optionally) enabling the transmission of XON
- * frames. */
- if(hw->fc.send_xon) {
- IXGB_WRITE_REG(hw, FCRTL,
- (hw->fc.low_water | IXGB_FCRTL_XONE));
- } else {
- IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
- }
- IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
- }
- return (status);
+ uint32_t ctrl_reg;
+ uint32_t pap_reg = 0; /* by default, assume no pause time */
+ boolean_t status = TRUE;
+
+ DEBUGFUNC("ixgb_setup_fc");
+
+ /* Get the current control reg 0 settings */
+ ctrl_reg = IXGB_READ_REG(hw, CTRL0);
+
+ /* Clear the Receive Pause Enable and Transmit Pause Enable bits */
+ ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
+
+ /* The possible values of the "flow_control" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames but not send
+ * pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do not
+ * support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: Invalid. */
+ switch(hw->fc.type) {
+ case ixgb_fc_none: /* 0 */
+ /* Set CMDC bit to disable Rx Flow control */
+ ctrl_reg |= (IXGB_CTRL0_CMDC);
+ break;
+ case ixgb_fc_rx_pause: /* 1 */
+ /* RX Flow control is enabled, and TX Flow control is disabled. */
+ ctrl_reg |= (IXGB_CTRL0_RPE);
+ break;
+ case ixgb_fc_tx_pause: /* 2 */
+ /* TX Flow control is enabled, and RX Flow control is disabled,
+ * by a software over-ride. */
+ ctrl_reg |= (IXGB_CTRL0_TPE);
+ pap_reg = hw->fc.pause_time;
+ break;
+ case ixgb_fc_full: /* 3 */
+ /* Flow control (both RX and TX) is enabled by a software
+ * over-ride. */
+ ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
+ pap_reg = hw->fc.pause_time;
+ break;
+ default:
+ /* We should never get here. The value should be 0-3. */
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* Write the new settings */
+ IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
+
+ if(pap_reg != 0) {
+ IXGB_WRITE_REG(hw, PAP, pap_reg);
+ }
+
+ /* Set the flow control receive threshold registers. Normally, these
+ * registers will be set to a default threshold that may be adjusted
+ * later by the driver's runtime code. However, if the ability to
+ * transmit pause frames in not enabled, then these registers will be
+ * set to 0. */
+ if(!(hw->fc.type & ixgb_fc_tx_pause)) {
+ IXGB_WRITE_REG(hw, FCRTL, 0);
+ IXGB_WRITE_REG(hw, FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames. */
+ if(hw->fc.send_xon) {
+ IXGB_WRITE_REG(hw, FCRTL,
+ (hw->fc.low_water | IXGB_FCRTL_XONE));
+ } else {
+ IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
+ }
+ IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
+ }
+ return (status);
}
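/* The "fc.type & ixgb_fc_tx_pause" test above works because the flow
 * control enum doubles as a bit mask: ixgb_fc_full (3) is exactly
 * ixgb_fc_rx_pause (1) | ixgb_fc_tx_pause (2). A standalone check of
 * that property, with the values copied from the enum in ixgb_hw.h: */
#include <assert.h>

enum { fc_none = 0, fc_rx_pause = 1, fc_tx_pause = 2, fc_full = 3 };

int main(void)
{
	assert(fc_full == (fc_rx_pause | fc_tx_pause));
	assert((fc_full & fc_tx_pause) && (fc_tx_pause & fc_tx_pause));
	assert(!(fc_rx_pause & fc_tx_pause) && !(fc_none & fc_tx_pause));
	return 0;
}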
/******************************************************************************
* This requires that first an address cycle command is sent, followed by a
* read command.
*****************************************************************************/
-static uint16_t
-ixgb_read_phy_reg(struct ixgb_hw *hw,
- uint32_t reg_address,
- uint32_t phy_address,
- uint32_t device_type)
+uint16_t
+ixgb_read_phy_reg(struct ixgb_hw *hw, uint32_t reg_address,
+ uint32_t phy_address, uint32_t device_type)
{
- uint32_t i;
- uint32_t data;
- uint32_t command = 0;
+ uint32_t i;
+ uint32_t data;
+ uint32_t command = 0;
- ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
- ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
- ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
+ ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
+ ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
+ ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
- /* Setup and write the address cycle command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
+ /* Setup and write the address cycle command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
- IXGB_WRITE_REG(hw, MSCA, command);
+ IXGB_WRITE_REG(hw, MSCA, command);
/**************************************************************
** Check every 10 usec to see if the address cycle completed
** from the CPU Write to the Ready bit assertion.
**************************************************************/
- for(i = 0; i < 10; i++)
- {
- udelay(10);
+ for(i = 0; i < 10; i++) {
+ usec_delay(10);
- command = IXGB_READ_REG(hw, MSCA);
+ command = IXGB_READ_REG(hw, MSCA);
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ if((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
- /* Address cycle complete, setup and write the read command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
+ /* Address cycle complete, setup and write the read command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
- IXGB_WRITE_REG(hw, MSCA, command);
+ IXGB_WRITE_REG(hw, MSCA, command);
/**************************************************************
** Check every 10 usec to see if the read command completed
** from the CPU Write to the Ready bit assertion.
**************************************************************/
- for(i = 0; i < 10; i++)
- {
- udelay(10);
+ for(i = 0; i < 10; i++) {
+ usec_delay(10);
- command = IXGB_READ_REG(hw, MSCA);
+ command = IXGB_READ_REG(hw, MSCA);
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ if((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
- /* Operation is complete, get the data from the MDIO Read/Write Data
- * register and return.
- */
- data = IXGB_READ_REG(hw, MSRWD);
- data >>= IXGB_MSRWD_READ_DATA_SHIFT;
- return((uint16_t) data);
+ /* Operation is complete, get the data from the MDIO Read/Write Data
+ * register and return. */
+ data = IXGB_READ_REG(hw, MSRWD);
+ data >>= IXGB_MSRWD_READ_DATA_SHIFT;
+ return ((uint16_t)data);
}
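/* The 10-usec/100-usec MSCA poll above appears verbatim in both halves
 * of this function and again in ixgb_write_phy_reg below. A sketch of
 * a shared helper it could be factored into -- hypothetical name, and
 * it assumes the IXGB_READ_REG/usec_delay facilities this file already
 * uses: */
static boolean_t
ixgb_wait_mdi_idle(struct ixgb_hw *hw)
{
	uint32_t i, command;

	/* poll every 10 usec, 100 usec max, for the self-clearing
	 * COMMAND bit to drop */
	for(i = 0; i < 10; i++) {
		usec_delay(10);
		command = IXGB_READ_REG(hw, MSCA);
		if((command & IXGB_MSCA_MDI_COMMAND) == 0)
			return TRUE;
	}
	return FALSE;
}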
/******************************************************************************
* This requires that first an address cycle command is sent, followed by a
* write command.
*****************************************************************************/
-static void
-ixgb_write_phy_reg(struct ixgb_hw *hw,
- uint32_t reg_address,
- uint32_t phy_address,
- uint32_t device_type,
- uint16_t data)
+void
+ixgb_write_phy_reg(struct ixgb_hw *hw, uint32_t reg_address,
+ uint32_t phy_address, uint32_t device_type, uint16_t data)
{
- uint32_t i;
- uint32_t command = 0;
+ uint32_t i;
+ uint32_t command = 0;
- ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
- ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
- ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
+ ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
+ ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
+ ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
- /* Put the data in the MDIO Read/Write Data register */
- IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data);
+ /* Put the data in the MDIO Read/Write Data register */
+ IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data);
- /* Setup and write the address cycle command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
+ /* Setup and write the address cycle command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
- IXGB_WRITE_REG(hw, MSCA, command);
+ IXGB_WRITE_REG(hw, MSCA, command);
- /**************************************************************
- ** Check every 10 usec to see if the address cycle completed
- ** The COMMAND bit will clear when the operation is complete.
- ** This may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
+ /**************************************************************
+ ** Check every 10 usec to see if the address cycle completed
+ ** The COMMAND bit will clear when the operation is complete.
+ ** This may take as long as 64 usecs (we'll wait 100 usecs max)
+ ** from the CPU Write to the Ready bit assertion.
+ **************************************************************/
- for(i = 0; i < 10; i++)
- {
- udelay(10);
+ for(i = 0; i < 10; i++) {
+ usec_delay(10);
- command = IXGB_READ_REG(hw, MSCA);
+ command = IXGB_READ_REG(hw, MSCA);
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ if((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
- /* Address cycle complete, setup and write the write command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
+ /* Address cycle complete, setup and write the write command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
- IXGB_WRITE_REG(hw, MSCA, command);
+ IXGB_WRITE_REG(hw, MSCA, command);
- /**************************************************************
- ** Check every 10 usec to see if the read command completed
- ** The COMMAND bit will clear when the operation is complete.
- ** The write may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
+ /**************************************************************
+ ** Check every 10 usec to see if the read command completed
+ ** The COMMAND bit will clear when the operation is complete.
+ ** The write may take as long as 64 usecs (we'll wait 100 usecs max)
+ ** from the CPU Write to the Ready bit assertion.
+ **************************************************************/
- for(i = 0; i < 10; i++)
- {
- udelay(10);
+ for(i = 0; i < 10; i++) {
+ usec_delay(10);
- command = IXGB_READ_REG(hw, MSCA);
+ command = IXGB_READ_REG(hw, MSCA);
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ if((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
- /* Operation is complete, return. */
+ /* Operation is complete, return. */
}
/******************************************************************************
void
ixgb_check_for_link(struct ixgb_hw *hw)
{
- uint32_t status_reg;
- uint32_t xpcss_reg;
-
- DEBUGFUNC("ixgb_check_for_link");
-
- xpcss_reg = IXGB_READ_REG(hw, XPCSS);
- status_reg = IXGB_READ_REG(hw, STATUS);
-
- if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
- (status_reg & IXGB_STATUS_LU)) {
- hw->link_up = TRUE;
- } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
- (status_reg & IXGB_STATUS_LU)) {
- DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
- hw->link_up = ixgb_link_reset(hw);
- } else {
- /*
- * 82597EX errata. Since the lane deskew problem may prevent
- * link, reset the link before reporting link down.
- */
- hw->link_up = ixgb_link_reset(hw);
- }
- /* Anything else for 10 Gig?? */
+ uint32_t status_reg;
+ uint32_t xpcss_reg;
+
+ DEBUGFUNC("ixgb_check_for_link");
+
+ xpcss_reg = IXGB_READ_REG(hw, XPCSS);
+ status_reg = IXGB_READ_REG(hw, STATUS);
+
+ if((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
+ (status_reg & IXGB_STATUS_LU)) {
+ hw->link_up = TRUE;
+ } else if(!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
+ (status_reg & IXGB_STATUS_LU)) {
+ DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
+ hw->link_up = ixgb_link_reset(hw);
+ } else {
+ /*
+ * 82597EX errata. Since the lane deskew problem may prevent
+ * link, reset the link before reporting link down.
+ */
+ hw->link_up = ixgb_link_reset(hw);
+ }
+ /* Anything else for 10 Gig?? */
}
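/* The three arms above decode (XPCSS alignment, STATUS.LU) as follows:
 * aligned with LU set -> link is up; LU set but lanes not aligned ->
 * reset the link (lane deskew errata); anything else -> reset the link
 * once more before reporting it down (same errata). */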
/******************************************************************************
*
* Called by any function that needs to check the link status of the adapter.
*****************************************************************************/
-boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
+boolean_t
+ixgb_check_for_bad_link(struct ixgb_hw *hw)
{
- uint32_t newLFC, newRFC;
- boolean_t bad_link_returncode = FALSE;
-
- if (hw->phy_type == ixgb_phy_type_txn17401) {
- newLFC = IXGB_READ_REG(hw, LFC);
- newRFC = IXGB_READ_REG(hw, RFC);
- if ((hw->lastLFC + 250 < newLFC)
- || (hw->lastRFC + 250 < newRFC)) {
- DEBUGOUT
- ("BAD LINK! too many LFC/RFC since last check\n");
- bad_link_returncode = TRUE;
- }
- hw->lastLFC = newLFC;
- hw->lastRFC = newRFC;
- }
-
- return bad_link_returncode;
+ uint32_t newLFC, newRFC;
+ boolean_t bad_link_returncode = FALSE;
+
+ if(hw->phy_type == ixgb_phy_type_txn17401) {
+ newLFC = IXGB_READ_REG(hw, LFC);
+ newRFC = IXGB_READ_REG(hw, RFC);
+ if((hw->lastLFC + 250 < newLFC) || (hw->lastRFC + 250 < newRFC)) {
+ DEBUGOUT("BAD LINK! too many LFC/RFC since last check\n");
+ bad_link_returncode = TRUE;
+ }
+ hw->lastLFC = newLFC;
+ hw->lastRFC = newRFC;
+ }
+
+ return bad_link_returncode;
}
/******************************************************************************
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
+void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
- volatile uint32_t temp_reg;
-
- DEBUGFUNC("ixgb_clear_hw_cntrs");
-
- /* if we are stopped or resetting exit gracefully */
- if(hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is stopped!!!\n");
- return;
- }
-
- temp_reg = IXGB_READ_REG(hw, TPRL);
- temp_reg = IXGB_READ_REG(hw, TPRH);
- temp_reg = IXGB_READ_REG(hw, GPRCL);
- temp_reg = IXGB_READ_REG(hw, GPRCH);
- temp_reg = IXGB_READ_REG(hw, BPRCL);
- temp_reg = IXGB_READ_REG(hw, BPRCH);
- temp_reg = IXGB_READ_REG(hw, MPRCL);
- temp_reg = IXGB_READ_REG(hw, MPRCH);
- temp_reg = IXGB_READ_REG(hw, UPRCL);
- temp_reg = IXGB_READ_REG(hw, UPRCH);
- temp_reg = IXGB_READ_REG(hw, VPRCL);
- temp_reg = IXGB_READ_REG(hw, VPRCH);
- temp_reg = IXGB_READ_REG(hw, JPRCL);
- temp_reg = IXGB_READ_REG(hw, JPRCH);
- temp_reg = IXGB_READ_REG(hw, GORCL);
- temp_reg = IXGB_READ_REG(hw, GORCH);
- temp_reg = IXGB_READ_REG(hw, TORL);
- temp_reg = IXGB_READ_REG(hw, TORH);
- temp_reg = IXGB_READ_REG(hw, RNBC);
- temp_reg = IXGB_READ_REG(hw, RUC);
- temp_reg = IXGB_READ_REG(hw, ROC);
- temp_reg = IXGB_READ_REG(hw, RLEC);
- temp_reg = IXGB_READ_REG(hw, CRCERRS);
- temp_reg = IXGB_READ_REG(hw, ICBC);
- temp_reg = IXGB_READ_REG(hw, ECBC);
- temp_reg = IXGB_READ_REG(hw, MPC);
- temp_reg = IXGB_READ_REG(hw, TPTL);
- temp_reg = IXGB_READ_REG(hw, TPTH);
- temp_reg = IXGB_READ_REG(hw, GPTCL);
- temp_reg = IXGB_READ_REG(hw, GPTCH);
- temp_reg = IXGB_READ_REG(hw, BPTCL);
- temp_reg = IXGB_READ_REG(hw, BPTCH);
- temp_reg = IXGB_READ_REG(hw, MPTCL);
- temp_reg = IXGB_READ_REG(hw, MPTCH);
- temp_reg = IXGB_READ_REG(hw, UPTCL);
- temp_reg = IXGB_READ_REG(hw, UPTCH);
- temp_reg = IXGB_READ_REG(hw, VPTCL);
- temp_reg = IXGB_READ_REG(hw, VPTCH);
- temp_reg = IXGB_READ_REG(hw, JPTCL);
- temp_reg = IXGB_READ_REG(hw, JPTCH);
- temp_reg = IXGB_READ_REG(hw, GOTCL);
- temp_reg = IXGB_READ_REG(hw, GOTCH);
- temp_reg = IXGB_READ_REG(hw, TOTL);
- temp_reg = IXGB_READ_REG(hw, TOTH);
- temp_reg = IXGB_READ_REG(hw, DC);
- temp_reg = IXGB_READ_REG(hw, PLT64C);
- temp_reg = IXGB_READ_REG(hw, TSCTC);
- temp_reg = IXGB_READ_REG(hw, TSCTFC);
- temp_reg = IXGB_READ_REG(hw, IBIC);
- temp_reg = IXGB_READ_REG(hw, RFC);
- temp_reg = IXGB_READ_REG(hw, LFC);
- temp_reg = IXGB_READ_REG(hw, PFRC);
- temp_reg = IXGB_READ_REG(hw, PFTC);
- temp_reg = IXGB_READ_REG(hw, MCFRC);
- temp_reg = IXGB_READ_REG(hw, MCFTC);
- temp_reg = IXGB_READ_REG(hw, XONRXC);
- temp_reg = IXGB_READ_REG(hw, XONTXC);
- temp_reg = IXGB_READ_REG(hw, XOFFRXC);
- temp_reg = IXGB_READ_REG(hw, XOFFTXC);
- temp_reg = IXGB_READ_REG(hw, RJC);
- return;
+ volatile uint32_t temp_reg;
+
+ DEBUGFUNC("ixgb_clear_hw_cntrs");
+
+ /* if we are stopped or resetting exit gracefully */
+ if(hw->adapter_stopped) {
+ DEBUGOUT("Exiting because the adapter is stopped!!!\n");
+ return;
+ }
+
+ temp_reg = IXGB_READ_REG(hw, TPRL);
+ temp_reg = IXGB_READ_REG(hw, TPRH);
+ temp_reg = IXGB_READ_REG(hw, GPRCL);
+ temp_reg = IXGB_READ_REG(hw, GPRCH);
+ temp_reg = IXGB_READ_REG(hw, BPRCL);
+ temp_reg = IXGB_READ_REG(hw, BPRCH);
+ temp_reg = IXGB_READ_REG(hw, MPRCL);
+ temp_reg = IXGB_READ_REG(hw, MPRCH);
+ temp_reg = IXGB_READ_REG(hw, UPRCL);
+ temp_reg = IXGB_READ_REG(hw, UPRCH);
+ temp_reg = IXGB_READ_REG(hw, VPRCL);
+ temp_reg = IXGB_READ_REG(hw, VPRCH);
+ temp_reg = IXGB_READ_REG(hw, JPRCL);
+ temp_reg = IXGB_READ_REG(hw, JPRCH);
+ temp_reg = IXGB_READ_REG(hw, GORCL);
+ temp_reg = IXGB_READ_REG(hw, GORCH);
+ temp_reg = IXGB_READ_REG(hw, TORL);
+ temp_reg = IXGB_READ_REG(hw, TORH);
+ temp_reg = IXGB_READ_REG(hw, RNBC);
+ temp_reg = IXGB_READ_REG(hw, RUC);
+ temp_reg = IXGB_READ_REG(hw, ROC);
+ temp_reg = IXGB_READ_REG(hw, RLEC);
+ temp_reg = IXGB_READ_REG(hw, CRCERRS);
+ temp_reg = IXGB_READ_REG(hw, ICBC);
+ temp_reg = IXGB_READ_REG(hw, ECBC);
+ temp_reg = IXGB_READ_REG(hw, MPC);
+ temp_reg = IXGB_READ_REG(hw, TPTL);
+ temp_reg = IXGB_READ_REG(hw, TPTH);
+ temp_reg = IXGB_READ_REG(hw, GPTCL);
+ temp_reg = IXGB_READ_REG(hw, GPTCH);
+ temp_reg = IXGB_READ_REG(hw, BPTCL);
+ temp_reg = IXGB_READ_REG(hw, BPTCH);
+ temp_reg = IXGB_READ_REG(hw, MPTCL);
+ temp_reg = IXGB_READ_REG(hw, MPTCH);
+ temp_reg = IXGB_READ_REG(hw, UPTCL);
+ temp_reg = IXGB_READ_REG(hw, UPTCH);
+ temp_reg = IXGB_READ_REG(hw, VPTCL);
+ temp_reg = IXGB_READ_REG(hw, VPTCH);
+ temp_reg = IXGB_READ_REG(hw, JPTCL);
+ temp_reg = IXGB_READ_REG(hw, JPTCH);
+ temp_reg = IXGB_READ_REG(hw, GOTCL);
+ temp_reg = IXGB_READ_REG(hw, GOTCH);
+ temp_reg = IXGB_READ_REG(hw, TOTL);
+ temp_reg = IXGB_READ_REG(hw, TOTH);
+ temp_reg = IXGB_READ_REG(hw, DC);
+ temp_reg = IXGB_READ_REG(hw, PLT64C);
+ temp_reg = IXGB_READ_REG(hw, TSCTC);
+ temp_reg = IXGB_READ_REG(hw, TSCTFC);
+ temp_reg = IXGB_READ_REG(hw, IBIC);
+ temp_reg = IXGB_READ_REG(hw, RFC);
+ temp_reg = IXGB_READ_REG(hw, LFC);
+ temp_reg = IXGB_READ_REG(hw, PFRC);
+ temp_reg = IXGB_READ_REG(hw, PFTC);
+ temp_reg = IXGB_READ_REG(hw, MCFRC);
+ temp_reg = IXGB_READ_REG(hw, MCFTC);
+ temp_reg = IXGB_READ_REG(hw, XONRXC);
+ temp_reg = IXGB_READ_REG(hw, XONTXC);
+ temp_reg = IXGB_READ_REG(hw, XOFFRXC);
+ temp_reg = IXGB_READ_REG(hw, XOFFTXC);
+ temp_reg = IXGB_READ_REG(hw, RJC);
+ return;
}
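/* The statistics registers read above are clear-on-read; the function
 * discards the values and reads them only for that side effect. The
 * volatile qualifier on temp_reg keeps the compiler from optimizing
 * the otherwise-dead reads away. */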
/******************************************************************************
void
ixgb_led_on(struct ixgb_hw *hw)
{
- uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
+ uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
- /* To turn on the LED, clear software-definable pin 0 (SDP0). */
- ctrl0_reg &= ~IXGB_CTRL0_SDP0;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
- return;
+ /* To turn on the LED, clear software-definable pin 0 (SDP0). */
+ ctrl0_reg &= ~IXGB_CTRL0_SDP0;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
+ return;
}
/******************************************************************************
void
ixgb_led_off(struct ixgb_hw *hw)
{
- uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
+ uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
- /* To turn off the LED, set software-definable pin 0 (SDP0). */
- ctrl0_reg |= IXGB_CTRL0_SDP0;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
- return;
+ /* To turn off the LED, set software-definable pin 0 (SDP0). */
+ ctrl0_reg |= IXGB_CTRL0_SDP0;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
+ return;
}
/******************************************************************************
static void
ixgb_get_bus_info(struct ixgb_hw *hw)
{
- uint32_t status_reg;
-
- status_reg = IXGB_READ_REG(hw, STATUS);
-
- hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
- ixgb_bus_type_pcix : ixgb_bus_type_pci;
-
- if (hw->bus.type == ixgb_bus_type_pci) {
- hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
- ixgb_bus_speed_66 : ixgb_bus_speed_33;
- } else {
- switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
- case IXGB_STATUS_PCIX_SPD_66:
- hw->bus.speed = ixgb_bus_speed_66;
- break;
- case IXGB_STATUS_PCIX_SPD_100:
- hw->bus.speed = ixgb_bus_speed_100;
- break;
- case IXGB_STATUS_PCIX_SPD_133:
- hw->bus.speed = ixgb_bus_speed_133;
- break;
- default:
- hw->bus.speed = ixgb_bus_speed_reserved;
- break;
- }
- }
-
- hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
- ixgb_bus_width_64 : ixgb_bus_width_32;
-
- return;
+ uint32_t status_reg;
+
+ status_reg = IXGB_READ_REG(hw, STATUS);
+
+ hw->bus.type =
+ (status_reg & IXGB_STATUS_PCIX_MODE) ? ixgb_bus_type_pcix :
+ ixgb_bus_type_pci;
+
+ if(hw->bus.type == ixgb_bus_type_pci) {
+ hw->bus.speed =
+ (status_reg & IXGB_STATUS_PCI_SPD) ? ixgb_bus_speed_66 :
+ ixgb_bus_speed_33;
+ } else {
+ switch(status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
+ case IXGB_STATUS_PCIX_SPD_66:
+ hw->bus.speed = ixgb_bus_speed_66;
+ break;
+ case IXGB_STATUS_PCIX_SPD_100:
+ hw->bus.speed = ixgb_bus_speed_100;
+ break;
+ case IXGB_STATUS_PCIX_SPD_133:
+ hw->bus.speed = ixgb_bus_speed_133;
+ break;
+ default:
+ hw->bus.speed = ixgb_bus_speed_reserved;
+ break;
+ }
+ }
+
+ hw->bus.width =
+ (status_reg & IXGB_STATUS_BUS64) ? ixgb_bus_width_64 :
+ ixgb_bus_width_32;
+
+ return;
}
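/* A hypothetical helper (not part of this patch) showing how the
 * decoded fields might be turned into a log string, using only the
 * ixgb_bus_speed enum declared in ixgb_hw.h: */
static const char *
ixgb_bus_speed_str(ixgb_bus_speed speed)
{
	switch(speed) {
	case ixgb_bus_speed_33:   return "33MHz";
	case ixgb_bus_speed_66:   return "66MHz";
	case ixgb_bus_speed_100:  return "100MHz";
	case ixgb_bus_speed_133:  return "133MHz";
	default:                  return "unknown";
	}
}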
/******************************************************************************
* mac_addr - pointer to MAC address.
*
*****************************************************************************/
-static boolean_t
+boolean_t
mac_addr_valid(uint8_t *mac_addr)
{
- boolean_t is_valid = TRUE;
- DEBUGFUNC("mac_addr_valid");
-
- /* Make sure it is not a multicast address */
- if (IS_MULTICAST(mac_addr)) {
- DEBUGOUT("MAC address is multicast\n");
- is_valid = FALSE;
- }
- /* Not a broadcast address */
- else if (IS_BROADCAST(mac_addr)) {
- DEBUGOUT("MAC address is broadcast\n");
- is_valid = FALSE;
- }
- /* Reject the zero address */
- else if (mac_addr[0] == 0 &&
- mac_addr[1] == 0 &&
- mac_addr[2] == 0 &&
- mac_addr[3] == 0 &&
- mac_addr[4] == 0 &&
- mac_addr[5] == 0) {
- DEBUGOUT("MAC address is all zeros\n");
- is_valid = FALSE;
- }
- return (is_valid);
+ boolean_t is_valid = TRUE;
+
+ DEBUGFUNC("mac_addr_valid");
+
+ /* Make sure it is not a multicast address */
+ if(IS_MULTICAST(mac_addr)) {
+ DEBUGOUT("MAC address is multicast\n");
+ is_valid = FALSE;
+ }
+ /* Not a broadcast address */
+ else if(IS_BROADCAST(mac_addr)) {
+ DEBUGOUT("MAC address is broadcast\n");
+ is_valid = FALSE;
+ }
+ /* Reject the zero address */
+ else if(mac_addr[0] == 0 &&
+ mac_addr[1] == 0 &&
+ mac_addr[2] == 0 &&
+ mac_addr[3] == 0 &&
+ mac_addr[4] == 0 &&
+ mac_addr[5] == 0) {
+ DEBUGOUT("MAC address is all zeros\n");
+ is_valid = FALSE;
+ }
+ return (is_valid);
}
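/* For Ethernet the three rejections above reduce to simple byte tests:
 * multicast is bit 0 of the first byte, broadcast is all ones. A
 * standalone sketch (the IS_MULTICAST/IS_BROADCAST expansions here are
 * illustrative, not the driver's): */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static int addr_ok(const uint8_t *a)
{
	static const uint8_t bcast[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	static const uint8_t zero[6];

	if (a[0] & 0x01)		/* multicast */
		return 0;
	if (!memcmp(a, bcast, 6))	/* broadcast */
		return 0;
	if (!memcmp(a, zero, 6))	/* all zeros */
		return 0;
	return 1;
}

int main(void)
{
	const uint8_t good[6]  = { 0x00, 0xA0, 0xC9, 0x12, 0x34, 0x56 };
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };

	assert(addr_ok(good) && !addr_ok(mcast));
	return 0;
}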
/******************************************************************************
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static boolean_t
+boolean_t
ixgb_link_reset(struct ixgb_hw *hw)
{
- boolean_t link_status = FALSE;
- uint8_t wait_retries = MAX_RESET_ITERATIONS;
- uint8_t lrst_retries = MAX_RESET_ITERATIONS;
-
- do {
- /* Reset the link */
- IXGB_WRITE_REG(hw, CTRL0,
- IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
-
- /* Wait for link-up and lane re-alignment */
- do {
- udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
- link_status =
- ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
- && (IXGB_READ_REG(hw, XPCSS) &
- IXGB_XPCSS_ALIGN_STATUS)) ? TRUE : FALSE;
- } while (!link_status && --wait_retries);
-
- } while (!link_status && --lrst_retries);
-
- return link_status;
+ boolean_t link_status = FALSE;
+ uint8_t wait_retries = MAX_RESET_ITERATIONS;
+ uint8_t lrst_retries = MAX_RESET_ITERATIONS;
+
+ do {
+ /* Reset the link */
+ IXGB_WRITE_REG(hw, CTRL0,
+ IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
+
+ /* Wait for link-up and lane re-alignment */
+ do {
+ usec_delay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
+ link_status =
+ ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) &&
+ (IXGB_READ_REG(hw, XPCSS) &
+ IXGB_XPCSS_ALIGN_STATUS)) ? TRUE : FALSE;
+ } while(!link_status && --wait_retries);
+
+ } while(!link_status && --lrst_retries);
+
+ return link_status;
}
/******************************************************************************
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
+void
ixgb_optics_reset(struct ixgb_hw *hw)
{
- if (hw->phy_type == ixgb_phy_type_txn17401) {
- uint16_t mdio_reg;
-
- ixgb_write_phy_reg(hw,
- MDIO_PMA_PMD_CR1,
- IXGB_PHY_ADDRESS,
- MDIO_PMA_PMD_DID,
- MDIO_PMA_PMD_CR1_RESET);
-
- mdio_reg = ixgb_read_phy_reg( hw,
- MDIO_PMA_PMD_CR1,
- IXGB_PHY_ADDRESS,
- MDIO_PMA_PMD_DID);
- }
-
- return;
+ if(hw->phy_type == ixgb_phy_type_txn17401) {
+ uint16_t mdio_reg;
+
+ ixgb_write_phy_reg(hw,
+ MDIO_PMA_PMD_CR1,
+ IXGB_PHY_ADDRESS,
+ MDIO_PMA_PMD_DID,
+ MDIO_PMA_PMD_CR1_RESET);
+
+ mdio_reg = ixgb_read_phy_reg(hw,
+ MDIO_PMA_PMD_CR1,
+ IXGB_PHY_ADDRESS,
+ MDIO_PMA_PMD_DID);
+ }
+
+ return;
}
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/* Enums */
typedef enum {
- ixgb_mac_unknown = 0,
- ixgb_82597,
- ixgb_num_macs
+ ixgb_mac_unknown = 0,
+ ixgb_82597,
+ ixgb_num_macs
} ixgb_mac_type;
/* Types of physical layer modules */
typedef enum {
- ixgb_phy_type_unknown = 0,
- ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */
- ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */
- ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */
- ixgb_phy_type_txn17401 /* 1310nm, SM fiber, XENPAK transceiver */
+ ixgb_phy_type_unknown = 0,
+ ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */
+ ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */
+ ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */
+ ixgb_phy_type_txn17401 /* 1310nm, SM fiber, XENPAK transceiver */
} ixgb_phy_type;
/* XPAK transceiver vendors, for the SR adapters */
typedef enum {
- ixgb_xpak_vendor_intel,
- ixgb_xpak_vendor_infineon
+ ixgb_xpak_vendor_intel,
+ ixgb_xpak_vendor_infineon
} ixgb_xpak_vendor;
/* Media Types */
typedef enum {
- ixgb_media_type_unknown = 0,
- ixgb_media_type_fiber = 1,
- ixgb_media_type_copper = 2,
- ixgb_num_media_types
+ ixgb_media_type_unknown = 0,
+ ixgb_media_type_fiber = 1,
+ ixgb_media_type_copper = 2,
+ ixgb_num_media_types
} ixgb_media_type;
/* Flow Control Settings */
typedef enum {
- ixgb_fc_none = 0,
- ixgb_fc_rx_pause = 1,
- ixgb_fc_tx_pause = 2,
- ixgb_fc_full = 3,
- ixgb_fc_default = 0xFF
+ ixgb_fc_none = 0,
+ ixgb_fc_rx_pause = 1,
+ ixgb_fc_tx_pause = 2,
+ ixgb_fc_full = 3,
+ ixgb_fc_default = 0xFF
} ixgb_fc_type;
/* PCI bus types */
typedef enum {
- ixgb_bus_type_unknown = 0,
- ixgb_bus_type_pci,
- ixgb_bus_type_pcix
+ ixgb_bus_type_unknown = 0,
+ ixgb_bus_type_pci,
+ ixgb_bus_type_pcix
} ixgb_bus_type;
/* PCI bus speeds */
typedef enum {
- ixgb_bus_speed_unknown = 0,
- ixgb_bus_speed_33,
- ixgb_bus_speed_66,
- ixgb_bus_speed_100,
- ixgb_bus_speed_133,
- ixgb_bus_speed_reserved
+ ixgb_bus_speed_unknown = 0,
+ ixgb_bus_speed_33,
+ ixgb_bus_speed_66,
+ ixgb_bus_speed_100,
+ ixgb_bus_speed_133,
+ ixgb_bus_speed_reserved
} ixgb_bus_speed;
/* PCI bus widths */
typedef enum {
- ixgb_bus_width_unknown = 0,
- ixgb_bus_width_32,
- ixgb_bus_width_64
+ ixgb_bus_width_unknown = 0,
+ ixgb_bus_width_32,
+ ixgb_bus_width_64
} ixgb_bus_width;
#define IXGB_ETH_LENGTH_OF_ADDRESS 6
-#define IXGB_EEPROM_SIZE 64 /* Size in words */
+#define IXGB_EEPROM_SIZE 64 /* Size in words */
#define SPEED_10000 10000
#define FULL_DUPLEX 2
#define MIN_NUMBER_OF_DESCRIPTORS 8
-#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */
-
-#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */
-#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */
-#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */
-
-#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */
- /* NOTE: this is MICROSECONDS */
-#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */
+#define IXGB_MAX_NUMBER_OF_DESCRIPTORS_TX 256
+#define IXGB_MAX_NUMBER_OF_DESCRIPTORS_RX 512 /* see published errata #26 "Incorrect
+ * calculation of descriptor cache free space..." */
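/* Editorial sketch (not part of this patch): clamping a requested RX ring
 * size against the errata-limited cap above. MIN_NUMBER_OF_DESCRIPTORS is
 * defined earlier in this header; the multiple-of-8 rounding mirrors
 * IXGB_REQ_RX_DESCRIPTOR_MULTIPLE, defined further down. */
static inline uint32_t ixgb_valid_rxd_count(uint32_t requested)
{
	if (requested < MIN_NUMBER_OF_DESCRIPTORS)
		requested = MIN_NUMBER_OF_DESCRIPTORS;
	if (requested > IXGB_MAX_NUMBER_OF_DESCRIPTORS_RX)
		requested = IXGB_MAX_NUMBER_OF_DESCRIPTORS_RX;
	return requested & ~7;	/* rings advance in multiples of 8 */
}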
+
+#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling
+ * rx/tx units */
+#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */
+#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM
+ * reset */
+
+#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after
+ * the reset */
+ /* NOTE: this is MICROSECONDS */
+#define MAX_RESET_ITERATIONS 8 /* number of iterations to get
+ * things right */
/* General Registers */
-#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */
-#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */
-#define IXGB_STATUS 0x00010 /* Device Status Register - RO */
-#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */
-#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */
+#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */
+#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */
+#define IXGB_STATUS 0x00010 /* Device Status Register - RO */
+#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */
+#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */
/* Interrupt */
-#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */
-#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */
-#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */
-#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */
+#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */
+#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */
+#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */
+#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */
/* Receive */
-#define IXGB_RCTL 0x00100 /* RX Control - RW */
-#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */
-#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */
-#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */
-#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */
-#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */
-#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */
-#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */
-#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */
-#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */
-#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */
-#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */
-#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */
-#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */
-#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */
-#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */
-#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */
+#define IXGB_RCTL 0x00100 /* RX Control - RW */
+#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */
+#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */
+#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */
+#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */
+#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */
+#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */
+#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */
+#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */
+#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */
+#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Ctrl RW */
+#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */
+#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */
+#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */
+#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */
+#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */
+#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */
#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8
/* Transmit */
-#define IXGB_TCTL 0x00600 /* TX Control - RW */
-#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */
-#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */
-#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */
-#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */
-#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */
-#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */
-#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */
-#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */
-#define IXGB_PAP 0x00640 /* Pause and Pace - RW */
+#define IXGB_TCTL 0x00600 /* TX Control - RW */
+#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */
+#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */
+#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */
+#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */
+#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */
+#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */
+#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */
+#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */
+#define IXGB_PAP 0x00640 /* Pause and Pace - RW */
#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8
/* Physical */
-#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */
-#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */
-#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */
-#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */
-#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */
-#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */
-#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */
-#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */
-#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */
-#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */
-#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */
-#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */
-#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */
+#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */
+#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */
+#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */
+#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */
+#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) -
+ * RO */
+#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */
+#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */
+#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */
+#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */
+#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */
+#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */
+#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */
+#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */
/* Wake-up */
-#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */
-#define IXGB_WUS 0x00810 /* Wake Up Status - RO */
-#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */
-#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */
-#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */
+#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */
+#define IXGB_WUS 0x00810 /* Wake Up Status - RO */
+#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */
+#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */
+#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */
/* Statistics */
-#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */
-#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */
-#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */
-#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */
-#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */
-#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */
-#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */
-#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */
-#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */
-#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */
-#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */
-#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */
-#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */
-#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */
-#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */
-#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */
-#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */
-#define IXGB_TORH 0x02044 /* Total Octets Received (High) */
-#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */
-#define IXGB_RUC 0x02050 /* Receive Undersize Count */
-#define IXGB_ROC 0x02058 /* Receive Oversize Count */
-#define IXGB_RLEC 0x02060 /* Receive Length Error Count */
-#define IXGB_CRCERRS 0x02068 /* CRC Error Count */
-#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */
-#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */
-#define IXGB_MPC 0x02080 /* Missed Packets Count */
-#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */
-#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */
-#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */
-#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */
-#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */
-#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */
-#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */
-#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */
-#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */
-#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */
-#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */
-#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */
-#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */
-#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */
-#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */
-#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */
-#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */
-#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */
-#define IXGB_DC 0x02148 /* Defer Count */
-#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */
-#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */
-#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */
-#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */
-#define IXGB_RFC 0x02188 /* Remote Fault Count */
-#define IXGB_LFC 0x02190 /* Local Fault Count */
-#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */
-#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */
-#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */
-#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */
-#define IXGB_XONRXC 0x021B8 /* XON Received Count */
-#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */
-#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */
-#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */
-#define IXGB_RJC 0x021D8 /* Receive Jabber Count */
+#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */
+#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */
+#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */
+#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */
+#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */
+#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */
+#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */
+#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */
+#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */
+#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */
+#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */
+#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */
+#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */
+#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */
+#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */
+#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */
+#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */
+#define IXGB_TORH 0x02044 /* Total Octets Received (High) */
+#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */
+#define IXGB_RUC 0x02050 /* Receive Undersize Count */
+#define IXGB_ROC 0x02058 /* Receive Oversize Count */
+#define IXGB_RLEC 0x02060 /* Receive Length Error Count */
+#define IXGB_CRCERRS 0x02068 /* CRC Error Count */
+#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */
+#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */
+#define IXGB_MPC 0x02080 /* Missed Packets Count */
+#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */
+#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */
+#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */
+#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */
+#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */
+#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */
+#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */
+#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */
+#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */
+#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */
+#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */
+#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */
+#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */
+#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */
+#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */
+#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */
+#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */
+#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */
+#define IXGB_DC 0x02148 /* Defer Count */
+#define IXGB_PLT64C 0x02150 /* Packet Transmitted was < 64 bytes Count */
+#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */
+#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */
+#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */
+#define IXGB_RFC 0x02188 /* Remote Fault Count */
+#define IXGB_LFC 0x02190 /* Local Fault Count */
+#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */
+#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */
+#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received
+ * Count */
+#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted
+ * Count */
+#define IXGB_XONRXC 0x021B8 /* XON Received Count */
+#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */
+#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */
+#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */
+#define IXGB_RJC 0x021D8 /* Receive Jabber Count */
+
+
+/* Diagnostic */
+#define IXGB_RDFH 0x04000 /* RX Data FIFO Head - RO */
+#define IXGB_RDFT 0x04008 /* RX Data FIFO Tail - RO */
+#define IXGB_RDFTS 0x04018 /* RX Data FIFO Tail Saved - RO */
+#define IXGB_RDFPC 0x04020 /* RX Data Packet Count - RO */
+#define IXGB_TDFH 0x04028 /* TX Data FIFO Head - RO */
+#define IXGB_TDFT 0x04030 /* TX Data FIFO Tail - RO */
+#define IXGB_TDFTS 0x04040 /* TX Data FIFO Tail Saved - RO */
+#define IXGB_TDFPC 0x04048 /* TX Data FIFO Packet Count - RO */
+#define IXGB_TREG 0x04050 /* Test Register - RW */
+#define IXGB_RPR 0x04058 /* RX Page Register - RW */
+#define IXGB_TPR 0x04060 /* TX Page Register - RW */
+#define IXGB_RPDBM 0x05000 /* RX Packet or Descriptor Buffer Memory - RO */
+#define IXGB_TPDBM 0x06000 /* TX Packet or Descriptor Buffer Memory - RO */
+
/* CTRL0 Bit Masks */
#define IXGB_CTRL0_LRST 0x00000008
#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000
/* MSCA bit Masks */
+
/* New Protocol Address */
#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF
#define IXGB_MSCA_NP_ADDR_SHIFT 0
+
/* Either Device Type or Register Address, depending on ST_CODE */
#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000
#define IXGB_MSCA_DEV_TYPE_SHIFT 16
#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000
#define IXGB_MSCA_PHY_ADDR_SHIFT 21
#define IXGB_MSCA_OP_CODE_MASK 0x0C000000
+
/* OP_CODE == 00, Address cycle, New Protocol */
+
/* OP_CODE == 01, Write operation */
+
/* OP_CODE == 10, Read operation */
+
/* OP_CODE == 11, Read, auto increment, New Protocol */
#define IXGB_MSCA_ADDR_CYCLE 0x00000000
#define IXGB_MSCA_WRITE 0x04000000
#define IXGB_MSCA_READ_AUTOINC 0x0C000000
#define IXGB_MSCA_OP_CODE_SHIFT 26
#define IXGB_MSCA_ST_CODE_MASK 0x30000000
+
/* ST_CODE == 00, New Protocol */
+
/* ST_CODE == 01, Old Protocol */
#define IXGB_MSCA_NEW_PROTOCOL 0x00000000
#define IXGB_MSCA_OLD_PROTOCOL 0x10000000
#define IXGB_MSCA_ST_CODE_SHIFT 28
+
/* Initiate command, self-clearing when command completes */
#define IXGB_MSCA_MDI_COMMAND 0x40000000
+
/* MDI In Progress Enable. */
#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000
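/* Editorial sketch (not part of this patch): composing the MSCA command
 * word for the clause-45 address cycle that precedes a register read,
 * using only the field masks/shifts defined above. */
static inline uint32_t ixgb_msca_addr_cmd(uint32_t reg_addr,
					  uint32_t phy_addr,
					  uint32_t dev_type)
{
	return ((reg_addr << IXGB_MSCA_NP_ADDR_SHIFT) & IXGB_MSCA_NP_ADDR_MASK) |
	       ((dev_type << IXGB_MSCA_DEV_TYPE_SHIFT) & IXGB_MSCA_DEV_TYPE_MASK) |
	       ((phy_addr << IXGB_MSCA_PHY_ADDR_SHIFT) & IXGB_MSCA_PHY_ADDR_MASK) |
	       IXGB_MSCA_ADDR_CYCLE |	/* OP_CODE == 00 */
	       IXGB_MSCA_NEW_PROTOCOL |	/* ST_CODE == 00 */
	       IXGB_MSCA_MDI_COMMAND;	/* initiate, self-clearing */
}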
#define IXGB_MSRWD_READ_DATA_SHIFT 16
/* Definitions for the optics devices on the MDIO bus. */
-#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
+#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
/* Standard five-bit Device IDs. See IEEE 802.3ae, clause 45 */
#define MDIO_PMA_PMD_DID 0x01
#define MDIO_XGXS_DID 0x04
/* Standard PMA/PMD registers and bit definitions. */
+
/* Note: This is a very limited set of definitions, */
+
/* only implemented features are defined. */
#define MDIO_PMA_PMD_CR1 0x0000
#define MDIO_PMA_PMD_CR1_RESET 0x8000
-#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */
+#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices
+ * only */
/* Vendor-specific MDIO registers */
-#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */
-#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */
+#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific
+ * register */
+#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific
+ * register */
#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80
#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00
-#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */
+#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes
+ * synchronized */
/* Layout of a single receive descriptor. The controller assumes that this
 * structure is packed into 16 bytes, which is a safe assumption with most
 * compilers. However, some compilers may insert padding between the fields,
 * in which case the structure must be packed in some compiler-specific
 * manner. */
struct ixgb_rx_desc {
- uint64_t buff_addr;
- uint16_t length;
- uint16_t reserved;
- uint8_t status;
- uint8_t errors;
- uint16_t special;
+ uint64_t buff_addr;
+ uint16_t length;
+ uint16_t reserved;
+ uint8_t status;
+ uint8_t errors;
+ uint16_t special;
};
#define IXGB_RX_DESC_STATUS_DD 0x01
#define IXGB_RX_DESC_ERRORS_IPE 0x40
#define IXGB_RX_DESC_ERRORS_RXE 0x80
-#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
-#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
+#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3
+ * of 16 */
/* Layout of a single transmit descriptor. The controller assumes that this
 * structure is packed into 16 bytes, which is a safe assumption with most
 * compilers. However, some compilers may insert padding between the fields,
 * in which case the structure must be packed in some compiler-specific
 * manner. */
struct ixgb_tx_desc {
- uint64_t buff_addr;
- uint32_t cmd_type_len;
- uint8_t status;
- uint8_t popts;
- uint16_t vlan;
+ uint64_t buff_addr;
+ uint32_t cmd_type_len;
+ uint8_t status;
+ uint8_t popts;
+ uint16_t vlan;
};
#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF
#define IXGB_TX_DESC_POPTS_IXSM 0x01
#define IXGB_TX_DESC_POPTS_TXSM 0x02
-#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
+/* Priority is in upper 3 of 16 */
+#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT
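/* Editorial sketch (not part of this patch): the 16-byte packing assumption
 * stated for both descriptor layouts above can be made a build-time check;
 * BUILD_BUG_ON (from <linux/kernel.h>) is the kernel's compile-time assert. */
static inline void ixgb_check_desc_layout(void)
{
	BUILD_BUG_ON(sizeof(struct ixgb_rx_desc) != 16);
	BUILD_BUG_ON(sizeof(struct ixgb_tx_desc) != 16);
}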
struct ixgb_context_desc {
- uint8_t ipcss;
- uint8_t ipcso;
- uint16_t ipcse;
- uint8_t tucss;
- uint8_t tucso;
- uint16_t tucse;
- uint32_t cmd_type_len;
- uint8_t status;
- uint8_t hdr_len;
- uint16_t mss;
+ uint8_t ipcss;
+ uint8_t ipcso;
+ uint16_t ipcse;
+ uint8_t tucss;
+ uint8_t tucso;
+ uint16_t tucse;
+ uint32_t cmd_type_len;
+ uint8_t status;
+ uint8_t hdr_len;
+ uint16_t mss;
};
#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000
#define IXGB_CONTEXT_DESC_STATUS_DD 0x01
/* Filters */
-#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
-#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
+#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
+#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0
-#define ENET_HEADER_SIZE 14
-#define ENET_FCS_LENGTH 4
-#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
-#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
-#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
-#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
+#define ENET_HEADER_SIZE 14
+#define ENET_FCS_LENGTH 4
+#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
+#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
+#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
+#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
/* Phy Addresses */
-#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
+#define IXGB_XAUII_PHY_ADDR 0x1 /* XAUI transceiver phy address */
-#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */
+#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
+#define IXGB_XAUII_PHY_ADDR 0x1 /* Xauii transceiver phy address */
+#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */
/* This structure takes a 64k flash and maps it for identification commands */
struct ixgb_flash_buffer {
- uint8_t manufacturer_id;
- uint8_t device_id;
- uint8_t filler1[0x2AA8];
- uint8_t cmd2;
- uint8_t filler2[0x2AAA];
- uint8_t cmd1;
- uint8_t filler3[0xAAAA];
+ uint8_t manufacturer_id;
+ uint8_t device_id;
+ uint8_t filler1[0x2AA8];
+ uint8_t cmd2;
+ uint8_t filler2[0x2AAA];
+ uint8_t cmd1;
+ uint8_t filler3[0xAAAA];
};
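/* Editorial note: the filler arrays place cmd2 at byte offset 0x2AAA and
 * cmd1 at 0x5555 (the classic JEDEC software-ID command addresses), and
 * filler3 pads the mapping to exactly 64 KiB:
 * 2 + 0x2AA8 + 1 + 0x2AAA + 1 + 0xAAAA = 0x10000 bytes. */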
/* Flow control parameters */
struct ixgb_fc {
- uint32_t high_water; /* Flow Control High-water */
- uint32_t low_water; /* Flow Control Low-water */
- uint16_t pause_time; /* Flow Control Pause timer */
- boolean_t send_xon; /* Flow control send XON */
- ixgb_fc_type type; /* Type of flow control */
+ uint32_t high_water; /* Flow Control High-water */
+ uint32_t low_water; /* Flow Control Low-water */
+ uint16_t pause_time; /* Flow Control Pause timer */
+ boolean_t send_xon; /* Flow control send XON */
+ ixgb_fc_type type; /* Type of flow control */
};
/* The historical defaults for the flow control values are given below. */
-#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
-#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
-#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
+#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
+#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
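/* Editorial note: high_water and low_water above are the values programmed
 * into the FCRTH/FCRTL registers defined earlier. Under the usual watermark
 * semantics, an XOFF pause frame is sent once the receive FIFO fills past
 * the high-water mark (32 KB by default) and, when send_xon is set, an XON
 * follows once it drains below the low-water mark (16 KB by default). */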
/* Phy definitions */
#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF
/* Bus parameters */
struct ixgb_bus {
- ixgb_bus_speed speed;
- ixgb_bus_width width;
- ixgb_bus_type type;
+ ixgb_bus_speed speed;
+ ixgb_bus_width width;
+ ixgb_bus_type type;
};
struct ixgb_hw {
- uint8_t __iomem *hw_addr;/* Base Address of the hardware */
- void *back; /* Pointer to OS-dependent struct */
- struct ixgb_fc fc; /* Flow control parameters */
- struct ixgb_bus bus; /* Bus parameters */
- uint32_t phy_id; /* Phy Identifier */
- uint32_t phy_addr; /* XGMII address of Phy */
- ixgb_mac_type mac_type; /* Identifier for MAC controller */
- ixgb_phy_type phy_type; /* Transceiver/phy identifier */
- uint32_t max_frame_size; /* Maximum frame size supported */
- uint32_t mc_filter_type; /* Multicast filter hash type */
- uint32_t num_mc_addrs; /* Number of current Multicast addrs */
- uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */
- uint32_t num_tx_desc; /* Number of Transmit descriptors */
- uint32_t num_rx_desc; /* Number of Receive descriptors */
- uint32_t rx_buffer_size; /* Size of Receive buffer */
- boolean_t link_up; /* TRUE if link is valid */
- boolean_t adapter_stopped; /* State of adapter */
- uint16_t device_id; /* device id from PCI configuration space */
- uint16_t vendor_id; /* vendor id from PCI configuration space */
- uint8_t revision_id; /* revision id from PCI configuration space */
- uint16_t subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
- uint16_t subsystem_id; /* subsystem id from PCI configuration space */
- uint32_t bar0; /* Base Address registers */
- uint32_t bar1;
- uint32_t bar2;
- uint32_t bar3;
- uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */
- __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
- unsigned long io_base; /* Our I/O mapped location */
- uint32_t lastLFC;
- uint32_t lastRFC;
+ uint8_t *hw_addr; /* Base Address of the hardware */
+ void *back; /* Pointer to OS-dependent struct */
+ struct ixgb_fc fc; /* Flow control parameters */
+ struct ixgb_bus bus; /* Bus parameters */
+ uint32_t phy_id; /* Phy Identifier */
+ uint32_t phy_addr; /* XGMII address of Phy */
+ ixgb_mac_type mac_type; /* Identifier for MAC controller */
+ ixgb_phy_type phy_type; /* Transceiver/phy identifier */
+ uint32_t max_frame_size; /* Maximum frame size supported */
+ uint32_t mc_filter_type; /* Multicast filter hash type */
+ uint32_t num_mc_addrs; /* Number of current Multicast addrs */
+ /* Individual address currently programmed in MAC */
+ uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
+ uint32_t num_tx_desc; /* Number of Transmit descriptors */
+ uint32_t num_rx_desc; /* Number of Receive descriptors */
+ uint32_t rx_buffer_size; /* Size of Receive buffer */
+ boolean_t link_up; /* TRUE if link is valid */
+ boolean_t adapter_stopped; /* State of adapter */
+ uint16_t device_id; /* device id from PCI configuration space */
+ uint16_t vendor_id; /* vendor id from PCI configuration space */
+ uint8_t revision_id; /* revision id from PCI configuration space */
+ uint16_t subsystem_vendor_id; /* subsystem vendor id from PCI
+ * configuration space */
+ uint16_t subsystem_id; /* subsystem id from PCI configuration space */
+ uint32_t bar0; /* Base Address registers */
+ uint32_t bar1;
+ uint32_t bar2;
+ uint32_t bar3;
+ uint16_t pci_cmd_word; /* PCI command register id from PCI
+ * configuration space */
+ uint16_t eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init
+ * time */
+ unsigned long io_base; /* Our I/O mapped location */
+ uint32_t lastLFC;
+ uint32_t lastRFC;
};
/* Statistics reported by the hardware */
struct ixgb_hw_stats {
- uint64_t tprl;
- uint64_t tprh;
- uint64_t gprcl;
- uint64_t gprch;
- uint64_t bprcl;
- uint64_t bprch;
- uint64_t mprcl;
- uint64_t mprch;
- uint64_t uprcl;
- uint64_t uprch;
- uint64_t vprcl;
- uint64_t vprch;
- uint64_t jprcl;
- uint64_t jprch;
- uint64_t gorcl;
- uint64_t gorch;
- uint64_t torl;
- uint64_t torh;
- uint64_t rnbc;
- uint64_t ruc;
- uint64_t roc;
- uint64_t rlec;
- uint64_t crcerrs;
- uint64_t icbc;
- uint64_t ecbc;
- uint64_t mpc;
- uint64_t tptl;
- uint64_t tpth;
- uint64_t gptcl;
- uint64_t gptch;
- uint64_t bptcl;
- uint64_t bptch;
- uint64_t mptcl;
- uint64_t mptch;
- uint64_t uptcl;
- uint64_t uptch;
- uint64_t vptcl;
- uint64_t vptch;
- uint64_t jptcl;
- uint64_t jptch;
- uint64_t gotcl;
- uint64_t gotch;
- uint64_t totl;
- uint64_t toth;
- uint64_t dc;
- uint64_t plt64c;
- uint64_t tsctc;
- uint64_t tsctfc;
- uint64_t ibic;
- uint64_t rfc;
- uint64_t lfc;
- uint64_t pfrc;
- uint64_t pftc;
- uint64_t mcfrc;
- uint64_t mcftc;
- uint64_t xonrxc;
- uint64_t xontxc;
- uint64_t xoffrxc;
- uint64_t xofftxc;
- uint64_t rjc;
+ uint64_t tprl;
+ uint64_t tprh;
+ uint64_t gprcl;
+ uint64_t gprch;
+ uint64_t bprcl;
+ uint64_t bprch;
+ uint64_t mprcl;
+ uint64_t mprch;
+ uint64_t uprcl;
+ uint64_t uprch;
+ uint64_t vprcl;
+ uint64_t vprch;
+ uint64_t jprcl;
+ uint64_t jprch;
+ uint64_t gorcl;
+ uint64_t gorch;
+ uint64_t torl;
+ uint64_t torh;
+ uint64_t rnbc;
+ uint64_t ruc;
+ uint64_t roc;
+ uint64_t rlec;
+ uint64_t crcerrs;
+ uint64_t icbc;
+ uint64_t ecbc;
+ uint64_t mpc;
+ uint64_t tptl;
+ uint64_t tpth;
+ uint64_t gptcl;
+ uint64_t gptch;
+ uint64_t bptcl;
+ uint64_t bptch;
+ uint64_t mptcl;
+ uint64_t mptch;
+ uint64_t uptcl;
+ uint64_t uptch;
+ uint64_t vptcl;
+ uint64_t vptch;
+ uint64_t jptcl;
+ uint64_t jptch;
+ uint64_t gotcl;
+ uint64_t gotch;
+ uint64_t totl;
+ uint64_t toth;
+ uint64_t dc;
+ uint64_t plt64c;
+ uint64_t tsctc;
+ uint64_t tsctfc;
+ uint64_t ibic;
+ uint64_t rfc;
+ uint64_t lfc;
+ uint64_t pfrc;
+ uint64_t pftc;
+ uint64_t mcfrc;
+ uint64_t mcftc;
+ uint64_t xonrxc;
+ uint64_t xontxc;
+ uint64_t xoffrxc;
+ uint64_t xofftxc;
+ uint64_t rjc;
};
/* Function Prototypes */
extern boolean_t ixgb_adapter_start(struct ixgb_hw *hw);
extern void ixgb_check_for_link(struct ixgb_hw *hw);
extern boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw);
+extern void ixgb_rar_set(struct ixgb_hw *hw, uint8_t *addr, uint32_t index);
+extern void ixgb_init_rx_addrs(struct ixgb_hw *hw);
+extern boolean_t ixgb_setup_fc(struct ixgb_hw *hw);
+extern void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
+extern boolean_t mac_addr_valid(uint8_t *mac_addr);
+
+extern uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw, uint32_t reg_addr,
+ uint32_t phy_addr, uint32_t device_type);
-extern void ixgb_rar_set(struct ixgb_hw *hw,
- uint8_t *addr,
- uint32_t index);
+extern void ixgb_write_phy_reg(struct ixgb_hw *hw, uint32_t reg_addr,
+ uint32_t phy_addr, uint32_t device_type,
+ uint16_t data);
/* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
- uint8_t *mc_addr_list,
- uint32_t mc_addr_count,
- uint32_t pad);
+extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, uint8_t *mc_addr_list,
+ uint32_t mc_addr_count, uint32_t pad);
/* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
- uint32_t offset,
- uint32_t value);
+extern void ixgb_write_vfta(struct ixgb_hw *hw, uint32_t offset,
+ uint32_t value);
+
+extern void ixgb_clear_vfta(struct ixgb_hw *hw);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
-__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
+uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
/* Everything else */
void ixgb_led_on(struct ixgb_hw *hw);
void ixgb_led_off(struct ixgb_hw *hw);
-void ixgb_write_pci_cfg(struct ixgb_hw *hw,
- uint32_t reg,
- uint16_t * value);
-
+void ixgb_write_pci_cfg(struct ixgb_hw *hw, uint32_t reg, uint16_t *value);
#endif /* _IXGB_HW_H_ */
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
*******************************************************************************/
+
#ifndef _IXGB_IDS_H_
#define _IXGB_IDS_H_
/**********************************************************************
** The Device and Vendor IDs for 10 Gigabit MACs
**********************************************************************/
-#define INTEL_VENDOR_ID 0x8086
-#define INTEL_SUBVENDOR_ID 0x8086
-
+#define INTEL_VENDOR_ID 0x8086
+#define INTEL_SUBVENDOR_ID 0x8086
-#define IXGB_DEVICE_ID_82597EX 0x1048
-#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
+#define IXGB_DEVICE_ID_82597EX 0x1048
+#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
-#define IXGB_SUBDEVICE_ID_A11F 0xA11F
-#define IXGB_SUBDEVICE_ID_A01F 0xA01F
+#define IXGB_SUBDEVICE_ID_A11F 0xA11F
+#define IXGB_SUBDEVICE_ID_A01F 0xA01F
#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
#define IXGB_SUBDEVICE_ID_A00C 0xA00C
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#include "ixgb.h"
char ixgb_driver_name[] = "ixgb";
-static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
+const char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#ifndef CONFIG_IXGB_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "1.0.126-k2"DRIVERNAPI
+#define DRV_VERSION "1.0.135" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
-static const char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+static char ixgb_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
-/* ixgb_pci_tbl - PCI Device ID Table
- *
- * Wildcard entries (PCI_ANY_ID) should come last
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- * Class, Class Mask, private data (not used) }
+#define IXGB_CB_LENGTH 0
+static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+/*
+ * ixgb_pci_tbl - PCI Device ID Table
*/
static struct pci_device_id ixgb_pci_tbl[] = {
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-
/* required last entry */
{0,}
};
-
MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
/* Local Function Prototypes */
-
-int ixgb_up(struct ixgb_adapter *adapter);
-void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
-void ixgb_reset(struct ixgb_adapter *adapter);
-int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-void ixgb_update_stats(struct ixgb_adapter *adapter);
-
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
-static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
-static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
+static irqreturn_t ixgb_intr(int irq, void *data);
+static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
#ifdef CONFIG_IXGB_NAPI
-static int ixgb_clean(struct net_device *netdev, int *budget);
-static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
- int *work_done, int work_to_do);
+static int ixgb_poll(struct napi_struct *, int);
+static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
#else
-static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
+static bool ixgb_clean_rx_irq(struct ixgb_adapter *);
#endif
-static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
+static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter,
+ int cleaned_count);
+static int ixgb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
+
+#ifdef NETIF_F_HW_VLAN_TX
static void ixgb_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp);
+ struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif
+#ifdef CONFIG_IXGB_PCI_ERS
static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
- enum pci_channel_state state);
+ enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
static void ixgb_io_resume (struct pci_dev *pdev);
+#endif
+
+#ifdef CONFIG_IXGB_PCI_ERS
static struct pci_error_handlers ixgb_err_handler = {
.error_detected = ixgb_io_error_detected,
.slot_reset = ixgb_io_slot_reset,
.resume = ixgb_io_resume,
};
+#endif
static struct pci_driver ixgb_driver = {
.name = ixgb_driver_name,
.id_table = ixgb_pci_tbl,
.probe = ixgb_probe,
.remove = __devexit_p(ixgb_remove),
+#ifdef CONFIG_IXGB_PCI_ERS
.err_handler = &ixgb_err_handler
+#endif
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-/* some defines for controlling descriptor fetches in h/w */
-#define RXDCTL_WTHRESH_DEFAULT 15 /* chip writes back at this many or RXT0 */
-#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
- * this */
-#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
- * is pushed this many descriptors
- * from head */
-
/**
* ixgb_init_module - Driver Registration Routine
*
static int __init
ixgb_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n",
- ixgb_driver_string, ixgb_driver_version);
+ printk(KERN_INFO "%s - version %s\n", ixgb_driver_string,
+ ixgb_driver_version);
printk(KERN_INFO "%s\n", ixgb_copyright);
return pci_register_driver(&ixgb_driver);
}
-
module_init(ixgb_init_module);
/**
{
pci_unregister_driver(&ixgb_driver);
}
-
module_exit(ixgb_exit_module);
/**
static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
- if(atomic_dec_and_test(&adapter->irq_sem)) {
+ if (atomic_dec_and_test(&adapter->irq_sem)) {
IXGB_WRITE_REG(&adapter->hw, IMS,
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_LSC);
ixgb_up(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int err, irq_flags = IRQF_SHARED;
+ int err;
int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
struct ixgb_hw *hw = &adapter->hw;
ixgb_rar_set(hw, netdev->dev_addr, 0);
ixgb_set_multi(netdev);
+#ifdef NETIF_F_HW_VLAN_TX
ixgb_restore_vlan(adapter);
+#endif
ixgb_configure_tx(adapter);
ixgb_setup_rctl(adapter);
ixgb_configure_rx(adapter);
- ixgb_alloc_rx_buffers(adapter);
+ ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
/* disable interrupts and get the hardware into a known state */
IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
- /* only enable MSI if bus is in PCI-X mode */
- if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
- adapter->have_msi = 1;
- irq_flags = 0;
- }
- /* proceed to try to request regular interrupt */
+ {
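+ /* the adapter supports MSI only while the bus runs in PCI-X
+ * mode, so check STATUS before trying to enable it */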
+ bool pcix =
+ (IXGB_READ_REG(&adapter->hw, STATUS) &
+ IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
+ adapter->have_msi = TRUE;
+
+ if (!pcix)
+ adapter->have_msi = FALSE;
+ else if ((err = pci_enable_msi(adapter->pdev)))
+ /* proceed to try to request regular interrupt */
+ adapter->have_msi = FALSE;
}
- err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags,
- netdev->name, netdev);
+ err = request_irq(adapter->pdev->irq, &ixgb_intr, IRQF_SHARED,
+ netdev->name, netdev);
if (err) {
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
DPRINTK(PROBE, ERR,
"Unable to allocate interrupt Error: %d\n", err);
return err;
}
- if((hw->max_frame_size != max_frame) ||
- (hw->max_frame_size !=
- (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
+ if ((hw->max_frame_size != max_frame) ||
+ (hw->max_frame_size != (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
hw->max_frame_size = max_frame;
IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
- if(hw->max_frame_size >
+ if (hw->max_frame_size >
IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
- if(!(ctrl0 & IXGB_CTRL0_JFE)) {
+ if (!(ctrl0 & IXGB_CTRL0_JFE)) {
ctrl0 |= IXGB_CTRL0_JFE;
IXGB_WRITE_REG(hw, CTRL0, ctrl0);
}
}
}
- mod_timer(&adapter->watchdog_timer, jiffies);
+ clear_bit(__IXGB_DOWN, &adapter->flags);
#ifdef CONFIG_IXGB_NAPI
- netif_poll_enable(netdev);
+ napi_enable(&adapter->napi);
#endif
ixgb_irq_enable(adapter);
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
return 0;
}
void
-ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
+ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
struct net_device *netdev = adapter->netdev;
+ /* prevent the interrupt handler from restarting watchdog */
+ set_bit(__IXGB_DOWN, &adapter->flags);
+
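/* Editorial note: the matching guard sits in the interrupt handler (that
 * hunk is not shown in this excerpt); paraphrased, ixgb_intr only rearms
 * the watchdog with
 *	if (!test_bit(__IXGB_DOWN, &adapter->flags))
 *		mod_timer(&adapter->watchdog_timer, jiffies);
 * so a device marked down cannot have its watchdog restarted. */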
ixgb_irq_disable(adapter);
free_irq(adapter->pdev->irq, netdev);
-
- if (adapter->have_msi)
+ if (adapter->have_msi == TRUE)
pci_disable_msi(adapter->pdev);
- if(kill_watchdog)
+ if (kill_watchdog)
del_timer_sync(&adapter->watchdog_timer);
#ifdef CONFIG_IXGB_NAPI
- netif_poll_disable(netdev);
+ napi_disable(&adapter->napi);
#endif
adapter->link_speed = 0;
adapter->link_duplex = 0;
{
ixgb_adapter_stop(&adapter->hw);
- if(!ixgb_init_hw(&adapter->hw))
+ if (!ixgb_init_hw(&adapter->hw))
DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
}
**/
static int __devinit
-ixgb_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct ixgb_adapter *adapter;
static int cards_found = 0;
- unsigned long mmio_start;
- int mmio_len;
int pci_using_dac;
int i;
int err;
- if((err = pci_enable_device(pdev)))
+ if ((err = pci_enable_device(pdev)))
return err;
- if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
+ if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
+ !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)))
pci_using_dac = 1;
- } else {
- if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
+ else {
+ if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
+ (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
printk(KERN_ERR
"ixgb: No usable DMA configuration, aborting\n");
goto err_dma_mask;
pci_using_dac = 0;
}
- if((err = pci_request_regions(pdev, ixgb_driver_name)))
+ if ((err = pci_request_regions(pdev, ixgb_driver_name)))
goto err_request_regions;
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
- if(!netdev) {
+ if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
}
adapter->hw.back = adapter;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
- mmio_start = pci_resource_start(pdev, BAR_0);
- mmio_len = pci_resource_len(pdev, BAR_0);
-
- adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
- if(!adapter->hw.hw_addr) {
+ adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+ pci_resource_len(pdev, BAR_0));
+ if (!adapter->hw.hw_addr) {
err = -EIO;
goto err_ioremap;
}
- for(i = BAR_1; i <= BAR_5; i++) {
- if(pci_resource_len(pdev, i) == 0)
+ for (i = BAR_1; i <= BAR_5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
continue;
- if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
adapter->hw.io_base = pci_resource_start(pdev, i);
break;
}
netdev->set_multicast_list = &ixgb_set_multi;
netdev->set_mac_address = &ixgb_set_mac;
netdev->change_mtu = &ixgb_change_mtu;
+ netdev->do_ioctl = &ixgb_ioctl;
ixgb_set_ethtool_ops(netdev);
netdev->tx_timeout = &ixgb_tx_timeout;
netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_IXGB_NAPI
- netdev->poll = &ixgb_clean;
- netdev->weight = 64;
+ netif_napi_add(adapter->netdev, &adapter->napi, ixgb_poll, 64);
#endif
+#ifdef NETIF_F_HW_VLAN_TX
netdev->vlan_rx_register = ixgb_vlan_rx_register;
netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = ixgb_netpoll;
#endif
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
- netdev->mem_start = mmio_start;
- netdev->mem_end = mmio_start + mmio_len;
- netdev->base_addr = adapter->hw.io_base;
adapter->bd_number = cards_found;
adapter->link_speed = 0;
/* setup the private structure */
- if((err = ixgb_sw_init(adapter)))
+ if ((err = ixgb_sw_init(adapter)))
goto err_sw_init;
+#ifdef MAX_SKB_FRAGS
+#ifdef NETIF_F_HW_VLAN_TX
netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+#else
+ netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
+#endif
+#ifdef NETIF_F_TSO
netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_LLTX
- netdev->features |= NETIF_F_LLTX;
#endif
- if(pci_using_dac)
+ if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+#endif
+#ifdef NETIF_F_LLTX
+ netdev->features |= NETIF_F_LLTX;
+#endif
/* make sure the EEPROM is good */
- if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+ if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+#ifdef ETHTOOL_GPERMADDR
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
- if(!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
adapter->watchdog_timer.function = &ixgb_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
- INIT_WORK(&adapter->tx_timeout_task,
- (void (*)(void *))ixgb_tx_timeout_task, netdev);
+ INIT_WORK(&adapter->reset_task, ixgb_tx_timeout_task);
+
+ /* tell the stack to leave us alone until _up() completes */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
strcpy(netdev->name, "eth%d");
- if((err = register_netdev(netdev)))
+ err = register_netdev(netdev);
+ if (err)
goto err_register;
/* we're going to reset, so assume we have no link for now */
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
ixgb_check_options(adapter);
/* reset the hardware with the new settings */
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev_priv(netdev);
+ flush_scheduled_work();
+
unregister_netdev(netdev);
iounmap(adapter->hw.hw_addr);
hw->subsystem_id = pdev->subsystem_device;
hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
- adapter->rx_buffer_len = hw->max_frame_size;
+ adapter->rx_buffer_len = hw->max_frame_size + 8; /* +8 for errata */
- if((hw->device_id == IXGB_DEVICE_ID_82597EX)
+ if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
|| (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
|| (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
|| (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
- hw->mac_type = ixgb_82597;
+ hw->mac_type = ixgb_82597;
else {
/* should never have loaded on this device */
DPRINTK(PROBE, ERR, "unsupported device id\n");
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->tx_lock);
+ set_bit(__IXGB_DOWN, &adapter->flags);
return 0;
}
/* allocate transmit descriptors */
- if((err = ixgb_setup_tx_resources(adapter)))
+ if ((err = ixgb_setup_tx_resources(adapter)))
goto err_setup_tx;
/* allocate receive descriptors */
- if((err = ixgb_setup_rx_resources(adapter)))
+ if ((err = ixgb_setup_rx_resources(adapter)))
goto err_setup_rx;
- if((err = ixgb_up(adapter)))
+ if ((err = ixgb_up(adapter)))
goto err_up;
return 0;
int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
struct pci_dev *pdev = adapter->pdev;
int size;
- size = sizeof(struct ixgb_buffer) * txdr->count;
- txdr->buffer_info = vmalloc(size);
- if(!txdr->buffer_info) {
+ size = sizeof(struct ixgb_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vmalloc(size);
+ if (!tx_ring->buffer_info) {
DPRINTK(PROBE, ERR,
"Unable to allocate transmit descriptor ring memory\n");
return -ENOMEM;
}
- memset(txdr->buffer_info, 0, size);
+ memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct ixgb_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
- txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
- txdr->size = ALIGN(txdr->size, 4096);
-
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
- if(!txdr->desc) {
- vfree(txdr->buffer_info);
+ tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+ &tx_ring->dma);
+ if (!tx_ring->desc) {
+ vfree(tx_ring->buffer_info);
DPRINTK(PROBE, ERR,
"Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
- memset(txdr->desc, 0, txdr->size);
+ memset(tx_ring->desc, 0, tx_ring->size);
- txdr->next_to_use = 0;
- txdr->next_to_clean = 0;
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
return 0;
}
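/* Editorial note: with a 256-entry transmit ring the allocation above is
 * 256 * sizeof(struct ixgb_tx_desc) = 256 * 16 = 4096 bytes, so the
 * ALIGN(size, 4096) rounding leaves it exactly one 4 KB page. */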
uint32_t tctl;
struct ixgb_hw *hw = &adapter->hw;
- /* Setup the Base and Length of the Tx Descriptor Ring
- * tx_ring.dma can be either a 32 or 64 bit value
- */
+ /* Setup the Base and Length of the Tx Descriptor Ring tx_ring.dma can
+ * be either a 32 or 64 bit value */
IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
/* Setup Transmit Descriptor Settings for this adapter */
adapter->tx_cmd_type =
- IXGB_TX_DESC_TYPE
- | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
+ IXGB_TX_DESC_TYPE |
+ (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
/**
int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
struct pci_dev *pdev = adapter->pdev;
int size;
- size = sizeof(struct ixgb_buffer) * rxdr->count;
- rxdr->buffer_info = vmalloc(size);
- if(!rxdr->buffer_info) {
+ size = sizeof(struct ixgb_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vmalloc(size);
+ if (!rx_ring->buffer_info) {
DPRINTK(PROBE, ERR,
"Unable to allocate receive descriptor ring\n");
return -ENOMEM;
}
- memset(rxdr->buffer_info, 0, size);
+ memset(rx_ring->buffer_info, 0, size);
/* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(struct ixgb_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
- rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
- rxdr->size = ALIGN(rxdr->size, 4096);
-
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+ &rx_ring->dma);
- if(!rxdr->desc) {
- vfree(rxdr->buffer_info);
+ if (!rx_ring->desc) {
+ vfree(rx_ring->buffer_info);
DPRINTK(PROBE, ERR,
"Unable to allocate receive descriptors\n");
return -ENOMEM;
}
- memset(rxdr->desc, 0, rxdr->size);
+ memset(rx_ring->desc, 0, rx_ring->size);
- rxdr->next_to_clean = 0;
- rxdr->next_to_use = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
return 0;
}
rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
rctl |=
- IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
- IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
+ IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
+ IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
rctl |= IXGB_RCTL_SECRC;
struct ixgb_hw *hw = &adapter->hw;
uint32_t rctl;
uint32_t rxcsum;
- uint32_t rxdctl;
/* make sure receives are disabled while setting up the descriptors */
IXGB_WRITE_REG(hw, RDH, 0);
IXGB_WRITE_REG(hw, RDT, 0);
- /* set up pre-fetching of receive buffers so we get some before we
- * run out (default hardware behavior is to run out before fetching
- * more). This sets up to fetch if HTHRESH rx descriptors are avail
- * and the descriptors in hw cache are below PTHRESH. This avoids
- * the hardware behavior of fetching <=512 descriptors in a single
- * burst that pre-empts all other activity, usually causing fifo
- * overflows. */
- /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
- rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
- RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
- RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
- IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
+ /* due to the hardware errata with RXDCTL, we are unable to use any of
+ * its performance-enhancing features without causing other subtle
+ * bugs, such as receive length corruption at high data rates
+ * (WTHRESH > 0) and/or receive descriptor ring irregularities
+ * (particularly in the hardware cache) */
+ IXGB_WRITE_REG(hw, RXDCTL, 0);
/* Enable Receive Checksum Offload for TCP and UDP */
- if(adapter->rx_csum == TRUE) {
+ if (adapter->rx_csum == TRUE) {
rxcsum = IXGB_READ_REG(hw, RXCSUM);
rxcsum |= IXGB_RXCSUM_TUOFL;
IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
vfree(adapter->tx_ring.buffer_info);
adapter->tx_ring.buffer_info = NULL;
- pci_free_consistent(pdev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
+ pci_free_consistent(pdev, adapter->tx_ring.size, adapter->tx_ring.desc,
+ adapter->tx_ring.dma);
adapter->tx_ring.desc = NULL;
}
static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
- struct ixgb_buffer *buffer_info)
+ struct ixgb_buffer *buffer_info)
{
struct pci_dev *pdev = adapter->pdev;
if (buffer_info->skb)
dev_kfree_skb_any(buffer_info->skb);
-
+
buffer_info->skb = NULL;
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
unsigned int i;
/* Free all the Tx ring sk_buffs */
-
- for(i = 0; i < tx_ring->count; i++) {
+ for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
}
memset(tx_ring->buffer_info, 0, size);
/* Zero out the descriptor ring */
-
memset(tx_ring->desc, 0, tx_ring->size);
tx_ring->next_to_use = 0;
/* Free all the Rx ring sk_buffs */
- for(i = 0; i < rx_ring->count; i++) {
+ for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if(buffer_info->skb) {
+ if (buffer_info->skb) {
- pci_unmap_single(pdev,
- buffer_info->dma,
+ pci_unmap_single(pdev, buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
- if(!is_valid_ether_addr(addr->sa_data))
+ if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
rctl = IXGB_READ_REG(hw, RCTL);
- if(netdev->flags & IFF_PROMISC) {
+ if (netdev->flags & IFF_PROMISC) {
rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
- } else if(netdev->flags & IFF_ALLMULTI) {
+ } else if (netdev->flags & IFF_ALLMULTI) {
rctl |= IXGB_RCTL_MPE;
rctl &= ~IXGB_RCTL_UPE;
} else {
rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
}
- if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+ if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
IXGB_WRITE_REG(hw, RCTL, rctl);
- for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
- i++, mc_ptr = mc_ptr->next)
+ for (i = 0, mc_ptr = netdev->mc_list; mc_ptr;
+ i++, mc_ptr = mc_ptr->next)
memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
- mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
+ mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
}
{
struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
struct net_device *netdev = adapter->netdev;
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
ixgb_check_for_link(&adapter->hw);
netif_stop_queue(netdev);
}
- if(adapter->hw.link_up) {
- if(!netif_carrier_ok(netdev)) {
+ if (adapter->hw.link_up) {
+ if (!netif_carrier_ok(netdev)) {
DPRINTK(LINK, INFO,
- "NIC Link is Up 10000 Mbps Full Duplex\n");
+ "NIC Link is Up 10000 Mbps Full Duplex\n");
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
} else {
- if(netif_carrier_ok(netdev)) {
+ if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
DPRINTK(LINK, INFO, "NIC Link is Down\n");
ixgb_update_stats(adapter);
- if(!netif_carrier_ok(netdev)) {
- if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
- /* We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
- schedule_work(&adapter->tx_timeout_task);
+ if (!netif_carrier_ok(netdev)) {
+ if (IXGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
+ /* We've lost link, so the controller stops DMA, but
+ * we've got queued Tx work that's never going to get
+ * done, so reset controller to flush Tx. (Do the reset
+ * outside of interrupt context). */
+ schedule_work(&adapter->reset_task);
}
}
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
+#ifdef NETIF_F_TSO
struct ixgb_context_desc *context_desc;
unsigned int i;
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
uint16_t ipcse, tucse, mss;
int err;
- if (likely(skb_is_gso(skb))) {
+ if (likely(skb_shinfo(skb)->gso_size)) {
struct ixgb_buffer *buffer_info;
struct iphdr *iph;
context_desc->mss = cpu_to_le16(mss);
context_desc->hdr_len = hdr_len;
context_desc->status = 0;
- context_desc->cmd_type_len = cpu_to_le32(
- IXGB_CONTEXT_DESC_TYPE
- | IXGB_CONTEXT_DESC_CMD_TSE
- | IXGB_CONTEXT_DESC_CMD_IP
- | IXGB_CONTEXT_DESC_CMD_TCP
- | IXGB_CONTEXT_DESC_CMD_IDE
- | (skb->len - (hdr_len)));
-
-
- if(++i == adapter->tx_ring.count) i = 0;
+ context_desc->cmd_type_len =
+ cpu_to_le32(IXGB_CONTEXT_DESC_TYPE |
+ IXGB_CONTEXT_DESC_CMD_TSE |
+ IXGB_CONTEXT_DESC_CMD_IP |
+ IXGB_CONTEXT_DESC_CMD_TCP |
+ IXGB_CONTEXT_DESC_CMD_IDE |
+ (skb->len - (hdr_len)));
+
+ if (++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
- return 1;
+ return TRUE;
}
+#endif
- return 0;
+ return FALSE;
}
-static boolean_t
+static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
struct ixgb_context_desc *context_desc;
unsigned int i;
uint8_t css, cso;
- if(likely(skb->ip_summed == CHECKSUM_HW)) {
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct ixgb_buffer *buffer_info;
css = skb_transport_offset(skb);
- cso = (skb->h.raw + skb->csum) - skb->data;
+ cso = css + skb->csum_offset;
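+	/* Worked example (editorial sketch): for TCP over IPv4 with a
+	 * 14-byte Ethernet header and a 20-byte IP header, css = 34 (the
+	 * start of the TCP header); skb->csum_offset for TCP is 16
+	 * (offsetof(struct tcphdr, check)), so cso = 50, the byte offset
+	 * at which the hardware stores the computed checksum. */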
i = adapter->tx_ring.next_to_use;
context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
context_desc->hdr_len = 0;
context_desc->mss = 0;
context_desc->cmd_type_len =
- cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
- | IXGB_TX_DESC_CMD_IDE);
+ cpu_to_le32(IXGB_CONTEXT_DESC_TYPE |
+ IXGB_TX_DESC_CMD_IDE);
- if(++i == adapter->tx_ring.count) i = 0;
+ if (++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
return TRUE;
struct ixgb_buffer *buffer_info;
int len = skb->len;
unsigned int offset = 0, size, count = 0, i;
+#ifdef NETIF_F_TSO
unsigned int mss = skb_shinfo(skb)->gso_size;
+#endif
+#ifdef MAX_SKB_FRAGS
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
len -= skb->data_len;
+#endif
i = tx_ring->next_to_use;
- while(len) {
+ while (len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_DATA_PER_TXD);
+#ifdef NETIF_F_TSO
/* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */
if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
+#endif
buffer_info->length = size;
WARN_ON(buffer_info->dma != 0);
- buffer_info->dma =
- pci_map_single(adapter->pdev,
- skb->data + offset,
- size,
- PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->dma =
+ pci_map_single(adapter->pdev, skb->data + offset, size,
+ PCI_DMA_TODEVICE);
buffer_info->next_to_watch = 0;
len -= size;
offset += size;
count++;
- if(++i == tx_ring->count) i = 0;
+ if (++i == tx_ring->count) i = 0;
}
- for(f = 0; f < nr_frags; f++) {
+#ifdef MAX_SKB_FRAGS
+ for (f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
offset = 0;
- while(len) {
+ while (len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_DATA_PER_TXD);
-
+#ifdef NETIF_F_TSO
/* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && (f == (nr_frags - 1))
- && size == len && size > 8))
+ if (unlikely(mss && (f == (nr_frags-1)) && (size == len)
+ && (size > 8)))
size -= 4;
+#endif
buffer_info->length = size;
- buffer_info->dma =
- pci_map_page(adapter->pdev,
- frag->page,
- frag->page_offset + offset,
- size,
- PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->dma =
+ pci_map_page(adapter->pdev, frag->page,
+ frag->page_offset + offset, size,
+ PCI_DMA_TODEVICE);
buffer_info->next_to_watch = 0;
len -= size;
offset += size;
count++;
- if(++i == tx_ring->count) i = 0;
+ if (++i == tx_ring->count) i = 0;
}
}
+#endif
i = (i == 0) ? tx_ring->count - 1 : i - 1;
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
}
static void
-ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
+ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
+ int tx_flags)
{
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
struct ixgb_tx_desc *tx_desc = NULL;
uint8_t popts = 0;
unsigned int i;
- if(tx_flags & IXGB_TX_FLAGS_TSO) {
+ if (tx_flags & IXGB_TX_FLAGS_TSO) {
cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
}
- if(tx_flags & IXGB_TX_FLAGS_CSUM)
+ if (tx_flags & IXGB_TX_FLAGS_CSUM)
popts |= IXGB_TX_DESC_POPTS_TXSM;
- if(tx_flags & IXGB_TX_FLAGS_VLAN) {
+ if (tx_flags & IXGB_TX_FLAGS_VLAN) {
cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
}
i = tx_ring->next_to_use;
- while(count--) {
+ while (count--) {
buffer_info = &tx_ring->buffer_info[i];
tx_desc = IXGB_TX_DESC(*tx_ring, i);
tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
tx_desc->popts = popts;
tx_desc->vlan = cpu_to_le16(vlan_id);
- if(++i == tx_ring->count) i = 0;
+ if (++i == tx_ring->count) i = 0;
}
- tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
- | IXGB_TX_DESC_CMD_RS );
+ tx_desc->cmd_type_len |=
+ cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ /* Force memory writes to complete before letting h/w know there are
+ * new descriptors to fetch. (Only applicable for weak-ordered memory
+ * model archs, such as IA-64). */
wmb();
tx_ring->next_to_use = i;
return 0;
return __ixgb_maybe_stop_tx(netdev, size);
}
-
-
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
(((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->date */ + \
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+ 1 /* one more needed for sentinel TSO workaround */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD)
+#endif
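+
+/* Worked example (editorial sketch; assumes IXGB_MAX_TXD_PWR is 14, so
+ * IXGB_MAX_DATA_PER_TXD is 16384 bytes, as defined elsewhere in this
+ * driver): a 60000-byte linear TSO payload needs
+ *	TXD_USE_COUNT(60000) = (60000 >> 14) + 1 = 4 descriptors,
+ * and DESC_NEEDED reserves the worst case: one descriptor per possible
+ * page fragment plus one for the context descriptor and one more for
+ * the TSO sentinel workaround. */
+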
static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct ixgb_adapter *adapter = netdev_priv(netdev);
unsigned int first;
unsigned int tx_flags = 0;
- unsigned long flags;
+ unsigned long flags = 0;
int vlan_id = 0;
int tso;
- if(skb->len <= 0) {
- dev_kfree_skb_any(skb);
+ if (test_bit(__IXGB_DOWN, &adapter->flags)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb(skb);
return 0;
}
#ifdef NETIF_F_LLTX
- local_irq_save(flags);
- if (!spin_trylock(&adapter->tx_lock)) {
+ if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
/* Collision - tell upper layer to requeue */
- local_irq_restore(flags);
return NETDEV_TX_LOCKED;
- }
#else
spin_lock_irqsave(&adapter->tx_lock, flags);
#endif
- if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
- DESC_NEEDED))) {
- netif_stop_queue(netdev);
+ if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED))) {
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif
- if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
+#ifdef NETIF_F_HW_VLAN_TX
+ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= IXGB_TX_FLAGS_VLAN;
vlan_id = vlan_tx_tag_get(skb);
}
+#endif
first = adapter->tx_ring.next_to_use;
-
+
tso = ixgb_tso(adapter, skb);
if (tso < 0) {
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
#ifdef NETIF_F_LLTX
spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif
if (likely(tso))
tx_flags |= IXGB_TX_FLAGS_TSO;
- else if(ixgb_tx_csum(adapter, skb))
+ else if (ixgb_tx_csum(adapter, skb))
tx_flags |= IXGB_TX_FLAGS_CSUM;
ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
- tx_flags);
+ tx_flags);
netdev->trans_start = jiffies;
-#ifdef NETIF_F_LLTX
/* Make sure there is space in the ring for the next send. */
ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
+#ifdef NETIF_F_LLTX
spin_unlock_irqrestore(&adapter->tx_lock, flags);
-
#endif
return NETDEV_TX_OK;
}
struct ixgb_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
- schedule_work(&adapter->tx_timeout_task);
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
}
static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_adapter *adapter;
+ adapter = container_of(work, struct ixgb_adapter, reset_task);
- adapter->tx_timeout_count++;
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
}
int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
-
- if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
+ if ((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|| (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
return -EINVAL;
}
- adapter->rx_buffer_len = max_frame;
+ adapter->rx_buffer_len = max_frame + 8 /* + 8 for errata */;
netdev->mtu = new_mtu;
ixgb_update_stats(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- /* Prevent stats update while adapter is being reset */
- if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
- return;
-
- if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
+ if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
/* fix up multicast stats by removing broadcasts */
- if(multi >= bcast)
+ if (multi >= bcast)
multi -= bcast;
adapter->stats.mprcl += (multi & 0xFFFFFFFF);
adapter->net_stats.multicast = adapter->stats.mprcl;
adapter->net_stats.collisions = 0;
- /* ignore RLEC as it reports errors for padded (<64bytes) frames
- * with a length in the type/len field */
+ /* ignore RLEC as it reports errors for padded (<64 bytes) frames with a
+ * length in the type/len field */
adapter->net_stats.rx_errors =
- /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
- adapter->stats.ruc +
- adapter->stats.roc /*+ adapter->stats.rlec */ +
- adapter->stats.icbc +
- adapter->stats.ecbc + adapter->stats.mpc;
+ /* adapter->stats.rlec + adapter->stats.rnbc + */
+ adapter->stats.crcerrs + adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.icbc + adapter->stats.ecbc + adapter->stats.mpc;
+
+ adapter->net_stats.rx_dropped = adapter->stats.mpc;
/* see above
- * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
- */
+ * adapter->net_stats.rx_length_errors = adapter->stats.rlec; */
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
}
#define IXGB_MAX_INTR 10
+
/**
* ixgb_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
**/
static irqreturn_t
-ixgb_intr(int irq, void *data, struct pt_regs *regs)
+ixgb_intr(int irq, void *data)
{
struct net_device *netdev = data;
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
uint32_t icr = IXGB_READ_REG(hw, ICR);
+
#ifndef CONFIG_IXGB_NAPI
unsigned int i;
#endif
- if(unlikely(!icr))
- return IRQ_NONE; /* Not our interrupt */
+ if (unlikely(!icr))
+ return IRQ_NONE; /* Not our interrupt */
- if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
- mod_timer(&adapter->watchdog_timer, jiffies);
- }
+ if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
+ if (!test_bit(__IXGB_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer, jiffies);
#ifdef CONFIG_IXGB_NAPI
- if(netif_rx_schedule_prep(netdev)) {
-
- /* Disable interrupts and register for poll. The flush
- of the posted write is intentionally left out.
- */
+ if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
+ /* Disable interrupts and register for poll. The flush of the
+ * posted write is intentionally left out. */
atomic_inc(&adapter->irq_sem);
IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
- __netif_rx_schedule(netdev);
+ __netif_rx_schedule(netdev, &adapter->napi);
}
#else
- /* yes, that is actually a & and it is meant to make sure that
- * every pass through this for loop checks both receive and
- * transmit queues for completed descriptors, intended to
- * avoid starvation issues and assist tx/rx fairness. */
- for(i = 0; i < IXGB_MAX_INTR; i++)
- if(!ixgb_clean_rx_irq(adapter) &
- !ixgb_clean_tx_irq(adapter))
+ /* yes, that is actually a & and it is meant to make sure that every
+ * pass through this for loop checks both receive and transmit queues
+ * for completed descriptors, intended to avoid starvation issues and
+ * assist tx/rx fairness. */
+ for (i = 0; i < IXGB_MAX_INTR; i++)
+ if (!ixgb_clean_rx_irq(adapter) & !ixgb_clean_tx_irq(adapter))
break;
-#endif
+#endif
return IRQ_HANDLED;
}
#ifdef CONFIG_IXGB_NAPI
+
/**
- * ixgb_clean - NAPI Rx polling callback
+ * ixgb_poll - NAPI Rx polling callback
* @adapter: board private structure
**/
static int
-ixgb_clean(struct net_device *netdev, int *budget)
+ixgb_poll(struct napi_struct *napi, int budget)
{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- int work_to_do = min(*budget, netdev->quota);
- int tx_cleaned;
- int work_done = 0;
+ struct ixgb_adapter *adapter =
+ container_of(napi, struct ixgb_adapter, napi);
+ struct net_device *netdev = adapter->netdev;
+ int tx_cleaned, work_done = 0;
- tx_cleaned = ixgb_clean_tx_irq(adapter);
- ixgb_clean_rx_irq(adapter, &work_done, work_to_do);
+ if (test_bit(__IXGB_DOWN, &adapter->flags))
+ goto quit_polling;
- *budget -= work_done;
- netdev->quota -= work_done;
+ tx_cleaned = ixgb_clean_tx_irq(adapter);
+ ixgb_clean_rx_irq(adapter, &work_done, budget);
/* if no Tx and not enough Rx work done, exit the polling mode */
- if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
- netif_rx_complete(netdev);
- ixgb_irq_enable(adapter);
+ if (!tx_cleaned && (work_done == 0)) {
+quit_polling:
+ netif_rx_complete(netdev, napi);
+ if (test_bit(__IXGB_DOWN, &adapter->flags))
+ atomic_dec(&adapter->irq_sem);
+ else
+ ixgb_irq_enable(adapter);
return 0;
}
- return 1;
+ /* indicate we did *some* work if no rx */
+ if (tx_cleaned && (work_done == 0))
+ work_done++;
+
+ return work_done;
}
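
/* Registration sketch (editorial, assuming the 2.6.24 napi_struct API):
 * ixgb_probe() would hook this handler up with
 *
 *	netif_napi_add(netdev, &adapter->napi, ixgb_poll, 64);
 *
 * and ixgb_up()/ixgb_down() then bracket it with napi_enable() and
 * napi_disable(). */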
#endif
+
/**
* ixgb_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
**/
-static boolean_t
+static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
struct ixgb_tx_desc *tx_desc, *eop_desc;
struct ixgb_buffer *buffer_info;
unsigned int i, eop;
- boolean_t cleaned = FALSE;
+ bool cleaned = FALSE;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = IXGB_TX_DESC(*tx_ring, eop);
- while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
+ while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
- for(cleaned = FALSE; !cleaned; ) {
+ for (cleaned = FALSE; !cleaned;) {
tx_desc = IXGB_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
- if (tx_desc->popts
- & (IXGB_TX_DESC_POPTS_TXSM |
- IXGB_TX_DESC_POPTS_IXSM))
+ if (tx_desc->popts &
+ (IXGB_TX_DESC_POPTS_TXSM |
+ IXGB_TX_DESC_POPTS_IXSM))
adapter->hw_csum_tx_good++;
ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
-
*(uint32_t *)&(tx_desc->status) = 0;
-
cleaned = (i == eop);
- if(++i == tx_ring->count) i = 0;
+ if (++i == tx_ring->count) i = 0;
}
eop = tx_ring->buffer_info[i].next_to_watch;
tx_ring->next_to_clean = i;
- if (unlikely(netif_queue_stopped(netdev))) {
- spin_lock(&adapter->tx_lock);
- if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
- (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
+ if (unlikely(cleaned && netif_carrier_ok(netdev) &&
+ IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean. */
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__IXGB_DOWN, &adapter->flags))) {
netif_wake_queue(netdev);
- spin_unlock(&adapter->tx_lock);
+ ++adapter->restart_queue;
+ }
}
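	/* Editorial note (assuming the usual stop/wake pattern): the
	 * smp_mb() above pairs with a barrier in __ixgb_maybe_stop_tx(),
	 * ordering the next_to_clean update against the queue-stopped
	 * test so a concurrent ixgb_xmit_frame() cannot stop the queue
	 * just after we decided there was no need to wake it. */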
- if(adapter->detect_tx_hung) {
+ if (adapter->detect_tx_hung) {
/* detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = FALSE;
**/
static void
-ixgb_rx_checksum(struct ixgb_adapter *adapter,
- struct ixgb_rx_desc *rx_desc,
- struct sk_buff *skb)
+ixgb_rx_checksum(struct ixgb_adapter *adapter, struct ixgb_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- /* Ignore Checksum bit is set OR
- * TCP Checksum has not been calculated
- */
- if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
+ /* Ignore Checksum bit is set OR TCP Checksum has not been calculated */
+ if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
(!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
skb->ip_summed = CHECKSUM_NONE;
return;
/* At this point we know the hardware did the TCP checksum */
/* now look at the TCP checksum error bit */
- if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
+ if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
/* let the stack verify checksum errors */
skb->ip_summed = CHECKSUM_NONE;
adapter->hw_csum_rx_error++;
* @adapter: board private structure
**/
-static boolean_t
+static bool
#ifdef CONFIG_IXGB_NAPI
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
uint32_t length;
unsigned int i, j;
- boolean_t cleaned = FALSE;
+ int cleaned_count = 0;
+ bool cleaned = FALSE;
i = rx_ring->next_to_clean;
rx_desc = IXGB_RX_DESC(*rx_ring, i);
buffer_info = &rx_ring->buffer_info[i];
- while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
- struct sk_buff *skb, *next_skb;
+ while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
+ struct sk_buff *skb;
u8 status;
#ifdef CONFIG_IXGB_NAPI
- if(*work_done >= work_to_do)
+ if (*work_done >= work_to_do)
break;
(*work_done)++;
skb = buffer_info->skb;
buffer_info->skb = NULL;
- prefetch(skb->data);
+ prefetch(skb->data - NET_IP_ALIGN);
- if(++i == rx_ring->count) i = 0;
+ if (++i == rx_ring->count) i = 0;
next_rxd = IXGB_RX_DESC(*rx_ring, i);
prefetch(next_rxd);
- if((j = i + 1) == rx_ring->count) j = 0;
+ if ((j = i + 1) == rx_ring->count) j = 0;
next2_buffer = &rx_ring->buffer_info[j];
prefetch(next2_buffer);
next_buffer = &rx_ring->buffer_info[i];
- next_skb = next_buffer->skb;
- prefetch(next_skb);
cleaned = TRUE;
+ cleaned_count++;
- pci_unmap_single(pdev,
- buffer_info->dma,
- buffer_info->length,
+ pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
+ rx_desc->length = 0;
- if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
+ if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
/* All receives must fit into a single buffer */
IXGB_DBG("Receive packet consumed multiple buffers "
- "length<%x>\n", length);
+ "length<%x>\n", length);
dev_kfree_skb_irq(skb);
goto rxdesc_done;
}
- if (unlikely(rx_desc->errors
- & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
- | IXGB_RX_DESC_ERRORS_P |
- IXGB_RX_DESC_ERRORS_RXE))) {
+ if (unlikely(rx_desc->errors &
+ (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
+ IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
dev_kfree_skb_irq(skb);
goto rxdesc_done;
/* code added for copybreak, this should improve
* performance for small packets with large amounts
* of reassembly being done in the stack */
-#define IXGB_CB_LENGTH 256
- if (length < IXGB_CB_LENGTH) {
+ if (length < copybreak) {
struct sk_buff *new_skb =
- netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+ dev_alloc_skb(length + NET_IP_ALIGN);
if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN);
new_skb->dev = netdev;
- skb_copy_to_linear_data_offset(new_skb,
- -NET_IP_ALIGN,
- (skb->data -
- NET_IP_ALIGN),
- (length +
- NET_IP_ALIGN));
+ memcpy(new_skb->data - NET_IP_ALIGN,
+ skb->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
- if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
+#ifdef NETIF_F_HW_VLAN_TX
+ if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special) &
- IXGB_RX_DESC_SPECIAL_VLAN_MASK);
+ le16_to_cpu(rx_desc->special) &
+ IXGB_RX_DESC_SPECIAL_VLAN_MASK);
} else {
netif_receive_skb(skb);
}
+#else
+ netif_receive_skb(skb);
+#endif
#else /* CONFIG_IXGB_NAPI */
- if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
+#ifdef NETIF_F_HW_VLAN_TX
+ if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special) &
+ le16_to_cpu(rx_desc->special) &
IXGB_RX_DESC_SPECIAL_VLAN_MASK);
} else {
netif_rx(skb);
}
+#else
+ netif_rx(skb);
+#endif
#endif /* CONFIG_IXGB_NAPI */
netdev->last_rx = jiffies;
/* clean up descriptor, might be written over by hw */
rx_desc->status = 0;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
+ ixgb_alloc_rx_buffers(adapter, cleaned_count);
+ cleaned_count = 0;
+ }
+
/* use prefetched values */
rx_desc = next_rxd;
buffer_info = next_buffer;
rx_ring->next_to_clean = i;
- ixgb_alloc_rx_buffers(adapter);
+ cleaned_count = IXGB_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ ixgb_alloc_rx_buffers(adapter, cleaned_count);
return cleaned;
}
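
/* Editorial note: refilling in batches of IXGB_RX_BUFFER_WRITE (plus one
 * final refill for any remainder at loop exit) amortizes the MMIO cost of
 * the receive tail-register write over many buffers instead of paying it
 * once per received packet. */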
**/
static void
-ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
+ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
{
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
struct net_device *netdev = adapter->netdev;
struct ixgb_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
- int num_group_tail_writes;
long cleancount;
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
cleancount = IXGB_DESC_UNUSED(rx_ring);
- num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
-
/* leave three descriptors unused */
- while(--cleancount > 2) {
+ while (--cleancount > 2 && cleaned_count--) {
/* recycle! its good for you */
- skb = buffer_info->skb;
- if (skb) {
+ if (!(skb = buffer_info->skb))
+ skb = dev_alloc_skb(adapter->rx_buffer_len
+ + NET_IP_ALIGN);
+ else {
skb_trim(skb, 0);
goto map_skb;
}
- skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
- + NET_IP_ALIGN);
if (unlikely(!skb)) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
break;
}
- /* Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
+ /* Make buffer alignment 2 beyond a 16 byte boundary; this will
+ * result in a 16 byte aligned IP header after the 14 byte MAC
+ * header is removed */
skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = netdev;
+
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
* writeback. */
rx_desc->status = 0;
-
- if(++i == rx_ring->count) i = 0;
+ if (++i == rx_ring->count) i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
}
}
+/**
+ * ixgb_ioctl - perform a command, e.g. ethtool:get_driver_info.
+ * @param netdev network interface device structure
+ * @param ifr data to be used/filled in by the ioctl command
+ * @param cmd ioctl command to execute
+ **/
+
+static int
+ixgb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+#ifdef ETHTOOL_OPS_COMPAT
+ case SIOCETHTOOL:
+ return ethtool_ioctl(ifr);
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
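+
+/* Editorial note: ETHTOOL_OPS_COMPAT is a kcompat knob for kernels that
+ * predate struct ethtool_ops; on those kernels ethtool requests arrive
+ * only as SIOCETHTOOL ioctls and are dispatched through the compat
+ * ethtool_ioctl() shim instead of an ops table. */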
+
+#ifdef NETIF_F_HW_VLAN_TX
+
/**
* ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
- *
+ *
* @param netdev network interface device structure
* @param grp indicates to enable or disable tagging/stripping
**/
ixgb_irq_disable(adapter);
adapter->vlgrp = grp;
- if(grp) {
+ if (grp) {
/* enable VLAN tag insert/strip */
ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
ctrl |= IXGB_CTRL0_VME;
ixgb_irq_disable(adapter);
- if(adapter->vlgrp)
- adapter->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
ixgb_irq_enable(adapter);
- /* remove VID from filter table*/
+ /* remove VID from filter table */
index = (vid >> 5) & 0x7F;
vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
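	/* Worked example (editorial): the 4096-bit VLAN filter table is an
	 * array of 128 32-bit words, so for vid = 100 this reads word
	 * index (100 >> 5) & 0x7F = 3, and the bit to clear within that
	 * word is 100 & 0x1F = 4. */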
{
ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
- if(adapter->vlgrp) {
+ if (adapter->vlgrp) {
uint16_t vid;
- for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
- if(!adapter->vlgrp->vlan_devices[vid])
+
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
ixgb_vlan_rx_add_vid(adapter->netdev, vid);
}
}
}
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
+
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
-static void ixgb_netpoll(struct net_device *dev)
+static void
+ixgb_netpoll(struct net_device *dev)
{
struct ixgb_adapter *adapter = netdev_priv(dev);
disable_irq(adapter->pdev->irq);
- ixgb_intr(adapter->pdev->irq, dev, NULL);
+ ixgb_intr(adapter->pdev->irq, dev);
enable_irq(adapter->pdev->irq);
}
#endif
+#ifdef CONFIG_IXGB_PCI_ERS
/**
* ixgb_io_error_detected() - called when PCI error is detected
* @pdev pointer to pci device with error
* This callback is called by the PCI subsystem whenever
* a PCI bus error is detected.
*/
-static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
- enum pci_channel_state state)
+static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_adapter *adapter = netdev->priv;
- if(netif_running(netdev))
+ if (netif_running(netdev))
ixgb_down(adapter, TRUE);
pci_disable_device(pdev);
* ixgb_io_slot_reset - called after the pci bus has been reset.
* @pdev pointer to pci device with error
*
- * This callback is called after the PCI buss has been reset.
+ * This callback is called after the PCI bus has been reset.
* Basically, this tries to restart the card from scratch.
* This is a shortened version of the device probe/discovery code,
* it resembles the first-half of the ixgb_probe() routine.
*/
-static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
+static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_adapter *adapter = netdev->priv;
- if(pci_enable_device(pdev)) {
+ if (pci_enable_device(pdev)) {
DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
ixgb_reset(adapter);
/* Make sure the EEPROM is good */
- if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+ if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
- if(!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
* normal operation. Implementation resembles the second-half
* of the ixgb_probe() routine.
*/
-static void ixgb_io_resume (struct pci_dev *pdev)
+static void ixgb_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_adapter *adapter = netdev->priv;
pci_set_master(pdev);
- if(netif_running(netdev)) {
- if(ixgb_up(adapter)) {
+ if (netif_running(netdev)) {
+ if (ixgb_up(adapter)) {
printk ("ixgb: can't bring device back up after reset\n");
return;
}
}
netif_device_attach(netdev);
- mod_timer(&adapter->watchdog_timer, jiffies);
}
+#endif /* CONFIG_IXGB_PCI_ERS */
/* ixgb_main.c */
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
*******************************************************************************/
+
/* glue for the OS independent part of ixgb
* includes register access macros
*/
#define _IXGB_OSDEP_H_
#include <linux/types.h>
+#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include "kcompat.h"
+
+#ifdef bool
+#define boolean_t bool
+#endif
+
+#define usec_delay(x) udelay(x)
+#ifndef msec_delay
+#define msec_delay(x) do { if (in_interrupt()) { \
+ /* Don't mdelay in interrupt context! */ \
+ BUG(); \
+ } else { \
+ msleep(x); \
+ } } while (0)
+#endif
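+
+/* Usage note (editorial): usec_delay() busy-waits and is safe in any
+ * context, e.g. usec_delay(50) around a register access; msec_delay()
+ * sleeps via msleep(), e.g. msec_delay(10) while waiting for the link,
+ * and deliberately BUG()s if called from interrupt context. */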
-typedef enum {
-#undef FALSE
- FALSE = 0,
-#undef TRUE
- TRUE = 1
-} boolean_t;
+#define PCI_COMMAND_REGISTER PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
#undef ASSERT
#define ASSERT(x) if(!(x)) BUG()
#define DEBUGOUT7 DEBUGOUT3
#define IXGB_WRITE_REG(a, reg, value) ( \
- writel((value), ((a)->hw_addr + IXGB_##reg)))
+ writel((value), ((a)->hw_addr + IXGB_##reg)))
#define IXGB_READ_REG(a, reg) ( \
- readl((a)->hw_addr + IXGB_##reg))
+ readl((a)->hw_addr + IXGB_##reg))
#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
- writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
+ writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
- readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
+ readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
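
/* Example (editorial sketch; CTRL0 and IXGB_CTRL0_RST come from ixgb_hw.h):
 * the macros token-paste the IXGB_ register prefix onto the name, so a
 * posted write followed by a flushing read looks like
 *
 *	IXGB_WRITE_REG(hw, CTRL0, IXGB_CTRL0_RST);
 *	IXGB_WRITE_FLUSH(hw);	(reads STATUS to push the write out)
 */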
/*******************************************************************************
Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
+ Copyright(c) 1999 - 2007 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define IXGB_MAX_NIC 8
-#define OPTION_UNSET -1
+#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
*/
#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
-#define IXGB_PARAM(X, desc) \
- static int __devinitdata X[IXGB_MAX_NIC+1] \
- = IXGB_PARAM_INIT; \
- static unsigned int num_##X = 0; \
- module_param_array_named(X, X, int, &num_##X, 0); \
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user-specified value and the
+ * user asking for the default value.
+ * The true default values are loaded in when ixgb_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define IXGB_PARAM(X, desc) \
+ static const int __devinitdata X[IXGB_MAX_NIC+1] = IXGB_PARAM_INIT; \
+ MODULE_PARM(X, "1-" __MODULE_STRING(IXGB_MAX_NIC) "i"); \
+ MODULE_PARM_DESC(X, desc);
+#else
+#define IXGB_PARAM(X, desc) \
+ static int __devinitdata X[IXGB_MAX_NIC+1] = IXGB_PARAM_INIT; \
+ static unsigned int num_##X = 0; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
+#endif
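+
+/* Example (editorial): each option below is declared through this macro,
+ * e.g.
+ *	IXGB_PARAM(TxDescriptors, "Number of transmit descriptors");
+ * which creates the per-board array TxDescriptors[IXGB_MAX_NIC+1] and,
+ * on module_param_array kernels, the matching num_TxDescriptors count. */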
/* Transmit Descriptor Count
*
IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold");
+
/* Receive Flow control low threshold (when we send a resume frame)
* (FCRTL)
*
IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
+
/* Flow control request timeout (how long to pause the link partner's tx)
* (PAP 15:0)
*
- * Valid Range: 1 - 65535
+ * Valid Range: 1 - 65535
*
* Default Value: 65535 (0xffff) (we'll send an xon if we recover)
*/
IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
+
/* Interrupt Delay Enable
*
* Valid Range: 0, 1
#define MIN_FCRTH 8
#define MAX_FCRTH 0x3FFF0
-#define MIN_FCPAUSE 1
-#define MAX_FCPAUSE 0xffff
#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
+#define MIN_FCPAUSE 1
+#define MAX_FCPAUSE 0xFFFF
+
struct ixgb_option {
enum { enable_option, range_option, list_option } type;
const char *err;
int def;
union {
- struct { /* range_option info */
+ struct { /* range_option info */
int min;
int max;
} r;
- struct { /* list_option info */
+ struct { /* list_option info */
int nr;
- struct ixgb_opt_list {
- int i;
- char *str;
- } *p;
+ struct ixgb_opt_list { int i; char *str; } *p;
} l;
} arg;
};
static int __devinit
-ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
+ixgb_validate_option(unsigned int *value, struct ixgb_option *opt)
{
- if(*value == OPTION_UNSET) {
+ if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
}
break;
case range_option:
- if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
printk(KERN_INFO "%s set to %i\n", opt->name, *value);
return 0;
}
int i;
struct ixgb_opt_list *ent;
- for(i = 0; i < opt->arg.l.nr; i++) {
+ for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
- if(*value == ent->i) {
- if(ent->str[0] != '\0')
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
printk(KERN_INFO "%s\n", ent->str);
return 0;
}
}
printk(KERN_INFO "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
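
/* Usage sketch (editorial): callers build an ixgb_option describing the
 * legal range and the default, then validate the user's value in place;
 * out-of-range values are replaced by opt.def with a message, e.g.
 *
 *	unsigned int v = RxIntDelay[bd];
 *	ixgb_validate_option(&v, &opt);
 *	adapter->rx_int_delay = v;
 */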
+#define LIST_LEN(l) (sizeof(l) / sizeof(l[0]))
+
/**
* ixgb_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
void __devinit
ixgb_check_options(struct ixgb_adapter *adapter)
{
- int bd = adapter->bd_number;
- if(bd >= IXGB_MAX_NIC) {
+ unsigned int bd = adapter->bd_number;
+ if (bd >= IXGB_MAX_NIC) {
printk(KERN_NOTICE
- "Warning: no configuration for board #%i\n", bd);
+ "Warning: no configuration for board #%i\n", bd);
printk(KERN_NOTICE "Using defaults for all values\n");
+#ifndef module_param_array
+ bd = IXGB_MAX_NIC;
+#endif
}
{ /* Transmit Descriptor Count */
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_TXD),
};
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- if(num_TxDescriptors > bd) {
+#ifdef module_param_array
+ if (num_TxDescriptors > bd) {
+#endif
tx_ring->count = TxDescriptors[bd];
ixgb_validate_option(&tx_ring->count, &opt);
+#ifdef module_param_array
} else {
tx_ring->count = opt.def;
}
- tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
+#endif
+ tx_ring->count = ALIGN(tx_ring->count,
+ IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Descriptor Count */
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_RXD),
};
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- if(num_RxDescriptors > bd) {
+#ifdef module_param_array
+ if (num_RxDescriptors > bd) {
+#endif
rx_ring->count = RxDescriptors[bd];
ixgb_validate_option(&rx_ring->count, &opt);
+#ifdef module_param_array
} else {
rx_ring->count = opt.def;
}
- rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
+#endif
+ rx_ring->count = ALIGN(rx_ring->count,
+ IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Checksum Offload Enable */
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = enable_option,
.name = "Receive Checksum Offload",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
- if(num_XsumRX > bd) {
+#ifdef module_param_array
+ if (num_XsumRX > bd) {
+#endif
unsigned int rx_csum = XsumRX[bd];
ixgb_validate_option(&rx_csum, &opt);
adapter->rx_csum = rx_csum;
+#ifdef module_param_array
} else {
adapter->rx_csum = opt.def;
}
+#endif
}
{ /* Flow Control */
struct ixgb_opt_list fc_list[] =
- {{ ixgb_fc_none, "Flow Control Disabled" },
+ {{ ixgb_fc_none, "Flow Control Disabled" },
{ ixgb_fc_rx_pause,"Flow Control Receive Only" },
{ ixgb_fc_tx_pause,"Flow Control Transmit Only" },
- { ixgb_fc_full, "Flow Control Enabled" },
+ { ixgb_fc_full, "Flow Control Enabled" },
{ ixgb_fc_default, "Flow Control Hardware Default" }};
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
.def = ixgb_fc_tx_pause,
- .arg = { .l = { .nr = ARRAY_SIZE(fc_list),
+ .arg = { .l = { .nr = LIST_LEN(fc_list),
.p = fc_list }}
};
- if(num_FlowControl > bd) {
+#ifdef module_param_array
+ if (num_FlowControl > bd) {
+#endif
unsigned int fc = FlowControl[bd];
ixgb_validate_option(&fc, &opt);
adapter->hw.fc.type = fc;
+#ifdef module_param_array
} else {
adapter->hw.fc.type = opt.def;
}
+#endif
}
{ /* Receive Flow Control High Threshold */
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control High Threshold",
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
.max = MAX_FCRTH}}
};
- if(num_RxFCHighThresh > bd) {
+#ifdef module_param_array
+ if (num_RxFCHighThresh > bd) {
+#endif
adapter->hw.fc.high_water = RxFCHighThresh[bd];
ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
+#ifdef module_param_array
} else {
adapter->hw.fc.high_water = opt.def;
}
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+#endif
+	if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
printk (KERN_INFO
"Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control Low Threshold",
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
.max = MAX_FCRTL}}
};
- if(num_RxFCLowThresh > bd) {
+#ifdef module_param_array
+ if (num_RxFCLowThresh > bd) {
+#endif
adapter->hw.fc.low_water = RxFCLowThresh[bd];
ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
+#ifdef module_param_array
} else {
adapter->hw.fc.low_water = opt.def;
}
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+#endif
+	if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
printk (KERN_INFO
"Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = range_option,
.name = "Flow Control Pause Time Request",
.err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
.max = MAX_FCPAUSE}}
};
- if(num_FCReqTimeout > bd) {
+#ifdef module_param_array
+ if (num_FCReqTimeout > bd) {
+#endif
unsigned int pause_time = FCReqTimeout[bd];
ixgb_validate_option(&pause_time, &opt);
adapter->hw.fc.pause_time = pause_time;
+#ifdef module_param_array
} else {
adapter->hw.fc.pause_time = opt.def;
}
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+#endif
+	if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
printk (KERN_INFO
"Ignoring FCReqTimeout when no RxFC\n");
}
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
- printk (KERN_INFO
+ printk (KERN_INFO
"RxFCHighThresh must be >= (RxFCLowThresh + 8), "
"Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
}
}
{ /* Receive Interrupt Delay */
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
.max = MAX_RDTR}}
};
- if(num_RxIntDelay > bd) {
+#ifdef module_param_array
+ if (num_RxIntDelay > bd) {
+#endif
adapter->rx_int_delay = RxIntDelay[bd];
ixgb_validate_option(&adapter->rx_int_delay, &opt);
+#ifdef module_param_array
} else {
adapter->rx_int_delay = opt.def;
}
+#endif
}
{ /* Transmit Interrupt Delay */
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
.max = MAX_TIDV}}
};
- if(num_TxIntDelay > bd) {
+#ifdef module_param_array
+ if (num_TxIntDelay > bd) {
+#endif
adapter->tx_int_delay = TxIntDelay[bd];
ixgb_validate_option(&adapter->tx_int_delay, &opt);
+#ifdef module_param_array
} else {
adapter->tx_int_delay = opt.def;
}
+#endif
}
{ /* Transmit Interrupt Delay Enable */
- const struct ixgb_option opt = {
+ struct ixgb_option opt = {
.type = enable_option,
.name = "Tx Interrupt Delay Enable",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
- if(num_IntDelayEnable > bd) {
+#ifdef module_param_array
+ if (num_IntDelayEnable > bd) {
+#endif
unsigned int ide = IntDelayEnable[bd];
ixgb_validate_option(&ide, &opt);
adapter->tx_int_delay_enable = ide;
+#ifdef module_param_array
} else {
adapter->tx_int_delay_enable = opt.def;
}
+#endif
}
}
+
--- /dev/null
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2007 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifdef DRIVER_IXGB
+#include "ixgb.h"
+#endif
+
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+ PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return pci_map_single(dev, (void *)page_address(page) + offset, size,
+ direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+ int direction)
+{
+ return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+ if (!pci_dma_supported(dev, mask))
+ return -EIO;
+ dev->dma_mask = mask;
+ return 0;
+}
+
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ }
+ }
+ return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+ }
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+ struct net_device *dev;
+ int alloc_size;
+
+ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+ dev = kmalloc(alloc_size, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ memset(dev, 0, alloc_size);
+
+ if (sizeof_priv)
+ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+ dev->name[0] = '\0';
+ ether_setup(dev);
+
+ return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+ const char zaddr[6] = { 0, };
+
+ return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
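+
+/* Editorial note: this backport rejects multicast addresses (I/G bit set
+ * in the first octet) and the all-zeros address; the broadcast address
+ * ff:ff:ff:ff:ff:ff is caught by the multicast test, since its I/G bit
+ * is set. */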
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+ return 0;
+}
+
+int
+_kc_pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+ return 0;
+}
+
+int
+_kc_pci_restore_state(struct pci_dev *pdev, u32 *buffer)
+{
+ return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+ return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+ int off, int size)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ frag->page = page;
+ frag->page_offset = off;
+ frag->size = size;
+ skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ * @offset: The bit number to start searching at
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffs(tmp) - 1; /* ffs() is 1-based; subtract one
+                                * to match __ffs() semantics */
+}
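+
+/* Usage note (editorial): with the 1-based ffs() converted to a 0-based
+ * index above, the fallback matches the 2.6 semantics: for a single word
+ * with only bit 4 set, find_next_bit(&word, BITS_PER_LONG, 0) returns 4,
+ * and searching from offset 5 returns the full size (not found). */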
+
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+void *_kc_kzalloc(size_t size, int flags)
+{
+ void *ret = kmalloc(size, flags);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
+ unsigned int length)
+{
+	/* 16 == NET_SKB_PAD */
+ struct sk_buff *skb;
+ skb = alloc_skb(length + 16, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ skb_reserve(skb, 16);
+ skb->dev = dev;
+ }
+ return skb;
+}
+#endif /* <= 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+ int work_done;
+ int work_to_do = min(*budget, netdev->quota);
+ struct adapter_struct *adapter = netdev_priv(netdev);
+#ifdef DRIVER_IXGB
+ struct napi_struct *napi = &adapter->napi;
+#else
+ struct napi_struct *napi = &adapter->rx_ring[0].napi;
+#endif
+
+ work_done = napi->poll(napi, work_to_do);
+ *budget -= work_done;
+ netdev->quota -= work_done;
+ return work_done ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* <= 2.6.24 */
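+
+/* Registration sketch (editorial, assuming the pre-2.6.24 netdev->poll
+ * interface): on old kernels the driver points the net device at this
+ * shim, which forwards into the new-style napi->poll handler, e.g.
+ *
+ *	netdev->poll = __kc_adapter_clean;
+ *	netdev->weight = 64;
+ */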
+
--- /dev/null
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2007 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <asm/io.h>
+
+/* NAPI enable/disable flags here */
+
+#ifdef _E1000_H_
+#ifdef CONFIG_E1000_NAPI
+#define NAPI
+#endif
+#ifdef E1000_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef E1000_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+#ifdef _IGB_H_
+#define NAPI
+#endif
+
+#ifdef _IXGB_H_
+#ifdef CONFIG_IXGB_NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+#ifdef _IXGBE_H_
+#ifdef CONFIG_IXGBE_NAPI
+#define NAPI
+#endif
+#ifdef IXGBE_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGBE_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+
+#ifdef DRIVER_IXGB
+#define adapter_struct ixgb_adapter
+#endif
+
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_E1000_NAPI
+#define CONFIG_E1000_NAPI
+#endif
+#ifndef CONFIG_IXGB_NAPI
+#define CONFIG_IXGB_NAPI
+#endif
+#ifdef _IXGBE_H_
+#ifndef CONFIG_IXGBE_NAPI
+#define CONFIG_IXGBE_NAPI
+#endif
+#endif /* _IXGBE_H */
+#else
+#undef CONFIG_E1000_NAPI
+#undef CONFIG_IXGB_NAPI
+#ifdef _IXGBE_H_
+#undef CONFIG_IXGBE_NAPI
+#endif /* _IXGBE_H */
+#endif
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT
+#endif
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+#endif
+#define pci_enable_msi(a) -ENOTSUPP
+#define pci_disable_msi(a) do {} while (0)
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#endif
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
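+
+/* Editorial note: gcc earlier than 3.0 has no _Bool, so it is mapped to
+ * char above; bool/true/false are then provided as macros so the driver
+ * can use 2.6.19-style booleans on any kernel/compiler combination. */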
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned,
+ * just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+ NETIF_MSG_HW = 0x2000,
+ NETIF_MSG_WOL = 0x4000,
+};
+
+#else
+#define NETIF_MSG_HW 0x2000
+#define NETIF_MSG_WOL 0x4000
+#endif /* HAVE_NETIF_MSG */
+
+#ifndef MII_RESV1
+#define MII_RESV1 0x17 /* Reserved... */
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+
+/*****************************************************************************/
+/* Installations with an ethtool version lacking eeprom, adapter id, or
+ * statistics support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+ u32 cmd;
+ char driver[32];
+ char version[32];
+ char fw_version[32];
+ char bus_info[32];
+ char reserved1[32];
+ char reserved2[16];
+ u32 n_stats;
+ u32 testinfo_len;
+ u32 eedump_len;
+ u32 regdump_len;
+};
+
+struct ethtool_stats {
+ u32 cmd;
+ u32 n_stats;
+ u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+ u32 cmd; /* ETHTOOL_GSTRINGS */
+ u32 string_set; /* string set id, e.g. ETH_SS_TEST */
+ u32 len; /* number of strings in the string set */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+ ETH_TEST_FL_OFFLINE = (1 << 0),
+ ETH_TEST_FL_FAILED = (1 << 1),
+};
+struct ethtool_test {
+ u32 cmd;
+ u32 flags;
+ u32 reserved;
+ u32 len;
+ u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+ u32 cmd;
+ u32 magic;
+ u32 offset;
+ u32 len;
+ u8 data[0];
+};
+
+struct ethtool_value {
+ u32 cmd;
+ u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+ u32 cmd;
+ u32 version; /* driver-specific, indicates different chips/revs */
+ u32 len; /* bytes */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+ u32 cmd; /* ETHTOOL_{G,S}COALESCE */
+
+ /* How many usecs to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_max_coalesced_frames
+ * is used.
+ */
+ u32 rx_coalesce_usecs;
+
+ /* How many packets to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause RX interrupts to never be
+ * generated.
+ */
+ u32 rx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 rx_coalesce_usecs_irq;
+ u32 rx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_max_coalesced_frames
+ * is used.
+ */
+ u32 tx_coalesce_usecs;
+
+ /* How many packets to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause TX interrupts to never be
+ * generated.
+ */
+ u32 tx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 tx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay in-memory statistics
+ * block updates. Some drivers do not have an in-memory
+ * statistic block, and in such cases this value is ignored.
+ * This value must not be zero.
+ */
+ u32 stats_block_coalesce_usecs;
+
+ /* Adaptive RX/TX coalescing is an algorithm implemented by
+ * some drivers to improve latency under low packet rates and
+ * improve throughput under high packet rates. Some drivers
+ * only implement one of RX or TX adaptive coalescing. Anything
+ * not implemented by the driver causes these values to be
+ * silently ignored.
+ */
+ u32 use_adaptive_rx_coalesce;
+ u32 use_adaptive_tx_coalesce;
+
+ /* When the packet rate (measured in packets per second)
+ * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+ * used.
+ */
+ u32 pkt_rate_low;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_max_coalesced_frames_low;
+ u32 tx_coalesce_usecs_low;
+ u32 tx_max_coalesced_frames_low;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+ /* When the packet rate (measured in packets per second)
+ * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ u32 pkt_rate_high;
+ u32 rx_coalesce_usecs_high;
+ u32 rx_max_coalesced_frames_high;
+ u32 tx_coalesce_usecs_high;
+ u32 tx_max_coalesced_frames_high;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ u32 rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
+
+ /* Read only attributes. These indicate the maximum number
+ * of pending RX/TX ring entries the driver will allow the
+ * user to set.
+ */
+ u32 rx_max_pending;
+ u32 rx_mini_max_pending;
+ u32 rx_jumbo_max_pending;
+ u32 tx_max_pending;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ u32 rx_pending;
+ u32 rx_mini_pending;
+ u32 rx_jumbo_pending;
+ u32 tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
+
+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+ * being true) the user may set 'autoneg' here non-zero to have the
+ * pause parameters be auto-negotiated too. In such a case, the
+ * {rx,tx}_pause values below determine what capabilities are
+ * advertised.
+ *
+ * If 'autoneg' is zero or the link is not being auto-negotiated,
+ * then {rx,tx}_pause force the driver to use/not-use pause
+ * flow control.
+ */
+ u32 autoneg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
+ * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
+ * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN 32
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+extern void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+extern int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_save_state
+#define pci_save_state _kc_pci_save_state
+extern int _kc_pci_save_state(struct pci_dev *dev, u32 *buffer);
+#endif
+
+#ifndef pci_restore_state
+#define pci_restore_state _kc_pci_restore_state
+extern int _kc_pci_restore_state(struct pci_dev *pdev, u32 *buffer);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+extern void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
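+/* Note: these pci_set_mwi/pci_clear_mwi fallbacks are not self-contained;
+ * they assume a local "adapter" whose cached PCI command word lives at
+ * adapter->hw.bus.pci_cmd_word. */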
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+ PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+ ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+ #define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+#endif
+
+#endif /* 2.4.10 => 2.4.9 */
+
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax() rep_nop()
+#endif
+
+#endif /* 2.4.13 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+ #define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef CONFIG_E1000_NAPI
+#undef CONFIG_IXGB_NAPI
+#ifdef _IXGBE_H_
+#undef CONFIG_IXGBE_NAPI
+#endif /* _IXGBE_H_ */
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x) ((x)->slot_name)
+#endif
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#ifndef IXGBE_NO_LRO
+/* Don't enable LRO for these legacy kernels */
+#define IXGBE_NO_LRO
+#endif
+#endif
+
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+ /* No hurry */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1);
+ }
+}
+#endif
+
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+ spin_lock_bh(&dev->xmit_lock);
+ netif_stop_queue(dev);
+ spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#include <net/sock.h>
+#define sk_protocol protocol
+
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+ /* use default */
+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+ return default_msg_enable_bits;
+ if (debug_value == 0) /* no output */
+ return 0;
+ /* set low N bits */
+ return (1 << debug_value) - 1;
+}
+#endif
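+
+/*
+ * Example (illustrative): probe code typically seeds the message level with
+ * this helper, where "debug" is a module parameter:
+ *
+ *    adapter->msg_enable = netif_msg_init(debug,
+ *                NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ */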
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+#define dev_err(__unused_dev, format, arg...) \
+ printk(KERN_ERR "%s: " format, pci_name(pdev) , ## arg)
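+
+/* Note: this dev_err fallback ignores its device argument and instead
+ * expects a local "pdev" to be in scope at every call site. */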
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = NULL;
+ n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each_entry(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ n = pos->next; 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = n)
+
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+static inline void _kc_synchronize_irq(void)
+{
+ synchronize_irq();
+}
+#undef synchronize_irq
+#define synchronize_irq(X) _kc_synchronize_irq()
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+
+#define pci_set_consistent_dma_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_addr == 0;
+}
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the redhat 2.4.21 kernels)
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+extern unsigned long _kc_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
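+
+/*
+ * Example (illustrative; "bitmap" and "nbits" are hypothetical): with the
+ * fallbacks above, 2.6-style bitmap scans also work on 2.4 kernels:
+ *
+ *    unsigned long first = find_first_bit(bitmap, nbits);
+ */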
+
+#endif /* 2.6.0 => 2.5.28 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define pci_dma_sync_single_for_cpu pci_dma_sync_single
+#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+ return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x * HZ)/1000 + 2); \
+ } while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC 1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+ return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return m * (HZ / MSEC_PER_SEC);
+#else
+ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+ while (timeout && !signal_pending(current)) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
+ }
+ return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef pci_save_state
+#undef pci_save_state
+#endif
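+/* Note: this replacement assumes a local "adapter" with a u32 *pci_state
+ * member; when pci_state is NULL the restore falls back to rewriting the
+ * BARs from the resource table. */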
+#define pci_save_state(X) { \
+ int i; \
+ if (adapter->pci_state) { \
+ for (i = 0; i < 16; i++) { \
+ pci_read_config_dword((X), \
+ i * 4, \
+ &adapter->pci_state[i]); \
+ } \
+ } \
+}
+
+#ifdef pci_restore_state
+#undef pci_restore_state
+#endif
+#define pci_restore_state(X) { \
+ int i; \
+ if (adapter->pci_state) { \
+ for (i = 0; i < 16; i++) { \
+ pci_write_config_dword((X), \
+ i * 4, \
+ adapter->pci_state[i]); \
+ } \
+ } else { \
+ for (i = 0; i < 6; i++) { \
+ pci_write_config_dword((X), \
+ PCI_BASE_ADDRESS_0 + (i * 4), \
+ (X)->resource[i].start); \
+ } \
+ } \
+}
+#endif /* 2.4.6 <= x < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm) \
+ static struct kparam_array __param_arr_##name \
+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+ sizeof(array[0]), array }; \
+ module_param_call(name, param_array_set, param_array_get, \
+ &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#define PCI_D3cold 4
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS 0x0f /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef netdev_alloc_skb
+#define netdev_alloc_skb _kc_netdev_alloc_skb
+extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
+ unsigned int length);
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#ifndef RHEL_RELEASE_CODE
+#define RHEL_RELEASE_CODE 0
+#endif
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) 0
+#endif
+#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) )))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif
+{
+ irq_handler_t new_handler = (irq_handler_t) handler;
+ return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+/* pci_save_state and pci_restore_state handle MSI/PCIe state from 2.6.19 onward */
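+/* Note: the replacements below are not self-contained; they assume local
+ * variables "adapter" and "pdev" in scope and a u32 *config_space member
+ * in the adapter structure. */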
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#undef pci_save_state
+#define pci_save_state(pdev) _kc_pci_save_state(adapter)
+#define _kc_pci_save_state(adapter) 0; { \
+ int size, i; \
+ u16 pcie_link_status; \
+ \
+ u16 cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); \
+ if (cap_offset) { \
+ if (pci_read_config_word(pdev, cap_offset + PCIE_LINK_STATUS, &pcie_link_status)) \
+ size = PCI_CONFIG_SPACE_LEN; \
+ else \
+ size = PCIE_CONFIG_SPACE_LEN; \
+ WARN_ON(adapter->config_space != NULL); \
+ adapter->config_space = kmalloc(size, GFP_KERNEL); \
+ if (!adapter->config_space) { \
+ printk(KERN_ERR "Out of memory in pci_save_msi_state\n"); \
+ return -ENOMEM; \
+ } \
+ for (i = 0; i < (size / 4); i++) \
+ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); \
+ } \
+}
+#undef pci_restore_state
+#define pci_restore_state(pdev) _kc_pci_restore_state(adapter)
+#define _kc_pci_restore_state(adapter) { \
+ int size, i; \
+ u16 pcie_link_status; \
+ \
+ u16 cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); \
+ if (cap_offset) { \
+ if (adapter->config_space != NULL) { \
+ if (pci_read_config_word(pdev, cap_offset + PCIE_LINK_STATUS, &pcie_link_status)) \
+ size = PCI_CONFIG_SPACE_LEN; \
+ else \
+ size = PCIE_CONFIG_SPACE_LEN; \
+ \
+ for (i = 0; i < (size / 4); i++) \
+ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); \
+ kfree(adapter->config_space); \
+ adapter->config_space = NULL; \
+ } \
+ } \
+}
+
+#endif /* < 2.6.19 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->pending = 0; \
+ (_work)->func = (void (*)(void *))_func; \
+ (_work)->data = _work; \
+ init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev) \
+ PCI_VENDOR_ID_##ven, (dev), \
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev;
+#define pci_channel_offline(pdev) (pdev->error_state && \
+ pdev->error_state != pci_channel_io_normal)
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+ memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+/* The NAPI API changed in 2.6.24 (struct napi_struct replaced the
+ * netdev-embedded poll method), so emulate the new interface here */
+struct napi_struct {
+ /* used to look up the real NAPI polling routine */
+ int (*poll)(struct napi_struct *, int);
+};
+extern int __kc_adapter_clean(struct net_device *, int *);
+#define netif_rx_complete(netdev, napi) netif_rx_complete(netdev)
+#define netif_rx_schedule_prep(netdev, napi) netif_rx_schedule_prep(netdev)
+#define netif_rx_schedule(netdev, napi) netif_rx_schedule(netdev)
+#define __netif_rx_schedule(netdev, napi) __netif_rx_schedule(netdev)
+#define napi_enable(napi) netif_poll_enable(adapter->netdev)
+#define napi_disable(napi) netif_poll_disable(adapter->netdev)
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+ do { \
+ struct napi_struct *__napi = _napi; \
+ _netdev->poll = &(__kc_adapter_clean); \
+ _netdev->weight = (_weight); \
+ __napi->poll = &(_poll); \
+ netif_poll_disable(_netdev); \
+ } while (0)
+#endif /* < 2.6.24 */
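+
+/*
+ * Note: napi_enable/napi_disable above expect a local "adapter" in scope.
+ * Example (illustrative; the poll routine name is hypothetical) of the
+ * compat netif_napi_add in an open/probe path:
+ *
+ *    netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
+ */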
+
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#undef ETHTOOL_GPERMADDR
+#undef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do { } while (0)
+#endif /* > 2.6.22 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#endif /* < 2.6.24 */
+
+#endif /* _KCOMPAT_H_ */
+
--- /dev/null
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2007 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * net/core/ethtool.c - Ethtool ioctl handler
+ * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This file is where we call all the ethtool_ops commands to get
+ * the information ethtool needs. We fall back to calling do_ioctl()
+ * for drivers which haven't been converted to ethtool_ops yet.
+ *
+ * It's GPL, stupid.
+ *
+ * Modification by sfeldma@pobox.com to work as backward compat
+ * solution for pre-ethtool_ops kernels.
+ * - copied struct ethtool_ops from ethtool.h
+ * - defined SET_ETHTOOL_OPS
+ * - put in some #ifndef NETIF_F_xxx wrappers
+ * - changes refs to dev->ethtool_ops to ethtool_ops
+ * - changed dev_ethtool to ethtool_ioctl
+ * - remove EXPORT_SYMBOL()s
+ * - added _kc_ prefix in built-in ethtool_op_xxx ops.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/uaccess.h>
+
+#include "kcompat.h"
+
+#undef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#undef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#undef SPEED_10000
+#define SPEED_10000 10000
+
+#undef ethtool_ops
+#define ethtool_ops _kc_ethtool_ops
+
+struct _kc_ethtool_ops {
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*set_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+ int (*get_regs_len)(struct net_device *);
+ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
+ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
+ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
+ u32 (*get_msglevel)(struct net_device *);
+ void (*set_msglevel)(struct net_device *, u32);
+ int (*nway_reset)(struct net_device *);
+ u32 (*get_link)(struct net_device *);
+ int (*get_eeprom_len)(struct net_device *);
+ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ void (*get_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ int (*set_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ u32 (*get_rx_csum)(struct net_device *);
+ int (*set_rx_csum)(struct net_device *, u32);
+ u32 (*get_tx_csum)(struct net_device *);
+ int (*set_tx_csum)(struct net_device *, u32);
+ u32 (*get_sg)(struct net_device *);
+ int (*set_sg)(struct net_device *, u32);
+ u32 (*get_tso)(struct net_device *);
+ int (*set_tso)(struct net_device *, u32);
+ int (*self_test_count)(struct net_device *);
+ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
+ void (*get_strings)(struct net_device *, u32 stringset, u8 *);
+ int (*phys_id)(struct net_device *, u32);
+ int (*get_stats_count)(struct net_device *);
+ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
+ u64 *);
+} *ethtool_ops = NULL;
+
+#undef SET_ETHTOOL_OPS
+#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
+
+/*
+ * Some useful ethtool_ops methods that are device independent. If we find that
+ * all drivers want to do the same thing here, we can turn these into dev_()
+ * function calls.
+ */
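+
+/*
+ * Example (illustrative; the "foo_" names are hypothetical): a driver can
+ * plug these generic helpers straight into its ops table:
+ *
+ *    static struct ethtool_ops foo_ethtool_ops = {
+ *        .get_link    = ethtool_op_get_link,
+ *        .get_tx_csum = ethtool_op_get_tx_csum,
+ *        .set_tx_csum = ethtool_op_set_tx_csum,
+ *    };
+ *    SET_ETHTOOL_OPS(netdev, &foo_ethtool_ops);
+ */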
+
+#undef ethtool_op_get_link
+#define ethtool_op_get_link _kc_ethtool_op_get_link
+u32 _kc_ethtool_op_get_link(struct net_device *dev)
+{
+ return netif_carrier_ok(dev) ? 1 : 0;
+}
+
+#undef ethtool_op_get_tx_csum
+#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
+u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
+{
+#ifdef NETIF_F_IP_CSUM
+ return (dev->features & NETIF_F_IP_CSUM) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_tx_csum
+#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
+int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_IP_CSUM
+ if (data)
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+#endif
+
+ return 0;
+}
+
+#undef ethtool_op_get_sg
+#define ethtool_op_get_sg _kc_ethtool_op_get_sg
+u32 _kc_ethtool_op_get_sg(struct net_device *dev)
+{
+#ifdef NETIF_F_SG
+ return (dev->features & NETIF_F_SG) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_sg
+#define ethtool_op_set_sg _kc_ethtool_op_set_sg
+int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_SG
+ if (data)
+ dev->features |= NETIF_F_SG;
+ else
+ dev->features &= ~NETIF_F_SG;
+#endif
+
+ return 0;
+}
+
+#undef ethtool_op_get_tso
+#define ethtool_op_get_tso _kc_ethtool_op_get_tso
+u32 _kc_ethtool_op_get_tso(struct net_device *dev)
+{
+#ifdef NETIF_F_TSO
+ return (dev->features & NETIF_F_TSO) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_tso
+#define ethtool_op_set_tso _kc_ethtool_op_set_tso
+int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_TSO
+ if (data)
+ dev->features |= NETIF_F_TSO;
+ else
+ dev->features &= ~NETIF_F_TSO;
+#endif
+
+ return 0;
+}
+
+/* Handlers for each ethtool command */
+
+static int ethtool_get_settings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ int err;
+
+ if (!ethtool_ops->get_settings)
+ return -EOPNOTSUPP;
+
+ err = ethtool_ops->get_settings(dev, &cmd);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_settings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_cmd cmd;
+
+ if (!ethtool_ops->set_settings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return ethtool_ops->set_settings(dev, &cmd);
+}
+
+static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_drvinfo info;
+ struct ethtool_ops *ops = ethtool_ops;
+
+ if (!ops->get_drvinfo)
+ return -EOPNOTSUPP;
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GDRVINFO;
+ ops->get_drvinfo(dev, &info);
+
+ if (ops->self_test_count)
+ info.testinfo_len = ops->self_test_count(dev);
+ if (ops->get_stats_count)
+ info.n_stats = ops->get_stats_count(dev);
+ if (ops->get_regs_len)
+ info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_eeprom_len)
+ info.eedump_len = ops->get_eeprom_len(dev);
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_regs(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_regs regs;
+ struct ethtool_ops *ops = ethtool_ops;
+ void *regbuf;
+ int reglen, ret;
+
+ if (!ops->get_regs || !ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&regs, useraddr, sizeof(regs)))
+ return -EFAULT;
+
+ reglen = ops->get_regs_len(dev);
+ if (regs.len > reglen)
+ regs.len = reglen;
+
+ regbuf = kmalloc(reglen, GFP_USER);
+ if (!regbuf)
+ return -ENOMEM;
+
+ ops->get_regs(dev, &regs, regbuf);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &regs, sizeof(regs)))
+ goto out;
+ useraddr += offsetof(struct ethtool_regs, data);
+ if (copy_to_user(useraddr, regbuf, reglen))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(regbuf);
+ return ret;
+}
+
+static int ethtool_get_wol(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+
+ if (!ethtool_ops->get_wol)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_wol(dev, &wol);
+
+ if (copy_to_user(useraddr, &wol, sizeof(wol)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_wol(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_wolinfo wol;
+
+ if (!ethtool_ops->set_wol)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&wol, useraddr, sizeof(wol)))
+ return -EFAULT;
+
+ return ethtool_ops->set_wol(dev, &wol);
+}
+
+static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GMSGLVL };
+
+ if (!ethtool_ops->get_msglevel)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_msglevel(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_msglevel)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ ethtool_ops->set_msglevel(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_nway_reset(struct net_device *dev)
+{
+ if (!ethtool_ops->nway_reset)
+ return -EOPNOTSUPP;
+
+ return ethtool_ops->nway_reset(dev);
+}
+
+static int ethtool_get_link(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GLINK };
+
+ if (!ethtool_ops->get_link)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_link(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(eeprom.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+ goto out;
+
+ ret = ops->get_eeprom(dev, &eeprom, data);
+ if (ret)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
+ goto out;
+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->set_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(eeprom.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+ goto out;
+
+ ret = ops->set_eeprom(dev, &eeprom, data);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+ ret = -EFAULT;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+
+ if (!ethtool_ops->get_coalesce)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_coalesce(dev, &coalesce);
+
+ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_coalesce coalesce;
+
+ if (!ethtool_ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
+ return -EFAULT;
+
+ return ethtool_ops->set_coalesce(dev, &coalesce);
+}
+
+static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+
+ if (!ethtool_ops->get_ringparam)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_ringparam(dev, &ringparam);
+
+ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_ringparam ringparam;
+
+ if (!ethtool_ops->set_ringparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
+ return -EFAULT;
+
+ return ethtool_ops->set_ringparam(dev, &ringparam);
+}
+
+static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+
+ if (!ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_pauseparam(dev, &pauseparam);
+
+ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_pauseparam pauseparam;
+
+ if (!ethtool_ops->set_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
+ return -EFAULT;
+
+ return ethtool_ops->set_pauseparam(dev, &pauseparam);
+}
+
+static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+ if (!ethtool_ops->get_rx_csum)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_rx_csum(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_rx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ ethtool_ops->set_rx_csum(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+ if (!ethtool_ops->get_tx_csum)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_tx_csum(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_tx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_tx_csum(dev, edata.data);
+}
+
+static int ethtool_get_sg(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GSG };
+
+ if (!ethtool_ops->get_sg)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_sg(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_sg(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_sg)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_sg(dev, edata.data);
+}
+
+static int ethtool_get_tso(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GTSO };
+
+ if (!ethtool_ops->get_tso)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_tso(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_tso(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_tso)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_tso(dev, edata.data);
+}
+
+static int ethtool_self_test(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_test test;
+ struct ethtool_ops *ops = ethtool_ops;
+ u64 *data;
+ int ret;
+
+ if (!ops->self_test || !ops->self_test_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&test, useraddr, sizeof(test)))
+ return -EFAULT;
+
+ test.len = ops->self_test_count(dev);
+ data = kmalloc(test.len * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->self_test(dev, &test, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &test, sizeof(test)))
+ goto out;
+ useraddr += sizeof(test);
+ if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_strings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_strings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ switch (gstrings.string_set) {
+ case ETH_SS_TEST:
+ if (!ops->self_test_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->self_test_count(dev);
+ break;
+ case ETH_SS_STATS:
+ if (!ops->get_stats_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->get_stats_count(dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_strings(dev, gstrings.string_set, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_phys_id(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_value id;
+
+ if (!ethtool_ops->phys_id)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&id, useraddr, sizeof(id)))
+ return -EFAULT;
+
+ return ethtool_ops->phys_id(dev, id.data);
+}
+
+static int ethtool_get_stats(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_stats stats;
+ struct ethtool_ops *ops = ethtool_ops;
+ u64 *data;
+ int ret;
+
+ if (!ops->get_ethtool_stats || !ops->get_stats_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&stats, useraddr, sizeof(stats)))
+ return -EFAULT;
+
+ stats.n_stats = ops->get_stats_count(dev);
+ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_ethtool_stats(dev, &stats, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &stats, sizeof(stats)))
+ goto out;
+ useraddr += sizeof(stats);
+ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+/* The main entry point in this file. In the compat case it is called from
+ * the driver's own ioctl handler rather than from net/core/dev.c. */
+
+#define ETHTOOL_OPS_COMPAT
+int ethtool_ioctl(struct ifreq *ifr)
+{
+ struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
+ void *useraddr = (void *) ifr->ifr_data;
+ u32 ethcmd;
+
+ /*
+ * XXX: This can be pushed down into the ethtool_* handlers that
+ * need it. Keep existing behavior for the moment.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!dev || !netif_device_present(dev))
+ return -ENODEV;
+
+ if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GSET:
+ return ethtool_get_settings(dev, useraddr);
+ case ETHTOOL_SSET:
+ return ethtool_set_settings(dev, useraddr);
+ case ETHTOOL_GDRVINFO:
+ return ethtool_get_drvinfo(dev, useraddr);
+ case ETHTOOL_GREGS:
+ return ethtool_get_regs(dev, useraddr);
+ case ETHTOOL_GWOL:
+ return ethtool_get_wol(dev, useraddr);
+ case ETHTOOL_SWOL:
+ return ethtool_set_wol(dev, useraddr);
+ case ETHTOOL_GMSGLVL:
+ return ethtool_get_msglevel(dev, useraddr);
+ case ETHTOOL_SMSGLVL:
+ return ethtool_set_msglevel(dev, useraddr);
+ case ETHTOOL_NWAY_RST:
+ return ethtool_nway_reset(dev);
+ case ETHTOOL_GLINK:
+ return ethtool_get_link(dev, useraddr);
+ case ETHTOOL_GEEPROM:
+ return ethtool_get_eeprom(dev, useraddr);
+ case ETHTOOL_SEEPROM:
+ return ethtool_set_eeprom(dev, useraddr);
+ case ETHTOOL_GCOALESCE:
+ return ethtool_get_coalesce(dev, useraddr);
+ case ETHTOOL_SCOALESCE:
+ return ethtool_set_coalesce(dev, useraddr);
+ case ETHTOOL_GRINGPARAM:
+ return ethtool_get_ringparam(dev, useraddr);
+ case ETHTOOL_SRINGPARAM:
+ return ethtool_set_ringparam(dev, useraddr);
+ case ETHTOOL_GPAUSEPARAM:
+ return ethtool_get_pauseparam(dev, useraddr);
+ case ETHTOOL_SPAUSEPARAM:
+ return ethtool_set_pauseparam(dev, useraddr);
+ case ETHTOOL_GRXCSUM:
+ return ethtool_get_rx_csum(dev, useraddr);
+ case ETHTOOL_SRXCSUM:
+ return ethtool_set_rx_csum(dev, useraddr);
+ case ETHTOOL_GTXCSUM:
+ return ethtool_get_tx_csum(dev, useraddr);
+ case ETHTOOL_STXCSUM:
+ return ethtool_set_tx_csum(dev, useraddr);
+ case ETHTOOL_GSG:
+ return ethtool_get_sg(dev, useraddr);
+ case ETHTOOL_SSG:
+ return ethtool_set_sg(dev, useraddr);
+ case ETHTOOL_GTSO:
+ return ethtool_get_tso(dev, useraddr);
+ case ETHTOOL_STSO:
+ return ethtool_set_tso(dev, useraddr);
+ case ETHTOOL_TEST:
+ return ethtool_self_test(dev, useraddr);
+ case ETHTOOL_GSTRINGS:
+ return ethtool_get_strings(dev, useraddr);
+ case ETHTOOL_PHYS_ID:
+ return ethtool_phys_id(dev, useraddr);
+ case ETHTOOL_GSTATS:
+ return ethtool_get_stats(dev, useraddr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
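+
+/*
+ * Illustrative sketch only (not part of this patch): on kernels that need
+ * this compat path, a driver would dispatch SIOCETHTOOL from its own
+ * do_ioctl handler to ethtool_ioctl() above. The handler name below is
+ * hypothetical.
+ */
+#if 0
+static int example_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+#ifdef SIOCETHTOOL
+ case SIOCETHTOOL:
+ return ethtool_ioctl(ifr);
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif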
+
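+/*
+ * Local compat copy of the kernel's MII helper interface: older kernels
+ * may lack the generic helpers from linux/mii.h, so the standard names are
+ * redirected to the _kc_* implementations below.
+ */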
+#define mii_if_info _kc_mii_if_info
+struct _kc_mii_if_info {
+ int phy_id;
+ int advertising;
+ int phy_id_mask;
+ int reg_num_mask;
+
+ unsigned int full_duplex : 1; /* is full duplex? */
+ unsigned int force_media : 1; /* is autoneg. disabled? */
+
+ struct net_device *dev;
+ int (*mdio_read) (struct net_device *dev, int phy_id, int location);
+ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
+};
+
+struct ethtool_cmd;
+struct mii_ioctl_data;
+
+#undef mii_link_ok
+#define mii_link_ok _kc_mii_link_ok
+#undef mii_nway_restart
+#define mii_nway_restart _kc_mii_nway_restart
+#undef mii_ethtool_gset
+#define mii_ethtool_gset _kc_mii_ethtool_gset
+#undef mii_ethtool_sset
+#define mii_ethtool_sset _kc_mii_ethtool_sset
+#undef mii_check_link
+#define mii_check_link _kc_mii_check_link
+#undef generic_mii_ioctl
+#define generic_mii_ioctl _kc_generic_mii_ioctl
+extern int _kc_mii_link_ok (struct mii_if_info *mii);
+extern int _kc_mii_nway_restart (struct mii_if_info *mii);
+extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
+ struct ethtool_cmd *ecmd);
+extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
+ struct ethtool_cmd *ecmd);
+extern void _kc_mii_check_link (struct mii_if_info *mii);
+extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_changed);
+
+
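+/* compat containers used to carry per-device state (driver data, carrier
+ * status) that very old kernels do not track themselves */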
+struct _kc_pci_dev_ext {
+ struct pci_dev *dev;
+ void *pci_drvdata;
+ struct pci_driver *driver;
+};
+
+struct _kc_net_dev_ext {
+ struct net_device *dev;
+ unsigned int carrier;
+};
+
+
+/**************************************/
+/* mii support */
+
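+/* read the PHY's MII registers and report the link settings in @ecmd */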
+int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+ u32 advert, bmcr, lpa, nego;
+
+ ecmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+ /* only supports twisted-pair */
+ ecmd->port = PORT_MII;
+
+ /* only supports internal transceiver */
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ /* this isn't fully supported at higher layers */
+ ecmd->phy_address = mii->phy_id;
+
+ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ if (advert & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
+ if (bmcr & BMCR_ANENABLE) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+
+ nego = mii_nway_result(advert & lpa);
+ if (nego == LPA_100FULL || nego == LPA_100HALF)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+ if (nego == LPA_100FULL || nego == LPA_10FULL) {
+ ecmd->duplex = DUPLEX_FULL;
+ mii->full_duplex = 1;
+ } else {
+ ecmd->duplex = DUPLEX_HALF;
+ mii->full_duplex = 0;
+ }
+ } else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
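+/* apply the link settings requested in @ecmd to the PHY via MII writes */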
+int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+
+ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_MII)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->phy_address != mii->phy_id)
+ return -EINVAL;
+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ /* ignore supported, maxtxpkt, maxrxpkt */
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u32 bmcr, advert, tmp;
+
+ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full)) == 0)
+ return -EINVAL;
+
+ /* advertise only what has been requested */
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ tmp |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ tmp |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ tmp |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ tmp |= ADVERTISE_100FULL;
+ if (advert != tmp) {
+ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
+ mii->advertising = tmp;
+ }
+
+ /* turn on autonegotiation, and force a renegotiate */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
+
+ mii->force_media = 0;
+ } else {
+ u32 bmcr, tmp;
+
+ /* turn off autonegotiation, set speed and duplex */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (ecmd->speed == SPEED_100)
+ tmp |= BMCR_SPEED100;
+ if (ecmd->duplex == DUPLEX_FULL) {
+ tmp |= BMCR_FULLDPLX;
+ mii->full_duplex = 1;
+ } else
+ mii->full_duplex = 0;
+ if (bmcr != tmp)
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
+
+ mii->force_media = 1;
+ }
+ return 0;
+}
+
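+/* return 1 if the PHY reports link up, 0 otherwise */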
+int _kc_mii_link_ok (struct mii_if_info *mii)
+{
+ /* first, a dummy read, needed to latch some MII phys */
+ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
+ return 1;
+ return 0;
+}
+
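+/* restart autonegotiation; returns -EINVAL if autoneg is not enabled */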
+int _kc_mii_nway_restart (struct mii_if_info *mii)
+{
+ int bmcr;
+ int r = -EINVAL;
+
+ /* if autoneg is off, it's an error */
+ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+
+ if (bmcr & BMCR_ANENABLE) {
+ bmcr |= BMCR_ANRESTART;
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
+ r = 0;
+ }
+
+ return r;
+}
+
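+/* keep the net_device carrier flag in sync with the PHY link state */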
+void _kc_mii_check_link (struct mii_if_info *mii)
+{
+ int cur_link = mii_link_ok(mii);
+ int prev_link = netif_carrier_ok(mii->dev);
+
+ if (cur_link && !prev_link)
+ netif_carrier_on(mii->dev);
+ else if (prev_link && !cur_link)
+ netif_carrier_off(mii->dev);
+}
+
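+/* service the standard MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
+ * on a driver's behalf; sets *duplex_chg_out when a register write changes
+ * the duplex setting */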
+int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_chg_out)
+{
+ int rc = 0;
+ unsigned int duplex_changed = 0;
+
+ if (duplex_chg_out)
+ *duplex_chg_out = 0;
+
+ mii_data->phy_id &= mii_if->phy_id_mask;
+ mii_data->reg_num &= mii_if->reg_num_mask;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
+ case SIOCGMIIPHY:
+ mii_data->phy_id = mii_if->phy_id;
+ /* fall through */
+
+ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
+ case SIOCGMIIREG:
+ mii_data->val_out =
+ mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num);
+ break;
+
+ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
+ case SIOCSMIIREG: {
+ u16 val = mii_data->val_in;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (mii_data->phy_id == mii_if->phy_id) {
+ switch(mii_data->reg_num) {
+ case MII_BMCR: {
+ unsigned int new_duplex = 0;
+ if (val & (BMCR_RESET|BMCR_ANENABLE))
+ mii_if->force_media = 0;
+ else
+ mii_if->force_media = 1;
+ if (mii_if->force_media &&
+ (val & BMCR_FULLDPLX))
+ new_duplex = 1;
+ if (mii_if->full_duplex != new_duplex) {
+ duplex_changed = 1;
+ mii_if->full_duplex = new_duplex;
+ }
+ break;
+ }
+ case MII_ADVERTISE:
+ mii_if->advertising = val;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+
+ mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num, val);
+ break;
+ }
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
+ *duplex_chg_out = 1;
+
+ return rc;
+}
+
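+/*
+ * Illustrative sketch only (not part of this patch): how a driver would
+ * fill in a mii_if_info so the _kc_* helpers above can drive its PHY.
+ * The MDIO accessors are hypothetical stand-ins for real register I/O.
+ */
+#if 0
+static int example_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ return 0; /* real code would read the PHY register over MDIO */
+}
+
+static void example_mdio_write(struct net_device *dev, int phy_id, int location,
+ int val)
+{
+ /* real code would write the PHY register over MDIO */
+}
+
+static void example_mii_init(struct net_device *dev, struct mii_if_info *mii)
+{
+ mii->dev = dev;
+ mii->phy_id = 1; /* PHY address on the MDIO bus */
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+ mii->mdio_read = example_mdio_read;
+ mii->mdio_write = example_mdio_write;
+
+ /* mii_check_link resolves to _kc_mii_check_link here */
+ mii_check_link(mii);
+}
+#endif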