If in doubt, say N.
-config IXGBE_DCB
- bool "Data Center Bridging (DCB) Support"
- default n
- depends on IXGBE && DCB
- ---help---
- Say Y here if you want to use Data Center Bridging (DCB) in the
- driver.
-
- If unsure, say N.
-
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82598.o ixgbe_phy.o
+CFILES = ixgbe_main.c ixgbe_common.c ixgbe_api.c ixgbe_param.c \
+ ixgbe_ethtool.c kcompat.c ixgbe_82598.c \
+ ixgbe_dcb.c ixgbe_dcb_nl.c ixgbe_dcb_82598.c \
+ ixgbe_phy.c ixgbe_sysfs.c
-ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o ixgbe_dcb_nl.o
+ixgbe-objs := $(CFILES:.c=.o)
+
+EXTRA_CFLAGS += -DDRIVER_IXGBE -DIXGBE_NO_LRO
#ifndef _IXGBE_H_
#define _IXGBE_H_
-#include <linux/types.h>
+#ifndef IXGBE_NO_LRO
+#include <net/tcp.h>
+#endif
+
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#define IXGBE_DCA
+#include <linux/dca.h>
-#ifdef CONFIG_IXGBE_LRO
-#include <linux/inet_lro.h>
-#define IXGBE_MAX_LRO_AGGREGATE 32
-#define IXGBE_MAX_LRO_DESCRIPTORS 8
#endif
-#include "ixgbe_type.h"
-#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-#include <linux/dca.h>
+#include "kcompat.h"
+
+#include "ixgbe_api.h"
+
+#define IXGBE_NO_INET_LRO
+#ifndef IXGBE_NO_LRO
+#if defined(CONFIG_INET_LRO) || defined(CONFIG_INET_LRO_MODULE)
+#include <linux/inet_lro.h>
+#define IXGBE_MAX_LRO_DESCRIPTORS 8
+#undef IXGBE_NO_INET_LRO
+#define IXGBE_NO_LRO
#endif
+#endif /* IXGBE_NO_LRO */
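+/*
+ * Net effect of the block above: at most one LRO implementation is
+ * built. If the driver's own LRO is compiled out (this Makefile passes
+ * -DIXGBE_NO_LRO), neither path is used; otherwise the kernel's
+ * inet_lro is preferred when available (IXGBE_NO_INET_LRO is undefined
+ * and IXGBE_NO_LRO is defined), and the internal LRO is used only as
+ * the fallback.
+ */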
#define PFX "ixgbe: "
#define DPRINTK(nlevel, klevel, fmt, args...) \
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
+
/* flow control */
#define IXGBE_DEFAULT_FCRTL 0x10000
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_RXBUFFER_128 128 /* Used for packet split */
#define IXGBE_RXBUFFER_256 256 /* Used for packet split */
#define IXGBE_RXBUFFER_2048 2048
-#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
-#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#if defined(CONFIG_IXGBE_DCB) || defined(CONFIG_IXGBE_RSS) || \
+ defined(CONFIG_IXGBE_VMDQ)
+#define CONFIG_IXGBE_MQ
+#endif
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
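+/*
+ * Illustrative sketch (not part of this patch; cleaned_count, rx_ring
+ * and hw are hypothetical locals): a power-of-2 batch size lets the
+ * "time to bump the tail register" check reduce to a mask, e.g.
+ *
+ *	if (!(cleaned_count & (IXGBE_RX_BUFFER_WRITE - 1)))
+ *		IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx),
+ *		                rx_ring->next_to_use);
+ */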
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+#ifndef IXGBE_NO_LRO
+#define IXGBE_LRO_MAX 32 /* Maximum number of LRO descriptors */
+#define IXGBE_LRO_GLOBAL 10
+
+struct ixgbe_lro_stats {
+ u32 flushed;
+ u32 coal;
+};
+
+struct ixgbe_lro_desc {
+ struct hlist_node lro_node;
+ struct sk_buff *skb;
+ struct sk_buff *last_skb;
+ int timestamp;
+ u32 tsval;
+ u32 tsecr;
+ u32 source_ip;
+ u32 dest_ip;
+ u32 next_seq;
+ u32 ack_seq;
+ u16 window;
+ u16 source_port;
+ u16 dest_port;
+ u16 append_cnt;
+ u16 mss;
+	u32 data_size; /* TCP data size */
+ u16 vlan_tag;
+};
+
+struct ixgbe_lro_info {
+ struct ixgbe_lro_stats stats;
+	int max; /* Maximum number of packets to coalesce */
+};
+
+struct ixgbe_lro_list {
+ struct hlist_head active;
+ struct hlist_head free;
+ int active_cnt;
+};
+
+#endif /* IXGBE_NO_LRO */
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
unsigned int total_packets;
u16 reg_idx; /* holds the special value that gets the hardware register
- * offset associated with this ring, which is different
- * for DCB and RSS modes */
+ * offset associated with this ring, which is different
+ * for DCB and RSS modes */
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
/* cpu for tx queue */
int cpu;
#endif
-#ifdef CONFIG_IXGBE_LRO
- struct net_lro_mgr lro_mgr;
- bool lro_used;
-#endif
+
struct ixgbe_queue_stats stats;
u16 v_idx; /* maps directly to the index for this ring in the hardware
* vector array, can also be used for finding the bit in EICR
* and friends that represents the vector for this ring */
-
-
+#ifndef IXGBE_NO_LRO
+ /* LRO list for rx queue */
+ struct ixgbe_lro_list *lrolist;
+#endif
+#ifndef IXGBE_NO_INET_LRO
+ struct net_lro_mgr lro_mgr;
+ bool lro_used;
+#endif
u16 work_limit; /* max work per interrupt */
u16 rx_buf_len;
};
#define MAX_TX_QUEUES 32
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
- ? 8 : 1)
+ ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
/* MAX_MSIX_Q_VECTORS of these are allocated,
*/
struct ixgbe_q_vector {
struct ixgbe_adapter *adapter;
+#ifdef CONFIG_IXGBE_NAPI
struct napi_struct napi;
+#endif
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
u8 rxr_count; /* Rx ring count assigned to this vector */
u32 eitr;
};
+
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways.
*/
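+/*
+ * Worked example, assuming the usual ixgbe conversion of
+ * 1000000000 / (ints_per_sec * 256) (EITR counts in 256 ns units):
+ * 8000 ints/sec maps to 1000000000 / (8000 * 256) = 488 in the
+ * register, and 488 fed back through the same expression yields
+ * ~8004 ints/sec -- the same math going both ways, as noted above.
+ */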
(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
#define IXGBE_TX_CTXTDESC_ADV(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define IXGBE_TX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_tx_desc)
+#define IXGBE_RX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_rx_desc)
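+/*
+ * Example (rx_ring and i are hypothetical locals): the typed wrappers
+ * replace open-coded casts, so
+ *
+ *	struct ixgbe_legacy_rx_desc *rxd = IXGBE_RX_DESC(*rx_ring, i);
+ *
+ * expands to &(((struct ixgbe_legacy_rx_desc *)rx_ring->desc)[i]).
+ */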
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+#ifdef IXGBE_TCP_TIMER
+#define TCP_TIMER_VECTOR 1
+#else
+#define TCP_TIMER_VECTOR 0
+#endif
#define OTHER_VECTOR 1
-#define NON_Q_VECTORS (OTHER_VECTOR)
+#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR)
#define MAX_MSIX_Q_VECTORS 16
#define MIN_MSIX_Q_VECTORS 2
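+/*
+ * Vector budget example: a maximal request asks the kernel for
+ * MAX_MSIX_Q_VECTORS + NON_Q_VECTORS MSI-X entries, i.e. 16 + 2 = 18
+ * with IXGBE_TCP_TIMER defined and 16 + 1 = 17 without it.
+ */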
/* board specific private data structure */
struct ixgbe_adapter {
struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
struct vlan_group *vlgrp;
+#endif
u16 bd_number;
struct work_struct reset_task;
struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
u64 hw_csum_rx_error;
u64 hw_csum_rx_good;
u64 non_eop_descs;
+#ifndef CONFIG_IXGBE_NAPI
+ u64 rx_dropped_backlog; /* count drops from rx intr handler */
+#endif
int num_msix_vectors;
struct ixgbe_ring_feature ring_feature[3];
struct msix_entry *msix_entries;
+#ifdef IXGBE_TCP_TIMER
+ irqreturn_t (*msix_handlers[MAX_MSIX_COUNT])(int irq, void *data,
+ struct pt_regs *regs);
+#endif
u64 rx_hdr_split;
u32 alloc_rx_page_failed;
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
+#ifndef IXGBE_NO_LLI
+#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 5)
+#endif
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
+#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
+#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 15)
#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 24)
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
+#ifndef IXGBE_NO_LRO
+ struct ixgbe_lro_info lro_data;
+#endif
+
+#ifdef ETHTOOL_TEST
+ u32 test_icr;
+ struct ixgbe_ring test_tx_ring;
+ struct ixgbe_ring test_rx_ring;
+#endif
/* structs defined in ixgbe_hw.h */
struct ixgbe_hw hw;
u16 msg_enable;
struct ixgbe_hw_stats stats;
-
+#ifndef IXGBE_NO_LLI
+ u32 lli_port;
+ u32 lli_size;
+ u64 lli_int;
+#endif
/* Interrupt Throttle Rate */
u32 eitr_param;
unsigned long state;
+ u32 *config_space;
u64 tx_busy;
#ifndef IXGBE_NO_INET_LRO
- u64 lro_aggregated;
- u64 lro_flushed;
- u64 lro_no_desc;
+ unsigned int lro_max_aggr;
+ unsigned int lro_aggregated;
+ unsigned int lro_flushed;
+ unsigned int lro_no_desc;
#endif
unsigned int tx_ring_count;
unsigned int rx_ring_count;
__IXGBE_SFP_MODULE_NOT_FOUND
};
-enum ixgbe_boards {
- board_82598,
-};
-
-extern struct ixgbe_info ixgbe_82598_info;
-#ifdef CONFIG_DCBNL
-extern struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
- struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max);
-#endif
-
+/* needed by ixgbe_main.c */
+extern int ixgbe_validate_mac_addr(u8 *mc_addr);
+extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
+/* needed by ixgbe_ethtool.c */
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+
+/* needed by ixgbe_dcb_nl.c */
extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
-void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
+extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+extern int ixgbe_dcb_netlink_register(void);
+extern int ixgbe_dcb_netlink_unregister(void);
+
+extern int ixgbe_sysfs_create(struct ixgbe_adapter *adapter);
+extern void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter);
+
+#ifdef CONFIG_IXGBE_NAPI
+extern void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
+#endif
+
#endif /* _IXGBE_H_ */
*******************************************************************************/
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-#define IXGBE_82598_MAX_TX_QUEUES 32
-#define IXGBE_82598_MAX_RX_QUEUES 64
-#define IXGBE_82598_RAR_ENTRIES 16
-#define IXGBE_82598_MC_TBL_SIZE 128
-#define IXGBE_82598_VFT_TBL_SIZE 128
-
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete);
+static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index);
+static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
/**
- */
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+ * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for 82598.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
u16 list_offset, data_offset;
+ ret_val = ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+ /* MAC */
+ mac->ops.reset_hw = &ixgbe_reset_hw_82598;
+ mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_82598;
+ mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
+ mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
+
+ /* LEDs */
+ mac->ops.blink_led_start = &ixgbe_blink_led_start_82598;
+ mac->ops.blink_led_stop = &ixgbe_blink_led_stop_82598;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
+ mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
+
+ /* Flow Control */
+ mac->ops.setup_fc = &ixgbe_setup_fc_82598;
+
+ /* Link */
+ mac->ops.check_link = &ixgbe_check_mac_link_82598;
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+ mac->ops.setup_link_speed =
+ &ixgbe_setup_copper_link_speed_82598;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_82598;
+ } else {
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+ mac->ops.setup_link_speed = &ixgbe_setup_mac_link_speed_82598;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_link_capabilities_82598;
+ }
+
+ mac->mcft_size = 128;
+ mac->vft_size = 128;
+ mac->num_rar_entries = 16;
+ mac->max_tx_queues = 32;
+ mac->max_rx_queues = 64;
+
+ /* SFP+ Module */
+ phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+
/* Call PHY identify routine to get the phy type */
- ixgbe_identify_phy_generic(hw);
+ phy->ops.identify(hw);
/* PHY Init */
- switch (phy->type) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_tnx;
+ break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
- &list_offset,
- &data_offset);
+ &list_offset,
+ &data_offset);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
}
break;
- case ixgbe_phy_tn:
- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
- break;
default:
break;
}
- if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
- mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
- mac->ops.setup_link_speed =
- &ixgbe_setup_copper_link_speed_82598;
- mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_82598;
- }
-
- mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
- mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
- mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
- mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
- mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
-
out:
return ret_val;
}
bool *autoneg)
{
s32 status = 0;
- s32 autoc_reg;
-
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- if (hw->mac.link_settings_loaded) {
- autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE;
- autoc_reg &= ~IXGBE_AUTOC_LMS_MASK;
- autoc_reg |= hw->mac.link_attach_type;
- autoc_reg |= hw->mac.link_mode_select;
- }
-
- switch (autoc_reg & IXGBE_AUTOC_LMS_MASK) {
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults.
+ */
+ switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) {
case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = false;
case IXGBE_AUTOC_LMS_KX4_AN:
case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
- if (autoc_reg & IXGBE_AUTOC_KX4_SUPP)
+ if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (autoc_reg & IXGBE_AUTOC_KX_SUPP)
+ if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
break;
if (status == 0) {
if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
}
return status;
/* Media type for I82598 is based on device ID */
switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
case IXGBE_DEV_ID_82598EB_CX4:
}
/**
- * ixgbe_setup_fc_82598 - Configure flow control settings
+ * ixgbe_fc_enable_82598 - Enable flow control
* @hw: pointer to hardware structure
* @packetbuf_num: packet buffer number (0-7)
*
- * Configures the flow control settings based on SW configuration. This
- * function is used for 802.3x flow control configuration only.
+ * Enable flow control according to the current settings.
**/
-s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
{
- u32 frctl_reg;
+ s32 ret_val = 0;
+ u32 fctrl_reg;
u32 rmcs_reg;
+ u32 reg;
- if (packetbuf_num < 0 || packetbuf_num > 7) {
- hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
- " 0-7\n", packetbuf_num);
- }
-
- frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
/*
- * 10 gig parts do not have a word in the EEPROM to determine the
- * default flow control setting, so we explicitly set it to full.
- */
- if (hw->fc.type == ixgbe_fc_default)
- hw->fc.type = ixgbe_fc_full;
-
- /*
- * We want to save off the original Flow Control configuration just in
- * case we get disconnected and then reconnected into a different hub
- * or switch with different Flow Control capabilities.
- */
- hw->fc.original_type = hw->fc.type;
-
- /*
- * The possible values of the "flow_control" parameter are:
+ * The possible values of fc.current_mode are:
* 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames but not
- * send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we do not
- * support receiving pause frames)
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: Invalid.
*/
- switch (hw->fc.type) {
+ switch (hw->fc.current_mode) {
case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
break;
case ixgbe_fc_rx_pause:
/*
- * Rx Flow control is enabled,
- * and Tx Flow control is disabled.
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
*/
- frctl_reg |= IXGBE_FCTRL_RFCE;
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
break;
case ixgbe_fc_tx_pause:
/*
- * Tx Flow control is enabled, and Rx Flow control is disabled,
- * by a software over-ride.
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
*/
rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
break;
case ixgbe_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a software
- * over-ride.
- */
- frctl_reg |= IXGBE_FCTRL_RFCE;
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
break;
default:
- /* We should never get here. The value should be 0-3. */
hw_dbg(hw, "Flow control param set incorrectly\n");
+		ret_val = IXGBE_ERR_CONFIG;
+ goto out;
break;
}
/* Enable 802.3x based flow control settings. */
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
- /*
- * Check for invalid software configuration, zeros are completely
- * invalid for all parameters used past this point, and if we enable
- * flow control with zero water marks, we blast flow control packets.
- */
- if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
- hw_dbg(hw, "Flow control structure initialized incorrectly\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
- }
-
- /*
- * We need to set up the Receive Threshold high and low water
- * marks as well as (optionally) enabling the transmission of
- * XON frames.
- */
- if (hw->fc.type & ixgbe_fc_tx_pause) {
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
if (hw->fc.send_xon) {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
			                (hw->fc.low_water | IXGBE_FCRTL_XONE));
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
hw->fc.low_water);
}
+
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
- (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
+ (hw->fc.high_water | IXGBE_FCRTH_FCEN));
}
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
+ /* Configure pause time (2 TCs per register) */
+	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
+ if ((packetbuf_num & 1) == 0)
+ reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
+ else
+ reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
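+	/*
+	 * Example: packetbuf_num 5 shares FCTTV(2) with buffer 4; being
+	 * odd, its pause time lands in the upper 16 bits while the lower
+	 * half (buffer 4) is preserved.
+	 */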
+
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
- return 0;
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_fc_82598 - Set up flow control
+ * @hw: pointer to hardware structure
+ * @packetbuf_num: packet buffer number (0-7)
+ *
+ * Sets up flow control.
+ **/
+s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+ s32 ret_val = 0;
+
+ /* Validate the packetbuf configuration */
+ if (packetbuf_num < 0 || packetbuf_num > 7) {
+ hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
+ " 0-7\n", packetbuf_num);
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * Validate the water mark configuration. Zero water marks are invalid
+ * because it causes the controller to just blast out fc packets.
+ */
+ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * Validate the requested mode. Strict IEEE mode does not allow
+ * ixgbe_fc_rx_pause because it will cause testing anomalies.
+ */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /*
+ * Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num);
+
+out:
+ return ret_val;
}
/**
u32 i;
s32 status = 0;
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
- if (hw->mac.link_settings_loaded) {
- autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE;
- autoc_reg &= ~IXGBE_AUTOC_LMS_MASK;
- autoc_reg |= hw->mac.link_attach_type;
- autoc_reg |= hw->mac.link_mode_select;
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
- IXGBE_WRITE_FLUSH(hw);
- msleep(50);
- }
-
/* Restart link */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
/* Only poll for autoneg to complete if specified to do so */
if (hw->phy.autoneg_wait_to_complete) {
- if (hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN ||
- hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
links_reg = 0; /* Just in case Autoneg time = 0 */
for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
}
}
- /*
- * We want to save off the original Flow Control configuration just in
- * case we get disconnected and then reconnected into a different hub
- * or switch with different Flow Control capabilities.
- */
- hw->fc.original_type = hw->fc.type;
- ixgbe_setup_fc_82598(hw, 0);
+ /* Set up flow control */
+ status = ixgbe_setup_fc_82598(hw, 0);
/* Add delay to filter out noises during initial link setup */
msleep(50);
u16 link_reg, adapt_comp_reg;
/*
- * SERDES PHY requires us to read link status from register 0xC79F.
- * Bit 0 set indicates link is up/ready; clear indicates link down.
- * OxC00C is read to check that the XAUI lanes are active. Bit 0
- * clear indicates active; set indicates inactive.
+ * SERDES PHY requires us to read link status from undocumented
+ * register 0xC79F. Bit 0 set indicates link is up/ready; clear
+ * indicates link down. 0xC00C is read to check that the XAUI lanes
+ * are active. Bit 0 clear indicates active; set indicates inactive.
*/
if (hw->phy.type == ixgbe_phy_nl) {
hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
- &adapt_comp_reg);
+ &adapt_comp_reg);
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if ((link_reg & 1) &&
}
msleep(100);
hw->phy.ops.read_reg(hw, 0xC79F,
- IXGBE_TWINAX_DEV,
- &link_reg);
+ IXGBE_TWINAX_DEV,
+ &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C,
- IXGBE_TWINAX_DEV,
- &adapt_comp_reg);
+ IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
}
} else {
if ((link_reg & 1) &&
return 0;
}
-
/**
* ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if auto-negotiation enabled
- * @autoneg_wait_to_complete: true if waiting is needed to complete
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
{
- s32 status = 0;
+ s32 status = 0;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc = curr_autoc;
+ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
- /* If speed is 10G, then check for CX4 or XAUI. */
- if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
- (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) {
- hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
- } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) {
- hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
- } else if (autoneg) {
- /* BX mode - Autonegotiate 1G */
- if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
- hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
- else /* KX/KX4 mode */
- hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN_1G_AN;
- } else {
+ /* Check to see if speed passed in is supported. */
+ ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN)
status = IXGBE_ERR_LINK_SETUP;
+
+ /* Set KX4/KX support according to speed requested */
+ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ if (autoc != curr_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
}
if (status == 0) {
hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete;
- hw->mac.link_settings_loaded = true;
/*
* Setup and restart the link based on the new values in
* ixgbe_hw This will write the AUTOC register based on the new
* stored values
*/
- ixgbe_setup_mac_link_82598(hw);
+ status = ixgbe_setup_mac_link_82598(hw);
}
return status;
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
{
s32 status;
+ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc = curr_autoc;
/* Restart autonegotiation on PHY */
status = hw->phy.ops.setup_link(hw);
/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
- hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
- hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ autoc |= IXGBE_AUTOC_LMS_KX4_AN;
+
+ autoc &= ~(IXGBE_AUTOC_1G_PMA_PMD_MASK | IXGBE_AUTOC_10G_PMA_PMD_MASK);
+ autoc |= (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+
+ if (autoc != curr_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
/* Set up MAC */
ixgbe_setup_mac_link_82598(hw);
bool autoneg_wait_to_complete)
{
s32 status;
+ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc = curr_autoc;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
autoneg_wait_to_complete);
/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
- hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
- hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ autoc |= IXGBE_AUTOC_LMS_KX4_AN;
+
+ autoc &= ~(IXGBE_AUTOC_1G_PMA_PMD_MASK | IXGBE_AUTOC_10G_PMA_PMD_MASK);
+ autoc |= (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+
+ if (autoc != curr_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
/* Set up MAC */
ixgbe_setup_mac_link_82598(hw);
IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
/*
- * AUTOC register which stores link settings gets cleared
- * and reloaded from EEPROM after reset. We need to restore
- * our stored value from init in case SW changed the attach
- * type or speed. If this is the first time and link settings
- * have not been stored, store default settings from AUTOC.
+ * Store the original AUTOC value if it has not been
+ * stored off yet. Otherwise restore the stored original
+	 * AUTOC value since the reset operation sets it back to defaults.
*/
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- if (hw->mac.link_settings_loaded) {
- autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE);
- autoc &= ~(IXGBE_AUTOC_LMS_MASK);
- autoc |= hw->mac.link_attach_type;
- autoc |= hw->mac.link_mode_select;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
- } else {
- hw->mac.link_attach_type =
- (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
- hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
- hw->mac.link_settings_loaded = true;
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_link_settings_stored = true;
+	} else if (autoc != hw->mac.orig_autoc) {
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
}
/* Store the permanent mac address */
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
+
if (rar < rar_entries) {
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
if (rar_high & IXGBE_RAH_VIND_MASK) {
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on)
{
u32 regindex;
u32 bitindex;
}
/**
- * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit EEPROM word of an SFP+ module
- * over I2C interface through an intermediate phy.
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to read
* @eeprom_data: value read
*
- * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ * Performs an 8 bit read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+ u8 *eeprom_data)
{
s32 status = 0;
u16 sfp_addr = 0;
if (hw->phy.type == ixgbe_phy_nl) {
/*
- * phy SDA/SCL registers are at addresses 0xC30A to
+ * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
* 0xC30D. These registers are used to talk to the SFP+
* module's EEPROM through the SDA/SCL (I2C) interface.
*/
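+		/*
+		 * Read sequence, in brief: (1) write the byte offset with
+		 * the read-request bit set to the SDA/SCL address register,
+		 * (2) poll the status register until the transaction leaves
+		 * the in-progress state, (3) fetch the result from the data
+		 * register, where the byte sits in the upper 8 bits.
+		 */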
sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- sfp_addr);
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ sfp_addr);
/* Poll status */
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &sfp_stat);
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &sfp_stat);
sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
break;
/* Read data */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
*eeprom_data = (u8)(sfp_data >> 8);
} else {
*
* Determines physical layer capabilities of the current configuration.
**/
-s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
- s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ /* Default device ID is mezzanine card KX/KX4 */
+ physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX);
+ break;
case IXGBE_DEV_ID_82598EB_CX4:
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
case IXGBE_DEV_ID_82598EB_XF_LR:
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
break;
+ case IXGBE_DEV_ID_82598AT:
+ physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
+ IXGBE_PHYSICAL_LAYER_1000BASE_T);
+ break;
case IXGBE_DEV_ID_82598EB_SFP_LOM:
hw->phy.ops.identify_sfp(hw);
break;
}
break;
- case IXGBE_DEV_ID_82598AT:
- physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
- IXGBE_PHYSICAL_LAYER_1000BASE_T);
- break;
default:
physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
return physical_layer;
}
-
-static struct ixgbe_mac_operations mac_ops_82598 = {
- .init_hw = &ixgbe_init_hw_generic,
- .reset_hw = &ixgbe_reset_hw_82598,
- .start_hw = &ixgbe_start_hw_generic,
- .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
- .get_media_type = &ixgbe_get_media_type_82598,
- .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
- .get_mac_addr = &ixgbe_get_mac_addr_generic,
- .stop_adapter = &ixgbe_stop_adapter_generic,
- .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
- .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
- .setup_link = &ixgbe_setup_mac_link_82598,
- .setup_link_speed = &ixgbe_setup_mac_link_speed_82598,
- .check_link = &ixgbe_check_mac_link_82598,
- .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
- .led_on = &ixgbe_led_on_generic,
- .led_off = &ixgbe_led_off_generic,
- .blink_led_start = &ixgbe_blink_led_start_82598,
- .blink_led_stop = &ixgbe_blink_led_stop_82598,
- .set_rar = &ixgbe_set_rar_generic,
- .clear_rar = &ixgbe_clear_rar_generic,
- .set_vmdq = &ixgbe_set_vmdq_82598,
- .clear_vmdq = &ixgbe_clear_vmdq_82598,
- .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
- .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
- .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
- .enable_mc = &ixgbe_enable_mc_generic,
- .disable_mc = &ixgbe_disable_mc_generic,
- .clear_vfta = &ixgbe_clear_vfta_82598,
- .set_vfta = &ixgbe_set_vfta_82598,
- .setup_fc = &ixgbe_setup_fc_82598,
-};
-
-static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
- .init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eeprom_generic,
- .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
- .update_checksum = &ixgbe_update_eeprom_checksum_generic,
-};
-
-static struct ixgbe_phy_operations phy_ops_82598 = {
- .identify = &ixgbe_identify_phy_generic,
- .identify_sfp = &ixgbe_identify_sfp_module_generic,
- .reset = &ixgbe_reset_phy_generic,
- .read_reg = &ixgbe_read_phy_reg_generic,
- .write_reg = &ixgbe_write_phy_reg_generic,
- .setup_link = &ixgbe_setup_phy_link_generic,
- .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
- .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
-};
-
-struct ixgbe_info ixgbe_82598_info = {
- .mac = ixgbe_mac_82598EB,
- .get_invariants = &ixgbe_get_invariants_82598,
- .mac_ops = &mac_ops_82598,
- .eeprom_ops = &eeprom_ops_82598,
- .phy_ops = &phy_ops_82598,
-};
-
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
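+/*
+ * Every wrapper below dispatches through ixgbe_call_func, which in this
+ * driver's shared code calls the named function pointer when it is
+ * non-NULL and otherwise returns the supplied fallback value (here
+ * IXGBE_NOT_IMPLEMENTED).
+ */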
+
+/**
+ * ixgbe_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The ixgbe_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /*
+ * Set the mac type
+ */
+ ixgbe_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ status = ixgbe_init_ops_82598(hw);
+ break;
+ default:
+ status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
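+/*
+ * Typical probe-time usage (a sketch; adapter, pdev and err_sw_init are
+ * hypothetical names, and the fields follow the kernel-doc above):
+ *
+ *	struct ixgbe_hw *hw = &adapter->hw;
+ *
+ *	memset(hw, 0, sizeof(*hw));
+ *	hw->back = adapter;
+ *	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ *	                      pci_resource_len(pdev, 0));
+ *	hw->vendor_id = pdev->vendor;
+ *	hw->device_id = pdev->device;
+ *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ *	hw->subsystem_device_id = pdev->subsystem_device;
+ *	hw->revision_id = pdev->revision;
+ *	if (ixgbe_init_shared_code(hw))
+ *		goto err_sw_init;
+ */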
+
+/**
+ * ixgbe_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ hw->mac.type = ixgbe_mac_82598EB;
+ break;
+ default:
+ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+ } else {
+ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, ret_val);
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_hw - Initialize the hardware
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_reset_hw - Performs a hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performs a PHY reset, and performs a MAC reset
+ **/
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_start_hw - Prepares hardware for Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type,
+ * clears all on chip counters, initializes receive address registers,
+ * multicast table, VLAN filter table, calls routine to setup link and
+ * flow control settings, and leaves transmit and receive units disabled
+ * and uninitialized.
+ **/
+s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_hw_cntrs - Clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_media_type - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
+ ixgbe_media_type_unknown);
+}
+
+/**
+ * ixgbe_get_mac_addr - Get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from the first Receive Address Register
+ * (RAR0). A reset of the adapter must have been performed prior to calling
+ * this function in order for the MAC address to have been loaded from the
+ * EEPROM into RAR0.
+ **/
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
+ (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_bus_info - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues - Get Tx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
+{
+ return hw->mac.max_tx_queues;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues - Get Rx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
+{
+ return hw->mac.max_rx_queues;
+}
+
+/**
+ * ixgbe_stop_adapter - Disable Rx/Tx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_pba_num - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
+{
+ return ixgbe_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ * ixgbe_identify_phy - Get PHY type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ status = ixgbe_call_func(hw,
+ hw->phy.ops.identify,
+ (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_phy - Perform a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ if (ixgbe_identify_phy(hw) != 0)
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == 0) {
+ status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version - Get PHY firmware version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to firmware version
+ **/
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
+ (hw, firmware_version),
+ IXGBE_NOT_IMPLEMENTED);
+ return status;
+}
+
+/**
+ * ixgbe_read_phy_reg - Read PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_phy_reg - Write PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_phy_link - Restart PHY autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts autonegotiation on the PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_check_phy_link - Determine link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads a PHY register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
+ link_up), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_phy_link_speed - Set auto advertise
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ *
+ * Sets the auto advertised capabilities
+ **/
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
+ autoneg, autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_link - Configure link settings
+ * @hw: pointer to hardware structure
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_link(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_check_link - Get link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
+ link_up, link_up_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_link_speed - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ *
+ * Set the link speed and restarts the link.
+ **/
+s32 ixgbe_setup_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_link_speed, (hw, speed,
+ autoneg, autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_link_capabilities - Returns link capabilities
+ * @hw: pointer to hardware structure
+ *
+ * Determines the link capabilities of the current configuration.
+ **/
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
+ speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_led_on - Turn on LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ *
+ * Turns on the software controllable LEDs.
+ **/
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_led_off - Turn off LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ *
+ * Turns off the software controllable LEDs.
+ **/
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_blink_led_start - Blink LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Blink LED based on index.
+ **/
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_blink_led_stop - Stop blinking LEDs
+ * @hw: pointer to hardware structure
+ *
+ * Stop blinking LED based on index.
+ **/
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_eeprom_params - Initialize EEPROM parameters
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * ixgbe_write_eeprom - Write word to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ **/
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom - Read word from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: 16 bit value read from the EEPROM
+ *
+ * Reads 16 bit value from EEPROM
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_rar - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+ enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_rar - Clear Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vmdq - Associate a VMDq index with a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to associate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to disassociate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_rx_addrs - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
+ * @hw: pointer to hardware structure
+ **/
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
+{
+ return hw->mac.num_rar_entries;
+}
+
+/**
+ * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new secondary unicast addresses
+ * @addr_count: number of addresses
+ * @func: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ **/
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
+ addr_list, addr_count, func),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @func: iterator function to walk the multicast address list
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
+ mc_addr_list, mc_addr_count, func),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_mc - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_mc - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vfta - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
+ vlan_on), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_fc - Set flow control
+ * @hw: pointer to hardware structure
+ * @packetbuf_num: packet buffer number (0-7)
+ *
+ * Configures the flow control settings based on SW configuration.
+ **/
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw, packetbuf_num),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_analog_reg8 - Reads 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation on the analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_analog_reg8 - Writes 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the Unicast Table Arrays to zero on device load. This
+ * is part of the Rx init addr execution path.
+ **/
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM byte over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_supported_physical_layer - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
+ (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+}
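
Taken together, these wrappers give driver code a MAC-independent entry point. A hedged usage sketch (error handling trimmed; ixgbe_init_shared_code, declared in the header below, is expected to bind the ops tables before any wrapper is called):

	struct ixgbe_hw *hw = &adapter->hw;
	u16 checksum;

	if (ixgbe_init_shared_code(hw))
		return -EIO;
	/* a MAC that never filled in validate_checksum simply reports
	 * IXGBE_NOT_IMPLEMENTED rather than crashing */
	if (ixgbe_validate_eeprom_checksum(hw, &checksum) != 0)
		DPRINTK(DRV, ERR, "EEPROM checksum validation failed\n");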
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_API_H_
+#define _IXGBE_API_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
+
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data);
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data);
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_link(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_API_H_ */
*******************************************************************************/
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+
+/**
+ * ixgbe_init_ops_generic - Inits function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /* EEPROM */
+ eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
+ /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
+ if (eec & (1 << 8))
+ eeprom->ops.read = &ixgbe_read_eeprom_generic;
+ else
+ eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+ eeprom->ops.write = &ixgbe_write_eeprom_generic;
+ eeprom->ops.validate_checksum =
+ &ixgbe_validate_eeprom_checksum_generic;
+ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
+
+ /* MAC */
+ mac->ops.init_hw = &ixgbe_init_hw_generic;
+ mac->ops.reset_hw = NULL;
+ mac->ops.start_hw = &ixgbe_start_hw_generic;
+ mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
+ mac->ops.get_media_type = NULL;
+ mac->ops.get_supported_physical_layer = NULL;
+ mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
+ mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
+ mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
+ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
+
+ /* LEDs */
+ mac->ops.led_on = &ixgbe_led_on_generic;
+ mac->ops.led_off = &ixgbe_led_off_generic;
+ mac->ops.blink_led_start = NULL;
+ mac->ops.blink_led_stop = NULL;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_rar = &ixgbe_set_rar_generic;
+ mac->ops.clear_rar = &ixgbe_clear_rar_generic;
+ mac->ops.set_vmdq = NULL;
+ mac->ops.clear_vmdq = NULL;
+ mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
+ mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
+ mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
+ mac->ops.enable_mc = &ixgbe_enable_mc_generic;
+ mac->ops.disable_mc = &ixgbe_disable_mc_generic;
+ mac->ops.clear_vfta = NULL;
+ mac->ops.set_vfta = NULL;
+ mac->ops.init_uta_tables = NULL;
+
+ /* Link */
+ mac->ops.get_link_capabilities = NULL;
+ mac->ops.setup_link = NULL;
+ mac->ops.setup_link_speed = NULL;
+ mac->ops.check_link = NULL;
+
+ return 0;
+}
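
ixgbe_init_ops_generic deliberately leaves the device-specific slots (reset_hw, get_media_type, the VMDq/VFTA handlers, all link ops) at NULL; through ixgbe_call_func these degrade to IXGBE_NOT_IMPLEMENTED until a MAC-specific init overrides them. An illustrative sketch of how an 82598-layer init might layer on top (the exact override set in ixgbe_82598.c may differ):

	s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
	{
		struct ixgbe_mac_info *mac = &hw->mac;

		/* inherit the generic defaults first ... */
		ixgbe_init_ops_generic(hw);

		/* ... then bind the 82598-specific handlers */
		mac->ops.reset_hw = &ixgbe_reset_hw_82598;
		mac->ops.get_media_type = &ixgbe_get_media_type_82598;
		mac->ops.setup_link = &ixgbe_setup_mac_link_82598;

		return 0;
	}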
/**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+ /* Set bus info */
+ hw->mac.ops.get_bus_info(hw);
+
/* Identify the PHY */
hw->phy.ops.identify(hw);
IXGBE_READ_REG(hw, IXGBE_MRFC);
IXGBE_READ_REG(hw, IXGBE_RLEC);
IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- IXGBE_READ_REG(hw, IXGBE_LXONRXC);
IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ IXGBE_READ_REG(hw, IXGBE_LXONRXC);
IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
for (i = 0; i < 8; i++) {
IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
}
-
IXGBE_READ_REG(hw, IXGBE_PRC64);
IXGBE_READ_REG(hw, IXGBE_PRC127);
IXGBE_READ_REG(hw, IXGBE_PRC255);
return 0;
}
+/**
+ * ixgbe_get_bus_info_generic - Generic set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u16 link_status;
+
+ hw->bus.type = ixgbe_bus_type_pci_express;
+
+ /* Get the negotiated link width and speed from PCI config space */
+ link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+ switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+ case IXGBE_PCI_LINK_WIDTH_1:
+ hw->bus.width = ixgbe_bus_width_pcie_x1;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_2:
+ hw->bus.width = ixgbe_bus_width_pcie_x2;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_4:
+ hw->bus.width = ixgbe_bus_width_pcie_x4;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_8:
+ hw->bus.width = ixgbe_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = ixgbe_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & IXGBE_PCI_LINK_SPEED) {
+ case IXGBE_PCI_LINK_SPEED_2500:
+ hw->bus.speed = ixgbe_bus_speed_2500;
+ break;
+ case IXGBE_PCI_LINK_SPEED_5000:
+ hw->bus.speed = ixgbe_bus_speed_5000;
+ break;
+ default:
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+ break;
+ }
+
+ mac->ops.set_lan_id(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+ bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+
+ /* check for a port swap */
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (reg & IXGBE_FACTPS_LFS)
+ bus->func ^= 0x1;
+}
+
/**
* ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
* change if a future EEPROM is not SPI.
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
	if (eec & IXGBE_EEC_ADDR_SIZE)
		eeprom->address_bits = 16;
	else
		eeprom->address_bits = 8;
hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
- "%d\n", eeprom->type, eeprom->word_size,
- eeprom->address_bits);
+ "%d\n", eeprom->type, eeprom->word_size,
+ eeprom->address_bits);
}
return 0;
}
+/**
+ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * If ixgbe_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /* Prepare the EEPROM for writing */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == 0) {
+ if (ixgbe_ready_eeprom(hw) != 0) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == 0) {
+ ixgbe_standby_eeprom(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+
+ ixgbe_standby_eeprom(hw);
+
+ /*
+ * Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((hw->eeprom.address_bits == 8) && (offset >= 128))
+ write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset * 2),
+ hw->eeprom.address_bits);
+
+ /* Send the data */
+ data = (data >> 8) | (data << 8);
+ ixgbe_shift_out_eeprom_bits(hw, data, 16);
+ ixgbe_standby_eeprom(hw);
+
+ msleep(hw->eeprom.semaphore_delay);
+ /* Done with writing - release the EEPROM */
+ ixgbe_release_eeprom(hw);
+ }
+
+out:
+ return status;
+}
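
As the comment above warns, a bare write leaves the EEPROM checksum stale. The expected call pattern pairs the write with a checksum update through the API layer:

	/* usage sketch: keep the checksum valid after modifying a word */
	status = ixgbe_write_eeprom(hw, offset, data);
	if (status == 0)
		status = ixgbe_update_eeprom_checksum(hw);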
+
/**
* ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
* @hw: pointer to hardware structure
*/
if (i >= timeout) {
hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
- "not granted.\n");
+ "not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
if (status == 0) {
checksum = ixgbe_calc_eeprom_checksum(hw);
status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
+ checksum);
} else {
hw_dbg(hw, "EEPROM read failed\n");
}
s32 status = 0;
/* Make sure it is not a multicast address */
- if (IXGBE_IS_MULTICAST(mac_addr))
+ if (IXGBE_IS_MULTICAST(mac_addr)) {
+ hw_dbg(hw, "MAC address is multicast\n");
status = IXGBE_ERR_INVALID_MAC_ADDR;
/* Not a broadcast address */
- else if (IXGBE_IS_BROADCAST(mac_addr))
+ } else if (IXGBE_IS_BROADCAST(mac_addr)) {
+ hw_dbg(hw, "MAC address is broadcast\n");
status = IXGBE_ERR_INVALID_MAC_ADDR;
/* Reject the zero address */
- else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ hw_dbg(hw, "MAC address is all zeros\n");
status = IXGBE_ERR_INVALID_MAC_ADDR;
-
+ }
return status;
}
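
A typical caller gates RAR programming on this check, mirroring what the init path below does for RAR[0]:

	/* usage sketch: only program addresses that pass validation */
	if (ixgbe_validate_mac_addr(hw->mac.addr) == 0)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);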
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw->mac.addr[4], hw->mac.addr[5]);
} else {
/* Setup the receive address. */
hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw->mac.addr[4], hw->mac.addr[5]);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
for (i = 0; i < hw->mac.mcft_size; i++)
IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
- if (hw->mac.ops.init_uta_tables)
- hw->mac.ops.init_uta_tables(hw);
+ ixgbe_init_uta_tables(hw);
return 0;
}
*
* Adds it to unused receive address register or goes into promiscuous mode.
**/
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
u32 rar_entries = hw->mac.num_rar_entries;
u32 rar;
* manually putting the device into promiscuous mode.
**/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr next)
+ u32 addr_count, ixgbe_mc_addr_itr next)
{
u8 *addr;
u32 i;
*
* Sets the bit-vector in the multicast table.
**/
-static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector;
u32 vector_bit;
*
* Adds it to unused receive address register or to the multicast table.
**/
-static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
+void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 rar_entries = hw->mac.num_rar_entries;
u32 rar;
hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
- mc_addr[0], mc_addr[1], mc_addr[2],
- mc_addr[3], mc_addr[4], mc_addr[5]);
+ mc_addr[0], mc_addr[1], mc_addr[2],
+ mc_addr[3], mc_addr[4], mc_addr[5]);
/*
* Place this multicast address in the RAR if there is room,
return 0;
}
+
/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
+
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
-
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
-
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
-
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
-
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
-
-#ifdef DEBUG
-#define hw_dbg(hw, format, arg...) \
-printk(KERN_DEBUG, "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg);
-#else
-static inline int __attribute__ ((format (printf, 2, 3)))
-hw_dbg(struct ixgbe_hw *hw, const char *format, ...)
-{
- return 0;
-}
-#endif
-
#endif /* IXGBE_COMMON */
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2007 Intel Corporation.
+ Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
the file called "COPYING".
Contact Information:
- Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
goto err_config;
}
} else if (bw_sum[i][j] != BW_PERCENT &&
- bw_sum[i][j] != 0) {
+ bw_sum[i][j] != 0) {
ret_val = DCB_ERR_TC_BW;
goto err_config;
}
}
}
+ return DCB_SUCCESS;
+
err_config:
+ hw_dbg(hw, "DCB error code %d while checking %s settings.\n",
+ ret_val, (j == DCB_TX_CONFIG) ? "Tx" : "Rx");
+
return ret_val;
}
* ixgbe_dcb_check_config().
*/
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
- u8 direction)
+ u8 direction)
{
struct tc_bw_alloc *p;
s32 ret_val = 0;
* credit may not be enough to send out a TSO
* packet in descriptor plane arbitration.
*/
- if (credit_max &&
- (credit_max < MINIMUM_CREDIT_FOR_TSO))
+ if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_TSO))
credit_max = MINIMUM_CREDIT_FOR_TSO;
dcb_config->tc_config[i].desc_credits_max =
- (u16)credit_max;
+ (u16)credit_max;
}
p->data_credits_max = (u16)credit_max;
* This function returns the status data for each of the Traffic Classes in use.
*/
s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
- u8 tc_count)
+ u8 tc_count)
{
s32 ret = 0;
if (hw->mac.type == ixgbe_mac_82598EB)
/**
* ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
- * hw - pointer to hardware structure
- * stats - pointer to statistics structure
- * tc_count - Number of elements in bwg_array.
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of elements in bwg_array.
*
* This function returns the CBFC status data for each of the Traffic Classes.
*/
s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
- u8 tc_count)
+ u8 tc_count)
{
s32 ret = 0;
if (hw->mac.type == ixgbe_mac_82598EB)
* Configure Rx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
if (hw->mac.type == ixgbe_mac_82598EB)
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
if (hw->mac.type == ixgbe_mac_82598EB)
* Configure Tx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
if (hw->mac.type == ixgbe_mac_82598EB)
* Configure Priority Flow Control for each traffic class.
*/
s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
if (hw->mac.type == ixgbe_mac_82598EB)
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
if (hw->mac.type == ixgbe_mac_82598EB)
ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
return ret;
}
-
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2007 Intel Corporation.
+ Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
the file called "COPYING".
Contact Information:
- Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
/* Traffic class bandwidth allocation per direction */
struct tc_bw_alloc {
- u8 bwg_id; /* Bandwidth Group (BWG) ID */
- u8 bwg_percent; /* % of BWG's bandwidth */
- u8 link_percent; /* % of link bandwidth */
- u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+ u8 bwg_id; /* Bandwidth Group (BWG) ID */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 link_percent; /* % of link bandwidth */
+ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
u16 data_credits_refill; /* Credit refill amount in 64B granularity */
- u16 data_credits_max; /* Max credits for a configured packet buffer
- * in 64B granularity.*/
+ u16 data_credits_max; /* Max credits for a configured packet buffer
+ * in 64B granularity.*/
enum strict_prio_type prio_type; /* Link or Group Strict Priority */
};
pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
};
-/*
- * This structure contains many values encoded as fixed-point
- * numbers, meaning that some of bits are dedicated to the
- * magnitude and others to the fraction part. In the comments
- * this is shown as f=n, where n is the number of fraction bits.
- * These fraction bits are always the low-order bits. The size
- * of the magnitude is not specified.
- */
-struct bcn_config {
- u32 rp_admin_mode[MAX_TRAFFIC_CLASS]; /* BCN enabled, per TC */
- u32 bcna_option[2]; /* BCNA Port + MAC Addr */
- u32 rp_w; /* Derivative Weight, f=3 */
- u32 rp_gi; /* Increase Gain, f=12 */
- u32 rp_gd; /* Decrease Gain, f=12 */
- u32 rp_ru; /* Rate Unit */
- u32 rp_alpha; /* Max Decrease Factor, f=12 */
- u32 rp_beta; /* Max Increase Factor, f=12 */
- u32 rp_ri; /* Initial Rate */
- u32 rp_td; /* Drift Interval Timer */
- u32 rp_rd; /* Drift Increase */
- u32 rp_tmax; /* Severe Congestion Backoff Timer Range */
- u32 rp_rmin; /* Severe Congestion Restart Rate */
- u32 rp_wrtt; /* RTT Moving Average Weight */
-};
struct ixgbe_dcb_config {
- struct bcn_config bcn;
-
struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
-
+ bool pfc_mode_enable;
bool round_robin_enable;
enum dcb_rx_pba_cfg rx_pba_cfg;
/* DCB credits calculation */
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *config,
- u8 direction);
+ u8 direction);
/* DCB PFC functions */
s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+ struct ixgbe_dcb_config *dcb_config);
s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
- u8 tc_count);
+ u8 tc_count);
/* DCB traffic class stats */
s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
- u8 tc_count);
+ u8 tc_count);
/* DCB config arbiters */
s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+ struct ixgbe_dcb_config *dcb_config);
s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+ struct ixgbe_dcb_config *dcb_config);
s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+ struct ixgbe_dcb_config *dcb_config);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *config);
+
/* DCB definitions for credit calculation */
#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
-#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
-#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
-#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
-#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */
+#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145 = UpperBound((9*1024+54)/64B)
+ * for 9KB jumbo frame */
+#define DCB_MAX_TSO_SIZE (32 * 1024) /* MAX TSO packet size supported
+ * in DCB mode */
+#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO
+ * packet */
+#define MAX_CREDIT 4095 /* Maximum credit supported:
+				 * 256KB / 64B */
#endif /* _DCB_CONFIG_H */
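
The derived values follow directly from the 64-byte credit granularity; spelling out the arithmetic:

	/* worked example of the credit math above:
	 *   MINIMUM_CREDIT_FOR_TSO   = 32768 / 64 + 1           = 513 credits
	 *   MINIMUM_CREDIT_FOR_JUMBO = ceil((9*1024 + 54) / 64) = 145 credits
	 *   MAX_CREDIT               = 0xFFF (12-bit field)     = 4095 credits
	 */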
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2007 Intel Corporation.
+ Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
the file called "COPYING".
Contact Information:
- Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-#include "ixgbe.h"
+
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
* This function returns the status data for each of the Traffic Classes in use.
*/
s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count)
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
{
int tc;
if (tc_count > MAX_TRAFFIC_CLASS)
return DCB_ERR_PARAM;
-
/* Statistics pertaining to each traffic class */
for (tc = 0; tc < tc_count; tc++) {
/* Transmitted Packets */
stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
/* Received Bytes */
stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
}
return 0;
* This function returns the CBFC status data for each of the Traffic Classes.
*/
s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count)
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
{
int tc;
if (tc_count > MAX_TRAFFIC_CLASS)
return DCB_ERR_PARAM;
-
for (tc = 0; tc < tc_count; tc++) {
/* Priority XOFF Transmitted */
stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
* Configure packet buffers for DCB mode.
*/
s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret_val = 0;
u32 value = IXGBE_RXPBSIZE_64KB;
/* Setup Tx packet buffer sizes */
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
- IXGBE_TXPBSIZE_40KB);
+ IXGBE_TXPBSIZE_40KB);
}
break;
}
* Configure Rx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
u32 reg = 0;
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
u32 reg, max_credits;
* Configure Tx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct tc_bw_alloc *p;
u32 reg;
* Configure Priority Flow Control for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
u32 reg, rx_pba_size;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
/* correct the reporting of our flow control status */
- hw->fc.type = ixgbe_fc_none;
+ hw->fc.current_mode = ixgbe_fc_none;
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
rx_pba_size = IXGBE_RXPBSIZE_64KB;
} else {
rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
- : IXGBE_RXPBSIZE_48KB;
+ : IXGBE_RXPBSIZE_48KB;
}
reg = ((rx_pba_size >> 5) & 0xFFF0);
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
}
- /* Transmit Queues stats setting - 4 queues per statistics reg */
+ /* Transmit Queues stats setting - 4 queues per statistics reg*/
for (i = 0; i < 8; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
reg |= ((0x1010101) * i);
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
+ u32 pap = 0;
+
ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
ixgbe_dcb_config_pfc_82598(hw, dcb_config);
ixgbe_dcb_config_tc_stats_82598(hw);
+ /* TODO: For DCB SV purpose only,
+ * remove it before product release */
+ if (dcb_config->link_speed > 0 && dcb_config->link_speed <= 9) {
+ pap = IXGBE_READ_REG(hw, IXGBE_PAP);
+ pap |= (dcb_config->link_speed << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_PAP, pap);
+ }
+
return 0;
}
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2007 Intel Corporation.
+ Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
the file called "COPYING".
Contact Information:
- Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
/* DCB register definitions */
#define IXGBE_DPMCS_MTSOS_SHIFT 16
-#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */
+#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin,
+ * 1 DFP - Deficit Fixed Priority */
#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
-#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */
-#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */
+#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
+ * buffers enable */
+#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
+ * (RSS) enable */
#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
#define IXGBE_TDPT2TCCR_GSP 0x40000000
#define IXGBE_TDPT2TCCR_LSP 0x80000000
-#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */
+#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin,
+ * 1 DFP - Deficit Fixed Priority */
#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
-#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000
-
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+ struct ixgbe_dcb_config *dcb_config);
s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count);
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count);
/* DCB traffic class stats */
s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw);
s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
- struct ixgbe_hw_stats *stats,
- u8 tc_count);
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count);
/* DCB config arbiters */
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+ struct ixgbe_dcb_config *dcb_config);
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+ struct ixgbe_dcb_config *dcb_config);
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+ struct ixgbe_dcb_config *dcb_config);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *config);
+ struct ixgbe_dcb_config *config);
#endif /* _DCB_82598_CONFIG_H */
the file called "COPYING".
Contact Information:
- Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
-#include <linux/dcbnl.h>
+
+#include <linux/netlink.h>
+#include <linux/genetlink.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
/* Callbacks for DCB netlink in the kernel */
-#define BIT_DCB_MODE 0x01
-#define BIT_PFC 0x02
-#define BIT_PG_RX 0x04
-#define BIT_PG_TX 0x08
+#define BIT_DCB_MODE 0x01
+#define BIT_PFC 0x02
+#define BIT_PG_RX 0x04
+#define BIT_PG_TX 0x08
#define BIT_BCN 0x10
+#define BIT_LINKSPEED 0x80
+
+/* DCB configuration commands */
+enum {
+ DCB_C_UNDEFINED,
+ DCB_C_GSTATE,
+ DCB_C_SSTATE,
+ DCB_C_PG_STATS,
+ DCB_C_PGTX_GCFG,
+ DCB_C_PGTX_SCFG,
+ DCB_C_PGRX_GCFG,
+ DCB_C_PGRX_SCFG,
+ DCB_C_PFC_GCFG,
+ DCB_C_PFC_SCFG,
+ DCB_C_PFC_STATS,
+ DCB_C_GLINK_SPD,
+ DCB_C_SLINK_SPD,
+ DCB_C_SET_ALL,
+ DCB_C_GPERM_HWADDR,
+ __DCB_C_ENUM_MAX,
+};
+
+#define IXGBE_DCB_C_MAX (__DCB_C_ENUM_MAX - 1)
+
+/* DCB configuration attributes */
+enum {
+ DCB_A_UNDEFINED = 0,
+ DCB_A_IFNAME,
+ DCB_A_STATE,
+ DCB_A_PFC_STATS,
+ DCB_A_PFC_CFG,
+ DCB_A_PG_STATS,
+ DCB_A_PG_CFG,
+ DCB_A_LINK_SPD,
+ DCB_A_SET_ALL,
+ DCB_A_PERM_HWADDR,
+ __DCB_A_ENUM_MAX,
+};
+
+#define IXGBE_DCB_A_MAX (__DCB_A_ENUM_MAX - 1)
+
+/* PERM HWADDR attributes */
+enum {
+ PERM_HW_A_UNDEFINED,
+ PERM_HW_A_0,
+ PERM_HW_A_1,
+ PERM_HW_A_2,
+ PERM_HW_A_3,
+ PERM_HW_A_4,
+ PERM_HW_A_5,
+ PERM_HW_A_ALL,
+ __PERM_HW_A_ENUM_MAX,
+};
+
+#define IXGBE_DCB_PERM_HW_A_MAX (__PERM_HW_A_ENUM_MAX - 1)
+
+/* PFC configuration attributes */
+enum {
+ PFC_A_UP_UNDEFINED,
+ PFC_A_UP_0,
+ PFC_A_UP_1,
+ PFC_A_UP_2,
+ PFC_A_UP_3,
+ PFC_A_UP_4,
+ PFC_A_UP_5,
+ PFC_A_UP_6,
+ PFC_A_UP_7,
+ PFC_A_UP_MAX, /* Used as an iterator cap */
+ PFC_A_UP_ALL,
+ __PFC_A_UP_ENUM_MAX,
+};
+
+#define IXGBE_DCB_PFC_A_UP_MAX (__PFC_A_UP_ENUM_MAX - 1)
+
+/* Priority Group Traffic Class and Bandwidth Group
+ * configuration attributes
+ */
+enum {
+ PG_A_UNDEFINED,
+ PG_A_TC_0,
+ PG_A_TC_1,
+ PG_A_TC_2,
+ PG_A_TC_3,
+ PG_A_TC_4,
+ PG_A_TC_5,
+ PG_A_TC_6,
+ PG_A_TC_7,
+ PG_A_TC_MAX, /* Used as an iterator cap */
+ PG_A_TC_ALL,
+ PG_A_BWG_0,
+ PG_A_BWG_1,
+ PG_A_BWG_2,
+ PG_A_BWG_3,
+ PG_A_BWG_4,
+ PG_A_BWG_5,
+ PG_A_BWG_6,
+ PG_A_BWG_7,
+ PG_A_BWG_MAX, /* Used as an iterator cap */
+ PG_A_BWG_ALL,
+ __PG_A_ENUM_MAX,
+};
+
+#define IXGBE_DCB_PG_A_MAX (__PG_A_ENUM_MAX - 1)
+
+enum {
+ TC_A_PARAM_UNDEFINED,
+ TC_A_PARAM_STRICT_PRIO,
+ TC_A_PARAM_BW_GROUP_ID,
+ TC_A_PARAM_BW_PCT_IN_GROUP,
+ TC_A_PARAM_UP_MAPPING,
+ TC_A_PARAM_MAX, /* Used as an iterator cap */
+ TC_A_PARAM_ALL,
+ __TC_A_PARAM_ENUM_MAX,
+};
+
+#define IXGBE_DCB_TC_A_PARAM_MAX (__TC_A_PARAM_ENUM_MAX - 1)
+
+#define DCB_PROTO_VERSION 0x1
+#define is_pci_device(dev) ((dev)->bus == &pci_bus_type)
+
+static struct genl_family dcb_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = 0,
+ .name = "IXGBE_DCB",
+ .version = DCB_PROTO_VERSION,
+ .maxattr = IXGBE_DCB_A_MAX,
+};
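
Nothing in this hunk actually registers the family, so presumably that happens elsewhere in ixgbe_dcb_nl.c. A minimal registration sketch against the genetlink API of this kernel generation (the ops array and function name here are illustrative; the policy and handlers they reference are defined below):

	static struct genl_ops ixgbe_dcb_genl_ops[] = {
		{
			.cmd = DCB_C_GSTATE,
			.flags = GENL_ADMIN_PERM,
			.policy = dcb_genl_policy,
			.doit = ixgbe_dcb_gstate,
		},
		/* ... one entry per DCB_C_* command ... */
	};

	static int ixgbe_dcb_netlink_register(void)
	{
		int i, ret;

		ret = genl_register_family(&dcb_family);
		if (ret)
			return ret;

		for (i = 0; i < ARRAY_SIZE(ixgbe_dcb_genl_ops); i++) {
			ret = genl_register_ops(&dcb_family,
						&ixgbe_dcb_genl_ops[i]);
			if (ret) {
				genl_unregister_family(&dcb_family);
				return ret;
			}
		}
		return 0;
	}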
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
- struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
+/* DCB NETLINK attributes policy */
+static struct nla_policy dcb_genl_policy[IXGBE_DCB_A_MAX + 1] = {
+ [DCB_A_IFNAME] = {.type = NLA_STRING, .len = IFNAMSIZ - 1},
+ [DCB_A_STATE] = {.type = NLA_U8},
+ [DCB_A_PG_CFG] = {.type = NLA_NESTED},
+ [DCB_A_PFC_CFG] = {.type = NLA_NESTED},
+ [DCB_A_PFC_STATS] = {.type = NLA_NESTED},
+ [DCB_A_PG_STATS] = {.type = NLA_NESTED},
+ [DCB_A_LINK_SPD] = {.type = NLA_U8},
+ [DCB_A_SET_ALL] = {.type = NLA_U8},
+ [DCB_A_PERM_HWADDR] = {.type = NLA_NESTED},
+};
+
+/* DCB_A_PERM_HWADDR nested attributes... an array. */
+static struct nla_policy dcb_perm_hwaddr_nest[IXGBE_DCB_PERM_HW_A_MAX + 1] = {
+ [PERM_HW_A_0] = {.type = NLA_U8},
+ [PERM_HW_A_1] = {.type = NLA_U8},
+ [PERM_HW_A_2] = {.type = NLA_U8},
+ [PERM_HW_A_3] = {.type = NLA_U8},
+ [PERM_HW_A_4] = {.type = NLA_U8},
+ [PERM_HW_A_5] = {.type = NLA_U8},
+ [PERM_HW_A_ALL] = {.type = NLA_FLAG},
+};
+
+/* DCB_A_PFC_CFG nested attributes...like an array. */
+static struct nla_policy dcb_pfc_up_nest[IXGBE_DCB_PFC_A_UP_MAX + 1] = {
+ [PFC_A_UP_0] = {.type = NLA_U8},
+ [PFC_A_UP_1] = {.type = NLA_U8},
+ [PFC_A_UP_2] = {.type = NLA_U8},
+ [PFC_A_UP_3] = {.type = NLA_U8},
+ [PFC_A_UP_4] = {.type = NLA_U8},
+ [PFC_A_UP_5] = {.type = NLA_U8},
+ [PFC_A_UP_6] = {.type = NLA_U8},
+ [PFC_A_UP_7] = {.type = NLA_U8},
+ [PFC_A_UP_ALL] = {.type = NLA_FLAG},
+};
+
+/* DCB_A_PG_CFG nested attributes...like a struct. */
+static struct nla_policy dcb_pg_nest[IXGBE_DCB_PG_A_MAX + 1] = {
+ [PG_A_TC_0] = {.type = NLA_NESTED},
+ [PG_A_TC_1] = {.type = NLA_NESTED},
+ [PG_A_TC_2] = {.type = NLA_NESTED},
+ [PG_A_TC_3] = {.type = NLA_NESTED},
+ [PG_A_TC_4] = {.type = NLA_NESTED},
+ [PG_A_TC_5] = {.type = NLA_NESTED},
+ [PG_A_TC_6] = {.type = NLA_NESTED},
+ [PG_A_TC_7] = {.type = NLA_NESTED},
+ [PG_A_TC_ALL] = {.type = NLA_NESTED},
+ [PG_A_BWG_0] = {.type = NLA_U8},
+ [PG_A_BWG_1] = {.type = NLA_U8},
+ [PG_A_BWG_2] = {.type = NLA_U8},
+ [PG_A_BWG_3] = {.type = NLA_U8},
+ [PG_A_BWG_4] = {.type = NLA_U8},
+ [PG_A_BWG_5] = {.type = NLA_U8},
+ [PG_A_BWG_6] = {.type = NLA_U8},
+ [PG_A_BWG_7] = {.type = NLA_U8},
+	[PG_A_BWG_ALL] = {.type = NLA_FLAG},
+};
+
+/* TC_A_CLASS_X nested attributes. */
+static struct nla_policy dcb_tc_param_nest[IXGBE_DCB_TC_A_PARAM_MAX + 1] = {
+ [TC_A_PARAM_STRICT_PRIO] = {.type = NLA_U8},
+ [TC_A_PARAM_BW_GROUP_ID] = {.type = NLA_U8},
+ [TC_A_PARAM_BW_PCT_IN_GROUP] = {.type = NLA_U8},
+ [TC_A_PARAM_UP_MAPPING] = {.type = NLA_U8},
+ [TC_A_PARAM_ALL] = {.type = NLA_FLAG},
+};
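
The nested policies above are consumed with nla_parse_nested() in the set-side handlers. A hedged sketch of how a PFC configuration command might unpack DCB_A_PFC_CFG (the helper name is illustrative):

	static int ixgbe_dcb_parse_pfc(struct genl_info *info, u8 *pfc_en)
	{
		struct nlattr *tb[IXGBE_DCB_PFC_A_UP_MAX + 1];
		int i, ret;

		if (!info->attrs[DCB_A_PFC_CFG])
			return -EINVAL;

		ret = nla_parse_nested(tb, IXGBE_DCB_PFC_A_UP_MAX,
				       info->attrs[DCB_A_PFC_CFG],
				       dcb_pfc_up_nest);
		if (ret)
			return ret;

		for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++)
			if (tb[i])
				pfc_en[i - PFC_A_UP_0] = nla_get_u8(tb[i]);

		return 0;
	}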
+
+static int ixgbe_dcb_check_adapter(struct net_device *netdev)
+{
+ struct device *busdev;
+ struct pci_dev *pcidev;
+
+ busdev = netdev->dev.parent;
+ if (!busdev)
+ return -EINVAL;
+
+ if (!is_pci_device(busdev))
+ return -EINVAL;
+
+ pcidev = to_pci_dev(busdev);
+ if (!pcidev)
+ return -EINVAL;
+
+ if (ixgbe_is_ixgbe(pcidev))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+ struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
{
struct tc_configuration *src_tc_cfg = NULL;
struct tc_configuration *dst_tc_cfg = NULL;
if (!src_dcb_cfg || !dst_dcb_cfg)
return -EINVAL;
- for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
- src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
- dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
+ dst_dcb_cfg->link_speed = src_dcb_cfg->link_speed;
+
+ for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) {
+ src_tc_cfg = &src_dcb_cfg->tc_config[i - PG_A_TC_0];
+ dst_tc_cfg = &dst_dcb_cfg->tc_config[i - PG_A_TC_0];
dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
}
- for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
- dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
- [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
- [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
- dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
- [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
- [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
+ for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) {
+ dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG][i - PG_A_BWG_0] =
+ src_dcb_cfg->bw_percentage[DCB_TX_CONFIG][i - PG_A_BWG_0];
+ dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG][i - PG_A_BWG_0] =
+ src_dcb_cfg->bw_percentage[DCB_RX_CONFIG][i - PG_A_BWG_0];
}
- for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
- dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
- src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
+ for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) {
+ dst_dcb_cfg->tc_config[i - PFC_A_UP_0].dcb_pfc =
+ src_dcb_cfg->tc_config[i - PFC_A_UP_0].dcb_pfc;
}
- for (i = DCB_BCN_ATTR_RP_0; i < DCB_BCN_ATTR_RP_ALL; i++) {
- dst_dcb_cfg->bcn.rp_admin_mode[i - DCB_BCN_ATTR_RP_0] =
- src_dcb_cfg->bcn.rp_admin_mode[i - DCB_BCN_ATTR_RP_0];
- }
- dst_dcb_cfg->bcn.bcna_option[0] = src_dcb_cfg->bcn.bcna_option[0];
- dst_dcb_cfg->bcn.bcna_option[1] = src_dcb_cfg->bcn.bcna_option[1];
- dst_dcb_cfg->bcn.rp_alpha = src_dcb_cfg->bcn.rp_alpha;
- dst_dcb_cfg->bcn.rp_beta = src_dcb_cfg->bcn.rp_beta;
- dst_dcb_cfg->bcn.rp_gd = src_dcb_cfg->bcn.rp_gd;
- dst_dcb_cfg->bcn.rp_gi = src_dcb_cfg->bcn.rp_gi;
- dst_dcb_cfg->bcn.rp_tmax = src_dcb_cfg->bcn.rp_tmax;
- dst_dcb_cfg->bcn.rp_td = src_dcb_cfg->bcn.rp_td;
- dst_dcb_cfg->bcn.rp_rmin = src_dcb_cfg->bcn.rp_rmin;
- dst_dcb_cfg->bcn.rp_w = src_dcb_cfg->bcn.rp_w;
- dst_dcb_cfg->bcn.rp_rd = src_dcb_cfg->bcn.rp_rd;
- dst_dcb_cfg->bcn.rp_ru = src_dcb_cfg->bcn.rp_ru;
- dst_dcb_cfg->bcn.rp_wrtt = src_dcb_cfg->bcn.rp_wrtt;
- dst_dcb_cfg->bcn.rp_ri = src_dcb_cfg->bcn.rp_ri;
-
return 0;
}
-static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
+static int ixgbe_nl_reply(u8 value, u8 cmd, u8 attr, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct sk_buff *dcb_skb = NULL;
+ void *data;
+ int ret;
- DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
+ dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!dcb_skb)
+ return -EINVAL;
- return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
-}
+ data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0, cmd);
+ if (!data)
+ goto err;
+
+ ret = nla_put_u8(dcb_skb, attr, value);
+ if (ret)
+ goto err;
+
+ /* end the message, assign the nlmsg_len. */
+ genlmsg_end(dcb_skb, data);
+ ret = genlmsg_reply(dcb_skb, info);
+ if (ret)
+ goto err;
-static u16 ixgbe_dcb_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- /* All traffic should default to class 0 */
return 0;
+
+err:
+ kfree(dcb_skb);
+ return -EINVAL;
}
-static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
+static int ixgbe_dcb_gstate(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret = -ENOMEM;
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
- DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
+ if (!info->attrs[DCB_A_IFNAME])
+ return -EINVAL;
- if (state > 0) {
- /* Turn on DCB */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- return 0;
- }
-
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
- DPRINTK(DRV, ERR, "Enable Failed, needs MSI-X\n");
- return 1;
- }
-
- if (netif_running(netdev))
- netdev->stop(netdev);
- ixgbe_reset_interrupt_capability(adapter);
- ixgbe_napi_del_all(adapter);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
- netdev->select_queue = &ixgbe_dcb_select_queue;
-
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
- ixgbe_init_interrupt_scheme(adapter);
- ixgbe_napi_add_all(adapter);
- if (netif_running(netdev))
- netdev->open(netdev);
- } else {
- /* Turn off DCB */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (netif_running(netdev))
- netdev->stop(netdev);
- ixgbe_reset_interrupt_capability(adapter);
- ixgbe_napi_del_all(adapter);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
- netdev->select_queue = NULL;
-
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
- ixgbe_init_interrupt_scheme(adapter);
- ixgbe_napi_add_all(adapter);
- if (netif_running(netdev))
- netdev->open(netdev);
- }
- }
- return 0;
-}
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ return -EINVAL;
-static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
- u8 *perm_addr)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int i;
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err_out;
+ else
+ adapter = netdev_priv(netdev);
- for (i = 0; i < netdev->addr_len; i++)
- perm_addr[i] = adapter->hw.mac.perm_addr[i];
-}
+ ret = ixgbe_nl_reply(!!(adapter->flags & IXGBE_FLAG_DCB_ENABLED),
+ DCB_C_GSTATE, DCB_A_STATE, info);
+ if (ret)
+ goto err_out;
-static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- if (prio != DCB_ATTR_VALUE_UNDEFINED)
- adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
- if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
- adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
- if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
- adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
- bw_pct;
- if (up_map != DCB_ATTR_VALUE_UNDEFINED)
- adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
- up_map;
-
- if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
- adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
- (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
- adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
- (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
- adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
- (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
- adapter->dcb_set_bitmap |= BIT_PG_TX;
+ DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
+
+err_out:
+ dev_put(netdev);
+ return ret;
}
-static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+extern void ixgbe_napi_add_all(struct ixgbe_adapter *);
+extern void ixgbe_napi_del_all(struct ixgbe_adapter *);
+
+static int ixgbe_dcb_sstate(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
+ int ret = -EINVAL;
+ u8 value;
+
+ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_STATE])
+ goto err;
+
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ goto err;
+
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err_out;
+ else
+ adapter = netdev_priv(netdev);
+
+ value = nla_get_u8(info->attrs[DCB_A_STATE]);
+ if ((value & 1) != value) {
+ DPRINTK(DRV, INFO, "Value is not 1 or 0, it is %d.\n", value);
+ } else {
+ switch (value) {
+ case 0:
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ if (netdev->flags & IFF_UP)
+ netdev->stop(netdev);
+ ixgbe_reset_interrupt_capability(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+ ixgbe_napi_del_all(adapter);
+#endif
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ adapter->tx_ring = NULL;
+ adapter->rx_ring = NULL;
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ if (adapter->flags & IXGBE_FLAG_RSS_CAPABLE)
+ adapter->flags |=
+ IXGBE_FLAG_RSS_ENABLED;
+ ixgbe_init_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+ ixgbe_napi_add_all(adapter);
+#endif
+ ixgbe_reset(adapter);
+ if (netdev->flags & IFF_UP)
+ netdev->open(netdev);
+ break;
+ } else {
+ /* Nothing to do, already off */
+ goto out;
+ }
+ case 1:
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ /* Nothing to do, already on */
+ goto out;
+ } else if (!(adapter->flags & IXGBE_FLAG_DCB_CAPABLE)) {
+ DPRINTK(DRV, ERR, "Enable failed. Make sure "
+ "the driver can enable MSI-X.\n");
+ ret = -EINVAL;
+ goto err_out;
+ } else {
+ if (netdev->flags & IFF_UP)
+ netdev->stop(netdev);
+ ixgbe_reset_interrupt_capability(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+ ixgbe_napi_del_all(adapter);
+#endif
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ adapter->tx_ring = NULL;
+ adapter->rx_ring = NULL;
+
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+ adapter->ring_feature[RING_F_DCB].indices = 8;
+ ixgbe_init_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_NAPI
+ ixgbe_napi_add_all(adapter);
+#endif
+ ixgbe_reset(adapter);
+ if (netdev->flags & IFF_UP)
+ netdev->open(netdev);
+ break;
+ }
+ }
+ }
- adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
+out:
+ ret = ixgbe_nl_reply(0, DCB_C_SSTATE, DCB_A_STATE, info);
+ if (ret)
+ goto err_out;
- if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[0][bwg_id])
- adapter->dcb_set_bitmap |= BIT_PG_RX;
-}
+ DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
-static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- if (prio != DCB_ATTR_VALUE_UNDEFINED)
- adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
- if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
- adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
- if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
- adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
- bw_pct;
- if (up_map != DCB_ATTR_VALUE_UNDEFINED)
- adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
- up_map;
-
- if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
- adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
- (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
- adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
- (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
- adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
- (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
- adapter->dcb_set_bitmap |= BIT_PG_RX;
+err_out:
+ dev_put(netdev);
+err:
+ return ret;
}
-static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+static int ixgbe_dcb_glink_spd(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret = -ENOMEM;
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
- adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
+ if (!info->attrs[DCB_A_IFNAME])
+ return -EINVAL;
- if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[1][bwg_id])
- adapter->dcb_set_bitmap |= BIT_PG_RX;
-}
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ return -EINVAL;
-static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err_out;
+ else
+ adapter = netdev_priv(netdev);
- *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
- *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
- *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
- *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
-}
+ ret = ixgbe_nl_reply(adapter->dcb_cfg.link_speed & 0xff,
+ DCB_C_GLINK_SPD, DCB_A_LINK_SPD, info);
+ if (ret)
+ goto err_out;
-static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ DPRINTK(DRV, INFO, "Get DCB Link Speed.\n");
- *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
+err_out:
+ dev_put(netdev);
+ return ret;
}
-static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+static int ixgbe_dcb_slink_spd(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
+ int ret = -EINVAL;
+ u8 value;
+
+ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_LINK_SPD])
+ goto err;
+
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ goto err;
+
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err_out;
+ else
+ adapter = netdev_priv(netdev);
+
+ value = nla_get_u8(info->attrs[DCB_A_LINK_SPD]);
+ if (value > 9) {
+ DPRINTK(DRV, ERR, "Value is not 0 thru 9, it is %d.\n", value);
+ } else {
+ if (!adapter->dcb_set_bitmap &&
+ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
+ adapter->ring_feature[RING_F_DCB].indices)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
- *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
- *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
- *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
- *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
-}
+ adapter->temp_dcb_cfg.link_speed = value;
+ adapter->dcb_set_bitmap |= BIT_LINKSPEED;
+ }
-static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ ret = ixgbe_nl_reply(0, DCB_C_SLINK_SPD, DCB_A_LINK_SPD, info);
+ if (ret)
+ goto err_out;
- *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
+ DPRINTK(DRV, INFO, "Set DCB Link Speed to %d.\n", value);
+
+err_out:
+ dev_put(netdev);
+err:
+ return ret;
}
-static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
- u8 setting)
+static int ixgbe_dcb_gperm_hwaddr(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ void *data;
+ struct sk_buff *dcb_skb = NULL;
+ struct nlattr *tb[IXGBE_DCB_PERM_HW_A_MAX + 1], *nest;
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
+ struct ixgbe_hw *hw = NULL;
+ int ret = -ENOMEM;
+ int i;
- adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
- if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
- adapter->dcb_cfg.tc_config[priority].dcb_pfc)
- adapter->dcb_set_bitmap |= BIT_PFC;
-}
+ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PERM_HWADDR])
+ return -EINVAL;
-static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
- u8 *setting)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ return -EINVAL;
- *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
-}
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err_out;
+ else
+ adapter = netdev_priv(netdev);
-static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int ret;
+ hw = &adapter->hw;
- adapter->dcb_set_bitmap &= ~BIT_BCN; /* no set for BCN */
- if (!adapter->dcb_set_bitmap)
- return 1;
+ ret = nla_parse_nested(tb, IXGBE_DCB_PERM_HW_A_MAX,
+ info->attrs[DCB_A_PERM_HWADDR],
+ dcb_perm_hwaddr_nest);
+ if (ret)
+ goto err;
- while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- msleep(1);
+ dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!dcb_skb)
+ goto err;
- if (netif_running(netdev))
- ixgbe_down(adapter);
+ data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0,
+ DCB_C_GPERM_HWADDR);
+ if (!data)
+ goto err;
- ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
- if (ret) {
- clear_bit(__IXGBE_RESETTING, &adapter->state);
- return ret;
+ nest = nla_nest_start(dcb_skb, DCB_A_PERM_HWADDR);
+ if (!nest)
+ goto err;
+
+ for (i = 0; i < netdev->addr_len; i++) {
+ if (!tb[i+PERM_HW_A_0] && !tb[PERM_HW_A_ALL])
+ goto err;
+
+ ret = nla_put_u8(dcb_skb, DCB_A_PERM_HWADDR,
+ hw->mac.perm_addr[i]);
+
+ if (ret) {
+ nla_nest_cancel(dcb_skb, nest);
+ goto err;
+ }
}
- if (netif_running(netdev))
- ixgbe_up(adapter);
+ nla_nest_end(dcb_skb, nest);
+
+ genlmsg_end(dcb_skb, data);
+
+ ret = genlmsg_reply(dcb_skb, info);
+ if (ret)
+ goto err;
+
+ dev_put(netdev);
+ return 0;
- adapter->dcb_set_bitmap = 0x00;
- clear_bit(__IXGBE_RESETTING, &adapter->state);
+err:
+ DPRINTK(DRV, ERR, "Error in get permanent hwaddr.\n");
+ kfree(dcb_skb);
+err_out:
+ dev_put(netdev);
return ret;
}
-static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+static int ixgbe_dcb_pg_scfg(struct sk_buff *skb, struct genl_info *info,
+ int dir)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u8 rval = 0;
-
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- switch (capid) {
- case DCB_CAP_ATTR_PG:
- *cap = true;
- break;
- case DCB_CAP_ATTR_PFC:
- *cap = true;
- break;
- case DCB_CAP_ATTR_UP2TC:
- *cap = false;
- break;
- case DCB_CAP_ATTR_PG_TCS:
- *cap = 0x80;
- break;
- case DCB_CAP_ATTR_PFC_TCS:
- *cap = 0x80;
- break;
- case DCB_CAP_ATTR_GSP:
- *cap = true;
- break;
- case DCB_CAP_ATTR_BCN:
- *cap = false;
- break;
- default:
- rval = -EINVAL;
- break;
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
+ struct tc_configuration *tc_config = NULL;
+ struct tc_configuration *tc_tmpcfg = NULL;
+ struct nlattr *pg_tb[IXGBE_DCB_PG_A_MAX + 1];
+ struct nlattr *param_tb[IXGBE_DCB_TC_A_PARAM_MAX + 1];
+ int i, ret, tc_max;
+ u8 value;
+ u8 changed = 0;
+
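+ /*
+ * DCB_A_PG_CFG nests one attribute set per traffic class (strict
+ * priority, BWG id, percent of group, UP-to-TC map) plus flat BWG
+ * percentage attributes. Changes are staged in temp_dcb_cfg and only
+ * pushed to hardware by a later DCB_C_SET_ALL command.
+ */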
+ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PG_CFG])
+ return -EINVAL;
+
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ return -EINVAL;
+
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err;
+ else
+ adapter = netdev_priv(netdev);
+
+ ret = nla_parse_nested(pg_tb, IXGBE_DCB_PG_A_MAX,
+ info->attrs[DCB_A_PG_CFG], dcb_pg_nest);
+ if (ret)
+ goto err;
+
+ if (!adapter->dcb_set_bitmap &&
+ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
+ adapter->ring_feature[RING_F_DCB].indices)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tc_max = adapter->ring_feature[RING_F_DCB].indices;
+ for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) {
+ if (!pg_tb[i])
+ continue;
+
+ ret = nla_parse_nested(param_tb, IXGBE_DCB_TC_A_PARAM_MAX,
+ pg_tb[i], dcb_tc_param_nest);
+ if (ret)
+ goto err;
+
+ tc_config = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0];
+ tc_tmpcfg = &adapter->temp_dcb_cfg.tc_config[i - PG_A_TC_0];
+ if (param_tb[TC_A_PARAM_STRICT_PRIO]) {
+ value = nla_get_u8(param_tb[TC_A_PARAM_STRICT_PRIO]);
+ tc_tmpcfg->path[dir].prio_type = value;
+ if (tc_tmpcfg->path[dir].prio_type !=
+ tc_config->path[dir].prio_type)
+ changed = 1;
+ }
+ if (param_tb[TC_A_PARAM_BW_GROUP_ID]) {
+ value = nla_get_u8(param_tb[TC_A_PARAM_BW_GROUP_ID]);
+ tc_tmpcfg->path[dir].bwg_id = value;
+ if (tc_tmpcfg->path[dir].bwg_id !=
+ tc_config->path[dir].bwg_id)
+ changed = 1;
+ }
+ if (param_tb[TC_A_PARAM_BW_PCT_IN_GROUP]) {
+ value = nla_get_u8(param_tb[TC_A_PARAM_BW_PCT_IN_GROUP]);
+ tc_tmpcfg->path[dir].bwg_percent = value;
+ if (tc_tmpcfg->path[dir].bwg_percent !=
+ tc_config->path[dir].bwg_percent)
+ changed = 1;
+ }
+ if (param_tb[TC_A_PARAM_UP_MAPPING]) {
+ value = nla_get_u8(param_tb[TC_A_PARAM_UP_MAPPING]);
+ tc_tmpcfg->path[dir].up_to_tc_bitmap = value;
+ if (tc_tmpcfg->path[dir].up_to_tc_bitmap !=
+ tc_config->path[dir].up_to_tc_bitmap)
+ changed = 1;
}
- } else {
- rval = -EINVAL;
}
- return rval;
-}
+ for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) {
+ if (!pg_tb[i])
+ continue;
-static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u8 rval = 0;
-
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- switch (tcid) {
- case DCB_NUMTCS_ATTR_PG:
- *num = MAX_TRAFFIC_CLASS;
- break;
- case DCB_NUMTCS_ATTR_PFC:
- *num = MAX_TRAFFIC_CLASS;
- break;
- default:
- rval = -EINVAL;
- break;
- }
+ value = nla_get_u8(pg_tb[i]);
+ adapter->temp_dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0] = value;
+
+ if (adapter->temp_dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0] !=
+ adapter->dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0])
+ changed = 1;
+ }
+
+ adapter->temp_dcb_cfg.round_robin_enable = false;
+
+ if (changed) {
+ if (dir == DCB_TX_CONFIG)
+ adapter->dcb_set_bitmap |= BIT_PG_TX;
+ else
+ adapter->dcb_set_bitmap |= BIT_PG_RX;
+
+ DPRINTK(DRV, INFO, "Set DCB PG\n");
} else {
- rval = -EINVAL;
+ DPRINTK(DRV, INFO, "Set DCB PG - no changes\n");
}
- return rval;
+ ret = ixgbe_nl_reply(0, (dir ? DCB_C_PGRX_SCFG : DCB_C_PGTX_SCFG),
+ DCB_A_PG_CFG, info);
+ if (ret)
+ goto err;
+
+err:
+ dev_put(netdev);
+ return ret;
}
-static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+static int ixgbe_dcb_pgtx_scfg(struct sk_buff *skb, struct genl_info *info)
{
- return -EINVAL;
+ return ixgbe_dcb_pg_scfg(skb, info, DCB_TX_CONFIG);
}
-static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
+static int ixgbe_dcb_pgrx_scfg(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
+ return ixgbe_dcb_pg_scfg(skb, info, DCB_RX_CONFIG);
}
-static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+static int ixgbe_dcb_pg_gcfg(struct sk_buff *skb, struct genl_info *info,
+ int dir)
{
- return;
+ void *data;
+ struct sk_buff *dcb_skb = NULL;
+ struct nlattr *pg_nest, *param_nest, *tb;
+ struct nlattr *pg_tb[IXGBE_DCB_PG_A_MAX + 1];
+ struct nlattr *param_tb[IXGBE_DCB_TC_A_PARAM_MAX + 1];
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
+ struct tc_configuration *tc_config = NULL;
+ struct tc_bw_alloc *tc = NULL;
+ int ret = -ENOMEM;
+ int i, tc_max;
+
+ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PG_CFG])
+ return -EINVAL;
+
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ return -EINVAL;
+
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err_out;
+ else
+ adapter = netdev_priv(netdev);
+
+ ret = nla_parse_nested(pg_tb, IXGBE_DCB_PG_A_MAX,
+ info->attrs[DCB_A_PG_CFG], dcb_pg_nest);
+ if (ret)
+ goto err;
+
+ dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!dcb_skb)
+ goto err;
+
+ data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0,
+ (dir) ? DCB_C_PGRX_GCFG : DCB_C_PGTX_GCFG);
+
+ if (!data)
+ goto err;
+
+ pg_nest = nla_nest_start(dcb_skb, DCB_A_PG_CFG);
+ if (!pg_nest)
+ goto err;
+
+ tc_max = adapter->ring_feature[RING_F_DCB].indices;
+ for (i = PG_A_TC_0; i < tc_max + PG_A_TC_0; i++) {
+ if (!pg_tb[i] && !pg_tb[PG_A_TC_ALL])
+ continue;
+
+ if (pg_tb[PG_A_TC_ALL])
+ tb = pg_tb[PG_A_TC_ALL];
+ else
+ tb = pg_tb[i];
+ ret = nla_parse_nested(param_tb, IXGBE_DCB_TC_A_PARAM_MAX,
+ tb, dcb_tc_param_nest);
+ if (ret)
+ goto err_pg;
+
+ param_nest = nla_nest_start(dcb_skb, i);
+ if (!param_nest)
+ goto err_pg;
+
+ tc_config = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0];
+ tc = &adapter->dcb_cfg.tc_config[i - PG_A_TC_0].path[dir];
+
+ if (param_tb[TC_A_PARAM_STRICT_PRIO] ||
+ param_tb[TC_A_PARAM_ALL]) {
+ ret = nla_put_u8(dcb_skb, TC_A_PARAM_STRICT_PRIO,
+ tc->prio_type);
+ if (ret)
+ goto err_param;
+ }
+ if (param_tb[TC_A_PARAM_BW_GROUP_ID] ||
+ param_tb[TC_A_PARAM_ALL]) {
+ ret = nla_put_u8(dcb_skb, TC_A_PARAM_BW_GROUP_ID,
+ tc->bwg_id);
+ if (ret)
+ goto err_param;
+ }
+ if (param_tb[TC_A_PARAM_BW_PCT_IN_GROUP] ||
+ param_tb[TC_A_PARAM_ALL]) {
+ ret = nla_put_u8(dcb_skb, TC_A_PARAM_BW_PCT_IN_GROUP,
+ tc->bwg_percent);
+ if (ret)
+ goto err_param;
+ }
+ if (param_tb[TC_A_PARAM_UP_MAPPING] ||
+ param_tb[TC_A_PARAM_ALL]) {
+ ret = nla_put_u8(dcb_skb, TC_A_PARAM_UP_MAPPING,
+ tc->up_to_tc_bitmap);
+ if (ret)
+ goto err_param;
+ }
+ nla_nest_end(dcb_skb, param_nest);
+ }
+
+ for (i = PG_A_BWG_0; i < PG_A_BWG_MAX; i++) {
+ if (!pg_tb[i] && !pg_tb[PG_A_BWG_ALL])
+ continue;
+
+ ret = nla_put_u8(dcb_skb, i,
+ adapter->dcb_cfg.bw_percentage[dir][i-PG_A_BWG_0]);
+
+ if (ret)
+ goto err_pg;
+ }
+
+ nla_nest_end(dcb_skb, pg_nest);
+
+ genlmsg_end(dcb_skb, data);
+ ret = genlmsg_reply(dcb_skb, info);
+ if (ret)
+ goto err;
+
+ DPRINTK(DRV, INFO, "Get PG %s Attributes.\n", dir?"RX":"TX");
+ dev_put(netdev);
+ return 0;
+
+err_param:
+ DPRINTK(DRV, ERR, "Error in get pg %s.\n", dir?"rx":"tx");
+ nla_nest_cancel(dcb_skb, param_nest);
+err_pg:
+ nla_nest_cancel(dcb_skb, pg_nest);
+err:
+ kfree(dcb_skb);
+err_out:
+ dev_put(netdev);
+ return ret;
}
-static void ixgbe_dcbnl_getbcnrp(struct net_device *netdev, int priority,
- u8 *setting)
+static int ixgbe_dcb_pgtx_gcfg(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- *setting = adapter->dcb_cfg.bcn.rp_admin_mode[priority];
+ return ixgbe_dcb_pg_gcfg(skb, info, DCB_TX_CONFIG);
}
+static int ixgbe_dcb_pgrx_gcfg(struct sk_buff *skb, struct genl_info *info)
+{
+ return ixgbe_dcb_pg_gcfg(skb, info, DCB_RX_CONFIG);
+}
-static void ixgbe_dcbnl_getbcncfg(struct net_device *netdev, int enum_index,
- u32 *setting)
+static int ixgbe_dcb_spfccfg(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- switch (enum_index) {
- case DCB_BCN_ATTR_BCNA_0:
- *setting = adapter->dcb_cfg.bcn.bcna_option[0];
- break;
- case DCB_BCN_ATTR_BCNA_1:
- *setting = adapter->dcb_cfg.bcn.bcna_option[1];
- break;
- case DCB_BCN_ATTR_ALPHA:
- *setting = adapter->dcb_cfg.bcn.rp_alpha;
- break;
- case DCB_BCN_ATTR_BETA:
- *setting = adapter->dcb_cfg.bcn.rp_beta;
- break;
- case DCB_BCN_ATTR_GD:
- *setting = adapter->dcb_cfg.bcn.rp_gd;
- break;
- case DCB_BCN_ATTR_GI:
- *setting = adapter->dcb_cfg.bcn.rp_gi;
- break;
- case DCB_BCN_ATTR_TMAX:
- *setting = adapter->dcb_cfg.bcn.rp_tmax;
- break;
- case DCB_BCN_ATTR_TD:
- *setting = adapter->dcb_cfg.bcn.rp_td;
- break;
- case DCB_BCN_ATTR_RMIN:
- *setting = adapter->dcb_cfg.bcn.rp_rmin;
- break;
- case DCB_BCN_ATTR_W:
- *setting = adapter->dcb_cfg.bcn.rp_w;
- break;
- case DCB_BCN_ATTR_RD:
- *setting = adapter->dcb_cfg.bcn.rp_rd;
- break;
- case DCB_BCN_ATTR_RU:
- *setting = adapter->dcb_cfg.bcn.rp_ru;
- break;
- case DCB_BCN_ATTR_WRTT:
- *setting = adapter->dcb_cfg.bcn.rp_wrtt;
- break;
- case DCB_BCN_ATTR_RI:
- *setting = adapter->dcb_cfg.bcn.rp_ri;
- break;
- default:
- *setting = -1;
+ struct nlattr *tb[IXGBE_DCB_PFC_A_UP_MAX + 1];
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
+ int i, ret = -ENOMEM;
+ u8 setting;
+ u8 changed = 0;
+
+ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PFC_CFG])
+ return -EINVAL;
+
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ return -EINVAL;
+
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err;
+ else
+ adapter = netdev_priv(netdev);
+
+ ret = nla_parse_nested(tb, IXGBE_DCB_PFC_A_UP_MAX,
+ info->attrs[DCB_A_PFC_CFG],
+ dcb_pfc_up_nest);
+ if (ret)
+ goto err;
+
+ if (!adapter->dcb_set_bitmap &&
+ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
+ adapter->ring_feature[RING_F_DCB].indices)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ setting = nla_get_u8(tb[i]);
+ adapter->temp_dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc = setting;
+
+ if (adapter->temp_dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc !=
+ adapter->dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc)
+ changed = 1;
}
+
+ if (changed) {
+ adapter->dcb_set_bitmap |= BIT_PFC;
+ DPRINTK(DRV, INFO, "Set DCB PFC\n");
+ } else {
+ DPRINTK(DRV, INFO, "Set DCB PFC - no changes\n");
+ }
+
+ ret = ixgbe_nl_reply(0, DCB_C_PFC_SCFG, DCB_A_PFC_CFG, info);
+ if (ret)
+ goto err;
+
+err:
+ dev_put(netdev);
+ return ret;
}
-static void ixgbe_dcbnl_setbcnrp(struct net_device *netdev, int priority,
- u8 setting)
+static int ixgbe_dcb_gpfccfg(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ void *data;
+ struct sk_buff *dcb_skb = NULL;
+ struct nlattr *tb[IXGBE_DCB_PFC_A_UP_MAX + 1], *nest;
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
+ int ret = -ENOMEM;
+ int i;
+
+ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_PFC_CFG])
+ return -EINVAL;
+
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ return -EINVAL;
+
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err_out;
+ else
+ adapter = netdev_priv(netdev);
+
+ ret = nla_parse_nested(tb, IXGBE_DCB_PFC_A_UP_MAX,
+ info->attrs[DCB_A_PFC_CFG], dcb_pfc_up_nest);
+ if (ret)
+ goto err;
+
+ dcb_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!dcb_skb)
+ goto err;
+
+ data = genlmsg_put_reply(dcb_skb, info, &dcb_family, 0,
+ DCB_C_PFC_GCFG);
+ if (!data)
+ goto err;
+
+ nest = nla_nest_start(dcb_skb, DCB_A_PFC_CFG);
+ if (!nest)
+ goto err;
+
+ for (i = PFC_A_UP_0; i < PFC_A_UP_MAX; i++) {
+ if (!tb[i] && !tb[PFC_A_UP_ALL])
+ continue;
+
+ ret = nla_put_u8(dcb_skb, i,
+ adapter->dcb_cfg.tc_config[i-PFC_A_UP_0].dcb_pfc);
+ if (ret) {
+ nla_nest_cancel(dcb_skb, nest);
+ goto err;
+ }
+ }
- adapter->temp_dcb_cfg.bcn.rp_admin_mode[priority] = setting;
+ nla_nest_end(dcb_skb, nest);
- if (adapter->temp_dcb_cfg.bcn.rp_admin_mode[priority] !=
- adapter->dcb_cfg.bcn.rp_admin_mode[priority])
- adapter->dcb_set_bitmap |= BIT_BCN;
+ genlmsg_end(dcb_skb, data);
+
+ ret = genlmsg_reply(dcb_skb, info);
+ if (ret)
+ goto err;
+
+ DPRINTK(DRV, INFO, "Get PFC CFG.\n");
+ dev_put(netdev);
+ return 0;
+
+err:
+ DPRINTK(DRV, ERR, "Error in get pfc stats.\n");
+ kfree(dcb_skb);
+err_out:
+ dev_put(netdev);
+ return ret;
}
-static void ixgbe_dcbnl_setbcncfg(struct net_device *netdev, int enum_index,
- u32 setting)
+static int ixgbe_dcb_set_all(struct sk_buff *skb, struct genl_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- switch (enum_index) {
- case DCB_BCN_ATTR_BCNA_0:
- adapter->temp_dcb_cfg.bcn.bcna_option[0] = setting;
- if (adapter->temp_dcb_cfg.bcn.bcna_option[0] !=
- adapter->dcb_cfg.bcn.bcna_option[0])
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_BCNA_1:
- adapter->temp_dcb_cfg.bcn.bcna_option[1] = setting;
- if (adapter->temp_dcb_cfg.bcn.bcna_option[1] !=
- adapter->dcb_cfg.bcn.bcna_option[1])
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_ALPHA:
- adapter->temp_dcb_cfg.bcn.rp_alpha = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_alpha !=
- adapter->dcb_cfg.bcn.rp_alpha)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_BETA:
- adapter->temp_dcb_cfg.bcn.rp_beta = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_beta !=
- adapter->dcb_cfg.bcn.rp_beta)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_GD:
- adapter->temp_dcb_cfg.bcn.rp_gd = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_gd !=
- adapter->dcb_cfg.bcn.rp_gd)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_GI:
- adapter->temp_dcb_cfg.bcn.rp_gi = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_gi !=
- adapter->dcb_cfg.bcn.rp_gi)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_TMAX:
- adapter->temp_dcb_cfg.bcn.rp_tmax = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_tmax !=
- adapter->dcb_cfg.bcn.rp_tmax)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_TD:
- adapter->temp_dcb_cfg.bcn.rp_td = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_td !=
- adapter->dcb_cfg.bcn.rp_td)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_RMIN:
- adapter->temp_dcb_cfg.bcn.rp_rmin = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_rmin !=
- adapter->dcb_cfg.bcn.rp_rmin)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_W:
- adapter->temp_dcb_cfg.bcn.rp_w = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_w !=
- adapter->dcb_cfg.bcn.rp_w)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_RD:
- adapter->temp_dcb_cfg.bcn.rp_rd = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_rd !=
- adapter->dcb_cfg.bcn.rp_rd)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_RU:
- adapter->temp_dcb_cfg.bcn.rp_ru = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_ru !=
- adapter->dcb_cfg.bcn.rp_ru)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_WRTT:
- adapter->temp_dcb_cfg.bcn.rp_wrtt = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_wrtt !=
- adapter->dcb_cfg.bcn.rp_wrtt)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- case DCB_BCN_ATTR_RI:
- adapter->temp_dcb_cfg.bcn.rp_ri = setting;
- if (adapter->temp_dcb_cfg.bcn.rp_ri !=
- adapter->dcb_cfg.bcn.rp_ri)
- adapter->dcb_set_bitmap |= BIT_BCN;
- break;
- default:
- break;
+ struct net_device *netdev = NULL;
+ struct ixgbe_adapter *adapter = NULL;
+ int ret = -EINVAL;
+ u8 value;
+ u8 retval = 0;
+
+ if (!info->attrs[DCB_A_IFNAME] || !info->attrs[DCB_A_SET_ALL])
+ goto err;
+
+ netdev = dev_get_by_name(&init_net,
+ nla_data(info->attrs[DCB_A_IFNAME]));
+ if (!netdev)
+ goto err;
+
+ ret = ixgbe_dcb_check_adapter(netdev);
+ if (ret)
+ goto err_out;
+ else
+ adapter = netdev_priv(netdev);
+
+ if (!(adapter->flags & IXGBE_FLAG_DCB_CAPABLE)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ value = nla_get_u8(info->attrs[DCB_A_SET_ALL]);
+ if ((value & 1) != value) {
+ DPRINTK(DRV, INFO, "Value is not 1 or 0, it is %d.\n", value);
+ } else {
+ if (!adapter->dcb_set_bitmap) {
+ retval = 1;
+ goto out;
+ }
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ msleep(1);
+
+ ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
+ &adapter->dcb_cfg,
+ adapter->ring_feature[RING_F_DCB].indices);
+ if (ret) {
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+ goto err_out;
+ }
+
+ ixgbe_down(adapter);
+ ixgbe_up(adapter);
+ adapter->dcb_set_bitmap = 0x00;
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
}
+
+out:
+ ret = ixgbe_nl_reply(retval, DCB_C_SET_ALL, DCB_A_SET_ALL, info);
+ if (ret)
+ goto err_out;
+
+ DPRINTK(DRV, INFO, "Set all pfc pg and link speed configuration.\n");
+
+err_out:
+ dev_put(netdev);
+err:
+ return ret;
}
-struct dcbnl_rtnl_ops dcbnl_ops = {
- .getstate = ixgbe_dcbnl_get_state,
- .setstate = ixgbe_dcbnl_set_state,
- .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
- .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx,
- .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx,
- .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx,
- .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx,
- .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx,
- .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx,
- .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx,
- .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx,
- .setpfccfg = ixgbe_dcbnl_set_pfc_cfg,
- .getpfccfg = ixgbe_dcbnl_get_pfc_cfg,
- .setall = ixgbe_dcbnl_set_all,
- .getcap = ixgbe_dcbnl_getcap,
- .getnumtcs = ixgbe_dcbnl_getnumtcs,
- .setnumtcs = ixgbe_dcbnl_setnumtcs,
- .getpfcstate = ixgbe_dcbnl_getpfcstate,
- .setpfcstate = ixgbe_dcbnl_setpfcstate,
- .getbcncfg = ixgbe_dcbnl_getbcncfg,
- .getbcnrp = ixgbe_dcbnl_getbcnrp,
- .setbcncfg = ixgbe_dcbnl_setbcncfg,
- .setbcnrp = ixgbe_dcbnl_setbcnrp
+
+/* DCB Generic NETLINK command Definitions */
+/* Get DCB Admin Mode */
+static struct genl_ops ixgbe_dcb_genl_c_gstate = {
+ .cmd = DCB_C_GSTATE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_gstate,
+ .dumpit = NULL,
+};
+
+/* Set DCB Admin Mode */
+static struct genl_ops ixgbe_dcb_genl_c_sstate = {
+ .cmd = DCB_C_SSTATE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_sstate,
+ .dumpit = NULL,
+};
+
+/* Set TX Traffic Attributes */
+static struct genl_ops ixgbe_dcb_genl_c_spgtx = {
+ .cmd = DCB_C_PGTX_SCFG,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_pgtx_scfg,
+ .dumpit = NULL,
+};
+
+/* Set RX Traffic Attributes */
+static struct genl_ops ixgbe_dcb_genl_c_spgrx = {
+ .cmd = DCB_C_PGRX_SCFG,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_pgrx_scfg,
+ .dumpit = NULL,
+};
+
+/* Set PFC CFG */
+static struct genl_ops ixgbe_dcb_genl_c_spfc = {
+ .cmd = DCB_C_PFC_SCFG,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_spfccfg,
+ .dumpit = NULL,
+};
+
+/* Get TX Traffic Attributes */
+static struct genl_ops ixgbe_dcb_genl_c_gpgtx = {
+ .cmd = DCB_C_PGTX_GCFG,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_pgtx_gcfg,
+ .dumpit = NULL,
+};
+
+/* Get RX Traffic Attributes */
+static struct genl_ops ixgbe_dcb_genl_c_gpgrx = {
+ .cmd = DCB_C_PGRX_GCFG,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_pgrx_gcfg,
+ .dumpit = NULL,
+};
+
+/* Get PFC CFG */
+static struct genl_ops ixgbe_dcb_genl_c_gpfc = {
+ .cmd = DCB_C_PFC_GCFG,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_gpfccfg,
+ .dumpit = NULL,
+};
+
+/* Get Link Speed setting */
+static struct genl_ops ixgbe_dcb_genl_c_glink_spd = {
+ .cmd = DCB_C_GLINK_SPD,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_glink_spd,
+ .dumpit = NULL,
+};
+
+/* Set Link Speed setting */
+static struct genl_ops ixgbe_dcb_genl_c_slink_spd = {
+ .cmd = DCB_C_SLINK_SPD,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_slink_spd,
+ .dumpit = NULL,
+};
+
+/* Set all "set" feature */
+static struct genl_ops ixgbe_dcb_genl_c_set_all = {
+ .cmd = DCB_C_SET_ALL,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_set_all,
+ .dumpit = NULL,
+};
+
+/* Get permanent HW address */
+static struct genl_ops ixgbe_dcb_genl_c_gperm_hwaddr = {
+ .cmd = DCB_C_GPERM_HWADDR,
+ .flags = GENL_ADMIN_PERM,
+ .policy = dcb_genl_policy,
+ .doit = ixgbe_dcb_gperm_hwaddr,
+ .dumpit = NULL,
};
+/**
+ * ixgbe_dcb_netlink_register - Initialize the NETLINK communication channel
+ *
+ * Description:
+ * Call out to the DCB components so they can register their families and
+ * commands with Generic NETLINK mechanism. Return zero on success and
+ * non-zero on failure.
+ *
+ */
+int ixgbe_dcb_netlink_register(void)
+{
+ int ret = 1;
+
+ /* consider writing as:
+ * ret = genl_register_family(aaa)
+ * || genl_register_ops(bbb, bbb)
+ * || genl_register_ops(ccc, ccc);
+ * if (ret)
+ * goto err;
+ */
+ ret = genl_register_family(&dcb_family);
+ if (ret)
+ return ret;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gstate);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_sstate);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spgtx);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spgrx);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_spfc);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpfc);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpgtx);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gpgrx);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_glink_spd);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_slink_spd);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_set_all);
+ if (ret)
+ goto err;
+
+ ret = genl_register_ops(&dcb_family, &ixgbe_dcb_genl_c_gperm_hwaddr);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ genl_unregister_family(&dcb_family);
+ return ret;
+}
+
+int ixgbe_dcb_netlink_unregister(void)
+{
+ return genl_unregister_family(&dcb_family);
+}
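+
+/*
+ * For reference, a minimal userspace sketch (libnl-3) of driving the
+ * commands above; it is not part of the driver. The family name given
+ * to genl_ctrl_resolve() must match the one dcb_family registers --
+ * "IXGBE_DCB" below is an assumption, as is userspace visibility of
+ * the DCB_C_GSTATE/DCB_A_IFNAME values. Receiving and parsing the
+ * DCB_A_STATE reply is omitted for brevity.
+ *
+ * #include <netlink/netlink.h>
+ * #include <netlink/genl/genl.h>
+ * #include <netlink/genl/ctrl.h>
+ *
+ * static int dcb_get_state(const char *ifname)
+ * {
+ *   struct nl_sock *sk = nl_socket_alloc();
+ *   struct nl_msg *msg = nlmsg_alloc();
+ *   int family, ret;
+ *
+ *   if (!sk || !msg)
+ *     return -ENOMEM;
+ *   genl_connect(sk);
+ *   family = genl_ctrl_resolve(sk, "IXGBE_DCB");
+ *   genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
+ *               DCB_C_GSTATE, 0);
+ *   nla_put_string(msg, DCB_A_IFNAME, ifname);
+ *   ret = nl_send_auto(sk, msg);
+ *   nlmsg_free(msg);
+ *   nl_socket_free(sk);
+ *   return ret;
+ * }
+ */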
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
-#include <linux/uaccess.h>
+#ifdef SIOCETHTOOL
+#include <asm/uaccess.h>
#include "ixgbe.h"
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
#define IXGBE_ALL_RAR_ENTRIES 16
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
- offsetof(struct ixgbe_adapter, m)
+ offsetof(struct ixgbe_adapter, m)
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
{"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
{"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
{"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
+#ifndef CONFIG_IXGBE_NAPI
+ {"rx_dropped_backlog", IXGBE_STAT(rx_dropped_backlog)},
+#endif
{"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
{"multicast", IXGBE_STAT(net_stats.multicast)},
{"broadcast", IXGBE_STAT(stats.bprc)},
{"tx_restart_queue", IXGBE_STAT(restart_queue)},
{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
+#ifdef NETIF_F_TSO
{"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
+#ifdef NETIF_F_TSO6
{"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
+#endif
+#endif
{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
{"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
{"rx_header_split", IXGBE_STAT(rx_hdr_split)},
+#ifndef IXGBE_NO_LLI
+ {"low_latency_interrupt", IXGBE_STAT(lli_int)},
+#endif
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
-#ifdef CONFIG_IXGBE_LRO
+#ifndef IXGBE_NO_LRO
+ {"lro_aggregated", IXGBE_STAT(lro_data.stats.coal)},
+ {"lro_flushed", IXGBE_STAT(lro_data.stats.flushed)},
+#endif /* IXGBE_NO_LRO */
+#ifndef IXGBE_NO_INET_LRO
{"lro_aggregated", IXGBE_STAT(lro_aggregated)},
{"lro_flushed", IXGBE_STAT(lro_flushed)},
#endif
};
#define IXGBE_QUEUE_STATS_LEN \
- ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
- ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
- (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+ ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
+ ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+ (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_PB_STATS_LEN ( \
- (((struct ixgbe_adapter *)netdev->priv)->flags & \
- IXGBE_FLAG_DCB_ENABLED) ? \
- (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
- / sizeof(u64) : 0)
-#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
- IXGBE_PB_STATS_LEN + \
- IXGBE_QUEUE_STATS_LEN)
+ (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
+ IXGBE_FLAG_DCB_ENABLED) ? \
+ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+ / sizeof(u64) : 0)
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_PB_STATS_LEN + \
+ IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
static int ixgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_DISABLE;
}
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (!in_interrupt()) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ } else {
+ /*
+ * this case is a special workaround for RHEL5 bonding
+ * that calls this routine from interrupt context
+ */
+ link_speed = adapter->link_speed;
+ link_up = adapter->link_up;
+ }
+
if (link_up) {
ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
SPEED_10000 : SPEED_1000;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- pause->autoneg = (hw->fc.type == ixgbe_fc_full ? 1 : 0);
+ pause->autoneg = (hw->fc.current_mode == ixgbe_fc_full ? 1 : 0);
- if (hw->fc.type == ixgbe_fc_rx_pause) {
+ if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
- } else if (hw->fc.type == ixgbe_fc_tx_pause) {
+ } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
pause->tx_pause = 1;
- } else if (hw->fc.type == ixgbe_fc_full) {
+ } else if (hw->fc.current_mode == ixgbe_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
if ((pause->autoneg == AUTONEG_ENABLE) ||
(pause->rx_pause && pause->tx_pause))
- hw->fc.type = ixgbe_fc_full;
+ hw->fc.current_mode = ixgbe_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
- hw->fc.type = ixgbe_fc_rx_pause;
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
else if (!pause->rx_pause && pause->tx_pause)
- hw->fc.type = ixgbe_fc_tx_pause;
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
else if (!pause->rx_pause && !pause->tx_pause)
- hw->fc.type = ixgbe_fc_none;
+ hw->fc.current_mode = ixgbe_fc_none;
else
return -EINVAL;
- hw->fc.original_type = hw->fc.type;
+ hw->fc.requested_mode = hw->fc.current_mode;
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
if (data)
+#ifdef NETIF_F_IPV6_CSUM
netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
else
netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+#else
+ netdev->features |= NETIF_F_IP_CSUM;
+ else
+ netdev->features &= ~NETIF_F_IP_CSUM;
+#endif
return 0;
}
+#ifdef NETIF_F_TSO
static int ixgbe_set_tso(struct net_device *netdev, u32 data)
{
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
if (data) {
netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
netdev->features |= NETIF_F_TSO6;
+#endif
} else {
netif_tx_stop_all_queues(netdev);
netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
netdev->features &= ~NETIF_F_TSO6;
+#endif
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+#ifdef NETIF_F_HW_VLAN_TX
+ /* disable TSO on all VLANs if they're present */
+ if (adapter->vlgrp) {
+ int i;
+ struct net_device *v_netdev;
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ v_netdev =
+ vlan_group_get_device(adapter->vlgrp, i);
+ if (v_netdev) {
+ v_netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ v_netdev->features &= ~NETIF_F_TSO6;
+#endif
+ vlan_group_set_device(adapter->vlgrp, i,
+ v_netdev);
+ }
+ }
+ }
+#endif
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
netif_tx_start_all_queues(netdev);
}
return 0;
}
+#endif /* NETIF_F_TSO */
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
-static void ixgbe_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
- regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
+ regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
+ /* DCB */
regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
return -ENOMEM;
for (i = 0; i < eeprom_len; i++) {
- if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
- &eeprom_buff[i])))
+ if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
+ &eeprom_buff[i])))
break;
}
return ret_val;
}
+static int ixgbe_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->eeprom.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = ixgbe_read_eeprom(hw, last_word,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i <= (last_word - first_word); i++)
+ ret_val |= ixgbe_write_eeprom(hw, first_word + i, eeprom_buff[i]);
+
+ /* Update the checksum */
+ ixgbe_update_eeprom_checksum(hw);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->fw_version, "N/A", 32);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_stats = IXGBE_STATS_LEN;
+ drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
return err;
}
-static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
+static int ixgbe_get_stats_count(struct net_device *netdev)
{
- switch (sset) {
- case ETH_SS_STATS:
- return IXGBE_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
+ return IXGBE_STATS_LEN;
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
int j, k;
int i;
-#ifdef CONFIG_IXGBE_LRO
- u64 aggregated = 0, flushed = 0, no_desc = 0;
+#ifndef IXGBE_NO_INET_LRO
+ unsigned int aggregated = 0, flushed = 0, no_desc = 0;
+
for (i = 0; i < adapter->num_rx_queues; i++) {
aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
adapter->lro_aggregated = aggregated;
adapter->lro_flushed = flushed;
adapter->lro_no_desc = no_desc;
-#endif
+#endif
ixgbe_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
int i;
switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
memcpy(p, ixgbe_gstrings_stats[i].stat_string,
p += ETH_GSTRING_LEN;
}
}
- /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
+/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
+static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+
+ *data = 0;
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (!link_up)
+ *data = 1;
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbe_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default register test */
+static struct ixgbe_reg_test reg_test_82598[] = {
+ { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* Enable all four RX queues before testing. */
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ /* RDH is read-only for 82598, only test RDT. */
+ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
+ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
+ { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
+ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
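+/*
+ * For each test pattern P the macros below write (P & W) to the register
+ * and expect to read back (P & W & M): the write mask W drops bits the
+ * register will not accept, and the read mask M drops bits that do not
+ * read back (reserved or self-clearing). The original register value is
+ * restored after every probe.
+ */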
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (_test[pat] & W & M)) { \
+ DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct ixgbe_reg_test *test;
+ u32 value, before, after;
+ u32 i, toggle;
+
+ toggle = 0x7FFFF3FF;
+ test = reg_test_82598;
+
+ /*
+ * Because the status register is such a special case,
+ * we handle it separately from the rest of the register
+ * tests. Some bits are read-only, some toggle, and some
+ * are writeable on newer MACs.
+ */
+ before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
+ value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
+ after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+ if (value != after) {
+ DPRINTK(DRV, ERR, "failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+
+ /*
+ * Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return 0;
+}
+
+static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
+ *data = 1;
+ else
+ *data = 0;
+ return *data;
+}
+
+static irqreturn_t ixgbe_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *) data;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
+
+ return IRQ_HANDLED;
+}
+
+static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, i = 0, shared_int = true;
+ u32 irq = adapter->pdev->irq;
+
+ *data = 0;
+
+ /* Hook up test interrupt handler just for this test */
+ if (adapter->msix_entries) {
+ /* NOTE: we don't test MSI-X interrupts here, yet */
+ return 0;
+ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ shared_int = false;
+ if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
+ netdev)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
+ netdev->name, netdev)) {
+ shared_int = false;
+ } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ return -1;
+ }
+ DPRINTK(HW, INFO, "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ msleep(10);
+
+ /* Test each interrupt */
+ for (; i < 10; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!shared_int) {
+ /*
+ * Disable the interrupts to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ ~mask & 0x00007FFF);
+ msleep(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /*
+ * Enable the interrupt to be reported in the cause
+ * register and then force the same interrupt and see
+ * if one gets posted. If an interrupt was not posted
+ * to the bus, the test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ msleep(10);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /*
+ * Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ ~mask & 0x00007FFF);
+ msleep(10);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ msleep(10);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+ return *data;
+}
+
+static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ if (tx_ring->desc && tx_ring->tx_buffer_info) {
+ for (i = 0; i < tx_ring->count; i++) {
+ struct ixgbe_tx_buffer *buf =
+ &(tx_ring->tx_buffer_info[i]);
+ if (buf->dma)
+ pci_unmap_single(pdev, buf->dma, buf->length,
+ PCI_DMA_TODEVICE);
+ if (buf->skb)
+ dev_kfree_skb(buf->skb);
+ }
+ }
+
+ if (rx_ring->desc && rx_ring->rx_buffer_info) {
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_rx_buffer *buf =
+ &(rx_ring->rx_buffer_info[i]);
+ if (buf->dma)
+ pci_unmap_single(pdev, buf->dma,
+ IXGBE_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ if (buf->skb)
+ dev_kfree_skb(buf->skb);
+ }
+ }
+
+ if (tx_ring->desc) {
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+ if (rx_ring->desc) {
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+
+ kfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ kfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ return;
+}
+
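+/*
+ * The loopback self-test drives Tx/Rx queue 0 directly with legacy
+ * descriptors and its own small rings, keeping it independent of the
+ * driver's normal ring setup and advanced descriptor paths.
+ */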
+static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 rctl, reg_data;
+ int i, ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ if (!tx_ring->count)
+ tx_ring->count = IXGBE_DEFAULT_TXD;
+
+ tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
+ sizeof(struct ixgbe_tx_buffer),
+ GFP_KERNEL);
+ if (!(tx_ring->tx_buffer_info)) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ tx_ring->size = tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+ &tx_ring->dma))) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ tx_ring->next_to_use = tx_ring->next_to_clean = 0;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
+ ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
+ ((u64) tx_ring->dma >> 32));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
+ tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data |= IXGBE_HLREG0_TXPADEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
+ reg_data |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
+
+ for (i = 0; i < tx_ring->count; i++) {
+ struct ixgbe_legacy_tx_desc *desc = IXGBE_TX_DESC(*tx_ring, i);
+ struct sk_buff *skb;
+ unsigned int size = 1024;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, size);
+ tx_ring->tx_buffer_info[i].skb = skb;
+ tx_ring->tx_buffer_info[i].length = skb->len;
+ tx_ring->tx_buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ desc->buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
+ desc->lower.data = cpu_to_le32(skb->len);
+ desc->lower.data |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
+ IXGBE_TXD_CMD_IFCS |
+ IXGBE_TXD_CMD_RS);
+ desc->upper.data = 0;
+ }
+
+ /* Setup Rx Descriptor ring and Rx buffers */
+
+ if (!rx_ring->count)
+ rx_ring->count = IXGBE_DEFAULT_RXD;
+
+ rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
+ sizeof(struct ixgbe_rx_buffer),
+ GFP_KERNEL);
+ if (!(rx_ring->rx_buffer_info)) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+
+ rx_ring->size = rx_ring->count * sizeof(struct ixgbe_legacy_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+ &rx_ring->dma))) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+ rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+
+ rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
+ ((u64)rx_ring->dma & 0xFFFFFFFF));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
+ ((u64) rx_ring->dma >> 32));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+ reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
+#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum
+ Threshold Size mask */
+ reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
+#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */
+ reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
+ reg_data |= adapter->hw.mac.mc_filter_type;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
+ reg_data |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
+
+ rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_legacy_rx_desc *rx_desc =
+ IXGBE_RX_DESC(*rx_ring, i);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rx_ring->rx_buffer_info[i].skb = skb;
+ rx_ring->rx_buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->buffer_addr =
+ cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ ixgbe_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_data;
+
+ /* right now we only support MAC loopback in the driver */
+
+ /* Setup MAC loopback */
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data |= IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
+ reg_data &= ~IXGBE_AUTOC_LMS_MASK;
+ reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
+
+ /* Disable Atlas Tx lanes; re-enabled in reset path */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ u8 atlas;
+
+ ixgbe_read_analog_reg8(&adapter->hw,
+ IXGBE_ATLAS_PDN_LPBK, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
+ ixgbe_write_analog_reg8(&adapter->hw,
+ IXGBE_ATLAS_PDN_LPBK, atlas);
+
+ ixgbe_read_analog_reg8(&adapter->hw,
+ IXGBE_ATLAS_PDN_10G, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ ixgbe_write_analog_reg8(&adapter->hw,
+ IXGBE_ATLAS_PDN_10G, atlas);
+
+ ixgbe_read_analog_reg8(&adapter->hw,
+ IXGBE_ATLAS_PDN_1G, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ ixgbe_write_analog_reg8(&adapter->hw,
+ IXGBE_ATLAS_PDN_1G, atlas);
+
+ ixgbe_read_analog_reg8(&adapter->hw,
+ IXGBE_ATLAS_PDN_AN, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ ixgbe_write_analog_reg8(&adapter->hw,
+ IXGBE_ATLAS_PDN_AN, atlas);
+ }
+
+ return 0;
+}
+
+static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
+{
+ u32 reg_data;
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+}
+
+static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
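+	/* first half of the frame is 0xFF, the second half 0xAA, with
+	 * 0xBE/0xAF marker bytes 10 and 12 bytes past the midpoint */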
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
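+	/* look for the markers written by ixgbe_create_lbtest_frame;
+	 * 13 is the diagnostic code for a pattern mis-compare */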
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ return 0;
+ }
+ }
+ return 13;
+}
+
+static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, j, k, l, lc, good_cnt, ret_val = 0;
+ unsigned long time;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
+
+ /*
+	 * Calculate the loop count based on the largest descriptor ring.
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop.
+ */
+
+ if (rx_ring->count <= tx_ring->count)
+ lc = ((tx_ring->count / 64) * 2) + 1;
+ else
+ lc = ((rx_ring->count / 64) * 2) + 1;
+
+ k = l = 0;
+ for (j = 0; j <= lc; j++) {
+ for (i = 0; i < 64; i++) {
+ ixgbe_create_lbtest_frame(
+ tx_ring->tx_buffer_info[k].skb,
+ 1024);
+ pci_dma_sync_single_for_device(pdev,
+ tx_ring->tx_buffer_info[k].dma,
+ tx_ring->tx_buffer_info[k].length,
+ PCI_DMA_TODEVICE);
+ if (unlikely(++k == tx_ring->count))
+ k = 0;
+ }
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
+ msleep(200);
+ /* set the start time for the receive */
+ time = jiffies;
+ good_cnt = 0;
+ do {
+ /* receive the sent packets */
+ pci_dma_sync_single_for_cpu(pdev,
+ rx_ring->rx_buffer_info[l].dma,
+ IXGBE_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ ret_val = ixgbe_check_lbtest_frame(
+ rx_ring->rx_buffer_info[l].skb, 1024);
+ if (!ret_val)
+ good_cnt++;
+ if (++l == rx_ring->count)
+ l = 0;
+			/*
+			 * time + 20 jiffies (20 msecs at HZ=1000, 200 msecs
+			 * on 2.4 kernels) is more than enough time to
+			 * complete the receives; if it's exceeded, break
+			 * out and report an error
+			 */
+		} while (good_cnt < 64 && time_before(jiffies, time + 20));
+ if (good_cnt != 64) {
+ /* ret_val is the same as mis-compare */
+ ret_val = 13;
+ break;
+ }
+		if (time_after_eq(jiffies, time + 20)) {
+ /* Error code for time out error */
+ ret_val = 14;
+ break;
+ }
+ }
+
+ return ret_val;
+}
+
+static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ *data = ixgbe_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+ *data = ixgbe_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ *data = ixgbe_run_loopback_test(adapter);
+ ixgbe_loopback_cleanup(adapter);
+
+err_loopback:
+ ixgbe_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int ixgbe_diag_test_count(struct net_device *netdev)
+{
+ return IXGBE_TEST_LEN;
+}
+
+static void ixgbe_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBE_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ DPRINTK(HW, INFO, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbe_reset(adapter);
+
+ DPRINTK(HW, INFO, "register testing starting\n");
+ if (ixgbe_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ DPRINTK(HW, INFO, "eeprom testing starting\n");
+ if (ixgbe_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ DPRINTK(HW, INFO, "interrupt testing starting\n");
+ if (ixgbe_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ DPRINTK(HW, INFO, "loopback testing starting\n");
+ if (ixgbe_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+
+ clear_bit(__IXGBE_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ DPRINTK(HW, INFO, "online testing starting\n");
+ /* Online tests */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__IXGBE_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
static void ixgbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
static int ixgbe_phys_id(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
u32 i;
if (!data || data > 300)
data = 300;
for (i = 0; i < (data * 1000); i += 400) {
- hw->mac.ops.led_on(hw, IXGBE_LED_ON);
+ ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
msleep_interruptible(200);
- hw->mac.ops.led_off(hw, IXGBE_LED_ON);
+ ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
msleep_interruptible(200);
}
struct ixgbe_adapter *adapter = netdev_priv(netdev);
ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+#ifndef CONFIG_IXGBE_NAPI
+ ec->rx_max_coalesced_frames_irq = adapter->rx_ring[0].work_limit;
+#endif
/* only valid if in constant ITR mode */
- switch (adapter->itr_setting) {
- case 0:
- /* throttling disabled */
- ec->rx_coalesce_usecs = 0;
- break;
- case 1:
- /* dynamic ITR mode */
- ec->rx_coalesce_usecs = 1;
- break;
- default:
- /* fixed interrupt rate mode */
+ if (adapter->itr_setting == 0)
ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
- break;
- }
+
return 0;
}
struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- int i;
if (ec->tx_max_coalesced_frames_irq)
adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
- if (ec->rx_coalesce_usecs > 1) {
+#ifndef CONFIG_IXGBE_NAPI
+ if (ec->rx_max_coalesced_frames_irq)
+ adapter->rx_ring[0].work_limit = ec->rx_max_coalesced_frames_irq;
+
+#endif
+ if (ec->rx_coalesce_usecs > 3) {
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
/* store the value in ints/second */
adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
+		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
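+			/* tx-only vectors get half the requested
+			 * interrupt rate */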
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else
+ /* rx only */
+ q_vector->eitr = adapter->eitr_param;
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
+ EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+ }
+
/* static value of interrupt rate */
adapter->itr_setting = adapter->eitr_param;
- /* clear the lower bit */
- adapter->itr_setting &= ~1;
- } else if (ec->rx_coalesce_usecs == 1) {
- /* 1 means dynamic mode */
- adapter->eitr_param = 20000;
- adapter->itr_setting = 1;
} else {
- /* any other value means disable eitr, which is best
- * served by setting the interrupt rate very high */
- adapter->eitr_param = 3000000;
- adapter->itr_setting = 0;
+ /* 1,2,3 means dynamic mode */
+ adapter->itr_setting = ec->rx_coalesce_usecs;
}
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
- if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = (adapter->eitr_param >> 1);
- else
- /* rx only or mixed */
- q_vector->eitr = adapter->eitr_param;
- IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
- EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
- }
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
return 0;
}
-static const struct ethtool_ops ixgbe_ethtool_ops = {
+static struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
.get_drvinfo = ixgbe_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
.get_ringparam = ixgbe_get_ringparam,
.set_ringparam = ixgbe_set_ringparam,
.get_pauseparam = ixgbe_get_pauseparam,
.set_sg = ethtool_op_set_sg,
.get_msglevel = ixgbe_get_msglevel,
.set_msglevel = ixgbe_set_msglevel,
+#ifdef NETIF_F_TSO
.get_tso = ethtool_op_get_tso,
.set_tso = ixgbe_set_tso,
+#endif
+ .self_test_count = ixgbe_diag_test_count,
+ .self_test = ixgbe_diag_test,
.get_strings = ixgbe_get_strings,
.phys_id = ixgbe_phys_id,
- .get_sset_count = ixgbe_get_sset_count,
+ .get_stats_count = ixgbe_get_stats_count,
.get_ethtool_stats = ixgbe_get_ethtool_stats,
+#ifdef ETHTOOL_GPERMADDR
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
+#ifndef IXGBE_NO_INET_LRO
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
+#endif
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}
+#endif /* SIOCETHTOOL */
*******************************************************************************/
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#ifdef NETIF_F_TSO
#include <net/checksum.h>
+#ifdef NETIF_F_TSO6
#include <net/ip6_checksum.h>
+#endif
+#endif
+#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
+#endif
#include "ixgbe.h"
-#include "ixgbe_common.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
- "Intel(R) 10 Gigabit PCI Express Network Driver";
-
-#define DRV_VERSION "1.3.30-k2"
-const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
+ "Intel(R) 10 Gigabit PCI Express Network Driver";
+#define DRV_HW_PERF
-static const struct ixgbe_info *ixgbe_info_tbl[] = {
- [board_82598] = &ixgbe_82598_info,
-};
+#ifndef CONFIG_IXGBE_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+#define DRV_VERSION "1.3.56.5" DRIVERNAPI DRV_HW_PERF
+const char ixgbe_driver_version[] = DRV_VERSION;
+static char ixgbe_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
/* ixgbe_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
* Class, Class Mask, private data (not used) }
*/
static struct pci_device_id ixgbe_pci_tbl[] = {
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
- board_82598 },
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
- board_82598 },
-
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)},
/* required last entry */
{0, }
};
return false;
}
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
(((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
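+/* TXD_USE_COUNT is a ceiling divide by 16KB: a 1500 byte buffer needs
+ * one descriptor, a full 64KB TSO payload needs four */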
+#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
#define GET_TX_HEAD_FROM_RING(ring) (\
*(volatile u32 *) \
skb = tx_buffer_info->skb;
if (skb) {
+#ifdef NETIF_F_TSO
unsigned int segs, bytecount;
/* gso_segs is currently only valid for tcp */
skb->len;
total_packets += segs;
total_bytes += bytecount;
+#else
+ total_packets++;
+ total_bytes += skb->len;
+#endif
}
ixgbe_unmap_and_free_tx_resource(adapter,
* sees the new next_to_clean.
*/
smp_mb();
+#ifdef HAVE_TX_MQ
if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
++adapter->restart_queue;
}
+#else
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBE_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+#endif
}
if (adapter->detect_tx_hung) {
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
- tx_ring->stats.bytes += total_bytes;
tx_ring->stats.packets += total_packets;
+ tx_ring->stats.bytes += total_bytes;
adapter->net_stats.tx_bytes += total_bytes;
adapter->net_stats.tx_packets += total_packets;
return (total_packets ? true : false);
struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
+ int ret;
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
-#ifdef CONFIG_IXGBE_LRO
+#ifndef IXGBE_NO_INET_LRO
if (adapter->netdev->features & NETIF_F_LRO &&
skb->ip_summed == CHECKSUM_UNNECESSARY) {
+#ifdef NETIF_F_HW_VLAN_TX
if (adapter->vlgrp && is_vlan && (tag != 0))
lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
adapter->vlgrp, tag,
rx_desc);
else
+#endif
lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
ring->lro_used = true;
} else {
-#endif
+#endif /* IXGBE_NO_INET_LRO */
+#ifdef CONFIG_IXGBE_NAPI
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+#ifdef NETIF_F_HW_VLAN_TX
if (adapter->vlgrp && is_vlan && (tag != 0))
vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
else
netif_receive_skb(skb);
+#else
+ netif_receive_skb(skb);
+#endif
} else {
+#endif /* CONFIG_IXGBE_NAPI */
+
+#ifdef NETIF_F_HW_VLAN_TX
if (adapter->vlgrp && is_vlan && (tag != 0))
- vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
else
- netif_rx(skb);
+ ret = netif_rx(skb);
+#else
+ ret = netif_rx(skb);
+#endif
+#ifndef CONFIG_IXGBE_NAPI
+ if (ret == NET_RX_DROP)
+ adapter->rx_dropped_backlog++;
+#endif
+#ifdef CONFIG_IXGBE_NAPI
}
-#ifdef CONFIG_IXGBE_LRO
+#endif /* CONFIG_IXGBE_NAPI */
+#ifndef IXGBE_NO_INET_LRO
}
#endif
}
int cleaned_count)
{
struct pci_dev *pdev = adapter->pdev;
- struct net_device *netdev = adapter->netdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
unsigned int i;
if (!bi->page_dma &&
(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(netdev);
+ bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
adapter->alloc_rx_page_failed++;
goto no_buffers;
return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
+#ifndef IXGBE_NO_LRO
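+/* upper bound on packets coalesced per LRO flow; clamped against the
+ * MTU in the Rx configuration path so a merged frame still fits in a
+ * single IP datagram */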
+static int lromax = 44;
+
+/**
+ * ixgbe_lro_ring_flush - Indicate a coalesced packet to the upper layer.
+ *
+ * Update the IP and TCP headers of the head skb if more than one
+ * skb has been chained, then hand the packet up the stack.
+ **/
+static void ixgbe_lro_ring_flush(struct ixgbe_lro_list *lrolist,
+ struct ixgbe_adapter *adapter,
+ struct ixgbe_lro_desc *lrod, u8 status,
+ struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct iphdr *iph;
+ struct tcphdr *th;
+ struct sk_buff *skb;
+ u32 *ts_ptr;
+ struct ixgbe_lro_info *lro_data = &adapter->lro_data;
+ struct net_device *netdev = adapter->netdev;
+
+ hlist_del(&lrod->lro_node);
+ lrolist->active_cnt--;
+
+ skb = lrod->skb;
+
+ if (lrod->append_cnt) {
+ /* incorporate ip header and re-calculate checksum */
+ iph = (struct iphdr *)skb->data;
+		iph->tot_len = htons(skb->len);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ /* incorporate the latest ack into the tcp header */
+ th = (struct tcphdr *) ((char *)skb->data + sizeof(*iph));
+ th->ack_seq = lrod->ack_seq;
+ th->window = lrod->window;
+
+ /* incorporate latest timestamp into the tcp header */
+ if (lrod->timestamp) {
+ ts_ptr = (u32 *)(th + 1);
+ ts_ptr[1] = htonl(lrod->tsval);
+ ts_ptr[2] = lrod->tsecr;
+ }
+ }
+
+#ifdef NETIF_F_TSO
+ skb_shinfo(skb)->gso_size = lrod->mss;
+#endif
+ ixgbe_receive_skb(adapter, skb, status, rx_ring, rx_desc);
+
+ netdev->last_rx = jiffies;
+ lro_data->stats.coal += lrod->append_cnt + 1;
+ lro_data->stats.flushed++;
+
+ lrod->skb = NULL;
+ lrod->last_skb = NULL;
+ lrod->timestamp = 0;
+ lrod->append_cnt = 0;
+ lrod->data_size = 0;
+ hlist_add_head(&lrod->lro_node, &lrolist->free);
+}
+
+static void ixgbe_lro_ring_flush_all(struct ixgbe_lro_list *lrolist,
+ struct ixgbe_adapter *adapter, u8 status,
+ struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbe_lro_desc *lrod;
+ struct hlist_node *node, *node2;
+
+ hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active, lro_node)
+ ixgbe_lro_ring_flush(lrolist, adapter, lrod, status, rx_ring,
+ rx_desc);
+}
+
+/**
+ * ixgbe_lro_header_ok - Check whether a packet's headers allow LRO merging.
+ *
+ * Returns the TCP payload length if the packet is a candidate for
+ * coalescing, or -1 if it must be indicated unmodified.
+ **/
+static int ixgbe_lro_header_ok(struct ixgbe_lro_info *lro_data,
+ struct sk_buff *new_skb, struct iphdr *iph,
+ struct tcphdr *th)
+{
+ int opt_bytes, tcp_data_len;
+ u32 *ts_ptr = NULL;
+
+ /* If we see CE codepoint in IP header, packet is not mergeable */
+ if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
+ return -1;
+
+ /* ensure there are no options */
+ if ((iph->ihl << 2) != sizeof(*iph))
+ return -1;
+
+ /* .. and the packet is not fragmented */
+ if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ return -1;
+
+ /* ensure no bits set besides ack or psh */
+ if (th->fin || th->syn || th->rst ||
+ th->urg || th->ece || th->cwr || !th->ack)
+ return -1;
+
+ /* ensure that the checksum is valid */
+ if (new_skb->ip_summed != CHECKSUM_UNNECESSARY)
+ return -1;
+
+ /*
+	 * check for timestamps. Since the only option we handle is timestamps,
+ * we only have to handle the simple case of aligned timestamps
+ */
+
+ opt_bytes = (th->doff << 2) - sizeof(*th);
+ if (opt_bytes != 0) {
+ ts_ptr = (u32 *)(th + 1);
+ if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
+ (*ts_ptr != ntohl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP))) {
+ return -1;
+ }
+ }
+
+ tcp_data_len = ntohs(iph->tot_len) - (th->doff << 2) - sizeof(*iph);
+
+ if (tcp_data_len == 0)
+ return -1;
+
+ return tcp_data_len;
+}
+
+/**
+ * ixgbe_lro_ring_queue - if able, queue skb into lro chain
+ * @lrolist: pointer to structure for lro entries
+ * @adapter: address of board private structure
+ * @new_skb: pointer to current skb being checked
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
+ *
+ * Checks whether the given skb is eligible for LRO and, if so, chains
+ * it to the existing lro_skb for its flow (matched on addresses, ports
+ * and VLAN tag). If no LRO descriptor exists for the flow, create one.
+ **/
+static int ixgbe_lro_ring_queue(struct ixgbe_lro_list *lrolist,
+ struct ixgbe_adapter *adapter,
+ struct sk_buff *new_skb, u8 status,
+ struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ethhdr *eh;
+ struct iphdr *iph;
+ struct tcphdr *th, *header_th;
+ int opt_bytes, header_ok = 1;
+ u32 *ts_ptr = NULL;
+ struct sk_buff *lro_skb;
+ struct ixgbe_lro_desc *lrod;
+ struct hlist_node *node;
+ u32 seq;
+ struct ixgbe_lro_info *lro_data = &adapter->lro_data;
+ int tcp_data_len;
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ /* Disable LRO when in promiscuous mode, useful for debugging LRO */
+ if (adapter->netdev->flags & IFF_PROMISC)
+ return -1;
+
+ eh = (struct ethhdr *)skb_mac_header(new_skb);
+ iph = (struct iphdr *)(eh + 1);
+
+ /* check to see if it is IPv4/TCP */
+ if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
+ (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
+ return -1;
+
+ /* find the TCP header */
+ th = (struct tcphdr *) (iph + 1);
+
+ tcp_data_len = ixgbe_lro_header_ok(lro_data, new_skb, iph, th);
+ if (tcp_data_len == -1)
+ header_ok = 0;
+
+ /* make sure any packet we are about to chain doesn't include any pad */
+ skb_trim(new_skb, ntohs(iph->tot_len));
+
+ opt_bytes = (th->doff << 2) - sizeof(*th);
+ if (opt_bytes != 0)
+ ts_ptr = (u32 *)(th + 1);
+
+ seq = ntohl(th->seq);
+ /*
+ * we have a packet that might be eligible for LRO,
+ * so see if it matches anything we might expect
+ */
+ hlist_for_each_entry(lrod, node, &lrolist->active, lro_node) {
+ if (lrod->source_port == th->source &&
+ lrod->dest_port == th->dest &&
+ lrod->source_ip == iph->saddr &&
+ lrod->dest_ip == iph->daddr &&
+ lrod->vlan_tag == tag) {
+
+ if (!header_ok) {
+ ixgbe_lro_ring_flush(lrolist, adapter, lrod,
+ status, rx_ring, rx_desc);
+ return -1;
+ }
+
+ if (seq != lrod->next_seq) {
+ /* out of order packet */
+ ixgbe_lro_ring_flush(lrolist, adapter, lrod,
+ status, rx_ring, rx_desc);
+ return -1;
+ }
+
+ if (lrod->timestamp) {
+ u32 tsval = ntohl(*(ts_ptr + 1));
+ /* make sure timestamp values are increasing */
+ if (lrod->tsval > tsval || *(ts_ptr + 2) == 0) {
+ ixgbe_lro_ring_flush(lrolist, adapter,
+ lrod, status,
+ rx_ring, rx_desc);
+ return -1;
+ }
+ lrod->tsval = tsval;
+ lrod->tsecr = *(ts_ptr + 2);
+ }
+
+ lro_skb = lrod->skb;
+
+ lro_skb->len += tcp_data_len;
+ lro_skb->data_len += tcp_data_len;
+ lro_skb->truesize += tcp_data_len;
+
+ lrod->next_seq += tcp_data_len;
+ lrod->ack_seq = th->ack_seq;
+ lrod->window = th->window;
+ lrod->data_size += tcp_data_len;
+ if (tcp_data_len > lrod->mss)
+ lrod->mss = tcp_data_len;
+
+ /* Remove IP and TCP header*/
+ skb_pull(new_skb, ntohs(iph->tot_len) - tcp_data_len);
+
+ /* Chain this new skb in frag_list */
+			if (skb_shinfo(lro_skb)->frag_list != NULL)
+				lrod->last_skb->next = new_skb;
+			else
+				skb_shinfo(lro_skb)->frag_list = new_skb;
+
+			lrod->last_skb = new_skb;
+
+ lrod->append_cnt++;
+
+ /* New packet with push flag, flush the whole packet. */
+ if (th->psh) {
+ header_th =
+ (struct tcphdr *)(lro_skb->data + sizeof(*iph));
+ header_th->psh |= th->psh;
+ ixgbe_lro_ring_flush(lrolist, adapter, lrod,
+ status, rx_ring, rx_desc);
+ return 0;
+ }
+
+ if (lrod->append_cnt >= lro_data->max)
+ ixgbe_lro_ring_flush(lrolist, adapter, lrod,
+ status, rx_ring, rx_desc);
+
+ return 0;
+		}
+ }
+
+ /* start a new packet */
+ if (header_ok && !hlist_empty(&lrolist->free)) {
+ lrod = hlist_entry(lrolist->free.first, struct ixgbe_lro_desc,
+ lro_node);
+
+ lrod->skb = new_skb;
+ lrod->source_ip = iph->saddr;
+ lrod->dest_ip = iph->daddr;
+ lrod->source_port = th->source;
+ lrod->dest_port = th->dest;
+ lrod->next_seq = seq + tcp_data_len;
+ lrod->mss = tcp_data_len;
+ lrod->ack_seq = th->ack_seq;
+ lrod->window = th->window;
+ lrod->data_size = tcp_data_len;
+ lrod->vlan_tag = tag;
+
+ /* record timestamp if it is present */
+ if (opt_bytes) {
+ lrod->timestamp = 1;
+ lrod->tsval = ntohl(*(ts_ptr + 1));
+ lrod->tsecr = *(ts_ptr + 2);
+ }
+ /* remove first packet from freelist.. */
+ hlist_del(&lrod->lro_node);
+ /* .. and insert at the front of the active list */
+ hlist_add_head(&lrod->lro_node, &lrolist->active);
+ lrolist->active_cnt++;
+
+ return 0;
+ }
+
+ return -1;
+}
+
+static void ixgbe_lro_ring_exit(struct ixgbe_lro_list *lrolist)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_lro_desc *lrod;
+
+ hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active,
+ lro_node) {
+ hlist_del(&lrod->lro_node);
+ kfree(lrod);
+ }
+
+ hlist_for_each_entry_safe(lrod, node, node2, &lrolist->free,
+ lro_node) {
+ hlist_del(&lrod->lro_node);
+ kfree(lrod);
+ }
+}
+
+static void ixgbe_lro_ring_init(struct ixgbe_lro_list *lrolist,
+ struct ixgbe_adapter *adapter)
+{
+ int j, bytes;
+ struct ixgbe_lro_desc *lrod;
+
+ bytes = sizeof(struct ixgbe_lro_desc);
+
+ INIT_HLIST_HEAD(&lrolist->free);
+ INIT_HLIST_HEAD(&lrolist->active);
+
+ for (j = 0; j < IXGBE_LRO_MAX; j++) {
+ lrod = kzalloc(bytes, GFP_KERNEL);
+ if (lrod != NULL) {
+ INIT_HLIST_NODE(&lrod->lro_node);
+ hlist_add_head(&lrod->lro_node, &lrolist->free);
+ } else {
+ DPRINTK(PROBE, ERR,
+				"Allocation for LRO descriptor %d failed\n", j);
+ }
+ }
+}
+
+#endif /* IXGBE_NO_LRO */
+#ifdef CONFIG_IXGBE_NAPI
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
+#else
+static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
+#endif
{
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
u16 hdr_info;
bool cleaned = false;
int cleaned_count = 0;
+#ifndef CONFIG_IXGBE_NAPI
+ int work_to_do = rx_ring->work_limit, local_work_done = 0;
+ int *work_done = &local_work_done;
+#endif
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
len = le16_to_cpu(rx_desc->wb.upper.length);
}
+#ifndef IXGBE_NO_LLI
+ if (staterr & IXGBE_RXD_STAT_DYNINT)
+ adapter->lli_int++;
+#endif
+
cleaned = true;
skb = rx_buffer_info->skb;
prefetch(skb->data - NET_IP_ALIGN);
pci_unmap_page(pdev, rx_buffer_info->page_dma,
PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
rx_buffer_info->page_dma = 0;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
(page_count(rx_buffer_info->page) != 1))
else
get_page(rx_buffer_info->page);
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
}
i++;
goto next_desc;
}
- if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
total_rx_packets++;
skb->protocol = eth_type_trans(skb, adapter->netdev);
+#ifndef IXGBE_NO_LRO
+ if (ixgbe_lro_ring_queue(rx_ring->lrolist,
+ adapter, skb, staterr, rx_ring, rx_desc) == 0) {
+ adapter->netdev->last_rx = jiffies;
+ rx_ring->stats.packets++;
+ if (upper_len)
+ rx_ring->stats.bytes += upper_len;
+ else
+ rx_ring->stats.bytes += skb->len;
+ goto next_desc;
+ }
+#endif
ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
adapter->netdev->last_rx = jiffies;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
-#ifdef CONFIG_IXGBE_LRO
+ rx_ring->next_to_clean = i;
+#ifndef IXGBE_NO_LRO
+ ixgbe_lro_ring_flush_all(rx_ring->lrolist, adapter,
+ staterr, rx_ring, rx_desc);
+#endif /* IXGBE_NO_LRO */
+ cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+#ifndef IXGBE_NO_INET_LRO
if (rx_ring->lro_used) {
lro_flush_all(&rx_ring->lro_mgr);
rx_ring->lro_used = false;
}
#endif
- rx_ring->next_to_clean = i;
- cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
-
if (cleaned_count)
ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
adapter->net_stats.rx_bytes += total_rx_bytes;
adapter->net_stats.rx_packets += total_rx_packets;
+#ifndef CONFIG_IXGBE_NAPI
+ /* re-arm the interrupt if we had to bail early and have more work */
+ if (*work_done >= work_to_do)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rx_ring->v_idx);
+#endif
return cleaned;
}
+#ifdef CONFIG_IXGBE_NAPI
static int ixgbe_clean_rxonly(struct napi_struct *, int);
+#endif
/**
* ixgbe_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
+#ifdef IXGBE_TCP_TIMER
+ ixgbe_set_ivar(adapter, IXGBE_IVAR_TCP_TIMER_INDEX, ++v_idx);
+#endif
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
switch (itr_setting) {
case lowest_latency:
- if (bytes_perint > adapter->eitr_low)
+ if (bytes_perint > adapter->eitr_low) {
retval = low_latency;
+ }
break;
case low_latency:
- if (bytes_perint > adapter->eitr_high)
+ if (bytes_perint > adapter->eitr_high) {
retval = bulk_latency;
- else if (bytes_perint <= adapter->eitr_low)
+		} else if (bytes_perint <= adapter->eitr_low) {
retval = lowest_latency;
+ }
break;
case bulk_latency:
- if (bytes_perint <= adapter->eitr_high)
+ if (bytes_perint <= adapter->eitr_high) {
retval = low_latency;
+ }
break;
}
u32 new_itr;
u8 current_itr, ret_itr;
int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
- sizeof(struct ixgbe_q_vector);
+ sizeof(struct ixgbe_q_vector);
struct ixgbe_ring *rx_ring, *tx_ring;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
q_vector->eitr = new_itr;
itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
/* must write high and low 16 bits to reset counter */
- DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
- itr_reg);
+ DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, itr_reg);
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
}
return IRQ_HANDLED;
}
+#ifdef IXGBE_TCP_TIMER
+static irqreturn_t ixgbe_msix_pba(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ u32 pba = readl(adapter->msix_addr + IXGBE_MSIXPBA);
+ for (i = 0; i < MAX_MSIX_COUNT; i++) {
+ if (pba & (1 << i))
+			adapter->msix_handlers[i](irq, data);
+ else
+ adapter->pba_zero[i]++;
+ }
+
+ adapter->msix_pba++;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbe_msix_tcp_timer(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msix_tcp_timer++;
+
+ return IRQ_HANDLED;
+}
+
+#endif /* IXGBE_TCP_TIMER */
static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
struct ixgbe_q_vector *q_vector = data;
r_idx + 1);
}
+ /*
+ * possibly later we can enable tx auto-adjustment if necessary
+ *
+ if (adapter->itr_setting & 3)
+ ixgbe_set_itr_msix(q_vector);
+ */
+
return IRQ_HANDLED;
}
int i;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = &(adapter->rx_ring[r_idx]);
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
+#ifndef CONFIG_IXGBE_NAPI
+ ixgbe_clean_rx_irq(adapter, rx_ring);
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_rx_dca(adapter, rx_ring);
+
+#endif
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ if (adapter->itr_setting & 3)
+ ixgbe_set_itr_msix(q_vector);
+#else
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
}
/* disable interrupts on this vector only */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
netif_rx_schedule(adapter->netdev, &q_vector->napi);
+#endif
return IRQ_HANDLED;
}
return IRQ_HANDLED;
}
+#ifdef CONFIG_IXGBE_NAPI
/**
* ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
* @napi: napi struct with our devices info in it
ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
/* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
+ if ((work_done == 0) || !netif_running(adapter->netdev)) {
netif_rx_complete(adapter->netdev, napi);
if (adapter->itr_setting & 3)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
+ return 0;
}
return work_done;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
rx_ring = &(adapter->rx_ring[r_idx]);
/* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
+ if ((work_done == 0) || !netif_running(adapter->netdev)) {
netif_rx_complete(adapter->netdev, napi);
if (adapter->itr_setting & 3)
ixgbe_set_itr_msix(q_vector);
return work_done;
}
+
+#endif /* CONFIG_IXGBE_NAPI */
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
int r_idx)
{
set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
a->q_vector[v_idx].rxr_count++;
a->rx_ring[r_idx].v_idx = 1 << v_idx;
+
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, int vectors)
{
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
struct net_device *netdev = adapter->netdev;
irqreturn_t (*handler)(int, void *);
int i, vector, q_vectors, err;
+ int ri = 0, ti = 0;
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
&ixgbe_msix_clean_many)
for (vector = 0; vector < q_vectors; vector++) {
handler = SET_HANDLER(&adapter->q_vector[vector]);
- sprintf(adapter->name[vector], "%s:v%d-%s",
- netdev->name, vector,
- (handler == &ixgbe_msix_clean_rx) ? "Rx" :
- ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
+
+ if (handler == &ixgbe_msix_clean_rx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "rx", ri++);
+ } else if (handler == &ixgbe_msix_clean_tx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "tx", ti++);
+ } else {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "TxRx", vector);
+ }
err = request_irq(adapter->msix_entries[vector].vector,
handler, 0, adapter->name[vector],
&(adapter->q_vector[vector]));
&ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
if (err) {
DPRINTK(PROBE, ERR,
- "request_irq for msix_lsc failed: %d\n", err);
+ "request_irq for msix_lsc failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+#ifdef IXGBE_TCP_TIMER
+ vector++;
+ sprintf(adapter->name[vector], "%s:timer", netdev->name);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &ixgbe_msix_tcp_timer, 0, adapter->name[vector],
+ netdev);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "request_irq for msix_tcp_timer failed: %d\n", err);
+ /* Free "Other" interrupt */
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
goto free_queue_irqs;
}
+#endif
return 0;
free_queue_irqs:
return;
}
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
+/**
+ * ixgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
+{
+ u32 mask;
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+ mask |= IXGBE_EIMS_GPI_SDP1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+}
+
/**
* ixgbe_intr - legacy mode Interrupt Handler
* therefore no explict interrupt disable is necessary */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (!eicr) {
+#ifdef CONFIG_IXGBE_NAPI
/* shared interrupt alert!
* make sure interrupts are enabled because the read will
* have disabled interrupts due to EIAM */
ixgbe_irq_enable(adapter);
- return IRQ_NONE; /* Not our interrupt */
+#endif
+ return IRQ_NONE; /* Not our interrupt */
}
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_fan_failure(adapter, eicr);
+#ifdef CONFIG_IXGBE_NAPI
if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
adapter->tx_ring[0].total_packets = 0;
adapter->tx_ring[0].total_bytes = 0;
__netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
}
+#else
+ adapter->tx_ring[0].total_packets = 0;
+ adapter->tx_ring[0].total_bytes = 0;
+ adapter->rx_ring[0].total_packets = 0;
+ adapter->rx_ring[0].total_bytes = 0;
+ ixgbe_clean_rx_irq(adapter, adapter->rx_ring);
+ ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+
+ /* dynamically adjust throttle */
+ if (adapter->itr_setting & 3)
+ ixgbe_set_itr(adapter);
+
+#endif
return IRQ_HANDLED;
}
q_vectors = adapter->num_msix_vectors;
i = q_vectors - 1;
+#ifdef IXGBE_TCP_TIMER
+ free_irq(adapter->msix_entries[i].vector, netdev);
+ i--;
+#endif
free_irq(adapter->msix_entries[i].vector, netdev);
i--;
}
}
-/**
- * ixgbe_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
{
- u32 mask;
- mask = IXGBE_EIMS_ENABLE_MASK;
- if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
- mask |= IXGBE_EIMS_GPI_SDP1;
+ u32 mask = IXGBE_EIMS_RTX_QUEUE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- IXGBE_WRITE_FLUSH(&adapter->hw);
+ /* skip the flush */
}
/**
int queue0;
unsigned long mask;
- /* we must program one srrctl register per RSS queue since we
- * have enabled RDRXCTL.MVMEN
+ /* program one srrctl register per VMDq index */
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ long shift, len;
+ mask = (unsigned long) adapter->ring_feature[RING_F_VMDQ].mask;
+ len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
+ shift = find_first_bit(&mask, len);
+ queue0 = (index & mask);
+ index = (index & mask) >> shift;
+ /* if VMDq is not active we must program one srrctl register per
+ * RSS queue since we have enabled RDRXCTL.MVMEN
*/
- mask = (unsigned long)adapter->ring_feature[RING_F_RSS].mask;
- queue0 = index & mask;
- index = index & mask;
+ } else {
+ mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+ queue0 = index & mask;
+ index = index & mask;
+ }
rx_ring = &adapter->rx_ring[queue0];
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- u16 bufsz = IXGBE_RXBUFFER_2048;
- /* grow the amount we can receive on large page machines */
- if (bufsz < (PAGE_SIZE / 2))
- bufsz = (PAGE_SIZE / 2);
- /* cap the bufsz at our largest descriptor size */
- bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz);
-
- srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
srrctl |= ((IXGBE_RX_HDR_SIZE <<
IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}
-#ifdef CONFIG_IXGBE_LRO
+#ifndef IXGBE_NO_INET_LRO
/**
* ixgbe_get_skb_hdr - helper function for LRO header processing
* @skb: pointer to sk_buff to be added to LRO packet
return 0;
}
-#endif /* CONFIG_IXGBE_LRO */
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
- (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
-
+#endif /* IXGBE_NO_INET_LRO */
/**
* ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
* @adapter: board private structure
0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
0x6A3E67EA, 0x14364D17, 0x3BED200D};
u32 fctrl, hlreg0;
- u32 pages;
u32 reta = 0, mrqc;
+ u32 vmdctl;
u32 rdrxctl;
int rx_buf_len;
- /* Decide whether to use packet split mode or not */
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+#ifndef IXGBE_NO_LRO
+ adapter->lro_data.max = lromax;
- /* Set the RX buffer length according to the mode */
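+	/* cap the per-flow aggregation so the coalesced IP datagram
+	 * cannot overflow the 16-bit tot_len field */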
+ if (lromax * netdev->mtu > (1 << 16))
+ adapter->lro_data.max = ((1 << 16) / netdev->mtu) - 1;
+
+#endif
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE) {
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ } else
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buf_len = IXGBE_RX_HDR_SIZE;
} else {
fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_BAM;
fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
+ fctrl |= IXGBE_FCTRL_PMCF;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- pages = PAGE_USE_COUNT(adapter->netdev->mtu);
-
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
adapter->rx_ring[i].head = IXGBE_RDH(j);
adapter->rx_ring[i].tail = IXGBE_RDT(j);
- adapter->rx_ring[i].rx_buf_len = rx_buf_len;
-#ifdef CONFIG_IXGBE_LRO
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ /* Reserve VMDq set 1 for FCoE, using 3k buffers */
+ if ((i & adapter->ring_feature[RING_F_VMDQ].mask) == 1)
+ adapter->rx_ring[i].rx_buf_len = 3072;
+ else
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+ } else {
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+ }
+#ifndef IXGBE_NO_INET_LRO
 	/* Initial LRO Settings */
- adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
+ adapter->rx_ring[i].lro_mgr.max_aggr = adapter->lro_max_aggr;
adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
+#ifdef CONFIG_IXGBE_NAPI
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
+#endif
adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-#endif
+#endif
ixgbe_configure_srrctl(adapter, j);
}
* effects of setting this bit are only that SRRCTL must be
* fully programmed [0..15]
*/
- if (adapter->flags &
- (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- rdrxctl |= IXGBE_RDRXCTL_MVMEN;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+ rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ rdrxctl |= IXGBE_RDRXCTL_MVMEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
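+	/* clear MRQC so RSS hashing is off while VMDq steers packets
+	 * to pools via VMD_CTL */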
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+ IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL,
+ vmdctl | IXGBE_VMD_CTL_VMDQ_EN);
}
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
+#ifdef NETIF_F_HW_VLAN_TX
static void ixgbe_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
/*
* For a DCB driver, always enable VLAN tag stripping so we can
- * still receive traffic from a DCB-enabled host even if we're
- * not in DCB mode.
+ * still receive traffic from a DCB-enabled host.
*/
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+ struct net_device *v_netdev;
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, true);
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+ /*
+ * Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+ */
+ v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+ v_netdev->features |= adapter->netdev->features;
+ vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
}
static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable(adapter);
-
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, false);
}
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
}
+#endif
+/**
+ * compare_ether_oui - Compare two OUIs
+ * @a: pointer to a 6 byte array containing an Ethernet address
+ * @b: pointer to a 6 byte array containing an Ethernet address
+ *
+ * Compare the Organizationally Unique Identifiers (the first three bytes)
+ * of two Ethernet addresses; returns 0 if they are equal.
+ */
+static inline int compare_ether_oui(const u8 *a, const u8 *b)
+{
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+
+/**
+ * is_fcoe_ether_addr - Compare an Ethernet address to FCoE OUI
+ * @addr: pointer to a 6 byte array containing an Ethernet address
+ *
+ * Compare the Organizationally Unique Identifier of an Ethernet address
+ * with the well-known Fibre Channel over Ethernet OUI.
+ *
+ * Returns 1 if the address has the FCoE OUI
+ */
+static inline int is_fcoe_ether_addr(const u8 *addr)
+{
+ static const u8 fcoe_oui[] = { 0x0e, 0xfc, 0x00 };
+ return compare_ether_oui(addr, fcoe_oui) == 0;
+}
+
static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
+ struct ixgbe_adapter *adapter = hw->back;
struct dev_mc_list *mc_ptr;
u8 *addr = *mc_addr_ptr;
*vmdq = 0;
else
*mc_addr_ptr = NULL;
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ /* VMDQ set 1 is used for FCoE */
+ if (adapter->ring_feature[RING_F_VMDQ].indices)
+ *vmdq = is_fcoe_ether_addr(addr) ? 1 : 0;
+ if (*vmdq == 1) {
+ u32 hlreg0, mhadd;
+
+ /* Make sure that jumbo frames are enabled */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /* set the max frame size to pass receive filtering */
+ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ mhadd &= IXGBE_MHADD_MFS_MASK;
+ mhadd |= 3072 << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+ }
+ }
return addr;
}
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+#ifdef HAVE_SET_RX_MODE
/* reprogram secondary unicast list */
addr_count = netdev->uc_count;
if (addr_count)
addr_list = netdev->uc_list->dmi_addr;
- hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
- ixgbe_addr_list_itr);
+ if (hw->mac.ops.update_uc_addr_list)
+ hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
+ ixgbe_addr_list_itr);
+#endif
/* reprogram multicast list */
addr_count = netdev->mc_count;
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbe_addr_list_itr);
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+ ixgbe_addr_list_itr);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
+#ifdef CONFIG_IXGBE_NAPI
int q_idx;
struct ixgbe_q_vector *q_vector;
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
napi_enable(napi);
}
+#endif
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
+#ifdef CONFIG_IXGBE_NAPI
int q_idx;
struct ixgbe_q_vector *q_vector;
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
continue;
napi_disable(&q_vector->napi);
}
+#endif
}
-#ifdef CONFIG_IXGBE_DCB
/*
* ixgbe_configure_dcb - Configure DCB hardware
* @adapter: ixgbe adapter struct
{
struct ixgbe_hw *hw = &adapter->hw;
u32 txdctl, vlnctrl;
+ s32 err;
int i, j;
- ixgbe_dcb_check_config(&adapter->dcb_cfg);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+ err = ixgbe_dcb_check_config(&adapter->dcb_cfg);
+ if (err)
+ DPRINTK(DRV, ERR, "err in dcb_check_config\n");
+ err = ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
+ if (err)
+ DPRINTK(DRV, ERR, "err in dcb_calculate_tc_credits (TX)\n");
+ err = ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+ if (err)
+ DPRINTK(DRV, ERR, "err in dcb_calculate_tc_credits (RX)\n");
/* reconfigure the hardware */
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, 0, 0, true);
+}
+
+#ifndef IXGBE_NO_LLI
+static void ixgbe_configure_lli(struct ixgbe_adapter *adapter)
+{
+ u16 port;
+
+ if (adapter->lli_port) {
+ /* use filter 0 for port */
+ port = ntohs((u16)adapter->lli_port);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(0),
+ (port | IXGBE_IMIR_PORT_IM_EN));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(0),
+ (IXGBE_IMIREXT_SIZE_BP |
+ IXGBE_IMIREXT_CTRL_BP));
+ }
+
+ if (adapter->flags & IXGBE_FLAG_LLI_PUSH) {
+ /* use filter 1 for push flag */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(1),
+ (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(1),
+ (IXGBE_IMIREXT_SIZE_BP |
+ IXGBE_IMIREXT_CTRL_PSH));
+ }
+
+ if (adapter->lli_size) {
+ /* use filter 2 for size */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(2),
+ (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(2),
+ (adapter->lli_size | IXGBE_IMIREXT_CTRL_BP));
+ }
}
-#endif /* CONFIG_IXGBE_DCB */
+#endif /* IXGBE_NO_LLI */
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
ixgbe_set_rx_mode(netdev);
+#ifdef NETIF_F_HW_VLAN_TX
ixgbe_restore_vlan(adapter);
-#ifdef CONFIG_IXGBE_DCB
+#endif
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
netif_set_gso_max_size(netdev, 32768);
ixgbe_configure_dcb(adapter);
} else {
netif_set_gso_max_size(netdev, 65536);
}
-#else
- netif_set_gso_max_size(netdev, 65536);
-#endif /* CONFIG_IXGBE_DCB */
-
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
- (adapter->rx_ring[i].count - 1));
+ IXGBE_DESC_UNUSED(&adapter->rx_ring[i]));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int i, j = 0;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+#ifdef IXGBE_TCP_TIMER
+ u32 tcp_timer;
+#endif
u32 txdctl, rxdctl, mhadd;
u32 gpie;
ixgbe_get_hw_control(adapter);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+ if (adapter->num_tx_queues > 1)
+ netdev->features |= NETIF_F_MULTI_QUEUE;
+
+#endif
if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
(adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
/* XXX: to interrupt immediately for EICS writes, enable this */
/* gpie |= IXGBE_GPIE_EIMEN; */
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+#ifdef IXGBE_TCP_TIMER
+
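+		/* let the TCP timer free-run: maximum duration with
+		 * kick-start, count enable and loop set */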
+ tcp_timer = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
+ tcp_timer |= IXGBE_TCPTIMER_DURATION_MASK;
+ tcp_timer |= (IXGBE_TCPTIMER_KS |
+ IXGBE_TCPTIMER_COUNT_ENABLE |
+ IXGBE_TCPTIMER_LOOP);
+ IXGBE_WRITE_REG(hw, IXGBE_TCPTIMER, tcp_timer);
+ tcp_timer = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
+#endif
}
+#ifdef CONFIG_IXGBE_NAPI
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/* legacy interrupts, use EIAM to auto-mask when reading EICR,
* specifically only auto mask tx and rx interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
}
+#endif
/* Enable fan failure interrupt if media type is copper */
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
ixgbe_configure_msix(adapter);
else
ixgbe_configure_msi_and_legacy(adapter);
+#ifndef IXGBE_NO_LLI
+ /* lli should only be enabled with MSI-X and MSI */
+ if (adapter->flags & IXGBE_FLAG_MSI_ENABLED ||
+ adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ ixgbe_configure_lli(adapter);
+#endif
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
int ixgbe_up(struct ixgbe_adapter *adapter)
{
- /* hardware has been reset, we need to reload some things */
ixgbe_configure(adapter);
return ixgbe_up_complete(adapter);
{
struct ixgbe_hw *hw = &adapter->hw;
if (hw->mac.ops.init_hw(hw))
- dev_err(&adapter->pdev->dev, "Hardware Error\n");
+ DPRINTK(PROBE, ERR, "Hardware Error\n");
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
+ if (hw->mac.ops.set_rar)
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
/**
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
pci_unmap_single(pdev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
+ rx_ring->rx_buf_len + NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
rx_buffer_info->dma = 0;
}
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
- /* Zero out the descriptor ring */
memset(tx_ring->desc, 0, tx_ring->size);
tx_ring->next_to_use = 0;
ixgbe_napi_disable_all(adapter);
del_timer_sync(&adapter->watchdog_timer);
- cancel_work_sync(&adapter->watchdog_task);
+	/* can't call flush_scheduled_work() here because it can deadlock
+	 * if linkwatch_event tries to acquire the rtnl_lock, which we are
+	 * holding */
+ while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+ msleep(1);
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
}
#endif
+#ifdef HAVE_PCI_ERS
if (!pci_channel_offline(adapter->pdev))
+#endif
ixgbe_reset(adapter);
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
/* since we reset the hardware DCA settings were cleared */
- if (dca_add_requester(&adapter->pdev->dev) == 0) {
- adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
- /* always use CB2 mode, difference is masked
- * in the CB driver */
- IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
- ixgbe_setup_dca(adapter);
+ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) {
+ if (dca_add_requester(&adapter->pdev->dev) == 0) {
+ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ /* always use CB2 mode, difference is masked
+ * in the CB driver */
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
+ ixgbe_setup_dca(adapter);
+ }
}
#endif
}
+#ifdef CONFIG_IXGBE_NAPI
/**
* ixgbe_poll - NAPI Rx polling callback
* @napi: structure for representing this polling device
**/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
- struct ixgbe_q_vector *q_vector = container_of(napi,
- struct ixgbe_q_vector, napi);
+ struct ixgbe_q_vector *q_vector =
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
int tx_cleaned, work_done = 0;
if (tx_cleaned)
work_done = budget;
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
+ /* If no Tx and not enough Rx work done, exit the polling mode */
+ if ((work_done == 0) || !netif_running(adapter->netdev)) {
netif_rx_complete(adapter->netdev, napi);
if (adapter->itr_setting & 3)
ixgbe_set_itr(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable(adapter);
+ ixgbe_irq_enable_queues(adapter);
+ return 0;
}
return work_done;
}
+#endif /* CONFIG_IXGBE_NAPI */
/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
int nrq = 1, ntq = 1;
int feature_mask = 0, rss_i, rss_m;
int dcb_i, dcb_m;
+ int vmdq_i, vmdq_m;
/* Number of supported queues */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
dcb_i = adapter->ring_feature[RING_F_DCB].indices;
dcb_m = 0;
+ vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices;
+ vmdq_m = 0;
rss_i = adapter->ring_feature[RING_F_RSS].indices;
rss_m = 0;
- feature_mask |= IXGBE_FLAG_RSS_ENABLED;
feature_mask |= IXGBE_FLAG_DCB_ENABLED;
+ feature_mask |= IXGBE_FLAG_VMDQ_ENABLED;
+ feature_mask |= IXGBE_FLAG_RSS_ENABLED;
switch (adapter->flags & feature_mask) {
+ case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED):
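+		/* with all three features enabled, carve the Rx queues
+		 * into (up to) 8 TCs x 2 VMDQ pools x 4 RSS queues */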
+ dcb_m = 0x7 << 3;
+ vmdq_i = min(2, vmdq_i);
+ vmdq_m = 0x1 << 2;
+ rss_i = min(4, rss_i);
+ rss_m = 0x3;
+ nrq = dcb_i * vmdq_i * rss_i;
+ ntq = dcb_i * vmdq_i;
+ break;
+ case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED):
+ dcb_m = 0x7 << 3;
+ vmdq_i = min(8, vmdq_i);
+ vmdq_m = 0x7;
+ nrq = dcb_i * vmdq_i;
+ ntq = min(MAX_TX_QUEUES, dcb_i * vmdq_i);
+ break;
case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
dcb_m = 0x7 << 3;
rss_i = min(8, rss_i);
ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
break;
case (IXGBE_FLAG_DCB_ENABLED):
+#ifdef HAVE_TX_MQ
dcb_m = 0x7 << 3;
nrq = dcb_i;
ntq = dcb_i;
+#else
+ DPRINTK(DRV, INFO, "Kernel has no multiqueue "
+ "support, disabling DCB.\n");
+ /* Fall back onto RSS */
+ rss_m = 0xF;
+ nrq = rss_i;
+ ntq = 1;
+ dcb_m = 0;
+ dcb_i = 0;
+#endif
+ break;
+ case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
+ vmdq_i = min(4, vmdq_i);
+ vmdq_m = 0x3 << 3;
+ rss_m = 0xF;
+ nrq = vmdq_i * rss_i;
+ ntq = min(MAX_TX_QUEUES, vmdq_i * rss_i);
+ break;
+ case (IXGBE_FLAG_VMDQ_ENABLED):
+ vmdq_m = 0xF;
+ nrq = vmdq_i;
+ ntq = vmdq_i;
break;
case (IXGBE_FLAG_RSS_ENABLED):
rss_m = 0xF;
nrq = rss_i;
+#ifdef HAVE_TX_MQ
ntq = rss_i;
+#else
+ ntq = 1;
+#endif
break;
case 0:
default:
dcb_m = 0;
rss_i = 0;
rss_m = 0;
+ vmdq_i = 0;
+ vmdq_m = 0;
nrq = 1;
ntq = 1;
break;
}
- /* Sanity check, we should never have zero queues */
+ /* sanity check, we should never have zero queues */
nrq = (nrq ?:1);
ntq = (ntq ?:1);
adapter->ring_feature[RING_F_DCB].indices = dcb_i;
adapter->ring_feature[RING_F_DCB].mask = dcb_m;
+ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
adapter->ring_feature[RING_F_RSS].indices = rss_i;
adapter->ring_feature[RING_F_RSS].mask = rss_m;
break;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
ixgbe_set_num_queues(adapter);
} else {
adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
int feature_mask = 0, rss_i;
int i, txr_idx, rxr_idx;
int dcb_i;
+ int vmdq_i, k;
/* Number of supported queues */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+ vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices;
rss_i = adapter->ring_feature[RING_F_RSS].indices;
txr_idx = 0;
rxr_idx = 0;
feature_mask |= IXGBE_FLAG_DCB_ENABLED;
+ feature_mask |= IXGBE_FLAG_VMDQ_ENABLED;
feature_mask |= IXGBE_FLAG_RSS_ENABLED;
switch (adapter->flags & feature_mask) {
+ case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED):
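+		/* Rx register index: TC in bits 5:3, VMDQ pool in
+		 * bit 2, RSS queue in bits 1:0 */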
+ for (i = 0; i < dcb_i; i++) {
+ int j;
+ for (j = 0; j < vmdq_i; j++) {
+ for (k = 0; k < rss_i; k++) {
+ adapter->rx_ring[rxr_idx].reg_idx = i << 3 |
+ j << 2 |
+ k;
+ rxr_idx++;
+ }
+ adapter->tx_ring[txr_idx].reg_idx = i << 2 | j;
+ txr_idx++;
+ }
+ }
+ break;
+ case (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_DCB_ENABLED):
+ for (i = 0; i < dcb_i; i++) {
+ int j;
+ for (j = 0; j < vmdq_i; j++) {
+ adapter->rx_ring[rxr_idx].reg_idx = i << 3 | j;
+ adapter->tx_ring[txr_idx].reg_idx = i << 2 |
+ (j >> 1);
+ rxr_idx++;
+ if (j & 1)
+ txr_idx++;
+ }
+ }
+ break;
case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
for (i = 0; i < dcb_i; i++) {
- int j;
- /* Rx first */
- for (j = 0; j < adapter->num_rx_queues; j++) {
- adapter->rx_ring[rxr_idx].reg_idx =
- i << 3 | j;
- rxr_idx++;
- }
- /* Tx now */
- for (j = 0; j < adapter->num_tx_queues; j++) {
- adapter->tx_ring[txr_idx].reg_idx =
- i << 2 | (j >> 1);
- if (j & 1)
- txr_idx++;
- }
+ int j;
+ /* Rx first */
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ adapter->rx_ring[rxr_idx].reg_idx = i << 3 | j;
+ rxr_idx++;
}
+ /* Tx now */
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ adapter->tx_ring[txr_idx].reg_idx = i << 2 |
+ (j >> 1);
+ if (j & 1)
+ txr_idx++;
+ }
+ }
+ break;
case (IXGBE_FLAG_DCB_ENABLED):
/* the number of queues is assumed to be symmetric */
for (i = 0; i < dcb_i; i++) {
adapter->tx_ring[i].reg_idx = i << 2;
}
break;
+ case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
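+		/* Rx register index: VMDQ pool in the upper bits
+		 * (i << 4), RSS queue in the low four bits */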
+ for (i = 0; i < vmdq_i; i++) {
+ int j;
+ for (j = 0; j < rss_i; j++) {
+ adapter->rx_ring[rxr_idx].reg_idx = i << 4 | j;
+ adapter->tx_ring[txr_idx].reg_idx = i << 3 |
+ (j >> 1);
+ rxr_idx++;
+ if (j & 1)
+ txr_idx++;
+ }
+ }
+ break;
+ case (IXGBE_FLAG_VMDQ_ENABLED):
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].reg_idx = i;
+ break;
case (IXGBE_FLAG_RSS_ENABLED):
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i].reg_idx = i;
}
}
+
/**
* ixgbe_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
int err = 0;
int vector, v_budget;
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE))
+ goto try_msi;
+
/*
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries) {
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
ixgbe_set_num_queues(adapter);
kfree(adapter->tx_ring);
err = ixgbe_alloc_queues(adapter);
if (err) {
DPRINTK(PROBE, ERR, "Unable to allocate memory "
- "for queues\n");
+ "for queues\n");
goto out;
}
goto out;
try_msi:
+ if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE))
+ goto out;
+
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
} else {
DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
- "falling back to legacy. Error: %d\n", err);
+ "falling back to legacy. Error: %d\n", err);
/* reset err */
err = 0;
}
out:
+#ifdef HAVE_TX_MQ
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
/* Notify the stack of the (possibly) reduced Tx Queue count. */
+ adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
+#else /* CONFIG_NETDEVICES_MULTIQUEUE */
adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
-
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* HAVE_TX_MQ */
return err;
}
}
DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
- "Tx Queue count = %u\n",
+ "Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" :
"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
static void ixgbe_sfp_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- sfp_task);
+ struct ixgbe_adapter,
+ sfp_task);
struct ixgbe_hw *hw = &adapter->hw;
if ((hw->phy.type == ixgbe_phy_nl) &&
ret = hw->phy.ops.reset(hw);
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
DPRINTK(PROBE, ERR, "failed to initialize because an "
- "unsupported SFP+ module type was detected.\n"
- "Reload the driver after installing a "
- "supported module.\n");
+ "unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a "
+ "supported module.\n");
unregister_netdev(adapter->netdev);
} else {
DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
- hw->phy.sfp_type);
+ hw->phy.sfp_type);
}
/* don't need this routine any more */
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
reschedule:
if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
mod_timer(&adapter->sfp_timer,
- round_jiffies(jiffies + (2 * HZ)));
+ round_jiffies(jiffies + (2 * HZ)));
}
/**
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- unsigned int rss;
-#ifdef CONFIG_IXGBE_DCB
- int j;
- struct tc_configuration *tc;
-#endif /* CONFIG_IXGBE_DCB */
+ int err;
/* PCI config space info */
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
- hw->revision_id = pdev->revision;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ err = ixgbe_init_shared_code(hw);
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+		/* start the sfp_timer to poll for a module to arrive */
+ set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+ mod_timer(&adapter->sfp_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+ err = 0;
+ } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ DPRINTK(PROBE, ERR, "failed to load because an "
+ "unsupported SFP+ module type was detected.\n");
+ goto out;
+ } else if (err) {
+ DPRINTK(PROBE, ERR, "init_shared_code failed: %d\n", err);
+ goto out;
+ }
+
/* Set capability flags */
- rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
- adapter->ring_feature[RING_F_RSS].indices = rss;
- adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
- if (hw->mac.ops.get_media_type &&
- (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
- adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
- adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-
-#ifdef CONFIG_IXGBE_DCB
- /* Configure DCB traffic classes */
- for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
- tc = &adapter->dcb_cfg.tc_config[j];
- tc->path[DCB_TX_CONFIG].bwg_id = 0;
- tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->path[DCB_RX_CONFIG].bwg_id = 0;
- tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->dcb_pfc = pfc_disabled;
- }
- adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
- adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
- adapter->dcb_cfg.rx_pba_cfg = pba_equal;
- adapter->dcb_cfg.round_robin_enable = false;
- adapter->dcb_set_bitmap = 0x00;
- ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
-#endif /* CONFIG_IXGBE_DCB */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ if (hw->mac.ops.get_media_type &&
+ (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
+ adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_MSI_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE;
+ if (adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
+ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+#ifdef CONFIG_IXGBE_RSS
+ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_RSS_CAPABLE;
+#endif
+ if (adapter->flags & IXGBE_FLAG_MQ_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_VMDQ_CAPABLE;
+ break;
+ default:
+ break;
+ }
+
+ /* Default DCB settings, if applicable */
+ adapter->ring_feature[RING_F_DCB].indices = 8;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) {
+ int j;
+ struct tc_configuration *tc;
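+		/* 12% for even TCs and 13% for odd TCs sums to 100%
+		 * across the eight traffic classes */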
+ for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+ tc = &adapter->dcb_cfg.tc_config[j];
+ tc->path[DCB_TX_CONFIG].bwg_id = 0;
+ tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->path[DCB_RX_CONFIG].bwg_id = 0;
+ tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->dcb_pfc = pfc_disabled;
+ }
+ adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+ adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+ adapter->dcb_cfg.rx_pba_cfg = pba_equal;
+ adapter->dcb_cfg.round_robin_enable = false;
+ adapter->dcb_set_bitmap = 0x00;
+ }
/* default flow control settings */
- hw->fc.original_type = ixgbe_fc_none;
- hw->fc.type = ixgbe_fc_none;
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw->fc.requested_mode = ixgbe_fc_none;
hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
- /* select 10G link by default */
- hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
-
- /* enable itr by default in dynamic mode */
- adapter->itr_setting = 1;
- adapter->eitr_param = 20000;
-
/* set defaults for eitr in MegaBytes */
adapter->eitr_low = 10;
adapter->eitr_high = 20;
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
- /* initialize eeprom parameters */
- if (ixgbe_init_eeprom_params_generic(hw)) {
- dev_err(&pdev->dev, "EEPROM initialization failed\n");
- return -EIO;
- }
-
/* enable rx csum by default */
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
set_bit(__IXGBE_DOWN, &adapter->state);
-
- return 0;
+out:
+ return err;
}
/**
return -ENOMEM;
}
-/**
- * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
-{
- int i, err = 0;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
- if (!err)
- continue;
- DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
- break;
- }
-
- return err;
-}
-
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
* @adapter: board private structure
struct pci_dev *pdev = adapter->pdev;
int size;
-#ifdef CONFIG_IXGBE_LRO
+#ifndef IXGBE_NO_INET_LRO
size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
rx_ring->lro_mgr.lro_arr = vmalloc(size);
if (!rx_ring->lro_mgr.lro_arr)
return -ENOMEM;
memset(rx_ring->lro_mgr.lro_arr, 0, size);
+
+#endif /* IXGBE_NO_INET_LRO */
+#ifndef IXGBE_NO_LRO
+ size = sizeof(struct ixgbe_lro_list);
+ rx_ring->lrolist = vmalloc(size);
+ if (!rx_ring->lrolist)
+ return -ENOMEM;
+ memset(rx_ring->lrolist, 0, size);
+
#endif
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) {
DPRINTK(PROBE, ERR,
- "vmalloc allocation failed for the rx desc ring\n");
+ "Unable to vmalloc buffer memory for "
+ "the receive descriptor ring\n");
goto alloc_failed;
}
memset(rx_ring->rx_buffer_info, 0, size);
if (!rx_ring->desc) {
DPRINTK(PROBE, ERR,
- "Memory allocation failed for the rx desc ring\n");
+ "Unable to allocate memory for "
+ "the receive descriptor ring\n");
vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
goto alloc_failed;
}
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+#ifndef CONFIG_IXGBE_NAPI
+ rx_ring->work_limit = rx_ring->count / 2;
+#endif
+#ifndef IXGBE_NO_LRO
+ ixgbe_lro_ring_init(rx_ring->lrolist, adapter);
+#endif
return 0;
-
alloc_failed:
-#ifdef CONFIG_IXGBE_LRO
+#ifndef IXGBE_NO_INET_LRO
vfree(rx_ring->lro_mgr.lro_arr);
rx_ring->lro_mgr.lro_arr = NULL;
+#endif
+#ifndef IXGBE_NO_LRO
+ vfree(rx_ring->lrolist);
+ rx_ring->lrolist = NULL;
#endif
return -ENOMEM;
}
-/**
- * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-
-static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
-{
- int i, err = 0;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
- if (!err)
- continue;
- DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
- break;
- }
-
- return err;
-}
-
/**
* ixgbe_free_tx_resources - Free Tx Resources per Queue
* @adapter: board private structure
{
struct pci_dev *pdev = adapter->pdev;
-#ifdef CONFIG_IXGBE_LRO
+#ifndef IXGBE_NO_INET_LRO
vfree(rx_ring->lro_mgr.lro_arr);
rx_ring->lro_mgr.lro_arr = NULL;
#endif
-
+#ifndef IXGBE_NO_LRO
+ if (rx_ring->lrolist)
+ ixgbe_lro_ring_exit(rx_ring->lrolist);
+ vfree(rx_ring->lrolist);
+ rx_ring->lrolist = NULL;
+#endif
ixgbe_clean_rx_ring(adapter, rx_ring);
vfree(rx_ring->rx_buffer_info);
ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
+/**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
/**
* ixgbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
return 0;
}
+#ifdef CONFIG_IXGBE_NAPI
/**
* ixgbe_napi_add_all - prep napi structs for use
* @adapter: private struct
+ *
* helper function to napi_add each possible q_vector->napi
*/
void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
}
}
+#endif
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
- "suspend\n");
+ "suspend\n");
return err;
}
pci_set_master(pdev);
err = ixgbe_init_interrupt_scheme(adapter);
if (err) {
printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
- "device\n");
+ "device\n");
return err;
}
+#ifdef CONFIG_IXGBE_NAPI
ixgbe_napi_add_all(adapter);
+
+#endif
ixgbe_reset(adapter);
if (netif_running(netdev)) {
ixgbe_free_all_rx_resources(adapter);
}
ixgbe_reset_interrupt_capability(adapter);
+
+#ifdef CONFIG_IXGBE_NAPI
ixgbe_napi_del_all(adapter);
+#endif
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
return 0;
}
+#ifndef USE_REBOOT_NOTIFIER
static void ixgbe_shutdown(struct pci_dev *pdev)
{
ixgbe_suspend(pdev, PMSG_SUSPEND);
}
+#endif
+
/**
* ixgbe_update_stats - Update the board statistics counters.
* @adapter: board private structure
adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXONRXC(i));
+ IXGBE_PXONRXC(i));
adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXONTXC(i));
+ IXGBE_PXONTXC(i));
adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXOFFRXC(i));
+ IXGBE_PXOFFRXC(i));
adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXOFFTXC(i));
+ IXGBE_PXOFFTXC(i));
}
adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
/* work around hardware counting issue */
(1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
} else {
- /* For legacy and MSI interrupts don't set any bits that
+ /* for legacy and MSI interrupts don't set any bits that
* are enabled for EIAM, because this operation would
* set *both* EIMS and EICS for any bit in EIAM */
IXGBE_WRITE_REG(hw, IXGBE_EICS,
- (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+ (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
}
/* Reset the timer */
mod_timer(&adapter->watchdog_timer,
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (hw->mac.ops.check_link) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ } else {
+			/* if there is no check_link function, assume link is up */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
if (link_up ||
time_after(jiffies, (adapter->link_check_timeout +
IXGBE_TRY_LINK_TIMEOUT))) {
adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
-static int ixgbe_tso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+static int ixgbe_tso(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
+#ifdef NETIF_F_TSO
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
int err;
IPPROTO_TCP,
0);
adapter->hw_tso_ctxt++;
+#ifdef NETIF_F_TSO6
} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6h->saddr,
+ &ipv6h->daddr,
+ 0, IPPROTO_TCP,
+ 0);
adapter->hw_tso6_ctxt++;
+#endif
}
i = tx_ring->next_to_use;
/* VLAN MACLEN IPLEN */
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
vlan_macip_lens |= ((skb_network_offset(skb)) <<
IXGBE_ADVTXD_MACLEN_SHIFT);
*hdr_len += skb_network_offset(skb);
vlan_macip_lens |=
- (skb_transport_header(skb) - skb_network_header(skb));
+ (skb_transport_header(skb) - skb_network_header(skb));
*hdr_len +=
- (skb_transport_header(skb) - skb_network_header(skb));
+ (skb_transport_header(skb) - skb_network_header(skb));
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = 0;
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
+ IXGBE_ADVTXD_DTYP_CTXT);
if (skb->protocol == htons(ETH_P_IP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
/* MSS L4LEN IDX */
mss_l4len_idx =
- (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+ (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
/* use index 1 for TSO */
mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
return true;
}
+
+#endif
return false;
}
context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= (tx_flags &
+ IXGBE_TX_FLAGS_VLAN_MASK);
vlan_macip_lens |= (skb_network_offset(skb) <<
IXGBE_ADVTXD_MACLEN_SHIFT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
break;
+#ifdef NETIF_F_IPV6_CSUM
case __constant_htons(ETH_P_IPV6):
/* XXX what about other V6 headers?? */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
break;
+#endif
default:
if (unlikely(net_ratelimit())) {
DPRINTK(PROBE, WARNING,
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, unsigned int first)
+ struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ unsigned int first)
{
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len = skb->len;
unsigned int offset = 0, size, count = 0, i;
+#ifdef MAX_SKB_FRAGS
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
len -= skb->data_len;
+#endif
i = tx_ring->next_to_use;
while (len) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
tx_buffer_info->dma = pci_map_single(adapter->pdev,
- skb->data + offset,
- size, PCI_DMA_TODEVICE);
+ skb->data + offset, size,
+ PCI_DMA_TODEVICE);
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
i = 0;
}
+#ifdef MAX_SKB_FRAGS
for (f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
while (len) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
tx_buffer_info->dma = pci_map_page(adapter->pdev,
- frag->page,
- offset,
+ frag->page, offset,
size,
PCI_DMA_TODEVICE);
tx_buffer_info->time_stamp = jiffies;
i = 0;
}
}
+
+#endif
if (i == 0)
i = tx_ring->count - 1;
else
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- int tx_flags, int count, u32 paylen, u8 hdr_len)
+ struct ixgbe_ring *tx_ring, int tx_flags,
+ int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbe_tx_buffer *tx_buffer_info;
u32 olinfo_status = 0, cmd_type_len = 0;
unsigned int i;
+
u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
i++;
if (i == tx_ring->count)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
netif_stop_subqueue(netdev, tx_ring->queue_index);
+
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */
}
static int ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+ struct ixgbe_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
u8 hdr_len = 0;
int r_idx = 0, tso;
int count = 0;
+
+#ifdef MAX_SKB_FRAGS
unsigned int f;
+#endif
+#ifdef HAVE_TX_MQ
r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
+#endif
tx_ring = &adapter->tx_ring[r_idx];
+#ifdef NETIF_F_HW_VLAN_TX
if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
+#ifdef HAVE_TX_MQ
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
tx_flags |= (skb->queue_mapping << 13);
}
+#endif
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
+#ifdef HAVE_TX_MQ
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags |= (skb->queue_mapping << 13);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
+#endif
}
+#endif
/* three things can cause us to need a context descriptor */
if (skb_is_gso(skb) ||
(skb->ip_summed == CHECKSUM_PARTIAL) ||
count++;
count += TXD_USE_COUNT(skb_headlen(skb));
+#ifdef MAX_SKB_FRAGS
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#endif
if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
adapter->tx_busy++;
if (tso)
tx_flags |= IXGBE_TX_FLAGS_TSO;
else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
+ (skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
ixgbe_tx_queue(adapter, tx_ring, tx_flags,
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ if (hw->mac.ops.set_rar)
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
return 0;
}
+#ifdef ETHTOOL_OPS_COMPAT
+/**
+ * ixgbe_ioctl - handle ioctl requests (ethtool compatibility path)
+ * @netdev: network interface device structure
+ * @ifr: interface request structure
+ * @cmd: ioctl command number
+ **/
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCETHTOOL:
+ return ethtool_ioctl(ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- disable_irq(adapter->pdev->irq);
+ /* XXX is disable_irq the right thing to do here instead? */
+ ixgbe_irq_disable(adapter);
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbe_intr(adapter->pdev->irq, netdev);
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
- enable_irq(adapter->pdev->irq);
+ ixgbe_irq_enable(adapter);
}
+
#endif
/**
**/
static int ixgbe_link_config(struct ixgbe_hw *hw)
{
- u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
-
- /* must always autoneg for both 1G and 10G link */
- hw->mac.autoneg = true;
-
- if ((hw->mac.type == ixgbe_mac_82598EB) &&
- (hw->phy.media_type == ixgbe_media_type_copper))
- autoneg = IXGBE_LINK_SPEED_82598_AUTONEG;
-
- return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
+ u32 autoneg;
+ bool link_up = false;
+ u32 ret = IXGBE_ERR_LINK_SETUP;
+
+ if (hw->mac.ops.check_link)
+ ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (ret || !link_up)
+ goto link_cfg_out;
+
+ if (hw->mac.ops.get_link_capabilities)
+ ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+ &hw->mac.autoneg);
+ if (ret)
+ goto link_cfg_out;
+
+ if (hw->mac.ops.setup_link_speed)
+ ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
+link_cfg_out:
+ return ret;
}
/**
{
struct net_device *netdev;
struct ixgbe_adapter *adapter = NULL;
- struct ixgbe_hw *hw;
- const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
+ struct ixgbe_hw *hw = NULL;
static int cards_found;
int i, err, pci_using_dac;
- u16 link_status, link_speed, link_width;
- u32 part_num, eec;
err = pci_enable_device(pdev);
if (err)
}
pci_set_master(pdev);
- pci_save_state(pdev);
+#ifdef HAVE_TX_MQ
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
+#endif
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
hw->back = adapter;
adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+#ifdef HAVE_PCI_ERS
+ pci_save_state(pdev);
+#endif
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!hw->hw_addr) {
netdev->stop = &ixgbe_close;
netdev->hard_start_xmit = &ixgbe_xmit_frame;
netdev->get_stats = &ixgbe_get_stats;
+#ifdef HAVE_SET_RX_MODE
netdev->set_rx_mode = &ixgbe_set_rx_mode;
+#endif
netdev->set_multicast_list = &ixgbe_set_rx_mode;
netdev->set_mac_address = &ixgbe_set_mac;
netdev->change_mtu = &ixgbe_change_mtu;
+#ifdef ETHTOOL_OPS_COMPAT
+ netdev->do_ioctl = &ixgbe_ioctl;
+#endif
ixgbe_set_ethtool_ops(netdev);
+#ifdef HAVE_TX_TIMEOUT
netdev->tx_timeout = &ixgbe_tx_timeout;
netdev->watchdog_timeo = 5 * HZ;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
netdev->vlan_rx_register = ixgbe_vlan_rx_register;
netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = ixgbe_netpoll;
#endif
adapter->bd_number = cards_found;
- /* Setup hw api */
- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
- hw->mac.type = ii->mac;
-
- /* EEPROM */
- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
- /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
- if (!(eec & (1 << 8)))
- hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
-
- /* PHY */
- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
- hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+#ifdef IXGBE_TCP_TIMER
+ adapter->msix_addr = ioremap(pci_resource_start(pdev, 3),
+ pci_resource_len(pdev, 3));
+ if (!adapter->msix_addr) {
+ err = -EIO;
+ printk("Error in ioremap of BAR3\n");
+ goto err_map_msix;
+ }
- /* set up this timer and work struct before calling get_invariants
- * which might start the timer */
+#endif
+ /* set up this timer and work struct before calling sw_init which
+ * might start the timer */
init_timer(&adapter->sfp_timer);
adapter->sfp_timer.function = &ixgbe_sfp_timer;
adapter->sfp_timer.data = (unsigned long) adapter;
INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
- err = ii->get_invariants(hw);
- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
- /* start a kernel thread to watch for a module to arrive */
- set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
- mod_timer(&adapter->sfp_timer,
- round_jiffies(jiffies + (2 * HZ)));
- err = 0;
- } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "failed to load because an "
- "unsupported SFP+ module type was detected.\n");
- goto err_hw_init;
- } else if (err) {
- goto err_hw_init;
- }
-
/* setup the private structure */
err = ixgbe_sw_init(adapter);
if (err)
/* reset_hw fills in the perm_addr as well */
err = hw->mac.ops.reset_hw(hw);
if (err) {
- dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
+ DPRINTK(PROBE, ERR, "HW Init failed: %d\n", err);
goto err_sw_init;
}
+ /* check_options must be called before setup_link_speed to set up
+ * hw->fc completely
+ */
+ ixgbe_check_options(adapter);
+
+#ifdef MAX_SKB_FRAGS
+#ifdef NETIF_F_HW_VLAN_TX
netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+#else
+ netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+#endif
+#ifdef NETIF_F_IPV6_CSUM
netdev->features |= NETIF_F_IPV6_CSUM;
+#endif
+#ifdef NETIF_F_TSO
netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
netdev->features |= NETIF_F_TSO6;
-#ifdef CONFIG_IXGBE_LRO
+#endif /* NETIF_F_TSO6 */
+#endif /* NETIF_F_TSO */
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+#ifndef IXGBE_NO_INET_LRO
netdev->features |= NETIF_F_LRO;
-#endif
+#endif
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+#ifdef NETIF_F_TSO
netdev->vlan_features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
netdev->vlan_features |= NETIF_F_TSO6;
+#endif /* NETIF_F_TSO6 */
+#endif /* NETIF_F_TSO */
netdev->vlan_features |= NETIF_F_IP_CSUM;
netdev->vlan_features |= NETIF_F_SG;
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-
-#ifdef CONFIG_IXGBE_DCB
- netdev->dcbnl_ops = &dcbnl_ops;
-#endif /* CONFIG_IXGBE_DCB */
-
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+#endif /* MAX_SKB_FRAGS */
/* make sure the EEPROM is good */
- if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
- dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+ if (hw->eeprom.ops.validate_checksum &&
+ (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) {
+ DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
- goto err_eeprom;
+ goto err_sw_init;
}
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
- dev_err(&pdev->dev, "invalid MAC address\n");
+ DPRINTK(PROBE, INFO, "invalid MAC address\n");
err = -EIO;
- goto err_eeprom;
+ goto err_sw_init;
+ }
+#else
+ if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
+ DPRINTK(PROBE, INFO, "invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
}
+#endif
+
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &ixgbe_watchdog;
if (err)
goto err_sw_init;
- /* print bus type/speed/width info */
- pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
- link_speed = link_status & IXGBE_PCI_LINK_SPEED;
- link_width = link_status & IXGBE_PCI_LINK_WIDTH;
- dev_info(&pdev->dev, "(PCI Express:%s:%s) "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
- (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
- "Unknown"),
- ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
- (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
- (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
- (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
- "Unknown"),
- netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
- netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
- ixgbe_read_pba_num_generic(hw, &part_num);
- dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
- hw->mac.type, hw->phy.type,
- (part_num >> 8), (part_num & 0xff));
-
- if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
- dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
- "this card is not sufficient for optimal "
- "performance.\n");
- dev_warn(&pdev->dev, "For optimal performance a x8 "
- "PCI-Express slot is required.\n");
- }
-
/* reset the hardware with the new settings */
hw->mac.ops.start_hw(hw);
- /* link_config depends on start_hw being called at least once */
+ /* link_config depends on ixgbe_start_hw being called at least once */
err = ixgbe_link_config(hw);
if (err) {
- dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
+ DPRINTK(PROBE, ERR, "setup_link_speed FAILED %d\n", err);
goto err_register;
}
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
+#ifdef CONFIG_IXGBE_NAPI
ixgbe_napi_add_all(adapter);
+#endif
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_register;
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+ ixgbe_sysfs_create(adapter);
+
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
- if (dca_add_requester(&pdev->dev) == 0) {
- adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
- /* always use CB2 mode, difference is masked
- * in the CB driver */
- IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
- ixgbe_setup_dca(adapter);
+ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) {
+ if (dca_add_requester(&pdev->dev) == 0) {
+ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ /* always use CB2 mode, difference is masked
+ * in the CB driver */
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
+ ixgbe_setup_dca(adapter);
+ }
}
+
#endif
+ /* print all messages at the end so that we use our eth%d name */
+ /* print bus type/speed/width info */
+ DPRINTK(PROBE, INFO, "(PCI Express:%s:%s) ",
+ ((hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
+ (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+ ("Unknown"));
+
+ /* print the MAC address */
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
- dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
+ if ((hw->phy.type == ixgbe_phy_nl) &&
+ (hw->phy.sfp_type != ixgbe_sfp_type_not_present))
+ DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d, SFP+: %d\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type);
+ else
+ DPRINTK(PROBE, INFO, "MAC: %d, PHY: %d\n",
+ hw->mac.type, hw->phy.type);
+
+ if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
+ DPRINTK(PROBE, WARNING, "PCI-Express bandwidth available for "
+ "this card is not sufficient for optimal "
+ "performance.\n");
+ DPRINTK(PROBE, WARNING, "For optimal performance a x8 "
+ "PCI-Express slot is required.\n");
+ }
+
+#ifndef IXGBE_NO_INET_LRO
+ DPRINTK(PROBE, INFO, "In-kernel LRO is enabled \n");
+#else
+#ifndef IXGBE_NO_LRO
+ DPRINTK(PROBE, INFO, "Internal LRO is enabled \n");
+#else
+ DPRINTK(PROBE, INFO, "LRO is disabled \n");
+#endif
+#endif
+ DPRINTK(PROBE, INFO, "Intel(R) 10 Gigabit Network Connection\n");
cards_found++;
return 0;
err_register:
ixgbe_release_hw_control(adapter);
-err_hw_init:
err_sw_init:
- ixgbe_reset_interrupt_capability(adapter);
-err_eeprom:
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->sfp_task);
+ ixgbe_reset_interrupt_capability(adapter);
+#ifdef IXGBE_TCP_TIMER
+ iounmap(adapter->msix_addr);
+err_map_msix:
+#endif
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
}
#endif
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+ ixgbe_sysfs_remove(adapter);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
ixgbe_release_hw_control(adapter);
+#ifdef IXGBE_TCP_TIMER
+ iounmap(adapter->msix_addr);
+#endif
iounmap(adapter->hw.hw_addr);
pci_release_regions(pdev);
DPRINTK(PROBE, INFO, "complete\n");
+#ifdef CONFIG_IXGBE_NAPI
ixgbe_napi_del_all(adapter);
+#endif
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
pci_disable_device(pdev);
}
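+/* lets the shared code read PCI config space through the pdev; hooked
+ * up via the IXGBE_READ_PCIE_WORD macro in ixgbe_osdep.h */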
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+ u16 value;
+ struct ixgbe_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, &value);
+ return value;
+}
+
+#ifdef HAVE_PCI_ERS
/**
* ixgbe_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev->priv;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
ixgbe_down(adapter);
pci_disable_device(pdev);
- /* Request a slot reset. */
+ /* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev->priv;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
DPRINTK(PROBE, ERR,
- "Cannot re-enable PCI device after reset.\n");
+ "Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
static void ixgbe_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev->priv;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev)) {
if (ixgbe_up(adapter)) {
.resume = ixgbe_io_resume,
};
+#endif
static struct pci_driver ixgbe_driver = {
.name = ixgbe_driver_name,
.id_table = ixgbe_pci_tbl,
.suspend = ixgbe_suspend,
.resume = ixgbe_resume,
#endif
+#ifndef USE_REBOOT_NOTIFIER
.shutdown = ixgbe_shutdown,
+#endif
+#ifdef HAVE_PCI_ERS
.err_handler = &ixgbe_err_handler
+#endif
};
+bool ixgbe_is_ixgbe(struct pci_dev *pcidev)
+{
+	return (pci_dev_driver(pcidev) == &ixgbe_driver);
+}
+
/**
* ixgbe_init_module - Driver Registration Routine
*
**/
static int __init ixgbe_init_module(void)
{
- printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
- ixgbe_driver_string, ixgbe_driver_version);
+ printk(KERN_INFO "ixgbe: %s - version %s\n", ixgbe_driver_string,
+ ixgbe_driver_version);
- printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
+ printk(KERN_INFO "%s\n", ixgbe_copyright);
+ ixgbe_dcb_netlink_register();
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
dca_register_notify(&dca_notifier);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
dca_unregister_notify(&dca_notifier);
#endif
+ ixgbe_dcb_netlink_unregister();
pci_unregister_driver(&ixgbe_driver);
}
return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
-
module_exit(ixgbe_exit_module);
/* ixgbe_main.c */
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of ixgbe
+ * includes register access macros
+ */
+
+#ifndef _IXGBE_OSDEP_H_
+#define _IXGBE_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include "kcompat.h"
+
+
+#ifndef msleep
+#define msleep(x) do { if (in_interrupt()) { \
+			/* Don't sleep in interrupt context! */ \
+			BUG(); \
+		} else { \
+			msleep(x); \
+		} } while (0)
+
+#endif
+
+#undef ASSERT
+
+#ifdef DBG
+#define hw_dbg(hw, S, A...) printk(KERN_DEBUG S, A)
+#else
+#define hw_dbg(hw, S, A...) do {} while (0)
+#endif
+
+#ifdef DBG
+#define IXGBE_WRITE_REG(a, reg, value) do {\
+ switch (reg) { \
+ case IXGBE_EIMS: \
+ case IXGBE_EIMC: \
+ case IXGBE_EIAM: \
+ case IXGBE_EIAC: \
+ case IXGBE_EICR: \
+ case IXGBE_EICS: \
+ printk("%s: Reg - 0x%05X, value - 0x%08X\n", __FUNCTION__, \
+ reg, (u32)(value)); \
+ default: \
+ break; \
+ } \
+ writel((value), ((a)->hw_addr + (reg))); \
+} while (0)
+#else
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#endif
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
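+/* reading back any register (STATUS here) forces posted writes to complete */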
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+struct ixgbe_hw;
+extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
+#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word
+#define IXGBE_EEPROM_GRANT_ATTEMPS 100
+
+#endif /* _IXGBE_OSDEP_H_ */
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include "ixgbe.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define IXGBE_MAX_NIC 8
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define IXGBE_PARAM_INIT { [0 ... IXGBE_MAX_NIC] = OPTION_UNSET }
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user specified value or the
+ * user asking for the default value.
+ * The true default values are loaded in when ixgbe_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define IXGBE_PARAM(X, desc) \
+ static const int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \
+ MODULE_PARM(X, "1-" __MODULE_STRING(IXGBE_MAX_NIC) "i"); \
+ MODULE_PARM_DESC(X, desc);
+#else
+#define IXGBE_PARAM(X, desc) \
+ static int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
+#endif
+
+/* Interrupt Type
+ *
+ * Valid Range: 0-2
+ * - 0 - Legacy Interrupt
+ * - 1 - MSI Interrupt
+ * - 2 - MSI-X Interrupt(s)
+ *
+ * Default Value: 2
+ */
+IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2");
+#define IXGBE_INT_LEGACY 0
+#define IXGBE_INT_MSI 1
+#define IXGBE_INT_MSIX 2
+#define IXGBE_DEFAULT_INT IXGBE_INT_MSIX
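+/* for example, "modprobe ixgbe InterruptType=1,2" would request MSI on
+ * the first port and MSI-X on the second (one comma-separated value
+ * per NIC, applied in probe order) */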
+
+/* MQ - Multiple Queue enable/disable
+ *
+ * Valid Range: 0, 1
+ * - 0 - disables MQ
+ * - 1 - enables MQ
+ *
+ * Default Value: 1
+ */
+
+IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1");
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+/* DCA - Direct Cache Access (DCA) Enable/Disable
+ *
+ * Valid Range: 0, 1
+ * - 0 - disables DCA
+ * - 1 - enables DCA
+ *
+ * Default Value: 1
+ */
+
+IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, default 1");
+
+#endif
+/* RSS - Receive-Side Scaling (RSS) Descriptor Queues
+ *
+ * Valid Range: 0-16
+ * - 0 - disables RSS
+ * - 1 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()).
+ * - 2-16 - enables RSS and sets the Desc. Q's to the specified value.
+ *
+ * Default Value: 1
+ */
+
+IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, default 1=number of cpus");
+
+/* VMDQ - Virtual Machine Device Queues (VMDQ)
+ *
+ * Valid Range: 0-16
+ * - 0/1 - disables VMDQ by allocating only a single queue.
+ * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value.
+ *
+ * Default Value: 1
+ */
+
+IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable (default), 2-16 enable");
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-500000 (0=off)
+ *
+ * Default Value: 8000
+ */
+#define DEFAULT_ITR 8000
+IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, (100-500000), default 8000");
+#define MAX_ITR 500000
+#define MIN_ITR 100
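+/* the default of 8000 ints/s corresponds to one interrupt every 125 us */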
+
+#ifndef IXGBE_NO_LLI
+/* LLIPort (Low Latency Interrupt TCP Port)
+ *
+ * Valid Range: 0 - 65535
+ *
+ * Default Value: 0 (disabled)
+ */
+IXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)");
+
+#define DEFAULT_LLIPORT 0
+#define MAX_LLIPORT 0xFFFF
+#define MIN_LLIPORT 0
+
+/* LLIPush (Low Latency Interrupt on TCP Push flag)
+ *
+ * Valid Range: 0,1
+ *
+ * Default Value: 0 (disabled)
+ */
+IXGBE_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1)");
+
+#define DEFAULT_LLIPUSH 0
+#define MAX_LLIPUSH 1
+#define MIN_LLIPUSH 0
+
+/* LLISize (Low Latency Interrupt on Packet Size)
+ *
+ * Valid Range: 0 - 1500
+ *
+ * Default Value: 0 (disabled)
+ */
+IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)");
+
+#define DEFAULT_LLISIZE 0
+#define MAX_LLISIZE 1500
+#define MIN_LLISIZE 0
+#endif /* IXGBE_NO_LLI */
+
+#ifndef IXGBE_NO_INET_LRO
+/* LROAggr (Large Receive Offload)
+ *
+ * Valid Range: 2 - 44
+ *
+ * Default Value: 32
+ */
+IXGBE_PARAM(LROAggr, "LRO - Maximum packets to aggregate");
+
+#define DEFAULT_LRO_AGGR 32
+#define MAX_LRO_AGGR 44
+#define MIN_LRO_AGGR 2
+
+#endif
+/* Rx buffer mode
+ *
+ * Valid Range: 0-2 0 = 1buf_mode_always, 1 = ps_mode_always and 2 = optimal
+ *
+ * Default Value: 2
+ */
+IXGBE_PARAM(RxBufferMode, "0=1 descriptor per packet,\n"
+ "\t\t\t1=use packet split, multiple descriptors per jumbo frame\n"
+ "\t\t\t2 (default)=use 1buf mode for 1500 mtu, packet split for jumbo");
+
+#define IXGBE_RXBUFMODE_1BUF_ALWAYS 0
+#define IXGBE_RXBUFMODE_PS_ALWAYS 1
+#define IXGBE_RXBUFMODE_OPTIMAL 2
+#define IXGBE_DEFAULT_RXBUFMODE IXGBE_RXBUFMODE_OPTIMAL
+
+struct ixgbe_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct ixgbe_opt_list {
+ int i;
+ char *str;
+ } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit ixgbe_validate_option(unsigned int *value,
+ struct ixgbe_option *opt)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ printk(KERN_INFO "ixgbe: %s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ printk(KERN_INFO "ixgbe: %s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ printk(KERN_INFO "ixgbe: %s set to %d\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ struct ixgbe_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ printk(KERN_INFO "%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+#define LIST_LEN(l) (sizeof(l) / sizeof(l[0]))
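+/* local equivalent of the kernel's ARRAY_SIZE() macro */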
+
+/**
+ * ixgbe_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+
+ if (bd >= IXGBE_MAX_NIC) {
+ printk(KERN_NOTICE
+ "Warning: no configuration for board #%d\n", bd);
+ printk(KERN_NOTICE "Using defaults for all values\n");
+#ifndef module_param_array
+ bd = IXGBE_MAX_NIC;
+#endif
+ }
+
+ { /* Interrupt Type */
+ unsigned int i_type;
+ static struct ixgbe_option opt = {
+ .type = range_option,
+ .name = "Interrupt Type",
+ .err =
+ "using default of "__MODULE_STRING(IXGBE_DEFAULT_INT),
+ .def = IXGBE_DEFAULT_INT,
+ .arg = { .r = { .min = IXGBE_INT_LEGACY,
+ .max = IXGBE_INT_MSIX}}
+ };
+
+#ifdef module_param_array
+ if (num_InterruptType > bd) {
+#endif
+ i_type = InterruptType[bd];
+ ixgbe_validate_option(&i_type, &opt);
+ switch (i_type) {
+ case IXGBE_INT_MSIX:
+			if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE))
+ printk(KERN_INFO
+ "Ignoring MSI-X setting; "
+ "support unavailable.\n");
+ break;
+ case IXGBE_INT_MSI:
+			if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE)) {
+ printk(KERN_INFO
+ "Ignoring MSI setting; "
+ "support unavailable.\n");
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_MSIX_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+ }
+ break;
+ case IXGBE_INT_LEGACY:
+ default:
+ adapter->flags &= ~IXGBE_FLAG_MSIX_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_MSI_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+ break;
+ }
+#ifdef module_param_array
+ } else {
+ adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_MSI_CAPABLE;
+ }
+#endif
+ }
+ { /* Multiple Queue Support */
+ static struct ixgbe_option opt = {
+ .type = enable_option,
+ .name = "Multiple Queue Support",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+#ifdef module_param_array
+ if (num_MQ > bd) {
+#endif
+ unsigned int mq = MQ[bd];
+ ixgbe_validate_option(&mq, &opt);
+ if (mq)
+ adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
+ else
+ adapter->flags &= ~IXGBE_FLAG_MQ_CAPABLE;
+#ifdef module_param_array
+ } else {
+ if (opt.def == OPTION_ENABLED)
+ adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
+ else
+ adapter->flags &= ~IXGBE_FLAG_MQ_CAPABLE;
+ }
+#endif
+ /* Check Interoperability */
+ if ((adapter->flags & IXGBE_FLAG_MQ_CAPABLE) &&
+ !(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) {
+ DPRINTK(PROBE, INFO,
+ "Multiple queues are not supported while MSI-X "
+ "is disabled. Disabling Multiple Queues.\n");
+ adapter->flags &= ~IXGBE_FLAG_MQ_CAPABLE;
+ }
+ }
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ { /* Direct Cache Access (DCA) */
+ static struct ixgbe_option opt = {
+ .type = enable_option,
+ .name = "Direct Cache Access (DCA)",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+ unsigned int dca = opt.def;
+
+#ifdef module_param_array
+ if (num_DCA > bd) {
+#endif
+ dca = DCA[bd];
+ ixgbe_validate_option(&dca, &opt);
+ if (!dca)
+ adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
+
+ /* Check Interoperability */
+ if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) {
+ DPRINTK(PROBE, INFO, "DCA is disabled\n");
+ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+ }
+#ifdef module_param_array
+ } else {
+ /* make sure to clear the capability flag if the
+ * option is disabled by default above */
+ if (opt.def == OPTION_DISABLED)
+ adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
+ }
+#endif
+ }
+#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
+ { /* Receive-Side Scaling (RSS) */
+ static struct ixgbe_option opt = {
+ .type = range_option,
+ .name = "Receive-Side Scaling (RSS)",
+ .err = "using default.",
+ .def = OPTION_ENABLED,
+ .arg = { .r = { .min = OPTION_DISABLED,
+ .max = IXGBE_MAX_RSS_INDICES}}
+ };
+ unsigned int rss = RSS[bd];
+
+#ifdef module_param_array
+ if (num_RSS > bd) {
+#endif
+ switch (rss) {
+ case 1:
+ /*
+ * Base it off num_online_cpus() with
+ * a hardware limit cap.
+ */
+ rss = min(IXGBE_MAX_RSS_INDICES,
+ (int)num_online_cpus());
+ break;
+ default:
+ ixgbe_validate_option(&rss, &opt);
+ break;
+ }
+ adapter->ring_feature[RING_F_RSS].indices = rss;
+ if (rss)
+ adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+#ifdef module_param_array
+ } else {
+ if (opt.def == OPTION_DISABLED) {
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ } else {
+ rss = min(IXGBE_MAX_RSS_INDICES,
+ (int)num_online_cpus());
+ adapter->ring_feature[RING_F_RSS].indices = rss;
+ adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+ }
+ }
+#endif
+ /* Check Interoperability */
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ if (!(adapter->flags & IXGBE_FLAG_RSS_CAPABLE)) {
+ DPRINTK(PROBE, INFO,
+ "RSS is not supported on this "
+ "hardware. Disabling RSS.\n");
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ adapter->ring_feature[RING_F_RSS].indices = 0;
+ } else if (!(adapter->flags & IXGBE_FLAG_MQ_CAPABLE)) {
+ DPRINTK(PROBE, INFO,
+ "RSS is not supported while multiple "
+ "queues are disabled. "
+ "Disabling RSS.\n");
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+ adapter->ring_feature[RING_F_RSS].indices = 0;
+ }
+ }
+ }
+ { /* Virtual Machine Device Queues (VMDQ) */
+ static struct ixgbe_option opt = {
+ .type = range_option,
+ .name = "Virtual Machine Device Queues (VMDQ)",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED,
+ .arg = { .r = { .min = OPTION_DISABLED,
+ .max = IXGBE_MAX_VMDQ_INDICES}}
+ };
+
+#ifdef module_param_array
+ if (num_VMDQ > bd) {
+#endif
+ unsigned int vmdq = VMDQ[bd];
+ ixgbe_validate_option(&vmdq, &opt);
+ adapter->ring_feature[RING_F_VMDQ].indices = vmdq;
+ /* zero or one both mean disabled from our driver's
+ * perspective */
+ if (vmdq > 1)
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+#ifdef module_param_array
+ } else {
+ if (opt.def == OPTION_DISABLED) {
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+ } else {
+ adapter->ring_feature[RING_F_VMDQ].indices = 8;
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+ }
+ }
+#endif
+ /* Check Interoperability */
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ if (!(adapter->flags & IXGBE_FLAG_VMDQ_CAPABLE)) {
+ DPRINTK(PROBE, INFO,
+ "VMDQ is not supported on this "
+ "hardware. Disabling VMDQ.\n");
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+ adapter->ring_feature[RING_F_VMDQ].indices = 0;
+ } else if (!(adapter->flags & IXGBE_FLAG_MQ_CAPABLE)) {
+ DPRINTK(PROBE, INFO,
+ "VMDQ is not supported while multiple "
+ "queues are disabled. "
+ "Disabling VMDQ.\n");
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+ adapter->ring_feature[RING_F_VMDQ].indices = 0;
+ }
+ /* for now, disable RSS when using VMDQ mode */
+ adapter->flags &= ~IXGBE_FLAG_RSS_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ }
+ }
+ { /* Interrupt Throttling Rate */
+ static struct ixgbe_option opt = {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of "__MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR }}
+ };
+ u32 eitr;
+
+#ifdef module_param_array
+ if (num_InterruptThrottleRate > bd) {
+#endif
+ eitr = InterruptThrottleRate[bd];
+ switch (eitr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ /* zero is a special value, we don't want to
+ * turn off ITR completely, just set it to an
+ * insane interrupt rate (like 3.5 Million
+			 * ints/s) */
+ eitr = EITR_REG_TO_INTS_PER_SEC(1);
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "dynamic interrupt "
+ "throttling enabled\n");
+ adapter->itr_setting = 1;
+ eitr = DEFAULT_ITR;
+ break;
+ default:
+ ixgbe_validate_option(&eitr, &opt);
+ break;
+ }
+#ifdef module_param_array
+ } else {
+ eitr = DEFAULT_ITR;
+ }
+#endif
+ adapter->eitr_param = eitr;
+ }
+#ifndef IXGBE_NO_LLI
+ { /* Low Latency Interrupt TCP Port*/
+ static struct ixgbe_option opt = {
+ .type = range_option,
+ .name = "Low Latency Interrupt TCP Port",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_LLIPORT),
+ .def = DEFAULT_LLIPORT,
+ .arg = { .r = { .min = MIN_LLIPORT,
+ .max = MAX_LLIPORT }}
+ };
+
+#ifdef module_param_array
+ if (num_LLIPort > bd) {
+#endif
+ adapter->lli_port = LLIPort[bd];
+ if (adapter->lli_port) {
+ ixgbe_validate_option(&adapter->lli_port, &opt);
+ } else {
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ }
+#ifdef module_param_array
+ } else {
+ adapter->lli_port = opt.def;
+ }
+#endif
+ }
+ { /* Low Latency Interrupt on Packet Size */
+ static struct ixgbe_option opt = {
+ .type = range_option,
+ .name = "Low Latency Interrupt on Packet Size",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_LLISIZE),
+ .def = DEFAULT_LLISIZE,
+ .arg = { .r = { .min = MIN_LLISIZE,
+ .max = MAX_LLISIZE }}
+ };
+
+#ifdef module_param_array
+ if (num_LLISize > bd) {
+#endif
+ adapter->lli_size = LLISize[bd];
+ if (adapter->lli_size) {
+ ixgbe_validate_option(&adapter->lli_size, &opt);
+ } else {
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ }
+#ifdef module_param_array
+ } else {
+ adapter->lli_size = opt.def;
+ }
+#endif
+ }
+ { /*Low Latency Interrupt on TCP Push flag*/
+ static struct ixgbe_option opt = {
+ .type = enable_option,
+ .name = "Low Latency Interrupt on TCP Push flag",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+#ifdef module_param_array
+ if (num_LLIPush > bd) {
+#endif
+ unsigned int lli_push = LLIPush[bd];
+ ixgbe_validate_option(&lli_push, &opt);
+ if (lli_push)
+ adapter->flags |= IXGBE_FLAG_LLI_PUSH;
+ else
+ adapter->flags &= ~IXGBE_FLAG_LLI_PUSH;
+#ifdef module_param_array
+ } else {
+ if (opt.def == OPTION_ENABLED)
+ adapter->flags |= IXGBE_FLAG_LLI_PUSH;
+ else
+ adapter->flags &= ~IXGBE_FLAG_LLI_PUSH;
+ }
+#endif
+ }
+#endif /* IXGBE_NO_LLI */
+#ifndef IXGBE_NO_INET_LRO
+ { /* Large Receive Offload - Maximum packets to aggregate */
+ static struct ixgbe_option opt = {
+ .type = range_option,
+ .name = "LRO - Maximum packets to aggregate",
+ .err = "using default of " __MODULE_STRING(DEFAULT_LRO_AGGR),
+ .def = DEFAULT_LRO_AGGR,
+ .arg = { .r = { .min = MIN_LRO_AGGR,
+ .max = MAX_LRO_AGGR }}
+ };
+
+#ifdef module_param_array
+ if (num_LROAggr > bd) {
+#endif
+ adapter->lro_max_aggr = LROAggr[bd];
+ if (adapter->lro_max_aggr) {
+ ixgbe_validate_option(&adapter->lro_max_aggr, &opt);
+ } else {
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ }
+#ifdef module_param_array
+ } else {
+ adapter->lro_max_aggr = opt.def;
+ }
+#endif
+ }
+#endif /* IXGBE_NO_INET_LRO */
+ { /* Rx buffer mode */
+ unsigned int rx_buf_mode;
+ static struct ixgbe_option opt = {
+ .type = range_option,
+ .name = "Rx buffer mode",
+ .err = "using default of "
+ __MODULE_STRING(IXGBE_DEFAULT_RXBUFMODE),
+ .def = IXGBE_DEFAULT_RXBUFMODE,
+ .arg = {.r = {.min = IXGBE_RXBUFMODE_1BUF_ALWAYS,
+ .max = IXGBE_RXBUFMODE_OPTIMAL}}
+ };
+
+#ifdef module_param_array
+ if (num_RxBufferMode > bd) {
+#endif
+ rx_buf_mode = RxBufferMode[bd];
+ ixgbe_validate_option(&rx_buf_mode, &opt);
+ switch (rx_buf_mode) {
+ case IXGBE_RXBUFMODE_OPTIMAL:
+ adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_RX_PS_CAPABLE;
+ break;
+ case IXGBE_RXBUFMODE_PS_ALWAYS:
+ adapter->flags |= IXGBE_FLAG_RX_PS_CAPABLE;
+ break;
+ case IXGBE_RXBUFMODE_1BUF_ALWAYS:
+			adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+			/* fall through */
+		default:
+ break;
+ }
+#ifdef module_param_array
+ } else {
+ adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_RX_PS_CAPABLE;
+ }
+#endif
+ }
+}
+
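+/*
+ * Illustrative sketch (not part of the patch): the per-option pattern used
+ * throughout ixgbe_check_options(), reduced to one enable-type parameter.
+ * The wrapper function and option name are hypothetical; the types, flags
+ * and the ixgbe_validate_option() call are the ones defined in this file.
+ */
+static void ex_check_one_option(struct ixgbe_adapter *adapter,
+				unsigned int user_val)
+{
+	static struct ixgbe_option opt = {
+		.type = enable_option,
+		.name = "Example Feature",
+		.err  = "defaulting to Enabled",
+		.def  = OPTION_ENABLED,
+	};
+
+	/* out-of-range input prints opt.err and falls back to opt.def */
+	ixgbe_validate_option(&user_val, &opt);
+	if (user_val)
+		adapter->flags |= IXGBE_FLAG_MQ_CAPABLE;
+	else
+		adapter->flags &= ~IXGBE_FLAG_MQ_CAPABLE;
+}
+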
*******************************************************************************/
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
+#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
-static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+/**
+ * ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* PHY */
+ phy->ops.identify = &ixgbe_identify_phy_generic;
+ phy->ops.reset = &ixgbe_reset_phy_generic;
+ phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
+ phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
+ phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
+ phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
+ phy->ops.check_link = NULL;
+ phy->ops.get_firmware_version = NULL;
+ phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
+ phy->sfp_type = ixgbe_sfp_type_unknown;
+
+ return 0;
+}
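+
+/*
+ * Illustrative sketch (hypothetical function, not part of the patch): a
+ * MAC-specific init would start from the generic table and override only
+ * the ops that differ, e.g. the NL-EEPROM reset implemented below.
+ */
+static s32 ex_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+	s32 ret = ixgbe_init_phy_ops_generic(hw);
+
+	if (ret)
+		return ret;
+	hw->phy.ops.reset = &ixgbe_reset_phy_nl;	/* SFP-capable 82598 */
+	return 0;
+}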
/**
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
*
**/
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
{
u16 phy_id = 0;
bool valid = false;
* @hw: pointer to hardware structure
*
**/
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
u32 status;
u16 phy_id_high = 0;
* @hw: pointer to hardware structure
*
**/
-static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
{
enum ixgbe_phy_type phy_type;
break;
}
+ hw_dbg(hw, "phy type found is %d\n", phy_type);
return phy_type;
}
return 0;
}
+/**
+ * ixgbe_check_phy_link_tnx - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to the link speed, set on return
+ * @link_up: pointer to the link status, set true on return if link is up
+ *
+ * Reads the VS1 register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 phy_link = 0;
+ u16 phy_speed = 0;
+ u16 phy_data = 0;
+
+ /* Initialize speed and link to default case */
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /*
+ * Check current speed and link status of the PHY register.
+ * This is a vendor specific register and may have to
+ * be changed for other copper PHYs.
+ */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &phy_data);
+ phy_link = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+ phy_speed = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+ if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+ *link_up = true;
+ if (phy_speed ==
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+
+ return status;
+}
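+
+/*
+ * Illustrative caller (hypothetical function, not part of the patch):
+ */
+static void ex_report_tnx_link(struct ixgbe_hw *hw)
+{
+	ixgbe_link_speed speed;
+	bool link_up;
+
+	if (!ixgbe_check_phy_link_tnx(hw, &speed, &link_up) && link_up)
+		hw_dbg(hw, "PHY link up at %s\n",
+		       speed == IXGBE_LINK_SPEED_10GB_FULL ? "10G" : "1G");
+}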
+
+/**
+ * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
+
+ return status;
+}
+
/**
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
u32 i;
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
/* reset the PHY and poll for completion */
hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE,
- (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ (phy_data | IXGBE_MDIO_PHY_XS_RESET));
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
break;
msleep(10);
/* Get init offsets */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto out;
*/
ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
hw_dbg(hw, "DATA: \n");
data_offset++;
hw->eeprom.ops.read(hw, data_offset++,
- &phy_offset);
+ &phy_offset);
for (i = 0; i < edata; i++) {
hw->eeprom.ops.read(hw, data_offset, &eword);
hw->phy.ops.write_reg(hw, phy_offset,
- IXGBE_TWINAX_DEV, eword);
+ IXGBE_TWINAX_DEV, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
- phy_offset);
+ phy_offset);
data_offset++;
phy_offset++;
}
}
/**
- * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns
- * the PHY type.
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
* @hw: pointer to hardware structure
*
- * Searches for and identifies the SFP module. Assigns appropriate PHY type.
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
u8 transmission_media = 0;
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
- &identifier);
+ &identifier);
if (status == IXGBE_ERR_SFP_NOT_PRESENT) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
if (identifier == IXGBE_SFF_IDENTIFIER_SFP) {
hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
- &comp_codes_1g);
+ &comp_codes_1g);
hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
- &comp_codes_10g);
+ &comp_codes_10g);
hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA,
- &transmission_media);
-
- /* ID Module
- * ============
- * 0 SFP_DA_CU
- * 1 SFP_SR
- * 2 SFP_LR
- */
+ &transmission_media);
+
+ /* ID Module
+ * =========
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ */
if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.id = identifier;
hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE0,
- &oui_bytes[0]);
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE1,
- &oui_bytes[1]);
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE2,
- &oui_bytes[2]);
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
vendor_oui =
((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
}
/**
- * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see
- * if it supports a given SFP+ module type, if so it returns the offsets to the
- * phy init sequence block.
+ * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
* @list_offset: offset to the SFP ID list
* @data_offset: offset to the SFP data block
+ *
+ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
+ * so it returns the offsets to the phy init sequence block.
**/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset)
+ u16 *list_offset,
+ u16 *data_offset)
{
u16 sfp_id;
return 0;
}
-/**
- * ixgbe_check_phy_link_tnx - Determine link and speed status
- * @hw: pointer to hardware structure
- *
- * Reads the VS1 register to determine if link is up and the current speed for
- * the PHY.
- **/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up)
-{
- s32 status = 0;
- u32 time_out;
- u32 max_time_out = 10;
- u16 phy_link = 0;
- u16 phy_speed = 0;
- u16 phy_data = 0;
-
- /* Initialize speed and link to default case */
- *link_up = false;
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /*
- * Check current speed and link status of the PHY register.
- * This is a vendor specific register and may have to
- * be changed for other copper PHYs.
- */
- for (time_out = 0; time_out < max_time_out; time_out++) {
- udelay(10);
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &phy_data);
- phy_link = phy_data &
- IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
- phy_speed = phy_data &
- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
- if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
- *link_up = true;
- if (phy_speed ==
- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- break;
- }
- }
-
- return status;
-}
-
-/**
- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version)
-{
- s32 status = 0;
-
- status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- firmware_version);
-
- return status;
-}
-
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
-/* PHY specific */
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset);
+ u16 *list_offset,
+ u16 *data_offset);
#endif /* _IXGBE_PHY_H_ */
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include "ixgbe.h"
+
+/* Ethernet payload size for FCoE to be able to carry full sized FC Frames
+ * 14 byte FCoE header + 24 byte FC header + 2112 max payload + 4 byte CRC
+ * + 4 byte FCoE trailing encapsulation = 2158
+ * This is the Ethernet payload, replacing the default of 1500, and does
+ * not include Ethernet headers, VLAN tags, or Ethernet CRC.
+ */
+#define IXGBE_FCOE_MTU 2158
+
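+/*
+ * Illustrative compile-time restatement of the arithmetic above (the
+ * BUILD_BUG_ON, which must sit inside a function, is an assumption and
+ * not part of the patch):
+ *
+ *	BUILD_BUG_ON(IXGBE_FCOE_MTU != 14 + 24 + 2112 + 4 + 4);
+ */
+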
+static ssize_t ixgbe_show_fcoe_mtu(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", IXGBE_FCOE_MTU);
+}
+
+static struct device_attribute ixgbe_attrs[] = {
+ __ATTR(fcoe-mtu, S_IRUGO, ixgbe_show_fcoe_mtu, NULL),
+};
+
+int ixgbe_sysfs_create(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+ int i;
+
+	for (i = 0; i < ARRAY_SIZE(ixgbe_attrs); i++) {
+ err = device_create_file(&netdev->dev, &ixgbe_attrs[i]);
+ if (err)
+ goto fail;
+ }
+ return 0;
+
+fail:
+	while (--i >= 0)
+ device_remove_file(&netdev->dev, &ixgbe_attrs[i]);
+ return err;
+}
+
+void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+	for (i = 0; i < ARRAY_SIZE(ixgbe_attrs); i++)
+ device_remove_file(&netdev->dev, &ixgbe_attrs[i]);
+}
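+
+/*
+ * Usage note (illustrative): device_create_file() on &netdev->dev makes the
+ * attribute visible in the netdevice's sysfs directory, e.g.
+ * /sys/class/net/<ifname>/fcoe-mtu, readable with a plain read().
+ */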
+
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
-#include <linux/types.h>
+#include "ixgbe_osdep.h"
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
/* Device IDs */
+#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
-#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
#define IXGBE_DEV_ID_82598AT 0x10C8
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
#define IXGBE_EIMC 0x00888
#define IXGBE_EIAC 0x00810
#define IXGBE_EIAM 0x00890
-#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4)))
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+ (0x012300 + ((_i) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FFF
#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
#define IXGBE_TFCS 0x0CE00
/* Receive DMA Registers */
-#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
-#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
-#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
-#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
-#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
-#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + ((_i - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + ((_i - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + ((_i - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + ((_i - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + ((_i - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + ((_i - 64) * 0x40)))
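+/*
+ * Worked example (illustrative): queues 64 and up live in the high register
+ * block, so IXGBE_RDT(70) = 0x0D018 + (70 - 64) * 0x40 = 0x0D198.
+ */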
/*
* Split and Replication Receive Control Registers
* 00-15 : 0x02100 + n*4
#define IXGBE_DRECCCTL_DISABLE 0
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
-#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
-#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x0A204 + ((_i) * 8)))
/* Packet split receive type */
-#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+ (0x0EA00 + ((_i) * 4)))
/* array of 4096 1-bit vlan filters */
#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
/*array of 4096 4-bit vlan vmdq indices */
#define IXGBE_WUPL 0x05900
#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
-#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */
-
-/* Music registers */
+#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host
+ * Filter Table */
+
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
+#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/
+
+/* Wake Up Filter Control */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
+#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all 6 wakeup filters*/
+#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+/* Wake Up Status */
+#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX IXGBE_WUFC_EX
+#define IXGBE_WUS_MC IXGBE_WUFC_MC
+#define IXGBE_WUS_BC IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+
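+/*
+ * Illustrative sketch (hypothetical function, not part of the patch): arming
+ * magic-packet plus link-change wake-up with the masks above. IXGBE_WRITE_REG
+ * and the IXGBE_WUC/IXGBE_WUFC/IXGBE_WUS register offsets are assumed from
+ * ixgbe_osdep.h and the full register list; in the driver this would live
+ * after struct ixgbe_hw is defined.
+ *
+ *	static void ex_arm_wol(struct ixgbe_hw *hw)
+ *	{
+ *		IXGBE_WRITE_REG(hw, IXGBE_WUS, ~0U);
+ *		IXGBE_WRITE_REG(hw, IXGBE_WUFC,
+ *				IXGBE_WUFC_MAG | IXGBE_WUFC_LNKC);
+ *		IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_PME_EN);
+ *	}
+ */
+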
+/* DCB registers */
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
#define IXGBE_BPTC 0x040F4
#define IXGBE_XEC 0x04120
-#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
-#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+ (0x08600 + ((_i) * 4)))
#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_MHADD_MFS_SHIFT 16
/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
/* PHY Types */
#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
#define IXGBE_PHY_INIT_END_NL 0xFFFF
#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* FCRTL Bit Masks */
-#define IXGBE_FCRTL_XONE 0x80000000 /* bit 31, XON enable */
-#define IXGBE_FCRTH_FCEN 0x80000000 /* Rx Flow control enable */
+#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
/* PAP bit masks*/
#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
#define IXGBE_RMCS_RAC 0x00000004
#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx link FC ena */
-#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
/* STATUS Bit Masks */
-#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
/* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP4 0x00000001 /* SDP4 Data Value */
-#define IXGBE_ESDP_SDP5 0x00000002 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000001
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
-#define IXGBE_ESDP_SDP5_DIR 0x00000008 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
/* LEDCTL Bit Masks */
#define IXGBE_LED_IVRT_BASE 0x00000040
#define IXGBE_LED_OFF 0xF
/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
#define IXGBE_AUTOC_KX4_SUPP 0x80000000
#define IXGBE_AUTOC_KX_SUPP 0x40000000
#define IXGBE_AUTOC_PAUSE 0x30000000
#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
-#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
-#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define FIBER_LINK_UP_LIMIT 50
+
+/* PCS1GLSTA Bit Masks */
+#define IXGBE_PCS1GLSTA_LINK_OK 1
+#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
+
+#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
+
+/* PCS1GLCTL Bit Masks */
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg timeout en */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
+#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+
/* SW Semaphore Register bitmasks */
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
#define IXGBE_PCI_LINK_SPEED 0xF
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
/* Number of 100 microseconds we wait for PCI Express master disable */
#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+#ifndef __le16
+/* Little Endian defines */
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+#endif
+#ifndef __be16
+/* Big Endian defines */
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+
+#endif
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
};
/* Flow Control Settings */
-enum ixgbe_fc_type {
+enum ixgbe_fc_mode {
ixgbe_fc_none = 0,
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_default
};
+/* PCI bus types */
+enum ixgbe_bus_type {
+ ixgbe_bus_type_unknown = 0,
+ ixgbe_bus_type_pci,
+ ixgbe_bus_type_pcix,
+ ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum ixgbe_bus_speed {
+ ixgbe_bus_speed_unknown = 0,
+ ixgbe_bus_speed_33,
+ ixgbe_bus_speed_66,
+ ixgbe_bus_speed_100,
+ ixgbe_bus_speed_120,
+ ixgbe_bus_speed_133,
+ ixgbe_bus_speed_2500,
+ ixgbe_bus_speed_5000,
+ ixgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum ixgbe_bus_width {
+ ixgbe_bus_width_unknown = 0,
+ ixgbe_bus_width_pcie_x1,
+ ixgbe_bus_width_pcie_x2,
+ ixgbe_bus_width_pcie_x4 = 4,
+ ixgbe_bus_width_pcie_x8 = 8,
+ ixgbe_bus_width_32,
+ ixgbe_bus_width_64,
+ ixgbe_bus_width_reserved
+};
+
struct ixgbe_addr_filter_info {
u32 num_mc_addrs;
u32 rar_used_count;
bool user_set_promisc;
};
+/* Bus parameters */
+struct ixgbe_bus_info {
+ enum ixgbe_bus_speed speed;
+ enum ixgbe_bus_width width;
+ enum ixgbe_bus_type type;
+
+ u16 func;
+};
+
/* Flow control parameters */
struct ixgbe_fc_info {
u32 high_water; /* Flow Control High-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
- enum ixgbe_fc_type type; /* Type of flow control */
- enum ixgbe_fc_type original_type;
+ enum ixgbe_fc_mode current_mode; /* FC mode in effect */
+ enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
};
/* Statistics counters collected by the MAC */
s32 (*start_hw)(struct ixgbe_hw *);
s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- s32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
+ void (*set_lan_id)(struct ixgbe_hw *);
s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
struct ixgbe_eeprom_info {
struct ixgbe_eeprom_operations ops;
enum ixgbe_eeprom_type type;
- u32 semaphore_delay;
+ u32 semaphore_delay;
u16 word_size;
u16 address_bits;
};
u32 num_rar_entries;
u32 max_tx_queues;
u32 max_rx_queues;
- u32 link_attach_type;
- u32 link_mode_select;
- bool link_settings_loaded;
+ u32 orig_autoc;
+ bool orig_link_settings_stored;
bool autoneg;
- bool autoneg_failed;
+ bool autoneg_succeeded;
};
struct ixgbe_phy_info {
};
struct ixgbe_hw {
- u8 __iomem *hw_addr;
- void *back;
- struct ixgbe_mac_info mac;
- struct ixgbe_addr_filter_info addr_ctrl;
- struct ixgbe_fc_info fc;
- struct ixgbe_phy_info phy;
- struct ixgbe_eeprom_info eeprom;
- u16 device_id;
- u16 vendor_id;
- u16 subsystem_device_id;
- u16 subsystem_vendor_id;
- u8 revision_id;
- bool adapter_stopped;
-};
-
-struct ixgbe_info {
- enum ixgbe_mac_type mac;
- s32 (*get_invariants)(struct ixgbe_hw *);
- struct ixgbe_mac_operations *mac_ops;
- struct ixgbe_eeprom_operations *eeprom_ops;
- struct ixgbe_phy_operations *phy_ops;
+ u8 __iomem *hw_addr;
+ void *back;
+ struct ixgbe_mac_info mac;
+ struct ixgbe_addr_filter_info addr_ctrl;
+ struct ixgbe_fc_info fc;
+ struct ixgbe_phy_info phy;
+ struct ixgbe_eeprom_info eeprom;
+ struct ixgbe_bus_info bus;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
};
+#define ixgbe_call_func(hw, func, params, error) \
+	((func) != NULL ? (func) params : (error))
/* Error Codes */
#define IXGBE_ERR_EEPROM -1
#define IXGBE_ERR_SFP_NOT_PRESENT -20
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+
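+/*
+ * Illustrative use of ixgbe_call_func() (mirrors the ixgbe_api.c wrappers
+ * in this series; the wrapper name here is hypothetical):
+ */
+static inline s32 ex_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+				  u32 device_type, u16 *phy_data)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.read_reg,
+			       (hw, reg_addr, device_type, phy_data),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+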
#endif /* _IXGBE_TYPE_H_ */
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifdef DRIVER_IXGBE
+#include "ixgbe.h"
+#endif
+
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+ PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return pci_map_single(dev, (void *)page_address(page) + offset, size,
+ direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+ int direction)
+{
+ return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+ if (!pci_dma_supported(dev, mask))
+ return -EIO;
+ dev->dma_mask = mask;
+ return 0;
+}
+
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ }
+ }
+ return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+ }
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+ struct net_device *dev;
+ int alloc_size;
+
+ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+ dev = kmalloc(alloc_size, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ memset(dev, 0, alloc_size);
+
+ if (sizeof_priv)
+ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+ dev->name[0] = '\0';
+ ether_setup(dev);
+
+ return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+ const char zaddr[6] = { 0, };
+
+ return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+ return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+ return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+ int off, int size)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ frag->page = page;
+ frag->page_offset = off;
+ frag->size = size;
+ skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+	/* __ffs() is zero-based; ffs() here would be off by one */
+	return result + __ffs(tmp);
+}
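+
+/*
+ * Illustrative usage (hypothetical function, not part of the patch):
+ */
+static inline void ex_walk_bits(void)
+{
+	unsigned long map = 0x89;	/* bits 0, 3 and 7 set */
+	unsigned long bit;
+
+	for (bit = find_next_bit(&map, BITS_PER_LONG, 0);
+	     bit < BITS_PER_LONG;
+	     bit = find_next_bit(&map, BITS_PER_LONG, bit + 1))
+		;	/* bit visits 0, 3, 7 in order */
+}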
+
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+void *_kc_kzalloc(size_t size, int flags)
+{
+ void *ret = kmalloc(size, flags);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
+ unsigned int length)
+{
+ /* 16 == NET_PAD_SKB */
+ struct sk_buff *skb;
+ skb = alloc_skb(length + 16, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ skb_reserve(skb, 16);
+ skb->dev = dev;
+ }
+ return skb;
+}
+#endif /* <= 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int size = PCI_CONFIG_SPACE_LEN, i;
+ u16 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ u16 pcie_link_status;
+
+ if (pcie_cap_offset) {
+ if (!pci_read_config_word(pdev,
+ pcie_cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status))
+ size = PCIE_CONFIG_SPACE_LEN;
+ }
+ pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+ if (adapter->config_space == NULL)
+#else
+ WARN_ON(adapter->config_space != NULL);
+#endif
+ adapter->config_space = kmalloc(size, GFP_KERNEL);
+ if (!adapter->config_space) {
+ printk(KERN_ERR "Out of memory in pci_save_state\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < (size / 4); i++)
+ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+ return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev * pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int size = PCI_CONFIG_SPACE_LEN, i;
+ u16 pcie_cap_offset;
+ u16 pcie_link_status;
+
+ if (adapter->config_space != NULL) {
+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap_offset &&
+ !pci_read_config_word(pdev,
+ pcie_cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status))
+ size = PCIE_CONFIG_SPACE_LEN;
+
+ pci_config_space_ich8lan();
+ for (i = 0; i < (size / 4); i++)
+ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+ kfree(adapter->config_space);
+ adapter->config_space = NULL;
+#endif
+ }
+}
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+
+ if (adapter->config_space != NULL)
+ kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+ if (netdev->reg_state == NETREG_UNINITIALIZED) {
+ kfree((char *)netdev - netdev->padded);
+ } else {
+ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+ netdev->reg_state = NETREG_RELEASED;
+ class_device_put(&netdev->class_dev);
+ }
+#else
+ kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+#endif /* <= 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#ifdef DRIVER_IXGBE
+int ixgbe_sysfs_create(struct ixgbe_adapter *adapter)
+{
+ return 0;
+}
+
+void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter)
+{
+ return;
+}
+
+int ixgbe_dcb_netlink_register(void)
+{
+ return 0;
+}
+
+int ixgbe_dcb_netlink_unregister(void)
+{
+ return 0;
+}
+#endif /* DRIVER_IXGBE */
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+ int work_done;
+ int work_to_do = min(*budget, netdev->quota);
+#ifdef DRIVER_IXGBE
+ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
+ struct napi_struct *napi = netdev->priv;
+#else
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ struct napi_struct *napi = &adapter->rx_ring[0].napi;
+#endif
+ work_done = napi->poll(napi, work_to_do);
+ *budget -= work_done;
+ netdev->quota -= work_done;
+ return work_done ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* <= 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_stop_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_wake_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_start_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+#endif /* <= 2.6.27 */
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <asm/io.h>
+
+/* NAPI enable/disable flags here */
+
+#ifdef _E1000_H_
+#ifdef CONFIG_E1000_NAPI
+#define NAPI
+#endif
+#ifdef E1000_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef E1000E_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef E1000_NO_NAPI
+#undef NAPI
+#endif
+#ifdef E1000E_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+#ifdef _IGB_H_
+#define NAPI
+#endif
+
+#ifdef _IXGB_H_
+#ifdef CONFIG_IXGB_NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+#ifdef DRIVER_IXGBE
+/* enable NAPI for ixgbe by default */
+#undef CONFIG_IXGBE_NAPI
+#define CONFIG_IXGBE_NAPI
+#define NAPI
+#endif
+
+#ifdef _IXGBE_H_
+#ifdef CONFIG_IXGBE_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGBE_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGBE_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+
+#ifdef DRIVER_IXGBE
+#define adapter_struct ixgbe_adapter
+#endif
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_E1000_NAPI
+#define CONFIG_E1000_NAPI
+#endif
+#ifndef CONFIG_E1000E_NAPI
+#define CONFIG_E1000E_NAPI
+#endif
+#ifndef CONFIG_IXGB_NAPI
+#define CONFIG_IXGB_NAPI
+#endif
+#ifdef _IXGBE_H_
+#ifndef CONFIG_IXGBE_NAPI
+#define CONFIG_IXGBE_NAPI
+#endif
+#endif /* _IXGBE_H */
+#else
+#undef CONFIG_E1000_NAPI
+#undef CONFIG_E1000E_NAPI
+#undef CONFIG_IXGB_NAPI
+#ifdef _IXGBE_H_
+#undef CONFIG_IXGBE_NAPI
+#endif /* _IXGBE_H */
+#endif
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT
+#undef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT
+#endif
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+#endif
+#define pci_enable_msi(a) -ENOTSUPP
+#define pci_disable_msi(a) do {} while (0)
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#endif
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned
+ just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+ NETIF_MSG_HW = 0x2000,
+ NETIF_MSG_WOL = 0x4000,
+};
+
+#else
+#define NETIF_MSG_HW 0x2000
+#define NETIF_MSG_WOL 0x4000
+#endif /* HAVE_NETIF_MSG */
+
+#ifndef MII_RESV1
+#define MII_RESV1 0x17 /* Reserved... */
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+
+/*****************************************************************************/
+/* Installations with ethtool version without eeprom, adapter id, or statistics
+ * support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+ u32 cmd;
+ char driver[32];
+ char version[32];
+ char fw_version[32];
+ char bus_info[32];
+ char reserved1[32];
+ char reserved2[16];
+ u32 n_stats;
+ u32 testinfo_len;
+ u32 eedump_len;
+ u32 regdump_len;
+};
+
+struct ethtool_stats {
+ u32 cmd;
+ u32 n_stats;
+ u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+ u32 cmd; /* ETHTOOL_GSTRINGS */
+	u32	string_set;	/* string set id, e.g. ETH_SS_TEST, etc. */
+ u32 len; /* number of strings in the string set */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+ ETH_TEST_FL_OFFLINE = (1 << 0),
+ ETH_TEST_FL_FAILED = (1 << 1),
+};
+struct ethtool_test {
+ u32 cmd;
+ u32 flags;
+ u32 reserved;
+ u32 len;
+ u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+ u32 cmd;
+ u32 magic;
+ u32 offset;
+ u32 len;
+ u8 data[0];
+};
+
+struct ethtool_value {
+ u32 cmd;
+ u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+ u32 cmd;
+ u32 version; /* driver-specific, indicates different chips/revs */
+ u32 len; /* bytes */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+ u32 cmd; /* ETHTOOL_{G,S}COALESCE */
+
+ /* How many usecs to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_max_coalesced_frames
+ * is used.
+ */
+ u32 rx_coalesce_usecs;
+
+ /* How many packets to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause RX interrupts to never be
+ * generated.
+ */
+ u32 rx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 rx_coalesce_usecs_irq;
+ u32 rx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_max_coalesced_frames
+ * is used.
+ */
+ u32 tx_coalesce_usecs;
+
+ /* How many packets to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause TX interrupts to never be
+ * generated.
+ */
+ u32 tx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 tx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay in-memory statistics
+ * block updates. Some drivers do not have an in-memory
+ * statistic block, and in such cases this value is ignored.
+ * This value must not be zero.
+ */
+ u32 stats_block_coalesce_usecs;
+
+ /* Adaptive RX/TX coalescing is an algorithm implemented by
+ * some drivers to improve latency under low packet rates and
+ * improve throughput under high packet rates. Some drivers
+ * only implement one of RX or TX adaptive coalescing. Anything
+ * not implemented by the driver causes these values to be
+ * silently ignored.
+ */
+ u32 use_adaptive_rx_coalesce;
+ u32 use_adaptive_tx_coalesce;
+
+ /* When the packet rate (measured in packets per second)
+ * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+ * used.
+ */
+ u32 pkt_rate_low;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_max_coalesced_frames_low;
+ u32 tx_coalesce_usecs_low;
+ u32 tx_max_coalesced_frames_low;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ u32 pkt_rate_high;
+ u32 rx_coalesce_usecs_high;
+ u32 rx_max_coalesced_frames_high;
+ u32 tx_coalesce_usecs_high;
+ u32 tx_max_coalesced_frames_high;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ u32 rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
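+
+/* Illustrative usage (not part of the original patch): a minimal
+ * get_coalesce handler sketch showing how the fields documented above are
+ * typically filled.  The adapter type and its cached interrupt-throttle
+ * fields are hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static int example_get_coalesce(struct net_device *netdev,
+                                struct ethtool_coalesce *ec)
+{
+	struct example_adapter *a = netdev_priv(netdev);
+
+	/* 0 usecs means "coalesce on frame count only", per the field docs */
+	ec->rx_coalesce_usecs = a->rx_usecs;
+	ec->rx_max_coalesced_frames = a->rx_frames;
+	ec->use_adaptive_rx_coalesce = a->adaptive_rx;
+	return 0;
+}
+#endif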
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
+
+ /* Read only attributes. These indicate the maximum number
+ * of pending RX/TX ring entries the driver will allow the
+ * user to set.
+ */
+ u32 rx_max_pending;
+ u32 rx_mini_max_pending;
+ u32 rx_jumbo_max_pending;
+ u32 tx_max_pending;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ u32 rx_pending;
+ u32 rx_mini_pending;
+ u32 rx_jumbo_pending;
+ u32 tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
+
+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+ * being true) the user may set 'autoneg' here non-zero to have the
+ * pause parameters be auto-negotiated too. In such a case, the
+ * {rx,tx}_pause values below determine what capabilities are
+ * advertised.
+ *
+ * If 'autoneg' is zero or the link is not being auto-negotiated,
+ * then {rx,tx}_pause force the driver to use/not-use pause
+ * flow control.
+ */
+ u32 autoneg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
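+
+/* Illustrative usage (not part of the original patch): a get_pauseparam
+ * sketch reflecting the autoneg semantics described above; the adapter
+ * fields are hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static void example_get_pauseparam(struct net_device *netdev,
+                                   struct ethtool_pauseparam *pause)
+{
+	struct example_adapter *a = netdev_priv(netdev);
+
+	pause->autoneg = a->fc_autoneg;   /* advertise pause via autoneg? */
+	pause->rx_pause = a->rx_pause_en; /* honor received PAUSE frames */
+	pause->tx_pause = a->tx_pause_en; /* transmit PAUSE frames */
+}
+#endif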
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
+ * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
+ * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN 32
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+extern void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+extern int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+extern void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+ PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+ ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+ #define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
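+/* type-safe min/max backported from later kernels: the
+ * (void) (&_x == &_y) comparison provokes a compiler warning
+ * when x and y have different types */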
+#undef min
+#define min(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+#endif
+
+#endif /* 2.4.10 => 2.4.9 */
+
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax() rep_nop()
+#endif
+
+#endif /* 2.4.13 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.13 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+ #define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.13 */
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef CONFIG_E1000_NAPI
+#undef CONFIG_E1000E_NAPI
+#undef CONFIG_IXGB_NAPI
+#ifdef _IXGBE_H_
+#undef CONFIG_IXGBE_NAPI
+#endif /* _IXGBE_H_ */
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x) ((x)->slot_name)
+#endif
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#ifndef IXGBE_NO_LRO
+/* Don't enable LRO for these legacy kernels */
+#define IXGBE_NO_LRO
+#endif
+#endif
+
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+ /* No hurry */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1);
+ }
+}
+#endif
+
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+ spin_lock_bh(&dev->xmit_lock);
+ netif_stop_queue(dev);
+ spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* < 2.4.25 or 2.6.0 <= 2.6.4 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* < 2.4.25 or 2.6.0 <= 2.6.4 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#include <net/sock.h>
+#define sk_protocol protocol
+
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+ /* use default */
+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+ return default_msg_enable_bits;
+ if (debug_value == 0) /* no output */
+ return 0;
+ /* set low N bits */
+	return (1 << debug_value) - 1;
+}
+#endif
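+
+/* Worked example (added for clarity): _kc_netif_msg_init(3, defaults)
+ * returns (1 << 3) - 1 = 0x7, i.e. NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ * NETIF_MSG_LINK; a negative or out-of-range debug_value yields the
+ * caller's default bits, and 0 silences all output.
+ */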
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+#define dev_err(__unused_dev, format, arg...) \
+ printk(KERN_ERR "%s: " format, pci_name(adapter->pdev) , ## arg)
+#define dev_warn(__unused_dev, format, arg...) \
+ printk(KERN_WARNING "%s: " format, pci_name(pdev) , ## arg)
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = NULL;
+ n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each_entry(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ n = pos->next; 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = n)
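+
+/* Illustrative usage (not part of the original patch): the backported
+ * hlist above follows the 2.6 four-argument iterator convention.  The
+ * entry type is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+struct example_entry {
+	int val;
+	struct hlist_node node;
+};
+
+static void example_walk(struct hlist_head *head)
+{
+	struct example_entry *e;
+	struct hlist_node *pos;
+
+	hlist_for_each_entry(e, pos, head, node)
+		printk(KERN_DEBUG "val=%d\n", e->val);
+}
+#endif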
+
+/* we ignore GFP here */
+#define dma_alloc_coherent(dv, sz, dma, gfp) \
+ pci_alloc_consistent(pdev, (sz), (dma))
+#define dma_free_coherent(dv, sz, addr, dma_addr) \
+ pci_free_consistent(pdev, (sz), (addr), (dma_addr))
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+
+#ifndef NETREG_REGISTERED
+#define NETREG_REGISTERED 1
+#define reg_state deadbeaf
+#endif
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+static inline void _kc_synchronize_irq(void)
+{
+ synchronize_irq();
+}
+#undef synchronize_irq
+#define synchronize_irq(X) _kc_synchronize_irq()
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+
+#define pci_set_consistent_dma_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the Red Hat 2.4.21 kernels).
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+extern unsigned long _kc_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
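+
+/* Illustrative usage (not part of the original patch): with the fallback
+ * above, walking the set bits of a bitmap reads the same as on 2.6:
+ */
+#if 0 /* example only, never compiled */
+static void example_walk_bits(void)
+{
+	DECLARE_BITMAP(map, 64);
+	unsigned long i;
+
+	bitmap_zero(map, 64);
+	set_bit(3, map);
+	set_bit(40, map);
+	for (i = find_first_bit(map, 64); i < 64;
+	     i = find_next_bit(map, 64, i + 1))
+		printk(KERN_DEBUG "bit %lu is set\n", i); /* 3, then 40 */
+}
+#endif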
+
+#endif /* 2.6.0 => 2.5.28 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define pci_dma_sync_single_for_cpu pci_dma_sync_single
+#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+ return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x * HZ)/1000 + 2); \
+ } while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC 1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+ return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return m * (HZ / MSEC_PER_SEC);
+#else
+ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
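+
+/* Worked example (added for clarity): with HZ == 100 the first branch is
+ * taken, so _kc_msecs_to_jiffies(25) = (25 + 10 - 1) / 10 = 3 jiffies,
+ * and _kc_jiffies_to_msecs(3) = (1000 / 100) * 3 = 30 ms; msecs are
+ * rounded up to jiffies so a requested delay is never shortened.
+ */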
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+ while (timeout && !signal_pending(current)) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
+ }
+ return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+
+#ifdef pci_dma_mapping_error
+#undef pci_dma_mapping_error
+#endif
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(struct pci_dev *pdev,
+ dma_addr_t dma_addr)
+{
+ return dma_addr == 0;
+}
+
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm) \
+ static struct kparam_array __param_arr_##name \
+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+ sizeof(array[0]), array }; \
+ module_param_call(name, param_array_set, param_array_get, \
+ &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#define PCI_D3cold 4
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC 1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+ return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return m * (HZ / KC_USEC_PER_SEC);
+#else
+ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS 0x0f /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) do{}while(0)
+#endif
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef netdev_alloc_skb
+#define netdev_alloc_skb _kc_netdev_alloc_skb
+extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
+ unsigned int length);
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#ifndef RHEL_RELEASE_CODE
+#define RHEL_RELEASE_CODE 0
+#endif
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) 0
+#endif
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) 0
+#endif
+#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && \
+       ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || \
+       ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) ) || \
+       (AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+ irq_handler_t new_handler = (irq_handler_t) handler;
+ return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
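+
+/* Illustrative usage (not part of the original patch): drivers keep the
+ * 2.6.19+ two-argument handler signature; the wrapper above casts it to
+ * whatever this kernel's request_irq() expects.
+ */
+#if 0 /* example only, never compiled */
+static irqreturn_t example_intr(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+/* ...then in probe():
+ *	request_irq(irq, &example_intr, IRQF_SHARED, "example", netdev);
+ */
+#endif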
+
+#define irq_handler_t new_handler_t
+/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while (0)
+#undef pci_save_state
+extern int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+extern void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+extern void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+#define pci_enable_pcie_error_reporting(dev) do {} while (0)
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#endif /* < 2.6.19 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->pending = 0; \
+ (_work)->func = (void (*)(void *))_func; \
+ (_work)->data = _work; \
+ init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev) \
+ PCI_VENDOR_ID_##ven, (dev), \
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev) if (vg) vg->vlan_devices[id] = dev;
+#define pci_channel_offline(pdev) (pdev->error_state && \
+ pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+ pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+ memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#undef ETHTOOL_GPERMADDR
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+ /* used to look up the real NAPI polling routine */
+ int (*poll)(struct napi_struct *, int);
+ struct net_device poll_dev;
+ int weight;
+};
+#ifdef NAPI
+extern int __kc_adapter_clean(struct net_device *, int *);
+#if defined(DRIVER_IGB) || defined(DRIVER_IXGBE)
+#define netif_rx_complete(_netdev, napi) netif_rx_complete(&(napi)->poll_dev)
+#define netif_rx_schedule_prep(_netdev, napi) \
+ (netif_running(_netdev) && netif_rx_schedule_prep(&(napi)->poll_dev))
+#define netif_rx_schedule(_netdev, napi) netif_rx_schedule(&(napi)->poll_dev)
+#define __netif_rx_schedule(_netdev, napi) __netif_rx_schedule(&(napi)->poll_dev)
+#define napi_enable(napi) do { \
+ /* abuse if_port as a counter */ \
+ if (!adapter->netdev->if_port) { \
+ netif_poll_enable(adapter->netdev); \
+ } \
+ ++adapter->netdev->if_port; \
+ netif_poll_enable(&(napi)->poll_dev); \
+ } while (0)
+#define napi_disable(_napi) do { \
+ netif_poll_disable(&(_napi)->poll_dev); \
+ --adapter->netdev->if_port; \
+ if (!adapter->netdev->if_port) \
+ netif_poll_disable(adapter->netdev); \
+ } while (0)
+
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+ do { \
+ struct napi_struct *__napi = (_napi); \
+ __napi->poll_dev.poll = &(__kc_adapter_clean); \
+ __napi->poll_dev.priv = (_napi); \
+ __napi->poll_dev.weight = (_weight); \
+ dev_hold(&__napi->poll_dev); \
+ set_bit(__LINK_STATE_START, &__napi->poll_dev.state);\
+ _netdev->poll = &(__kc_adapter_clean); \
+ _netdev->weight = (_weight); \
+ __napi->poll = &(_poll); \
+ __napi->weight = (_weight); \
+ set_bit(__LINK_STATE_RX_SCHED, &(_netdev)->state); \
+ set_bit(__LINK_STATE_RX_SCHED, &__napi->poll_dev.state); \
+ } while (0)
+#define netif_napi_del(_napi) \
+ do { \
+ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &(_napi)->poll_dev.state)); \
+ dev_put(&(_napi)->poll_dev); \
+ memset(&(_napi)->poll_dev, 0, sizeof(struct napi_struct));\
+ } while (0)
+#else /* DRIVER_IGB || DRIVER_IXGBE */
+#define netif_rx_complete(netdev, napi) netif_rx_complete(netdev)
+#define netif_rx_schedule_prep(netdev, napi) netif_rx_schedule_prep(netdev)
+#define netif_rx_schedule(netdev, napi) netif_rx_schedule(netdev)
+#define __netif_rx_schedule(netdev, napi) __netif_rx_schedule(netdev)
+#define napi_enable(napi) netif_poll_enable(adapter->netdev)
+#define napi_disable(napi) netif_poll_disable(adapter->netdev)
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+ do { \
+ struct napi_struct *__napi = (_napi); \
+ _netdev->poll = &(__kc_adapter_clean); \
+ _netdev->weight = (_weight); \
+ __napi->poll = &(_poll); \
+ __napi->weight = (_weight); \
+ netif_poll_disable(_netdev); \
+ } while (0)
+#define netif_napi_del(_a) do {} while (0)
+#endif /* DRIVER_IGB || DRIVER_IXGBE */
+#else /* NAPI */
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+ do { \
+ struct napi_struct *__napi = _napi; \
+ _netdev->poll = &(_poll); \
+ _netdev->weight = (_weight); \
+ __napi->poll = &(_poll); \
+ __napi->weight = (_weight); \
+ } while (0)
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#endif /* < 2.6.24 */
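+
+/* Illustrative usage (not part of the original patch): with the shims
+ * above a driver keeps the 2.6.24+ NAPI calling convention; the macros
+ * re-route it through netdev->poll/weight on older kernels.  The poll
+ * routine below is hypothetical.
+ */
+#if 0 /* example only, never compiled */
+static int example_poll(struct napi_struct *napi, int budget)
+{
+	int work_done = 0;
+	/* ...clean up to `budget' rx descriptors, counting them... */
+	return work_done;
+}
+
+static void example_napi_setup(struct net_device *netdev,
+                               struct napi_struct *napi)
+{
+	netif_napi_add(netdev, napi, example_poll, 64);
+}
+#endif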
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#include <linux/pm_qos_params.h>
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+ set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+ remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+ modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+ if (value != PM_QOS_DEFAULT_VALUE) { \
+ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+ pci_name(adapter->pdev)); \
+ } \
+}
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifdef DRIVER_IXGBE
+#ifdef NETIF_F_TSO
+#ifdef NETIF_F_TSO6
+#define netif_set_gso_max_size(_netdev, size) \
+ do { \
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { \
+ _netdev->features &= ~NETIF_F_TSO; \
+ _netdev->features &= ~NETIF_F_TSO6; \
+ } else { \
+ _netdev->features |= NETIF_F_TSO; \
+ _netdev->features |= NETIF_F_TSO6; \
+ } \
+ } while (0)
+#else /* NETIF_F_TSO6 */
+#define netif_set_gso_max_size(_netdev, size) \
+ do { \
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+ _netdev->features &= ~NETIF_F_TSO; \
+ else \
+ _netdev->features |= NETIF_F_TSO; \
+ } while (0)
+#endif /* NETIF_F_TSO6 */
+#else
+#define netif_set_gso_max_size(_netdev, size) do {} while (0)
+#endif /* NETIF_F_TSO */
+#endif /* DRIVER_IXGBE */
+#else /* < 2.6.26 */
+#include <linux/pci-aspm.h>
+#define HAVE_NETDEV_VLAN_FEATURES
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+ do { \
+ u16 pmc = 0; \
+ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+ if (pm) { \
+ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+ &pmc); \
+ } \
+ if (val && (pmc >> 11)) \
+ (dev)->power.can_wakeup = !!(val); \
+ (dev)->power.should_wakeup = !!(val); \
+ } while (0)
+#endif /* 2.6.15 through 2.6.27 */
+
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error(pdev, dma_addr) pci_dma_mapping_error(dma_addr)
+#endif
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#ifdef DRIVER_IXGBE
+#define HAVE_TX_MQ
+#endif
+#endif
+
+#ifdef HAVE_TX_MQ
+extern void _kc_netif_tx_stop_all_queues(struct net_device *);
+extern void _kc_netif_tx_wake_all_queues(struct net_device *);
+extern void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_stop_subqueue((_ndev), (_qi)); \
+ else \
+ netif_stop_queue((_ndev)); \
+ } while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_start_subqueue((_ndev), (_qi)); \
+ else \
+ netif_start_queue((_ndev)); \
+ } while (0)
+#else /* CONFIG_NETDEVICES_MULTIQUEUE */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+#else /* < 2.6.27 */
+#define HAVE_TX_MQ
+#endif /* < 2.6.27 */
+
+#endif /* _KCOMPAT_H_ */
--- /dev/null
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * net/core/ethtool.c - Ethtool ioctl handler
+ * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This file is where we call all the ethtool_ops commands to get
+ * the information ethtool needs. We fall back to calling do_ioctl()
+ * for drivers which haven't been converted to ethtool_ops yet.
+ *
+ * It's GPL, stupid.
+ *
+ * Modification by sfeldma@pobox.com to work as backward compat
+ * solution for pre-ethtool_ops kernels.
+ * - copied struct ethtool_ops from ethtool.h
+ * - defined SET_ETHTOOL_OPS
+ * - put in some #ifndef NETIF_F_xxx wrappers
+ * - changed refs to dev->ethtool_ops to ethtool_ops
+ * - changed dev_ethtool to ethtool_ioctl
+ * - removed EXPORT_SYMBOL()s
+ * - added _kc_ prefix in built-in ethtool_op_xxx ops.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/uaccess.h>
+
+#include "kcompat.h"
+
+#undef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#undef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#undef SPEED_10000
+#define SPEED_10000 10000
+
+#undef ethtool_ops
+#define ethtool_ops _kc_ethtool_ops
+
+struct _kc_ethtool_ops {
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*set_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+ int (*get_regs_len)(struct net_device *);
+ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
+ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
+ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
+ u32 (*get_msglevel)(struct net_device *);
+ void (*set_msglevel)(struct net_device *, u32);
+ int (*nway_reset)(struct net_device *);
+ u32 (*get_link)(struct net_device *);
+ int (*get_eeprom_len)(struct net_device *);
+ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ void (*get_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ int (*set_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ u32 (*get_rx_csum)(struct net_device *);
+ int (*set_rx_csum)(struct net_device *, u32);
+ u32 (*get_tx_csum)(struct net_device *);
+ int (*set_tx_csum)(struct net_device *, u32);
+ u32 (*get_sg)(struct net_device *);
+ int (*set_sg)(struct net_device *, u32);
+ u32 (*get_tso)(struct net_device *);
+ int (*set_tso)(struct net_device *, u32);
+ int (*self_test_count)(struct net_device *);
+ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
+ void (*get_strings)(struct net_device *, u32 stringset, u8 *);
+ int (*phys_id)(struct net_device *, u32);
+ int (*get_stats_count)(struct net_device *);
+ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
+ u64 *);
+} *ethtool_ops = NULL;
+
+#undef SET_ETHTOOL_OPS
+#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
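+
+/* Illustrative usage (not part of the original patch): on these kernels
+ * the macro above assigns the file-scope ethtool_ops pointer rather than
+ * netdev->ethtool_ops, so driver registration code is unchanged:
+ */
+#if 0 /* example only, never compiled */
+static struct ethtool_ops example_ethtool_ops = {
+	.get_link = ethtool_op_get_link, /* one of the generic ops below */
+};
+
+static void example_register(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &example_ethtool_ops);
+}
+#endif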
+
+/*
+ * Some useful ethtool_ops methods that are device independent. If we find that
+ * all drivers want to do the same thing here, we can turn these into dev_()
+ * function calls.
+ */
+
+#undef ethtool_op_get_link
+#define ethtool_op_get_link _kc_ethtool_op_get_link
+u32 _kc_ethtool_op_get_link(struct net_device *dev)
+{
+ return netif_carrier_ok(dev) ? 1 : 0;
+}
+
+#undef ethtool_op_get_tx_csum
+#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
+u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
+{
+#ifdef NETIF_F_IP_CSUM
+ return (dev->features & NETIF_F_IP_CSUM) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_tx_csum
+#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
+int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_IP_CSUM
+ if (data)
+#ifdef NETIF_F_IPV6_CSUM
+ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ else
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+#else
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+#endif
+#endif
+
+ return 0;
+}
+
+#undef ethtool_op_get_sg
+#define ethtool_op_get_sg _kc_ethtool_op_get_sg
+u32 _kc_ethtool_op_get_sg(struct net_device *dev)
+{
+#ifdef NETIF_F_SG
+ return (dev->features & NETIF_F_SG) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_sg
+#define ethtool_op_set_sg _kc_ethtool_op_set_sg
+int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_SG
+ if (data)
+ dev->features |= NETIF_F_SG;
+ else
+ dev->features &= ~NETIF_F_SG;
+#endif
+
+ return 0;
+}
+
+#undef ethtool_op_get_tso
+#define ethtool_op_get_tso _kc_ethtool_op_get_tso
+u32 _kc_ethtool_op_get_tso(struct net_device *dev)
+{
+#ifdef NETIF_F_TSO
+ return (dev->features & NETIF_F_TSO) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_tso
+#define ethtool_op_set_tso _kc_ethtool_op_set_tso
+int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_TSO
+ if (data)
+ dev->features |= NETIF_F_TSO;
+ else
+ dev->features &= ~NETIF_F_TSO;
+#endif
+
+ return 0;
+}
+
+/* Handlers for each ethtool command */
+
+static int ethtool_get_settings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ int err;
+
+ if (!ethtool_ops->get_settings)
+ return -EOPNOTSUPP;
+
+ err = ethtool_ops->get_settings(dev, &cmd);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_settings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_cmd cmd;
+
+ if (!ethtool_ops->set_settings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return ethtool_ops->set_settings(dev, &cmd);
+}
+
+static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_drvinfo info;
+ struct ethtool_ops *ops = ethtool_ops;
+
+ if (!ops->get_drvinfo)
+ return -EOPNOTSUPP;
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GDRVINFO;
+ ops->get_drvinfo(dev, &info);
+
+ if (ops->self_test_count)
+ info.testinfo_len = ops->self_test_count(dev);
+ if (ops->get_stats_count)
+ info.n_stats = ops->get_stats_count(dev);
+ if (ops->get_regs_len)
+ info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_eeprom_len)
+ info.eedump_len = ops->get_eeprom_len(dev);
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_regs(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_regs regs;
+ struct ethtool_ops *ops = ethtool_ops;
+ void *regbuf;
+ int reglen, ret;
+
+ if (!ops->get_regs || !ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+	if (copy_from_user(&regs, useraddr, sizeof(regs)))
+ return -EFAULT;
+
+ reglen = ops->get_regs_len(dev);
+ if (regs.len > reglen)
+ regs.len = reglen;
+
+ regbuf = kmalloc(reglen, GFP_USER);
+ if (!regbuf)
+ return -ENOMEM;
+
+	ops->get_regs(dev, &regs, regbuf);
+
+ ret = -EFAULT;
+	if (copy_to_user(useraddr, &regs, sizeof(regs)))
+ goto out;
+ useraddr += offsetof(struct ethtool_regs, data);
+ if (copy_to_user(useraddr, regbuf, reglen))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(regbuf);
+ return ret;
+}
+
+static int ethtool_get_wol(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+
+ if (!ethtool_ops->get_wol)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_wol(dev, &wol);
+
+ if (copy_to_user(useraddr, &wol, sizeof(wol)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_wol(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_wolinfo wol;
+
+ if (!ethtool_ops->set_wol)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&wol, useraddr, sizeof(wol)))
+ return -EFAULT;
+
+ return ethtool_ops->set_wol(dev, &wol);
+}
+
+static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GMSGLVL };
+
+ if (!ethtool_ops->get_msglevel)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_msglevel(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_msglevel)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ ethtool_ops->set_msglevel(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_nway_reset(struct net_device *dev)
+{
+ if (!ethtool_ops->nway_reset)
+ return -EOPNOTSUPP;
+
+ return ethtool_ops->nway_reset(dev);
+}
+
+static int ethtool_get_link(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GLINK };
+
+ if (!ethtool_ops->get_link)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_link(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(eeprom.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+ goto out;
+
+ ret = ops->get_eeprom(dev, &eeprom, data);
+ if (ret)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
+ goto out;
+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->set_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(eeprom.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+ goto out;
+
+ ret = ops->set_eeprom(dev, &eeprom, data);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+ ret = -EFAULT;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+
+ if (!ethtool_ops->get_coalesce)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_coalesce(dev, &coalesce);
+
+ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_coalesce coalesce;
+
+ if (!ethtool_ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
+ return -EFAULT;
+
+ return ethtool_ops->set_coalesce(dev, &coalesce);
+}
+
+static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+
+ if (!ethtool_ops->get_ringparam)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_ringparam(dev, &ringparam);
+
+ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_ringparam ringparam;
+
+ if (!ethtool_ops->set_ringparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
+ return -EFAULT;
+
+ return ethtool_ops->set_ringparam(dev, &ringparam);
+}
+
+static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+
+ if (!ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_pauseparam(dev, &pauseparam);
+
+ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_pauseparam pauseparam;
+
+ if (!ethtool_ops->set_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
+ return -EFAULT;
+
+ return ethtool_ops->set_pauseparam(dev, &pauseparam);
+}
+
+static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+ if (!ethtool_ops->get_rx_csum)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_rx_csum(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_rx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ ethtool_ops->set_rx_csum(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+ if (!ethtool_ops->get_tx_csum)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_tx_csum(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_tx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_tx_csum(dev, edata.data);
+}
+
+static int ethtool_get_sg(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GSG };
+
+ if (!ethtool_ops->get_sg)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_sg(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_sg(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_sg)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_sg(dev, edata.data);
+}
+
+static int ethtool_get_tso(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GTSO };
+
+ if (!ethtool_ops->get_tso)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_tso(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_tso(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_tso)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_tso(dev, edata.data);
+}
+
+static int ethtool_self_test(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_test test;
+ struct ethtool_ops *ops = ethtool_ops;
+ u64 *data;
+ int ret;
+
+ if (!ops->self_test || !ops->self_test_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&test, useraddr, sizeof(test)))
+ return -EFAULT;
+
+ test.len = ops->self_test_count(dev);
+ data = kmalloc(test.len * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->self_test(dev, &test, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &test, sizeof(test)))
+ goto out;
+ useraddr += sizeof(test);
+ if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_strings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_strings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ switch (gstrings.string_set) {
+ case ETH_SS_TEST:
+ if (!ops->self_test_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->self_test_count(dev);
+ break;
+ case ETH_SS_STATS:
+ if (!ops->get_stats_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->get_stats_count(dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_strings(dev, gstrings.string_set, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_phys_id(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_value id;
+
+ if (!ethtool_ops->phys_id)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&id, useraddr, sizeof(id)))
+ return -EFAULT;
+
+ return ethtool_ops->phys_id(dev, id.data);
+}
+
+static int ethtool_get_stats(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_stats stats;
+ struct ethtool_ops *ops = ethtool_ops;
+ u64 *data;
+ int ret;
+
+ if (!ops->get_ethtool_stats || !ops->get_stats_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&stats, useraddr, sizeof(stats)))
+ return -EFAULT;
+
+ stats.n_stats = ops->get_stats_count(dev);
+ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_ethtool_stats(dev, &stats, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &stats, sizeof(stats)))
+ goto out;
+ useraddr += sizeof(stats);
+ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
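+
+/*
+ * Note: the ETHTOOL_TEST, ETHTOOL_GSTRINGS and ETHTOOL_GSTATS replies
+ * share one layout: the fixed-size request struct is copied back first,
+ * with the kernel-filled length, followed immediately by the
+ * variable-length array.  That is why each handler above advances
+ * useraddr by the struct size before the second copy_to_user().
+ */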
+
+/* The main entry point in this file.  On kernels whose core lacks
+ * ethtool support, the driver's SIOCETHTOOL ioctl handler calls this
+ * directly instead of net/core/dev.c doing the dispatch. */
+
+#define ETHTOOL_OPS_COMPAT
+int ethtool_ioctl(struct ifreq *ifr)
+{
+ struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
+ void *useraddr = (void *) ifr->ifr_data;
+ u32 ethcmd;
+
+ /*
+ * XXX: This can be pushed down into the ethtool_* handlers that
+ * need it. Keep existing behavior for the moment.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!dev || !netif_device_present(dev))
+ return -ENODEV;
+
+ if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GSET:
+ return ethtool_get_settings(dev, useraddr);
+ case ETHTOOL_SSET:
+ return ethtool_set_settings(dev, useraddr);
+ case ETHTOOL_GDRVINFO:
+ return ethtool_get_drvinfo(dev, useraddr);
+ case ETHTOOL_GREGS:
+ return ethtool_get_regs(dev, useraddr);
+ case ETHTOOL_GWOL:
+ return ethtool_get_wol(dev, useraddr);
+ case ETHTOOL_SWOL:
+ return ethtool_set_wol(dev, useraddr);
+ case ETHTOOL_GMSGLVL:
+ return ethtool_get_msglevel(dev, useraddr);
+ case ETHTOOL_SMSGLVL:
+ return ethtool_set_msglevel(dev, useraddr);
+ case ETHTOOL_NWAY_RST:
+ return ethtool_nway_reset(dev);
+ case ETHTOOL_GLINK:
+ return ethtool_get_link(dev, useraddr);
+ case ETHTOOL_GEEPROM:
+ return ethtool_get_eeprom(dev, useraddr);
+ case ETHTOOL_SEEPROM:
+ return ethtool_set_eeprom(dev, useraddr);
+ case ETHTOOL_GCOALESCE:
+ return ethtool_get_coalesce(dev, useraddr);
+ case ETHTOOL_SCOALESCE:
+ return ethtool_set_coalesce(dev, useraddr);
+ case ETHTOOL_GRINGPARAM:
+ return ethtool_get_ringparam(dev, useraddr);
+ case ETHTOOL_SRINGPARAM:
+ return ethtool_set_ringparam(dev, useraddr);
+ case ETHTOOL_GPAUSEPARAM:
+ return ethtool_get_pauseparam(dev, useraddr);
+ case ETHTOOL_SPAUSEPARAM:
+ return ethtool_set_pauseparam(dev, useraddr);
+ case ETHTOOL_GRXCSUM:
+ return ethtool_get_rx_csum(dev, useraddr);
+ case ETHTOOL_SRXCSUM:
+ return ethtool_set_rx_csum(dev, useraddr);
+ case ETHTOOL_GTXCSUM:
+ return ethtool_get_tx_csum(dev, useraddr);
+ case ETHTOOL_STXCSUM:
+ return ethtool_set_tx_csum(dev, useraddr);
+ case ETHTOOL_GSG:
+ return ethtool_get_sg(dev, useraddr);
+ case ETHTOOL_SSG:
+ return ethtool_set_sg(dev, useraddr);
+ case ETHTOOL_GTSO:
+ return ethtool_get_tso(dev, useraddr);
+ case ETHTOOL_STSO:
+ return ethtool_set_tso(dev, useraddr);
+ case ETHTOOL_TEST:
+ return ethtool_self_test(dev, useraddr);
+ case ETHTOOL_GSTRINGS:
+ return ethtool_get_strings(dev, useraddr);
+ case ETHTOOL_PHYS_ID:
+ return ethtool_phys_id(dev, useraddr);
+ case ETHTOOL_GSTATS:
+ return ethtool_get_stats(dev, useraddr);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
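+
+/*
+ * Usage sketch (illustrative only; the handler name is assumed, not part
+ * of this file): a driver built with ETHTOOL_OPS_COMPAT routes
+ * SIOCETHTOOL from its do_ioctl method to the entry point above, e.g.:
+ *
+ *	static int my_do_ioctl(struct net_device *netdev,
+ *	                       struct ifreq *ifr, int cmd)
+ *	{
+ *		switch (cmd) {
+ *		case SIOCETHTOOL:
+ *			return ethtool_ioctl(ifr);
+ *		default:
+ *			return -EOPNOTSUPP;
+ *		}
+ *	}
+ */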
+
+#define mii_if_info _kc_mii_if_info
+struct _kc_mii_if_info {
+ int phy_id;
+ int advertising;
+ int phy_id_mask;
+ int reg_num_mask;
+
+ unsigned int full_duplex : 1; /* is full duplex? */
+ unsigned int force_media : 1; /* is autoneg. disabled? */
+
+ struct net_device *dev;
+ int (*mdio_read) (struct net_device *dev, int phy_id, int location);
+ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
+};
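+
+/*
+ * Illustrative setup (the field values and adapter layout are assumed):
+ * a driver fills this structure once at probe time and then hands it to
+ * the _kc_mii_* helpers declared below:
+ *
+ *	adapter->mii.dev          = netdev;
+ *	adapter->mii.phy_id       = adapter->phy_addr;
+ *	adapter->mii.phy_id_mask  = 0x1f;
+ *	adapter->mii.reg_num_mask = 0x1f;
+ *	adapter->mii.mdio_read    = my_mdio_read;   -- hypothetical accessors
+ *	adapter->mii.mdio_write   = my_mdio_write;
+ */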
+
+struct ethtool_cmd;
+struct mii_ioctl_data;
+
+#undef mii_link_ok
+#define mii_link_ok _kc_mii_link_ok
+#undef mii_nway_restart
+#define mii_nway_restart _kc_mii_nway_restart
+#undef mii_ethtool_gset
+#define mii_ethtool_gset _kc_mii_ethtool_gset
+#undef mii_ethtool_sset
+#define mii_ethtool_sset _kc_mii_ethtool_sset
+#undef mii_check_link
+#define mii_check_link _kc_mii_check_link
+#undef generic_mii_ioctl
+#define generic_mii_ioctl _kc_generic_mii_ioctl
+extern int _kc_mii_link_ok (struct mii_if_info *mii);
+extern int _kc_mii_nway_restart (struct mii_if_info *mii);
+extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
+ struct ethtool_cmd *ecmd);
+extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
+ struct ethtool_cmd *ecmd);
+extern void _kc_mii_check_link (struct mii_if_info *mii);
+extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_changed);
+
+
+struct _kc_pci_dev_ext {
+ struct pci_dev *dev;
+ void *pci_drvdata;
+ struct pci_driver *driver;
+};
+
+struct _kc_net_dev_ext {
+ struct net_device *dev;
+ unsigned int carrier;
+};
+
+
+/**************************************/
+/* mii support */
+
+int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+ u32 advert, bmcr, lpa, nego;
+
+ ecmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+ /* only supports twisted-pair */
+ ecmd->port = PORT_MII;
+
+ /* only supports internal transceiver */
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ /* this isn't fully supported at higher layers */
+ ecmd->phy_address = mii->phy_id;
+
+ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ if (advert & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
+ if (bmcr & BMCR_ANENABLE) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+
+ nego = mii_nway_result(advert & lpa);
+ if (nego == LPA_100FULL || nego == LPA_100HALF)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+ if (nego == LPA_100FULL || nego == LPA_10FULL) {
+ ecmd->duplex = DUPLEX_FULL;
+ mii->full_duplex = 1;
+ } else {
+ ecmd->duplex = DUPLEX_HALF;
+ mii->full_duplex = 0;
+ }
+ } else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+
+ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_MII)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->phy_address != mii->phy_id)
+ return -EINVAL;
+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ /* ignore supported, maxtxpkt, maxrxpkt */
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u32 bmcr, advert, tmp;
+
+ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full)) == 0)
+ return -EINVAL;
+
+ /* advertise only what has been requested */
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ tmp |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ tmp |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ tmp |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ tmp |= ADVERTISE_100FULL;
+ if (advert != tmp) {
+ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
+ mii->advertising = tmp;
+ }
+
+ /* turn on autonegotiation, and force a renegotiate */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
+
+ mii->force_media = 0;
+ } else {
+ u32 bmcr, tmp;
+
+ /* turn off auto negotiation, set speed and duplex */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (ecmd->speed == SPEED_100)
+ tmp |= BMCR_SPEED100;
+ if (ecmd->duplex == DUPLEX_FULL) {
+ tmp |= BMCR_FULLDPLX;
+ mii->full_duplex = 1;
+ } else
+ mii->full_duplex = 0;
+ if (bmcr != tmp)
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
+
+ mii->force_media = 1;
+ }
+ return 0;
+}
+
+int _kc_mii_link_ok (struct mii_if_info *mii)
+{
+ /* first, a dummy read, needed to latch some MII phys */
+ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
+ return 1;
+ return 0;
+}
+
+int _kc_mii_nway_restart (struct mii_if_info *mii)
+{
+ int bmcr;
+ int r = -EINVAL;
+
+ /* if autoneg is off, it's an error */
+ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+
+ if (bmcr & BMCR_ANENABLE) {
+ bmcr |= BMCR_ANRESTART;
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
+ r = 0;
+ }
+
+ return r;
+}
+
+void _kc_mii_check_link (struct mii_if_info *mii)
+{
+ int cur_link = mii_link_ok(mii);
+ int prev_link = netif_carrier_ok(mii->dev);
+
+ if (cur_link && !prev_link)
+ netif_carrier_on(mii->dev);
+ else if (prev_link && !cur_link)
+ netif_carrier_off(mii->dev);
+}
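+
+/*
+ * Usage sketch (the timer callback and adapter names are hypothetical):
+ * drivers typically call mii_check_link() from a periodic watchdog so
+ * the netif_carrier_* state follows the PHY, e.g.:
+ *
+ *	static void my_watchdog(unsigned long data)
+ *	{
+ *		struct my_adapter *adapter = (struct my_adapter *)data;
+ *
+ *		mii_check_link(&adapter->mii);
+ *		mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+ *	}
+ */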
+
+int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_chg_out)
+{
+ int rc = 0;
+ unsigned int duplex_changed = 0;
+
+ if (duplex_chg_out)
+ *duplex_chg_out = 0;
+
+ mii_data->phy_id &= mii_if->phy_id_mask;
+ mii_data->reg_num &= mii_if->reg_num_mask;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
+ case SIOCGMIIPHY:
+ mii_data->phy_id = mii_if->phy_id;
+ /* fall through */
+
+ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
+ case SIOCGMIIREG:
+ mii_data->val_out =
+ mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num);
+ break;
+
+ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
+ case SIOCSMIIREG: {
+ u16 val = mii_data->val_in;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (mii_data->phy_id == mii_if->phy_id) {
+ switch(mii_data->reg_num) {
+ case MII_BMCR: {
+ unsigned int new_duplex = 0;
+ if (val & (BMCR_RESET|BMCR_ANENABLE))
+ mii_if->force_media = 0;
+ else
+ mii_if->force_media = 1;
+ if (mii_if->force_media &&
+ (val & BMCR_FULLDPLX))
+ new_duplex = 1;
+ if (mii_if->full_duplex != new_duplex) {
+ duplex_changed = 1;
+ mii_if->full_duplex = new_duplex;
+ }
+ break;
+ }
+ case MII_ADVERTISE:
+ mii_if->advertising = val;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+
+ mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num, val);
+ break;
+ }
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
+ *duplex_chg_out = 1;
+
+ return rc;
+}
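+
+/*
+ * Usage sketch (illustrative; my_do_ioctl and my_reconfigure_mac are
+ * assumed names): a driver can delegate the MII ioctls here and react
+ * when a SIOCSMIIREG write changed the duplex setting:
+ *
+ *	static int my_do_ioctl(struct net_device *dev,
+ *	                       struct ifreq *ifr, int cmd)
+ *	{
+ *		struct my_adapter *adapter = dev->priv;
+ *		struct mii_ioctl_data *data =
+ *			(struct mii_ioctl_data *)&ifr->ifr_data;
+ *		unsigned int duplex_changed;
+ *		int rc;
+ *
+ *		rc = generic_mii_ioctl(&adapter->mii, data, cmd,
+ *		                       &duplex_changed);
+ *		if (rc == 0 && duplex_changed)
+ *			my_reconfigure_mac(adapter);
+ *		return rc;
+ *	}
+ */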
+