ia64/linux-2.6.18-xen.hg

changeset 787:98897f04b338

net: Intel ixgbe driver

Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 10:52:47 2009 +0000 (2009-01-30)
parents b790b287bf47
children 26ddc59c674d
files drivers/net/Kconfig drivers/net/Makefile drivers/net/ixgbe/Makefile drivers/net/ixgbe/ixgbe.h drivers/net/ixgbe/ixgbe_82598.c drivers/net/ixgbe/ixgbe_api.c drivers/net/ixgbe/ixgbe_api.h drivers/net/ixgbe/ixgbe_common.c drivers/net/ixgbe/ixgbe_common.h drivers/net/ixgbe/ixgbe_dcb.c drivers/net/ixgbe/ixgbe_dcb.h drivers/net/ixgbe/ixgbe_dcb_82598.c drivers/net/ixgbe/ixgbe_dcb_82598.h drivers/net/ixgbe/ixgbe_ethtool.c drivers/net/ixgbe/ixgbe_main.c drivers/net/ixgbe/ixgbe_osdep.h drivers/net/ixgbe/ixgbe_param.c drivers/net/ixgbe/ixgbe_phy.c drivers/net/ixgbe/ixgbe_phy.h drivers/net/ixgbe/ixgbe_type.h drivers/net/ixgbe/kcompat.c drivers/net/ixgbe/kcompat.h
line diff
     1.1 --- a/drivers/net/Kconfig	Fri Jan 30 10:53:27 2009 +0900
     1.2 +++ b/drivers/net/Kconfig	Fri Jan 30 10:52:47 2009 +0000
     1.3 @@ -2318,12 +2318,31 @@ config CHELSIO_T1
     1.4            To compile this driver as a module, choose M here: the module
     1.5            will be called cxgb.
     1.6  
     1.7 +config IXGBE
     1.8 +	tristate "Intel(R) 10GbE PCI Express adapters support"
     1.9 +	depends on PCI && INET
    1.10 +	---help---
    1.11 +	  This driver supports Intel(R) 10GbE PCI Express family of
    1.12 +	  adapters.  For more information on how to identify your adapter, go
    1.13 +	  to the Adapter & Driver ID Guide at:
    1.14 +
    1.15 +	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
    1.16 +
    1.17 +	  For general information and support, go to the Intel support
    1.18 +	  website at:
    1.19 +
    1.20 +	  <http://support.intel.com>
    1.21 +
    1.22 +	  To compile this driver as a module, choose M here. The module
    1.23 +	  will be called ixgbe.
    1.24 +
    1.25  config IXGB
    1.26  	tristate "Intel(R) PRO/10GbE support"
    1.27  	depends on PCI
    1.28  	---help---
    1.29 -	  This driver supports Intel(R) PRO/10GbE family of
    1.30 -	  adapters.  For more information on how to identify your adapter, go
    1.31 +	  This driver supports Intel(R) PRO/10GbE family of adapters for
    1.32 +	  PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
    1.33 +	  instead. For more information on how to identify your adapter, go
    1.34  	  to the Adapter & Driver ID Guide at:
    1.35  
    1.36  	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
    1.37 @@ -2336,8 +2355,7 @@ config IXGB
    1.38  	  More specific information on configuring the driver is in 
    1.39  	  <file:Documentation/networking/ixgb.txt>.
    1.40  
    1.41 -	  To compile this driver as a module, choose M here and read
    1.42 -	  <file:Documentation/networking/net-modules.txt>.  The module
    1.43 +	  To compile this driver as a module, choose M here. The module
    1.44  	  will be called ixgb.
    1.45  
    1.46  config IXGB_NAPI
     2.1 --- a/drivers/net/Makefile	Fri Jan 30 10:53:27 2009 +0900
     2.2 +++ b/drivers/net/Makefile	Fri Jan 30 10:52:47 2009 +0000
     2.3 @@ -9,6 +9,7 @@ endif
     2.4  obj-$(CONFIG_E1000) += e1000/
     2.5  obj-$(CONFIG_IBM_EMAC) += ibm_emac/
     2.6  obj-$(CONFIG_IXGB) += ixgb/
     2.7 +obj-$(CONFIG_IXGBE) += ixgbe/
     2.8  obj-$(CONFIG_CHELSIO_T1) += chelsio/
     2.9  obj-$(CONFIG_BONDING) += bonding/
    2.10  obj-$(CONFIG_GIANFAR) += gianfar_driver.o
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/drivers/net/ixgbe/Makefile	Fri Jan 30 10:52:47 2009 +0000
     3.3 @@ -0,0 +1,39 @@
     3.4 +################################################################################
     3.5 +#
     3.6 +# Intel 10 Gigabit PCI Express Linux driver
     3.7 +# Copyright(c) 1999 - 2007 Intel Corporation.
     3.8 +#
     3.9 +# This program is free software; you can redistribute it and/or modify it
    3.10 +# under the terms and conditions of the GNU General Public License,
    3.11 +# version 2, as published by the Free Software Foundation.
    3.12 +#
    3.13 +# This program is distributed in the hope it will be useful, but WITHOUT
    3.14 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.15 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    3.16 +# more details.
    3.17 +#
    3.18 +# You should have received a copy of the GNU General Public License along with
    3.19 +# this program; if not, write to the Free Software Foundation, Inc.,
    3.20 +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    3.21 +#
    3.22 +# The full GNU General Public License is included in this distribution in
    3.23 +# the file called "COPYING".
    3.24 +#
    3.25 +# Contact Information:
    3.26 +# Linux NICS <linux.nics@intel.com>
    3.27 +# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
    3.28 +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    3.29 +#
    3.30 +################################################################################
    3.31 +
    3.32 +#
    3.33 +# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
    3.34 +#
    3.35 +
    3.36 +obj-$(CONFIG_IXGBE) += ixgbe.o
    3.37 +
    3.38 +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
    3.39 +              ixgbe_82598.o ixgbe_phy.o ixgbe_api.o ixgbe_dcb_82598.o \
    3.40 +              ixgbe_dcb.o ixgbe_param.o kcompat.o
    3.41 +EXTRA_CFLAGS += -DDRIVER_IXGBE -DCONFIG_IXGBE_RSS 
    3.42 +
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/drivers/net/ixgbe/ixgbe.h	Fri Jan 30 10:52:47 2009 +0000
     4.3 @@ -0,0 +1,479 @@
     4.4 +/*******************************************************************************
     4.5 +
     4.6 +  Intel 10 Gigabit PCI Express Linux driver
     4.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
     4.8 +
     4.9 +  This program is free software; you can redistribute it and/or modify it
    4.10 +  under the terms and conditions of the GNU General Public License,
    4.11 +  version 2, as published by the Free Software Foundation.
    4.12 +
    4.13 +  This program is distributed in the hope it will be useful, but WITHOUT
    4.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    4.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    4.16 +  more details.
    4.17 +
    4.18 +  You should have received a copy of the GNU General Public License along with
    4.19 +  this program; if not, write to the Free Software Foundation, Inc.,
    4.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    4.21 +
    4.22 +  The full GNU General Public License is included in this distribution in
    4.23 +  the file called "COPYING".
    4.24 +
    4.25 +  Contact Information:
    4.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
    4.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    4.28 +
    4.29 +*******************************************************************************/
    4.30 +
    4.31 +#ifndef _IXGBE_H_
    4.32 +#define _IXGBE_H_
    4.33 +
    4.34 +#ifndef IXGBE_NO_LRO
    4.35 +#include <net/tcp.h>
    4.36 +#endif
    4.37 +
    4.38 +#include <linux/pci.h>
    4.39 +#include <linux/netdevice.h>
    4.40 +#include <linux/vmalloc.h>
    4.41 +
    4.42 +#ifdef SIOCETHTOOL
    4.43 +#include <linux/ethtool.h>
    4.44 +#endif
    4.45 +#ifdef NETIF_F_HW_VLAN_TX
    4.46 +#include <linux/if_vlan.h>
    4.47 +#endif
    4.48 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
    4.49 +#define IXGBE_DCA
    4.50 +#include <linux/dca.h>
    4.51 +
    4.52 +#endif
    4.53 +
    4.54 +#include "ixgbe_dcb.h"
    4.55 +
    4.56 +#include "kcompat.h"
    4.57 +
    4.58 +#include "ixgbe_api.h"
    4.59 +
    4.60 +#define IXGBE_NO_INET_LRO
    4.61 +#ifndef IXGBE_NO_LRO
    4.62 +#if defined(CONFIG_INET_LRO) || defined(CONFIG_INET_LRO_MODULE)
    4.63 +#include <linux/inet_lro.h>
    4.64 +#define IXGBE_MAX_LRO_DESCRIPTORS		   8
    4.65 +#undef IXGBE_NO_INET_LRO
    4.66 +#define IXGBE_NO_LRO
    4.67 +#endif
    4.68 +#endif /* IXGBE_NO_LRO */
    4.69 +
    4.70 +#define PFX "ixgbe: "
    4.71 +#define DPRINTK(nlevel, klevel, fmt, args...) \
    4.72 +	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
    4.73 +	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
    4.74 +		__FUNCTION__ , ## args)))
    4.75 +
    4.76 +/* TX/RX descriptor defines */
    4.77 +#define IXGBE_DEFAULT_TXD		   1024
    4.78 +#define IXGBE_MAX_TXD			   4096
    4.79 +#define IXGBE_MIN_TXD			     64
    4.80 +
    4.81 +#define IXGBE_DEFAULT_RXD		   1024
    4.82 +#define IXGBE_MAX_RXD			   4096
    4.83 +#define IXGBE_MIN_RXD			     64
    4.84 +
    4.85 +
    4.86 +/* flow control */
    4.87 +#define IXGBE_DEFAULT_FCRTL		0x10000
    4.88 +#define IXGBE_MIN_FCRTL			   0x40
    4.89 +#define IXGBE_MAX_FCRTL			0x7FF80
    4.90 +#define IXGBE_DEFAULT_FCRTH		0x20000
    4.91 +#define IXGBE_MIN_FCRTH			  0x600
    4.92 +#define IXGBE_MAX_FCRTH			0x7FFF0
    4.93 +#define IXGBE_DEFAULT_FCPAUSE		 0xFFFF
    4.94 +#define IXGBE_MIN_FCPAUSE		      0
    4.95 +#define IXGBE_MAX_FCPAUSE		 0xFFFF
    4.96 +
    4.97 +/* Supported Rx Buffer Sizes */
    4.98 +#define IXGBE_RXBUFFER_64    64     /* Used for packet split */
    4.99 +#define IXGBE_RXBUFFER_128   128    /* Used for packet split */
   4.100 +#define IXGBE_RXBUFFER_256   256    /* Used for packet split */
   4.101 +#define IXGBE_RXBUFFER_2048  2048
   4.102 +
   4.103 +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
   4.104 +
   4.105 +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
   4.106 +
   4.107 +#if defined(CONFIG_IXGBE_DCB) || defined(CONFIG_IXGBE_RSS) || \
   4.108 +    defined(CONFIG_IXGBE_VMDQ)
   4.109 +#define CONFIG_IXGBE_MQ
   4.110 +#endif
   4.111 +
   4.112 +/* How many Rx Buffers do we bundle into one write to the hardware ? */
   4.113 +#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
   4.114 +
   4.115 +#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
   4.116 +#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
   4.117 +#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
   4.118 +#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
   4.119 +#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
   4.120 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
   4.121 +#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
   4.122 +
   4.123 +#ifndef IXGBE_NO_LRO
   4.124 +#define IXGBE_LRO_MAX 32	/*Maximum number of LRO descriptors*/
   4.125 +#define IXGBE_LRO_GLOBAL 10
   4.126 +
   4.127 +struct ixgbe_lro_stats {
   4.128 +	u32 flushed;
   4.129 +	u32 coal;
   4.130 +};
   4.131 +
   4.132 +struct ixgbe_lro_desc {
   4.133 +	struct  hlist_node lro_node;
   4.134 +	struct  sk_buff *skb;
   4.135 +	struct  sk_buff *last_skb;
   4.136 +	int     timestamp;
   4.137 +	u32   tsval;
   4.138 +	u32   tsecr;
   4.139 +	u32   source_ip;
   4.140 +	u32   dest_ip;
   4.141 +	u32   next_seq;
   4.142 +	u32   ack_seq;
   4.143 +	u16   window;
   4.144 +	u16   source_port;
   4.145 +	u16   dest_port;
   4.146 +	u16   append_cnt;
   4.147 +	u16   mss;
   4.148 +	u32   data_size;	/*TCP data size*/
   4.149 +	u16   vlan_tag;
   4.150 +};
   4.151 +
   4.152 +struct ixgbe_lro_info {
   4.153 +	struct ixgbe_lro_stats stats;
   4.154 +	int max;		/*Maximum number of packet to coalesce.*/
   4.155 +};
   4.156 +
   4.157 +struct ixgbe_lro_list {
   4.158 +	struct hlist_head active;
   4.159 +	struct hlist_head free;
   4.160 +	int active_cnt;
   4.161 +};
   4.162 +
   4.163 +#endif /* IXGBE_NO_LRO */
   4.164 +/* wrapper around a pointer to a socket buffer,
   4.165 + * so a DMA handle can be stored along with the buffer */
   4.166 +struct ixgbe_tx_buffer {
   4.167 +	struct sk_buff *skb;
   4.168 +	dma_addr_t dma;
   4.169 +	unsigned long time_stamp;
   4.170 +	u16 length;
   4.171 +	u16 next_to_watch;
   4.172 +};
   4.173 +
   4.174 +struct ixgbe_rx_buffer {
   4.175 +	struct sk_buff *skb;
   4.176 +	dma_addr_t dma;
   4.177 +	struct page *page;
   4.178 +	dma_addr_t page_dma;
   4.179 +	unsigned int page_offset;
   4.180 +};
   4.181 +
   4.182 +struct ixgbe_queue_stats {
   4.183 +	u64 packets;
   4.184 +	u64 bytes;
   4.185 +};
   4.186 +
   4.187 +struct ixgbe_ring {
   4.188 +	void *desc;			/* descriptor ring memory */
   4.189 +	dma_addr_t dma;			/* phys. address of descriptor ring */
   4.190 +	unsigned int size;		/* length in bytes */
   4.191 +	unsigned int count;		/* amount of descriptors */
   4.192 +	unsigned int next_to_use;
   4.193 +	unsigned int next_to_clean;
   4.194 +
   4.195 +	int queue_index; /* needed for multiqueue queue management */
   4.196 +	union {
   4.197 +		struct ixgbe_tx_buffer *tx_buffer_info;
   4.198 +		struct ixgbe_rx_buffer *rx_buffer_info;
   4.199 +	};
   4.200 +
   4.201 +	u16 head;
   4.202 +	u16 tail;
   4.203 +
   4.204 +	unsigned int total_bytes;
   4.205 +	unsigned int total_packets;
   4.206 +
   4.207 +	u16 reg_idx; /* holds the special value that gets the hardware register
   4.208 +	              * offset associated with this ring, which is different
   4.209 +	              * for DCB and RSS modes */
   4.210 +
   4.211 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
   4.212 +	/* cpu for tx queue */
   4.213 +	int cpu;
   4.214 +#endif
   4.215 +
   4.216 +	struct ixgbe_queue_stats stats;
   4.217 +	u16 v_idx; /* maps directly to the index for this ring in the hardware
   4.218 +	           * vector array, can also be used for finding the bit in EICR
   4.219 +	           * and friends that represents the vector for this ring */
   4.220 +#ifndef IXGBE_NO_LRO
   4.221 +	/* LRO list for rx queue */
   4.222 +	struct ixgbe_lro_list *lrolist;
   4.223 +#endif
   4.224 +#ifndef IXGBE_NO_INET_LRO
   4.225 +	struct net_lro_mgr  lro_mgr;
   4.226 +	bool lro_used;
   4.227 +#endif
   4.228 +	u16 work_limit;                /* max work per interrupt */
   4.229 +	u16 rx_buf_len;
   4.230 +};
   4.231 +
   4.232 +#define RING_F_DCB  0
   4.233 +#define RING_F_VMDQ 1
   4.234 +#define RING_F_RSS  2
   4.235 +#define IXGBE_MAX_DCB_INDICES   8
   4.236 +#define IXGBE_MAX_RSS_INDICES  16
   4.237 +#define IXGBE_MAX_VMDQ_INDICES 16
   4.238 +struct ixgbe_ring_feature {
   4.239 +	int indices;
   4.240 +	int mask;
   4.241 +};
   4.242 +
   4.243 +#define MAX_RX_QUEUES 64
   4.244 +#define MAX_TX_QUEUES 32
   4.245 +
   4.246 +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
   4.247 +                               ? 8 : 1)
   4.248 +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
   4.249 +
   4.250 +/* MAX_MSIX_Q_VECTORS of these are allocated,
   4.251 + * but we only use one per queue-specific vector.
   4.252 + */
   4.253 +struct ixgbe_q_vector {
   4.254 +	struct ixgbe_adapter *adapter;
   4.255 +#ifdef CONFIG_IXGBE_NAPI
   4.256 +	struct napi_struct napi;
   4.257 +#endif
   4.258 +	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
   4.259 +	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
   4.260 +	u8 rxr_count;     /* Rx ring count assigned to this vector */
   4.261 +	u8 txr_count;     /* Tx ring count assigned to this vector */
   4.262 +	u8 tx_itr;
   4.263 +	u8 rx_itr;
   4.264 +	u32 eitr;
   4.265 +};
   4.266 +
   4.267 +
   4.268 +/* Helper macros to switch between ints/sec and what the register uses.
   4.269 + * And yes, it's the same math going both ways.
   4.270 + */
   4.271 +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
   4.272 +	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 0)
   4.273 +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
   4.274 +
   4.275 +#define IXGBE_DESC_UNUSED(R) \
   4.276 +	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
   4.277 +	(R)->next_to_clean - (R)->next_to_use - 1)
   4.278 +
   4.279 +#define IXGBE_RX_DESC_ADV(R, i)	    \
   4.280 +	(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
   4.281 +#define IXGBE_TX_DESC_ADV(R, i)	    \
   4.282 +	(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
   4.283 +#define IXGBE_TX_CTXTDESC_ADV(R, i)	    \
   4.284 +	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
   4.285 +#define IXGBE_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
   4.286 +#define IXGBE_TX_DESC(R, i)	IXGBE_GET_DESC(R, i, ixgbe_legacy_tx_desc)
   4.287 +#define IXGBE_RX_DESC(R, i)	IXGBE_GET_DESC(R, i, ixgbe_legacy_rx_desc)
   4.288 +
   4.289 +#define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
   4.290 +
   4.291 +#ifdef IXGBE_TCP_TIMER
   4.292 +#define TCP_TIMER_VECTOR 1
   4.293 +#else
   4.294 +#define TCP_TIMER_VECTOR 0
   4.295 +#endif
   4.296 +#define OTHER_VECTOR 1
   4.297 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR)
   4.298 +
   4.299 +#define MAX_MSIX_Q_VECTORS 16
   4.300 +#define MIN_MSIX_Q_VECTORS 2
   4.301 +#define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS)
   4.302 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
   4.303 +
   4.304 +/* board specific private data structure */
   4.305 +struct ixgbe_adapter {
   4.306 +	struct timer_list watchdog_timer;
   4.307 +#ifdef NETIF_F_HW_VLAN_TX
   4.308 +	struct vlan_group *vlgrp;
   4.309 +#endif
   4.310 +	u16 bd_number;
   4.311 +	struct work_struct reset_task;
   4.312 +	struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
   4.313 +	char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
   4.314 +	struct ixgbe_dcb_config dcb_cfg;
   4.315 +	struct ixgbe_dcb_config temp_dcb_cfg;
   4.316 +	u8 dcb_set_bitmap;
   4.317 +
   4.318 +	/* Interrupt Throttle Rate */
   4.319 +	u32 itr_setting;
   4.320 +	u16 eitr_low;
   4.321 +	u16 eitr_high;
   4.322 +
   4.323 +	/* TX */
   4.324 +	struct ixgbe_ring *tx_ring;	/* One per active queue */
   4.325 +	int num_tx_queues;
   4.326 +	u64 restart_queue;
   4.327 +	u64 hw_csum_tx_good;
   4.328 +	u64 lsc_int;
   4.329 +	u64 hw_tso_ctxt;
   4.330 +	u64 hw_tso6_ctxt;
   4.331 +	u32 tx_timeout_count;
   4.332 +	bool detect_tx_hung;
   4.333 +
   4.334 +	/* RX */
   4.335 +	struct ixgbe_ring *rx_ring;	/* One per active queue */
   4.336 +	int num_rx_queues;
   4.337 +	u64 hw_csum_rx_error;
   4.338 +	u64 hw_csum_rx_good;
   4.339 +	u64 non_eop_descs;
   4.340 +#ifndef CONFIG_IXGBE_NAPI
   4.341 +	u64 rx_dropped_backlog;		/* count drops from rx intr handler */
   4.342 +#endif
   4.343 +	int num_msix_vectors;
   4.344 +	struct ixgbe_ring_feature ring_feature[3];
   4.345 +	struct msix_entry *msix_entries;
   4.346 +#ifdef IXGBE_TCP_TIMER
   4.347 +	irqreturn_t (*msix_handlers[MAX_MSIX_COUNT])(int irq, void *data,
   4.348 +	                                             struct pt_regs *regs);
   4.349 +#endif
   4.350 +
   4.351 +	u64 rx_hdr_split;
   4.352 +	u32 alloc_rx_page_failed;
   4.353 +	u32 alloc_rx_buff_failed;
   4.354 +
   4.355 +	/* Some features need tri-state capability,
   4.356 +	 * thus the additional *_CAPABLE flags.
   4.357 +	 */
   4.358 +	u32 flags;
   4.359 +#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
   4.360 +#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 1)
   4.361 +#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 2)
   4.362 +#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 3)
   4.363 +#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 4)
   4.364 +#ifndef IXGBE_NO_LLI
   4.365 +#define IXGBE_FLAG_LLI_PUSH                     (u32)(1 << 5)
   4.366 +#endif
   4.367 +#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 6)
   4.368 +#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 7)
   4.369 +#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 8)
   4.370 +#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9)
   4.371 +#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10)
   4.372 +#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11)
   4.373 +#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12)
   4.374 +#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13)
   4.375 +#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 14)
   4.376 +#define IXGBE_FLAG_DCB_CAPABLE                  (u32)(1 << 15)
   4.377 +#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 16)
   4.378 +#define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17)
   4.379 +#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18)
   4.380 +#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
   4.381 +#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
   4.382 +#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
   4.383 +#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
   4.384 +
   4.385 +/* default to trying for four seconds */
   4.386 +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
   4.387 +
   4.388 +	/* OS defined structs */
   4.389 +	struct net_device *netdev;
   4.390 +	struct pci_dev *pdev;
   4.391 +	struct net_device_stats net_stats;
   4.392 +#ifndef IXGBE_NO_LRO
   4.393 +	struct ixgbe_lro_info lro_data;
   4.394 +#endif
   4.395 +
   4.396 +#ifdef ETHTOOL_TEST
   4.397 +	u32 test_icr;
   4.398 +	struct ixgbe_ring test_tx_ring;
   4.399 +	struct ixgbe_ring test_rx_ring;
   4.400 +#endif
   4.401 +
   4.402 +	/* structs defined in ixgbe_hw.h */
   4.403 +	struct ixgbe_hw hw;
   4.404 +	u16 msg_enable;
   4.405 +	struct ixgbe_hw_stats stats;
   4.406 +#ifndef IXGBE_NO_LLI
   4.407 +	u32 lli_port;
   4.408 +	u32 lli_size;
   4.409 +	u64 lli_int;
   4.410 +#endif
   4.411 +	/* Interrupt Throttle Rate */
   4.412 +	u32 eitr_param;
   4.413 +
   4.414 +	unsigned long state;
   4.415 +	u32 *config_space;
   4.416 +	u64 tx_busy;
   4.417 +#ifndef IXGBE_NO_INET_LRO
   4.418 +	unsigned int lro_max_aggr;
   4.419 +	unsigned int lro_aggregated;
   4.420 +	unsigned int lro_flushed;
   4.421 +	unsigned int lro_no_desc;
   4.422 +#endif
   4.423 +	unsigned int tx_ring_count;
   4.424 +	unsigned int rx_ring_count;
   4.425 +
   4.426 +	u32 link_speed;
   4.427 +	bool link_up;
   4.428 +	unsigned long link_check_timeout;
   4.429 +
   4.430 +	struct work_struct watchdog_task;
   4.431 +	struct work_struct sfp_task;
   4.432 +	struct timer_list sfp_timer;
   4.433 +};
   4.434 +
    4.435 +enum ixgbe_state_t {
   4.436 +	__IXGBE_TESTING,
   4.437 +	__IXGBE_RESETTING,
   4.438 +	__IXGBE_DOWN,
   4.439 +	__IXGBE_SFP_MODULE_NOT_FOUND
   4.440 +};
   4.441 +
   4.442 +
   4.443 +/* needed by ixgbe_main.c */
   4.444 +extern int ixgbe_validate_mac_addr(u8 *mc_addr);
   4.445 +extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
   4.446 +
   4.447 +/* needed by ixgbe_ethtool.c */
   4.448 +extern char ixgbe_driver_name[];
   4.449 +extern const char ixgbe_driver_version[];
   4.450 +
   4.451 +extern int ixgbe_up(struct ixgbe_adapter *adapter);
   4.452 +extern void ixgbe_down(struct ixgbe_adapter *adapter);
   4.453 +extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
   4.454 +extern void ixgbe_reset(struct ixgbe_adapter *adapter);
   4.455 +extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
   4.456 +extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
   4.457 +extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
   4.458 +extern void ixgbe_free_rx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
   4.459 +extern void ixgbe_free_tx_resources(struct ixgbe_adapter *,struct ixgbe_ring *);
   4.460 +extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
   4.461 +
   4.462 +/* needed by ixgbe_dcb_nl.c */
   4.463 +extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
   4.464 +extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
   4.465 +extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
   4.466 +
   4.467 +#ifdef ETHTOOL_OPS_COMPAT
   4.468 +extern int ethtool_ioctl(struct ifreq *ifr);
   4.469 +
   4.470 +#endif
   4.471 +extern int ixgbe_dcb_netlink_register(void);
   4.472 +extern int ixgbe_dcb_netlink_unregister(void);
   4.473 +
   4.474 +extern int ixgbe_sysfs_create(struct ixgbe_adapter *adapter);
   4.475 +extern void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter);
   4.476 +
   4.477 +#ifdef CONFIG_IXGBE_NAPI
   4.478 +extern void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
   4.479 +extern void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
   4.480 +#endif
   4.481 +
   4.482 +#endif /* _IXGBE_H_ */
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/drivers/net/ixgbe/ixgbe_82598.c	Fri Jan 30 10:52:47 2009 +0000
     5.3 @@ -0,0 +1,1147 @@
     5.4 +/*******************************************************************************
     5.5 +
     5.6 +  Intel 10 Gigabit PCI Express Linux driver
     5.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
     5.8 +
     5.9 +  This program is free software; you can redistribute it and/or modify it
    5.10 +  under the terms and conditions of the GNU General Public License,
    5.11 +  version 2, as published by the Free Software Foundation.
    5.12 +
    5.13 +  This program is distributed in the hope it will be useful, but WITHOUT
    5.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    5.16 +  more details.
    5.17 +
    5.18 +  You should have received a copy of the GNU General Public License along with
    5.19 +  this program; if not, write to the Free Software Foundation, Inc.,
    5.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    5.21 +
    5.22 +  The full GNU General Public License is included in this distribution in
    5.23 +  the file called "COPYING".
    5.24 +
    5.25 +  Contact Information:
    5.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
    5.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    5.28 +
    5.29 +*******************************************************************************/
    5.30 +
    5.31 +#include "ixgbe_type.h"
    5.32 +#include "ixgbe_api.h"
    5.33 +#include "ixgbe_common.h"
    5.34 +#include "ixgbe_phy.h"
    5.35 +
    5.36 +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
    5.37 +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
    5.38 +                                             ixgbe_link_speed *speed,
    5.39 +                                             bool *autoneg);
    5.40 +s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
    5.41 +                                             ixgbe_link_speed *speed,
    5.42 +                                             bool *autoneg);
    5.43 +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
    5.44 +s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
    5.45 +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
    5.46 +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
    5.47 +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
    5.48 +                                      ixgbe_link_speed *speed, bool *link_up,
    5.49 +                                      bool link_up_wait_to_complete);
    5.50 +static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
    5.51 +                                            ixgbe_link_speed speed,
    5.52 +                                            bool autoneg,
    5.53 +                                            bool autoneg_wait_to_complete);
    5.54 +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
    5.55 +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
    5.56 +                                               ixgbe_link_speed speed,
    5.57 +                                               bool autoneg,
    5.58 +                                               bool autoneg_wait_to_complete);
    5.59 +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
    5.60 +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
    5.61 +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
    5.62 +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
    5.63 +                         u32 vind, bool vlan_on);
    5.64 +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
    5.65 +static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index);
    5.66 +static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index);
    5.67 +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
    5.68 +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
    5.69 +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
    5.70 +                                u8 *eeprom_data);
    5.71 +u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
    5.72 +
    5.73 +/**
    5.74 + *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
    5.75 + *  @hw: pointer to hardware structure
    5.76 + *
    5.77 + *  Initialize the function pointers and assign the MAC type for 82598.
    5.78 + *  Does not touch the hardware.
    5.79 + **/
    5.80 +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
    5.81 +{
    5.82 +	struct ixgbe_mac_info *mac = &hw->mac;
    5.83 +	struct ixgbe_phy_info *phy = &hw->phy;
    5.84 +	s32 ret_val;
    5.85 +	u16 list_offset, data_offset;
    5.86 +
    5.87 +	ret_val = ixgbe_init_phy_ops_generic(hw);
    5.88 +	ret_val = ixgbe_init_ops_generic(hw);
    5.89 +
    5.90 +	/* MAC */
    5.91 +	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
    5.92 +	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
    5.93 +	mac->ops.get_supported_physical_layer =
    5.94 +	                            &ixgbe_get_supported_physical_layer_82598;
    5.95 +	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
    5.96 +	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
    5.97 +
    5.98 +	/* LEDs */
    5.99 +	mac->ops.blink_led_start = &ixgbe_blink_led_start_82598;
   5.100 +	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_82598;
   5.101 +
   5.102 +	/* RAR, Multicast, VLAN */
   5.103 +	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
   5.104 +	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
   5.105 +	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
   5.106 +	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
   5.107 +
   5.108 +	/* Flow Control */
   5.109 +	mac->ops.setup_fc = &ixgbe_setup_fc_82598;
   5.110 +
   5.111 +	/* Link */
   5.112 +	mac->ops.check_link = &ixgbe_check_mac_link_82598;
   5.113 +	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
   5.114 +		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
   5.115 +		mac->ops.setup_link_speed =
   5.116 +		                     &ixgbe_setup_copper_link_speed_82598;
   5.117 +		mac->ops.get_link_capabilities =
   5.118 +		                     &ixgbe_get_copper_link_capabilities_82598;
   5.119 +	} else {
   5.120 +		mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
   5.121 +		mac->ops.setup_link_speed = &ixgbe_setup_mac_link_speed_82598;
   5.122 +		mac->ops.get_link_capabilities =
   5.123 +		                       &ixgbe_get_link_capabilities_82598;
   5.124 +	}
   5.125 +
   5.126 +	mac->mcft_size       = 128;
   5.127 +	mac->vft_size        = 128;
   5.128 +	mac->num_rar_entries = 16;
   5.129 +	mac->max_tx_queues   = 32;
   5.130 +	mac->max_rx_queues   = 64;
   5.131 +
   5.132 +	/* SFP+ Module */
   5.133 +	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
   5.134 +
   5.135 +	/* Call PHY identify routine to get the phy type */
   5.136 +	phy->ops.identify(hw);
   5.137 +
   5.138 +	/* PHY Init */
   5.139 +	switch (hw->phy.type) {
   5.140 +	case ixgbe_phy_tn:
   5.141 +		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
   5.142 +		phy->ops.get_firmware_version =
   5.143 +		             &ixgbe_get_phy_firmware_version_tnx;
   5.144 +		break;
   5.145 +	case ixgbe_phy_nl:
   5.146 +		phy->ops.reset = &ixgbe_reset_phy_nl;
   5.147 +
   5.148 +		/* Call SFP+ identify routine to get the SFP+ module type */
   5.149 +		ret_val = phy->ops.identify_sfp(hw);
   5.150 +		if (ret_val != 0)
   5.151 +			goto out;
   5.152 +		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
   5.153 +			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
   5.154 +			goto out;
   5.155 +		}
   5.156 +
   5.157 +		/* Check to see if SFP+ module is supported */
   5.158 +		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
   5.159 +		                                            &list_offset,
   5.160 +		                                            &data_offset);
   5.161 +		if (ret_val != 0) {
   5.162 +			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
   5.163 +			goto out;
   5.164 +		}
   5.165 +		break;
   5.166 +	default:
   5.167 +		break;
   5.168 +	}
   5.169 +
   5.170 +out:
   5.171 +	return ret_val;
   5.172 +}
   5.173 +
   5.174 +/**
   5.175 + *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
   5.176 + *  @hw: pointer to hardware structure
   5.177 + *  @speed: pointer to link speed
   5.178 + *  @autoneg: boolean auto-negotiation value
   5.179 + *
   5.180 + *  Determines the link capabilities by reading the AUTOC register.
   5.181 + **/
   5.182 +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
   5.183 +                                             ixgbe_link_speed *speed,
   5.184 +                                             bool *autoneg)
   5.185 +{
   5.186 +	s32 status = 0;
   5.187 +
   5.188 +	/*
   5.189 +	 * Determine link capabilities based on the stored value of AUTOC,
   5.190 +	 * which represents EEPROM defaults.
   5.191 +	 */
   5.192 +	switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) {
   5.193 +	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
   5.194 +		*speed = IXGBE_LINK_SPEED_1GB_FULL;
   5.195 +		*autoneg = false;
   5.196 +		break;
   5.197 +
   5.198 +	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
   5.199 +		*speed = IXGBE_LINK_SPEED_10GB_FULL;
   5.200 +		*autoneg = false;
   5.201 +		break;
   5.202 +
   5.203 +	case IXGBE_AUTOC_LMS_1G_AN:
   5.204 +		*speed = IXGBE_LINK_SPEED_1GB_FULL;
   5.205 +		*autoneg = true;
   5.206 +		break;
   5.207 +
   5.208 +	case IXGBE_AUTOC_LMS_KX4_AN:
   5.209 +	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
   5.210 +		*speed = IXGBE_LINK_SPEED_UNKNOWN;
   5.211 +		if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
   5.212 +			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5.213 +		if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
   5.214 +			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5.215 +		*autoneg = true;
   5.216 +		break;
   5.217 +
   5.218 +	default:
   5.219 +		status = IXGBE_ERR_LINK_SETUP;
   5.220 +		break;
   5.221 +	}
   5.222 +
   5.223 +	return status;
   5.224 +}
   5.225 +
   5.226 +/**
   5.227 + *  ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
   5.228 + *  @hw: pointer to hardware structure
   5.229 + *  @speed: pointer to link speed
   5.230 + *  @autoneg: boolean auto-negotiation value
   5.231 + *
   5.232 + *  Determines the link capabilities by reading the AUTOC register.
   5.233 + **/
   5.234 +s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
   5.235 +                                             ixgbe_link_speed *speed,
   5.236 +                                             bool *autoneg)
   5.237 +{
   5.238 +	s32 status = IXGBE_ERR_LINK_SETUP;
   5.239 +	u16 speed_ability;
   5.240 +
   5.241 +	*speed = 0;
   5.242 +	*autoneg = true;
   5.243 +
   5.244 +	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
   5.245 +	                              IXGBE_MDIO_PMA_PMD_DEV_TYPE,
   5.246 +	                              &speed_ability);
   5.247 +
   5.248 +	if (status == 0) {
   5.249 +		if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
   5.250 +			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5.251 +		if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
   5.252 +			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5.253 +	}
   5.254 +
   5.255 +	return status;
   5.256 +}
   5.257 +
   5.258 +/**
   5.259 + *  ixgbe_get_media_type_82598 - Determines media type
   5.260 + *  @hw: pointer to hardware structure
   5.261 + *
   5.262 + *  Returns the media type (fiber, copper, backplane)
   5.263 + **/
   5.264 +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
   5.265 +{
   5.266 +	enum ixgbe_media_type media_type;
   5.267 +
   5.268 +	/* Media type for I82598 is based on device ID */
   5.269 +	switch (hw->device_id) {
   5.270 +	case IXGBE_DEV_ID_82598:
   5.271 +		/* Default device ID is mezzanine card KX/KX4 */
   5.272 +		media_type = ixgbe_media_type_backplane;
   5.273 +		break;
   5.274 +	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
   5.275 +	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
   5.276 +	case IXGBE_DEV_ID_82598EB_CX4:
   5.277 +	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
   5.278 +	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
   5.279 +	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
   5.280 +	case IXGBE_DEV_ID_82598EB_XF_LR:
   5.281 +	case IXGBE_DEV_ID_82598EB_SFP_LOM:
   5.282 +		media_type = ixgbe_media_type_fiber;
   5.283 +		break;
   5.284 +	case IXGBE_DEV_ID_82598AT:
   5.285 +		media_type = ixgbe_media_type_copper;
   5.286 +		break;
   5.287 +	default:
   5.288 +		media_type = ixgbe_media_type_unknown;
   5.289 +		break;
   5.290 +	}
   5.291 +
   5.292 +	return media_type;
   5.293 +}
   5.294 +
   5.295 +/**
   5.296 + *  ixgbe_fc_enable_82598 - Enable flow control
   5.297 + *  @hw: pointer to hardware structure
   5.298 + *  @packetbuf_num: packet buffer number (0-7)
   5.299 + *
   5.300 + *  Enable flow control according to the current settings.
   5.301 + **/
   5.302 +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
   5.303 +{
   5.304 +	s32 ret_val = 0;
   5.305 +	u32 fctrl_reg;
   5.306 +	u32 rmcs_reg;
   5.307 +	u32 reg;
   5.308 +
   5.309 +	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   5.310 +	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
   5.311 +
   5.312 +	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
   5.313 +	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
   5.314 +
   5.315 +	/*
   5.316 +	 * The possible values of fc.current_mode are:
   5.317 +	 * 0: Flow control is completely disabled
   5.318 +	 * 1: Rx flow control is enabled (we can receive pause frames,
   5.319 +	 *    but not send pause frames).
   5.320 +	 * 2:  Tx flow control is enabled (we can send pause frames but
   5.321 +	 *     we do not support receiving pause frames).
   5.322 +	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
   5.323 +	 * other: Invalid.
   5.324 +	 */
   5.325 +	switch (hw->fc.current_mode) {
   5.326 +	case ixgbe_fc_none:
   5.327 +		/* Flow control completely disabled by software override. */
   5.328 +		break;
   5.329 +	case ixgbe_fc_rx_pause:
   5.330 +		/*
   5.331 +		 * Rx Flow control is enabled and Tx Flow control is
   5.332 +		 * disabled by software override. Since there really
   5.333 +		 * isn't a way to advertise that we are capable of RX
   5.334 +		 * Pause ONLY, we will advertise that we support both
   5.335 +		 * symmetric and asymmetric Rx PAUSE.  Later, we will
   5.336 +		 * disable the adapter's ability to send PAUSE frames.
   5.337 +		 */
   5.338 +		fctrl_reg |= IXGBE_FCTRL_RFCE;
   5.339 +		break;
   5.340 +	case ixgbe_fc_tx_pause:
   5.341 +		/*
   5.342 +		 * Tx Flow control is enabled, and Rx Flow control is
   5.343 +		 * disabled by software override.
   5.344 +		 */
   5.345 +		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
   5.346 +		break;
   5.347 +	case ixgbe_fc_full:
   5.348 +		/* Flow control (both Rx and Tx) is enabled by SW override. */
   5.349 +		fctrl_reg |= IXGBE_FCTRL_RFCE;
   5.350 +		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
   5.351 +		break;
   5.352 +	default:
   5.353 +		hw_dbg(hw, "Flow control param set incorrectly\n");
   5.354 +		ret_val = -IXGBE_ERR_CONFIG;
   5.355 +		goto out;
   5.356 +		break;
   5.357 +	}
   5.358 +
   5.359 +	/* Enable 802.3x based flow control settings. */
   5.360 +	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
   5.361 +	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
   5.362 +
   5.363 +	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
   5.364 +	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
   5.365 +		if (hw->fc.send_xon) {
   5.366 +			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
   5.367 +			                (hw->fc.low_water | IXGBE_FCRTL_XONE));
   5.368 +		} else {
   5.369 +			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
   5.370 +			                hw->fc.low_water);
   5.371 +		}
   5.372 +
   5.373 +		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
   5.374 +		                (hw->fc.high_water | IXGBE_FCRTH_FCEN));
   5.375 +	}
   5.376 +
   5.377 +	/* Configure pause time (2 TCs per register) */
   5.378 +	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
   5.379 +	if ((packetbuf_num & 1) == 0)
   5.380 +		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
   5.381 +	else
   5.382 +		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
   5.383 +	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
   5.384 +
   5.385 +	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
   5.386 +
   5.387 +out:
   5.388 +	return ret_val;
   5.389 +}
   5.390 +
   5.391 +/**
   5.392 + *  ixgbe_setup_fc_82598 - Set up flow control
   5.393 + *  @hw: pointer to hardware structure
   5.394 + *
   5.395 + *  Sets up flow control.
   5.396 + **/
   5.397 +s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
   5.398 +{
   5.399 +	s32 ret_val = 0;
   5.400 +
   5.401 +	/* Validate the packetbuf configuration */
   5.402 +	if (packetbuf_num < 0 || packetbuf_num > 7) {
   5.403 +		hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
   5.404 +		          " 0-7\n", packetbuf_num);
   5.405 +		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
   5.406 +		goto out;
   5.407 +	}
   5.408 +
   5.409 +	/*
   5.410 +	 * Validate the water mark configuration.  Zero water marks are invalid
   5.411 +	 * because it causes the controller to just blast out fc packets.
   5.412 +	 */
   5.413 +	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
   5.414 +		hw_dbg(hw, "Invalid water mark configuration\n");
   5.415 +		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
   5.416 +		goto out;
   5.417 +	}
   5.418 +
   5.419 +	/*
   5.420 +	 * Validate the requested mode.  Strict IEEE mode does not allow
   5.421 +	 * ixgbe_fc_rx_pause because it will cause testing anomalies.
   5.422 +	 */
   5.423 +	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
   5.424 +		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
   5.425 +		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
   5.426 +		goto out;
   5.427 +	}
   5.428 +
   5.429 +	/*
   5.430 +	 * 10gig parts do not have a word in the EEPROM to determine the
   5.431 +	 * default flow control setting, so we explicitly set it to full.
   5.432 +	 */
   5.433 +	if (hw->fc.requested_mode == ixgbe_fc_default)
   5.434 +		hw->fc.requested_mode = ixgbe_fc_full;
   5.435 +
   5.436 +	/*
   5.437 +	 * Save off the requested flow control mode for use later.  Depending
   5.438 +	 * on the link partner's capabilities, we may or may not use this mode.
   5.439 +	 */
   5.440 +	hw->fc.current_mode = hw->fc.requested_mode;
   5.441 +
   5.442 +
   5.443 +	ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num);
   5.444 +
   5.445 +out:
   5.446 +	return ret_val;
   5.447 +}
   5.448 +
/**
 *  ixgbe_setup_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 *
 *  Returns the result of the flow control setup.  NOTE(review): an
 *  IXGBE_ERR_AUTONEG_NOT_COMPLETE status from the polling below is
 *  overwritten by the ixgbe_setup_fc_82598() return value -- confirm
 *  this is intentional (it matches the upstream driver's behavior).
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = 0;

	/* Restart link: AN_RESTART kicks off (re)negotiation */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (hw->phy.autoneg_wait_to_complete) {
		/* Polling only applies to the KX4 autonegotiation modes */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll every 100ms, up to IXGBE_AUTO_NEG_TIME tries */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msleep(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				hw_dbg(hw, "Autonegotiation did not complete.\n");
			}
		}
	}

	/* Set up flow control on packet buffer 0 */
	status = ixgbe_setup_fc_82598(hw, 0);

	/* Add delay to filter out noises during initial link setup */
	msleep(50);

	return status;
}
   5.496 +
/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true is link is up, false otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed.
 *  Always returns 0; results are reported via *speed and *link_up.  When the
 *  NL PHY reports link down, *speed is left unmodified.
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NOTE(review): 0xC79F is deliberately read twice; the first
		 * read presumably returns a latched/stale value so the second
		 * reflects current status -- confirm against the PHY docs.
		 */
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
		                     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			/* Poll every 100ms, up to IXGBE_LINK_UP_TIME tries */
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				/* Up when link bit set AND lanes active */
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msleep(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
				                     IXGBE_TWINAX_DEV,
				                     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
				                     IXGBE_TWINAX_DEV,
				                     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) &&
			    ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		/* PHY says link down: skip the MAC check; *speed untouched */
		if (*link_up == false)
			goto out;
	}

	/* MAC-level link status from the LINKS register */
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		/* Poll every 100ms, up to IXGBE_LINK_UP_TIME tries */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* 82598 links at either 10G or 1G, selected by the SPEED bit */
	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

out:
	return 0;
}
   5.581 +
   5.582 +/**
   5.583 + *  ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
   5.584 + *  @hw: pointer to hardware structure
   5.585 + *  @speed: new link speed
   5.586 + *  @autoneg: true if autonegotiation enabled
   5.587 + *  @autoneg_wait_to_complete: true when waiting for completion is needed
   5.588 + *
   5.589 + *  Set the link speed in the AUTOC register and restarts link.
   5.590 + **/
   5.591 +static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
   5.592 +                                           ixgbe_link_speed speed, bool autoneg,
   5.593 +                                           bool autoneg_wait_to_complete)
   5.594 +{
   5.595 +	s32              status            = 0;
   5.596 +	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
   5.597 +	u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   5.598 +	u32              autoc             = curr_autoc;
   5.599 +	u32              link_mode         = autoc & IXGBE_AUTOC_LMS_MASK;
   5.600 +
   5.601 +	/* Check to see if speed passed in is supported. */
   5.602 +	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
   5.603 +	speed &= link_capabilities;
   5.604 +
   5.605 +	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
   5.606 +		status = IXGBE_ERR_LINK_SETUP;
   5.607 +
   5.608 +	/* Set KX4/KX support according to speed requested */
   5.609 +	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
   5.610 +	         link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
   5.611 +		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
   5.612 +		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
   5.613 +			autoc |= IXGBE_AUTOC_KX4_SUPP;
   5.614 +		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
   5.615 +			autoc |= IXGBE_AUTOC_KX_SUPP;
   5.616 +		if (autoc != curr_autoc)
   5.617 +			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
   5.618 +	}
   5.619 +
   5.620 +	if (status == 0) {
   5.621 +		hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete;
   5.622 +
   5.623 +		/*
   5.624 +		 * Setup and restart the link based on the new values in
   5.625 +		 * ixgbe_hw This will write the AUTOC register based on the new
   5.626 +		 * stored values
   5.627 +		 */
   5.628 +		status = ixgbe_setup_mac_link_82598(hw);
   5.629 +	}
   5.630 +
   5.631 +	return status;
   5.632 +}
   5.633 +
   5.634 +
   5.635 +/**
   5.636 + *  ixgbe_setup_copper_link_82598 - Setup copper link settings
   5.637 + *  @hw: pointer to hardware structure
   5.638 + *
   5.639 + *  Configures link settings based on values in the ixgbe_hw struct.
   5.640 + *  Restarts the link.  Performs autonegotiation if needed.  Restart
   5.641 + *  phy and wait for autonegotiate to finish.  Then synchronize the
   5.642 + *  MAC and PHY.
   5.643 + **/
   5.644 +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
   5.645 +{
   5.646 +	s32 status;
   5.647 +	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   5.648 +	u32 autoc = curr_autoc;
   5.649 +
   5.650 +	/* Restart autonegotiation on PHY */
   5.651 +	status = hw->phy.ops.setup_link(hw);
   5.652 +
   5.653 +	/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
   5.654 +	autoc &= ~IXGBE_AUTOC_LMS_MASK;
   5.655 +	autoc |= IXGBE_AUTOC_LMS_KX4_AN;
   5.656 +
   5.657 +	autoc &= ~(IXGBE_AUTOC_1G_PMA_PMD_MASK | IXGBE_AUTOC_10G_PMA_PMD_MASK);
   5.658 +	autoc |= (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
   5.659 +
   5.660 +	if (autoc != curr_autoc)
   5.661 +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
   5.662 +
   5.663 +	/* Set up MAC */
   5.664 +	ixgbe_setup_mac_link_82598(hw);
   5.665 +
   5.666 +	return status;
   5.667 +}
   5.668 +
   5.669 +/**
   5.670 + *  ixgbe_setup_copper_link_speed_82598 - Set the PHY autoneg advertised field
   5.671 + *  @hw: pointer to hardware structure
   5.672 + *  @speed: new link speed
   5.673 + *  @autoneg: true if autonegotiation enabled
   5.674 + *  @autoneg_wait_to_complete: true if waiting is needed to complete
   5.675 + *
   5.676 + *  Sets the link speed in the AUTOC register in the MAC and restarts link.
   5.677 + **/
   5.678 +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
   5.679 +                                               ixgbe_link_speed speed,
   5.680 +                                               bool autoneg,
   5.681 +                                               bool autoneg_wait_to_complete)
   5.682 +{
   5.683 +	s32 status;
   5.684 +	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   5.685 +	u32 autoc = curr_autoc;
   5.686 +
   5.687 +	/* Setup the PHY according to input speed */
   5.688 +	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
   5.689 +	                                      autoneg_wait_to_complete);
   5.690 +
   5.691 +	/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
   5.692 +	autoc &= ~IXGBE_AUTOC_LMS_MASK;
   5.693 +	autoc |= IXGBE_AUTOC_LMS_KX4_AN;
   5.694 +
   5.695 +	autoc &= ~(IXGBE_AUTOC_1G_PMA_PMD_MASK | IXGBE_AUTOC_10G_PMA_PMD_MASK);
   5.696 +	autoc |= (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
   5.697 +
   5.698 +	if (autoc != curr_autoc)
   5.699 +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
   5.700 +
   5.701 +	/* Set up MAC */
   5.702 +	ixgbe_setup_mac_link_82598(hw);
   5.703 +
   5.704 +	return status;
   5.705 +}
   5.706 +
/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
 *  reset.
 *
 *  Returns 0 on success; IXGBE_ERR_MASTER_REQUESTS_PENDING or
 *  IXGBE_ERR_RESET_FAILED if a step failed (later steps still execute and
 *  only the last failure is reported).
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		/* NOTE(review): this re-read of ATLAS_PDN_LPBK duplicates the
		 * read just above; harmless, but could be removed. */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
		                              analog_val);
	}

	/* Reset PHY, unless the caller disabled PHY resets */
	if (hw->phy.reset_disable == false)
		hw->phy.ops.reset(hw);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	if (ixgbe_disable_pcie_master(hw) != 0) {
		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
	}

	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	/* Let hardware settle after the reset */
	msleep(50);

	/* Clear selected GHECCR bits; presumably per 82598 errata --
	 * TODO confirm the meaning of bits 21, 18, 9 and 6. */
	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	return status;
}
   5.818 +
   5.819 +/**
   5.820 + *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
   5.821 + *  @hw: pointer to hardware struct
   5.822 + *  @rar: receive address register index to associate with a VMDq index
   5.823 + *  @vmdq: VMDq set index
   5.824 + **/
   5.825 +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
   5.826 +{
   5.827 +	u32 rar_high;
   5.828 +
   5.829 +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
   5.830 +	rar_high &= ~IXGBE_RAH_VIND_MASK;
   5.831 +	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
   5.832 +	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
   5.833 +	return 0;
   5.834 +}
   5.835 +
   5.836 +/**
   5.837 + *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
   5.838 + *  @hw: pointer to hardware struct
   5.839 + *  @rar: receive address register index to associate with a VMDq index
   5.840 + *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
   5.841 + **/
   5.842 +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
   5.843 +{
   5.844 +	u32 rar_high;
   5.845 +	u32 rar_entries = hw->mac.num_rar_entries;
   5.846 +
   5.847 +
   5.848 +	if (rar < rar_entries) {
   5.849 +		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
   5.850 +		if (rar_high & IXGBE_RAH_VIND_MASK) {
   5.851 +			rar_high &= ~IXGBE_RAH_VIND_MASK;
   5.852 +			IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
   5.853 +		}
   5.854 +	} else {
   5.855 +		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
   5.856 +	}
   5.857 +
   5.858 +	return 0;
   5.859 +}
   5.860 +
   5.861 +/**
   5.862 + *  ixgbe_set_vfta_82598 - Set VLAN filter table
   5.863 + *  @hw: pointer to hardware structure
   5.864 + *  @vlan: VLAN id to write to VLAN filter
   5.865 + *  @vind: VMDq output index that maps queue to VLAN id in VFTA
   5.866 + *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
   5.867 + *
   5.868 + *  Turn on/off specified VLAN in the VLAN filter table.
   5.869 + **/
   5.870 +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
   5.871 +	                                              bool vlan_on)
   5.872 +{
   5.873 +	u32 regindex;
   5.874 +	u32 bitindex;
   5.875 +	u32 bits;
   5.876 +	u32 vftabyte;
   5.877 +
   5.878 +	if (vlan > 4095)
   5.879 +		return IXGBE_ERR_PARAM;
   5.880 +
   5.881 +	/* Determine 32-bit word position in array */
   5.882 +	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
   5.883 +
   5.884 +	/* Determine the location of the (VMD) queue index */
   5.885 +	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
   5.886 +	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
   5.887 +
   5.888 +	/* Set the nibble for VMD queue index */
   5.889 +	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
   5.890 +	bits &= (~(0x0F << bitindex));
   5.891 +	bits |= (vind << bitindex);
   5.892 +	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
   5.893 +
   5.894 +	/* Determine the location of the bit for this VLAN id */
   5.895 +	bitindex = vlan & 0x1F;   /* lower five bits */
   5.896 +
   5.897 +	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
   5.898 +	if (vlan_on)
   5.899 +		/* Turn on this VLAN id */
   5.900 +		bits |= (1 << bitindex);
   5.901 +	else
   5.902 +		/* Turn off this VLAN id */
   5.903 +		bits &= ~(1 << bitindex);
   5.904 +	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
   5.905 +
   5.906 +	return 0;
   5.907 +}
   5.908 +
   5.909 +/**
   5.910 + *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
   5.911 + *  @hw: pointer to hardware structure
   5.912 + *
   5.913 + *  Clears the VLAN filer table, and the VMDq index associated with the filter
   5.914 + **/
   5.915 +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
   5.916 +{
   5.917 +	u32 offset;
   5.918 +	u32 vlanbyte;
   5.919 +
   5.920 +	for (offset = 0; offset < hw->mac.vft_size; offset++)
   5.921 +		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
   5.922 +
   5.923 +	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
   5.924 +		for (offset = 0; offset < hw->mac.vft_size; offset++)
   5.925 +			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
   5.926 +			                0);
   5.927 +
   5.928 +	return 0;
   5.929 +}
   5.930 +
   5.931 +/**
   5.932 + *  ixgbe_blink_led_start_82598 - Blink LED based on index.
   5.933 + *  @hw: pointer to hardware structure
   5.934 + *  @index: led number to blink
   5.935 + **/
   5.936 +static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
   5.937 +{
   5.938 +	ixgbe_link_speed speed = 0;
   5.939 +	bool link_up = 0;
   5.940 +	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   5.941 +	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   5.942 +
   5.943 +	/*
   5.944 +	 * Link must be up to auto-blink the LEDs on the 82598EB MAC;
   5.945 +	 * force it if link is down.
   5.946 +	 */
   5.947 +	hw->mac.ops.check_link(hw, &speed, &link_up, false);
   5.948 +
   5.949 +	if (!link_up) {
   5.950 +		autoc_reg |= IXGBE_AUTOC_FLU;
   5.951 +		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
   5.952 +		msleep(10);
   5.953 +	}
   5.954 +
   5.955 +	led_reg &= ~IXGBE_LED_MODE_MASK(index);
   5.956 +	led_reg |= IXGBE_LED_BLINK(index);
   5.957 +	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
   5.958 +	IXGBE_WRITE_FLUSH(hw);
   5.959 +
   5.960 +	return 0;
   5.961 +}
   5.962 +
   5.963 +/**
   5.964 + *  ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
   5.965 + *  @hw: pointer to hardware structure
   5.966 + *  @index: led number to stop blinking
   5.967 + **/
   5.968 +static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
   5.969 +{
   5.970 +	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
   5.971 +	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   5.972 +
   5.973 +	autoc_reg &= ~IXGBE_AUTOC_FLU;
   5.974 +	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
   5.975 +	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
   5.976 +
   5.977 +	led_reg &= ~IXGBE_LED_MODE_MASK(index);
   5.978 +	led_reg &= ~IXGBE_LED_BLINK(index);
   5.979 +	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
   5.980 +	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
   5.981 +	IXGBE_WRITE_FLUSH(hw);
   5.982 +
   5.983 +	return 0;
   5.984 +}
   5.985 +
/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  atlas_ctl;

	/*
	 * Latch the register number (bits 15:8) into ATLASCTL and wait for
	 * the analog block to respond.  NOTE(review): the WRITE_CMD bit is
	 * set even though this is a read; it appears to act as the access
	 * strobe for both directions -- confirm against the 82598 datasheet.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
	                IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);
	/* The result is returned in the low byte of ATLASCTL. */
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return 0;
}
  5.1007 +
  5.1008 +/**
  5.1009 + *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
  5.1010 + *  @hw: pointer to hardware structure
  5.1011 + *  @reg: atlas register to write
  5.1012 + *  @val: value to write
  5.1013 + *
  5.1014 + *  Performs write operation to Atlas analog register specified.
  5.1015 + **/
  5.1016 +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
  5.1017 +{
  5.1018 +	u32  atlas_ctl;
  5.1019 +
  5.1020 +	atlas_ctl = (reg << 8) | val;
  5.1021 +	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
  5.1022 +	IXGBE_WRITE_FLUSH(hw);
  5.1023 +	udelay(10);
  5.1024 +
  5.1025 +	return 0;
  5.1026 +}
  5.1027 +
/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs a one-byte read from the SFP module's EEPROM over the I2C
 *  interface, bridged through the NetLogic PHY's SDA/SCL registers.
 *  Only supported when the PHY type is ixgbe_phy_nl.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                u8 *eeprom_data)
{
	s32 status = 0;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u32 i;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		/* Device address in the high byte, offset in the low byte;
		 * the READ_MASK bit marks this as a read transaction. */
		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg(hw,
		                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
		                      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
		                      sfp_addr);

		/* Poll status: up to 100 iterations, 10 ms apart */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg(hw,
			                     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
			                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
			                     &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msleep(10);
		}

		/* Anything but PASS (timeout, NACK, error) is a failure. */
		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			hw_dbg(hw, "EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
		                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		/* The EEPROM byte comes back in the high byte of the word. */
		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
		goto out;
	}

out:
	return status;
}
  5.1089 +
  5.1090 +/**
  5.1091 + *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
  5.1092 + *  @hw: pointer to hardware structure
  5.1093 + *
  5.1094 + *  Determines physical layer capabilities of the current configuration.
  5.1095 + **/
  5.1096 +u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
  5.1097 +{
  5.1098 +	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
  5.1099 +
  5.1100 +	switch (hw->device_id) {
  5.1101 +	case IXGBE_DEV_ID_82598:
  5.1102 +		/* Default device ID is mezzanine card KX/KX4 */
  5.1103 +		physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
  5.1104 +		                  IXGBE_PHYSICAL_LAYER_1000BASE_KX);
  5.1105 +		break;
  5.1106 +	case IXGBE_DEV_ID_82598EB_CX4:
  5.1107 +	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
  5.1108 +		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
  5.1109 +		break;
  5.1110 +	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
  5.1111 +		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
  5.1112 +		break;
  5.1113 +	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
  5.1114 +	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
  5.1115 +	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
  5.1116 +		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
  5.1117 +		break;
  5.1118 +	case IXGBE_DEV_ID_82598EB_XF_LR:
  5.1119 +		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
  5.1120 +		break;
  5.1121 +	case IXGBE_DEV_ID_82598AT:
  5.1122 +		physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
  5.1123 +		                  IXGBE_PHYSICAL_LAYER_1000BASE_T);
  5.1124 +		break;
  5.1125 +	case IXGBE_DEV_ID_82598EB_SFP_LOM:
  5.1126 +		hw->phy.ops.identify_sfp(hw);
  5.1127 +
  5.1128 +		switch (hw->phy.sfp_type) {
  5.1129 +		case ixgbe_sfp_type_da_cu:
  5.1130 +			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
  5.1131 +			break;
  5.1132 +		case ixgbe_sfp_type_sr:
  5.1133 +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
  5.1134 +			break;
  5.1135 +		case ixgbe_sfp_type_lr:
  5.1136 +			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
  5.1137 +			break;
  5.1138 +		default:
  5.1139 +			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
  5.1140 +			break;
  5.1141 +		}
  5.1142 +		break;
  5.1143 +
  5.1144 +	default:
  5.1145 +		physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
  5.1146 +		break;
  5.1147 +	}
  5.1148 +
  5.1149 +	return physical_layer;
  5.1150 +}
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/drivers/net/ixgbe/ixgbe_api.c	Fri Jan 30 10:52:47 2009 +0000
     6.3 @@ -0,0 +1,796 @@
     6.4 +/*******************************************************************************
     6.5 +
     6.6 +  Intel 10 Gigabit PCI Express Linux driver
     6.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
     6.8 +
     6.9 +  This program is free software; you can redistribute it and/or modify it
    6.10 +  under the terms and conditions of the GNU General Public License,
    6.11 +  version 2, as published by the Free Software Foundation.
    6.12 +
    6.13 +  This program is distributed in the hope it will be useful, but WITHOUT
    6.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    6.16 +  more details.
    6.17 +
    6.18 +  You should have received a copy of the GNU General Public License along with
    6.19 +  this program; if not, write to the Free Software Foundation, Inc.,
    6.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    6.21 +
    6.22 +  The full GNU General Public License is included in this distribution in
    6.23 +  the file called "COPYING".
    6.24 +
    6.25 +  Contact Information:
    6.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
    6.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    6.28 +
    6.29 +*******************************************************************************/
    6.30 +
    6.31 +#include "ixgbe_api.h"
    6.32 +#include "ixgbe_common.h"
    6.33 +
    6.34 +extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
    6.35 +
    6.36 +/**
    6.37 + *  ixgbe_init_shared_code - Initialize the shared code
    6.38 + *  @hw: pointer to hardware structure
    6.39 + *
    6.40 + *  This will assign function pointers and assign the MAC type and PHY code.
    6.41 + *  Does not touch the hardware. This function must be called prior to any
    6.42 + *  other function in the shared code. The ixgbe_hw structure should be
    6.43 + *  memset to 0 prior to calling this function.  The following fields in
    6.44 + *  hw structure should be filled in prior to calling this function:
    6.45 + *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
    6.46 + *  subsystem_vendor_id, and revision_id
    6.47 + **/
    6.48 +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
    6.49 +{
    6.50 +	s32 status;
    6.51 +
    6.52 +	/*
    6.53 +	 * Set the mac type
    6.54 +	 */
    6.55 +	ixgbe_set_mac_type(hw);
    6.56 +
    6.57 +	switch (hw->mac.type) {
    6.58 +	case ixgbe_mac_82598EB:
    6.59 +		status = ixgbe_init_ops_82598(hw);
    6.60 +		break;
    6.61 +	default:
    6.62 +		status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
    6.63 +		break;
    6.64 +	}
    6.65 +
    6.66 +	return status;
    6.67 +}
    6.68 +
    6.69 +/**
    6.70 + *  ixgbe_set_mac_type - Sets MAC type
    6.71 + *  @hw: pointer to the HW structure
    6.72 + *
    6.73 + *  This function sets the mac type of the adapter based on the
    6.74 + *  vendor ID and device ID stored in the hw structure.
    6.75 + **/
    6.76 +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
    6.77 +{
    6.78 +	s32 ret_val = 0;
    6.79 +
    6.80 +	if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
    6.81 +		switch (hw->device_id) {
    6.82 +		case IXGBE_DEV_ID_82598:
    6.83 +		case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
    6.84 +		case IXGBE_DEV_ID_82598AF_DUAL_PORT:
    6.85 +		case IXGBE_DEV_ID_82598AT:
    6.86 +		case IXGBE_DEV_ID_82598EB_CX4:
    6.87 +		case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
    6.88 +		case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
    6.89 +		case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
    6.90 +		case IXGBE_DEV_ID_82598EB_XF_LR:
    6.91 +		case IXGBE_DEV_ID_82598EB_SFP_LOM:
    6.92 +			hw->mac.type = ixgbe_mac_82598EB;
    6.93 +			break;
    6.94 +		default:
    6.95 +			ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
    6.96 +			break;
    6.97 +		}
    6.98 +	} else {
    6.99 +		ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
   6.100 +	}
   6.101 +
   6.102 +	hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n",
   6.103 +	          hw->mac.type, ret_val);
   6.104 +	return ret_val;
   6.105 +}
   6.106 +
   6.107 +/**
   6.108 + *  ixgbe_init_hw - Initialize the hardware
   6.109 + *  @hw: pointer to hardware structure
   6.110 + *
   6.111 + *  Initialize the hardware by resetting and then starting the hardware
   6.112 + **/
   6.113 +s32 ixgbe_init_hw(struct ixgbe_hw *hw)
   6.114 +{
   6.115 +	return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
   6.116 +	                       IXGBE_NOT_IMPLEMENTED);
   6.117 +}
   6.118 +
   6.119 +/**
   6.120 + *  ixgbe_reset_hw - Performs a hardware reset
   6.121 + *  @hw: pointer to hardware structure
   6.122 + *
   6.123 + *  Resets the hardware by resetting the transmit and receive units, masks and
   6.124 + *  clears all interrupts, performs a PHY reset, and performs a MAC reset
   6.125 + **/
   6.126 +s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
   6.127 +{
   6.128 +	return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
   6.129 +	                       IXGBE_NOT_IMPLEMENTED);
   6.130 +}
   6.131 +
   6.132 +/**
   6.133 + *  ixgbe_start_hw - Prepares hardware for Rx/Tx
   6.134 + *  @hw: pointer to hardware structure
   6.135 + *
   6.136 + *  Starts the hardware by filling the bus info structure and media type,
   6.137 + *  clears all on chip counters, initializes receive address registers,
   6.138 + *  multicast table, VLAN filter table, calls routine to setup link and
   6.139 + *  flow control settings, and leaves transmit and receive units disabled
   6.140 + *  and uninitialized.
   6.141 + **/
   6.142 +s32 ixgbe_start_hw(struct ixgbe_hw *hw)
   6.143 +{
   6.144 +	return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
   6.145 +	                       IXGBE_NOT_IMPLEMENTED);
   6.146 +}
   6.147 +
   6.148 +/**
   6.149 + *  ixgbe_clear_hw_cntrs - Clear hardware counters
   6.150 + *  @hw: pointer to hardware structure
   6.151 + *
   6.152 + *  Clears all hardware statistics counters by reading them from the hardware
   6.153 + *  Statistics counters are clear on read.
   6.154 + **/
   6.155 +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
   6.156 +{
   6.157 +	return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
   6.158 +	                       IXGBE_NOT_IMPLEMENTED);
   6.159 +}
   6.160 +
   6.161 +/**
   6.162 + *  ixgbe_get_media_type - Get media type
   6.163 + *  @hw: pointer to hardware structure
   6.164 + *
   6.165 + *  Returns the media type (fiber, copper, backplane)
   6.166 + **/
   6.167 +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
   6.168 +{
   6.169 +	return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
   6.170 +	                       ixgbe_media_type_unknown);
   6.171 +}
   6.172 +
   6.173 +/**
   6.174 + *  ixgbe_get_mac_addr - Get MAC address
   6.175 + *  @hw: pointer to hardware structure
   6.176 + *  @mac_addr: Adapter MAC address
   6.177 + *
   6.178 + *  Reads the adapter's MAC address from the first Receive Address Register
   6.179 + *  (RAR0) A reset of the adapter must have been performed prior to calling
   6.180 + *  this function in order for the MAC address to have been loaded from the
   6.181 + *  EEPROM into RAR0
   6.182 + **/
   6.183 +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
   6.184 +{
   6.185 +	return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
   6.186 +	                       (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
   6.187 +}
   6.188 +
   6.189 +/**
   6.190 + *  ixgbe_get_bus_info - Set PCI bus info
   6.191 + *  @hw: pointer to hardware structure
   6.192 + *
   6.193 + *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
   6.194 + **/
   6.195 +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
   6.196 +{
   6.197 +	return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
   6.198 +	                       IXGBE_NOT_IMPLEMENTED);
   6.199 +}
   6.200 +
   6.201 +/**
   6.202 + *  ixgbe_get_num_of_tx_queues - Get Tx queues
   6.203 + *  @hw: pointer to hardware structure
   6.204 + *
   6.205 + *  Returns the number of transmit queues for the given adapter.
   6.206 + **/
   6.207 +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
   6.208 +{
   6.209 +	return hw->mac.max_tx_queues;
   6.210 +}
   6.211 +
   6.212 +/**
   6.213 + *  ixgbe_get_num_of_rx_queues - Get Rx queues
   6.214 + *  @hw: pointer to hardware structure
   6.215 + *
   6.216 + *  Returns the number of receive queues for the given adapter.
   6.217 + **/
   6.218 +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
   6.219 +{
   6.220 +	return hw->mac.max_rx_queues;
   6.221 +}
   6.222 +
   6.223 +/**
   6.224 + *  ixgbe_stop_adapter - Disable Rx/Tx units
   6.225 + *  @hw: pointer to hardware structure
   6.226 + *
   6.227 + *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
   6.228 + *  disables transmit and receive units. The adapter_stopped flag is used by
   6.229 + *  the shared code and drivers to determine if the adapter is in a stopped
   6.230 + *  state and should not touch the hardware.
   6.231 + **/
   6.232 +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
   6.233 +{
   6.234 +	return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
   6.235 +	                       IXGBE_NOT_IMPLEMENTED);
   6.236 +}
   6.237 +
   6.238 +/**
   6.239 + *  ixgbe_read_pba_num - Reads part number from EEPROM
   6.240 + *  @hw: pointer to hardware structure
   6.241 + *  @pba_num: stores the part number from the EEPROM
   6.242 + *
   6.243 + *  Reads the part number from the EEPROM.
   6.244 + **/
   6.245 +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
   6.246 +{
   6.247 +	return ixgbe_read_pba_num_generic(hw, pba_num);
   6.248 +}
   6.249 +
   6.250 +/**
   6.251 + *  ixgbe_identify_phy - Get PHY type
   6.252 + *  @hw: pointer to hardware structure
   6.253 + *
   6.254 + *  Determines the physical layer module found on the current adapter.
   6.255 + **/
   6.256 +s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
   6.257 +{
   6.258 +	s32 status = 0;
   6.259 +
   6.260 +	if (hw->phy.type == ixgbe_phy_unknown) {
   6.261 +		status = ixgbe_call_func(hw,
   6.262 +		                         hw->phy.ops.identify,
   6.263 +		                         (hw),
   6.264 +		                         IXGBE_NOT_IMPLEMENTED);
   6.265 +	}
   6.266 +
   6.267 +	return status;
   6.268 +}
   6.269 +
   6.270 +/**
   6.271 + *  ixgbe_reset_phy - Perform a PHY reset
   6.272 + *  @hw: pointer to hardware structure
   6.273 + **/
   6.274 +s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
   6.275 +{
   6.276 +	s32 status = 0;
   6.277 +
   6.278 +	if (hw->phy.type == ixgbe_phy_unknown) {
   6.279 +		if (ixgbe_identify_phy(hw) != 0)
   6.280 +			status = IXGBE_ERR_PHY;
   6.281 +	}
   6.282 +
   6.283 +	if (status == 0) {
   6.284 +		status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
   6.285 +		                         IXGBE_NOT_IMPLEMENTED);
   6.286 +	}
   6.287 +	return status;
   6.288 +}
   6.289 +
   6.290 +/**
   6.291 + *  ixgbe_get_phy_firmware_version -
   6.292 + *  @hw: pointer to hardware structure
   6.293 + *  @firmware_version: pointer to firmware version
   6.294 + **/
   6.295 +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
   6.296 +{
   6.297 +	s32 status = 0;
   6.298 +
   6.299 +	status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
   6.300 +	                         (hw, firmware_version),
   6.301 +	                         IXGBE_NOT_IMPLEMENTED);
   6.302 +	return status;
   6.303 +}
   6.304 +
/**
 *  ixgbe_read_phy_reg - Read PHY register
 *  @hw: pointer to hardware structure
 *  @reg_addr: 32 bit address of PHY register to read
 *  @device_type: MDIO device type/address the register belongs to
 *  @phy_data: Pointer to read data from PHY register
 *
 *  Reads a value from a specified PHY register
 **/
s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
                       u16 *phy_data)
{
	return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
	                       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
}
   6.319 +
/**
 *  ixgbe_write_phy_reg - Write PHY register
 *  @hw: pointer to hardware structure
 *  @reg_addr: 32 bit PHY register to write
 *  @device_type: MDIO device type/address the register belongs to
 *  @phy_data: Data to write to the PHY register
 *
 *  Writes a value to specified PHY register
 **/
s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
                        u16 phy_data)
{
	return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
	                       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
}
   6.334 +
   6.335 +/**
   6.336 + *  ixgbe_setup_phy_link - Restart PHY autoneg
   6.337 + *  @hw: pointer to hardware structure
   6.338 + *
   6.339 + *  Restart autonegotiation and PHY and waits for completion.
   6.340 + **/
   6.341 +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
   6.342 +{
   6.343 +	return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
   6.344 +	                       IXGBE_NOT_IMPLEMENTED);
   6.345 +}
   6.346 +
/**
 *  ixgbe_check_phy_link - Determine link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: out: current link speed reported by the PHY
 *  @link_up: out: set true if link is up
 *
 *  Reads a PHY register to determine if link is up and the current speed for
 *  the PHY.
 **/
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                         bool *link_up)
{
	return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
	                       link_up), IXGBE_NOT_IMPLEMENTED);
}
   6.360 +
/**
 *  ixgbe_setup_phy_link_speed - Set auto advertise
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: true if autonegotiation enabled
 *  @autoneg_wait_to_complete: true when waiting for autoneg completion
 *                             is required
 *
 *  Sets the auto advertised capabilities
 **/
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
                               bool autoneg,
                               bool autoneg_wait_to_complete)
{
	return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
	                       autoneg, autoneg_wait_to_complete),
	                       IXGBE_NOT_IMPLEMENTED);
}
   6.377 +
   6.378 +/**
   6.379 + *  ixgbe_setup_link - Configure link settings
   6.380 + *  @hw: pointer to hardware structure
   6.381 + *
   6.382 + *  Configures link settings based on values in the ixgbe_hw struct.
   6.383 + *  Restarts the link.  Performs autonegotiation if needed.
   6.384 + **/
   6.385 +s32 ixgbe_setup_link(struct ixgbe_hw *hw)
   6.386 +{
   6.387 +	return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw),
   6.388 +	                       IXGBE_NOT_IMPLEMENTED);
   6.389 +}
   6.390 +
/**
 *  ixgbe_check_link - Get link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: out: current link speed
 *  @link_up: out: set true if link is up
 *  @link_up_wait_to_complete: true when waiting for link-up is required
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                     bool *link_up, bool link_up_wait_to_complete)
{
	return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
	                       link_up, link_up_wait_to_complete),
	                       IXGBE_NOT_IMPLEMENTED);
}
   6.404 +
/**
 *  ixgbe_setup_link_speed - Set link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: true if autonegotiation enabled
 *  @autoneg_wait_to_complete: true when waiting for autoneg completion
 *                             is required
 *
 *  Set the link speed and restarts the link.
 **/
s32 ixgbe_setup_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
                           bool autoneg,
                           bool autoneg_wait_to_complete)
{
	return ixgbe_call_func(hw, hw->mac.ops.setup_link_speed, (hw, speed,
	                       autoneg, autoneg_wait_to_complete),
	                       IXGBE_NOT_IMPLEMENTED);
}
   6.421 +
/**
 *  ixgbe_get_link_capabilities - Returns link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: out: link speeds supported by the current configuration
 *  @autoneg: out: autonegotiation capability flag
 *
 *  Determines the link capabilities of the current configuration.
 **/
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                                bool *autoneg)
{
	return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
	                       speed, autoneg), IXGBE_NOT_IMPLEMENTED);
}
   6.434 +
   6.435 +/**
   6.436 + *  ixgbe_led_on - Turn on LEDs
   6.437 + *  @hw: pointer to hardware structure
   6.438 + *  @index: led number to turn on
   6.439 + *
   6.440 + *  Turns on the software controllable LEDs.
   6.441 + **/
   6.442 +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
   6.443 +{
   6.444 +	return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
   6.445 +	                       IXGBE_NOT_IMPLEMENTED);
   6.446 +}
   6.447 +
   6.448 +/**
   6.449 + *  ixgbe_led_off - Turn off LEDs
   6.450 + *  @hw: pointer to hardware structure
   6.451 + *  @index: led number to turn off
   6.452 + *
   6.453 + *  Turns off the software controllable LEDs.
   6.454 + **/
   6.455 +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
   6.456 +{
   6.457 +	return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
   6.458 +	                       IXGBE_NOT_IMPLEMENTED);
   6.459 +}
   6.460 +
   6.461 +/**
   6.462 + *  ixgbe_blink_led_start - Blink LEDs
   6.463 + *  @hw: pointer to hardware structure
   6.464 + *  @index: led number to blink
   6.465 + *
   6.466 + *  Blink LED based on index.
   6.467 + **/
   6.468 +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
   6.469 +{
   6.470 +	return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
   6.471 +	                       IXGBE_NOT_IMPLEMENTED);
   6.472 +}
   6.473 +
/**
 *  ixgbe_blink_led_stop - Stop blinking LEDs
 *  @hw: pointer to hardware structure
 *  @index: led number to stop blinking
 *
 *  Stop blinking LED based on index.
 **/
s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
{
	return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
	                       IXGBE_NOT_IMPLEMENTED);
}
   6.485 +
   6.486 +/**
   6.487 + *  ixgbe_init_eeprom_params - Initialize EEPROM parameters
   6.488 + *  @hw: pointer to hardware structure
   6.489 + *
   6.490 + *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
   6.491 + *  ixgbe_hw struct in order to set up EEPROM access.
   6.492 + **/
   6.493 +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
   6.494 +{
   6.495 +	return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
   6.496 +	                       IXGBE_NOT_IMPLEMENTED);
   6.497 +}
   6.498 +
   6.499 +
   6.500 +/**
   6.501 + *  ixgbe_write_eeprom - Write word to EEPROM
   6.502 + *  @hw: pointer to hardware structure
   6.503 + *  @offset: offset within the EEPROM to be written to
   6.504 + *  @data: 16 bit word to be written to the EEPROM
   6.505 + *
   6.506 + *  Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
   6.507 + *  called after this function, the EEPROM will most likely contain an
   6.508 + *  invalid checksum.
   6.509 + **/
   6.510 +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
   6.511 +{
   6.512 +	return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
   6.513 +	                       IXGBE_NOT_IMPLEMENTED);
   6.514 +}
   6.515 +
   6.516 +/**
   6.517 + *  ixgbe_read_eeprom - Read word from EEPROM
   6.518 + *  @hw: pointer to hardware structure
   6.519 + *  @offset: offset within the EEPROM to be read
   6.520 + *  @data: read 16 bit value from EEPROM
   6.521 + *
   6.522 + *  Reads 16 bit value from EEPROM
   6.523 + **/
   6.524 +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
   6.525 +{
   6.526 +	return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
   6.527 +	                       IXGBE_NOT_IMPLEMENTED);
   6.528 +}
   6.529 +
   6.530 +/**
   6.531 + *  ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
   6.532 + *  @hw: pointer to hardware structure
   6.533 + *  @checksum_val: calculated checksum
   6.534 + *
   6.535 + *  Performs checksum calculation and validates the EEPROM checksum
   6.536 + **/
   6.537 +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
   6.538 +{
   6.539 +	return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
   6.540 +	                       (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
   6.541 +}
   6.542 +
/**
 *  ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
 *  @hw: pointer to hardware structure
 **/
s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
{
	return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
	                       IXGBE_NOT_IMPLEMENTED);
}
   6.552 +
   6.553 +/**
   6.554 + *  ixgbe_set_rar - Set Rx address register
   6.555 + *  @hw: pointer to hardware structure
   6.556 + *  @index: Receive address register to write
   6.557 + *  @addr: Address to put into receive address register
   6.558 + *  @vmdq: VMDq "set"
   6.559 + *  @enable_addr: set flag that address is active
   6.560 + *
   6.561 + *  Puts an ethernet address into a receive address register.
   6.562 + **/
   6.563 +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
   6.564 +                  u32 enable_addr)
   6.565 +{
   6.566 +	return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
   6.567 +	                       enable_addr), IXGBE_NOT_IMPLEMENTED);
   6.568 +}
   6.569 +
/**
 *  ixgbe_clear_rar - Clear Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to clear
 *
 *  Clears the ethernet address out of the given receive address register.
 **/
s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
{
	return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
	                       IXGBE_NOT_IMPLEMENTED);
}
   6.582 +
   6.583 +/**
   6.584 + *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
   6.585 + *  @hw: pointer to hardware structure
   6.586 + *  @rar: receive address register index to associate with VMDq index
   6.587 + *  @vmdq: VMDq set or pool index
   6.588 + **/
   6.589 +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
   6.590 +{
   6.591 +	return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
   6.592 +	                       IXGBE_NOT_IMPLEMENTED);
   6.593 +}
   6.594 +
   6.595 +/**
   6.596 + *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
   6.597 + *  @hw: pointer to hardware structure
   6.598 + *  @rar: receive address register index to disassociate with VMDq index
   6.599 + *  @vmdq: VMDq set or pool index
   6.600 + **/
   6.601 +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
   6.602 +{
   6.603 +	return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
   6.604 +	                       IXGBE_NOT_IMPLEMENTED);
   6.605 +}
   6.606 +
   6.607 +/**
   6.608 + *  ixgbe_init_rx_addrs - Initializes receive address filters.
   6.609 + *  @hw: pointer to hardware structure
   6.610 + *
   6.611 + *  Places the MAC address in receive address register 0 and clears the rest
   6.612 + *  of the receive address registers. Clears the multicast table. Assumes
   6.613 + *  the receiver is in reset when the routine is called.
   6.614 + **/
   6.615 +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
   6.616 +{
   6.617 +	return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
   6.618 +	                       IXGBE_NOT_IMPLEMENTED);
   6.619 +}
   6.620 +
   6.621 +/**
   6.622 + *  ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
   6.623 + *  @hw: pointer to hardware structure
   6.624 + **/
   6.625 +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
   6.626 +{
   6.627 +	return hw->mac.num_rar_entries;
   6.628 +}
   6.629 +
   6.630 +/**
   6.631 + *  ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
   6.632 + *  @hw: pointer to hardware structure
    6.633 + *  @addr_list: the list of new addresses
   6.634 + *  @addr_count: number of addresses
   6.635 + *  @func: iterator function to walk the multicast address list
   6.636 + *
   6.637 + *  The given list replaces any existing list. Clears the secondary addrs from
   6.638 + *  receive address registers. Uses unused receive address registers for the
   6.639 + *  first secondary addresses, and falls back to promiscuous mode as needed.
   6.640 + **/
   6.641 +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
   6.642 +                              u32 addr_count, ixgbe_mc_addr_itr func)
   6.643 +{
   6.644 +	return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
   6.645 +	                       addr_list, addr_count, func),
   6.646 +	                       IXGBE_NOT_IMPLEMENTED);
   6.647 +}
   6.648 +
   6.649 +/**
   6.650 + *  ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
   6.651 + *  @hw: pointer to hardware structure
   6.652 + *  @mc_addr_list: the list of new multicast addresses
   6.653 + *  @mc_addr_count: number of addresses
   6.654 + *  @func: iterator function to walk the multicast address list
   6.655 + *
   6.656 + *  The given list replaces any existing list. Clears the MC addrs from receive
   6.657 + *  address registers and the multicast table. Uses unused receive address
   6.658 + *  registers for the first multicast addresses, and hashes the rest into the
   6.659 + *  multicast table.
   6.660 + **/
   6.661 +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
   6.662 +                              u32 mc_addr_count, ixgbe_mc_addr_itr func)
   6.663 +{
   6.664 +	return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
   6.665 +	                       mc_addr_list, mc_addr_count, func),
   6.666 +	                       IXGBE_NOT_IMPLEMENTED);
   6.667 +}
   6.668 +
   6.669 +/**
   6.670 + *  ixgbe_enable_mc - Enable multicast address in RAR
   6.671 + *  @hw: pointer to hardware structure
   6.672 + *
   6.673 + *  Enables multicast address in RAR and the use of the multicast hash table.
   6.674 + **/
   6.675 +s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
   6.676 +{
   6.677 +	return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
   6.678 +	                       IXGBE_NOT_IMPLEMENTED);
   6.679 +}
   6.680 +
   6.681 +/**
   6.682 + *  ixgbe_disable_mc - Disable multicast address in RAR
   6.683 + *  @hw: pointer to hardware structure
   6.684 + *
   6.685 + *  Disables multicast address in RAR and the use of the multicast hash table.
   6.686 + **/
   6.687 +s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
   6.688 +{
   6.689 +	return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
   6.690 +	                       IXGBE_NOT_IMPLEMENTED);
   6.691 +}
   6.692 +
   6.693 +/**
   6.694 + *  ixgbe_clear_vfta - Clear VLAN filter table
   6.695 + *  @hw: pointer to hardware structure
   6.696 + *
    6.697 + *  Clears the VLAN filter table, and the VMDq index associated with the filter
   6.698 + **/
   6.699 +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
   6.700 +{
   6.701 +	return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
   6.702 +	                       IXGBE_NOT_IMPLEMENTED);
   6.703 +}
   6.704 +
   6.705 +/**
   6.706 + *  ixgbe_set_vfta - Set VLAN filter table
   6.707 + *  @hw: pointer to hardware structure
   6.708 + *  @vlan: VLAN id to write to VLAN filter
   6.709 + *  @vind: VMDq output index that maps queue to VLAN id in VFTA
   6.710 + *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
   6.711 + *
   6.712 + *  Turn on/off specified VLAN in the VLAN filter table.
   6.713 + **/
   6.714 +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
   6.715 +{
   6.716 +	return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
   6.717 +	                       vlan_on), IXGBE_NOT_IMPLEMENTED);
   6.718 +}
   6.719 +
   6.720 +/**
   6.721 + *  ixgbe_setup_fc - Set flow control
   6.722 + *  @hw: pointer to hardware structure
   6.723 + *  @packetbuf_num: packet buffer number (0-7)
   6.724 + *
   6.725 + *  Configures the flow control settings based on SW configuration.
   6.726 + **/
   6.727 +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
   6.728 +{
   6.729 +	return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw, packetbuf_num),
   6.730 +	                       IXGBE_NOT_IMPLEMENTED);
   6.731 +}
   6.732 +
   6.733 +/**
   6.734 + *  ixgbe_read_analog_reg8 - Reads 8 bit analog register
   6.735 + *  @hw: pointer to hardware structure
   6.736 + *  @reg: analog register to read
   6.737 + *  @val: read value
   6.738 + *
    6.739 + *  Performs read operation to analog register specified.
   6.740 + **/
   6.741 +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
   6.742 +{
   6.743 +	return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
   6.744 +	                       val), IXGBE_NOT_IMPLEMENTED);
   6.745 +}
   6.746 +
   6.747 +/**
   6.748 + *  ixgbe_write_analog_reg8 - Writes 8 bit analog register
   6.749 + *  @hw: pointer to hardware structure
   6.750 + *  @reg: analog register to write
   6.751 + *  @val: value to write
   6.752 + *
   6.753 + *  Performs write operation to Atlas analog register specified.
   6.754 + **/
   6.755 +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
   6.756 +{
   6.757 +	return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
   6.758 +	                       val), IXGBE_NOT_IMPLEMENTED);
   6.759 +}
   6.760 +
   6.761 +/**
   6.762 + *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
   6.763 + *  @hw: pointer to hardware structure
   6.764 + *
   6.765 + *  Initializes the Unicast Table Arrays to zero on device load.  This
   6.766 + *  is part of the Rx init addr execution path.
   6.767 + **/
   6.768 +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
   6.769 +{
   6.770 +	return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
   6.771 +	                       IXGBE_NOT_IMPLEMENTED);
   6.772 +}
   6.773 +
   6.774 +/**
   6.775 + *  ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
   6.776 + *  @hw: pointer to hardware structure
   6.777 + *  @byte_offset: EEPROM byte offset to read
   6.778 + *  @eeprom_data: value read
   6.779 + *
   6.780 + *  Performs byte read operation to SFP module's EEPROM over I2C interface.
   6.781 + **/
   6.782 +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
   6.783 +{
   6.784 +	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
   6.785 +	                      (hw, byte_offset, eeprom_data),
   6.786 +	                      IXGBE_NOT_IMPLEMENTED);
   6.787 +}
   6.788 +
   6.789 +/**
   6.790 + *  ixgbe_get_supported_physical_layer - Returns physical layer type
   6.791 + *  @hw: pointer to hardware structure
   6.792 + *
   6.793 + *  Determines physical layer capabilities of the current configuration.
   6.794 + **/
   6.795 +u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
   6.796 +{
   6.797 +	return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
   6.798 +	                       (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
   6.799 +}
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/drivers/net/ixgbe/ixgbe_api.h	Fri Jan 30 10:52:47 2009 +0000
     7.3 @@ -0,0 +1,109 @@
     7.4 +/*******************************************************************************
     7.5 +
     7.6 +  Intel 10 Gigabit PCI Express Linux driver
     7.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
     7.8 +
     7.9 +  This program is free software; you can redistribute it and/or modify it
    7.10 +  under the terms and conditions of the GNU General Public License,
    7.11 +  version 2, as published by the Free Software Foundation.
    7.12 +
    7.13 +  This program is distributed in the hope it will be useful, but WITHOUT
    7.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    7.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    7.16 +  more details.
    7.17 +
    7.18 +  You should have received a copy of the GNU General Public License along with
    7.19 +  this program; if not, write to the Free Software Foundation, Inc.,
    7.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    7.21 +
    7.22 +  The full GNU General Public License is included in this distribution in
    7.23 +  the file called "COPYING".
    7.24 +
    7.25 +  Contact Information:
    7.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
    7.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    7.28 +
    7.29 +*******************************************************************************/
    7.30 +
    7.31 +#ifndef _IXGBE_API_H_
    7.32 +#define _IXGBE_API_H_
    7.33 +
    7.34 +#include "ixgbe_type.h"
    7.35 +
    7.36 +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
    7.37 +
    7.38 +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
    7.39 +s32 ixgbe_init_hw(struct ixgbe_hw *hw);
    7.40 +s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
    7.41 +s32 ixgbe_start_hw(struct ixgbe_hw *hw);
    7.42 +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
    7.43 +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
    7.44 +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
    7.45 +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
    7.46 +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
    7.47 +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
    7.48 +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
    7.49 +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
    7.50 +
    7.51 +s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
    7.52 +s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
    7.53 +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
    7.54 +                       u16 *phy_data);
    7.55 +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
    7.56 +                        u16 phy_data);
    7.57 +
    7.58 +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
    7.59 +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
    7.60 +                         ixgbe_link_speed *speed,
    7.61 +                         bool *link_up);
    7.62 +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
    7.63 +                               ixgbe_link_speed speed,
    7.64 +                               bool autoneg,
    7.65 +                               bool autoneg_wait_to_complete);
    7.66 +s32 ixgbe_setup_link(struct ixgbe_hw *hw);
    7.67 +s32 ixgbe_setup_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
    7.68 +                           bool autoneg, bool autoneg_wait_to_complete);
    7.69 +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
    7.70 +                     bool *link_up, bool link_up_wait_to_complete);
    7.71 +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
    7.72 +                            bool *autoneg);
    7.73 +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
    7.74 +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
    7.75 +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
    7.76 +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
    7.77 +
    7.78 +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
    7.79 +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
    7.80 +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
    7.81 +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
    7.82 +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
    7.83 +
    7.84 +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
    7.85 +                  u32 enable_addr);
    7.86 +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
    7.87 +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
    7.88 +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
    7.89 +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
    7.90 +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
    7.91 +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
    7.92 +                              u32 addr_count, ixgbe_mc_addr_itr func);
    7.93 +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
    7.94 +                              u32 mc_addr_count, ixgbe_mc_addr_itr func);
    7.95 +s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
    7.96 +s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
    7.97 +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
    7.98 +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
    7.99 +                   u32 vind, bool vlan_on);
   7.100 +
   7.101 +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
   7.102 +
   7.103 +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
   7.104 +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
   7.105 +                                   u16 *firmware_version);
   7.106 +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
   7.107 +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
   7.108 +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
   7.109 +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
   7.110 +u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
   7.111 +
   7.112 +#endif /* _IXGBE_API_H_ */
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/drivers/net/ixgbe/ixgbe_common.c	Fri Jan 30 10:52:47 2009 +0000
     8.3 @@ -0,0 +1,1807 @@
     8.4 +/*******************************************************************************
     8.5 +
     8.6 +  Intel 10 Gigabit PCI Express Linux driver
     8.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
     8.8 +
     8.9 +  This program is free software; you can redistribute it and/or modify it
    8.10 +  under the terms and conditions of the GNU General Public License,
    8.11 +  version 2, as published by the Free Software Foundation.
    8.12 +
    8.13 +  This program is distributed in the hope it will be useful, but WITHOUT
    8.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    8.16 +  more details.
    8.17 +
    8.18 +  You should have received a copy of the GNU General Public License along with
    8.19 +  this program; if not, write to the Free Software Foundation, Inc.,
    8.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    8.21 +
    8.22 +  The full GNU General Public License is included in this distribution in
    8.23 +  the file called "COPYING".
    8.24 +
    8.25 +  Contact Information:
    8.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
    8.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    8.28 +
    8.29 +*******************************************************************************/
    8.30 +
    8.31 +#include "ixgbe_common.h"
    8.32 +#include "ixgbe_api.h"
    8.33 +
    8.34 +static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
    8.35 +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
    8.36 +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
    8.37 +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
    8.38 +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
    8.39 +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
    8.40 +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
    8.41 +                                        u16 count);
    8.42 +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
    8.43 +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
    8.44 +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
    8.45 +static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
    8.46 +static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
    8.47 +
    8.48 +static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
    8.49 +static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
    8.50 +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
    8.51 +void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
    8.52 +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
    8.53 +
    8.54 +/**
    8.55 + *  ixgbe_init_ops_generic - Inits function ptrs
    8.56 + *  @hw: pointer to the hardware structure
    8.57 + *
    8.58 + *  Initialize the function pointers.
    8.59 + **/
    8.60 +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
    8.61 +{
    8.62 +	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
    8.63 +	struct ixgbe_mac_info *mac = &hw->mac;
    8.64 +	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
    8.65 +
    8.66 +	/* EEPROM */
    8.67 +	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
    8.68 +	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
    8.69 +	if (eec & (1 << 8))
    8.70 +		eeprom->ops.read = &ixgbe_read_eeprom_generic;
    8.71 +	else
    8.72 +		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
    8.73 +	eeprom->ops.write = &ixgbe_write_eeprom_generic;
    8.74 +	eeprom->ops.validate_checksum =
    8.75 +	                              &ixgbe_validate_eeprom_checksum_generic;
    8.76 +	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
    8.77 +
    8.78 +	/* MAC */
    8.79 +	mac->ops.init_hw = &ixgbe_init_hw_generic;
    8.80 +	mac->ops.reset_hw = NULL;
    8.81 +	mac->ops.start_hw = &ixgbe_start_hw_generic;
    8.82 +	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
    8.83 +	mac->ops.get_media_type = NULL;
    8.84 +	mac->ops.get_supported_physical_layer = NULL;
    8.85 +	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
    8.86 +	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
    8.87 +	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
    8.88 +	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
    8.89 +
    8.90 +	/* LEDs */
    8.91 +	mac->ops.led_on = &ixgbe_led_on_generic;
    8.92 +	mac->ops.led_off = &ixgbe_led_off_generic;
    8.93 +	mac->ops.blink_led_start = NULL;
    8.94 +	mac->ops.blink_led_stop = NULL;
    8.95 +
    8.96 +	/* RAR, Multicast, VLAN */
    8.97 +	mac->ops.set_rar = &ixgbe_set_rar_generic;
    8.98 +	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
    8.99 +	mac->ops.set_vmdq = NULL;
   8.100 +	mac->ops.clear_vmdq = NULL;
   8.101 +	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
   8.102 +	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
   8.103 +	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
   8.104 +	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
   8.105 +	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
   8.106 +	mac->ops.clear_vfta = NULL;
   8.107 +	mac->ops.set_vfta = NULL;
   8.108 +	mac->ops.init_uta_tables = NULL;
   8.109 +
   8.110 +
   8.111 +	/* Link */
   8.112 +	mac->ops.get_link_capabilities = NULL;
   8.113 +	mac->ops.setup_link = NULL;
   8.114 +	mac->ops.setup_link_speed = NULL;
   8.115 +	mac->ops.check_link = NULL;
   8.116 +
   8.117 +	return 0;
   8.118 +}
   8.119 +
   8.120 +/**
   8.121 + *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
   8.122 + *  @hw: pointer to hardware structure
   8.123 + *
   8.124 + *  Starts the hardware by filling the bus info structure and media type, clears
   8.125 + *  all on chip counters, initializes receive address registers, multicast
   8.126 + *  table, VLAN filter table, calls routine to set up link and flow control
   8.127 + *  settings, and leaves transmit and receive units disabled and uninitialized
   8.128 + **/
   8.129 +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
   8.130 +{
   8.131 +	u32 ctrl_ext;
   8.132 +
   8.133 +	/* Set the media type */
   8.134 +	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
   8.135 +
   8.136 +	/* Set bus info */
   8.137 +	hw->mac.ops.get_bus_info(hw);
   8.138 +
   8.139 +	/* Identify the PHY */
   8.140 +	hw->phy.ops.identify(hw);
   8.141 +
   8.142 +	/*
   8.143 +	 * Store MAC address from RAR0, clear receive address registers, and
   8.144 +	 * clear the multicast table
   8.145 +	 */
   8.146 +	hw->mac.ops.init_rx_addrs(hw);
   8.147 +
   8.148 +	/* Clear the VLAN filter table */
   8.149 +	hw->mac.ops.clear_vfta(hw);
   8.150 +
   8.151 +	/* Set up link */
   8.152 +	hw->mac.ops.setup_link(hw);
   8.153 +
   8.154 +	/* Clear statistics registers */
   8.155 +	hw->mac.ops.clear_hw_cntrs(hw);
   8.156 +
   8.157 +	/* Set No Snoop Disable */
   8.158 +	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   8.159 +	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
   8.160 +	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   8.161 +	IXGBE_WRITE_FLUSH(hw);
   8.162 +
   8.163 +	/* Clear adapter stopped flag */
   8.164 +	hw->adapter_stopped = false;
   8.165 +
   8.166 +	return 0;
   8.167 +}
   8.168 +
   8.169 +/**
   8.170 + *  ixgbe_init_hw_generic - Generic hardware initialization
   8.171 + *  @hw: pointer to hardware structure
   8.172 + *
   8.173 + *  Initialize the hardware by resetting the hardware, filling the bus info
   8.174 + *  structure and media type, clears all on chip counters, initializes receive
   8.175 + *  address registers, multicast table, VLAN filter table, calls routine to set
   8.176 + *  up link and flow control settings, and leaves transmit and receive units
   8.177 + *  disabled and uninitialized
   8.178 + **/
   8.179 +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
   8.180 +{
   8.181 +	/* Reset the hardware */
   8.182 +	hw->mac.ops.reset_hw(hw);
   8.183 +
   8.184 +	/* Start the HW */
   8.185 +	hw->mac.ops.start_hw(hw);
   8.186 +
   8.187 +	return 0;
   8.188 +}
   8.189 +
   8.190 +/**
   8.191 + *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
   8.192 + *  @hw: pointer to hardware structure
   8.193 + *
   8.194 + *  Clears all hardware statistics counters by reading them from the hardware
   8.195 + *  Statistics counters are clear on read.
   8.196 + **/
   8.197 +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
   8.198 +{
   8.199 +	u16 i = 0;
   8.200 +
   8.201 +	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   8.202 +	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   8.203 +	IXGBE_READ_REG(hw, IXGBE_ERRBC);
   8.204 +	IXGBE_READ_REG(hw, IXGBE_MSPDC);
   8.205 +	for (i = 0; i < 8; i++)
   8.206 +		IXGBE_READ_REG(hw, IXGBE_MPC(i));
   8.207 +
   8.208 +	IXGBE_READ_REG(hw, IXGBE_MLFC);
   8.209 +	IXGBE_READ_REG(hw, IXGBE_MRFC);
   8.210 +	IXGBE_READ_REG(hw, IXGBE_RLEC);
   8.211 +	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   8.212 +	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   8.213 +	IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   8.214 +	IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   8.215 +
   8.216 +	for (i = 0; i < 8; i++) {
   8.217 +		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   8.218 +		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   8.219 +		IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   8.220 +		IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   8.221 +	}
   8.222 +	IXGBE_READ_REG(hw, IXGBE_PRC64);
   8.223 +	IXGBE_READ_REG(hw, IXGBE_PRC127);
   8.224 +	IXGBE_READ_REG(hw, IXGBE_PRC255);
   8.225 +	IXGBE_READ_REG(hw, IXGBE_PRC511);
   8.226 +	IXGBE_READ_REG(hw, IXGBE_PRC1023);
   8.227 +	IXGBE_READ_REG(hw, IXGBE_PRC1522);
   8.228 +	IXGBE_READ_REG(hw, IXGBE_GPRC);
   8.229 +	IXGBE_READ_REG(hw, IXGBE_BPRC);
   8.230 +	IXGBE_READ_REG(hw, IXGBE_MPRC);
   8.231 +	IXGBE_READ_REG(hw, IXGBE_GPTC);
   8.232 +	IXGBE_READ_REG(hw, IXGBE_GORCL);
   8.233 +	IXGBE_READ_REG(hw, IXGBE_GORCH);
   8.234 +	IXGBE_READ_REG(hw, IXGBE_GOTCL);
   8.235 +	IXGBE_READ_REG(hw, IXGBE_GOTCH);
   8.236 +	for (i = 0; i < 8; i++)
   8.237 +		IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   8.238 +	IXGBE_READ_REG(hw, IXGBE_RUC);
   8.239 +	IXGBE_READ_REG(hw, IXGBE_RFC);
   8.240 +	IXGBE_READ_REG(hw, IXGBE_ROC);
   8.241 +	IXGBE_READ_REG(hw, IXGBE_RJC);
   8.242 +	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   8.243 +	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   8.244 +	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   8.245 +	IXGBE_READ_REG(hw, IXGBE_TORL);
   8.246 +	IXGBE_READ_REG(hw, IXGBE_TORH);
   8.247 +	IXGBE_READ_REG(hw, IXGBE_TPR);
   8.248 +	IXGBE_READ_REG(hw, IXGBE_TPT);
   8.249 +	IXGBE_READ_REG(hw, IXGBE_PTC64);
   8.250 +	IXGBE_READ_REG(hw, IXGBE_PTC127);
   8.251 +	IXGBE_READ_REG(hw, IXGBE_PTC255);
   8.252 +	IXGBE_READ_REG(hw, IXGBE_PTC511);
   8.253 +	IXGBE_READ_REG(hw, IXGBE_PTC1023);
   8.254 +	IXGBE_READ_REG(hw, IXGBE_PTC1522);
   8.255 +	IXGBE_READ_REG(hw, IXGBE_MPTC);
   8.256 +	IXGBE_READ_REG(hw, IXGBE_BPTC);
   8.257 +	for (i = 0; i < 16; i++) {
   8.258 +		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   8.259 +		IXGBE_READ_REG(hw, IXGBE_QBRC(i));
   8.260 +		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   8.261 +		IXGBE_READ_REG(hw, IXGBE_QBTC(i));
   8.262 +	}
   8.263 +
   8.264 +	return 0;
   8.265 +}
   8.266 +
   8.267 +/**
   8.268 + *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
   8.269 + *  @hw: pointer to hardware structure
   8.270 + *  @pba_num: stores the part number from the EEPROM
   8.271 + *
   8.272 + *  Reads the part number from the EEPROM.
   8.273 + **/
   8.274 +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
   8.275 +{
   8.276 +	s32 ret_val;
   8.277 +	u16 data;
   8.278 +
   8.279 +	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
   8.280 +	if (ret_val) {
   8.281 +		hw_dbg(hw, "NVM Read Error\n");
   8.282 +		return ret_val;
   8.283 +	}
   8.284 +	*pba_num = (u32)(data << 16);
   8.285 +
   8.286 +	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
   8.287 +	if (ret_val) {
   8.288 +		hw_dbg(hw, "NVM Read Error\n");
   8.289 +		return ret_val;
   8.290 +	}
   8.291 +	*pba_num |= data;
   8.292 +
   8.293 +	return 0;
   8.294 +}
   8.295 +
   8.296 +/**
   8.297 + *  ixgbe_get_mac_addr_generic - Generic get MAC address
   8.298 + *  @hw: pointer to hardware structure
   8.299 + *  @mac_addr: Adapter MAC address
   8.300 + *
   8.301 + *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
   8.302 + *  A reset of the adapter must be performed prior to calling this function
   8.303 + *  in order for the MAC address to have been loaded from the EEPROM into RAR0
   8.304 + **/
   8.305 +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
   8.306 +{
   8.307 +	u32 rar_high;
   8.308 +	u32 rar_low;
   8.309 +	u16 i;
   8.310 +
   8.311 +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
   8.312 +	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
   8.313 +
   8.314 +	for (i = 0; i < 4; i++)
   8.315 +		mac_addr[i] = (u8)(rar_low >> (i*8));
   8.316 +
   8.317 +	for (i = 0; i < 2; i++)
   8.318 +		mac_addr[i+4] = (u8)(rar_high >> (i*8));
   8.319 +
   8.320 +	return 0;
   8.321 +}
   8.322 +
   8.323 +/**
   8.324 + *  ixgbe_get_bus_info_generic - Generic set PCI bus info
   8.325 + *  @hw: pointer to hardware structure
   8.326 + *
   8.327 + *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
   8.328 + **/
   8.329 +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
   8.330 +{
   8.331 +	struct ixgbe_mac_info *mac = &hw->mac;
   8.332 +	u16 link_status;
   8.333 +
   8.334 +	hw->bus.type = ixgbe_bus_type_pci_express;
   8.335 +
   8.336 +	/* Get the negotiated link width and speed from PCI config space */
   8.337 +	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
   8.338 +
   8.339 +	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
   8.340 +	case IXGBE_PCI_LINK_WIDTH_1:
   8.341 +		hw->bus.width = ixgbe_bus_width_pcie_x1;
   8.342 +		break;
   8.343 +	case IXGBE_PCI_LINK_WIDTH_2:
   8.344 +		hw->bus.width = ixgbe_bus_width_pcie_x2;
   8.345 +		break;
   8.346 +	case IXGBE_PCI_LINK_WIDTH_4:
   8.347 +		hw->bus.width = ixgbe_bus_width_pcie_x4;
   8.348 +		break;
   8.349 +	case IXGBE_PCI_LINK_WIDTH_8:
   8.350 +		hw->bus.width = ixgbe_bus_width_pcie_x8;
   8.351 +		break;
   8.352 +	default:
   8.353 +		hw->bus.width = ixgbe_bus_width_unknown;
   8.354 +		break;
   8.355 +	}
   8.356 +
   8.357 +	switch (link_status & IXGBE_PCI_LINK_SPEED) {
   8.358 +	case IXGBE_PCI_LINK_SPEED_2500:
   8.359 +		hw->bus.speed = ixgbe_bus_speed_2500;
   8.360 +		break;
   8.361 +	case IXGBE_PCI_LINK_SPEED_5000:
   8.362 +		hw->bus.speed = ixgbe_bus_speed_5000;
   8.363 +		break;
   8.364 +	default:
   8.365 +		hw->bus.speed = ixgbe_bus_speed_unknown;
   8.366 +		break;
   8.367 +	}
   8.368 +
   8.369 +	mac->ops.set_lan_id(hw);
   8.370 +
   8.371 +	return 0;
   8.372 +}
   8.373 +
   8.374 +/**
   8.375 + *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
   8.376 + *  @hw: pointer to the HW structure
   8.377 + *
   8.378 + *  Determines the LAN function id by reading memory-mapped registers
   8.379 + *  and swaps the port value if requested.
   8.380 + **/
   8.381 +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
   8.382 +{
   8.383 +	struct ixgbe_bus_info *bus = &hw->bus;
   8.384 +	u32 reg;
   8.385 +
   8.386 +	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
   8.387 +	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
   8.388 +
   8.389 +	/* check for a port swap */
   8.390 +	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
   8.391 +	if (reg & IXGBE_FACTPS_LFS)
   8.392 +		bus->func ^= 0x1;
   8.393 +}
   8.394 +
   8.395 +/**
   8.396 + *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
   8.397 + *  @hw: pointer to hardware structure
   8.398 + *
   8.399 + *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
   8.400 + *  disables transmit and receive units. The adapter_stopped flag is used by
   8.401 + *  the shared code and drivers to determine if the adapter is in a stopped
   8.402 + *  state and should not touch the hardware.
   8.403 + **/
   8.404 +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
   8.405 +{
   8.406 +	u32 number_of_queues;
   8.407 +	u32 reg_val;
   8.408 +	u16 i;
   8.409 +
   8.410 +	/*
   8.411 +	 * Set the adapter_stopped flag so other driver functions stop touching
   8.412 +	 * the hardware
   8.413 +	 */
   8.414 +	hw->adapter_stopped = true;
   8.415 +
   8.416 +	/* Disable the receive unit */
   8.417 +	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   8.418 +	reg_val &= ~(IXGBE_RXCTRL_RXEN);
   8.419 +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
   8.420 +	IXGBE_WRITE_FLUSH(hw);
   8.421 +	msleep(2);
   8.422 +
   8.423 +	/* Clear interrupt mask to stop from interrupts being generated */
   8.424 +	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   8.425 +
   8.426 +	/* Clear any pending interrupts */
   8.427 +	IXGBE_READ_REG(hw, IXGBE_EICR);
   8.428 +
   8.429 +	/* Disable the transmit unit.  Each queue must be disabled. */
   8.430 +	number_of_queues = hw->mac.max_tx_queues;
   8.431 +	for (i = 0; i < number_of_queues; i++) {
   8.432 +		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
   8.433 +		if (reg_val & IXGBE_TXDCTL_ENABLE) {
   8.434 +			reg_val &= ~IXGBE_TXDCTL_ENABLE;
   8.435 +			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
   8.436 +		}
   8.437 +	}
   8.438 +
   8.439 +	/*
    8.440 +	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
   8.441 +	 * access and verify no pending requests
   8.442 +	 */
   8.443 +	if (ixgbe_disable_pcie_master(hw) != 0)
   8.444 +		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
   8.445 +
   8.446 +	return 0;
   8.447 +}
   8.448 +
   8.449 +/**
   8.450 + *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
   8.451 + *  @hw: pointer to hardware structure
   8.452 + *  @index: led number to turn on
   8.453 + **/
   8.454 +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
   8.455 +{
   8.456 +	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   8.457 +
   8.458 +	/* To turn on the LED, set mode to ON. */
   8.459 +	led_reg &= ~IXGBE_LED_MODE_MASK(index);
   8.460 +	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
   8.461 +	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
   8.462 +	IXGBE_WRITE_FLUSH(hw);
   8.463 +
   8.464 +	return 0;
   8.465 +}
   8.466 +
   8.467 +/**
   8.468 + *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
   8.469 + *  @hw: pointer to hardware structure
   8.470 + *  @index: led number to turn off
   8.471 + **/
   8.472 +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
   8.473 +{
   8.474 +	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
   8.475 +
   8.476 +	/* To turn off the LED, set mode to OFF. */
   8.477 +	led_reg &= ~IXGBE_LED_MODE_MASK(index);
   8.478 +	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
   8.479 +	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
   8.480 +	IXGBE_WRITE_FLUSH(hw);
   8.481 +
   8.482 +	return 0;
   8.483 +}
   8.484 +
/**
 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 *  @hw: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 *  ixgbe_hw struct in order to set up EEPROM access.  Detection runs only
 *  once: subsequent calls are no-ops because eeprom->type is no longer
 *  ixgbe_eeprom_uninitialized.  Always returns 0.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
			                    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
			                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* Address width comes from EEC; note this is derived even
		 * when no EEPROM is present (the read above is
		 * unconditional). */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
		          "%d\n", eeprom->type, eeprom->word_size,
		          eeprom->address_bits);
	}

	return 0;
}
   8.533 +
/**
 *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @data: 16 bit word to be written to the EEPROM
 *
 *  Bit-bangs a single word to a SPI EEPROM: WREN, then WRITE with the
 *  byte address (offset*2), then the byte-swapped data word.
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 *
 *  Returns 0 on success, IXGBE_ERR_EEPROM if the offset is out of range
 *  or the device never reports ready, or the error from
 *  ixgbe_acquire_eeprom (e.g. IXGBE_ERR_SWFW_SYNC).
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	/* Ensure word_size/address_bits are populated before validating */
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		ixgbe_standby_eeprom(hw);

		/*  Send the WRITE ENABLE command (8 bit opcode )  */
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
		                            IXGBE_EEPROM_OPCODE_BITS);

		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
		                            IXGBE_EEPROM_OPCODE_BITS);
		/* Byte address: each 16-bit word occupies two bytes */
		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
		                            hw->eeprom.address_bits);

		/* Send the data, byte-swapped so the low byte of the word
		 * is clocked out first (shift-out is MSB-first) */
		data = (data >> 8) | (data << 8);
		ixgbe_shift_out_eeprom_bits(hw, data, 16);
		ixgbe_standby_eeprom(hw);

		/* Wait for the internal write cycle to complete */
		msleep(hw->eeprom.semaphore_delay);
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}
   8.600 +
/**
 *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @data: read 16 bit value from EEPROM
 *
 *  Reads 16 bit value from EEPROM through bit-bang method.
 *
 *  Returns 0 on success, IXGBE_ERR_EEPROM if the offset is out of range
 *  or the device never reports ready, or the error from
 *  ixgbe_acquire_eeprom.
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                       u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;

	/* Ensure word_size/address_bits are populated before validating */
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr); the address is a
		 * byte address, hence offset*2 */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
		                            IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
		                            hw->eeprom.address_bits);

		/* Read the data and byte-swap back to host word order. */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		*data = (word_in >> 8) | (word_in << 8);

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}
   8.660 +
/**
 *  ixgbe_read_eeprom_generic - Read EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of  word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the EERD register.
 *
 *  Returns 0 on success, IXGBE_ERR_EEPROM if the offset is out of range
 *  or the EERD done bit is never set.  *data is only written on success.
 **/
s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	u32 eerd;
	s32 status;

	/* Ensure word_size is populated before validating the offset */
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Compose the EERD command: address field plus the start bit */
	eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
	       IXGBE_EEPROM_READ_REG_START;

	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
	status = ixgbe_poll_eeprom_eerd_done(hw);

	if (status == 0)
		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
		         IXGBE_EEPROM_READ_REG_DATA);
	else
		hw_dbg(hw, "Eeprom read timed out\n");

out:
	return status;
}
   8.696 +
   8.697 +/**
   8.698 + *  ixgbe_poll_eeprom_eerd_done - Poll EERD status
   8.699 + *  @hw: pointer to hardware structure
   8.700 + *
   8.701 + *  Polls the status bit (bit 1) of the EERD to determine when the read is done.
   8.702 + **/
   8.703 +static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
   8.704 +{
   8.705 +	u32 i;
   8.706 +	u32 reg;
   8.707 +	s32 status = IXGBE_ERR_EEPROM;
   8.708 +
   8.709 +	for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) {
   8.710 +		reg = IXGBE_READ_REG(hw, IXGBE_EERD);
   8.711 +		if (reg & IXGBE_EEPROM_READ_REG_DONE) {
   8.712 +			status = 0;
   8.713 +			break;
   8.714 +		}
   8.715 +		udelay(5);
   8.716 +	}
   8.717 +	return status;
   8.718 +}
   8.719 +
/**
 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *
 *  Prepares EEPROM for access using bit-bang method. This function should
 *  be called before issuing a command to the EEPROM.
 *
 *  Takes the SW/FW EEPROM semaphore first, then performs the EEC
 *  REQ/GNT handshake.  On success the EEC CS and SK lines are left low.
 *  Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 *  taken, or IXGBE_ERR_EEPROM if the grant is never given (the REQ bit
 *  and semaphore are released before returning in that case).
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 eec;
	u32 i;

	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == 0) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			udelay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			hw_dbg(hw, "Could not acquire EEPROM grant\n");

			ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}
	}

	/* Setup EEPROM for Read/Write */
	if (status == 0) {
		/* Clear CS and SK */
		eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);
		udelay(1);
	}
	return status;
}
   8.771 +
/**
 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang
 *  method.  Two semaphores are taken in order: the inter-driver SMBI bit,
 *  then the SW/FW SWESMBI bit.  The poll timeout is scaled with the
 *  EEPROM word size.
 *
 *  Returns 0 when both semaphores are held, IXGBE_ERR_EEPROM otherwise
 *  (both semaphores are released on the SWESMBI failure path).
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout;
	u32 i;
	u32 swsm;

	/* Set timeout value based on size of EEPROM */
	timeout = hw->eeprom.word_size + 1;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = 0;
			break;
		}
		msleep(1);
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == 0) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			udelay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
			         "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	return status;
}
   8.836 +
   8.837 +/**
   8.838 + *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
   8.839 + *  @hw: pointer to hardware structure
   8.840 + *
   8.841 + *  This function clears hardware semaphore bits.
   8.842 + **/
   8.843 +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
   8.844 +{
   8.845 +	u32 swsm;
   8.846 +
   8.847 +	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
   8.848 +
   8.849 +	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
   8.850 +	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
   8.851 +	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
   8.852 +	IXGBE_WRITE_FLUSH(hw);
   8.853 +}
   8.854 +
   8.855 +/**
   8.856 + *  ixgbe_ready_eeprom - Polls for EEPROM ready
   8.857 + *  @hw: pointer to hardware structure
   8.858 + **/
   8.859 +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
   8.860 +{
   8.861 +	s32 status = 0;
   8.862 +	u16 i;
   8.863 +	u8 spi_stat_reg;
   8.864 +
   8.865 +	/*
   8.866 +	 * Read "Status Register" repeatedly until the LSB is cleared.  The
   8.867 +	 * EEPROM will signal that the command has been completed by clearing
   8.868 +	 * bit 0 of the internal status register.  If it's not cleared within
   8.869 +	 * 5 milliseconds, then error out.
   8.870 +	 */
   8.871 +	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
   8.872 +		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
   8.873 +		                            IXGBE_EEPROM_OPCODE_BITS);
   8.874 +		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
   8.875 +		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
   8.876 +			break;
   8.877 +
   8.878 +		udelay(5);
   8.879 +		ixgbe_standby_eeprom(hw);
   8.880 +	};
   8.881 +
   8.882 +	/*
   8.883 +	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
   8.884 +	 * devices (and only 0-5mSec on 5V devices)
   8.885 +	 */
   8.886 +	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
   8.887 +		hw_dbg(hw, "SPI EEPROM Status error\n");
   8.888 +		status = IXGBE_ERR_EEPROM;
   8.889 +	}
   8.890 +
   8.891 +	return status;
   8.892 +}
   8.893 +
/**
 *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 *  @hw: pointer to hardware structure
 *
 *  Raises then lowers the chip-select line (with 1us settle time after
 *  each edge) so the SPI EEPROM terminates the current command.
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
   8.914 +
   8.915 +/**
   8.916 + *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
   8.917 + *  @hw: pointer to hardware structure
   8.918 + *  @data: data to send to the EEPROM
   8.919 + *  @count: number of bits to shift out
   8.920 + **/
   8.921 +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
   8.922 +                                        u16 count)
   8.923 +{
   8.924 +	u32 eec;
   8.925 +	u32 mask;
   8.926 +	u32 i;
   8.927 +
   8.928 +	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
   8.929 +
   8.930 +	/*
   8.931 +	 * Mask is used to shift "count" bits of "data" out to the EEPROM
   8.932 +	 * one bit at a time.  Determine the starting bit based on count
   8.933 +	 */
   8.934 +	mask = 0x01 << (count - 1);
   8.935 +
   8.936 +	for (i = 0; i < count; i++) {
   8.937 +		/*
   8.938 +		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
   8.939 +		 * "1", and then raising and then lowering the clock (the SK
   8.940 +		 * bit controls the clock input to the EEPROM).  A "0" is
   8.941 +		 * shifted out to the EEPROM by setting "DI" to "0" and then
   8.942 +		 * raising and then lowering the clock.
   8.943 +		 */
   8.944 +		if (data & mask)
   8.945 +			eec |= IXGBE_EEC_DI;
   8.946 +		else
   8.947 +			eec &= ~IXGBE_EEC_DI;
   8.948 +
   8.949 +		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
   8.950 +		IXGBE_WRITE_FLUSH(hw);
   8.951 +
   8.952 +		udelay(1);
   8.953 +
   8.954 +		ixgbe_raise_eeprom_clk(hw, &eec);
   8.955 +		ixgbe_lower_eeprom_clk(hw, &eec);
   8.956 +
   8.957 +		/*
   8.958 +		 * Shift mask to signify next bit of data to shift in to the
   8.959 +		 * EEPROM
   8.960 +		 */
   8.961 +		mask = mask >> 1;
   8.962 +	};
   8.963 +
   8.964 +	/* We leave the "DI" bit set to "0" when we leave this routine. */
   8.965 +	eec &= ~IXGBE_EEC_DI;
   8.966 +	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
   8.967 +	IXGBE_WRITE_FLUSH(hw);
   8.968 +}
   8.969 +
/**
 *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to clock in (must be 1..16; result is MSB first)
 *
 *  Returns the @count bits read from the EEC DO line, accumulated MSB
 *  first into a u16.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Keep DI clear in the cached value used for clocking */
		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
  8.1006 +
  8.1007 +/**
  8.1008 + *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
  8.1009 + *  @hw: pointer to hardware structure
  8.1010 + *  @eec: EEC register's current value
  8.1011 + **/
  8.1012 +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
  8.1013 +{
  8.1014 +	/*
  8.1015 +	 * Raise the clock input to the EEPROM
  8.1016 +	 * (setting the SK bit), then delay
  8.1017 +	 */
  8.1018 +	*eec = *eec | IXGBE_EEC_SK;
  8.1019 +	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
  8.1020 +	IXGBE_WRITE_FLUSH(hw);
  8.1021 +	udelay(1);
  8.1022 +}
  8.1023 +
  8.1024 +/**
  8.1025 + *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
  8.1026 + *  @hw: pointer to hardware structure
  8.1027 + *  @eecd: EECD's current value
  8.1028 + **/
  8.1029 +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
  8.1030 +{
  8.1031 +	/*
  8.1032 +	 * Lower the clock input to the EEPROM (clearing the SK bit), then
  8.1033 +	 * delay
  8.1034 +	 */
  8.1035 +	*eec = *eec & ~IXGBE_EEC_SK;
  8.1036 +	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
  8.1037 +	IXGBE_WRITE_FLUSH(hw);
  8.1038 +	udelay(1);
  8.1039 +}
  8.1040 +
/**
 *  ixgbe_release_eeprom - Release EEPROM, release semaphores
 *  @hw: pointer to hardware structure
 *
 *  Deselects the SPI part (CS high, SK low), drops the EEC REQ bit, and
 *  releases the SW/FW EEPROM semaphore taken by ixgbe_acquire_eeprom.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	udelay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
}
  8.1065 +
/**
 *  ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
 *  @hw: pointer to hardware structure
 *
 *  Sums every EEPROM word below the checksum word, plus the words of each
 *  section referenced by the pointer words from IXGBE_PCIE_ANALOG_PTR up
 *  to (but excluding) IXGBE_FW_PTR, and returns IXGBE_EEPROM_SUM minus
 *  that sum — the value which makes the total come out to
 *  IXGBE_EEPROM_SUM.
 *
 *  NOTE(review): read failures in the pointer regions are silently
 *  ignored (return values of eeprom.ops.read are unchecked there), so a
 *  failing EEPROM can yield a bogus checksum — confirm this is intended.
 **/
static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				/* Word at 'pointer' is the section length;
				 * the payload follows it. */
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}
  8.1109 +
/**
 *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 *  @hw: pointer to hardware structure
 *  @checksum_val: calculated checksum
 *
 *  Performs checksum calculation and validates the EEPROM checksum.  If the
 *  caller does not need checksum_val, the value can be NULL.
 *
 *  Returns 0 when the stored checksum matches the calculated one,
 *  IXGBE_ERR_EEPROM_CHECKSUM on mismatch, or the error from the probe
 *  read of word 0.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
                                           u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == 0) {
		checksum = ixgbe_calc_eeprom_checksum(hw);

		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		hw_dbg(hw, "EEPROM read failed\n");
	}

	return status;
}
  8.1153 +
  8.1154 +/**
  8.1155 + *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
  8.1156 + *  @hw: pointer to hardware structure
  8.1157 + **/
  8.1158 +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
  8.1159 +{
  8.1160 +	s32 status;
  8.1161 +	u16 checksum;
  8.1162 +
  8.1163 +	/*
  8.1164 +	 * Read the first word from the EEPROM. If this times out or fails, do
  8.1165 +	 * not continue or we could be in for a very long wait while every
  8.1166 +	 * EEPROM read fails
  8.1167 +	 */
  8.1168 +	status = hw->eeprom.ops.read(hw, 0, &checksum);
  8.1169 +
  8.1170 +	if (status == 0) {
  8.1171 +		checksum = ixgbe_calc_eeprom_checksum(hw);
  8.1172 +		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
  8.1173 +		                              checksum);
  8.1174 +	} else {
  8.1175 +		hw_dbg(hw, "EEPROM read failed\n");
  8.1176 +	}
  8.1177 +
  8.1178 +	return status;
  8.1179 +}
  8.1180 +
/**
 *  ixgbe_validate_mac_addr - Validate MAC address
 *  @mac_addr: pointer to MAC address.
 *
 *  Tests a MAC address to ensure it is a valid Individual Address:
 *  rejects multicast, broadcast, and all-zero addresses.  Returns 0 for
 *  a valid address, IXGBE_ERR_INVALID_MAC_ADDR otherwise.
 *
 *  NOTE(review): 'hw' is referenced in the hw_dbg calls below but is not
 *  a parameter of this function — this only compiles if hw_dbg is a
 *  macro that discards its first argument; verify against the osdep
 *  headers.
 **/
s32 ixgbe_validate_mac_addr(u8 *mac_addr)
{
	s32 status = 0;

	/* Make sure it is not a multicast address */
	if (IXGBE_IS_MULTICAST(mac_addr)) {
		hw_dbg(hw, "MAC address is multicast\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	/* Not a broadcast address */
	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
		hw_dbg(hw, "MAC address is broadcast\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	/* Reject the zero address */
	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
	           mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
		hw_dbg(hw, "MAC address is all zeros\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	}
	return status;
}
  8.1207 +
/**
 *  ixgbe_set_rar_generic - Set Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: VMDq "set" or "pool" index
 *  @enable_addr: set flag that address is active
 *
 *  Puts an ethernet address into a receive address register.  Always
 *  returns 0, even when @index is out of range (only a debug message is
 *  emitted in that case).
 *
 *  NOTE(review): set_vmdq is invoked before the index range check, so an
 *  out-of-range index is still passed to it — confirm the callee
 *  validates its argument.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
                          u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/* Make sure we are using a valid rar index range */
	if (index < rar_entries) {
		/*
		 * HW expects these in little endian so we reverse the byte
		 * order from network order (big endian) to little endian
		 */
		rar_low = ((u32)addr[0] |
		           ((u32)addr[1] << 8) |
		           ((u32)addr[2] << 16) |
		           ((u32)addr[3] << 24));
		/*
		 * Some parts put the VMDq setting in the extra RAH bits,
		 * so save everything except the lower 16 bits that hold part
		 * of the address and the address valid bit.
		 */
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
		rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

		if (enable_addr != 0)
			rar_high |= IXGBE_RAH_AV;

		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
	} else {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
	}

	return 0;
}
  8.1257 +
/**
 *  ixgbe_clear_rar_generic - Remove Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *
 *  Clears an ethernet address from a receive address register.  Always
 *  returns 0, even when @index is out of range.
 *
 *  NOTE(review): clear_vmdq is called unconditionally, i.e. also when
 *  @index failed the range check — confirm the callee tolerates that.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index < rar_entries) {
		/*
		 * Some parts put the VMDq setting in the extra RAH bits,
		 * so save everything except the lower 16 bits that hold part
		 * of the address and the address valid bit.
		 */
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
	} else {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
	}

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}
  8.1291 +
  8.1292 +/**
  8.1293 + *  ixgbe_enable_rar - Enable Rx address register
  8.1294 + *  @hw: pointer to hardware structure
  8.1295 + *  @index: index into the RAR table
  8.1296 + *
  8.1297 + *  Enables the select receive address register.
  8.1298 + **/
  8.1299 +static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
  8.1300 +{
  8.1301 +	u32 rar_high;
  8.1302 +
  8.1303 +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
  8.1304 +	rar_high |= IXGBE_RAH_AV;
  8.1305 +	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
  8.1306 +}
  8.1307 +
  8.1308 +/**
  8.1309 + *  ixgbe_disable_rar - Disable Rx address register
  8.1310 + *  @hw: pointer to hardware structure
  8.1311 + *  @index: index into the RAR table
  8.1312 + *
  8.1313 + *  Disables the select receive address register.
  8.1314 + **/
  8.1315 +static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
  8.1316 +{
  8.1317 +	u32 rar_high;
  8.1318 +
  8.1319 +	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
  8.1320 +	rar_high &= (~IXGBE_RAH_AV);
  8.1321 +	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
  8.1322 +}
  8.1323 +
/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.  Always returns 0.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
		          hw->mac.addr[0], hw->mac.addr[1],
		          hw->mac.addr[2]);
		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
		          hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
		          hw->mac.addr[0], hw->mac.addr[1],
		          hw->mac.addr[2]);
		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
		          hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}
	/* Reset the address-filter bookkeeping; RAR0 is now in use */
	hw->addr_ctrl.overflow_promisc = 0;

	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mc_addr_in_rar_count = 0;
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return 0;
}
  8.1387 +
  8.1388 +/**
  8.1389 + *  ixgbe_add_uc_addr - Adds a secondary unicast address.
  8.1390 + *  @hw: pointer to hardware structure
  8.1391 + *  @addr: new address
  8.1392 + *
  8.1393 + *  Adds it to unused receive address register or goes into promiscuous mode.
  8.1394 + **/
  8.1395 +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
  8.1396 +{
  8.1397 +	u32 rar_entries = hw->mac.num_rar_entries;
  8.1398 +	u32 rar;
  8.1399 +
  8.1400 +	hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
  8.1401 +	          addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
  8.1402 +
  8.1403 +	/*
  8.1404 +	 * Place this address in the RAR if there is room,
  8.1405 +	 * else put the controller into promiscuous mode
  8.1406 +	 */
  8.1407 +	if (hw->addr_ctrl.rar_used_count < rar_entries) {
  8.1408 +		rar = hw->addr_ctrl.rar_used_count -
  8.1409 +		      hw->addr_ctrl.mc_addr_in_rar_count;
  8.1410 +		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
  8.1411 +		hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
  8.1412 +		hw->addr_ctrl.rar_used_count++;
  8.1413 +	} else {
  8.1414 +		hw->addr_ctrl.overflow_promisc++;
  8.1415 +	}
  8.1416 +
  8.1417 +	hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
  8.1418 +}
  8.1419 +
  8.1420 +/**
  8.1421 + *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
  8.1422 + *  @hw: pointer to hardware structure
  8.1423 + *  @addr_list: the list of new addresses
  8.1424 + *  @addr_count: number of addresses
  8.1425 + *  @next: iterator function to walk the address list
  8.1426 + *
  8.1427 + *  The given list replaces any existing list.  Clears the secondary addrs from
  8.1428 + *  receive address registers.  Uses unused receive address registers for the
  8.1429 + *  first secondary addresses, and falls back to promiscuous mode as needed.
  8.1430 + *
  8.1431 + *  Drivers using secondary unicast addresses must set user_set_promisc when
  8.1432 + *  manually putting the device into promiscuous mode.
  8.1433 + **/
  8.1434 +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
  8.1435 +                                      u32 addr_count, ixgbe_mc_addr_itr next)
  8.1436 +{
  8.1437 +	u8 *addr;
  8.1438 +	u32 i;
  8.1439 +	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
  8.1440 +	u32 uc_addr_in_use;
  8.1441 +	u32 fctrl;
  8.1442 +	u32 vmdq;
  8.1443 +
  8.1444 +	/*
  8.1445 +	 * Clear accounting of old secondary address list,
  8.1446 +	 * don't count RAR[0]
  8.1447 +	 */
  8.1448 +	uc_addr_in_use = hw->addr_ctrl.rar_used_count -
  8.1449 +	                 hw->addr_ctrl.mc_addr_in_rar_count - 1;
  8.1450 +	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
  8.1451 +	hw->addr_ctrl.overflow_promisc = 0;
  8.1452 +
  8.1453 +	/* Zero out the other receive addresses */
  8.1454 +	hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
  8.1455 +	for (i = 1; i <= uc_addr_in_use; i++) {
  8.1456 +		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
  8.1457 +		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
  8.1458 +	}
  8.1459 +
  8.1460 +	/* Add the new addresses */
  8.1461 +	for (i = 0; i < addr_count; i++) {
  8.1462 +		hw_dbg(hw, " Adding the secondary addresses:\n");
  8.1463 +		addr = next(hw, &addr_list, &vmdq);
  8.1464 +		ixgbe_add_uc_addr(hw, addr, vmdq);
  8.1465 +	}
  8.1466 +
  8.1467 +	if (hw->addr_ctrl.overflow_promisc) {
  8.1468 +		/* enable promisc if not already in overflow or set by user */
  8.1469 +		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
  8.1470 +			hw_dbg(hw, " Entering address overflow promisc mode\n");
  8.1471 +			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  8.1472 +			fctrl |= IXGBE_FCTRL_UPE;
  8.1473 +			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
  8.1474 +		}
  8.1475 +	} else {
  8.1476 +		/* only disable if set by overflow, not by user */
  8.1477 +		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
  8.1478 +			hw_dbg(hw, " Leaving address overflow promisc mode\n");
  8.1479 +			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  8.1480 +			fctrl &= ~IXGBE_FCTRL_UPE;
  8.1481 +			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
  8.1482 +		}
  8.1483 +	}
  8.1484 +
  8.1485 +	hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
  8.1486 +	return 0;
  8.1487 +}
  8.1488 +
  8.1489 +/**
  8.1490 + *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
  8.1491 + *  @hw: pointer to hardware structure
  8.1492 + *  @mc_addr: the multicast address
  8.1493 + *
  8.1494 + *  Extracts the 12 bits from a multicast address to determine which
  8.1495 + *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
  8.1496 + *  incoming rx multicast addresses, to determine the bit-vector to check in
  8.1497 + *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
  8.1498 + *  by the MO field of the MCSTCTRL. The MO field is set during initialization
  8.1499 + *  to mc_filter_type.
  8.1500 + **/
  8.1501 +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
  8.1502 +{
  8.1503 +	u32 vector = 0;
  8.1504 +
  8.1505 +	switch (hw->mac.mc_filter_type) {
  8.1506 +	case 0:   /* use bits [47:36] of the address */
  8.1507 +		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
  8.1508 +		break;
  8.1509 +	case 1:   /* use bits [46:35] of the address */
  8.1510 +		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
  8.1511 +		break;
  8.1512 +	case 2:   /* use bits [45:34] of the address */
  8.1513 +		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
  8.1514 +		break;
  8.1515 +	case 3:   /* use bits [43:32] of the address */
  8.1516 +		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
  8.1517 +		break;
  8.1518 +	default:  /* Invalid mc_filter_type */
  8.1519 +		hw_dbg(hw, "MC filter type param set incorrectly\n");
  8.1520 +		break;
  8.1521 +	}
  8.1522 +
  8.1523 +	/* vector can only be 12-bits or boundary will be exceeded */
  8.1524 +	vector &= 0xFFF;
  8.1525 +	return vector;
  8.1526 +}
  8.1527 +
  8.1528 +/**
  8.1529 + *  ixgbe_set_mta - Set bit-vector in multicast table
  8.1530 + *  @hw: pointer to hardware structure
  8.1531 + *  @mc_addr: the multicast address to hash into the table
  8.1532 + *
  8.1533 + *  Sets the bit-vector in the multicast table.
  8.1534 + **/
  8.1535 +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
  8.1536 +{
  8.1537 +	u32 vector;
  8.1538 +	u32 vector_bit;
  8.1539 +	u32 vector_reg;
  8.1540 +	u32 mta_reg;
  8.1541 +
  8.1542 +	hw->addr_ctrl.mta_in_use++;
  8.1543 +
  8.1544 +	vector = ixgbe_mta_vector(hw, mc_addr);
  8.1545 +	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
  8.1546 +
  8.1547 +	/*
  8.1548 +	 * The MTA is a register array of 128 32-bit registers. It is treated
  8.1549 +	 * like an array of 4096 bits.  We want to set bit
  8.1550 +	 * BitArray[vector_value]. So we figure out what register the bit is
  8.1551 +	 * in, read it, OR in the new bit, then write back the new value.  The
  8.1552 +	 * register is determined by the upper 7 bits of the vector value and
  8.1553 +	 * the bit within that register are determined by the lower 5 bits of
  8.1554 +	 * the value.
  8.1555 +	 */
  8.1556 +	vector_reg = (vector >> 5) & 0x7F;
  8.1557 +	vector_bit = vector & 0x1F;
  8.1558 +	mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
  8.1559 +	mta_reg |= (1 << vector_bit);
  8.1560 +	IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
  8.1561 +}
  8.1562 +
  8.1563 +/**
  8.1564 + *  ixgbe_add_mc_addr - Adds a multicast address.
  8.1565 + *  @hw: pointer to hardware structure
  8.1566 + *  @mc_addr: new multicast address
  8.1567 + *
  8.1568 + *  Adds it to unused receive address register or to the multicast table.
  8.1569 + **/
  8.1570 +void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
  8.1571 +{
  8.1572 +	u32 rar_entries = hw->mac.num_rar_entries;
  8.1573 +	u32 rar;
  8.1574 +
  8.1575 +	hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
  8.1576 +	          mc_addr[0], mc_addr[1], mc_addr[2],
  8.1577 +	          mc_addr[3], mc_addr[4], mc_addr[5]);
  8.1578 +
  8.1579 +	/*
  8.1580 +	 * Place this multicast address in the RAR if there is room,
  8.1581 +	 * else put it in the MTA
  8.1582 +	 */
  8.1583 +	if (hw->addr_ctrl.rar_used_count < rar_entries) {
  8.1584 +		/* use RAR from the end up for multicast */
  8.1585 +		rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
  8.1586 +		hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
  8.1587 +		hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
  8.1588 +		hw->addr_ctrl.rar_used_count++;
  8.1589 +		hw->addr_ctrl.mc_addr_in_rar_count++;
  8.1590 +	} else {
  8.1591 +		ixgbe_set_mta(hw, mc_addr);
  8.1592 +	}
  8.1593 +
  8.1594 +	hw_dbg(hw, "ixgbe_add_mc_addr Complete\n");
  8.1595 +}
  8.1596 +
  8.1597 +/**
  8.1598 + *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
  8.1599 + *  @hw: pointer to hardware structure
  8.1600 + *  @mc_addr_list: the list of new multicast addresses
  8.1601 + *  @mc_addr_count: number of addresses
  8.1602 + *  @next: iterator function to walk the multicast address list
  8.1603 + *
  8.1604 + *  The given list replaces any existing list. Clears the MC addrs from receive
  8.1605 + *  address registers and the multicast table. Uses unused receive address
  8.1606 + *  registers for the first multicast addresses, and hashes the rest into the
  8.1607 + *  multicast table.
  8.1608 + **/
  8.1609 +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
  8.1610 +                                      u32 mc_addr_count, ixgbe_mc_addr_itr next)
  8.1611 +{
  8.1612 +	u32 i;
  8.1613 +	u32 rar_entries = hw->mac.num_rar_entries;
  8.1614 +	u32 vmdq;
  8.1615 +
  8.1616 +	/*
  8.1617 +	 * Set the new number of MC addresses that we are being requested to
  8.1618 +	 * use.
  8.1619 +	 */
  8.1620 +	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
  8.1621 +	hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count;
  8.1622 +	hw->addr_ctrl.mc_addr_in_rar_count = 0;
  8.1623 +	hw->addr_ctrl.mta_in_use = 0;
  8.1624 +
  8.1625 +	/* Zero out the other receive addresses. */
  8.1626 +	hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
  8.1627 +	          rar_entries - 1);
  8.1628 +	for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
  8.1629 +		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
  8.1630 +		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
  8.1631 +	}
  8.1632 +
  8.1633 +	/* Clear the MTA */
  8.1634 +	hw_dbg(hw, " Clearing MTA\n");
  8.1635 +	for (i = 0; i < hw->mac.mcft_size; i++)
  8.1636 +		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
  8.1637 +
  8.1638 +	/* Add the new addresses */
  8.1639 +	for (i = 0; i < mc_addr_count; i++) {
  8.1640 +		hw_dbg(hw, " Adding the multicast addresses:\n");
  8.1641 +		ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
  8.1642 +	}
  8.1643 +
  8.1644 +	/* Enable mta */
  8.1645 +	if (hw->addr_ctrl.mta_in_use > 0)
  8.1646 +		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
  8.1647 +		                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
  8.1648 +
  8.1649 +	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
  8.1650 +	return 0;
  8.1651 +}
  8.1652 +
  8.1653 +/**
  8.1654 + *  ixgbe_enable_mc_generic - Enable multicast address in RAR
  8.1655 + *  @hw: pointer to hardware structure
  8.1656 + *
  8.1657 + *  Enables multicast address in RAR and the use of the multicast hash table.
  8.1658 + **/
  8.1659 +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
  8.1660 +{
  8.1661 +	u32 i;
  8.1662 +	u32 rar_entries = hw->mac.num_rar_entries;
  8.1663 +	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
  8.1664 +
  8.1665 +	if (a->mc_addr_in_rar_count > 0)
  8.1666 +		for (i = (rar_entries - a->mc_addr_in_rar_count);
  8.1667 +		     i < rar_entries; i++)
  8.1668 +			ixgbe_enable_rar(hw, i);
  8.1669 +
  8.1670 +	if (a->mta_in_use > 0)
  8.1671 +		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
  8.1672 +		                hw->mac.mc_filter_type);
  8.1673 +
  8.1674 +	return 0;
  8.1675 +}
  8.1676 +
  8.1677 +/**
  8.1678 + *  ixgbe_disable_mc_generic - Disable multicast address in RAR
  8.1679 + *  @hw: pointer to hardware structure
  8.1680 + *
  8.1681 + *  Disables multicast address in RAR and the use of the multicast hash table.
  8.1682 + **/
  8.1683 +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
  8.1684 +{
  8.1685 +	u32 i;
  8.1686 +	u32 rar_entries = hw->mac.num_rar_entries;
  8.1687 +	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
  8.1688 +
  8.1689 +	if (a->mc_addr_in_rar_count > 0)
  8.1690 +		for (i = (rar_entries - a->mc_addr_in_rar_count);
  8.1691 +		     i < rar_entries; i++)
  8.1692 +			ixgbe_disable_rar(hw, i);
  8.1693 +
  8.1694 +	if (a->mta_in_use > 0)
  8.1695 +		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
  8.1696 +
  8.1697 +	return 0;
  8.1698 +}
  8.1699 +
  8.1700 +
  8.1701 +
  8.1702 +
  8.1703 +/**
  8.1704 + *  ixgbe_disable_pcie_master - Disable PCI-express master access
  8.1705 + *  @hw: pointer to hardware structure
  8.1706 + *
  8.1707 + *  Disables PCI-Express master access and verifies there are no pending
  8.1708 + *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
  8.1709 + *  bit hasn't caused the master requests to be disabled, else 0
  8.1710 + *  is returned signifying master requests disabled.
  8.1711 + **/
  8.1712 +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
  8.1713 +{
  8.1714 +	u32 i;
  8.1715 +	u32 reg_val;
  8.1716 +	u32 number_of_queues;
  8.1717 +	s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
  8.1718 +
  8.1719 +	/* Disable the receive unit by stopping each queue */
  8.1720 +	number_of_queues = hw->mac.max_rx_queues;
  8.1721 +	for (i = 0; i < number_of_queues; i++) {
  8.1722 +		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
  8.1723 +		if (reg_val & IXGBE_RXDCTL_ENABLE) {
  8.1724 +			reg_val &= ~IXGBE_RXDCTL_ENABLE;
  8.1725 +			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
  8.1726 +		}
  8.1727 +	}
  8.1728 +
  8.1729 +	reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
  8.1730 +	reg_val |= IXGBE_CTRL_GIO_DIS;
  8.1731 +	IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
  8.1732 +
  8.1733 +	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
  8.1734 +		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
  8.1735 +			status = 0;
  8.1736 +			break;
  8.1737 +		}
  8.1738 +		udelay(100);
  8.1739 +	}
  8.1740 +
  8.1741 +	return status;
  8.1742 +}
  8.1743 +
  8.1744 +
  8.1745 +/**
  8.1746 + *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
  8.1747 + *  @hw: pointer to hardware structure
  8.1748 + *  @mask: Mask to specify which semaphore to acquire
  8.1749 + *
  8.1750 + *  Acquires the SWFW semaphore through the GSSR register for the specified
  8.1751 + *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  8.1752 + **/
  8.1753 +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
  8.1754 +{
  8.1755 +	u32 gssr;
  8.1756 +	u32 swmask = mask;
  8.1757 +	u32 fwmask = mask << 5;
  8.1758 +	s32 timeout = 200;
  8.1759 +
  8.1760 +	while (timeout) {
  8.1761 +		if (ixgbe_get_eeprom_semaphore(hw))
  8.1762 +			return -IXGBE_ERR_SWFW_SYNC;
  8.1763 +
  8.1764 +		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
  8.1765 +		if (!(gssr & (fwmask | swmask)))
  8.1766 +			break;
  8.1767 +
  8.1768 +		/*
  8.1769 +		 * Firmware currently using resource (fwmask) or other software
  8.1770 +		 * thread currently using resource (swmask)
  8.1771 +		 */
  8.1772 +		ixgbe_release_eeprom_semaphore(hw);
  8.1773 +		msleep(5);
  8.1774 +		timeout--;
  8.1775 +	}
  8.1776 +
  8.1777 +	if (!timeout) {
  8.1778 +		hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
  8.1779 +		return -IXGBE_ERR_SWFW_SYNC;
  8.1780 +	}
  8.1781 +
  8.1782 +	gssr |= swmask;
  8.1783 +	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
  8.1784 +
  8.1785 +	ixgbe_release_eeprom_semaphore(hw);
  8.1786 +	return 0;
  8.1787 +}
  8.1788 +
  8.1789 +/**
  8.1790 + *  ixgbe_release_swfw_sync - Release SWFW semaphore
  8.1791 + *  @hw: pointer to hardware structure
  8.1792 + *  @mask: Mask to specify which semaphore to release
  8.1793 + *
  8.1794 + *  Releases the SWFW semaphore through the GSSR register for the specified
  8.1795 + *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  8.1796 + **/
  8.1797 +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
  8.1798 +{
  8.1799 +	u32 gssr;
  8.1800 +	u32 swmask = mask;
  8.1801 +
  8.1802 +	ixgbe_get_eeprom_semaphore(hw);
  8.1803 +
  8.1804 +	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
  8.1805 +	gssr &= ~swmask;
  8.1806 +	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
  8.1807 +
  8.1808 +	ixgbe_release_eeprom_semaphore(hw);
  8.1809 +}
  8.1810 +
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/drivers/net/ixgbe/ixgbe_common.h	Fri Jan 30 10:52:47 2009 +0000
     9.3 @@ -0,0 +1,77 @@
     9.4 +/*******************************************************************************
     9.5 +
     9.6 +  Intel 10 Gigabit PCI Express Linux driver
     9.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
     9.8 +
     9.9 +  This program is free software; you can redistribute it and/or modify it
    9.10 +  under the terms and conditions of the GNU General Public License,
    9.11 +  version 2, as published by the Free Software Foundation.
    9.12 +
    9.13 +  This program is distributed in the hope it will be useful, but WITHOUT
    9.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    9.16 +  more details.
    9.17 +
    9.18 +  You should have received a copy of the GNU General Public License along with
    9.19 +  this program; if not, write to the Free Software Foundation, Inc.,
    9.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
    9.21 +
    9.22 +  The full GNU General Public License is included in this distribution in
    9.23 +  the file called "COPYING".
    9.24 +
    9.25 +  Contact Information:
    9.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
    9.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
    9.28 +
    9.29 +*******************************************************************************/
    9.30 +
    9.31 +#ifndef _IXGBE_COMMON_H_
    9.32 +#define _IXGBE_COMMON_H_
    9.33 +
    9.34 +#include "ixgbe_type.h"
    9.35 +
    9.36 +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
    9.37 +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
    9.38 +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
    9.39 +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
    9.40 +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
    9.41 +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
    9.42 +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
    9.43 +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
    9.44 +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
    9.45 +
    9.46 +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
    9.47 +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
    9.48 +
    9.49 +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
    9.50 +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
    9.51 +s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
    9.52 +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
    9.53 +                                       u16 *data);
    9.54 +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
    9.55 +                                           u16 *checksum_val);
    9.56 +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
    9.57 +
    9.58 +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
    9.59 +                          u32 enable_addr);
    9.60 +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
    9.61 +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
    9.62 +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
    9.63 +                                      u32 mc_addr_count,
    9.64 +                                      ixgbe_mc_addr_itr func);
    9.65 +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
    9.66 +                                      u32 addr_count, ixgbe_mc_addr_itr func);
    9.67 +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
    9.68 +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
    9.69 +
    9.70 +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
    9.71 +s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
    9.72 +
    9.73 +s32 ixgbe_validate_mac_addr(u8 *mac_addr);
    9.74 +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
    9.75 +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
    9.76 +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
    9.77 +
    9.78 +s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
    9.79 +s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
    9.80 +#endif /* _IXGBE_COMMON_H_ */
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/drivers/net/ixgbe/ixgbe_dcb.c	Fri Jan 30 10:52:47 2009 +0000
    10.3 @@ -0,0 +1,333 @@
    10.4 +/*******************************************************************************
    10.5 +
    10.6 +  Intel 10 Gigabit PCI Express Linux driver
    10.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
    10.8 +
    10.9 +  This program is free software; you can redistribute it and/or modify it
   10.10 +  under the terms and conditions of the GNU General Public License,
   10.11 +  version 2, as published by the Free Software Foundation.
   10.12 +
   10.13 +  This program is distributed in the hope it will be useful, but WITHOUT
   10.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   10.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   10.16 +  more details.
   10.17 +
   10.18 +  You should have received a copy of the GNU General Public License along with
   10.19 +  this program; if not, write to the Free Software Foundation, Inc.,
   10.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
   10.21 +
   10.22 +  The full GNU General Public License is included in this distribution in
   10.23 +  the file called "COPYING".
   10.24 +
   10.25 +  Contact Information:
   10.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   10.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
   10.28 +
   10.29 +*******************************************************************************/
   10.30 +
   10.31 +
   10.32 +#include "ixgbe_type.h"
   10.33 +#include "ixgbe_dcb.h"
   10.34 +#include "ixgbe_dcb_82598.h"
   10.35 +
   10.36 +/**
   10.37 + * ixgbe_dcb_check_config - Checks DCB settings against configuration rules
   10.38 + * @dcb_config: Pointer to DCB config structure
   10.39 + *
   10.40 + * This function checks DCB rules for DCB settings.
   10.41 + * The following rules are checked:
   10.42 + * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
   10.43 + * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
   10.44 + *    Group must total 100.
   10.45 + * 3. A Traffic Class should not be set to both Link Strict Priority
   10.46 + *    and Group Strict Priority.
   10.47 + * 4. Link strict Bandwidth Groups can only have link strict traffic classes
   10.48 + *    with zero bandwidth.
   10.49 + */
   10.50 +s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config)
   10.51 +{
   10.52 +	struct tc_bw_alloc *p;
   10.53 +	s32 ret_val = 0;
   10.54 +	u8 i, j, bw = 0, bw_id;
   10.55 +	u8 bw_sum[2][MAX_BW_GROUP];
   10.56 +	bool link_strict[2][MAX_BW_GROUP];
   10.57 +
   10.58 +	memset(bw_sum, 0, sizeof(bw_sum));
   10.59 +	memset(link_strict, 0, sizeof(link_strict));
   10.60 +
   10.61 +	/* First Tx, then Rx */
   10.62 +	for (i = 0; i < 2; i++) {
   10.63 +		/* Check each traffic class for rule violation */
   10.64 +		for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
   10.65 +			p = &dcb_config->tc_config[j].path[i];
   10.66 +
   10.67 +			bw = p->bwg_percent;
   10.68 +			bw_id = p->bwg_id;
   10.69 +
   10.70 +			if (bw_id >= MAX_BW_GROUP) {
   10.71 +				ret_val = DCB_ERR_CONFIG;
   10.72 +				goto err_config;
   10.73 +			}
   10.74 +			if (p->prio_type == prio_link) {
   10.75 +				link_strict[i][bw_id] = true;
   10.76 +				/* Link strict should have zero bandwidth */
   10.77 +				if (bw) {
   10.78 +					ret_val = DCB_ERR_LS_BW_NONZERO;
   10.79 +					goto err_config;
   10.80 +				}
   10.81 +			} else if (!bw) {
   10.82 +				/*
   10.83 +				 * Traffic classes without link strict
   10.84 +				 * should have non-zero bandwidth.
   10.85 +				 */
   10.86 +				ret_val = DCB_ERR_TC_BW_ZERO;
   10.87 +				goto err_config;
   10.88 +			}
   10.89 +			bw_sum[i][bw_id] += bw;
   10.90 +		}
   10.91 +
   10.92 +		bw = 0;
   10.93 +
   10.94 +		/* Check each bandwidth group for rule violation */
   10.95 +		for (j = 0; j < MAX_BW_GROUP; j++) {
   10.96 +			bw += dcb_config->bw_percentage[i][j];
   10.97 +			/*
   10.98 +			 * Sum of bandwidth percentages of all traffic classes
   10.99 +			 * within a Bandwidth Group must total 100 except for
  10.100 +			 * link strict group (zero bandwidth).
  10.101 +			 */
  10.102 +			if (link_strict[i][j]) {
  10.103 +				if (bw_sum[i][j]) {
  10.104 +					/*
  10.105 +					 * Link strict group should have zero
  10.106 +					 * bandwidth.
  10.107 +					 */
  10.108 +					ret_val = DCB_ERR_LS_BWG_NONZERO;
  10.109 +					goto err_config;
  10.110 +				}
  10.111 +			} else if (bw_sum[i][j] != BW_PERCENT &&
  10.112 +			           bw_sum[i][j] != 0) {
  10.113 +				ret_val = DCB_ERR_TC_BW;
  10.114 +				goto err_config;
  10.115 +			}
  10.116 +		}
  10.117 +
  10.118 +		if (bw != BW_PERCENT) {
  10.119 +			ret_val = DCB_ERR_BW_GROUP;
  10.120 +			goto err_config;
  10.121 +		}
  10.122 +	}
  10.123 +
  10.124 +	return DCB_SUCCESS;
  10.125 +
  10.126 +err_config:
  10.127 +	hw_dbg(hw, "DCB error code %d while checking %s settings.\n",
  10.128 +	          ret_val, (j == DCB_TX_CONFIG) ? "Tx" : "Rx");
  10.129 +
  10.130 +	return ret_val;
  10.131 +}
  10.132 +
  10.133 +/**
  10.134 + * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
  10.135 + * @dcb_config: Struct containing DCB settings.
  10.136 + * @direction: Configuring either Tx or Rx.
  10.137 + *
  10.138 + * This function calculates the credits allocated to each traffic class.
  10.139 + * It should be called only after the rules are checked by
  10.140 + * ixgbe_dcb_check_config().
  10.141 + */
  10.142 +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
  10.143 +                                   u8 direction)
  10.144 +{
  10.145 +	struct tc_bw_alloc *p;
  10.146 +	s32 ret_val = 0;
  10.147 +	/* Initialization values default for Tx settings */
  10.148 +	u32 credit_refill       = 0;
  10.149 +	u32 credit_max          = 0;
  10.150 +	u16 link_percentage     = 0;
  10.151 +	u8  bw_percent          = 0;
  10.152 +	u8  i;
  10.153 +
  10.154 +	if (dcb_config == NULL) {
  10.155 +		ret_val = DCB_ERR_CONFIG;
  10.156 +		goto out;
  10.157 +	}
  10.158 +
  10.159 +	/* Find out the link percentage for each TC first */
  10.160 +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
  10.161 +		p = &dcb_config->tc_config[i].path[direction];
  10.162 +		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
  10.163 +
  10.164 +		link_percentage = p->bwg_percent;
  10.165 +		/* Must be careful of integer division for very small nums */
  10.166 +		link_percentage = (link_percentage * bw_percent) / 100;
  10.167 +		if (p->bwg_percent > 0 && link_percentage == 0)
  10.168 +			link_percentage = 1;
  10.169 +
  10.170 +		/* Save link_percentage for reference */
  10.171 +		p->link_percent = (u8)link_percentage;
  10.172 +
  10.173 +		/* Calculate credit refill and save it */
  10.174 +		credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
  10.175 +		p->data_credits_refill = (u16)credit_refill;
  10.176 +
  10.177 +		/* Calculate maximum credit for the TC */
  10.178 +		credit_max = (link_percentage * MAX_CREDIT) / 100;
  10.179 +
  10.180 +		/*
  10.181 +		 * Adjustment based on rule checking, if the percentage
  10.182 +		 * of a TC is too small, the maximum credit may not be
  10.183 +		 * enough to send out a jumbo frame in data plane arbitration.
  10.184 +		 */
  10.185 +		if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
  10.186 +			credit_max = MINIMUM_CREDIT_FOR_JUMBO;
  10.187 +
  10.188 +		if (direction == DCB_TX_CONFIG) {
  10.189 +			/*
  10.190 +			 * Adjustment based on rule checking, if the
  10.191 +			 * percentage of a TC is too small, the maximum
  10.192 +			 * credit may not be enough to send out a TSO
  10.193 +			 * packet in descriptor plane arbitration.
  10.194 +			 */
  10.195 +			if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_TSO))
  10.196 +				credit_max = MINIMUM_CREDIT_FOR_TSO;
  10.197 +
  10.198 +			dcb_config->tc_config[i].desc_credits_max =
  10.199 +			   (u16)credit_max;
  10.200 +		}
  10.201 +
  10.202 +		p->data_credits_max = (u16)credit_max;
  10.203 +	}
  10.204 +
  10.205 +out:
  10.206 +	return ret_val;
  10.207 +}
  10.208 +
  10.209 +/**
  10.210 + * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
  10.211 + * @hw: pointer to hardware structure
  10.212 + * @stats: pointer to statistics structure
  10.213 + * @tc_count:  Number of elements in bwg_array.
  10.214 + *
  10.215 + * This function returns the status data for each of the Traffic Classes in use.
  10.216 + */
  10.217 +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
  10.218 +                           u8 tc_count)
  10.219 +{
  10.220 +	s32 ret = 0;
  10.221 +	if (hw->mac.type == ixgbe_mac_82598EB)
  10.222 +		ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
  10.223 +	return ret;
  10.224 +}
  10.225 +
  10.226 +/**
  10.227 + * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
  10.228 + * @hw: pointer to hardware structure
  10.229 + * @stats: pointer to statistics structure
  10.230 + * @tc_count:  Number of traffic classes to gather statistics for.
  10.231 + *
  10.232 + * This function returns the CBFC status data for each of the Traffic Classes.
  10.233 + */
  10.234 +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
  10.235 +                            u8 tc_count)
  10.236 +{
  10.237 +	s32 ret = 0;
  10.238 +	if (hw->mac.type == ixgbe_mac_82598EB)
  10.239 +		ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
  10.240 +	return ret;
  10.241 +}
  10.242 +
  10.243 +/**
  10.244 + * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter
  10.245 + * @hw: pointer to hardware structure
  10.246 + * @dcb_config: pointer to ixgbe_dcb_config structure
  10.247 + *
  10.248 + * Configure Rx Data Arbiter and credits for each traffic class.
  10.249 + */
  10.250 +s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
  10.251 +                                struct ixgbe_dcb_config *dcb_config)
  10.252 +{
  10.253 +	s32 ret = 0;
  10.254 +	if (hw->mac.type == ixgbe_mac_82598EB)
  10.255 +		ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
  10.256 +	return ret;
  10.257 +}
  10.258 +
  10.259 +/**
  10.260 + * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter
  10.261 + * @hw: pointer to hardware structure
  10.262 + * @dcb_config: pointer to ixgbe_dcb_config structure
  10.263 + *
  10.264 + * Configure Tx Descriptor Arbiter and credits for each traffic class.
  10.265 + */
  10.266 +s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
  10.267 +                                     struct ixgbe_dcb_config *dcb_config)
  10.268 +{
  10.269 +	s32 ret = 0;
  10.270 +	if (hw->mac.type == ixgbe_mac_82598EB)
  10.271 +		ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
  10.272 +	return ret;
  10.273 +}
  10.274 +
  10.275 +/**
  10.276 + * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
  10.277 + * @hw: pointer to hardware structure
  10.278 + * @dcb_config: pointer to ixgbe_dcb_config structure
  10.279 + *
  10.280 + * Configure Tx Data Arbiter and credits for each traffic class.
  10.281 + */
  10.282 +s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
  10.283 +                                     struct ixgbe_dcb_config *dcb_config)
  10.284 +{
  10.285 +	s32 ret = 0;
  10.286 +	if (hw->mac.type == ixgbe_mac_82598EB)
  10.287 +		ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
  10.288 +	return ret;
  10.289 +}
  10.290 +
  10.291 +/**
  10.292 + * ixgbe_dcb_config_pfc - Config priority flow control
  10.293 + * @hw: pointer to hardware structure
  10.294 + * @dcb_config: pointer to ixgbe_dcb_config structure
  10.295 + *
  10.296 + * Configure Priority Flow Control for each traffic class.
  10.297 + */
  10.298 +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
  10.299 +                         struct ixgbe_dcb_config *dcb_config)
  10.300 +{
  10.301 +	s32 ret = 0;
  10.302 +	if (hw->mac.type == ixgbe_mac_82598EB)
  10.303 +		ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
  10.304 +	return ret;
  10.305 +}
  10.306 +
  10.307 +/**
  10.308 + * ixgbe_dcb_config_tc_stats - Config traffic class statistics
  10.309 + * @hw: pointer to hardware structure
  10.310 + *
  10.311 + * Configure queue statistics registers, all queues belonging to same traffic
  10.312 + * class uses a single set of queue statistics counters.
  10.313 + */
  10.314 +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
  10.315 +{
  10.316 +	s32 ret = 0;
  10.317 +	if (hw->mac.type == ixgbe_mac_82598EB)
  10.318 +		ret = ixgbe_dcb_config_tc_stats_82598(hw);
  10.319 +	return ret;
  10.320 +}
  10.321 +
  10.322 +/**
  10.323 + * ixgbe_dcb_hw_config - Config and enable DCB
  10.324 + * @hw: pointer to hardware structure
  10.325 + * @dcb_config: pointer to ixgbe_dcb_config structure
  10.326 + *
  10.327 + * Configure dcb settings and enable dcb mode.
  10.328 + */
  10.329 +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
  10.330 +                        struct ixgbe_dcb_config *dcb_config)
  10.331 +{
  10.332 +	s32 ret = 0;
  10.333 +	if (hw->mac.type == ixgbe_mac_82598EB)
  10.334 +		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
  10.335 +	return ret;
  10.336 +}
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/drivers/net/ixgbe/ixgbe_dcb.h	Fri Jan 30 10:52:47 2009 +0000
    11.3 @@ -0,0 +1,167 @@
    11.4 +/*******************************************************************************
    11.5 +
    11.6 +  Intel 10 Gigabit PCI Express Linux driver
    11.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
    11.8 +
    11.9 +  This program is free software; you can redistribute it and/or modify it
   11.10 +  under the terms and conditions of the GNU General Public License,
   11.11 +  version 2, as published by the Free Software Foundation.
   11.12 +
   11.13 +  This program is distributed in the hope it will be useful, but WITHOUT
   11.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   11.16 +  more details.
   11.17 +
   11.18 +  You should have received a copy of the GNU General Public License along with
   11.19 +  this program; if not, write to the Free Software Foundation, Inc.,
   11.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
   11.21 +
   11.22 +  The full GNU General Public License is included in this distribution in
   11.23 +  the file called "COPYING".
   11.24 +
   11.25 +  Contact Information:
   11.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   11.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
   11.28 +
   11.29 +*******************************************************************************/
   11.30 +
   11.31 +#ifndef _DCB_CONFIG_H_
   11.32 +#define _DCB_CONFIG_H_
   11.33 +
   11.34 +#include "ixgbe_type.h"
   11.35 +
   11.36 +/* DCB data structures */
   11.37 +
   11.38 +#define IXGBE_MAX_PACKET_BUFFERS 8
   11.39 +#define MAX_USER_PRIORITY        8
   11.40 +#define MAX_TRAFFIC_CLASS        8
   11.41 +#define MAX_BW_GROUP             8
   11.42 +#define BW_PERCENT               100
   11.43 +
   11.44 +#define DCB_TX_CONFIG            0
   11.45 +#define DCB_RX_CONFIG            1
   11.46 +
   11.47 +/* DCB error Codes */
   11.48 +#define DCB_SUCCESS              0
   11.49 +#define DCB_ERR_CONFIG           -1
   11.50 +#define DCB_ERR_PARAM            -2
   11.51 +
   11.52 +/* Transmit and receive Errors */
   11.53 +/* Error in bandwidth group allocation */
   11.54 +#define DCB_ERR_BW_GROUP        -3
   11.55 +/* Error in traffic class bandwidth allocation */
   11.56 +#define DCB_ERR_TC_BW           -4
   11.57 +/* Traffic class has both link strict and group strict enabled */
   11.58 +#define DCB_ERR_LS_GS           -5
   11.59 +/* Link strict traffic class has non zero bandwidth */
   11.60 +#define DCB_ERR_LS_BW_NONZERO   -6
   11.61 +/* Link strict bandwidth group has non zero bandwidth */
   11.62 +#define DCB_ERR_LS_BWG_NONZERO  -7
   11.63 +/*  Traffic class has zero bandwidth */
   11.64 +#define DCB_ERR_TC_BW_ZERO      -8
   11.65 +
   11.66 +#define DCB_NOT_IMPLEMENTED      0x7FFFFFFF
   11.67 +
   11.68 +struct dcb_pfc_tc_debug {
   11.69 +	u8  tc;
   11.70 +	u8  pause_status;
   11.71 +	u64 pause_quanta;
   11.72 +};
   11.73 +
   11.74 +enum strict_prio_type {
   11.75 +	prio_none = 0,
   11.76 +	prio_group,
   11.77 +	prio_link
   11.78 +};
   11.79 +
   11.80 +/* Traffic class bandwidth allocation per direction */
   11.81 +struct tc_bw_alloc {
   11.82 +	u8 bwg_id;                /* Bandwidth Group (BWG) ID */
   11.83 +	u8 bwg_percent;           /* % of BWG's bandwidth */
   11.84 +	u8 link_percent;          /* % of link bandwidth */
   11.85 +	u8 up_to_tc_bitmap;       /* User Priority to Traffic Class mapping */
   11.86 +	u16 data_credits_refill;  /* Credit refill amount in 64B granularity */
   11.87 +	u16 data_credits_max;     /* Max credits for a configured packet buffer
   11.88 +	                           * in 64B granularity.*/
   11.89 +	enum strict_prio_type prio_type; /* Link or Group Strict Priority */
   11.90 +};
   11.91 +
   11.92 +enum dcb_pfc_type {
   11.93 +	pfc_disabled = 0,
   11.94 +	pfc_enabled_full,
   11.95 +	pfc_enabled_tx,
   11.96 +	pfc_enabled_rx
   11.97 +};
   11.98 +
   11.99 +/* Traffic class configuration */
  11.100 +struct tc_configuration {
  11.101 +	struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
  11.102 +	enum dcb_pfc_type  dcb_pfc; /* Class based flow control setting */
  11.103 +
  11.104 +	u16 desc_credits_max; /* For Tx Descriptor arbitration */
  11.105 +	u8 tc; /* Traffic class (TC) */
  11.106 +};
  11.107 +
  11.108 +enum dcb_rx_pba_cfg {
  11.109 +	pba_equal,     /* PBA[0-7] each use 64KB FIFO */
  11.110 +	pba_80_48      /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
  11.111 +};
  11.112 +
  11.113 +
  11.114 +struct ixgbe_dcb_config {
  11.115 +	struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
  11.116 +	u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
  11.117 +	bool pfc_mode_enable;
  11.118 +	bool  round_robin_enable;
  11.119 +
  11.120 +	enum dcb_rx_pba_cfg rx_pba_cfg;
  11.121 +
  11.122 +	u32  dcb_cfg_version; /* Not used...OS-specific? */
  11.123 +	u32  link_speed; /* For bandwidth allocation validation purpose */
  11.124 +};
  11.125 +
  11.126 +/* DCB driver APIs */
  11.127 +
  11.128 +/* DCB rule checking function.*/
  11.129 +s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
  11.130 +
  11.131 +/* DCB credits calculation */
  11.132 +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *config,
  11.133 +                                   u8 direction);
  11.134 +
  11.135 +/* DCB PFC functions */
  11.136 +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
  11.137 +                         struct ixgbe_dcb_config *dcb_config);
  11.138 +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
  11.139 +                            u8 tc_count);
  11.140 +
  11.141 +/* DCB traffic class stats */
  11.142 +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
  11.143 +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
  11.144 +                           u8 tc_count);
  11.145 +
  11.146 +/* DCB config arbiters */
  11.147 +s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
  11.148 +                                     struct ixgbe_dcb_config *dcb_config);
  11.149 +s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
  11.150 +                                     struct ixgbe_dcb_config *dcb_config);
  11.151 +s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
  11.152 +                                struct ixgbe_dcb_config *dcb_config);
  11.153 +
  11.154 +/* DCB hw initialization */
  11.155 +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *config);
  11.156 +
  11.157 +
  11.158 +/* DCB definitions for credit calculation */
  11.159 +#define MAX_CREDIT_REFILL       511  /* 0x1FF * 64B = 32704B */
  11.160 +#define MINIMUM_CREDIT_REFILL   5    /* 5*64B = 320B */
  11.161 +#define MINIMUM_CREDIT_FOR_JUMBO 145  /* 145 = UpperBound((9*1024+54)/64B)
  11.162 +                                       * for 9KB jumbo frame */
  11.163 +#define DCB_MAX_TSO_SIZE        32*1024 /* MAX TSO packet size supported
  11.164 +                                         * in DCB mode */
  11.165 +#define MINIMUM_CREDIT_FOR_TSO  (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO
  11.166 +                                                           * packet */
  11.167 +#define MAX_CREDIT              4095 /* Maximum credit supported:
  11.168 +                                      * 256KB * 1024 / 64B */
  11.169 +
  11.170 +#endif /* _DCB_CONFIG_H_ */
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c	Fri Jan 30 10:52:47 2009 +0000
    12.3 @@ -0,0 +1,413 @@
    12.4 +/*******************************************************************************
    12.5 +
    12.6 +  Intel 10 Gigabit PCI Express Linux driver
    12.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
    12.8 +
    12.9 +  This program is free software; you can redistribute it and/or modify it
   12.10 +  under the terms and conditions of the GNU General Public License,
   12.11 +  version 2, as published by the Free Software Foundation.
   12.12 +
   12.13 +  This program is distributed in the hope it will be useful, but WITHOUT
   12.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   12.16 +  more details.
   12.17 +
   12.18 +  You should have received a copy of the GNU General Public License along with
   12.19 +  this program; if not, write to the Free Software Foundation, Inc.,
   12.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
   12.21 +
   12.22 +  The full GNU General Public License is included in this distribution in
   12.23 +  the file called "COPYING".
   12.24 +
   12.25 +  Contact Information:
   12.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   12.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
   12.28 +
   12.29 +*******************************************************************************/
   12.30 +
   12.31 +
   12.32 +#include "ixgbe_type.h"
   12.33 +#include "ixgbe_dcb.h"
   12.34 +#include "ixgbe_dcb_82598.h"
   12.35 +
   12.36 +/**
   12.37 + * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
   12.38 + * @hw: pointer to hardware structure
   12.39 + * @stats: pointer to statistics structure
   12.40 + * @tc_count:  Number of traffic classes to gather statistics for.
   12.41 + *
   12.42 + * This function returns the status data for each of the Traffic Classes in use.
   12.43 + */
   12.44 +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
   12.45 +                                 struct ixgbe_hw_stats *stats,
   12.46 +                                 u8 tc_count)
   12.47 +{
   12.48 +	int tc;
   12.49 +
   12.50 +	if (tc_count > MAX_TRAFFIC_CLASS)
   12.51 +		return DCB_ERR_PARAM;
   12.52 +	/* Statistics pertaining to each traffic class */
   12.53 +	for (tc = 0; tc < tc_count; tc++) {
   12.54 +		/* Transmitted Packets */
   12.55 +		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
   12.56 +		/* Transmitted Bytes */
   12.57 +		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
   12.58 +		/* Received Packets */
   12.59 +		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
   12.60 +		/* Received Bytes */
   12.61 +		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
   12.62 +
   12.63 +#if 0
   12.64 +		/* Can we get rid of these??  Consequently, getting rid
   12.65 +		 * of the tc_stats structure.
   12.66 +		 */
   12.67 +		tc_stats_array[up]->in_overflow_discards = 0;
   12.68 +		tc_stats_array[up]->out_overflow_discards = 0;
   12.69 +#endif
   12.70 +	}
   12.71 +
   12.72 +	return 0;
   12.73 +}
   12.74 +
   12.75 +/**
   12.76 + * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
   12.77 + * @hw: pointer to hardware structure
   12.78 + * @stats: pointer to statistics structure
   12.79 + * @tc_count:  Number of traffic classes to gather statistics for.
   12.80 + *
   12.81 + * This function returns the CBFC status data for each of the Traffic Classes.
   12.82 + */
   12.83 +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
   12.84 +                                  struct ixgbe_hw_stats *stats,
   12.85 +                                  u8 tc_count)
   12.86 +{
   12.87 +	int tc;
   12.88 +
   12.89 +	if (tc_count > MAX_TRAFFIC_CLASS)
   12.90 +		return DCB_ERR_PARAM;
   12.91 +	for (tc = 0; tc < tc_count; tc++) {
   12.92 +		/* Priority XOFF Transmitted */
   12.93 +		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
   12.94 +		/* Priority XOFF Received */
   12.95 +		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
   12.96 +	}
   12.97 +
   12.98 +	return 0;
   12.99 +}
  12.100 +
  12.101 +/**
  12.102 + * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
  12.103 + * @hw: pointer to hardware structure
  12.104 + * @dcb_config: pointer to ixgbe_dcb_config structure
  12.105 + *
  12.106 + * Configure packet buffers for DCB mode.
  12.107 + */
  12.108 +s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
  12.109 +                                          struct ixgbe_dcb_config *dcb_config)
  12.110 +{
  12.111 +	s32 ret_val = 0;
  12.112 +	u32 value = IXGBE_RXPBSIZE_64KB;
  12.113 +	u8  i = 0;
  12.114 +
  12.115 +	/* Setup Rx packet buffer sizes */
  12.116 +	switch (dcb_config->rx_pba_cfg) {
  12.117 +	case pba_80_48:
  12.118 +		/* Setup the first four at 80KB */
  12.119 +		value = IXGBE_RXPBSIZE_80KB;
  12.120 +		for (; i < 4; i++)
  12.121 +			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
  12.122 +		/* Setup the last four at 48KB...don't re-init i */
  12.123 +		value = IXGBE_RXPBSIZE_48KB;
  12.124 +		/* Fall Through */
  12.125 +	case pba_equal:
  12.126 +	default:
  12.127 +		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
  12.128 +			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
  12.129 +
  12.130 +		/* Setup Tx packet buffer sizes */
  12.131 +		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
  12.132 +			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
  12.133 +			                IXGBE_TXPBSIZE_40KB);
  12.134 +		}
  12.135 +		break;
  12.136 +	}
  12.137 +
  12.138 +	return ret_val;
  12.139 +}
  12.140 +
  12.141 +/**
  12.142 + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
  12.143 + * @hw: pointer to hardware structure
  12.144 + * @dcb_config: pointer to ixgbe_dcb_config structure
  12.145 + *
  12.146 + * Configure Rx Data Arbiter and credits for each traffic class.
  12.147 + */
  12.148 +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
  12.149 +                                      struct ixgbe_dcb_config *dcb_config)
  12.150 +{
  12.151 +	struct tc_bw_alloc    *p;
  12.152 +	u32    reg           = 0;
  12.153 +	u32    credit_refill = 0;
  12.154 +	u32    credit_max    = 0;
  12.155 +	u8     i             = 0;
  12.156 +
  12.157 +	reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
  12.158 +	IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
  12.159 +
  12.160 +	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
  12.161 +	/* Enable Arbiter */
  12.162 +	reg &= ~IXGBE_RMCS_ARBDIS;
  12.163 +	/* Enable Receive Recycle within the BWG */
  12.164 +	reg |= IXGBE_RMCS_RRM;
  12.165 +	/* Enable Deficit Fixed Priority arbitration */
  12.166 +	reg |= IXGBE_RMCS_DFP;
  12.167 +
  12.168 +	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
  12.169 +
  12.170 +	/* Configure traffic class credits and priority */
  12.171 +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
  12.172 +		p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
  12.173 +		credit_refill = p->data_credits_refill;
  12.174 +		credit_max    = p->data_credits_max;
  12.175 +
  12.176 +		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
  12.177 +
  12.178 +		if (p->prio_type == prio_link)
  12.179 +			reg |= IXGBE_RT2CR_LSP;
  12.180 +
  12.181 +		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
  12.182 +	}
  12.183 +
  12.184 +	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
  12.185 +	reg |= IXGBE_RDRXCTL_RDMTS_1_2;
  12.186 +	reg |= IXGBE_RDRXCTL_MPBEN;
  12.187 +	reg |= IXGBE_RDRXCTL_MCEN;
  12.188 +	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
  12.189 +
  12.190 +	reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  12.191 +	/* Make sure there are enough descriptors before arbitration */
  12.192 +	reg &= ~IXGBE_RXCTRL_DMBYPS;
  12.193 +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
  12.194 +
  12.195 +	return 0;
  12.196 +}
  12.197 +
  12.198 +/**
  12.199 + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
  12.200 + * @hw: pointer to hardware structure
  12.201 + * @dcb_config: pointer to ixgbe_dcb_config structure
  12.202 + *
  12.203 + * Configure Tx Descriptor Arbiter and credits for each traffic class.
  12.204 + */
  12.205 +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
  12.206 +                                           struct ixgbe_dcb_config *dcb_config)
  12.207 +{
  12.208 +	struct tc_bw_alloc *p;
  12.209 +	u32    reg, max_credits;
  12.210 +	u8     i;
  12.211 +
  12.212 +	reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
  12.213 +
  12.214 +	/* Enable arbiter */
  12.215 +	reg &= ~IXGBE_DPMCS_ARBDIS;
  12.216 +	if (!(dcb_config->round_robin_enable)) {
  12.217 +		/* Enable DFP and Recycle mode */
  12.218 +		reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
  12.219 +	}
  12.220 +	reg |= IXGBE_DPMCS_TSOEF;
  12.221 +	/* Configure Max TSO packet size 34KB including payload and headers */
  12.222 +	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
  12.223 +
  12.224 +	IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
  12.225 +
  12.226 +	/* Configure traffic class credits and priority */
  12.227 +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
  12.228 +		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
  12.229 +		max_credits = dcb_config->tc_config[i].desc_credits_max;
  12.230 +		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
  12.231 +		reg |= p->data_credits_refill;
  12.232 +		reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
  12.233 +
  12.234 +		if (p->prio_type == prio_group)
  12.235 +			reg |= IXGBE_TDTQ2TCCR_GSP;
  12.236 +
  12.237 +		if (p->prio_type == prio_link)
  12.238 +			reg |= IXGBE_TDTQ2TCCR_LSP;
  12.239 +
  12.240 +		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
  12.241 +	}
  12.242 +
  12.243 +	return 0;
  12.244 +}
  12.245 +
  12.246 +/**
  12.247 + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
  12.248 + * @hw: pointer to hardware structure
  12.249 + * @dcb_config: pointer to ixgbe_dcb_config structure
  12.250 + *
  12.251 + * Configure Tx Data Arbiter and credits for each traffic class.
  12.252 + */
  12.253 +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
  12.254 +                                           struct ixgbe_dcb_config *dcb_config)
  12.255 +{
  12.256 +	struct tc_bw_alloc *p;
  12.257 +	u32 reg;
  12.258 +	u8 i;
  12.259 +
  12.260 +	reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
  12.261 +	/* Enable Data Plane Arbiter */
  12.262 +	reg &= ~IXGBE_PDPMCS_ARBDIS;
  12.263 +	/* Enable DFP and Transmit Recycle Mode */
  12.264 +	reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
  12.265 +
  12.266 +	IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
  12.267 +
  12.268 +	/* Configure traffic class credits and priority */
  12.269 +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
  12.270 +		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
  12.271 +		reg = p->data_credits_refill;
  12.272 +		reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
  12.273 +		reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
  12.274 +
  12.275 +		if (p->prio_type == prio_group)
  12.276 +			reg |= IXGBE_TDPT2TCCR_GSP;
  12.277 +
  12.278 +		if (p->prio_type == prio_link)
  12.279 +			reg |= IXGBE_TDPT2TCCR_LSP;
  12.280 +
  12.281 +		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
  12.282 +	}
  12.283 +
  12.284 +	/* Enable Tx packet buffer division */
  12.285 +	reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
  12.286 +	reg |= IXGBE_DTXCTL_ENDBUBD;
  12.287 +	IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
  12.288 +
  12.289 +	return 0;
  12.290 +}
  12.291 +
  12.292 +/**
  12.293 + * ixgbe_dcb_config_pfc_82598 - Config priority flow control
  12.294 + * @hw: pointer to hardware structure
  12.295 + * @dcb_config: pointer to ixgbe_dcb_config structure
  12.296 + *
  12.297 + * Configure Priority Flow Control for each traffic class.
  12.298 + */
  12.299 +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
  12.300 +                               struct ixgbe_dcb_config *dcb_config)
  12.301 +{
  12.302 +	u32 reg, rx_pba_size;
  12.303 +	u8  i;
  12.304 +
  12.305 +	/* Enable Transmit Priority Flow Control */
  12.306 +	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
  12.307 +	reg &= ~IXGBE_RMCS_TFCE_802_3X;
  12.308 +	/* correct the reporting of our flow control status */
  12.309 +	hw->fc.current_mode = ixgbe_fc_none;
  12.310 +	reg |= IXGBE_RMCS_TFCE_PRIORITY;
  12.311 +	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
  12.312 +
  12.313 +	/* Enable Receive Priority Flow Control */
  12.314 +	reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  12.315 +	reg &= ~IXGBE_FCTRL_RFCE;
  12.316 +	reg |= IXGBE_FCTRL_RPFCE;
  12.317 +	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
  12.318 +
  12.319 +	/*
  12.320 +	 * Configure flow control thresholds and enable priority flow control
  12.321 +	 * for each traffic class.
  12.322 +	 */
  12.323 +	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
  12.324 +		if (dcb_config->rx_pba_cfg == pba_equal) {
  12.325 +			rx_pba_size = IXGBE_RXPBSIZE_64KB;
  12.326 +		} else {
  12.327 +			rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
  12.328 +			                      : IXGBE_RXPBSIZE_48KB;
  12.329 +		}
  12.330 +
  12.331 +		reg = ((rx_pba_size >> 5) &  0xFFF0);
  12.332 +		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
  12.333 +		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
  12.334 +			reg |= IXGBE_FCRTL_XONE;
  12.335 +
  12.336 +		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
  12.337 +
  12.338 +		reg = ((rx_pba_size >> 2) & 0xFFF0);
  12.339 +		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
  12.340 +		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
  12.341 +			reg |= IXGBE_FCRTH_FCEN;
  12.342 +
  12.343 +		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
  12.344 +	}
  12.345 +
  12.346 +	/* Configure pause time */
  12.347 +	for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
  12.348 +		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
  12.349 +
  12.350 +	/* Configure flow control refresh threshold value */
  12.351 +	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
  12.352 +
  12.353 +	return 0;
  12.354 +}
  12.355 +
  12.356 +/**
  12.357 + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
  12.358 + * @hw: pointer to hardware structure
  12.359 + *
  12.360 + * Configure queue statistics registers, all queues belonging to same traffic
  12.361 + * class uses a single set of queue statistics counters.
  12.362 + */
  12.363 +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
  12.364 +{
  12.365 +	u32 reg = 0;
  12.366 +	u8  i   = 0;
  12.367 +	u8  j   = 0;
  12.368 +
  12.369 +	/* Receive Queues stats setting -  8 queues per statistics reg */
  12.370 +	for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
  12.371 +		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
  12.372 +		reg |= ((0x1010101) * j);
  12.373 +		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
  12.374 +		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
  12.375 +		reg |= ((0x1010101) * j);
  12.376 +		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
  12.377 +	}
  12.378 +	/* Transmit Queues stats setting -  4 queues per statistics reg */
  12.379 +	for (i = 0; i < 8; i++) {
  12.380 +		reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
  12.381 +		reg |= ((0x1010101) * i);
  12.382 +		IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
  12.383 +	}
  12.384 +
  12.385 +	return 0;
  12.386 +}
  12.387 +
  12.388 +/**
  12.389 + * ixgbe_dcb_hw_config_82598 - Config and enable DCB
  12.390 + * @hw: pointer to hardware structure
  12.391 + * @dcb_config: pointer to ixgbe_dcb_config structure
  12.392 + *
  12.393 + * Configure dcb settings and enable dcb mode.
  12.394 + */
  12.395 +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
  12.396 +                              struct ixgbe_dcb_config *dcb_config)
  12.397 +{
  12.398 +	u32  pap = 0;
  12.399 +
  12.400 +	ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
  12.401 +	ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
  12.402 +	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
  12.403 +	ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
  12.404 +	ixgbe_dcb_config_pfc_82598(hw, dcb_config);
  12.405 +	ixgbe_dcb_config_tc_stats_82598(hw);
  12.406 +
  12.407 +	/* TODO: For DCB SV purpose only,
  12.408 +	 * remove it before product release */
  12.409 +	if (dcb_config->link_speed > 0 && dcb_config->link_speed <= 9) {
  12.410 +		pap = IXGBE_READ_REG(hw, IXGBE_PAP);
  12.411 +		pap |= (dcb_config->link_speed << 16);
  12.412 +		IXGBE_WRITE_REG(hw, IXGBE_PAP, pap);
  12.413 +	}
  12.414 +
  12.415 +	return 0;
  12.416 +}
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h	Fri Jan 30 10:52:47 2009 +0000
    13.3 @@ -0,0 +1,99 @@
    13.4 +/*******************************************************************************
    13.5 +
    13.6 +  Intel 10 Gigabit PCI Express Linux driver
    13.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
    13.8 +
    13.9 +  This program is free software; you can redistribute it and/or modify it
   13.10 +  under the terms and conditions of the GNU General Public License,
   13.11 +  version 2, as published by the Free Software Foundation.
   13.12 +
   13.13 +  This program is distributed in the hope it will be useful, but WITHOUT
   13.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   13.16 +  more details.
   13.17 +
   13.18 +  You should have received a copy of the GNU General Public License along with
   13.19 +  this program; if not, write to the Free Software Foundation, Inc.,
   13.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
   13.21 +
   13.22 +  The full GNU General Public License is included in this distribution in
   13.23 +  the file called "COPYING".
   13.24 +
   13.25 +  Contact Information:
   13.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   13.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
   13.28 +
   13.29 +*******************************************************************************/
   13.30 +
   13.31 +#ifndef _DCB_82598_CONFIG_H_
   13.32 +#define _DCB_82598_CONFIG_H_
   13.33 +
   13.34 +/* DCB register definitions */
   13.35 +
   13.36 +#define IXGBE_DPMCS_MTSOS_SHIFT 16
   13.37 +#define IXGBE_DPMCS_TDPAC       0x00000001 /* 0 Round Robin,
   13.38 +                                            * 1 DFP - Deficit Fixed Priority */
   13.39 +#define IXGBE_DPMCS_TRM         0x00000010 /* Transmit Recycle Mode */
   13.40 +#define IXGBE_DPMCS_ARBDIS      0x00000040 /* DCB arbiter disable */
   13.41 +#define IXGBE_DPMCS_TSOEF       0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
   13.42 +
   13.43 +#define IXGBE_RUPPBMR_MQA       0x80000000 /* Enable UP to queue mapping */
   13.44 +
   13.45 +#define IXGBE_RT2CR_MCL_SHIFT   12 /* Offset to Max Credit Limit setting */
   13.46 +#define IXGBE_RT2CR_LSP         0x80000000 /* LSP enable bit */
   13.47 +
   13.48 +#define IXGBE_RDRXCTL_MPBEN     0x00000010 /* DMA config for multiple packet
   13.49 +                                            * buffers enable */
   13.50 +#define IXGBE_RDRXCTL_MCEN      0x00000040 /* DMA config for multiple cores
   13.51 +                                            * (RSS) enable */
   13.52 +
   13.53 +#define IXGBE_TDTQ2TCCR_MCL_SHIFT   12
   13.54 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT   9
   13.55 +#define IXGBE_TDTQ2TCCR_GSP     0x40000000
   13.56 +#define IXGBE_TDTQ2TCCR_LSP     0x80000000
   13.57 +
   13.58 +#define IXGBE_TDPT2TCCR_MCL_SHIFT   12
   13.59 +#define IXGBE_TDPT2TCCR_BWG_SHIFT   9
   13.60 +#define IXGBE_TDPT2TCCR_GSP     0x40000000
   13.61 +#define IXGBE_TDPT2TCCR_LSP     0x80000000
   13.62 +
   13.63 +#define IXGBE_PDPMCS_TPPAC      0x00000020 /* 0 Round Robin,
   13.64 +                                            * 1 DFP - Deficit Fixed Priority */
   13.65 +#define IXGBE_PDPMCS_ARBDIS     0x00000040 /* Arbiter disable */
   13.66 +#define IXGBE_PDPMCS_TRM        0x00000100 /* Transmit Recycle Mode enable */
   13.67 +
   13.68 +#define IXGBE_DTXCTL_ENDBUBD    0x00000004 /* Enable DBU buffer division */
   13.69 +
   13.70 +#define IXGBE_TXPBSIZE_40KB     0x0000A000 /* 40KB Packet Buffer */
   13.71 +#define IXGBE_RXPBSIZE_48KB     0x0000C000 /* 48KB Packet Buffer */
   13.72 +#define IXGBE_RXPBSIZE_64KB     0x00010000 /* 64KB Packet Buffer */
   13.73 +#define IXGBE_RXPBSIZE_80KB     0x00014000 /* 80KB Packet Buffer */
   13.74 +
   13.75 +/* DCB hardware-specific driver APIs */
   13.76 +
   13.77 +/* DCB PFC functions */
   13.78 +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
   13.79 +                               struct ixgbe_dcb_config *dcb_config);
   13.80 +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
   13.81 +                                  struct ixgbe_hw_stats *stats,
   13.82 +                                  u8 tc_count);
   13.83 +
   13.84 +/* DCB traffic class stats */
   13.85 +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw);
   13.86 +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
   13.87 +                                 struct ixgbe_hw_stats *stats,
   13.88 +                                 u8 tc_count);
   13.89 +
   13.90 +/* DCB config arbiters */
   13.91 +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
   13.92 +                                           struct ixgbe_dcb_config *dcb_config);
   13.93 +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
   13.94 +                                           struct ixgbe_dcb_config *dcb_config);
   13.95 +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
   13.96 +                                      struct ixgbe_dcb_config *dcb_config);
   13.97 +
   13.98 +/* DCB hw initialization */
   13.99 +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
  13.100 +                              struct ixgbe_dcb_config *config);
  13.101 +
   13.102 +#endif /* _DCB_82598_CONFIG_H_ */
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/drivers/net/ixgbe/ixgbe_ethtool.c	Fri Jan 30 10:52:47 2009 +0000
    14.3 @@ -0,0 +1,1941 @@
    14.4 +/*******************************************************************************
    14.5 +
    14.6 +  Intel 10 Gigabit PCI Express Linux driver
    14.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
    14.8 +
    14.9 +  This program is free software; you can redistribute it and/or modify it
   14.10 +  under the terms and conditions of the GNU General Public License,
   14.11 +  version 2, as published by the Free Software Foundation.
   14.12 +
   14.13 +  This program is distributed in the hope it will be useful, but WITHOUT
   14.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   14.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   14.16 +  more details.
   14.17 +
   14.18 +  You should have received a copy of the GNU General Public License along with
   14.19 +  this program; if not, write to the Free Software Foundation, Inc.,
   14.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
   14.21 +
   14.22 +  The full GNU General Public License is included in this distribution in
   14.23 +  the file called "COPYING".
   14.24 +
   14.25 +  Contact Information:
   14.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   14.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
   14.28 +
   14.29 +*******************************************************************************/
   14.30 +
   14.31 +/* ethtool support for ixgbe */
   14.32 +
   14.33 +#include <linux/types.h>
   14.34 +#include <linux/module.h>
   14.35 +#include <linux/pci.h>
   14.36 +#include <linux/netdevice.h>
   14.37 +#include <linux/ethtool.h>
   14.38 +#include <linux/vmalloc.h>
   14.39 +#ifdef SIOCETHTOOL
   14.40 +#include <asm/uaccess.h>
   14.41 +
   14.42 +#include "ixgbe.h"
   14.43 +
   14.44 +#ifndef ETH_GSTRING_LEN
   14.45 +#define ETH_GSTRING_LEN 32
   14.46 +#endif
   14.47 +
   14.48 +#define IXGBE_ALL_RAR_ENTRIES 16
   14.49 +
   14.50 +#ifdef ETHTOOL_OPS_COMPAT
   14.51 +#include "kcompat_ethtool.c"
   14.52 +#endif
   14.53 +#ifdef ETHTOOL_GSTATS
   14.54 +struct ixgbe_stats {
   14.55 +	char stat_string[ETH_GSTRING_LEN];
   14.56 +	int sizeof_stat;
   14.57 +	int stat_offset;
   14.58 +};
   14.59 +
   14.60 +#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
   14.61 +		      offsetof(struct ixgbe_adapter, m)
   14.62 +static struct ixgbe_stats ixgbe_gstrings_stats[] = {
   14.63 +	{"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
   14.64 +	{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
   14.65 +	{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
   14.66 +	{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
   14.67 +	{"lsc_int", IXGBE_STAT(lsc_int)},
   14.68 +	{"tx_busy", IXGBE_STAT(tx_busy)},
   14.69 +	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
   14.70 +	{"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
   14.71 +	{"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
   14.72 +	{"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
   14.73 +#ifndef CONFIG_IXGBE_NAPI
   14.74 +	{"rx_dropped_backlog", IXGBE_STAT(rx_dropped_backlog)},
   14.75 +#endif
   14.76 +	{"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
   14.77 +	{"multicast", IXGBE_STAT(net_stats.multicast)},
   14.78 +	{"broadcast", IXGBE_STAT(stats.bprc)},
   14.79 +	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
   14.80 +	{"collisions", IXGBE_STAT(net_stats.collisions)},
   14.81 +	{"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
   14.82 +	{"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
   14.83 +	{"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
   14.84 +	{"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
   14.85 +	{"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
   14.86 +	{"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
   14.87 +	{"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
   14.88 +	{"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
   14.89 +	{"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
   14.90 +	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
   14.91 +	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
   14.92 +	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
   14.93 +	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
   14.94 +#ifdef NETIF_F_TSO
   14.95 +	{"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
   14.96 +#ifdef NETIF_F_TSO6
   14.97 +	{"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
   14.98 +#endif
   14.99 +#endif
  14.100 +	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
  14.101 +	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
  14.102 +	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
  14.103 +	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
  14.104 +	{"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
  14.105 +	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
  14.106 +	{"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
  14.107 +	{"rx_header_split", IXGBE_STAT(rx_hdr_split)},
  14.108 +#ifndef IXGBE_NO_LLI
  14.109 +	{"low_latency_interrupt", IXGBE_STAT(lli_int)},
  14.110 +#endif
  14.111 +	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
  14.112 +	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
  14.113 +#ifndef IXGBE_NO_LRO
  14.114 +	{"lro_aggregated", IXGBE_STAT(lro_data.stats.coal)},
  14.115 +	{"lro_flushed", IXGBE_STAT(lro_data.stats.flushed)},
  14.116 +#endif /* IXGBE_NO_LRO */
  14.117 +#ifndef IXGBE_NO_INET_LRO
  14.118 +	{"lro_aggregated", IXGBE_STAT(lro_aggregated)},
  14.119 +	{"lro_flushed", IXGBE_STAT(lro_flushed)},
  14.120 +#endif
  14.121 +};
  14.122 +
  14.123 +#define IXGBE_QUEUE_STATS_LEN \
  14.124 +           ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
  14.125 +             ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
  14.126 +             (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
  14.127 +#define IXGBE_PB_STATS_LEN ( \
  14.128 +		(((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
  14.129 +		 IXGBE_FLAG_DCB_ENABLED) ? \
  14.130 +		 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
  14.131 +		  sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
  14.132 +		  sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
  14.133 +		  sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
  14.134 +		 / sizeof(u64) : 0)
  14.135 +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_PB_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
  14.136 +#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
  14.137 +#endif /* ETHTOOL_GSTATS */
  14.138 +#ifdef ETHTOOL_TEST
  14.139 +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
  14.140 +	"Register test  (offline)", "Eeprom test    (offline)",
  14.141 +	"Interrupt test (offline)", "Loopback test  (offline)",
  14.142 +	"Link test   (on/offline)"
  14.143 +};
  14.144 +#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
  14.145 +#endif /* ETHTOOL_TEST */
  14.146 +
  14.147 +static int ixgbe_get_settings(struct net_device *netdev,
  14.148 +                              struct ethtool_cmd *ecmd)
  14.149 +{
  14.150 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.151 +	struct ixgbe_hw *hw = &adapter->hw;
  14.152 +	u32 link_speed = 0;
  14.153 +	bool link_up;
  14.154 +
  14.155 +	ecmd->supported = SUPPORTED_10000baseT_Full;
  14.156 +	ecmd->autoneg = AUTONEG_ENABLE;
  14.157 +	ecmd->transceiver = XCVR_EXTERNAL;
  14.158 +	if (hw->phy.media_type == ixgbe_media_type_copper) {
  14.159 +		ecmd->supported |= (SUPPORTED_1000baseT_Full |
  14.160 +		                    SUPPORTED_TP | SUPPORTED_Autoneg);
  14.161 +
  14.162 +		ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
  14.163 +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
  14.164 +			ecmd->advertising |= ADVERTISED_10000baseT_Full;
  14.165 +		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
  14.166 +			ecmd->advertising |= ADVERTISED_1000baseT_Full;
  14.167 +
  14.168 +		ecmd->port = PORT_TP;
  14.169 +	} else {
  14.170 +		ecmd->supported |= SUPPORTED_FIBRE;
  14.171 +		ecmd->advertising = (ADVERTISED_10000baseT_Full |
  14.172 +		                     ADVERTISED_FIBRE);
  14.173 +		ecmd->port = PORT_FIBRE;
  14.174 +		ecmd->autoneg = AUTONEG_DISABLE;
  14.175 +	}
  14.176 +
  14.177 +	if (!in_interrupt()) {
  14.178 +		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
  14.179 +	} else {
  14.180 +		/*
  14.181 +		 * this case is a special workaround for RHEL5 bonding
  14.182 +		 * that calls this routine from interrupt context
  14.183 +		 */
  14.184 +		link_speed = adapter->link_speed;
  14.185 +		link_up = adapter->link_up;
  14.186 +	}
  14.187 +
  14.188 +	if (link_up) {
  14.189 +		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
  14.190 +		               SPEED_10000 : SPEED_1000;
  14.191 +		ecmd->duplex = DUPLEX_FULL;
  14.192 +	} else {
  14.193 +		ecmd->speed = -1;
  14.194 +		ecmd->duplex = -1;
  14.195 +	}
  14.196 +
  14.197 +	return 0;
  14.198 +}
  14.199 +
  14.200 +static int ixgbe_set_settings(struct net_device *netdev,
  14.201 +                              struct ethtool_cmd *ecmd)
  14.202 +{
  14.203 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.204 +	struct ixgbe_hw *hw = &adapter->hw;
  14.205 +	u32 advertised, old;
  14.206 +	s32 err;
  14.207 +
  14.208 +	switch (hw->phy.media_type) {
  14.209 +	case ixgbe_media_type_fiber:
  14.210 +		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
  14.211 +		    (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
  14.212 +			return -EINVAL;
  14.213 +		/* in this case we currently only support 10Gb/FULL */
  14.214 +		break;
  14.215 +	case ixgbe_media_type_copper:
  14.216 +		/* 10000/copper and 1000/copper must autoneg
  14.217 +		 * this function does not support any duplex forcing, but can
  14.218 +		 * limit the advertising of the adapter to only 10000 or 1000 */
  14.219 +		if (ecmd->autoneg == AUTONEG_DISABLE)
  14.220 +			return -EINVAL;
  14.221 +
  14.222 +		old = hw->phy.autoneg_advertised;
  14.223 +		advertised = 0;
  14.224 +		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
  14.225 +			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
  14.226 +
  14.227 +		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
  14.228 +			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
  14.229 +
  14.230 +		if (old == advertised)
  14.231 +			break;
  14.232 +		/* this sets the link speed and restarts auto-neg */
  14.233 +		err = hw->mac.ops.setup_link_speed(hw, advertised, true, true);
  14.234 +		if (err) {
  14.235 +			DPRINTK(PROBE, INFO,
  14.236 +			        "setup link failed with code %d\n", err);
  14.237 +			hw->mac.ops.setup_link_speed(hw, old, true, true);
  14.238 +		}
  14.239 +		break;
  14.240 +	default:
  14.241 +		break;
  14.242 +	}
  14.243 +
  14.244 +	return 0;
  14.245 +}
  14.246 +
  14.247 +static void ixgbe_get_pauseparam(struct net_device *netdev,
  14.248 +                                 struct ethtool_pauseparam *pause)
  14.249 +{
  14.250 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.251 +	struct ixgbe_hw *hw = &adapter->hw;
  14.252 +
  14.253 +	pause->autoneg = (hw->fc.current_mode == ixgbe_fc_full ? 1 : 0);
  14.254 +
  14.255 +	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
  14.256 +		pause->rx_pause = 1;
  14.257 +	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
  14.258 +		pause->tx_pause = 1;
  14.259 +	} else if (hw->fc.current_mode == ixgbe_fc_full) {
  14.260 +		pause->rx_pause = 1;
  14.261 +		pause->tx_pause = 1;
  14.262 +	}
  14.263 +}
  14.264 +
  14.265 +static int ixgbe_set_pauseparam(struct net_device *netdev,
  14.266 +                                struct ethtool_pauseparam *pause)
  14.267 +{
  14.268 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.269 +	struct ixgbe_hw *hw = &adapter->hw;
  14.270 +
  14.271 +	if ((pause->autoneg == AUTONEG_ENABLE) ||
  14.272 +	    (pause->rx_pause && pause->tx_pause))
  14.273 +		hw->fc.current_mode = ixgbe_fc_full;
  14.274 +	else if (pause->rx_pause && !pause->tx_pause)
  14.275 +		hw->fc.current_mode = ixgbe_fc_rx_pause;
  14.276 +	else if (!pause->rx_pause && pause->tx_pause)
  14.277 +		hw->fc.current_mode = ixgbe_fc_tx_pause;
  14.278 +	else if (!pause->rx_pause && !pause->tx_pause)
  14.279 +		hw->fc.current_mode = ixgbe_fc_none;
  14.280 +	else
  14.281 +		return -EINVAL;
  14.282 +
  14.283 +	hw->fc.requested_mode = hw->fc.current_mode;
  14.284 +
  14.285 +	if (netif_running(netdev))
  14.286 +		ixgbe_reinit_locked(adapter);
  14.287 +	else
  14.288 +		ixgbe_reset(adapter);
  14.289 +
  14.290 +	return 0;
  14.291 +}
  14.292 +
  14.293 +static u32 ixgbe_get_rx_csum(struct net_device *netdev)
  14.294 +{
  14.295 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.296 +	return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
  14.297 +}
  14.298 +
  14.299 +static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
  14.300 +{
  14.301 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.302 +	if (data)
  14.303 +		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
  14.304 +	else
  14.305 +		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
  14.306 +
  14.307 +	if (netif_running(netdev))
  14.308 +		ixgbe_reinit_locked(adapter);
  14.309 +	else
  14.310 +		ixgbe_reset(adapter);
  14.311 +
  14.312 +	return 0;
  14.313 +}
  14.314 +
  14.315 +static u32 ixgbe_get_tx_csum(struct net_device *netdev)
  14.316 +{
  14.317 +	return (netdev->features & NETIF_F_IP_CSUM) != 0;
  14.318 +}
  14.319 +
  14.320 +static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
  14.321 +{
  14.322 +	if (data)
  14.323 +#ifdef NETIF_F_IPV6_CSUM
  14.324 +		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
  14.325 +	else
  14.326 +		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
  14.327 +#else
  14.328 +		netdev->features |= NETIF_F_IP_CSUM;
  14.329 +	else
  14.330 +		netdev->features &= ~NETIF_F_IP_CSUM;
  14.331 +#endif
  14.332 +
  14.333 +	return 0;
  14.334 +}
  14.335 +
  14.336 +#ifdef NETIF_F_TSO
  14.337 +static int ixgbe_set_tso(struct net_device *netdev, u32 data)
  14.338 +{
  14.339 +#ifndef HAVE_NETDEV_VLAN_FEATURES
  14.340 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.341 +#endif /* HAVE_NETDEV_VLAN_FEATURES */
  14.342 +	if (data) {
  14.343 +		netdev->features |= NETIF_F_TSO;
  14.344 +#ifdef NETIF_F_TSO6
  14.345 +		netdev->features |= NETIF_F_TSO6;
  14.346 +#endif
  14.347 +	} else {
  14.348 +		netif_tx_stop_all_queues(netdev);
  14.349 +		netdev->features &= ~NETIF_F_TSO;
  14.350 +#ifdef NETIF_F_TSO6
  14.351 +		netdev->features &= ~NETIF_F_TSO6;
  14.352 +#endif
  14.353 +#ifndef HAVE_NETDEV_VLAN_FEATURES
  14.354 +#ifdef NETIF_F_HW_VLAN_TX
  14.355 +		/* disable TSO on all VLANs if they're present */
  14.356 +		if (adapter->vlgrp) {
  14.357 +			int i;
  14.358 +			struct net_device *v_netdev;
  14.359 +			for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
  14.360 +				v_netdev =
  14.361 +				       vlan_group_get_device(adapter->vlgrp, i);
  14.362 +				if (v_netdev) {
  14.363 +					v_netdev->features &= ~NETIF_F_TSO;
  14.364 +#ifdef NETIF_F_TSO6
  14.365 +					v_netdev->features &= ~NETIF_F_TSO6;
  14.366 +#endif
  14.367 +					vlan_group_set_device(adapter->vlgrp, i,
  14.368 +					                      v_netdev);
  14.369 +				}
  14.370 +			}
  14.371 +		}
  14.372 +#endif
  14.373 +#endif /* HAVE_NETDEV_VLAN_FEATURES */
  14.374 +		netif_tx_start_all_queues(netdev);
  14.375 +	}
  14.376 +	return 0;
  14.377 +}
  14.378 +#endif /* NETIF_F_TSO */
  14.379 +
  14.380 +static u32 ixgbe_get_msglevel(struct net_device *netdev)
  14.381 +{
  14.382 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.383 +	return adapter->msg_enable;
  14.384 +}
  14.385 +
  14.386 +static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
  14.387 +{
  14.388 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.389 +	adapter->msg_enable = data;
  14.390 +}
  14.391 +
  14.392 +static int ixgbe_get_regs_len(struct net_device *netdev)
  14.393 +{
  14.394 +#define IXGBE_REGS_LEN  1128
  14.395 +	return IXGBE_REGS_LEN * sizeof(u32);
  14.396 +}
  14.397 +
  14.398 +#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
  14.399 +
  14.400 +static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
  14.401 +                           void *p)
  14.402 +{
  14.403 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.404 +	struct ixgbe_hw *hw = &adapter->hw;
  14.405 +	u32 *regs_buff = p;
  14.406 +	u8 i;
  14.407 +
  14.408 +	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
  14.409 +
  14.410 +	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
  14.411 +
  14.412 +	/* General Registers */
  14.413 +	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
  14.414 +	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
  14.415 +	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
  14.416 +	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
  14.417 +	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
  14.418 +	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
  14.419 +	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
  14.420 +	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
  14.421 +
  14.422 +	/* NVM Register */
  14.423 +	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
  14.424 +	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
  14.425 +	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
  14.426 +	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
  14.427 +	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
  14.428 +	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
  14.429 +	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
  14.430 +	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
  14.431 +	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
  14.432 +	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
  14.433 +
  14.434 +	/* Interrupt */
  14.435 +	/* don't read EICR because it can clear interrupt causes, instead
  14.436 +	 * read EICS which is a shadow but doesn't clear EICR */
  14.437 +	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
  14.438 +	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
  14.439 +	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
  14.440 +	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
  14.441 +	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
  14.442 +	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
  14.443 +	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
  14.444 +	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
  14.445 +	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
  14.446 +	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
  14.447 +	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
  14.448 +	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
  14.449 +
  14.450 +	/* Flow Control */
  14.451 +	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
  14.452 +	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
  14.453 +	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
  14.454 +	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
  14.455 +	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
  14.456 +	for (i = 0; i < 8; i++)
  14.457 +		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
  14.458 +	for (i = 0; i < 8; i++)
  14.459 +		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
  14.460 +	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
  14.461 +	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
  14.462 +
  14.463 +	/* Receive DMA */
  14.464 +	for (i = 0; i < 64; i++)
  14.465 +		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
  14.466 +	for (i = 0; i < 64; i++)
  14.467 +		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
  14.468 +	for (i = 0; i < 64; i++)
  14.469 +		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
  14.470 +	for (i = 0; i < 64; i++)
  14.471 +		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
  14.472 +	for (i = 0; i < 64; i++)
  14.473 +		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
  14.474 +	for (i = 0; i < 64; i++)
  14.475 +		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
  14.476 +	for (i = 0; i < 16; i++)
  14.477 +		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
  14.478 +	for (i = 0; i < 16; i++)
  14.479 +		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
  14.480 +	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
  14.481 +	for (i = 0; i < 8; i++)
  14.482 +		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
  14.483 +	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  14.484 +	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
  14.485 +
  14.486 +	/* Receive */
  14.487 +	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
  14.488 +	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
  14.489 +	for (i = 0; i < 16; i++)
  14.490 +		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
  14.491 +	for (i = 0; i < 16; i++)
  14.492 +		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
  14.493 +	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
  14.494 +	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  14.495 +	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  14.496 +	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
  14.497 +	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
  14.498 +	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
  14.499 +	for (i = 0; i < 8; i++)
  14.500 +		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
  14.501 +	for (i = 0; i < 8; i++)
  14.502 +		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
  14.503 +	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
  14.504 +
  14.505 +	/* Transmit */
  14.506 +	for (i = 0; i < 32; i++)
  14.507 +		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
  14.508 +	for (i = 0; i < 32; i++)
  14.509 +		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
  14.510 +	for (i = 0; i < 32; i++)
  14.511 +		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
  14.512 +	for (i = 0; i < 32; i++)
  14.513 +		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
  14.514 +	for (i = 0; i < 32; i++)
  14.515 +		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
  14.516 +	for (i = 0; i < 32; i++)
  14.517 +		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
  14.518 +	for (i = 0; i < 32; i++)
  14.519 +		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
  14.520 +	for (i = 0; i < 32; i++)
  14.521 +		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
  14.522 +	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
  14.523 +	for (i = 0; i < 16; i++)
  14.524 +		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
  14.525 +	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
  14.526 +	for (i = 0; i < 8; i++)
  14.527 +		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
  14.528 +	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
  14.529 +
  14.530 +	/* Wake Up */
  14.531 +	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
  14.532 +	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
  14.533 +	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
  14.534 +	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
  14.535 +	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
  14.536 +	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
  14.537 +	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
  14.538 +	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
  14.539 +	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
  14.540 +
  14.541 +	/* DCB */
  14.542 +	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
  14.543 +	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
  14.544 +	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
  14.545 +	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
  14.546 +	for (i = 0; i < 8; i++)
  14.547 +		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
  14.548 +	for (i = 0; i < 8; i++)
  14.549 +		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
  14.550 +	for (i = 0; i < 8; i++)
  14.551 +		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
  14.552 +	for (i = 0; i < 8; i++)
  14.553 +		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
  14.554 +	for (i = 0; i < 8; i++)
  14.555 +		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
  14.556 +	for (i = 0; i < 8; i++)
  14.557 +		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
  14.558 +
  14.559 +	/* Statistics */
  14.560 +	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
  14.561 +	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
  14.562 +	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
  14.563 +	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
  14.564 +	for (i = 0; i < 8; i++)
  14.565 +		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
  14.566 +	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
  14.567 +	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
  14.568 +	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
  14.569 +	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
  14.570 +	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
  14.571 +	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
  14.572 +	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
  14.573 +	for (i = 0; i < 8; i++)
  14.574 +		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
  14.575 +	for (i = 0; i < 8; i++)
  14.576 +		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
  14.577 +	for (i = 0; i < 8; i++)
  14.578 +		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
  14.579 +	for (i = 0; i < 8; i++)
  14.580 +		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
  14.581 +	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
  14.582 +	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
  14.583 +	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
  14.584 +	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
  14.585 +	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
  14.586 +	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
  14.587 +	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
  14.588 +	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
  14.589 +	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
  14.590 +	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
  14.591 +	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
  14.592 +	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
  14.593 +	for (i = 0; i < 8; i++)
  14.594 +		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
  14.595 +	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
  14.596 +	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
  14.597 +	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
  14.598 +	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
  14.599 +	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
  14.600 +	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
  14.601 +	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
  14.602 +	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
  14.603 +	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
  14.604 +	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
  14.605 +	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
  14.606 +	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
  14.607 +	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
  14.608 +	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
  14.609 +	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
  14.610 +	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
  14.611 +	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
  14.612 +	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
  14.613 +	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
  14.614 +	for (i = 0; i < 16; i++)
  14.615 +		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
  14.616 +	for (i = 0; i < 16; i++)
  14.617 +		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
  14.618 +	for (i = 0; i < 16; i++)
  14.619 +		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
  14.620 +	for (i = 0; i < 16; i++)
  14.621 +		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
  14.622 +
  14.623 +	/* MAC */
  14.624 +	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
  14.625 +	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
  14.626 +	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
  14.627 +	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
  14.628 +	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
  14.629 +	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
  14.630 +	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
  14.631 +	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
  14.632 +	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
  14.633 +	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
  14.634 +	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
  14.635 +	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
  14.636 +	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
  14.637 +	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
  14.638 +	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
  14.639 +	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
  14.640 +	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
  14.641 +	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
  14.642 +	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
  14.643 +	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
  14.644 +	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
  14.645 +	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
  14.646 +	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
  14.647 +	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
  14.648 +	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
  14.649 +	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
  14.650 +	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
  14.651 +	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
  14.652 +	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
  14.653 +	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
  14.654 +	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
  14.655 +	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
  14.656 +	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
  14.657 +
  14.658 +	/* Diagnostic */
  14.659 +	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
  14.660 +	for (i = 0; i < 8; i++)
  14.661 +		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
  14.662 +	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
  14.663 +	for (i = 0; i < 4; i++)
  14.664 +		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
  14.665 +	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
  14.666 +	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
  14.667 +	for (i = 0; i < 8; i++)
  14.668 +		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
  14.669 +	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
  14.670 +	for (i = 0; i < 4; i++)
  14.671 +		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
  14.672 +	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
  14.673 +	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
  14.674 +	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
  14.675 +	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
  14.676 +	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
  14.677 +	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
  14.678 +	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
  14.679 +	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
  14.680 +	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
  14.681 +	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
  14.682 +	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
  14.683 +	for (i = 0; i < 8; i++)
  14.684 +		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
  14.685 +	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
  14.686 +	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
  14.687 +	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
  14.688 +	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
  14.689 +	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
  14.690 +	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
  14.691 +	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
  14.692 +	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
  14.693 +	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
  14.694 +}
  14.695 +
  14.696 +static int ixgbe_get_eeprom_len(struct net_device *netdev)
  14.697 +{
  14.698 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.699 +	return adapter->hw.eeprom.word_size * 2;
  14.700 +}
  14.701 +
  14.702 +static int ixgbe_get_eeprom(struct net_device *netdev,
  14.703 +                            struct ethtool_eeprom *eeprom, u8 *bytes)
  14.704 +{
  14.705 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.706 +	struct ixgbe_hw *hw = &adapter->hw;
  14.707 +	u16 *eeprom_buff;
  14.708 +	int first_word, last_word, eeprom_len;
  14.709 +	int ret_val = 0;
  14.710 +	u16 i;
  14.711 +
  14.712 +	if (eeprom->len == 0)
  14.713 +		return -EINVAL;
  14.714 +
  14.715 +	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
  14.716 +
  14.717 +	first_word = eeprom->offset >> 1;
  14.718 +	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
  14.719 +	eeprom_len = last_word - first_word + 1;
  14.720 +
  14.721 +	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
  14.722 +	if (!eeprom_buff)
  14.723 +		return -ENOMEM;
  14.724 +
  14.725 +	for (i = 0; i < eeprom_len; i++) {
  14.726 +		if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
  14.727 +						 &eeprom_buff[i])))
  14.728 +			break;
  14.729 +	}
  14.730 +
  14.731 +	/* Device's eeprom is always little-endian, word addressable */
  14.732 +	for (i = 0; i < eeprom_len; i++)
  14.733 +		le16_to_cpus(&eeprom_buff[i]);
  14.734 +
  14.735 +	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
  14.736 +	kfree(eeprom_buff);
  14.737 +
  14.738 +	return ret_val;
  14.739 +}
  14.740 +
  14.741 +static int ixgbe_set_eeprom(struct net_device *netdev,
  14.742 +                            struct ethtool_eeprom *eeprom, u8 *bytes)
  14.743 +{
  14.744 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.745 +	struct ixgbe_hw *hw = &adapter->hw;
  14.746 +	u16 *eeprom_buff;
  14.747 +	void *ptr;
  14.748 +	int max_len, first_word, last_word, ret_val = 0;
  14.749 +	u16 i;
  14.750 +
  14.751 +	if (eeprom->len == 0)
  14.752 +		return -EOPNOTSUPP;
  14.753 +
  14.754 +	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
  14.755 +		return -EFAULT;
  14.756 +
  14.757 +	max_len = hw->eeprom.word_size * 2;
  14.758 +
  14.759 +	first_word = eeprom->offset >> 1;
  14.760 +	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
  14.761 +	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
  14.762 +	if (!eeprom_buff)
  14.763 +		return -ENOMEM;
  14.764 +
  14.765 +	ptr = (void *)eeprom_buff;
  14.766 +
  14.767 +	if (eeprom->offset & 1) {
  14.768 +		/* need read/modify/write of first changed EEPROM word */
  14.769 +		/* only the second byte of the word is being modified */
  14.770 +		ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
  14.771 +		ptr++;
  14.772 +	}
  14.773 +	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
  14.774 +		/* need read/modify/write of last changed EEPROM word */
  14.775 +		/* only the first byte of the word is being modified */
  14.776 +		ret_val = ixgbe_read_eeprom(hw, last_word,
  14.777 +		                  &eeprom_buff[last_word - first_word]);
  14.778 +	}
  14.779 +
  14.780 +	/* Device's eeprom is always little-endian, word addressable */
  14.781 +	for (i = 0; i < last_word - first_word + 1; i++)
  14.782 +		le16_to_cpus(&eeprom_buff[i]);
  14.783 +
  14.784 +	memcpy(ptr, bytes, eeprom->len);
  14.785 +
  14.786 +	for (i = 0; i <= (last_word - first_word); i++)
  14.787 +		ret_val |= ixgbe_write_eeprom(hw, first_word + i, eeprom_buff[i]);
  14.788 +
  14.789 +	/* Update the checksum */
  14.790 +	ixgbe_update_eeprom_checksum(hw);
  14.791 +
  14.792 +	kfree(eeprom_buff);
  14.793 +	return ret_val;
  14.794 +}
  14.795 +
  14.796 +static void ixgbe_get_drvinfo(struct net_device *netdev,
  14.797 +                              struct ethtool_drvinfo *drvinfo)
  14.798 +{
  14.799 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.800 +
  14.801 +	strncpy(drvinfo->driver, ixgbe_driver_name, 32);
  14.802 +	strncpy(drvinfo->version, ixgbe_driver_version, 32);
  14.803 +	strncpy(drvinfo->fw_version, "N/A", 32);
  14.804 +	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
  14.805 +	drvinfo->n_stats = IXGBE_STATS_LEN;
  14.806 +	drvinfo->testinfo_len = IXGBE_TEST_LEN;
  14.807 +	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
  14.808 +}
  14.809 +
  14.810 +static void ixgbe_get_ringparam(struct net_device *netdev,
  14.811 +                                struct ethtool_ringparam *ring)
  14.812 +{
  14.813 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.814 +	struct ixgbe_ring *tx_ring = adapter->tx_ring;
  14.815 +	struct ixgbe_ring *rx_ring = adapter->rx_ring;
  14.816 +
  14.817 +	ring->rx_max_pending = IXGBE_MAX_RXD;
  14.818 +	ring->tx_max_pending = IXGBE_MAX_TXD;
  14.819 +	ring->rx_mini_max_pending = 0;
  14.820 +	ring->rx_jumbo_max_pending = 0;
  14.821 +	ring->rx_pending = rx_ring->count;
  14.822 +	ring->tx_pending = tx_ring->count;
  14.823 +	ring->rx_mini_pending = 0;
  14.824 +	ring->rx_jumbo_pending = 0;
  14.825 +}
  14.826 +
  14.827 +static int ixgbe_set_ringparam(struct net_device *netdev,
  14.828 +                               struct ethtool_ringparam *ring)
  14.829 +{
  14.830 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.831 +	struct ixgbe_ring *temp_ring;
  14.832 +	int i, err;
  14.833 +	u32 new_rx_count, new_tx_count;
  14.834 +
  14.835 +	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
  14.836 +		return -EINVAL;
  14.837 +
  14.838 +	new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
  14.839 +	new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
  14.840 +	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
  14.841 +
  14.842 +	new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
  14.843 +	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
  14.844 +	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
  14.845 +
  14.846 +	if ((new_tx_count == adapter->tx_ring->count) &&
  14.847 +	    (new_rx_count == adapter->rx_ring->count)) {
  14.848 +		/* nothing to do */
  14.849 +		return 0;
  14.850 +	}
  14.851 +
  14.852 +	if (adapter->num_tx_queues > adapter->num_rx_queues)
  14.853 +		temp_ring = vmalloc(adapter->num_tx_queues *
  14.854 +		                    sizeof(struct ixgbe_ring));
  14.855 +	else
  14.856 +		temp_ring = vmalloc(adapter->num_rx_queues *
  14.857 +		                    sizeof(struct ixgbe_ring));
  14.858 +	if (!temp_ring)
  14.859 +		return -ENOMEM;
  14.860 +
  14.861 +	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
  14.862 +		msleep(1);
  14.863 +
  14.864 +	if (netif_running(netdev))
  14.865 +		ixgbe_down(adapter);
  14.866 +
  14.867 +	/*
  14.868 +	 * We can't just free everything and then setup again,
  14.869 +	 * because the ISRs in MSI-X mode get passed pointers
  14.870 +	 * to the tx and rx ring structs.
  14.871 +	 */
  14.872 +	if (new_tx_count != adapter->tx_ring->count) {
  14.873 +		memcpy(temp_ring, adapter->tx_ring,
  14.874 +		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
  14.875 +
  14.876 +		for (i = 0; i < adapter->num_tx_queues; i++) {
  14.877 +			temp_ring[i].count = new_tx_count;
  14.878 +			err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
  14.879 +			if (err) {
  14.880 +				while (i) {
  14.881 +					i--;
  14.882 +					ixgbe_free_tx_resources(adapter,
  14.883 +					                        &temp_ring[i]);
  14.884 +				}
  14.885 +				goto err_setup;
  14.886 +			}
  14.887 +		}
  14.888 +
  14.889 +		for (i = 0; i < adapter->num_tx_queues; i++)
  14.890 +			ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
  14.891 +
  14.892 +		memcpy(adapter->tx_ring, temp_ring,
  14.893 +		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
  14.894 +
  14.895 +		adapter->tx_ring_count = new_tx_count;
  14.896 +	}
  14.897 +
  14.898 +	if (new_rx_count != adapter->rx_ring->count) {
  14.899 +		memcpy(temp_ring, adapter->rx_ring,
  14.900 +		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
  14.901 +
  14.902 +		for (i = 0; i < adapter->num_rx_queues; i++) {
  14.903 +			temp_ring[i].count = new_rx_count;
  14.904 +			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
  14.905 +			if (err) {
  14.906 +				while (i) {
  14.907 +					i--;
  14.908 +					ixgbe_free_rx_resources(adapter,
  14.909 +					                        &temp_ring[i]);
  14.910 +				}
  14.911 +				goto err_setup;
  14.912 +			}
  14.913 +		}
  14.914 +
  14.915 +		for (i = 0; i < adapter->num_rx_queues; i++)
  14.916 +			ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
  14.917 +
  14.918 +		memcpy(adapter->rx_ring, temp_ring,
  14.919 +		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
  14.920 +
  14.921 +		adapter->rx_ring_count = new_rx_count;
  14.922 +	}
  14.923 +
  14.924 +	/* success! */
  14.925 +	err = 0;
  14.926 +err_setup:
  14.927 +	if (netif_running(netdev))
  14.928 +		ixgbe_up(adapter);
  14.929 +
  14.930 +	clear_bit(__IXGBE_RESETTING, &adapter->state);
  14.931 +	return err;
  14.932 +}
  14.933 +
  14.934 +static int ixgbe_get_stats_count(struct net_device *netdev)
  14.935 +{
  14.936 +	return IXGBE_STATS_LEN;
  14.937 +}
  14.938 +
/*
 * ethtool: fill 'data' with IXGBE_STATS_LEN u64 counters, in the same
 * order that ixgbe_get_strings(ETH_SS_STATS) emits the stat names:
 * global stats, per-tx-queue, per-rx-queue, then (DCB only) per-packet-
 * buffer flow-control counters.
 */
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                                    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u64 *queue_stat;
	/* number of u64s per queue-stats struct */
	int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
	int j, k;
	int i;

#ifndef IXGBE_NO_INET_LRO
	unsigned int aggregated = 0, flushed = 0, no_desc = 0;

	/* sum the per-rx-ring LRO counters into adapter-wide totals */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
		flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
		no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
	}
	adapter->lro_aggregated = aggregated;
	adapter->lro_flushed = flushed;
	adapter->lro_no_desc = no_desc;

#endif
	ixgbe_update_stats(adapter);
	/* global stats: each entry names an offset/size of a field in
	 * *adapter; narrower fields are read as u32 */
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
		           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	/* per-queue stats, tx rings first then rx; 'i' keeps running */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		queue_stat = (u64 *)&adapter->tx_ring[j].stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		queue_stat = (u64 *)&adapter->rx_ring[j].stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	/* DCB mode: per-packet-buffer PFC xon/xoff counters */
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
			data[i++] = adapter->stats.pxontxc[j];
			data[i++] = adapter->stats.pxofftxc[j];
		}
		for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
			data[i++] = adapter->stats.pxonrxc[j];
			data[i++] = adapter->stats.pxoffrxc[j];
		}
	}
}
  14.990 +
  14.991 +static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
  14.992 +                              u8 *data)
  14.993 +{
  14.994 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
  14.995 +	char *p = (char *)data;
  14.996 +	int i;
  14.997 +
  14.998 +	switch (stringset) {
  14.999 +	case ETH_SS_TEST:
 14.1000 +		memcpy(data, *ixgbe_gstrings_test,
 14.1001 +		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
 14.1002 +		break;
 14.1003 +	case ETH_SS_STATS:
 14.1004 +		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
 14.1005 +			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
 14.1006 +			       ETH_GSTRING_LEN);
 14.1007 +			p += ETH_GSTRING_LEN;
 14.1008 +		}
 14.1009 +		for (i = 0; i < adapter->num_tx_queues; i++) {
 14.1010 +			sprintf(p, "tx_queue_%u_packets", i);
 14.1011 +			p += ETH_GSTRING_LEN;
 14.1012 +			sprintf(p, "tx_queue_%u_bytes", i);
 14.1013 +			p += ETH_GSTRING_LEN;
 14.1014 +		}
 14.1015 +		for (i = 0; i < adapter->num_rx_queues; i++) {
 14.1016 +			sprintf(p, "rx_queue_%u_packets", i);
 14.1017 +			p += ETH_GSTRING_LEN;
 14.1018 +			sprintf(p, "rx_queue_%u_bytes", i);
 14.1019 +			p += ETH_GSTRING_LEN;
 14.1020 +		}
 14.1021 +		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 14.1022 +			for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
 14.1023 +				sprintf(p, "tx_pb_%u_pxon", i);
 14.1024 +				p += ETH_GSTRING_LEN;
 14.1025 +				sprintf(p, "tx_pb_%u_pxoff", i);
 14.1026 +				p += ETH_GSTRING_LEN;
 14.1027 +			}
 14.1028 +			for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
 14.1029 +				sprintf(p, "rx_pb_%u_pxon", i);
 14.1030 +				p += ETH_GSTRING_LEN;
 14.1031 +				sprintf(p, "rx_pb_%u_pxoff", i);
 14.1032 +				p += ETH_GSTRING_LEN;
 14.1033 +			}
 14.1034 +		}
 14.1035 +/*		BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
 14.1036 +		break;
 14.1037 +	}
 14.1038 +}
 14.1039 +
 14.1040 +static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
 14.1041 +{
 14.1042 +	struct ixgbe_hw *hw = &adapter->hw;
 14.1043 +	bool link_up;
 14.1044 +	u32 link_speed = 0;
 14.1045 +	*data = 0;
 14.1046 +
 14.1047 +	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
 14.1048 +	if (link_up)
 14.1049 +		return *data;
 14.1050 +	else
 14.1051 +		*data = 1;
 14.1052 +	return *data;
 14.1053 +}
 14.1054 +
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;        /* offset of the (first) register under test */
	u8  array_len;  /* number of registers in the array/table */
	u8  test_type;  /* PATTERN_TEST, SET_READ_TEST, ... (see below) */
	u32 mask;       /* bits compared on read-back */
	u32 write;      /* bits attempted on write */
};
 14.1063 +
 14.1064 +/* In the hardware, registers are laid out either singly, in arrays
 14.1065 + * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 14.1066 + * most tests take place on arrays or single registers (handled
 14.1067 + * as a single-element array) and special-case the tables.
 14.1068 + * Table tests are always pattern tests.
 14.1069 + *
 14.1070 + * We also make provision for some required setup steps by specifying
 14.1071 + * registers to be written without any read-back testing.
 14.1072 + */
 14.1073 +
#define PATTERN_TEST	1	/* write four patterns, verify read-back via mask */
#define SET_READ_TEST	2	/* write one value, verify read-back via mask */
#define WRITE_NO_TEST	3	/* setup step: write only, no verification */
#define TABLE32_TEST	4	/* pattern test over a table of 32-bit entries */
#define TABLE64_TEST_LO	5	/* pattern test, low dword of 64-bit entries */
#define TABLE64_TEST_HI	6	/* pattern test, high dword of 64-bit entries */
 14.1080 +
/* default register test (82598): walked by ixgbe_reg_test() until the
 * reg == 0 terminator; array entries are spaced 0x40 apart, table
 * entries 4 or 8 bytes apart */
static struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0),	1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0),	1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP,		1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL,	1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0),	4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0),	4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0),	4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0),	4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0),		4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0),	4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0),	1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0),	1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG,		1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0),	4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0),	4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0),	4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL,		1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL,		1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0),		16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0),		16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 	128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }	/* terminator: reg == 0 ends the table */
};
 14.1108 +
/*
 * Write each of four test patterns (ANDed with W) to register R, read
 * it back, and compare against (pattern & W & M).  The original register
 * value is restored after every iteration.  NOTE: on mismatch the macro
 * sets *data to R and expands a 'return 1' in the ENCLOSING function,
 * so it may only be used where 'adapter' and 'data' are in scope and
 * the function returns int (see ixgbe_reg_test()).
 */
#define REG_PATTERN_TEST(R, M, W)                                             \
{                                                                             \
	u32 pat, val, before;                                                 \
	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
		before = readl(adapter->hw.hw_addr + R);                      \
		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
		val = readl(adapter->hw.hw_addr + R);                         \
		if (val != (_test[pat] & W & M)) {                            \
			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
					  "0x%08X expected 0x%08X\n",         \
				R, val, (_test[pat] & W & M));                \
			*data = R;                                            \
			writel(before, adapter->hw.hw_addr + R);              \
			return 1;                                             \
		}                                                             \
		writel(before, adapter->hw.hw_addr + R);                      \
	}                                                                     \
}
 14.1128 +
/*
 * Write (W & M) to register R and verify the masked read-back matches.
 * The original value is restored afterwards.  NOTE: on mismatch the
 * macro sets *data to R and expands a 'return 1' in the ENCLOSING
 * function (same usage constraints as REG_PATTERN_TEST).
 */
#define REG_SET_AND_CHECK(R, M, W)                                            \
{                                                                             \
	u32 val, before;                                                      \
	before = readl(adapter->hw.hw_addr + R);                              \
	writel((W & M), (adapter->hw.hw_addr + R));                           \
	val = readl(adapter->hw.hw_addr + R);                                 \
	if ((W & M) != (val & M)) {                                           \
		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
				 "expected 0x%08X\n", R, (val & M), (W & M)); \
		*data = R;                                                    \
		writel(before, (adapter->hw.hw_addr + R));                    \
		return 1;                                                     \
	}                                                                     \
	writel(before, (adapter->hw.hw_addr + R));                            \
}
 14.1144 +
/*
 * self-test: exercise the registers listed in reg_test_82598.
 *
 * Returns 0 and *data = 0 on success.  On failure returns 1 with *data
 * set to the failing register offset (or 1 for the STATUS register) —
 * the early returns inside the loop come from the REG_PATTERN_TEST /
 * REG_SET_AND_CHECK macros.
 */
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	/* STATUS bits expected to be writable/toggling on 82598 */
	toggle = 0x7FFFF3FF;
	test = reg_test_82598;

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		DPRINTK(DRV, ERR, "failed STATUS register test got: "
		        "0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			/* arrays are spaced 0x40 apart, 32-bit tables 4,
			 * 64-bit tables 8 (lo/hi dwords) */
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg + (i * 0x40),
						test->mask,
						test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
						test->mask,
						test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						test->mask,
						test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						test->mask,
						test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						test->mask,
						test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}
 14.1218 +
 14.1219 +static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
 14.1220 +{
 14.1221 +	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
 14.1222 +		*data = 1;
 14.1223 +	else
 14.1224 +		*data = 0;
 14.1225 +	return *data;
 14.1226 +}
 14.1227 +
 14.1228 +static irqreturn_t ixgbe_test_intr(int irq, void *data)
 14.1229 +{
 14.1230 +	struct net_device *netdev = (struct net_device *) data;
 14.1231 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 14.1232 +
 14.1233 +	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
 14.1234 +
 14.1235 +	return IRQ_HANDLED;
 14.1236 +}
 14.1237 +
/*
 * self-test: verify that masked interrupts stay quiet and unmasked
 * interrupts fire, for each of the low 10 EICR cause bits.
 *
 * A temporary handler (ixgbe_test_intr) is installed on the legacy/MSI
 * vector; MSI-X is not tested.  *data reports the failure mode:
 * 0 = pass, 1 = could not hook the IRQ, 3 = masked interrupt fired,
 * 4 = unmasked interrupt did not fire, 5 = other masked causes fired.
 * Returns *data (or -1 on request_irq failure).
 */
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
	                        netdev->name, netdev)) {
		/* exclusive grab succeeded — line is not shared */
		shared_int = false;
	} else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
	                       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	DPRINTK(HW, INFO, "testing %s interrupt\n",
		(shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	msleep(10);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
			                ~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
			                ~mask & 0x00007FFF);
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		msleep(10);

		if (!(adapter->test_icr &mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
			                ~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
			                ~mask & 0x00007FFF);
			msleep(10);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
 14.1345 +
 14.1346 +static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 14.1347 +{
 14.1348 +	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 14.1349 +	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
 14.1350 +	struct pci_dev *pdev = adapter->pdev;
 14.1351 +	int i;
 14.1352 +
 14.1353 +	if (tx_ring->desc && tx_ring->tx_buffer_info) {
 14.1354 +		for (i = 0; i < tx_ring->count; i++) {
 14.1355 +			struct ixgbe_tx_buffer *buf =
 14.1356 +					&(tx_ring->tx_buffer_info[i]);
 14.1357 +			if (buf->dma)
 14.1358 +				pci_unmap_single(pdev, buf->dma, buf->length,
 14.1359 +				                 PCI_DMA_TODEVICE);
 14.1360 +			if (buf->skb)
 14.1361 +				dev_kfree_skb(buf->skb);
 14.1362 +		}
 14.1363 +	}
 14.1364 +
 14.1365 +	if (rx_ring->desc && rx_ring->rx_buffer_info) {
 14.1366 +		for (i = 0; i < rx_ring->count; i++) {
 14.1367 +			struct ixgbe_rx_buffer *buf =
 14.1368 +					&(rx_ring->rx_buffer_info[i]);
 14.1369 +			if (buf->dma)
 14.1370 +				pci_unmap_single(pdev, buf->dma,
 14.1371 +						 IXGBE_RXBUFFER_2048,
 14.1372 +						 PCI_DMA_FROMDEVICE);
 14.1373 +			if (buf->skb)
 14.1374 +				dev_kfree_skb(buf->skb);
 14.1375 +		}
 14.1376 +	}
 14.1377 +
 14.1378 +	if (tx_ring->desc) {
 14.1379 +		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
 14.1380 +		                    tx_ring->dma);
 14.1381 +		tx_ring->desc = NULL;
 14.1382 +	}
 14.1383 +	if (rx_ring->desc) {
 14.1384 +		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
 14.1385 +		                    rx_ring->dma);
 14.1386 +		rx_ring->desc = NULL;
 14.1387 +	}
 14.1388 +
 14.1389 +	kfree(tx_ring->tx_buffer_info);
 14.1390 +	tx_ring->tx_buffer_info = NULL;
 14.1391 +	kfree(rx_ring->rx_buffer_info);
 14.1392 +	rx_ring->rx_buffer_info = NULL;
 14.1393 +
 14.1394 +	return;
 14.1395 +}
 14.1396 +
/**
 * ixgbe_setup_desc_rings - allocate and initialize the loopback-test rings
 * @adapter: board private structure
 *
 * Builds a legacy-format Tx and Rx descriptor ring on hardware queue 0,
 * pre-allocates and DMA-maps one buffer per descriptor, and programs the
 * ring registers so a MAC loopback test can run.
 *
 * Returns 0 on success, or a distinct nonzero code (1-6) identifying which
 * allocation step failed; on any failure all partial allocations are torn
 * down via ixgbe_free_desc_rings().
 **/
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	u32 rctl, reg_data;
	int i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!tx_ring->count)
		tx_ring->count = IXGBE_DEFAULT_TXD;

	tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
	                                  sizeof(struct ixgbe_tx_buffer),
	                                  GFP_KERNEL);
	if (!(tx_ring->tx_buffer_info)) {
		ret_val = 1;
		goto err_nomem;
	}

	/* descriptor ring must be 4K aligned for the hardware */
	tx_ring->size = tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
						   &tx_ring->dma))) {
		ret_val = 2;
		goto err_nomem;
	}
	tx_ring->next_to_use = tx_ring->next_to_clean = 0;

	/* program the Tx ring base address, length, and head/tail for queue 0 */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
			((u64) tx_ring->dma >> 32));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
			tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);

	/* let hardware pad short frames */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);

	/* enable the Tx queue */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
	reg_data |= IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);

	/* pre-fill every Tx descriptor with a mapped 1KB buffer; the
	 * loopback test later just stamps a pattern and bumps the tail */
	for (i = 0; i < tx_ring->count; i++) {
		struct ixgbe_legacy_tx_desc *desc = IXGBE_TX_DESC(*tx_ring, i);
		struct sk_buff *skb;
		unsigned int size = 1024;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, size);
		tx_ring->tx_buffer_info[i].skb = skb;
		tx_ring->tx_buffer_info[i].length = skb->len;
		tx_ring->tx_buffer_info[i].dma =
			pci_map_single(pdev, skb->data, skb->len,
					PCI_DMA_TODEVICE);
		desc->buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
		desc->lower.data = cpu_to_le32(skb->len);
		/* EOP+IFCS+RS: single-buffer frame, insert FCS, report status */
		desc->lower.data |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
		                                IXGBE_TXD_CMD_IFCS |
		                                IXGBE_TXD_CMD_RS);
		desc->upper.data = 0;
	}

	/* Setup Rx Descriptor ring and Rx buffers */

	if (!rx_ring->count)
		rx_ring->count = IXGBE_DEFAULT_RXD;

	rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
	                                  sizeof(struct ixgbe_rx_buffer),
	                                  GFP_KERNEL);
	if (!(rx_ring->rx_buffer_info)) {
		ret_val = 4;
		goto err_nomem;
	}

	rx_ring->size = rx_ring->count * sizeof(struct ixgbe_legacy_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
						   &rx_ring->dma))) {
		ret_val = 5;
		goto err_nomem;
	}
	rx_ring->next_to_use = rx_ring->next_to_clean = 0;

	/* receive must be disabled while the Rx ring is reprogrammed */
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
			((u64)rx_ring->dma & 0xFFFFFFFF));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
			((u64) rx_ring->dma >> 32));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);

	/* accept broadcast, bad packets and all multicast so the looped-back
	 * test frames are never filtered out */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);

	/* loopback itself is enabled later by ixgbe_setup_loopback_test() */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
#define IXGBE_RDRXCTL_RDMTS_MASK    0x00000003 /* Receive Descriptor Minimum
                                                  Threshold Size mask */
	reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
#define IXGBE_MCSTCTRL_MO_MASK      0x00000003 /* Multicast Offset mask */
	reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
	reg_data |= adapter->hw.mac.mc_filter_type;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);

	/* enable the Rx queue, then re-enable receive with DMA bypass */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
	reg_data |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);

	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	/* pre-fill every Rx descriptor with a mapped 2KB receive buffer */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_legacy_rx_desc *rx_desc =
					IXGBE_RX_DESC(*rx_ring, i);
		struct sk_buff *skb;

		skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb) {
			ret_val = 6;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rx_ring->rx_buffer_info[i].skb = skb;
		rx_ring->rx_buffer_info[i].dma =
			pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
			               PCI_DMA_FROMDEVICE);
		rx_desc->buffer_addr =
				cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
		/* zero the buffer so stale data can't fake a pattern match */
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
 14.1553 +
 14.1554 +static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
 14.1555 +{
 14.1556 +	struct ixgbe_hw *hw = &adapter->hw;
 14.1557 +	u32 reg_data;
 14.1558 +
 14.1559 +	/* right now we only support MAC loopback in the driver */
 14.1560 +
 14.1561 +	/* Setup MAC loopback */
 14.1562 +	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
 14.1563 +	reg_data |= IXGBE_HLREG0_LPBK;
 14.1564 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
 14.1565 +
 14.1566 +	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
 14.1567 +	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
 14.1568 +	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
 14.1569 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
 14.1570 +
 14.1571 +	/* Disable Atlas Tx lanes; re-enabled in reset path */
 14.1572 +	if (hw->mac.type == ixgbe_mac_82598EB) {
 14.1573 +		u8 atlas;
 14.1574 +
 14.1575 +		ixgbe_read_analog_reg8(&adapter->hw,
 14.1576 +		                       IXGBE_ATLAS_PDN_LPBK, &atlas);
 14.1577 +		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
 14.1578 +		ixgbe_write_analog_reg8(&adapter->hw,
 14.1579 +		                        IXGBE_ATLAS_PDN_LPBK, atlas);
 14.1580 +
 14.1581 +		ixgbe_read_analog_reg8(&adapter->hw,
 14.1582 +		                       IXGBE_ATLAS_PDN_10G, &atlas);
 14.1583 +		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
 14.1584 +		ixgbe_write_analog_reg8(&adapter->hw,
 14.1585 +		                        IXGBE_ATLAS_PDN_10G, atlas);
 14.1586 +
 14.1587 +		ixgbe_read_analog_reg8(&adapter->hw,
 14.1588 +		                       IXGBE_ATLAS_PDN_1G, &atlas);
 14.1589 +		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
 14.1590 +		ixgbe_write_analog_reg8(&adapter->hw,
 14.1591 +		                        IXGBE_ATLAS_PDN_1G, atlas);
 14.1592 +
 14.1593 +		ixgbe_read_analog_reg8(&adapter->hw,
 14.1594 +		                       IXGBE_ATLAS_PDN_AN, &atlas);
 14.1595 +		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
 14.1596 +		ixgbe_write_analog_reg8(&adapter->hw,
 14.1597 +		                        IXGBE_ATLAS_PDN_AN, atlas);
 14.1598 +	}
 14.1599 +
 14.1600 +	return 0;
 14.1601 +}
 14.1602 +
 14.1603 +static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
 14.1604 +{
 14.1605 +	u32 reg_data;
 14.1606 +
 14.1607 +	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
 14.1608 +	reg_data &= ~IXGBE_HLREG0_LPBK;
 14.1609 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
 14.1610 +}
 14.1611 +
 14.1612 +static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
 14.1613 +                                      unsigned int frame_size)
 14.1614 +{
 14.1615 +	memset(skb->data, 0xFF, frame_size);
 14.1616 +	frame_size &= ~1;
 14.1617 +	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
 14.1618 +	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
 14.1619 +	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
 14.1620 +}
 14.1621 +
 14.1622 +static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
 14.1623 +                                    unsigned int frame_size)
 14.1624 +{
 14.1625 +	frame_size &= ~1;
 14.1626 +	if (*(skb->data + 3) == 0xFF) {
 14.1627 +		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
 14.1628 +		    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
 14.1629 +			return 0;
 14.1630 +		}
 14.1631 +	}
 14.1632 +	return 13;
 14.1633 +}
 14.1634 +
 14.1635 +static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 14.1636 +{
 14.1637 +	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 14.1638 +	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
 14.1639 +	struct pci_dev *pdev = adapter->pdev;
 14.1640 +	int i, j, k, l, lc, good_cnt, ret_val = 0;
 14.1641 +	unsigned long time;
 14.1642 +
 14.1643 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
 14.1644 +
 14.1645 +	/*
 14.1646 +	 * Calculate the loop count based on the largest descriptor ring
 14.1647 +	 * The idea is to wrap the largest ring a number of times using 64
 14.1648 +	 * send/receive pairs during each loop
 14.1649 +	 */
 14.1650 +
 14.1651 +	if (rx_ring->count <= tx_ring->count)
 14.1652 +		lc = ((tx_ring->count / 64) * 2) + 1;
 14.1653 +	else
 14.1654 +		lc = ((rx_ring->count / 64) * 2) + 1;
 14.1655 +
 14.1656 +	k = l = 0;
 14.1657 +	for (j = 0; j <= lc; j++) {
 14.1658 +		for (i = 0; i < 64; i++) {
 14.1659 +			ixgbe_create_lbtest_frame(
 14.1660 +					tx_ring->tx_buffer_info[k].skb,
 14.1661 +					1024);
 14.1662 +			pci_dma_sync_single_for_device(pdev,
 14.1663 +				tx_ring->tx_buffer_info[k].dma,
 14.1664 +				tx_ring->tx_buffer_info[k].length,
 14.1665 +				PCI_DMA_TODEVICE);
 14.1666 +			if (unlikely(++k == tx_ring->count))
 14.1667 +				k = 0;
 14.1668 +		}
 14.1669 +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
 14.1670 +		msleep(200);
 14.1671 +		/* set the start time for the receive */
 14.1672 +		time = jiffies;
 14.1673 +		good_cnt = 0;
 14.1674 +		do {
 14.1675 +			/* receive the sent packets */
 14.1676 +			pci_dma_sync_single_for_cpu(pdev,
 14.1677 +					rx_ring->rx_buffer_info[l].dma,
 14.1678 +					IXGBE_RXBUFFER_2048,
 14.1679 +					PCI_DMA_FROMDEVICE);
 14.1680 +			ret_val = ixgbe_check_lbtest_frame(
 14.1681 +					rx_ring->rx_buffer_info[l].skb, 1024);
 14.1682 +			if (!ret_val)
 14.1683 +				good_cnt++;
 14.1684 +			if (++l == rx_ring->count)
 14.1685 +				l = 0;
 14.1686 +			/*
 14.1687 +			 * time + 20 msecs (200 msecs on 2.4) is more than
 14.1688 +			 * enough time to complete the receives, if it's
 14.1689 +			 * exceeded, break and error off
 14.1690 +			 */
 14.1691 +		} while (good_cnt < 64 && jiffies < (time + 20));
 14.1692 +		if (good_cnt != 64) {
 14.1693 +			/* ret_val is the same as mis-compare */
 14.1694 +			ret_val = 13;
 14.1695 +			break;
 14.1696 +		}
 14.1697 +		if (jiffies >= (time + 20)) {
 14.1698 +			/* Error code for time out error */
 14.1699 +			ret_val = 14;
 14.1700 +			break;
 14.1701 +		}
 14.1702 +	}
 14.1703 +
 14.1704 +	return ret_val;
 14.1705 +}
 14.1706 +
 14.1707 +static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
 14.1708 +{
 14.1709 +	*data = ixgbe_setup_desc_rings(adapter);
 14.1710 +	if (*data)
 14.1711 +		goto out;
 14.1712 +	*data = ixgbe_setup_loopback_test(adapter);
 14.1713 +	if (*data)
 14.1714 +		goto err_loopback;
 14.1715 +	*data = ixgbe_run_loopback_test(adapter);
 14.1716 +	ixgbe_loopback_cleanup(adapter);
 14.1717 +
 14.1718 +err_loopback:
 14.1719 +	ixgbe_free_desc_rings(adapter);
 14.1720 +out:
 14.1721 +	return *data;
 14.1722 +}
 14.1723 +
/* ethtool self_test_count hook: number of entries in the self-test
 * result array reported to userspace */
static int ixgbe_diag_test_count(struct net_device *netdev)
{
	return IXGBE_TEST_LEN;
}
 14.1728 +
/**
 * ixgbe_diag_test - ethtool self-test hook
 * @netdev: network interface being tested
 * @eth_test: test request/result flags from userspace
 * @data: per-test result array (0 = pass):
 *        [0] register test, [1] eeprom test, [2] interrupt test,
 *        [3] loopback test, [4] link test
 *
 * Offline mode closes (or resets) the interface, runs the destructive
 * register/eeprom/interrupt/loopback tests with a hardware reset between
 * each, then reopens the interface.  Online mode only runs the link test
 * and reports the other four as passing.
 **/
static void ixgbe_diag_test(struct net_device *netdev,
                            struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	/* flag test mode so the rest of the driver backs off */
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		DPRINTK(HW, INFO, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		DPRINTK(HW, INFO, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* each destructive test starts from a freshly-reset device */
		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);

		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		DPRINTK(HW, INFO, "online testing starting\n");
		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
	/* give the link time to come back up after the resets */
	msleep_interruptible(4 * 1000);
}
 14.1792 +
 14.1793 +static void ixgbe_get_wol(struct net_device *netdev,
 14.1794 +                          struct ethtool_wolinfo *wol)
 14.1795 +{
 14.1796 +	wol->supported = 0;
 14.1797 +	wol->wolopts = 0;
 14.1798 +
 14.1799 +	return;
 14.1800 +}
 14.1801 +
 14.1802 +static int ixgbe_nway_reset(struct net_device *netdev)
 14.1803 +{
 14.1804 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 14.1805 +
 14.1806 +	if (netif_running(netdev))
 14.1807 +		ixgbe_reinit_locked(adapter);
 14.1808 +
 14.1809 +	return 0;
 14.1810 +}
 14.1811 +
 14.1812 +static int ixgbe_phys_id(struct net_device *netdev, u32 data)
 14.1813 +{
 14.1814 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 14.1815 +	u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
 14.1816 +	u32 i;
 14.1817 +
 14.1818 +	if (!data || data > 300)
 14.1819 +		data = 300;
 14.1820 +
 14.1821 +	for (i = 0; i < (data * 1000); i += 400) {
 14.1822 +		ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
 14.1823 +		msleep_interruptible(200);
 14.1824 +		ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
 14.1825 +		msleep_interruptible(200);
 14.1826 +	}
 14.1827 +
 14.1828 +	/* Restore LED settings */
 14.1829 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg);
 14.1830 +
 14.1831 +	return 0;
 14.1832 +}
 14.1833 +
 14.1834 +static int ixgbe_get_coalesce(struct net_device *netdev,
 14.1835 +                              struct ethtool_coalesce *ec)
 14.1836 +{
 14.1837 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 14.1838 +
 14.1839 +	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
 14.1840 +#ifndef CONFIG_IXGBE_NAPI
 14.1841 +	ec->rx_max_coalesced_frames_irq = adapter->rx_ring[0].work_limit;
 14.1842 +#endif
 14.1843 +
 14.1844 +	/* only valid if in constant ITR mode */
 14.1845 +	if (adapter->itr_setting == 0)
 14.1846 +		ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
 14.1847 +
 14.1848 +	return 0;
 14.1849 +}
 14.1850 +
/**
 * ixgbe_set_coalesce - ethtool set_coalesce hook
 * @netdev: network interface being configured
 * @ec: requested coalescing parameters
 *
 * Updates the per-interrupt work limits, and interprets rx_coalesce_usecs
 * as follows: values > 3 select a constant interrupt rate (stored as
 * interrupts/second in eitr_param and programmed into every EITR
 * register); values 1-3 select dynamic ITR mode.  A running interface is
 * reinitialized to apply the change.  Always returns 0.
 **/
static int ixgbe_set_coalesce(struct net_device *netdev,
                              struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (ec->tx_max_coalesced_frames_irq)
		adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;

#ifndef CONFIG_IXGBE_NAPI
	if (ec->rx_max_coalesced_frames_irq)
		adapter->rx_ring[0].work_limit = ec->rx_max_coalesced_frames_irq;

#endif
	if (ec->rx_coalesce_usecs > 3) {
		struct ixgbe_hw *hw = &adapter->hw;
		int i;
		/* store the value in ints/second */
		adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;

		/* program every queue vector's interrupt throttle register */
		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++){
			struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
			if (q_vector->txr_count && !q_vector->rxr_count)
				/* tx-only vectors get half the rate */
				q_vector->eitr = (adapter->eitr_param >> 1);
			else
				/* rx only */
				q_vector->eitr = adapter->eitr_param;
			IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
			              EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
		}

		/* static value of interrupt rate */
		adapter->itr_setting = adapter->eitr_param;
	} else {
		/* 1,2,3 means dynamic mode */
		adapter->itr_setting = ec->rx_coalesce_usecs;
	}

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
 14.1893 +
 14.1894 +
/* ethtool operations table; installed on the netdev by
 * ixgbe_set_ethtool_ops().  Entries guarded by #ifdef are only present
 * when the corresponding kernel feature/compat macro is available. */
static struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings           = ixgbe_get_settings,
	.set_settings           = ixgbe_set_settings,
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_rx_csum            = ixgbe_get_rx_csum,
	.set_rx_csum            = ixgbe_set_rx_csum,
	.get_tx_csum            = ixgbe_get_tx_csum,
	.set_tx_csum            = ixgbe_set_tx_csum,
	.get_sg                 = ethtool_op_get_sg,
	.set_sg                 = ethtool_op_set_sg,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
#ifdef NETIF_F_TSO
	.get_tso                = ethtool_op_get_tso,
	.set_tso                = ixgbe_set_tso,
#endif
	.self_test_count        = ixgbe_diag_test_count,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.phys_id                = ixgbe_phys_id,
	.get_stats_count        = ixgbe_get_stats_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
#ifdef ETHTOOL_GPERMADDR
	.get_perm_addr          = ethtool_op_get_perm_addr,
#endif
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
#ifndef IXGBE_NO_INET_LRO
	.get_flags              = ethtool_op_get_flags,
	.set_flags              = ethtool_op_set_flags,
#endif
};
 14.1939 +
/* attach the ixgbe ethtool operations table to a net_device */
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}
 14.1944 +#endif /* SIOCETHTOOL */
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/drivers/net/ixgbe/ixgbe_main.c	Fri Jan 30 10:52:47 2009 +0000
    15.3 @@ -0,0 +1,5464 @@
    15.4 +/*******************************************************************************
    15.5 +
    15.6 +  Intel 10 Gigabit PCI Express Linux driver
    15.7 +  Copyright(c) 1999 - 2008 Intel Corporation.
    15.8 +
    15.9 +  This program is free software; you can redistribute it and/or modify it
   15.10 +  under the terms and conditions of the GNU General Public License,
   15.11 +  version 2, as published by the Free Software Foundation.
   15.12 +
   15.13 +  This program is distributed in the hope it will be useful, but WITHOUT
   15.14 +  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   15.15 +  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   15.16 +  more details.
   15.17 +
   15.18 +  You should have received a copy of the GNU General Public License along with
   15.19 +  this program; if not, write to the Free Software Foundation, Inc.,
   15.20 +  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
   15.21 +
   15.22 +  The full GNU General Public License is included in this distribution in
   15.23 +  the file called "COPYING".
   15.24 +
   15.25 +  Contact Information:
   15.26 +  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   15.27 +  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
   15.28 +
   15.29 +*******************************************************************************/
   15.30 +
   15.31 +
   15.32 +/******************************************************************************
   15.33 + Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
   15.34 +******************************************************************************/
   15.35 +
   15.36 +#include <linux/types.h>
   15.37 +#include <linux/module.h>
   15.38 +#include <linux/pci.h>
   15.39 +#include <linux/netdevice.h>
   15.40 +#include <linux/vmalloc.h>
   15.41 +#include <linux/string.h>
   15.42 +#include <linux/in.h>
   15.43 +#include <linux/ip.h>
   15.44 +#include <linux/tcp.h>
   15.45 +#include <linux/ipv6.h>
   15.46 +#ifdef NETIF_F_TSO
   15.47 +#include <net/checksum.h>
   15.48 +#ifdef NETIF_F_TSO6
   15.49 +#include <net/ip6_checksum.h>
   15.50 +#endif
   15.51 +#endif
   15.52 +#ifdef SIOCETHTOOL
   15.53 +#include <linux/ethtool.h>
   15.54 +#endif
   15.55 +#ifdef NETIF_F_HW_VLAN_TX
   15.56 +#include <linux/if_vlan.h>
   15.57 +#endif
   15.58 +
   15.59 +#include "ixgbe.h"
   15.60 +
/* driver identification strings; name/version are referenced from other
 * translation units (non-static) */
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";
#define DRV_HW_PERF

/* version string gets a "-NAPI" suffix in NAPI builds */
#ifndef CONFIG_IXGBE_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif

#define DRV_VERSION "1.3.56.5" DRIVERNAPI DRV_HW_PERF
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)},
	/* required last entry */
	{0, }
};
/* export the ID table so hotplug/modprobe can match devices */
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
   15.98 +
/* Direct Cache Access support: notifier invoked when a DCA provider
 * is added or removed (compiled in only when DCA is enabled) */
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* default msg_enable: NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK */
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
  15.115 +
  15.116 +static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
  15.117 +{
  15.118 +	u32 ctrl_ext;
  15.119 +
  15.120 +	/* Let firmware take over control of h/w */
  15.121 +	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
  15.122 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
  15.123 +	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
  15.124 +}
  15.125 +
  15.126 +static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
  15.127 +{
  15.128 +	u32 ctrl_ext;
  15.129 +
  15.130 +	/* Let firmware know the driver has taken over */
  15.131 +	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
  15.132 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
  15.133 +	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
  15.134 +}
  15.135 +
  15.136 +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
  15.137 +                           u8 msix_vector)
  15.138 +{
  15.139 +	u32 ivar, index;
  15.140 +
  15.141 +	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
  15.142 +	index = (int_alloc_entry >> 2) & 0x1F;
  15.143 +	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
  15.144 +	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
  15.145 +	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
  15.146 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
  15.147 +}
  15.148 +
  15.149 +static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
  15.150 +                                             struct ixgbe_tx_buffer
  15.151 +                                             *tx_buffer_info)
  15.152 +{
  15.153 +	if (tx_buffer_info->dma) {
  15.154 +		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
  15.155 +		               tx_buffer_info->length, PCI_DMA_TODEVICE);
  15.156 +		tx_buffer_info->dma = 0;
  15.157 +	}
  15.158 +	if (tx_buffer_info->skb) {
  15.159 +		dev_kfree_skb_any(tx_buffer_info->skb);
  15.160 +		tx_buffer_info->skb = NULL;
  15.161 +	}
  15.162 +	/* tx_buffer_info must be completely set up in the transmit path */
  15.163 +}
  15.164 +
  15.165 +static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
  15.166 +                                       struct ixgbe_ring *tx_ring,
  15.167 +                                       unsigned int eop)
  15.168 +{
  15.169 +	struct ixgbe_hw *hw = &adapter->hw;
  15.170 +	u32 head, tail;
  15.171 +
  15.172 +	/* Detect a transmit hang in hardware, this serializes the
  15.173 +	 * check with the clearing of time_stamp and movement of eop */
  15.174 +	head = IXGBE_READ_REG(hw, tx_ring->head);
  15.175 +	tail = IXGBE_READ_REG(hw, tx_ring->tail);
  15.176 +	adapter->detect_tx_hung = false;
  15.177 +	if ((head != tail) &&
  15.178 +	    tx_ring->tx_buffer_info[eop].time_stamp &&
  15.179 +	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
  15.180 +	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
  15.181 +		/* detected Tx unit hang */
  15.182 +		union ixgbe_adv_tx_desc *tx_desc;
  15.183 +		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
  15.184 +		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
  15.185 +			"  Tx Queue             <%d>\n"
  15.186 +			"  TDH, TDT             <%x>, <%x>\n"
  15.187 +			"  next_to_use          <%x>\n"
  15.188 +			"  next_to_clean        <%x>\n"
  15.189 +			"tx_buffer_info[next_to_clean]\n"
  15.190 +			"  time_stamp           <%lx>\n"
  15.191 +			"  jiffies              <%lx>\n",
  15.192 +			tx_ring->queue_index,
  15.193 +			head, tail,
  15.194 +			tx_ring->next_to_use, eop,
  15.195 +			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
  15.196 +		return true;
  15.197 +	}
  15.198 +
  15.199 +	return false;
  15.200 +}
  15.201 +
/* a single Tx descriptor can carry at most 2^14 = 16KB of data */
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif

/* hardware writes its Tx head pointer to the 32-bit slot placed just
 * past the last descriptor in the ring (head write-back) */
#define GET_TX_HEAD_FROM_RING(ring) (\
	*(volatile u32 *) \
	((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
static void ixgbe_tx_timeout(struct net_device *netdev);
  15.219 +
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 *
 * Walks the ring from next_to_clean up to the reported head position,
 * accumulating packet/byte counts and releasing each buffer's DMA
 * mapping and skb.  Re-samples the head until it stops advancing, may
 * wake a stopped queue once enough descriptors are free, and re-arms
 * the interrupt if the work limit was hit.
 *
 * Returns true if at least one packet was accounted.
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;
	unsigned int i;
	u32 head, oldhead;
	unsigned int count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	/* order the head-slot read against earlier memory updates */
	rmb();
	head = GET_TX_HEAD_FROM_RING(tx_ring);
	head = le32_to_cpu(head);
	i = tx_ring->next_to_clean;
	while (1) {
		while (i != head) {
			/* NOTE(review): tx_desc is computed but never read
			 * in this loop */
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			skb = tx_buffer_info->skb;

			if (skb) {
#ifdef NETIF_F_TSO
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;
#else
				total_packets++;
				total_bytes += skb->len;
#endif
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			i++;
			if (i == tx_ring->count)
				i = 0;

			count++;
			/* safety stop: never clean more than a full ring */
			if (count == tx_ring->count)
				goto done_cleaning;
		}
		/* re-sample the head; more completions may have landed
		 * while we were cleaning */
		oldhead = head;
		rmb();
		head = GET_TX_HEAD_FROM_RING(tx_ring);
		head = le32_to_cpu(head);
		if (head == oldhead)
			goto done_cleaning;
	} /* while (1) */

done_cleaning:
	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if ((total_packets >= tx_ring->work_limit) ||
	    (count == tx_ring->count))
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (total_packets ? true : false);
}
  15.331 +
  15.332 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
  15.333 +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
  15.334 +                                struct ixgbe_ring *rx_ring)
  15.335 +{
  15.336 +	u32 rxctrl;
  15.337 +	int cpu = get_cpu();
  15.338 +	int q = rx_ring - adapter->rx_ring;
  15.339 +
  15.340 +	if (rx_ring->cpu != cpu) {
  15.341 +		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
  15.342 +		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
  15.343 +		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
  15.344 +		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
  15.345 +		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
  15.346 +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
  15.347 +		rx_ring->cpu = cpu;
  15.348 +	}
  15.349 +	put_cpu();
  15.350 +}
  15.351 +
  15.352 +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
  15.353 +                                struct ixgbe_ring *tx_ring)
  15.354 +{
  15.355 +	u32 txctrl;
  15.356 +	int cpu = get_cpu();
  15.357 +	int q = tx_ring - adapter->tx_ring;
  15.358 +
  15.359 +	if (tx_ring->cpu != cpu) {
  15.360 +		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
  15.361 +		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
  15.362 +		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
  15.363 +		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
  15.364 +		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
  15.365 +		tx_ring->cpu = cpu;
  15.366 +	}
  15.367 +	put_cpu();
  15.368 +}
  15.369 +
  15.370 +static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
  15.371 +{
  15.372 +	int i;
  15.373 +
  15.374 +	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
  15.375 +		return;
  15.376 +
  15.377 +	for (i = 0; i < adapter->num_tx_queues; i++) {
  15.378 +		adapter->tx_ring[i].cpu = -1;
  15.379 +		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
  15.380 +	}
  15.381 +	for (i = 0; i < adapter->num_rx_queues; i++) {
  15.382 +		adapter->rx_ring[i].cpu = -1;
  15.383 +		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
  15.384 +	}
  15.385 +}
  15.386 +
/*
 * __ixgbe_notify_dca - handle a DCA provider add/remove event
 * @dev:  device being notified (drvdata holds the netdev)
 * @data: pointer to the unsigned long DCA event code
 *
 * On DCA_PROVIDER_ADD, enables CB2-mode DCA and registers this device
 * as a requester; a failed registration deliberately falls through to
 * the remove path so the hardware is left with DCA disabled.  Always
 * returns 0.
 */
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			/* writing 1 presumably turns DCA tagging off —
			 * confirm against the 82598 datasheet */
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}
  15.418 +
  15.419 +#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) the packet came from
 * @rx_desc: rx descriptor, used here for the VLAN tag
 *
 * Dispatches through one of several delivery paths depending on build
 * configuration: kernel inet_lro (when the device has NETIF_F_LRO and
 * checksum was verified), NAPI receive, or plain netif_rx, each with a
 * VLAN-accelerated variant when a tag is present.
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	int ret;
	/* VLAN-present bit from the descriptor status */
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

#ifndef IXGBE_NO_INET_LRO
	/* inet_lro path: only safe when hw validated the checksum */
	if (adapter->netdev->features & NETIF_F_LRO &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
#ifdef NETIF_F_HW_VLAN_TX
		if (adapter->vlgrp && is_vlan && (tag != 0))
			lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
			                             adapter->vlgrp, tag,
			                             rx_desc);
		else
#endif
			lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
		/* remember to lro_flush_all() at end of the rx cycle */
		ring->lro_used = true;
	} else {
#endif /* IXGBE_NO_INET_LRO */
#ifdef CONFIG_IXGBE_NAPI
		/* netpoll runs in irq context, so bypass netif_receive_skb */
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
#ifdef NETIF_F_HW_VLAN_TX
			if (adapter->vlgrp && is_vlan && (tag != 0))
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
			else
				netif_receive_skb(skb);
#else
			netif_receive_skb(skb);
#endif
		} else {
#endif /* CONFIG_IXGBE_NAPI */

#ifdef NETIF_F_HW_VLAN_TX
			if (adapter->vlgrp && is_vlan && (tag != 0))
				ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
			else
				ret = netif_rx(skb);
#else
			ret = netif_rx(skb);
#endif
#ifndef CONFIG_IXGBE_NAPI
			/* count backlog-queue drops in non-NAPI mode */
			if (ret == NET_RX_DROP)
				adapter->rx_dropped_backlog++;
#endif
#ifdef CONFIG_IXGBE_NAPI
		}
#endif /* CONFIG_IXGBE_NAPI */
#ifndef IXGBE_NO_INET_LRO
	}
#endif
}
  15.483 +
  15.484 +/**
  15.485 + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
  15.486 + * @adapter: address of board private structure
  15.487 + * @status_err: hardware indication of status of receive
  15.488 + * @skb: skb currently being received and modified
  15.489 + **/
  15.490 +static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
  15.491 +                                     u32 status_err, struct sk_buff *skb)
  15.492 +{
  15.493 +	skb->ip_summed = CHECKSUM_NONE;
  15.494 +
  15.495 +	/* Rx csum disabled */
  15.496 +	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
  15.497 +		return;
  15.498 +
  15.499 +	/* if IP and error */
  15.500 +	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
  15.501 +	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
  15.502 +		adapter->hw_csum_rx_error++;
  15.503 +		return;
  15.504 +	}
  15.505 +
  15.506 +	if (!(status_err & IXGBE_RXD_STAT_L4CS))
  15.507 +		return;
  15.508 +
  15.509 +	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
  15.510 +		adapter->hw_csum_rx_error++;
  15.511 +		return;
  15.512 +	}
  15.513 +
  15.514 +	/* It must be a TCP or UDP packet with a valid checksum */
  15.515 +	skb->ip_summed = CHECKSUM_UNNECESSARY;
  15.516 +	adapter->hw_csum_rx_good++;
  15.517 +}
  15.518 +
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to refill
 * @cleaned_count: number of descriptors to (re)populate
 *
 * Refills up to @cleaned_count descriptors starting at next_to_use,
 * allocating page halves (packet-split mode) and header skbs as needed,
 * then tells the hardware about the new descriptors via the ring tail.
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;
	/* extra NET_IP_ALIGN bytes leave room to align the IP header */
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
			                                       bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data, bufsz,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			/* packet-split: header buffer + page-half payload */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		/* the tail register must point at the LAST valid
		 * descriptor, i.e. one before next_to_use (with wrap) */
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
  15.610 +
  15.611 +static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
  15.612 +{
  15.613 +	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
  15.614 +}
  15.615 +
  15.616 +static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
  15.617 +{
  15.618 +	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
  15.619 +}
  15.620 +
  15.621 +#ifndef IXGBE_NO_LRO
/*
 * NOTE(review): not referenced anywhere in this chunk — presumably the
 * default cap on frames coalesced per LRO descriptor, consulted when
 * lro_data->max is initialized elsewhere.  Confirm before removing.
 */
static int lromax = 44;
  15.623 +
  15.624 +/**
  15.625 + * ixgbe_lro_ring_flush - Indicate packets to upper layer.
  15.626 + *
  15.627 + * Update IP and TCP header part of head skb if more than one
  15.628 + * skb's chained and indicate packets to upper layer.
  15.629 + **/
  15.630 +static void ixgbe_lro_ring_flush(struct ixgbe_lro_list *lrolist,
  15.631 +                                 struct ixgbe_adapter *adapter,
  15.632 +                                 struct ixgbe_lro_desc *lrod, u8 status,
  15.633 +                                 struct ixgbe_ring *rx_ring,
  15.634 +                                 union ixgbe_adv_rx_desc *rx_desc)
  15.635 +{
  15.636 +	struct iphdr *iph;
  15.637 +	struct tcphdr *th;
  15.638 +	struct sk_buff *skb;
  15.639 +	u32 *ts_ptr;
  15.640 +	struct ixgbe_lro_info *lro_data = &adapter->lro_data;
  15.641 +	struct net_device *netdev = adapter->netdev;
  15.642 +
  15.643 +	hlist_del(&lrod->lro_node);
  15.644 +	lrolist->active_cnt--;
  15.645 +
  15.646 +	skb = lrod->skb;
  15.647 +
  15.648 +	if (lrod->append_cnt) {
  15.649 +		/* incorporate ip header and re-calculate checksum */
  15.650 +		iph = (struct iphdr *)skb->data;
  15.651 +		iph->tot_len = ntohs(skb->len);
  15.652 +		iph->check = 0;
  15.653 +		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
  15.654 +
  15.655 +		/* incorporate the latest ack into the tcp header */
  15.656 +		th = (struct tcphdr *) ((char *)skb->data + sizeof(*iph));
  15.657 +		th->ack_seq = lrod->ack_seq;
  15.658 +		th->window = lrod->window;
  15.659 +
  15.660 +		/* incorporate latest timestamp into the tcp header */
  15.661 +		if (lrod->timestamp) {
  15.662 +			ts_ptr = (u32 *)(th + 1);
  15.663 +			ts_ptr[1] = htonl(lrod->tsval);
  15.664 +			ts_ptr[2] = lrod->tsecr;
  15.665 +		}
  15.666 +	}
  15.667 +
  15.668 +#ifdef NETIF_F_TSO
  15.669 +	skb_shinfo(skb)->gso_size = lrod->mss;
  15.670 +#endif
  15.671 +	ixgbe_receive_skb(adapter, skb, status, rx_ring, rx_desc);
  15.672 +
  15.673 +	netdev->last_rx = jiffies;
  15.674 +	lro_data->stats.coal += lrod->append_cnt + 1;
  15.675 +	lro_data->stats.flushed++;
  15.676 +
  15.677 +	lrod->skb = NULL;
  15.678 +	lrod->last_skb = NULL;
  15.679 +	lrod->timestamp = 0;
  15.680 +	lrod->append_cnt = 0;
  15.681 +	lrod->data_size = 0;
  15.682 +	hlist_add_head(&lrod->lro_node, &lrolist->free);
  15.683 +}
  15.684 +
  15.685 +static void ixgbe_lro_ring_flush_all(struct ixgbe_lro_list *lrolist,
  15.686 +                                     struct ixgbe_adapter *adapter, u8 status,
  15.687 +                                     struct ixgbe_ring *rx_ring,
  15.688 +                                     union ixgbe_adv_rx_desc *rx_desc)
  15.689 +{
  15.690 +	struct ixgbe_lro_desc *lrod;
  15.691 +	struct hlist_node *node, *node2;
  15.692 +
  15.693 +	hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active, lro_node)
  15.694 +		ixgbe_lro_ring_flush(lrolist, adapter, lrod, status, rx_ring,
  15.695 +		                     rx_desc);
  15.696 +}
  15.697 +
  15.698 +/*
  15.699 + * ixgbe_lro_header_ok - Main LRO function.
  15.700 + **/
  15.701 +static int ixgbe_lro_header_ok(struct ixgbe_lro_info *lro_data,
  15.702 +                               struct sk_buff *new_skb, struct iphdr *iph,
  15.703 +                               struct tcphdr *th)
  15.704 +{
  15.705 +	int opt_bytes, tcp_data_len;
  15.706 +	u32 *ts_ptr = NULL;
  15.707 +
  15.708 +	/* If we see CE codepoint in IP header, packet is not mergeable */
  15.709 +	if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
  15.710 +		return -1;
  15.711 +
  15.712 +	/* ensure there are no options */
  15.713 +	if ((iph->ihl << 2) != sizeof(*iph))
  15.714 +		return -1;
  15.715 +
  15.716 +	/* .. and the packet is not fragmented */
  15.717 +	if (iph->frag_off & htons(IP_MF|IP_OFFSET))
  15.718 +		return -1;
  15.719 +
  15.720 +	/* ensure no bits set besides ack or psh */
  15.721 +	if (th->fin || th->syn || th->rst ||
  15.722 +	    th->urg || th->ece || th->cwr || !th->ack)
  15.723 +		return -1;
  15.724 +
  15.725 +	/* ensure that the checksum is valid */
  15.726 +	if (new_skb->ip_summed != CHECKSUM_UNNECESSARY)
  15.727 +		return -1;
  15.728 +
  15.729 +	/*
  15.730 +	 * check for timestamps. Since the only option we handle are timestamps,
  15.731 +	 * we only have to handle the simple case of aligned timestamps
  15.732 +	 */
  15.733 +
  15.734 +	opt_bytes = (th->doff << 2) - sizeof(*th);
  15.735 +	if (opt_bytes != 0) {
  15.736 +		ts_ptr = (u32 *)(th + 1);
  15.737 +		if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
  15.738 +			(*ts_ptr != ntohl((TCPOPT_NOP << 24) |
  15.739 +			(TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) |
  15.740 +			TCPOLEN_TIMESTAMP))) {
  15.741 +			return -1;
  15.742 +		}
  15.743 +	}
  15.744 +
  15.745 +	tcp_data_len = ntohs(iph->tot_len) - (th->doff << 2) - sizeof(*iph);
  15.746 +
  15.747 +	if (tcp_data_len == 0)
  15.748 +		return -1;
  15.749 +
  15.750 +	return tcp_data_len;
  15.751 +}
  15.752 +
/**
 * ixgbe_lro_ring_queue - if able, queue skb into lro chain
 * @lrolist: pointer to structure for lro entries
 * @adapter: address of board private structure
 * @new_skb: pointer to current skb being checked
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 *
 * Checks whether the skb given is eligible for LRO and if that's
 * fine chains it to the existing lro_skb based on flowid. If an LRO for
 * the flow doesn't exist create one.
 *
 * Returns 0 when the skb was consumed (chained or started a new flow),
 * -1 when the caller must deliver it through the normal path.
 **/
static int ixgbe_lro_ring_queue(struct ixgbe_lro_list *lrolist,
                                struct ixgbe_adapter *adapter,
                                struct sk_buff *new_skb, u8 status,
                                struct ixgbe_ring *rx_ring,
                                union ixgbe_adv_rx_desc *rx_desc)
{
	struct ethhdr *eh;
	struct iphdr *iph;
	struct tcphdr *th, *header_th;
	int  opt_bytes, header_ok = 1;
	u32 *ts_ptr = NULL;
	struct sk_buff *lro_skb;
	struct ixgbe_lro_desc *lrod;
	struct hlist_node *node;
	u32 seq;
	struct ixgbe_lro_info *lro_data = &adapter->lro_data;
	int tcp_data_len;
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	/* Disable LRO when in promiscuous mode, useful for debugging LRO */
	if (adapter->netdev->flags & IFF_PROMISC)
		return -1;

	eh = (struct ethhdr *)skb_mac_header(new_skb);
	iph = (struct iphdr *)(eh + 1);

	/* check to see if it is IPv4/TCP */
	if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
	     (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
		return -1;

	/* find the TCP header */
	th = (struct tcphdr *) (iph + 1);

	/* a non-mergeable header (-1) may still flush an existing flow */
	tcp_data_len = ixgbe_lro_header_ok(lro_data, new_skb, iph, th);
	if (tcp_data_len == -1)
		header_ok = 0;

	/* make sure any packet we are about to chain doesn't include any pad */
	skb_trim(new_skb, ntohs(iph->tot_len));

	opt_bytes = (th->doff << 2) - sizeof(*th);
	if (opt_bytes != 0)
		ts_ptr = (u32 *)(th + 1);

	seq = ntohl(th->seq);
	/*
	 * we have a packet that might be eligible for LRO,
	 * so see if it matches anything we might expect
	 */
	hlist_for_each_entry(lrod, node, &lrolist->active, lro_node) {
		/* flow match: 4-tuple plus VLAN tag */
		if (lrod->source_port == th->source &&
			lrod->dest_port == th->dest &&
			lrod->source_ip == iph->saddr &&
			lrod->dest_ip == iph->daddr &&
			lrod->vlan_tag == tag) {

			if (!header_ok) {
				/* ineligible packet on a known flow: flush
				 * what we have and deliver it normally */
				ixgbe_lro_ring_flush(lrolist, adapter, lrod,
				                     status, rx_ring, rx_desc);
				return -1;
			}

			if (seq != lrod->next_seq) {
				/* out of order packet */
				ixgbe_lro_ring_flush(lrolist, adapter, lrod,
				                     status, rx_ring, rx_desc);
				return -1;
			}

			if (lrod->timestamp) {
				u32 tsval = ntohl(*(ts_ptr + 1));
				/* make sure timestamp values are increasing */
				if (lrod->tsval > tsval || *(ts_ptr + 2) == 0) {
					ixgbe_lro_ring_flush(lrolist, adapter,
					                     lrod, status,
					                     rx_ring, rx_desc);
					return -1;
				}
				lrod->tsval = tsval;
				lrod->tsecr = *(ts_ptr + 2);
			}

			lro_skb = lrod->skb;

			/* grow the head skb's accounting by the new payload */
			lro_skb->len += tcp_data_len;
			lro_skb->data_len += tcp_data_len;
			lro_skb->truesize += tcp_data_len;

			lrod->next_seq += tcp_data_len;
			lrod->ack_seq = th->ack_seq;
			lrod->window = th->window;
			lrod->data_size += tcp_data_len;
			/* track the largest segment seen for gso_size */
			if (tcp_data_len > lrod->mss)
				lrod->mss = tcp_data_len;

			/* Remove IP and TCP header*/
			skb_pull(new_skb, ntohs(iph->tot_len) - tcp_data_len);

			/* Chain this new skb in frag_list */
			if (skb_shinfo(lro_skb)->frag_list != NULL )
				lrod->last_skb->next = new_skb;
			else
				skb_shinfo(lro_skb)->frag_list = new_skb;

			lrod->last_skb = new_skb ;

			lrod->append_cnt++;

			/* New packet with push flag, flush the whole packet. */
			if (th->psh) {
				/* propagate PSH into the head skb's header */
				header_th =
				(struct tcphdr *)(lro_skb->data + sizeof(*iph));
				header_th->psh |= th->psh;
				ixgbe_lro_ring_flush(lrolist, adapter, lrod,
				                     status, rx_ring, rx_desc);
				return 0;
			}

			/* cap the number of coalesced frames per flow */
			if (lrod->append_cnt >= lro_data->max)
				ixgbe_lro_ring_flush(lrolist, adapter, lrod,
				                     status, rx_ring, rx_desc);

			return 0;
		} /*End of if*/
	}

	/* start a new packet */
	if (header_ok && !hlist_empty(&lrolist->free)) {
		lrod = hlist_entry(lrolist->free.first, struct ixgbe_lro_desc,
		                   lro_node);

		/* seed the flow state from this first segment */
		lrod->skb = new_skb;
		lrod->source_ip = iph->saddr;
		lrod->dest_ip = iph->daddr;
		lrod->source_port = th->source;
		lrod->dest_port = th->dest;
		lrod->next_seq = seq + tcp_data_len;
		lrod->mss = tcp_data_len;
		lrod->ack_seq = th->ack_seq;
		lrod->window = th->window;
		lrod->data_size = tcp_data_len;
		lrod->vlan_tag = tag;

		/* record timestamp if it is present */
		if (opt_bytes) {
			lrod->timestamp = 1;
			lrod->tsval = ntohl(*(ts_ptr + 1));
			lrod->tsecr = *(ts_ptr + 2);
		}
		/* remove first packet from freelist.. */
		hlist_del(&lrod->lro_node);
		/* .. and insert at the front of the active list */
		hlist_add_head(&lrod->lro_node, &lrolist->active);
		lrolist->active_cnt++;

		return 0;
	}

	return -1;
}
  15.927 +
  15.928 +static void ixgbe_lro_ring_exit(struct ixgbe_lro_list *lrolist)
  15.929 +{
  15.930 +	struct hlist_node *node, *node2;
  15.931 +	struct ixgbe_lro_desc *lrod;
  15.932 +
  15.933 +	hlist_for_each_entry_safe(lrod, node, node2, &lrolist->active,
  15.934 +	                          lro_node) {
  15.935 +		hlist_del(&lrod->lro_node);
  15.936 +		kfree(lrod);
  15.937 +	}
  15.938 +
  15.939 +	hlist_for_each_entry_safe(lrod, node, node2, &lrolist->free,
  15.940 +	                          lro_node) {
  15.941 +		hlist_del(&lrod->lro_node);
  15.942 +		kfree(lrod);
  15.943 +	}
  15.944 +}
  15.945 +
  15.946 +static void ixgbe_lro_ring_init(struct ixgbe_lro_list *lrolist,
  15.947 +                                struct ixgbe_adapter *adapter)
  15.948 +{
  15.949 +	int j, bytes;
  15.950 +	struct ixgbe_lro_desc *lrod;
  15.951 +
  15.952 +	bytes = sizeof(struct ixgbe_lro_desc);
  15.953 +
  15.954 +	INIT_HLIST_HEAD(&lrolist->free);
  15.955 +	INIT_HLIST_HEAD(&lrolist->active);
  15.956 +
  15.957 +	for (j = 0; j < IXGBE_LRO_MAX; j++) {
  15.958 +		lrod = kzalloc(bytes, GFP_KERNEL);
  15.959 +		if (lrod != NULL) {
  15.960 +			INIT_HLIST_NODE(&lrod->lro_node);
  15.961 +			hlist_add_head(&lrod->lro_node, &lrolist->free);
  15.962 +		} else {
  15.963 +			DPRINTK(PROBE, ERR,
  15.964 +			        "Allocation for LRO descriptor %u failed\n", j);
  15.965 +		}
  15.966 +	}
  15.967 +}
  15.968 +
  15.969 +#endif /* IXGBE_NO_LRO */
#ifdef CONFIG_IXGBE_NAPI
/**
 * ixgbe_clean_rx_irq - process completed receive descriptors
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: in/out budget counter (NAPI build only)
 * @work_to_do: budget limit (NAPI build only)
 *
 * Walks writeback descriptors with the DD bit set, reassembling
 * packet-split and multi-descriptor (non-EOP) frames, running checksum
 * validation and the LRO paths, and handing packets up the stack.
 * Periodically refills the ring.  Returns true if anything was cleaned.
 **/
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
#else
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring)
#endif
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
#ifndef CONFIG_IXGBE_NAPI
	/* non-NAPI build: emulate the budget locally */
	int work_to_do = rx_ring->work_limit, local_work_done = 0;
	int *work_done = &local_work_done;
#endif
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	/* DD set means the descriptor has been written back */
	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			/* packet-split: header length comes from hdr_info,
			 * payload length from the descriptor */
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

#ifndef IXGBE_NO_LLI
		if (staterr & IXGBE_RXD_STAT_DYNINT)
			adapter->lli_int++;
#endif

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		/* linear part: only unmap if this skb hasn't already had
		 * page frags attached (continuation descriptors) */
		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len + NET_IP_ALIGN,
			                 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			/* attach the half-page payload as a fragment */
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			/* keep the page for reuse only if the other half is
			 * still available and nobody else holds a ref */
			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);

		cleaned_count++;
		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			/* frame continues: pass the skb along to the next
			 * buffer slot and keep collecting */
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifndef IXGBE_NO_LRO
		/* try the software LRO engine first; 0 means consumed */
		if (ixgbe_lro_ring_queue(rx_ring->lrolist,
				adapter, skb, staterr, rx_ring, rx_desc) == 0) {
			adapter->netdev->last_rx = jiffies;
			rx_ring->stats.packets++;
			if (upper_len)
				rx_ring->stats.bytes += upper_len;
			else
				rx_ring->stats.bytes += skb->len;
			goto next_desc;
		}
#endif
		ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
		adapter->netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
#ifndef IXGBE_NO_LRO
	/* deliver anything still pending in the software LRO engine */
	ixgbe_lro_ring_flush_all(rx_ring->lrolist, adapter,
                            staterr, rx_ring, rx_desc);
#endif /* IXGBE_NO_LRO */
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
#ifndef IXGBE_NO_INET_LRO
	if (rx_ring->lro_used) {
		lro_flush_all(&rx_ring->lro_mgr);
		rx_ring->lro_used = false;
	}
#endif

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

#ifndef CONFIG_IXGBE_NAPI
	/* re-arm the interrupt if we had to bail early and have more work */
	if (*work_done >= work_to_do)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rx_ring->v_idx);
#endif
	return cleaned;
}
 15.1148 +
 15.1149 +#ifdef CONFIG_IXGBE_NAPI
 15.1150 +static int ixgbe_clean_rxonly(struct napi_struct *, int);
 15.1151 +#endif
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	/* queue vectors only; the Other (and optional TCP timer) vectors
	 * are programmed separately after the loop */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		/* point every rx ring owned by this vector at it in the IVAR */
		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		/* likewise for the tx rings owned by this vector */
		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
		                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
	}

	/* v_idx == q_vectors here: next vector is for "other" causes (LSC) */
	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
#ifdef IXGBE_TCP_TIMER
	ixgbe_set_ivar(adapter, IXGBE_IVAR_TCP_TIMER_INDEX, ++v_idx);
#endif

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
 15.1216 +
/* latency classes used by the dynamic ITR code; each class maps to a
 * target interrupt rate in ixgbe_set_itr()/ixgbe_set_itr_msix() */
enum latency_range {
	lowest_latency = 0,	/* -> 100000 ints/s */
	low_latency = 1,	/* -> 20000 ints/s */
	bulk_latency = 2,	/* -> 8000 ints/s */
	latency_invalid = 255	/* sentinel; never produced by ixgbe_update_itr() */
};
 15.1223 +
 15.1224 +/**
 15.1225 + * ixgbe_update_itr - update the dynamic ITR value based on statistics
 15.1226 + * @adapter: pointer to adapter
 15.1227 + * @eitr: eitr setting (ints per sec) to give last timeslice
 15.1228 + * @itr_setting: current throttle rate in ints/second
 15.1229 + * @packets: the number of packets during this measurement interval
 15.1230 + * @bytes: the number of bytes during this measurement interval
 15.1231 + *
 15.1232 + *      Stores a new ITR value based on packets and byte
 15.1233 + *      counts during the last interrupt.  The advantage of per interrupt
 15.1234 + *      computation is faster updates and more accurate ITR for the current
 15.1235 + *      traffic pattern.  Constants in this function were computed
 15.1236 + *      based on theoretical maximum wire speed and thresholds were set based
 15.1237 + *      on testing data as well as attempting to minimize response time
 15.1238 + *      while increasing bulk throughput.
 15.1239 + *      this functionality is controlled by the InterruptThrottleRate module
 15.1240 + *      parameter (see ixgbe_param.c)
 15.1241 + **/
 15.1242 +static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
 15.1243 +                           u32 eitr, u8 itr_setting,
 15.1244 +                           int packets, int bytes)
 15.1245 +{
 15.1246 +	unsigned int retval = itr_setting;
 15.1247 +	u32 timepassed_us;
 15.1248 +	u64 bytes_perint;
 15.1249 +
 15.1250 +	if (packets == 0)
 15.1251 +		goto update_itr_done;
 15.1252 +
 15.1253 +
 15.1254 +	/* simple throttlerate management
 15.1255 +	 *    0-20MB/s lowest (100000 ints/s)
 15.1256 +	 *   20-100MB/s low   (20000 ints/s)
 15.1257 +	 *  100-1249MB/s bulk (8000 ints/s)
 15.1258 +	 */
 15.1259 +	/* what was last interrupt timeslice? */
 15.1260 +	timepassed_us = 1000000/eitr;
 15.1261 +	bytes_perint = bytes / timepassed_us; /* bytes/usec */
 15.1262 +
 15.1263 +	switch (itr_setting) {
 15.1264 +	case lowest_latency:
 15.1265 +		if (bytes_perint > adapter->eitr_low) {
 15.1266 +			retval = low_latency;
 15.1267 +		}
 15.1268 +		break;
 15.1269 +	case low_latency:
 15.1270 +		if (bytes_perint > adapter->eitr_high) {
 15.1271 +			retval = bulk_latency;
 15.1272 +		}
 15.1273 +		else if (bytes_perint <= adapter->eitr_low) {
 15.1274 +			retval = lowest_latency;
 15.1275 +		}
 15.1276 +		break;
 15.1277 +	case bulk_latency:
 15.1278 +		if (bytes_perint <= adapter->eitr_high) {
 15.1279 +			retval = low_latency;
 15.1280 +		}
 15.1281 +		break;
 15.1282 +	}
 15.1283 +
 15.1284 +update_itr_done:
 15.1285 +	return retval;
 15.1286 +}
 15.1287 +
/**
 * ixgbe_set_itr_msix - update the dynamic ITR for one MSI-X vector
 * @q_vector: vector whose rings' byte/packet counts drive the new rate
 *
 * Re-classifies the latency of every tx and rx ring owned by this vector,
 * picks the interrupt rate for the worst (highest) class, smooths toward
 * it and writes this vector's EITR register if the rate changed.
 **/
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 new_itr;
	u8 current_itr, ret_itr;
	/* recover this vector's index from its offset within the
	 * adapter's q_vector array */
	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
	                      sizeof(struct ixgbe_q_vector);
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	/* the worst (highest) latency class across rx and tx wins */
	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, itr_reg);
		IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
	}

	return;
}
 15.1357 +
 15.1358 +static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
 15.1359 +{
 15.1360 +	struct ixgbe_hw *hw = &adapter->hw;
 15.1361 +
 15.1362 +	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
 15.1363 +	    (eicr & IXGBE_EICR_GPI_SDP1)) {
 15.1364 +		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
 15.1365 +		/* write to clear the interrupt */
 15.1366 +		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 15.1367 +	}
 15.1368 +}
 15.1369 +
 15.1370 +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 15.1371 +{
 15.1372 +	struct ixgbe_hw *hw = &adapter->hw;
 15.1373 +
 15.1374 +	adapter->lsc_int++;
 15.1375 +	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 15.1376 +	adapter->link_check_timeout = jiffies;
 15.1377 +	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
 15.1378 +		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
 15.1379 +		schedule_work(&adapter->watchdog_task);
 15.1380 +	}
 15.1381 +}
 15.1382 +
 15.1383 +static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 15.1384 +{
 15.1385 +	struct net_device *netdev = data;
 15.1386 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 15.1387 +	struct ixgbe_hw *hw = &adapter->hw;
 15.1388 +	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 15.1389 +
 15.1390 +	if (eicr & IXGBE_EICR_LSC)
 15.1391 +		ixgbe_check_lsc(adapter);
 15.1392 +
 15.1393 +	ixgbe_check_fan_failure(adapter, eicr);
 15.1394 +
 15.1395 +	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 15.1396 +		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 15.1397 +
 15.1398 +	return IRQ_HANDLED;
 15.1399 +}
 15.1400 +
 15.1401 +#ifdef IXGBE_TCP_TIMER
 15.1402 +static irqreturn_t ixgbe_msix_pba(int irq, void *data)
 15.1403 +{
 15.1404 +	struct net_device *netdev = data;
 15.1405 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 15.1406 +	int i;
 15.1407 +
 15.1408 +	u32 pba = readl(adapter->msix_addr + IXGBE_MSIXPBA);
 15.1409 +	for (i = 0; i < MAX_MSIX_COUNT; i++) {
 15.1410 +		if (pba & (1 << i))
 15.1411 +			adapter->msix_handlers[i](irq, data, regs);
 15.1412 +		else
 15.1413 +			adapter->pba_zero[i]++;
 15.1414 +	}
 15.1415 +
 15.1416 +	adapter->msix_pba++;
 15.1417 +	return IRQ_HANDLED;
 15.1418 +}
 15.1419 +
 15.1420 +static irqreturn_t ixgbe_msix_tcp_timer(int irq, void *data)
 15.1421 +{
 15.1422 +	struct net_device *netdev = data;
 15.1423 +	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 15.1424 +
 15.1425 +	adapter->msix_tcp_timer++;
 15.1426 +
 15.1427 +	return IRQ_HANDLED;
 15.1428 +}
 15.1429 +
 15.1430 +#endif /* IXGBE_TCP_TIMER */
 15.1431 +static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 15.1432 +{
 15.1433 +	struct ixgbe_q_vector *q_vector = data;
 15.1434 +	struct ixgbe_adapter  *adapter = q_vector->adapter;
 15.1435 +	struct ixgbe_ring     *tx_ring;
 15.1436 +	int i, r_idx;
 15.1437 +
 15.1438 +	if (!q_vector->txr_count)
 15.1439 +		return IRQ_HANDLED;
 15.1440 +
 15.1441 +	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 15.1442 +	for (i = 0; i < q_vector->txr_count; i++) {
 15.1443 +		tx_ring = &(adapter->tx_ring[r_idx]);
 15.1444 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 15.1445 +		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 15.1446 +			ixgbe_update_tx_dca(adapter, tx_ring);
 15.1447 +#endif
 15.1448 +		tx_ring->total_bytes = 0;
 15.1449 +		tx_ring->total_packets = 0;
 15.1450 +		ixgbe_clean_tx_irq(adapter, tx_ring);
 15.1451 +		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 15.1452 +		                      r_idx + 1);
 15.1453 +	}
 15.1454 +
 15.1455 +	/*
 15.1456 +	 * possibly later we can enable tx auto-adjustment if necessary
 15.1457 +	 *
 15.1458 +	if (adapter->itr_setting & 3)
 15.1459 +		ixgbe_set_itr_msix(q_vector);
 15.1460 +	 */
 15.1461 +
 15.1462 +	return IRQ_HANDLED;
 15.1463 +}
 15.1464 +
/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 *
 * Without NAPI, the rings are cleaned right here in interrupt context.
 * With NAPI, this only masks the vector and schedules the poll routine.
 * NOTE: the for-loop below is deliberately closed inside BOTH halves of
 * the CONFIG_IXGBE_NAPI #ifdef -- the braces balance per-branch, not as
 * written linearly.
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter  *adapter = q_vector->adapter;
	struct ixgbe_ring  *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		/* reset the counters the dynamic ITR code consumes */
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
#ifndef CONFIG_IXGBE_NAPI
		/* non-NAPI: clean the ring in hard-irq context */
		ixgbe_clean_rx_irq(adapter, rx_ring);

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, rx_ring);

#endif
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (adapter->itr_setting & 3)
		ixgbe_set_itr_msix(q_vector);
#else
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	/* NAPI: mask this vector and hand the work to the poll routine */
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
	netif_rx_schedule(adapter->netdev, &q_vector->napi);
#endif

	return IRQ_HANDLED;
}
 15.1514 +
 15.1515 +static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 15.1516 +{
 15.1517 +	ixgbe_msix_clean_rx(irq, data);
 15.1518 +	ixgbe_msix_clean_tx(irq, data);
 15.1519 +
 15.1520 +	return IRQ_HANDLED;
 15.1521 +}
 15.1522 +
 15.1523 +#ifdef CONFIG_IXGBE_NAPI
 15.1524 +/**
 15.1525 + * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 15.1526 + * @napi: napi struct with our devices info in it
 15.1527 + * @budget: amount of work driver is allowed to do this pass, in packets
 15.1528 + *
 15.1529 + * This function is optimized for cleaning one queue only on a single
 15.1530 + * q_vector!!!
 15.1531 + **/
 15.1532 +static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 15.1533 +{
 15.1534 +	struct ixgbe_q_vector *q_vector =
 15.1535 +	                       container_of(napi, struct ixgbe_q_vector, napi);
 15.1536 +	struct ixgbe_adapter *adapter = q_vector->adapter;
 15.1537 +	struct ixgbe_ring *rx_ring = NULL;
 15.1538 +	int work_done = 0;
 15.1539 +	long r_idx;
 15.1540 +
 15.1541 +	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 15.1542 +	rx_ring = &(adapter->rx_ring[r_idx]);
 15.1543 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
 15.1544 +	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 15.1545 +		ixgbe_update_rx_dca(adapter, rx_ring);
 15.1546 +#endif
 15.1547 +
 15.1548 +	ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
 15.1549 +
 15.1550 +	/* If all Rx work done, exit the polling mode */
 15.1551 +	if ((work_done == 0) || !netif_running(adapter->netdev)) {
 15.1552 +		netif_rx_complete(adapter->netdev, napi);
 15.1553 +		if (adapter->itr_setting & 3)
 15.1554 +			ixgbe_set_itr_msix(q_vector);
 15.1555 +		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 15.1556 +			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
 15.1557 +		return 0;
 15.1558 +	}
 15.1559 +
 15.1560 +	return work_done;
 15.1561 +}
 15.1562 +
/**
 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u16 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	/* GNU "?:" keeps the divisor nonzero when rxr_count is 0 */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, rx_ring);
#endif
		ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
		/* collect the EIMS bit of every ring we serviced so they
		 * can all be re-enabled together below */
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if ((work_done == 0) || !netif_running(adapter->netdev)) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->itr_setting & 3)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
		return 0;
	}

	return work_done;
}
 15.1612 +
 15.1613 +#endif /* CONFIG_IXGBE_NAPI */
 15.1614 +static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 15.1615 +                                     int r_idx)
 15.1616 +{
 15.1617 +	a->q_vector[v_idx].adapter = a;
 15.1618 +	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
 15.1619 +	a->q_vector[v_idx].rxr_count++;
 15.1620 +	a->rx_ring[r_idx].v_idx = 1 << v_idx;
 15.1621 +
 15.1622 +}
 15.1623 +
 15.1624 +static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
 15.1625 +                                     int r_idx)
 15.1626 +{
 15.1627 +	a->q_vector[v_idx].adapter = a;
 15.1628 +	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
 15.1629 +	a->q_vector[v_idx].txr_count++;
 15.1630 +	a->tx_ring[r_idx].v_idx = 1 << v_idx;
 15.1631 +}
 15.1632 +
 15.1633 +/**
 15.1634 + * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 15.1635 + * @adapter: board private structure to initialize
 15.1636 + * @vectors: allotted vector count for descriptor rings
 15.1637 + *
 15.1638 + * This function maps descriptor rings to the queue-specific vectors
 15.1639 + * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 15.1640 + * one vector per ring/queue, but on a constrained vector budget, we
 15.1641 + * group the rings as "efficiently" as possible.  You would add new
 15.1642 + * mapping configurations in here.
 15.1643 + **/
 15.1644 +static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, int vectors)
 15.1645 +{
 15.1646 +	int v_start = 0;
 15.1647 +	int rxr_idx = 0, txr_idx = 0;
 15.1648 +	int rxr_remaining = adapter->num_rx_queues;
 15.1649 +	int txr_remaining = adapter->num_tx_queues;
 15.1650 +	int i, j;
 15.1651 +	int rqpv, tqpv;
 15.1652 +	int err = 0;
 15.1653 +
 15.1654 +	/* No mapping required if MSI-X is disabled. */
 15.1655 +	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 15.1656 +		goto out;
 15.1657 +
 15.1658 +	/*
 15.1659 +	 * The ideal configuration...
 15.1660 +	 * We have enough vectors to map one per queue.
 15.1661 +	 */
 15.1662 +	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
 15.1663 +		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
 15.1664 +			map_vector_to_rxq(adapter, v_start, rxr_idx);
 15.1665 +
 15.1666 +		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
 15.1667 +			map_vector_to_txq(adapter, v_start, txr_idx);
 15.1668 +
 15.1669 +		goto out;
 15.1670 +	}
 15.1671 +
 15.1672 +	/*
 15.1673 +	 * If we don't have enough vectors for a 1-to-1
 15.1674 +	 * mapping, we'll have to group them so there are
 15.1675 +	 * multiple queues per vector.
 15.1676 +	 */
 15.1677 +	/* Re-adjusting *qpv takes care of the remainder. */
 15.1678 +	for (i = v_start; i < vectors; i++) {
 15.1679 +		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
 15.1680 +		for (j = 0; j < rqpv; j++) {
 15.1681 +			map_vector_to_rxq(adapter, i, rxr_idx);
 15.1682 +			rxr_idx++;
 15.1683 +			rxr_remaining--;
 15.1684 +		}
 15.1685 +	}
 15.1686 +	for (i = v_start; i < vectors; i++) {
 15.1687 +		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
 15.1688 +		for (j = 0; j < tqpv; j++) {
 15.1689 +			map_vector_to_txq(adapter, i, txr_idx);
 15.1690 +			txr_idx++;
 15.1691 +			txr_remaining--;
 15.1692 +		}
 15.1693 +	}
 15.1694 +
 15.1695 +out:
 15.1696 +	return err;
 15.1697 +}
 15.1698 +
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 *
 * Returns 0 on success.  On failure, every already-requested queue IRQ
 * is freed, MSI-X is disabled on the device and the error is returned.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

	/* pick the handler matching what the vector actually services:
	 * tx-only, rx-only, or both */
#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(&adapter->q_vector[vector]);

		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "rx", ri++);
		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "tx", ti++);
		} else {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "TxRx", vector);
		}
		err = request_irq(adapter->msix_entries[vector].vector,
		                  handler, 0, adapter->name[vector],
		                  &(adapter->q_vector[vector]));
		if (err) {
			DPRINTK(PROBE, ERR,
			        "request_irq failed for MSIX interrupt "
			        "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	/* vector == q_vectors here: the next entry is the "other causes"
	 * (link state change) vector, registered against the netdev */
	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
	                  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
		        "request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

#ifdef IXGBE_TCP_TIMER
	vector++;
	sprintf(adapter->name[vector], "%s:timer", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
	                  &ixgbe_msix_tcp_timer, 0, adapter->name[vector],
	                  netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
		        "request_irq for msix_tcp_timer failed: %d\n", err);
		/* Free "Other" interrupt */
		free_irq(adapter->msix_entries[--vector].vector, netdev);
		goto free_queue_irqs;
	}

#endif
	return 0;

free_queue_irqs:
	/* unwind: i and --vector decrement in lockstep, so queue vector i
	 * is always freed against its own MSI-X entry regardless of where
	 * in the sequence the failure happened */
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
		         &(adapter->q_vector[i]));
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}
 15.1785 +
 15.1786 +static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 15.1787 +{
 15.1788 +	struct ixgbe_hw *hw = &adapter->hw;
 15.1789 +	struct ixgbe_q_vector *q_vector = adapter->q_vector;
 15.1790 +	u8 current_itr;
 15.1791 +	u32 new_itr = q_vector->eitr;
 15.1792 +	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
 15.1793 +	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
 15.1794 +
 15.1795 +	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
 15.1796 +	                                    q_vector->tx_itr,
 15.1797 +	                                    tx_ring->total_packets,
 15.1798 +	                                    tx_ring->total_bytes);
 15.1799 +	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
 15.1800 +	                                    q_vector->rx_itr,
 15.1801 +	                                    rx_ring->total_packets,
 15.1802 +	                                    rx_ring->total_bytes);
 15.1803 +
 15.1804 +	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
 15.1805 +
 15.1806 +	switch (current_itr) {
 15.1807 +	/* counts and packets in update_itr are dependent on these numbers */
 15.1808 +	case lowest_latency:
 15.1809 +		new_itr = 100000;
 15.1810 +		break;
 15.1811 +	case low_latency:
 15.1812 +		new_itr = 20000; /* aka hwitr = ~200 */
 15.1813 +		break;
 15.1814 +	case bulk_latency:
 15.1815 +		new_itr = 8000;
 15.1816 +		break;
 15.1817 +	default:
 15.1818 +		break;
 15.1819 +	}
 15.1820 +
 15.1821 +	if (new_itr != q_vector->eitr) {
 15.1822 +		u32 itr_reg;
 15.1823 +		/* do an exponential smoothing */
 15.1824 +		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 15.1825 +		q_vector->eitr = new_itr;
 15.1826 +		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
 15.1827 +		/* must write high and low 16 bits to reset counter */
 15.1828 +		IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
 15.1829 +	}
 15.1830 +
 15.1831 +	return;
 15.1832 +}
 15.1833 +
 15.1834 +/**
 15.1835 + * ixgbe_irq_enable - Enable default interrupt generation settings
 15.1836 + * @adapter: board private structure
 15.1837 + **/
 15.1838 +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 15.1839 +{
 15.1840 +	u32 mask;
 15.1841 +	mask = IXGBE_EIMS_ENABLE_MASK;
 15.1842 +	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
 15.1843 +		mask |= IXGBE_EIMS_GPI_SDP1;
 15.1844 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 15.1845 +	IXGBE_WRITE_FLUSH(&adapter->hw);
 15.1846 +}
 15.1847 +
 15.1848 +
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * The legacy INTx line may be shared with other devices, so a zero EICR
 * means the interrupt was not ours.
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explict interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
#ifdef CONFIG_IXGBE_NAPI
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM */
		ixgbe_irq_enable(adapter);
#endif
		return IRQ_NONE;  /* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	ixgbe_check_fan_failure(adapter, eicr);

#ifdef CONFIG_IXGBE_NAPI
	if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
		/* reset the per-poll byte/packet counters consumed by the
		 * dynamic ITR code before handing off to the poll routine */
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
	}

#else
	/* non-NAPI: clean both rings directly in interrupt context */
	adapter->tx_ring[0].total_packets = 0;
	adapter->tx_ring[0].total_bytes = 0;
	adapter->rx_ring[0].total_packets = 0;
	adapter->rx_ring[0].total_bytes = 0;
	ixgbe_clean_rx_irq(adapter, adapter->rx_ring);
	ixgbe_clean_tx_irq(adapter, adapter->tx_ring);

	/* dynamically adjust throttle */
	if (adapter->itr_setting & 3)
		ixgbe_set_itr(adapter);

#endif
	return IRQ_HANDLED;
}
 15.1905 +
 15.1906 +static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
 15.1907 +{
 15.1908 +	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 15.1909 +
 15.1910 +	for (i = 0; i < q_vectors; i++) {
 15.1911 +		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
 15.1912 +		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
 15.1913 +		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
 15.1914 +		q_vector->rxr_count = 0;
 15.1915 +		q_vector->txr_count = 0;
 15.1916 +	}
 15.1917 +}
 15.1918 +
 15.1919 +/**
 15.1920 + * ixgbe_request_irq - initialize interrupts
 15.1921 + * @adapter: board private structure
 15.1922 + *
 15.1923 + * Attempts to configure interrupts using the best available
 15.1924 + * capabilities of the hardware and kernel.
 15.1925 + **/
 15.1926 +static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 15.1927 +{
 15.1928 +	struct net_device *netdev = adapter->netdev;
 15.1929 +	int err;
 15.1930 +
 15.1931 +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 15.1932 +		err = ixgbe_request_msix_irqs(adapter);
 15.1933 +	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 15.1934 +		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
 15.1935 +		                  netdev->name, netdev);
 15.1936 +	} else {
 15.1937 +		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
 15.1938 +		                  netdev->name, netdev);
 15.1939 +	}
 15.1940 +
 15.1941 +	if (err)
 15.1942 +		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
 15.1943 +
 15.1944 +	return err;
 15.1945 +}
 15.1946 +
/**
 * ixgbe_free_irq - release all interrupt lines held by the driver
 * @adapter: board private structure
 *
 * In MSI-X mode the vectors are freed from the highest index down:
 * the optional TCP timer vector and the "other" vector (both requested
 * with netdev as dev_id), then one vector per q_vector.  In MSI/legacy
 * mode the single line is freed.  The dev_id passed to free_irq() must
 * match what was used at request time.
 **/
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
#ifdef IXGBE_TCP_TIMER
		/* highest vector is the TCP timer when it is compiled in */
		free_irq(adapter->msix_entries[i].vector, netdev);
		i--;
#endif
		/* the "other" (non-queue) vector uses netdev as dev_id */
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			/* queue vectors use their q_vector as dev_id */
			free_irq(adapter->msix_entries[i].vector,
			         &(adapter->q_vector[i]));
		}

		/* mappings are stale once the vectors are gone */
		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}
 15.1974 +
 15.1975 +/**
 15.1976 + * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 15.1977 + * @adapter: board private structure
 15.1978 + **/
 15.1979 +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 15.1980 +{
 15.1981 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
 15.1982 +	IXGBE_WRITE_FLUSH(&adapter->hw);
 15.1983 +	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 15.1984 +		int i;
 15.1985 +		for (i = 0; i < adapter->num_msix_vectors; i++)
 15.1986 +			synchronize_irq(adapter->msix_entries[i].vector);
 15.1987 +	} else {
 15.1988 +		synchronize_irq(adapter->pdev->irq);
 15.1989 +	}
 15.1990 +}
 15.1991 +
 15.1992 +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
 15.1993 +{
 15.1994 +	u32 mask = IXGBE_EIMS_RTX_QUEUE;
 15.1995 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 15.1996 +	/* skip the flush */
 15.1997 +}
 15.1998 +
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 *
 * Programs the single-vector interrupt layout: one throttle rate and
 * both rx queue 0 and tx queue 0 routed to vector 0.
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* set the throttle rate for vector 0 from the configured
	 * interrupts-per-second parameter */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
	                EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));

	/* route the first rx and tx queue causes to vector 0 in the IVAR */
	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);

	/* mirror the hardware routing in the driver's q_vector bookkeeping */
	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}
 15.2018 +
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset: for each tx ring,
 * program the descriptor ring base/length, the head write-back address,
 * reset head/tail, and disable relaxed ordering on head write-back.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba, tdwba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;	/* hardware register index for this ring */
		tdba = ring->dma;	/* bus address of the descriptor ring */
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		/* ring base address, split into low/high 32-bit registers */
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		                (tdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		/* head write-back area is placed right after the ring */
		tdwba = ring->dma +
		        (ring->count * sizeof(union ixgbe_adv_tx_desc));
		tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		/* ring starts empty: head == tail == 0 */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
	}
}
 15.2058 +
 15.2059 +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
 15.2060 +
/**
 * ixgbe_configure_srrctl - program the SRRCTL register for one rx queue
 * @adapter: board private structure
 * @index: rx queue index to configure
 *
 * Translates the queue index into the SRRCTL register index (per VMDq
 * pool, or per RSS queue when VMDq is off), then programs buffer sizes
 * and the descriptor type for packet-split or single-buffer mode.
 **/
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
{
	struct ixgbe_ring *rx_ring;
	u32 srrctl;
	int queue0;
	unsigned long mask;

	/* program one srrctl register per VMDq index */
	if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
		long shift, len;
		mask = (unsigned long) adapter->ring_feature[RING_F_VMDQ].mask;
		len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
		/* shift = position of the lowest set bit in the VMDq mask;
		 * presumably the mask selects the pool bits of the index —
		 * verify against how RING_F_VMDQ.mask is populated */
		shift = find_first_bit(&mask, len);
		queue0 = (index & mask);
		index = (index & mask) >> shift;
	/* if VMDq is not active we must program one srrctl register per
	 * RSS queue since we have enabled RDRXCTL.MVMEN
	 */
	} else {
		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
		queue0 = index & mask;
		index = index & mask;
	}

	/* the buffer length is taken from the first ring of the group */
	rx_ring = &adapter->rx_ring[queue0];

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

	/* clear both size fields before re-programming them */
	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* packet split: fixed 2KB packet buffer plus header buffer */
		srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBE_RX_HDR_SIZE <<
		            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		           IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		/* single buffer: size follows the ring's rx_buf_len */
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBE_RXBUFFER_2048 >>
			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}
 15.2110 +
 15.2111 +#ifndef IXGBE_NO_INET_LRO
 15.2112 +/**
 15.2113 + * ixgbe_get_skb_hdr - helper function for LRO header processing
 15.2114 + * @skb: pointer to sk_buff to be added to LRO packet
 15.2115 + * @iphdr: pointer to ip header structure
 15.2116 + * @tcph: pointer to tcp header structure
 15.2117 + * @hdr_flags: pointer to header flags
 15.2118 + * @priv: private data
 15.2119 + **/
 15.2120 +static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
 15.2121 +                             u64 *hdr_flags, void *priv)
 15.2122 +{
 15.2123 +	union ixgbe_adv_rx_desc *rx_desc = priv;
 15.2124 +
 15.2125 +	/* Verify that this is a valid IPv4 TCP packet */
 15.2126 +	if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
 15.2127 +	     (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
 15.2128 +		return -1;
 15.2129 +
 15.2130 +	/* Set network headers */
 15.2131 +	skb_reset_network_header(skb);
 15.2132 +	skb_set_transport_header(skb, ip_hdrlen(skb));
 15.2133 +	*iphdr = ip_hdr(skb);
 15.2134 +	*tcph = tcp_hdr(skb);
 15.2135 +	*hdr_flags = LRO_IPV4 | LRO_TCP;
 15.2136 +	return 0;
 15.2137 +}
 15.2138 +
 15.2139 +#endif /* IXGBE_NO_INET_LRO */
 15.2140 +/**
 15.2141 + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 15.2142 + * @adapter: board private structure
 15.2143 + *
 15.2144 + * Configure the Rx unit of the MAC after a reset.
 15.2145 + **/
 15.2146 +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 15.2147 +{
 15.2148 +	u64 rdba;
 15.2149 +	struct ixgbe_hw *hw = &adapter->hw;
 15.2150 +	struct net_device *netdev = adapter->netdev;
 15.2151 +	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 15.2152 +	int i, j;
 15.2153 +	u32 rdlen, rxctrl, rxcsum;
 15.2154 +	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
 15.2155 +	                  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
 15.2156 +	                  0x6A3E67EA, 0x14364D17, 0x3BED200D};
 15.2157 +	u32 fctrl, hlreg0;
 15.2158 +	u32 reta = 0, mrqc;
 15.2159 +	u32 vmdctl;
 15.2160 +	u32 rdrxctl;
 15.2161 +	int rx_buf_len;
 15.2162 +
 15.2163 +#ifndef IXGBE_NO_LRO
 15.2164 +	adapter->lro_data.max = lromax;
 15.2165 +
 15.2166 +	if (lromax * netdev->mtu > (1 << 16))
 15.2167 +		adapter->lro_data.max = ((1 << 16) / netdev->mtu) - 1;
 15.2168 +
 15.2169 +#endif
 15.2170 +	/* Decide whether to use packet split mode or not */
 15.2171 +	if (netdev->mtu > ETH_DATA_LEN) {
 15.2172 +		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
 15.2173 +			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 15.2174 +		else
 15.2175 +			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 15.2176 +	} else {
 15.2177 +		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE) {
 15.2178 +			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 15.2179 +		} else
 15.2180 +			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 15.2181 +	}
 15.2182 +
 15.2183 +	/* Set the RX buffer length according to the mode */
 15.2184 +	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 15.2185 +		rx_buf_len = IXGBE_RX_HDR_SIZE;
 15.2186 +	} else {
 15.2187 +		if (netdev->mtu <= ETH_DATA_LEN)
 15.2188 +			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 15.2189 +		else
 15.2190 +			rx_buf_len = ALIGN(max_frame, 1024);
 15.2191 +	}
 15.2192 +
 15.2193 +	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
 15.2194 +	fctrl |= IXGBE_FCTRL_BAM;
 15.2195 +	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
 15.2196 +	fctrl |= IXGBE_FCTRL_PMCF;
 15.2197 +	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 15.2198 +
 15.2199 +	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 15.2200 +	if (adapter->netdev->mtu <= ETH_DATA_LEN)
 15.2201 +		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 15.2202 +	else
 15.2203 +		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 15.2204 +	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 15.2205 +
 15.2206 +	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
 15.2207 +	/* disable receives while setting up the descriptors */
 15.2208 +	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 15.2209 +	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 15.2210 +
 15.2211 +	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 15.2212 +	 * the Base and Length of the Rx Descriptor Ring */
 15.2213 +	for (i = 0; i < adapter->num_rx_queues; i++) {
 15.2214 +		rdba = adapter->rx_ring[i].dma;
 15.2215 +		j = adapter->rx_ring[i].reg_idx;
 15.2216 +		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
 15.2217 +		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
 15.2218 +		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
 15.2219 +		IXGBE_WRITE