ia64/linux-2.6.18-xen.hg

changeset 880:4ffa9ad54890

Upgrade forcedeth net driver to 0.62 (driver package v1.25)

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu May 28 09:53:22 2009 +0100 (2009-05-28)
parents 2ab54cc40761
children e9f508296fc7
files drivers/net/forcedeth.c
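
The bulk of this patch is a backport-compatibility layer: it maps LINUX_VERSION_CODE onto distro-era constants (RHES3 through NVNEW) and then gates every API difference on the resulting NVVER value. As a rough standalone sketch of how that selection evaluates on this tree (illustrative only, not part of the changeset; the KERNEL_VERSION definition is copied from linux/version.h so the sketch builds in userspace):

#include <stdio.h>

/* linux/version.h packs (major, minor, patch) into one comparable integer */
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	/* this tree is 2.6.18, so LINUX_VERSION_CODE evaluates to 0x020612 */
	unsigned int code = KERNEL_VERSION(2, 6, 18);

	/* walk the same #elif chain the patch adds near the top of forcedeth.c */
	const char *nvver =
		code > KERNEL_VERSION(2, 6, 22) ? "NVNEW" :
		code > KERNEL_VERSION(2, 6, 21) ? "OPENSUSE10U3" :
		code > KERNEL_VERSION(2, 6, 18) ? "FEDORA7" :
		code > KERNEL_VERSION(2, 6, 17) ? "FEDORA6" :
		code > KERNEL_VERSION(2, 6, 13) ? "FEDORA5" :
		code > KERNEL_VERSION(2, 6, 9)  ? "SUSE10" :
		code > KERNEL_VERSION(2, 6, 6)  ? "RHES4" :
		code > KERNEL_VERSION(2, 6, 0)  ? "SLES9" : "RHES3";

	/* 2.6.18 is not strictly greater than 2.6.18, so the chain
	 * falls through to FEDORA6 on this kernel */
	printf("LINUX_VERSION_CODE=0x%06x -> NVVER=%s\n", code, nvver);
	return 0;
}

On this 2.6.18 xen tree the chain therefore selects FEDORA6, which is why, for example, the CHECKSUM_HW remap guarded by "#if NVVER > FEDORA6" below is not taken here.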
line diff
     1.1 --- a/drivers/net/forcedeth.c	Wed May 27 11:21:00 2009 +0100
     1.2 +++ b/drivers/net/forcedeth.c	Thu May 28 09:53:22 2009 +0100
     1.3 @@ -108,7 +108,13 @@
     1.4   *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
     1.5   *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
     1.6   *	0.55: 22 Mar 2006: Add flow control (pause frame).
     1.7 - *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
     1.8 + *	0.56: 22 Mar 2006: Additional ethtool and moduleparam support.
     1.9 + *	0.57: 14 May 2006: Moved mac address writes to nv_probe and nv_remove.
    1.10 + *	0.58: 20 May 2006: Optimized rx and tx data paths.
    1.11 + *	0.59: 31 May 2006: Added support for sideband management unit.
    1.12 + *	0.60: 31 May 2006: Added support for recoverable error.
    1.13 + *	0.61: 18 Jul 2006: Added support for suspend/resume.
    1.14 + *	0.62: 16 Jan 2007: Fixed statistics, mgmt communication, and low phy speed on S5.
    1.15   *
    1.16   * Known bugs:
    1.17   * We suspect that on some hardware no TX done interrupts are generated.
    1.18 @@ -120,8 +126,9 @@
    1.19   * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
    1.20   * superfluous timer interrupts from the nic.
    1.21   */
    1.22 -#define FORCEDETH_VERSION		"0.56"
    1.23 +#define FORCEDETH_VERSION		"0.62-Driver Package V1.25"
    1.24  #define DRV_NAME			"forcedeth"
    1.25 +#define DRV_DATE			"2008/01/30"
    1.26  
    1.27  #include <linux/module.h>
    1.28  #include <linux/types.h>
    1.29 @@ -138,41 +145,351 @@
    1.30  #include <linux/random.h>
    1.31  #include <linux/init.h>
    1.32  #include <linux/if_vlan.h>
    1.33 +#include <linux/rtnetlink.h>
    1.34 +#include <linux/reboot.h>
    1.35 +#include <linux/version.h>
    1.36 +
    1.37 +#define RHES3  		0
    1.38 +#define SLES9	 	1
    1.39 +#define RHES4		2
    1.40 +#define SUSE10		3 
    1.41 +#define	FEDORA5		4 
    1.42 +#define	FEDORA6		5
    1.43 +#define	SLES10U1    	5
    1.44 +#define	FEDORA7		6
    1.45 +#define	OPENSUSE10U3	7
    1.46 +#define NVNEW	8
    1.47 +
    1.48 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)
    1.49 +#define NVVER NVNEW  
    1.50 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
    1.51 +#define NVVER OPENSUSE10U3
    1.52 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
    1.53 +#define NVVER FEDORA7
    1.54 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17)
    1.55 +#define NVVER FEDORA6
    1.56 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
    1.57 +#define NVVER FEDORA5		
    1.58 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
    1.59 +#define NVVER SUSE10		
    1.60 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,6)
    1.61 +#define NVVER RHES4	
    1.62 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
    1.63 +#define NVVER SLES9	
    1.64 +#else
    1.65 +#define NVVER RHES3  	
    1.66 +#endif
    1.67 +
    1.68 +#if NVVER > RHES3
    1.69  #include <linux/dma-mapping.h>
    1.70 +#else
    1.71 +#include <linux/forcedeth-compat.h>
    1.72 +#endif
    1.73  
    1.74  #include <asm/irq.h>
    1.75  #include <asm/io.h>
    1.76  #include <asm/uaccess.h>
    1.77  #include <asm/system.h>
    1.78  
    1.79 -#if 0
    1.80 +#ifdef  NVLAN_DEBUG
    1.81  #define dprintk			printk
    1.82  #else
    1.83  #define dprintk(x...)		do { } while (0)
    1.84  #endif
    1.85  
    1.86 +#define DPRINTK(nlevel,klevel,args...) (void)((debug & NETIF_MSG_##nlevel) && printk(klevel args))
    1.87 +
    1.88 + /* pci_ids.h */
    1.89 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_12
    1.90 +#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 
    1.91 +#endif
    1.92 +
    1.93 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_13
    1.94 +#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 
    1.95 +#endif
    1.96 +
    1.97 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_14
    1.98 +#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372 
    1.99 +#endif
   1.100 +
   1.101 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_15
   1.102 +#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 
   1.103 +#endif
   1.104 +
   1.105 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_16
   1.106 +#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
   1.107 +#endif
   1.108 +
   1.109 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_17
   1.110 +#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6 
   1.111 +#endif
   1.112 +
   1.113 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_18
   1.114 +#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE 
   1.115 +#endif
   1.116 +
   1.117 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_19
   1.118 +#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF 
   1.119 +#endif
   1.120 +
   1.121 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_20
   1.122 +#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 
   1.123 +#endif
   1.124 +
   1.125 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_21
   1.126 +#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 
   1.127 +#endif
   1.128 +
   1.129 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_22
   1.130 +#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 
   1.131 +#endif
   1.132 +
   1.133 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_23
   1.134 +#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453 
   1.135 +#endif
   1.136 +
   1.137 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_24
   1.138 +#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054c
   1.139 +#endif
   1.140 +
   1.141 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_25
   1.142 +#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054d
   1.143 +#endif
   1.144 +
   1.145 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_26
   1.146 +#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054e
   1.147 +#endif
   1.148 +
   1.149 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_27
   1.150 +#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054f
   1.151 +#endif
   1.152 +
   1.153 + /* mii.h */
   1.154 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_28
   1.155 +#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07dc
   1.156 +#endif
   1.157 +
   1.158 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_29
   1.159 +#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07dd
   1.160 +#endif
   1.161 +
   1.162 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_30
   1.163 +#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07de
   1.164 +#endif
   1.165 +
   1.166 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_31
   1.167 +#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07df
   1.168 +#endif
   1.169 +
   1.170 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_32
   1.171 +#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760
   1.172 +#endif
   1.173 +
   1.174 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_33
   1.175 +#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
   1.176 +#endif
   1.177 +
   1.178 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_34
   1.179 +#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
   1.180 +#endif
   1.181 +
   1.182 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_35
   1.183 +#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
   1.184 +#endif
   1.185 +
   1.186 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_36
   1.187 +#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0
   1.188 +#endif
   1.189 +
   1.190 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_37
   1.191 +#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1
   1.192 +#endif
   1.193 +
   1.194 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_38
   1.195 +#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2
   1.196 +#endif
   1.197 +
   1.198 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_39
   1.199 +#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3
   1.200 +#endif
   1.201 +
   1.202 +#ifndef ADVERTISE_1000HALF
   1.203 +#define ADVERTISE_1000HALF	0x0100
   1.204 +#endif
   1.205 +#ifndef ADVERTISE_1000FULL
   1.206 +#define ADVERTISE_1000FULL	0x0200
   1.207 +#endif
   1.208 +#ifndef ADVERTISE_PAUSE_CAP
   1.209 +#define ADVERTISE_PAUSE_CAP	0x0400
   1.210 +#endif
   1.211 +#ifndef ADVERTISE_PAUSE_ASYM
   1.212 +#define ADVERTISE_PAUSE_ASYM	0x0800
   1.213 +#endif
   1.214 +#ifndef MII_CTRL1000
   1.215 +#define MII_CTRL1000		0x09 
   1.216 +#endif
   1.217 +#ifndef MII_STAT1000
   1.218 +#define MII_STAT1000		0x0A 
   1.219 +#endif
   1.220 +#ifndef LPA_1000FULL
   1.221 +#define LPA_1000FULL		0x0800
   1.222 +#endif
   1.223 +#ifndef LPA_1000HALF
   1.224 +#define LPA_1000HALF		0x0400 
   1.225 +#endif
   1.226 +#ifndef LPA_PAUSE_CAP
   1.227 +#define LPA_PAUSE_CAP		0x0400
   1.228 +#endif
   1.229 +#ifndef LPA_PAUSE_ASYM
   1.230 +#define LPA_PAUSE_ASYM		0x0800
   1.231 +#endif
   1.232 +#ifndef BMCR_SPEED1000
   1.233 +#define BMCR_SPEED1000		0x0040	/* MSB of Speed (1000)         */
   1.234 +#endif
   1.235 +
   1.236 +#ifndef NETDEV_TX_OK
   1.237 +#define NETDEV_TX_OK 		0	/* driver took care of packet */
   1.238 +#endif
   1.239 +
   1.240 +#ifndef NETDEV_TX_BUSY
    1.241 +#define NETDEV_TX_BUSY 		1    /* driver tx path was busy */
   1.242 +#endif
   1.243 +
   1.244 +#ifndef DMA_39BIT_MASK
   1.245 +#define DMA_39BIT_MASK		0x0000007fffffffffULL    
   1.246 +#endif
   1.247 +
   1.248 +#ifndef __iomem
   1.249 +#define __iomem 
   1.250 +#endif
   1.251 +
   1.252 +#ifndef __bitwise
   1.253 +#define __bitwise
   1.254 +#endif
   1.255 +
   1.256 +#ifndef __force
   1.257 +#define __force
   1.258 +#endif
   1.259 +
   1.260 +#ifndef PCI_D0
   1.261 +#define PCI_D0		((int __bitwise __force) 0)
   1.262 +#endif
   1.263 +
   1.264 +#ifndef PM_EVENT_SUSPEND 
   1.265 +#define PM_EVENT_SUSPEND 2 
   1.266 +#endif
   1.267 +
   1.268 +#ifndef MODULE_VERSION
   1.269 +#define MODULE_VERSION(ver)
   1.270 +#endif
   1.271 +
   1.272 +#if NVVER > FEDORA6
   1.273 +#define CHECKSUM_HW CHECKSUM_PARTIAL
   1.274 +#endif
   1.275 +
   1.276 +#if NVVER < SUSE10
   1.277 +#define pm_message_t u32
   1.278 +#endif
   1.279 +
    1.280 + /* rx/tx mac addr + type + vlan + align + slack */
   1.281 +#ifndef RX_NIC_BUFSIZE	
   1.282 +#define RX_NIC_BUFSIZE		(ETH_DATA_LEN + 64)
   1.283 +#endif
   1.284 + /* even more slack */
   1.285 +#ifndef RX_ALLOC_BUFSIZE	
   1.286 +#define RX_ALLOC_BUFSIZE	(ETH_DATA_LEN + 128)
   1.287 +#endif
   1.288 +
   1.289 +#ifndef PCI_DEVICE
   1.290 +#define PCI_DEVICE(vend,dev) \
   1.291 +	 .vendor = (vend), .device = (dev), \
   1.292 + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
   1.293 +#endif
   1.294 +
   1.295 +#if NVVER < RHES4
   1.296 + struct msix_entry {
   1.297 +	 u16 vector;	/* kernel uses to write allocated vector */
   1.298 +	 u16 entry;	/* driver uses to specify entry, OS writes */
   1.299 + };
   1.300 +#endif
   1.301 +
   1.302 +#ifndef PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET
   1.303 +#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0x00
   1.304 +#endif
   1.305 +
   1.306 +#ifndef PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 
   1.307 +#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 0x04 
   1.308 +#endif
   1.309 +
   1.310 +#ifndef PCI_MSIX_ENTRY_DATA_OFFSET
   1.311 +#define PCI_MSIX_ENTRY_DATA_OFFSET 0x08
   1.312 +#endif 
   1.313 +
   1.314 +#ifndef PCI_MSIX_ENTRY_SIZE
   1.315 +#define PCI_MSIX_ENTRY_SIZE 16
   1.316 +#endif
   1.317 +
   1.318 +#ifndef PCI_MSIX_FLAGS_BIRMASK
   1.319 +#define PCI_MSIX_FLAGS_BIRMASK		(7 << 0)
   1.320 +#endif
   1.321 +
   1.322 +#ifndef PCI_CAP_ID_MSIX
   1.323 +#define PCI_CAP_ID_MSIX 0x11
   1.324 +#endif
   1.325 +
   1.326 +#if NVVER > FEDORA7
   1.327 +#define IRQ_FLAG IRQF_SHARED
   1.328 +#else
   1.329 +#define IRQ_FLAG SA_SHIRQ
   1.330 +#endif
   1.331  
   1.332  /*
   1.333   * Hardware access:
   1.334   */
   1.335  
   1.336 -#define DEV_NEED_TIMERIRQ	0x0001  /* set the timer irq flag in the irq mask */
   1.337 -#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
   1.338 -#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
   1.339 -#define DEV_HAS_HIGH_DMA        0x0008  /* device supports 64bit dma */
   1.340 -#define DEV_HAS_CHECKSUM        0x0010  /* device supports tx and rx checksum offloads */
    1.341 -#define DEV_HAS_VLAN            0x0020  /* device supports vlan tagging and stripping */
   1.342 -#define DEV_HAS_MSI             0x0040  /* device supports MSI */
   1.343 -#define DEV_HAS_MSI_X           0x0080  /* device supports MSI-X */
   1.344 -#define DEV_HAS_POWER_CNTRL     0x0100  /* device supports power savings */
   1.345 -#define DEV_HAS_PAUSEFRAME_TX   0x0200  /* device supports tx pause frames */
   1.346 -#define DEV_HAS_STATISTICS      0x0400  /* device supports hw statistics */
   1.347 -#define DEV_HAS_TEST_EXTENDED   0x0800  /* device supports extended diagnostic test */
   1.348 +#define DEV_NEED_TIMERIRQ	   0x00001  /* set the timer irq flag in the irq mask */
   1.349 +#define DEV_NEED_LINKTIMER	   0x00002  /* poll link settings. Relies on the timer irq */
   1.350 +#define DEV_HAS_LARGEDESC	   0x00004  /* device supports jumbo frames and needs packet format 2 */
   1.351 +#define DEV_HAS_HIGH_DMA           0x00008  /* device supports 64bit dma */
   1.352 +#define DEV_HAS_CHECKSUM           0x00010  /* device supports tx and rx checksum offloads */
    1.353 +#define DEV_HAS_VLAN               0x00020  /* device supports vlan tagging and stripping */
   1.354 +#define DEV_HAS_MSI                0x00040  /* device supports MSI */
   1.355 +#define DEV_HAS_MSI_X              0x00080  /* device supports MSI-X */
   1.356 +#define DEV_HAS_POWER_CNTRL        0x00100  /* device supports power savings */
   1.357 +#define DEV_HAS_STATISTICS_V1      0x00200  /* device supports hw statistics version 1 */
   1.358 +#define DEV_HAS_STATISTICS_V2      0x00400  /* device supports hw statistics version 2 */
   1.359 +#define DEV_HAS_TEST_EXTENDED      0x00800  /* device supports extended diagnostic test */
   1.360 +#define DEV_HAS_MGMT_UNIT          0x01000  /* device supports management unit */
   1.361 +#define DEV_HAS_CORRECT_MACADDR    0x02000  /* device supports correct mac address */
   1.362 +#define DEV_HAS_COLLISION_FIX      0x04000  /* device supports tx collision fix */
   1.363 +#define DEV_HAS_PAUSEFRAME_TX_V1   0x08000  /* device supports tx pause frames version 1 */
   1.364 +#define DEV_HAS_PAUSEFRAME_TX_V2   0x10000  /* device supports tx pause frames version 2 */
   1.365 +#define DEV_HAS_PAUSEFRAME_TX_V3   0x20000  /* device supports tx pause frames version 3 */
   1.366 +
   1.367 +
   1.368 +#define NVIDIA_ETHERNET_ID(deviceid,nv_driver_data) {\
   1.369 +	.vendor = PCI_VENDOR_ID_NVIDIA, \
   1.370 +	.device = deviceid, \
   1.371 +	.subvendor = PCI_ANY_ID, \
   1.372 +	.subdevice = PCI_ANY_ID, \
   1.373 +	.driver_data = nv_driver_data, \
   1.374 +},
   1.375 +
   1.376 +#define Mv_LED_Control 16
   1.377 +#define Mv_Page_Address 22
   1.378 +#define Mv_LED_FORCE_OFF 0x88
   1.379 +#define Mv_LED_DUAL_MODE3 0x40
   1.380 +
   1.381 +struct nvmsi_msg{
   1.382 +	u32 address_lo;
   1.383 +	u32 address_hi;
   1.384 +	u32 data;
   1.385 +};
   1.386  
   1.387  enum {
   1.388  	NvRegIrqStatus = 0x000,
   1.389  #define NVREG_IRQSTAT_MIIEVENT	0x040
   1.390 -#define NVREG_IRQSTAT_MASK		0x1ff
   1.391 +#define NVREG_IRQSTAT_MASK		0x81ff
   1.392  	NvRegIrqMask = 0x004,
   1.393  #define NVREG_IRQ_RX_ERROR		0x0001
   1.394  #define NVREG_IRQ_RX			0x0002
   1.395 @@ -183,23 +500,24 @@ enum {
   1.396  #define NVREG_IRQ_LINK			0x0040
   1.397  #define NVREG_IRQ_RX_FORCED		0x0080
   1.398  #define NVREG_IRQ_TX_FORCED		0x0100
   1.399 +#define NVREG_IRQ_RECOVER_ERROR		0x8000
   1.400  #define NVREG_IRQMASK_THROUGHPUT	0x00df
   1.401 -#define NVREG_IRQMASK_CPU		0x0040
   1.402 +#define NVREG_IRQMASK_CPU		0x0060
   1.403  #define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
   1.404  #define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
   1.405 -#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
   1.406 +#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
   1.407  
   1.408  #define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
   1.409 -					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
   1.410 -					NVREG_IRQ_TX_FORCED))
   1.411 +			NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
   1.412 +			NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
   1.413  
   1.414  	NvRegUnknownSetupReg6 = 0x008,
   1.415  #define NVREG_UNKSETUP6_VAL		3
   1.416  
   1.417 -/*
   1.418 - * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
   1.419 - * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
   1.420 - */
   1.421 +	/*
   1.422 +	 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
   1.423 +	 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
   1.424 +	 */
   1.425  	NvRegPollingInterval = 0x00c,
   1.426  #define NVREG_POLL_DEFAULT_THROUGHPUT	970
   1.427  #define NVREG_POLL_DEFAULT_CPU	13
   1.428 @@ -212,10 +530,20 @@ enum {
   1.429  #define NVREG_MISC1_HD		0x02
   1.430  #define NVREG_MISC1_FORCE	0x3b0f3c
   1.431  
   1.432 -	NvRegMacReset = 0x3c,
   1.433 +	NvRegMacReset = 0x34,
   1.434  #define NVREG_MAC_RESET_ASSERT	0x0F3
   1.435  	NvRegTransmitterControl = 0x084,
   1.436  #define NVREG_XMITCTL_START	0x01
   1.437 +#define NVREG_XMITCTL_MGMT_ST	0x40000000
   1.438 +#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
   1.439 +#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
   1.440 +#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
   1.441 +#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
   1.442 +#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
   1.443 +#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
   1.444 +#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
   1.445 +#define NVREG_XMITCTL_HOST_LOADED	0x00004000
   1.446 +#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
   1.447  	NvRegTransmitterStatus = 0x088,
   1.448  #define NVREG_XMITSTAT_BUSY	0x01
   1.449  
   1.450 @@ -231,6 +559,7 @@ enum {
   1.451  #define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
   1.452  	NvRegReceiverControl = 0x094,
   1.453  #define NVREG_RCVCTL_START	0x01
   1.454 +#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
   1.455  	NvRegReceiverStatus = 0x98,
   1.456  #define NVREG_RCVSTAT_BUSY	0x01
   1.457  
   1.458 @@ -241,7 +570,7 @@ enum {
   1.459  #define NVREG_RNDSEED_FORCE3	0x7400
   1.460  
   1.461  	NvRegTxDeferral = 0xA0,
   1.462 -#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
   1.463 +#define NVREG_TX_DEFERRAL_DEFAULT      	0x15050f
   1.464  #define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
   1.465  #define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
   1.466  	NvRegRxDeferral = 0xA4,
   1.467 @@ -252,7 +581,9 @@ enum {
   1.468  #define NVREG_MCASTADDRA_FORCE	0x01
   1.469  	NvRegMulticastAddrB = 0xB4,
   1.470  	NvRegMulticastMaskA = 0xB8,
   1.471 +#define NVREG_MCASTMASKA_NONE		0xffffffff
   1.472  	NvRegMulticastMaskB = 0xBC,
   1.473 +#define NVREG_MCASTMASKB_NONE		0xffff
   1.474  
   1.475  	NvRegPhyInterface = 0xC0,
   1.476  #define PHY_RGMII		0x10000000
   1.477 @@ -262,7 +593,8 @@ enum {
   1.478  	NvRegRingSizes = 0x108,
   1.479  #define NVREG_RINGSZ_TXSHIFT 0
   1.480  #define NVREG_RINGSZ_RXSHIFT 16
   1.481 -	NvRegUnknownTransmitterReg = 0x10c,
   1.482 +	NvRegTransmitPoll = 0x10c,
   1.483 +#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
   1.484  	NvRegLinkSpeed = 0x110,
   1.485  #define NVREG_LINKSPEED_FORCE 0x10000
   1.486  #define NVREG_LINKSPEED_10	1000
   1.487 @@ -283,22 +615,24 @@ enum {
   1.488  #define NVREG_TXRXCTL_RESET	0x0010
   1.489  #define NVREG_TXRXCTL_RXCHECK	0x0400
   1.490  #define NVREG_TXRXCTL_DESC_1	0
   1.491 -#define NVREG_TXRXCTL_DESC_2	0x02100
   1.492 -#define NVREG_TXRXCTL_DESC_3	0x02200
   1.493 +#define NVREG_TXRXCTL_DESC_2	0x002100
   1.494 +#define NVREG_TXRXCTL_DESC_3	0xc02200
   1.495  #define NVREG_TXRXCTL_VLANSTRIP 0x00040
   1.496  #define NVREG_TXRXCTL_VLANINS	0x00080
   1.497  	NvRegTxRingPhysAddrHigh = 0x148,
   1.498  	NvRegRxRingPhysAddrHigh = 0x14C,
   1.499  	NvRegTxPauseFrame = 0x170,
   1.500 -#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
   1.501 -#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
   1.502 +#define NVREG_TX_PAUSEFRAME_DISABLE 	0x01ff0080
   1.503 +#define NVREG_TX_PAUSEFRAME_ENABLE_V1 	0x01800010
   1.504 +#define NVREG_TX_PAUSEFRAME_ENABLE_V2 	0x056003f0
   1.505 +#define NVREG_TX_PAUSEFRAME_ENABLE_V3 	0x09f00880
   1.506  	NvRegMIIStatus = 0x180,
   1.507  #define NVREG_MIISTAT_ERROR		0x0001
   1.508  #define NVREG_MIISTAT_LINKCHANGE	0x0008
   1.509 -#define NVREG_MIISTAT_MASK		0x000f
   1.510 -#define NVREG_MIISTAT_MASK2		0x000f
   1.511 -	NvRegUnknownSetupReg4 = 0x184,
   1.512 -#define NVREG_UNKSETUP4_VAL	8
   1.513 +#define NVREG_MIISTAT_MASK_RW		0x0007
   1.514 +#define NVREG_MIISTAT_MASK_ALL		0x000f
   1.515 +	NvRegMIIMask = 0x184,
   1.516 +#define NVREG_MII_LINKCHANGE		0x0008
   1.517  
   1.518  	NvRegAdapterControl = 0x188,
   1.519  #define NVREG_ADAPTCTL_START	0x02
   1.520 @@ -328,6 +662,7 @@ enum {
   1.521  #define NVREG_WAKEUPFLAGS_ENABLE	0x1111
   1.522  
   1.523  	NvRegPatternCRC = 0x204,
   1.524 +#define NV_UNKNOWN_VAL  0x01
   1.525  	NvRegPatternMask = 0x208,
   1.526  	NvRegPowerCap = 0x268,
   1.527  #define NVREG_POWERCAP_D3SUPP	(1<<30)
   1.528 @@ -368,6 +703,7 @@ enum {
   1.529  	NvRegTxPause = 0x2e0,
   1.530  	NvRegRxPause = 0x2e4,
   1.531  	NvRegRxDropFrame = 0x2e8,
   1.532 +
   1.533  	NvRegVlanControl = 0x300,
   1.534  #define NVREG_VLANCONTROL_ENABLE	0x2000
   1.535  	NvRegMSIXMap0 = 0x3e0,
   1.536 @@ -409,7 +745,7 @@ typedef union _ring_type {
   1.537  #define NV_TX_CARRIERLOST	(1<<27)
   1.538  #define NV_TX_LATECOLLISION	(1<<28)
   1.539  #define NV_TX_UNDERFLOW		(1<<29)
   1.540 -#define NV_TX_ERROR		(1<<30)
   1.541 +#define NV_TX_ERROR		(1<<30) /* logical OR of all errors */
   1.542  #define NV_TX_VALID		(1<<31)
   1.543  
   1.544  #define NV_TX2_LASTPACKET	(1<<29)
   1.545 @@ -420,7 +756,7 @@ typedef union _ring_type {
   1.546  #define NV_TX2_LATECOLLISION	(1<<27)
   1.547  #define NV_TX2_UNDERFLOW	(1<<28)
   1.548  /* error and valid are the same for both */
   1.549 -#define NV_TX2_ERROR		(1<<30)
   1.550 +#define NV_TX2_ERROR		(1<<30) /* logical OR of all errors */
   1.551  #define NV_TX2_VALID		(1<<31)
   1.552  #define NV_TX2_TSO		(1<<28)
   1.553  #define NV_TX2_TSO_SHIFT	14
   1.554 @@ -441,13 +777,13 @@ typedef union _ring_type {
   1.555  #define NV_RX_CRCERR		(1<<27)
   1.556  #define NV_RX_OVERFLOW		(1<<28)
   1.557  #define NV_RX_FRAMINGERR	(1<<29)
   1.558 -#define NV_RX_ERROR		(1<<30)
   1.559 +#define NV_RX_ERROR		(1<<30) /* logical OR of all errors */
   1.560  #define NV_RX_AVAIL		(1<<31)
   1.561  
   1.562  #define NV_RX2_CHECKSUMMASK	(0x1C000000)
   1.563 -#define NV_RX2_CHECKSUMOK1	(0x10000000)
   1.564 -#define NV_RX2_CHECKSUMOK2	(0x14000000)
   1.565 -#define NV_RX2_CHECKSUMOK3	(0x18000000)
   1.566 +#define NV_RX2_CHECKSUM_IP	(0x10000000)
   1.567 +#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
   1.568 +#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
   1.569  #define NV_RX2_DESCRIPTORVALID	(1<<29)
   1.570  #define NV_RX2_SUBSTRACT1	(1<<25)
   1.571  #define NV_RX2_ERROR1		(1<<18)
   1.572 @@ -458,7 +794,7 @@ typedef union _ring_type {
   1.573  #define NV_RX2_OVERFLOW		(1<<23)
   1.574  #define NV_RX2_FRAMINGERR	(1<<24)
   1.575  /* error and avail are the same for both */
   1.576 -#define NV_RX2_ERROR		(1<<30)
   1.577 +#define NV_RX2_ERROR		(1<<30) /* logical OR of all errors */
   1.578  #define NV_RX2_AVAIL		(1<<31)
   1.579  
   1.580  #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
   1.581 @@ -466,7 +802,8 @@ typedef union _ring_type {
   1.582  
    1.583  /* Miscellaneous hardware related defines: */
   1.584  #define NV_PCI_REGSZ_VER1      	0x270
   1.585 -#define NV_PCI_REGSZ_VER2      	0x604
   1.586 +#define NV_PCI_REGSZ_VER2      	0x2d4
   1.587 +#define NV_PCI_REGSZ_VER3      	0x604
   1.588  
   1.589  /* various timeout delays: all in usec */
   1.590  #define NV_TXRX_RESET_DELAY	4
   1.591 @@ -492,12 +829,12 @@ typedef union _ring_type {
   1.592  #define NV_WATCHDOG_TIMEO	(5*HZ)
   1.593  
   1.594  #define RX_RING_DEFAULT		128
   1.595 -#define TX_RING_DEFAULT		256
   1.596 -#define RX_RING_MIN		128
   1.597 -#define TX_RING_MIN		64
   1.598 +#define TX_RING_DEFAULT		64
   1.599 +#define RX_RING_MIN		RX_RING_DEFAULT
   1.600 +#define TX_RING_MIN		TX_RING_DEFAULT
   1.601  #define RING_MAX_DESC_VER_1	1024
   1.602  #define RING_MAX_DESC_VER_2_3	16384
   1.603 -/*
   1.604 +/* 
   1.605   * Difference between the get and put pointers for the tx ring.
   1.606   * This is used to throttle the amount of data outstanding in the
   1.607   * tx ring.
   1.608 @@ -518,7 +855,7 @@ typedef union _ring_type {
   1.609  #define LINK_TIMEOUT	(3*HZ)
   1.610  #define STATS_INTERVAL	(10*HZ)
   1.611  
   1.612 -/*
   1.613 +/* 
   1.614   * desc_ver values:
   1.615   * The nic supports three different descriptor types:
   1.616   * - DESC_VER_1: Original
   1.617 @@ -532,16 +869,46 @@ typedef union _ring_type {
   1.618  /* PHY defines */
   1.619  #define PHY_OUI_MARVELL	0x5043
   1.620  #define PHY_OUI_CICADA	0x03f1
   1.621 +#define PHY_OUI_VITESSE	0x01c1
   1.622 +#define PHY_OUI_REALTEK	0x0732
   1.623  #define PHYID1_OUI_MASK	0x03ff
   1.624  #define PHYID1_OUI_SHFT	6
   1.625  #define PHYID2_OUI_MASK	0xfc00
   1.626  #define PHYID2_OUI_SHFT	10
   1.627 -#define PHY_INIT1	0x0f000
   1.628 -#define PHY_INIT2	0x0e00
   1.629 -#define PHY_INIT3	0x01000
   1.630 -#define PHY_INIT4	0x0200
   1.631 -#define PHY_INIT5	0x0004
   1.632 -#define PHY_INIT6	0x02000
   1.633 +#define PHYID2_MODEL_MASK		0x03f0
   1.634 +#define PHY_MODEL_MARVELL_E3016		0x220
   1.635 +#define PHY_MODEL_MARVELL_E1011		0xb0
   1.636 +#define PHY_MARVELL_E3016_INITMASK	0x0300
   1.637 +#define PHY_CICADA_INIT1	0x0f000
   1.638 +#define PHY_CICADA_INIT2	0x0e00
   1.639 +#define PHY_CICADA_INIT3	0x01000
   1.640 +#define PHY_CICADA_INIT4	0x0200
   1.641 +#define PHY_CICADA_INIT5	0x0004
   1.642 +#define PHY_CICADA_INIT6	0x02000
   1.643 +#define PHY_VITESSE_INIT_REG1	0x1f
   1.644 +#define PHY_VITESSE_INIT_REG2	0x10
   1.645 +#define PHY_VITESSE_INIT_REG3	0x11
   1.646 +#define PHY_VITESSE_INIT_REG4	0x12
   1.647 +#define PHY_VITESSE_INIT_MSK1	0xc
   1.648 +#define PHY_VITESSE_INIT_MSK2	0x0180
   1.649 +#define PHY_VITESSE_INIT1	0x52b5
   1.650 +#define PHY_VITESSE_INIT2	0xaf8a
   1.651 +#define PHY_VITESSE_INIT3	0x8
   1.652 +#define PHY_VITESSE_INIT4	0x8f8a
   1.653 +#define PHY_VITESSE_INIT5	0xaf86
   1.654 +#define PHY_VITESSE_INIT6	0x8f86
   1.655 +#define PHY_VITESSE_INIT7	0xaf82
   1.656 +#define PHY_VITESSE_INIT8	0x0100
   1.657 +#define PHY_VITESSE_INIT9	0x8f82
   1.658 +#define PHY_VITESSE_INIT10	0x0
   1.659 +#define PHY_REALTEK_INIT_REG1	0x1f
   1.660 +#define PHY_REALTEK_INIT_REG2	0x19
   1.661 +#define PHY_REALTEK_INIT_REG3	0x13
   1.662 +#define PHY_REALTEK_INIT1	0x0000
   1.663 +#define PHY_REALTEK_INIT2	0x8e00
   1.664 +#define PHY_REALTEK_INIT3 	0x0001
   1.665 +#define PHY_REALTEK_INIT4	0xad17
   1.666 +
   1.667  #define PHY_GIGABIT	0x0100
   1.668  
   1.669  #define PHY_TIMEOUT	0x1
   1.670 @@ -572,74 +939,97 @@ typedef union _ring_type {
   1.671  #define NV_MSI_X_VECTOR_TX    0x1
   1.672  #define NV_MSI_X_VECTOR_OTHER 0x2
   1.673  
   1.674 -/* statistics */
   1.675 +#define NV_RESTART_TX	      0x1
   1.676 +#define NV_RESTART_RX	      0x2
   1.677 +#define NVLAN_DISABLE_ALL_FEATURES  do { \
   1.678 +	msi = NV_MSI_INT_DISABLED; \
   1.679 +	msix = NV_MSIX_INT_DISABLED; \
   1.680 +	scatter_gather = NV_SCATTER_GATHER_DISABLED; \
   1.681 +	tso_offload = NV_TSO_DISABLED; \
   1.682 +	tx_checksum_offload = NV_TX_CHECKSUM_DISABLED; \
   1.683 +	rx_checksum_offload = NV_RX_CHECKSUM_DISABLED; \
   1.684 +	tx_flow_control = NV_TX_FLOW_CONTROL_DISABLED; \
   1.685 +	rx_flow_control = NV_RX_FLOW_CONTROL_DISABLED; \
   1.686 +	wol = NV_WOL_DISABLED; \
   1.687 +	tagging_8021pq = NV_8021PQ_DISABLED; \
   1.688 +} while (0)
   1.689 +
   1.690  struct nv_ethtool_str {
   1.691  	char name[ETH_GSTRING_LEN];
   1.692  };
   1.693  
   1.694  static const struct nv_ethtool_str nv_estats_str[] = {
   1.695 +	{ "tx_dropped" },
   1.696 +	{ "tx_fifo_errors" },
   1.697 +	{ "tx_carrier_errors" },
   1.698 +	{ "tx_packets" },
   1.699  	{ "tx_bytes" },
   1.700 +	{ "rx_crc_errors" },
   1.701 +	{ "rx_over_errors" },
   1.702 +	{ "rx_errors_total" },
   1.703 +	{ "rx_packets" },
   1.704 +	{ "rx_bytes" },
   1.705 +
   1.706 +	/* hardware counters */
   1.707  	{ "tx_zero_rexmt" },
   1.708  	{ "tx_one_rexmt" },
   1.709  	{ "tx_many_rexmt" },
   1.710  	{ "tx_late_collision" },
   1.711 -	{ "tx_fifo_errors" },
   1.712 -	{ "tx_carrier_errors" },
   1.713  	{ "tx_excess_deferral" },
   1.714  	{ "tx_retry_error" },
   1.715 -	{ "tx_deferral" },
   1.716 -	{ "tx_packets" },
   1.717 -	{ "tx_pause" },
   1.718  	{ "rx_frame_error" },
   1.719  	{ "rx_extra_byte" },
   1.720  	{ "rx_late_collision" },
   1.721  	{ "rx_runt" },
   1.722  	{ "rx_frame_too_long" },
   1.723 -	{ "rx_over_errors" },
   1.724 -	{ "rx_crc_errors" },
   1.725  	{ "rx_frame_align_error" },
   1.726  	{ "rx_length_error" },
   1.727  	{ "rx_unicast" },
   1.728  	{ "rx_multicast" },
   1.729  	{ "rx_broadcast" },
   1.730 -	{ "rx_bytes" },
   1.731 +	{ "tx_deferral" },
   1.732 +	{ "tx_pause" },
   1.733  	{ "rx_pause" },
   1.734 -	{ "rx_drop_frame" },
   1.735 -	{ "rx_packets" },
   1.736 -	{ "rx_errors_total" }
   1.737 +	{ "rx_drop_frame" }
   1.738  };
   1.739  
   1.740  struct nv_ethtool_stats {
   1.741 +	u64 tx_dropped;
   1.742 +	u64 tx_fifo_errors;
   1.743 +	u64 tx_carrier_errors;
   1.744 +	u64 tx_packets;
   1.745  	u64 tx_bytes;
   1.746 +	u64 rx_crc_errors;
   1.747 +	u64 rx_over_errors;
   1.748 +	u64 rx_errors_total;
   1.749 +	u64 rx_packets;
   1.750 +	u64 rx_bytes;
   1.751 +
   1.752 +	/* hardware counters */
   1.753  	u64 tx_zero_rexmt;
   1.754  	u64 tx_one_rexmt;
   1.755  	u64 tx_many_rexmt;
   1.756  	u64 tx_late_collision;
   1.757 -	u64 tx_fifo_errors;
   1.758 -	u64 tx_carrier_errors;
   1.759  	u64 tx_excess_deferral;
   1.760  	u64 tx_retry_error;
   1.761 -	u64 tx_deferral;
   1.762 -	u64 tx_packets;
   1.763 -	u64 tx_pause;
   1.764  	u64 rx_frame_error;
   1.765  	u64 rx_extra_byte;
   1.766  	u64 rx_late_collision;
   1.767  	u64 rx_runt;
   1.768  	u64 rx_frame_too_long;
   1.769 -	u64 rx_over_errors;
   1.770 -	u64 rx_crc_errors;
   1.771  	u64 rx_frame_align_error;
   1.772  	u64 rx_length_error;
   1.773  	u64 rx_unicast;
   1.774  	u64 rx_multicast;
   1.775  	u64 rx_broadcast;
   1.776 -	u64 rx_bytes;
   1.777 +	u64 tx_deferral;
   1.778 +	u64 tx_pause;
   1.779  	u64 rx_pause;
   1.780  	u64 rx_drop_frame;
   1.781 -	u64 rx_packets;
   1.782 -	u64 rx_errors_total;
   1.783  };
   1.784 +#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
   1.785 +#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 4)
   1.786 +#define NV_DEV_STATISTICS_SW_COUNT 10
   1.787  
   1.788  /* diagnostics */
   1.789  #define NV_TEST_COUNT_BASE 3
   1.790 @@ -667,20 +1057,63 @@ static const struct register_test nv_reg
   1.791  	{ 0,0 }
   1.792  };
   1.793  
   1.794 +struct nv_skb_map {
   1.795 +	struct sk_buff *skb;
   1.796 +	dma_addr_t dma;
   1.797 +	unsigned int dma_len;
   1.798 +};
   1.799 +
   1.800  /*
   1.801   * SMP locking:
   1.802   * All hardware access under dev->priv->lock, except the performance
   1.803   * critical parts:
   1.804   * - rx is (pseudo-) lockless: it relies on the single-threading provided
   1.805   *	by the arch code for interrupts.
   1.806 - * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
   1.807 + * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
   1.808   *	needs dev->priv->lock :-(
   1.809 - * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
   1.810 + * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
   1.811   */
   1.812  
   1.813  /* in dev: base, irq */
   1.814  struct fe_priv {
   1.815 +
   1.816 +	/* fields used in fast path are grouped together 
   1.817 +	   for better cache performance
   1.818 +	 */
   1.819  	spinlock_t lock;
   1.820 +	spinlock_t timer_lock;
   1.821 +	void __iomem *base;
   1.822 +	struct pci_dev *pci_dev;
   1.823 +	u32 txrxctl_bits;
   1.824 +	int stop_tx;
   1.825 +	int need_linktimer;
   1.826 +	unsigned long link_timeout;
   1.827 +	u32 irqmask;
   1.828 +	u32 msi_flags;
   1.829 +
   1.830 +	unsigned int rx_buf_sz;
   1.831 +	struct vlan_group *vlangrp;
   1.832 +	int tx_ring_size;
   1.833 +	int rx_csum;
   1.834 +
   1.835 +	/*
   1.836 +	 * rx specific fields in fast path
   1.837 +	 */
   1.838 +	ring_type get_rx __attribute__((aligned(L1_CACHE_BYTES)));
   1.839 +	ring_type put_rx, first_rx, last_rx;
   1.840 +	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
   1.841 +	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
   1.842 +
   1.843 +	/*
   1.844 +	 * tx specific fields in fast path
   1.845 +	 */
   1.846 +	ring_type get_tx __attribute__((aligned(L1_CACHE_BYTES)));
   1.847 +	ring_type put_tx, first_tx, last_tx;
   1.848 +	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
   1.849 +	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
   1.850 +
   1.851 +	struct nv_skb_map *rx_skb;
   1.852 +	struct nv_skb_map *tx_skb;
   1.853  
   1.854  	/* General data:
   1.855  	 * Locking: spin_lock(&np->lock); */
   1.856 @@ -689,69 +1122,60 @@ struct fe_priv {
   1.857  	int in_shutdown;
   1.858  	u32 linkspeed;
   1.859  	int duplex;
   1.860 +	int speed_duplex;
   1.861  	int autoneg;
   1.862  	int fixed_mode;
   1.863  	int phyaddr;
   1.864  	int wolenabled;
   1.865  	unsigned int phy_oui;
   1.866 +	unsigned int phy_model;
   1.867  	u16 gigabit;
   1.868  	int intr_test;
   1.869 +	int recover_error;
   1.870  
   1.871  	/* General data: RO fields */
   1.872  	dma_addr_t ring_addr;
   1.873 -	struct pci_dev *pci_dev;
   1.874  	u32 orig_mac[2];
   1.875 -	u32 irqmask;
   1.876  	u32 desc_ver;
   1.877 -	u32 txrxctl_bits;
   1.878  	u32 vlanctl_bits;
   1.879  	u32 driver_data;
   1.880  	u32 register_size;
   1.881 -
   1.882 -	void __iomem *base;
   1.883 +	u32 mac_in_use;
   1.884  
   1.885  	/* rx specific fields.
    1.886  	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
   1.887  	 */
   1.888  	ring_type rx_ring;
   1.889 -	unsigned int cur_rx, refill_rx;
   1.890 -	struct sk_buff **rx_skbuff;
   1.891 -	dma_addr_t *rx_dma;
   1.892 -	unsigned int rx_buf_sz;
   1.893  	unsigned int pkt_limit;
   1.894  	struct timer_list oom_kick;
   1.895  	struct timer_list nic_poll;
   1.896  	struct timer_list stats_poll;
   1.897  	u32 nic_poll_irq;
   1.898  	int rx_ring_size;
   1.899 -
   1.900 -	/* media detection workaround.
   1.901 -	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
   1.902 -	 */
   1.903 -	int need_linktimer;
   1.904 -	unsigned long link_timeout;
   1.905 +	u32 rx_len_errors;
   1.906  	/*
   1.907  	 * tx specific fields.
   1.908  	 */
   1.909  	ring_type tx_ring;
   1.910 -	unsigned int next_tx, nic_tx;
   1.911 -	struct sk_buff **tx_skbuff;
   1.912 -	dma_addr_t *tx_dma;
   1.913 -	unsigned int *tx_dma_len;
   1.914  	u32 tx_flags;
   1.915 -	int tx_ring_size;
   1.916  	int tx_limit_start;
   1.917  	int tx_limit_stop;
   1.918  
   1.919 -	/* vlan fields */
   1.920 -	struct vlan_group *vlangrp;
   1.921  
   1.922  	/* msi/msi-x fields */
   1.923 -	u32 msi_flags;
   1.924  	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
   1.925  
   1.926  	/* flow control */
   1.927  	u32 pause_flags;
   1.928 +	u32 led_stats[3];
   1.929 +	u32 saved_config_space[64];
   1.930 +	u32 saved_nvregphyinterface;
   1.931 +#if NVVER < SUSE10
   1.932 +	u32 pci_state[16];
   1.933 +#endif
   1.934 +	/* msix table */
   1.935 +	struct nvmsi_msg nvmsg[NV_MSI_X_MAX_VECTORS];
   1.936 +	unsigned long msix_pa_addr;
   1.937  };
   1.938  
   1.939  /*
   1.940 @@ -762,12 +1186,12 @@ static int max_interrupt_work = 5;
   1.941  
   1.942  /*
    1.943   * Optimization can be either throughput mode or cpu mode
   1.944 - *
   1.945 + * 
   1.946   * Throughput Mode: Every tx and rx packet will generate an interrupt.
   1.947   * CPU Mode: Interrupts are controlled by a timer.
   1.948   */
   1.949  enum {
   1.950 -	NV_OPTIMIZATION_MODE_THROUGHPUT,
   1.951 +	NV_OPTIMIZATION_MODE_THROUGHPUT, 
   1.952  	NV_OPTIMIZATION_MODE_CPU
   1.953  };
   1.954  static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
   1.955 @@ -788,16 +1212,112 @@ enum {
   1.956  	NV_MSI_INT_DISABLED,
   1.957  	NV_MSI_INT_ENABLED
   1.958  };
   1.959 +
   1.960 +#ifdef CONFIG_PCI_MSI 
   1.961  static int msi = NV_MSI_INT_ENABLED;
   1.962 +#else
   1.963 +static int msi = NV_MSI_INT_DISABLED;
   1.964 +#endif
   1.965  
   1.966  /*
   1.967   * MSIX interrupts
   1.968   */
   1.969  enum {
   1.970 -	NV_MSIX_INT_DISABLED,
   1.971 +	NV_MSIX_INT_DISABLED, 
   1.972  	NV_MSIX_INT_ENABLED
   1.973  };
   1.974 +
   1.975 +#ifdef CONFIG_PCI_MSI 
   1.976  static int msix = NV_MSIX_INT_ENABLED;
   1.977 +#else
   1.978 +static int msix = NV_MSIX_INT_DISABLED;
   1.979 +#endif
   1.980 +/*
   1.981 + * PHY Speed and Duplex
   1.982 + */
   1.983 +enum {
   1.984 +	NV_SPEED_DUPLEX_AUTO,
   1.985 +	NV_SPEED_DUPLEX_10_HALF_DUPLEX,
   1.986 +	NV_SPEED_DUPLEX_10_FULL_DUPLEX,
   1.987 +	NV_SPEED_DUPLEX_100_HALF_DUPLEX,
   1.988 +	NV_SPEED_DUPLEX_100_FULL_DUPLEX,
   1.989 +	NV_SPEED_DUPLEX_1000_FULL_DUPLEX
   1.990 +};
   1.991 +static int speed_duplex = NV_SPEED_DUPLEX_AUTO;
   1.992 +
   1.993 +/*
   1.994 + * PHY autonegotiation
   1.995 + */
   1.996 +static int autoneg = AUTONEG_ENABLE;
   1.997 +
   1.998 +/*
   1.999 + * Scatter gather
  1.1000 + */
  1.1001 +enum {
  1.1002 +	NV_SCATTER_GATHER_DISABLED,
  1.1003 +	NV_SCATTER_GATHER_ENABLED
  1.1004 +};
  1.1005 +static int scatter_gather = NV_SCATTER_GATHER_ENABLED;
  1.1006 +
  1.1007 +/*
  1.1008 + * TCP Segmentation Offload (TSO)
  1.1009 + */
  1.1010 +enum {
  1.1011 +	NV_TSO_DISABLED,
  1.1012 +	NV_TSO_ENABLED
  1.1013 +};
  1.1014 +static int tso_offload = NV_TSO_ENABLED;
  1.1015 +
  1.1016 +/*
  1.1017 + * MTU settings
  1.1018 + */
  1.1019 +static int mtu = ETH_DATA_LEN;
  1.1020 +
  1.1021 +/*
  1.1022 + * Tx checksum offload
  1.1023 + */
  1.1024 +enum {
  1.1025 +	NV_TX_CHECKSUM_DISABLED, 
  1.1026 +	NV_TX_CHECKSUM_ENABLED 
  1.1027 +};
  1.1028 +static int tx_checksum_offload = NV_TX_CHECKSUM_ENABLED;
  1.1029 +
  1.1030 +/*
  1.1031 + * Rx checksum offload
  1.1032 + */
  1.1033 +enum {
  1.1034 +	NV_RX_CHECKSUM_DISABLED, 
  1.1035 +	NV_RX_CHECKSUM_ENABLED 
  1.1036 +};
  1.1037 +static int rx_checksum_offload = NV_RX_CHECKSUM_ENABLED;
  1.1038 +
  1.1039 +/*
  1.1040 + * Tx ring size
  1.1041 + */
  1.1042 +static int tx_ring_size = TX_RING_DEFAULT;
  1.1043 +
  1.1044 +/*
  1.1045 + * Rx ring size
  1.1046 + */
  1.1047 +static int rx_ring_size = RX_RING_DEFAULT;
  1.1048 +
  1.1049 +/*
  1.1050 + * Tx flow control
  1.1051 + */
  1.1052 +enum {
  1.1053 +	NV_TX_FLOW_CONTROL_DISABLED, 
  1.1054 +	NV_TX_FLOW_CONTROL_ENABLED
  1.1055 +};
  1.1056 +static int tx_flow_control = NV_TX_FLOW_CONTROL_ENABLED;
  1.1057 +
  1.1058 +/*
  1.1059 + * Rx flow control
  1.1060 + */
  1.1061 +enum {
  1.1062 +	NV_RX_FLOW_CONTROL_DISABLED, 
  1.1063 +	NV_RX_FLOW_CONTROL_ENABLED
  1.1064 +};
  1.1065 +static int rx_flow_control = NV_RX_FLOW_CONTROL_ENABLED;
  1.1066  
  1.1067  /*
  1.1068   * DMA 64bit
  1.1069 @@ -808,14 +1328,98 @@ enum {
  1.1070  };
  1.1071  static int dma_64bit = NV_DMA_64BIT_ENABLED;
  1.1072  
  1.1073 +/*
  1.1074 + * Wake On Lan
  1.1075 + */
  1.1076 +enum {
  1.1077 +	NV_WOL_DISABLED,
  1.1078 +	NV_WOL_ENABLED
  1.1079 +};
  1.1080 +static int wol = NV_WOL_DISABLED;
  1.1081 +
  1.1082 +/*
  1.1083 + * Tagging 802.1pq
  1.1084 + */
  1.1085 +enum {
  1.1086 +	NV_8021PQ_DISABLED,
  1.1087 +	NV_8021PQ_ENABLED
  1.1088 +};
  1.1089 +static int tagging_8021pq = NV_8021PQ_ENABLED;
  1.1090 +
  1.1091 +enum {
  1.1092 +	NV_LOW_POWER_DISABLED,
  1.1093 +	NV_LOW_POWER_ENABLED
  1.1094 +};
  1.1095 +static int lowpowerspeed = NV_LOW_POWER_ENABLED;
  1.1096 +
  1.1097 +static int debug = 0;
  1.1098 +
  1.1099 +#if NVVER < RHES4
  1.1100 +static inline unsigned long nv_msecs_to_jiffies(const unsigned int m)
  1.1101 +{
  1.1102 +#if HZ <= 1000 && !(1000 % HZ)
  1.1103 +	return (m + (1000 / HZ) - 1) / (1000 / HZ);
  1.1104 +#elif HZ > 1000 && !(HZ % 1000)
  1.1105 +	return m * (HZ / 1000);
  1.1106 +#else
  1.1107 +	return (m * HZ + 999) / 1000;
  1.1108 +#endif
  1.1109 +}
  1.1110 +#endif
  1.1111 +
  1.1112 +static void nv_msleep(unsigned int msecs)
  1.1113 +{
  1.1114 +#if NVVER > SLES9 
  1.1115 +	msleep(msecs);
  1.1116 +#else
  1.1117 +	unsigned long timeout = nv_msecs_to_jiffies(msecs);
  1.1118 +
  1.1119 +	while (timeout) {
  1.1120 +		set_current_state(TASK_UNINTERRUPTIBLE);
  1.1121 +		timeout = schedule_timeout(timeout);
  1.1122 +	}
  1.1123 +#endif
  1.1124 +}
  1.1125 +
  1.1126  static inline struct fe_priv *get_nvpriv(struct net_device *dev)
  1.1127  {
  1.1128 +#if NVVER > RHES3 
  1.1129  	return netdev_priv(dev);
  1.1130 +#else
  1.1131 +	return (struct fe_priv *) dev->priv;
  1.1132 +#endif
  1.1133 +}
  1.1134 +
  1.1135 +static void __init quirk_nforce_network_class(struct pci_dev *pdev)
  1.1136 +{
  1.1137 +	/* Some implementations of the nVidia network controllers
   1.1138 +	 * show up as bridges, but we need to see them as network
  1.1139 +	 * devices.
  1.1140 +	 */
  1.1141 +
  1.1142 +	/* If this is already known as a network ctlr, do nothing. */
  1.1143 +	if ((pdev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET)
  1.1144 +		return;
  1.1145 +
  1.1146 +	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_OTHER) {
  1.1147 +		char    c;
  1.1148 +
  1.1149 +		/* Clearing bit 6 of the register at 0xf8
  1.1150 +		 * selects Ethernet device class
  1.1151 +		 */
  1.1152 +		pci_read_config_byte(pdev, 0xf8, &c);
  1.1153 +		c &= 0xbf;
  1.1154 +		pci_write_config_byte(pdev, 0xf8, c);
  1.1155 +
  1.1156 +		/* sysfs needs pdev->class to be set correctly */
  1.1157 +		pdev->class &= 0x0000ff;
  1.1158 +		pdev->class |= (PCI_CLASS_NETWORK_ETHERNET << 8);
  1.1159 +	}
  1.1160  }
  1.1161  
  1.1162  static inline u8 __iomem *get_hwbase(struct net_device *dev)
  1.1163  {
  1.1164 -	return ((struct fe_priv *)netdev_priv(dev))->base;
  1.1165 +	return ((struct fe_priv *)get_nvpriv(dev))->base;
  1.1166  }
  1.1167  
  1.1168  static inline void pci_push(u8 __iomem *base)
  1.1169 @@ -836,7 +1440,7 @@ static inline u32 nv_descr_getlength_ex(
  1.1170  }
  1.1171  
  1.1172  static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
  1.1173 -				int delay, int delaymax, const char *msg)
  1.1174 +		int delay, int delaymax, const char *msg)
  1.1175  {
  1.1176  	u8 __iomem *base = get_hwbase(dev);
  1.1177  
  1.1178 @@ -887,22 +1491,16 @@ static void free_rings(struct net_device
  1.1179  	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.1180  		if(np->rx_ring.orig)
  1.1181  			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
  1.1182 -					    np->rx_ring.orig, np->ring_addr);
  1.1183 +					np->rx_ring.orig, np->ring_addr);
  1.1184  	} else {
  1.1185  		if (np->rx_ring.ex)
  1.1186  			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
  1.1187 -					    np->rx_ring.ex, np->ring_addr);
  1.1188 -	}
  1.1189 -	if (np->rx_skbuff)
  1.1190 -		kfree(np->rx_skbuff);
  1.1191 -	if (np->rx_dma)
  1.1192 -		kfree(np->rx_dma);
  1.1193 -	if (np->tx_skbuff)
  1.1194 -		kfree(np->tx_skbuff);
  1.1195 -	if (np->tx_dma)
  1.1196 -		kfree(np->tx_dma);
  1.1197 -	if (np->tx_dma_len)
  1.1198 -		kfree(np->tx_dma_len);
  1.1199 +					np->rx_ring.ex, np->ring_addr);
  1.1200 +	}
  1.1201 +	if (np->rx_skb)
  1.1202 +		kfree(np->rx_skb);
  1.1203 +	if (np->tx_skb)
  1.1204 +		kfree(np->tx_skb);	
  1.1205  }
  1.1206  
  1.1207  static int using_multi_irqs(struct net_device *dev)
  1.1208 @@ -910,8 +1508,8 @@ static int using_multi_irqs(struct net_d
  1.1209  	struct fe_priv *np = get_nvpriv(dev);
  1.1210  
  1.1211  	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
  1.1212 -	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
  1.1213 -	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
  1.1214 +			((np->msi_flags & NV_MSI_X_ENABLED) && 
  1.1215 +			 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
  1.1216  		return 0;
  1.1217  	else
  1.1218  		return 1;
  1.1219 @@ -921,11 +1519,13 @@ static void nv_enable_irq(struct net_dev
  1.1220  {
  1.1221  	struct fe_priv *np = get_nvpriv(dev);
  1.1222  
  1.1223 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
   1.1224 +	/* enable either the single vector or the per-ring MSI-X vectors */
  1.1225  	if (!using_multi_irqs(dev)) {
  1.1226  		if (np->msi_flags & NV_MSI_X_ENABLED)
  1.1227  			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1.1228  		else
  1.1229 -			enable_irq(dev->irq);
  1.1230 +			enable_irq(np->pci_dev->irq);
  1.1231  	} else {
  1.1232  		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1.1233  		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
  1.1234 @@ -937,11 +1537,12 @@ static void nv_disable_irq(struct net_de
  1.1235  {
  1.1236  	struct fe_priv *np = get_nvpriv(dev);
  1.1237  
  1.1238 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1239  	if (!using_multi_irqs(dev)) {
  1.1240  		if (np->msi_flags & NV_MSI_X_ENABLED)
  1.1241  			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1.1242  		else
  1.1243 -			disable_irq(dev->irq);
  1.1244 +			disable_irq(np->pci_dev->irq);
  1.1245  	} else {
  1.1246  		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1.1247  		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
  1.1248 @@ -953,8 +1554,11 @@ static void nv_disable_irq(struct net_de
  1.1249  static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
  1.1250  {
  1.1251  	u8 __iomem *base = get_hwbase(dev);
  1.1252 +	struct fe_priv *np = get_nvpriv(dev);
  1.1253  
  1.1254  	writel(mask, base + NvRegIrqMask);
  1.1255 +	if (np->msi_flags & NV_MSI_ENABLED)
  1.1256 +		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
  1.1257  }
  1.1258  
  1.1259  static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
  1.1260 @@ -982,7 +1586,7 @@ static int mii_rw(struct net_device *dev
  1.1261  	u32 reg;
  1.1262  	int retval;
  1.1263  
  1.1264 -	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
  1.1265 +	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
  1.1266  
  1.1267  	reg = readl(base + NvRegMIIControl);
  1.1268  	if (reg & NVREG_MIICTL_INUSE) {
  1.1269 @@ -998,7 +1602,7 @@ static int mii_rw(struct net_device *dev
  1.1270  	writel(reg, base + NvRegMIIControl);
  1.1271  
  1.1272  	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
  1.1273 -			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
  1.1274 +				NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
  1.1275  		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
  1.1276  				dev->name, miireg, addr);
  1.1277  		retval = -1;
  1.1278 @@ -1020,29 +1624,111 @@ static int mii_rw(struct net_device *dev
  1.1279  	return retval;
  1.1280  }
  1.1281  
  1.1282 -static int phy_reset(struct net_device *dev)
  1.1283 +static void nv_save_LED_stats(struct net_device *dev)
  1.1284  {
  1.1285 -	struct fe_priv *np = netdev_priv(dev);
  1.1286 +	struct fe_priv *np = get_nvpriv(dev);
  1.1287 +	u32 reg=0;
  1.1288 +	u32 value=0;
  1.1289 +	int i=0;
  1.1290 +
  1.1291 +	reg = Mv_Page_Address;
  1.1292 +	value = 3;
  1.1293 +	mii_rw(dev,np->phyaddr,reg,value);
  1.1294 +	udelay(5);
  1.1295 +
  1.1296 +	reg = Mv_LED_Control;
  1.1297 +	for(i=0;i<3;i++){
  1.1298 +		np->led_stats[i]=mii_rw(dev,np->phyaddr,reg+i,MII_READ);	
  1.1299 +		dprintk(KERN_DEBUG "%s: save LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
  1.1300 +	}
  1.1301 +
  1.1302 +}
  1.1303 +
  1.1304 +static void nv_restore_LED_stats(struct net_device *dev)
  1.1305 +{
  1.1306 +
  1.1307 +	struct fe_priv *np = get_nvpriv(dev);
  1.1308 +	u32 reg=0;
  1.1309 +	u32 value=0;
  1.1310 +	int i=0;
  1.1311 +
  1.1312 +	reg = Mv_Page_Address;
  1.1313 +	value = 3;
  1.1314 +	mii_rw(dev,np->phyaddr,reg,value);
  1.1315 +	udelay(5);
  1.1316 +
  1.1317 +	reg = Mv_LED_Control;
  1.1318 +	for(i=0;i<3;i++){
  1.1319 +		mii_rw(dev,np->phyaddr,reg+i,np->led_stats[i]);	
  1.1320 +		udelay(1);
  1.1321 +		dprintk(KERN_DEBUG "%s: restore LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
  1.1322 +	}
  1.1323 +
  1.1324 +}
  1.1325 +
  1.1326 +static void nv_LED_on(struct net_device *dev)
  1.1327 +{
  1.1328 +	struct fe_priv *np = get_nvpriv(dev);
  1.1329 +	u32 reg=0;
  1.1330 +	u32 value=0;
  1.1331 +
  1.1332 +	reg = Mv_Page_Address;
  1.1333 +	value = 3;
  1.1334 +	mii_rw(dev,np->phyaddr,reg,value);
  1.1335 +	udelay(5);
  1.1336 +
  1.1337 +	reg = Mv_LED_Control;
  1.1338 +	mii_rw(dev,np->phyaddr,reg,Mv_LED_DUAL_MODE3);	
  1.1339 +
  1.1340 +}
  1.1341 +
  1.1342 +static void nv_LED_off(struct net_device *dev)
  1.1343 +{
  1.1344 +	struct fe_priv *np = get_nvpriv(dev);
  1.1345 +	u32 reg=0;
  1.1346 +	u32 value=0;
  1.1347 +
  1.1348 +	reg = Mv_Page_Address;
  1.1349 +	value = 3;
  1.1350 +	mii_rw(dev,np->phyaddr,reg,value);
  1.1351 +	udelay(5);
  1.1352 +
  1.1353 +	reg = Mv_LED_Control;
  1.1354 +	mii_rw(dev,np->phyaddr,reg,Mv_LED_FORCE_OFF);	
  1.1355 +	udelay(1);
  1.1356 +
  1.1357 +}
  1.1358 +
  1.1359 +static int phy_reset(struct net_device *dev, u32 bmcr_setup)
  1.1360 +{
  1.1361 +	struct fe_priv *np = get_nvpriv(dev);
  1.1362  	u32 miicontrol;
  1.1363  	unsigned int tries = 0;
  1.1364  
  1.1365 -	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1.1366 -	miicontrol |= BMCR_RESET;
  1.1367 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1368 +	if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
  1.1369 +		nv_save_LED_stats(dev);
  1.1370 +	}
  1.1371 +	miicontrol = BMCR_RESET | bmcr_setup;
  1.1372  	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
  1.1373  		return -1;
  1.1374  	}
  1.1375  
  1.1376  	/* wait for 500ms */
  1.1377 -	msleep(500);
  1.1378 +	nv_msleep(500);
  1.1379  
  1.1380  	/* must wait till reset is deasserted */
  1.1381  	while (miicontrol & BMCR_RESET) {
  1.1382 -		msleep(10);
  1.1383 +		nv_msleep(10);
  1.1384  		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1.1385  		/* FIXME: 100 tries seem excessive */
  1.1386  		if (tries++ > 100)
  1.1387  			return -1;
  1.1388  	}
  1.1389 +	if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
  1.1390 +		nv_restore_LED_stats(dev);
  1.1391 +	}
  1.1392 +
  1.1393  	return 0;
  1.1394  }
  1.1395  
  1.1396 @@ -1052,9 +1738,59 @@ static int phy_init(struct net_device *d
  1.1397  	u8 __iomem *base = get_hwbase(dev);
  1.1398  	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
  1.1399  
  1.1400 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1401 +	/* phy errata for E3016 phy */
  1.1402 +	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
  1.1403 +		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
  1.1404 +		reg &= ~PHY_MARVELL_E3016_INITMASK;
  1.1405 +		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
  1.1406 +			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
  1.1407 +			return PHY_ERROR;
  1.1408 +		}
  1.1409 +	}
  1.1410 +
  1.1411 +	if (np->phy_oui == PHY_OUI_REALTEK) {
  1.1412 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1.1413 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1414 +			return PHY_ERROR;
  1.1415 +		}
  1.1416 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
  1.1417 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1418 +			return PHY_ERROR;
  1.1419 +		}
  1.1420 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
  1.1421 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1422 +			return PHY_ERROR;
  1.1423 +		}
  1.1424 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
  1.1425 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1426 +			return PHY_ERROR;
  1.1427 +		}
  1.1428 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1.1429 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1430 +			return PHY_ERROR;
  1.1431 +		}
  1.1432 +	}
  1.1433 +
  1.1434  	/* set advertise register */
  1.1435  	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
  1.1436 -	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
  1.1437 +	reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
  1.1438 +	if (np->speed_duplex == NV_SPEED_DUPLEX_AUTO)
  1.1439 +		reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
  1.1440 +	if (np->speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
  1.1441 +		reg |= ADVERTISE_10HALF;
  1.1442 +	if (np->speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
  1.1443 +		reg |= ADVERTISE_10FULL;
  1.1444 +	if (np->speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
  1.1445 +		reg |= ADVERTISE_100HALF;
  1.1446 +	if (np->speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
  1.1447 +		reg |= ADVERTISE_100FULL;
   1.1448 +	if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
  1.1449 +		reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  1.1450 +	if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
  1.1451 +		reg |= ADVERTISE_PAUSE_ASYM;
  1.1452 +	np->fixed_mode = reg;
  1.1453 +
  1.1454  	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
  1.1455  		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
  1.1456  		return PHY_ERROR;
  1.1457 @@ -1069,11 +1805,15 @@ static int phy_init(struct net_device *d
  1.1458  		np->gigabit = PHY_GIGABIT;
  1.1459  		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
  1.1460  		mii_control_1000 &= ~ADVERTISE_1000HALF;
  1.1461 -		if (phyinterface & PHY_RGMII)
  1.1462 +		if (phyinterface & PHY_RGMII && 
  1.1463 +				(np->speed_duplex == NV_SPEED_DUPLEX_AUTO || 
  1.1464 +				 (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_ENABLE)))
  1.1465  			mii_control_1000 |= ADVERTISE_1000FULL;
  1.1466 -		else
  1.1467 +		else {
  1.1468 +			if (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_DISABLE)
   1.1469 +				printk(KERN_INFO "%s: 1000mbps full only allowed with autoneg\n", pci_name(np->pci_dev));
  1.1470  			mii_control_1000 &= ~ADVERTISE_1000FULL;
  1.1471 -
  1.1472 +		}
  1.1473  		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
  1.1474  			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1475  			return PHY_ERROR;
  1.1476 @@ -1082,8 +1822,25 @@ static int phy_init(struct net_device *d
  1.1477  	else
  1.1478  		np->gigabit = 0;
  1.1479  
  1.1480 -	/* reset the phy */
  1.1481 -	if (phy_reset(dev)) {
  1.1482 +	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1.1483 +	if (np->autoneg == AUTONEG_DISABLE){
  1.1484 +		np->pause_flags &= ~(NV_PAUSEFRAME_RX_ENABLE | NV_PAUSEFRAME_TX_ENABLE);
  1.1485 +		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)
  1.1486 +			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
  1.1487 +		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
  1.1488 +			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
  1.1489 +		mii_control &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
  1.1490 +		if (reg & (ADVERTISE_10FULL|ADVERTISE_100FULL))
  1.1491 +			mii_control |= BMCR_FULLDPLX;
  1.1492 +		if (reg & (ADVERTISE_100HALF|ADVERTISE_100FULL))
  1.1493 +			mii_control |= BMCR_SPEED100;
  1.1494 +	} else {
  1.1495 +		mii_control |= BMCR_ANENABLE;
  1.1496 +	}
  1.1497 +
   1.1498 +	/* reset the phy and set up BMCR
   1.1499 +	 * (certain phys need the reset at the same time the new values are set) */
  1.1500 +	if (phy_reset(dev, mii_control)) {
  1.1501  		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
  1.1502  		return PHY_ERROR;
  1.1503  	}
  1.1504 @@ -1091,14 +1848,14 @@ static int phy_init(struct net_device *d
  1.1505  	/* phy vendor specific configuration */
  1.1506  	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
  1.1507  		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
  1.1508 -		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
  1.1509 -		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
  1.1510 +		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
  1.1511 +		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
  1.1512  		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
  1.1513  			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1514  			return PHY_ERROR;
  1.1515  		}
  1.1516  		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
  1.1517 -		phy_reserved |= PHY_INIT5;
  1.1518 +		phy_reserved |= PHY_CICADA_INIT5;
  1.1519  		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
  1.1520  			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1521  			return PHY_ERROR;
  1.1522 @@ -1106,20 +1863,114 @@ static int phy_init(struct net_device *d
  1.1523  	}
  1.1524  	if (np->phy_oui == PHY_OUI_CICADA) {
  1.1525  		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
  1.1526 -		phy_reserved |= PHY_INIT6;
  1.1527 +		phy_reserved |= PHY_CICADA_INIT6;
  1.1528  		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
  1.1529  			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1530  			return PHY_ERROR;
  1.1531  		}
  1.1532  	}
  1.1533 +	if (np->phy_oui == PHY_OUI_VITESSE) {
  1.1534 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
  1.1535 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1536 +			return PHY_ERROR;
  1.1537 +		}		
  1.1538 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
  1.1539 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1540 +			return PHY_ERROR;
  1.1541 +		}		
  1.1542 +		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
  1.1543 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
  1.1544 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1545 +			return PHY_ERROR;
  1.1546 +		}		
  1.1547 +		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
  1.1548 +		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
  1.1549 +		phy_reserved |= PHY_VITESSE_INIT3;
  1.1550 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
  1.1551 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1552 +			return PHY_ERROR;
  1.1553 +		}		
  1.1554 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
  1.1555 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1556 +			return PHY_ERROR;
  1.1557 +		}		
  1.1558 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
  1.1559 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1560 +			return PHY_ERROR;
  1.1561 +		}		
  1.1562 +		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
  1.1563 +		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
  1.1564 +		phy_reserved |= PHY_VITESSE_INIT3;
  1.1565 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
  1.1566 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1567 +			return PHY_ERROR;
  1.1568 +		}		
  1.1569 +		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
  1.1570 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
  1.1571 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1572 +			return PHY_ERROR;
  1.1573 +		}		
  1.1574 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
  1.1575 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1576 +			return PHY_ERROR;
  1.1577 +		}		
  1.1578 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
  1.1579 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1580 +			return PHY_ERROR;
  1.1581 +		}		
  1.1582 +		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
  1.1583 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
  1.1584 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1585 +			return PHY_ERROR;
  1.1586 +		}		
  1.1587 +		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
  1.1588 +		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
  1.1589 +		phy_reserved |= PHY_VITESSE_INIT8;
  1.1590 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
  1.1591 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1592 +			return PHY_ERROR;
  1.1593 +		}		
  1.1594 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
  1.1595 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1596 +			return PHY_ERROR;
  1.1597 +		}		
  1.1598 +		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
  1.1599 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1600 +			return PHY_ERROR;
  1.1601 +		}		
  1.1602 +	}
  1.1603 +	if (np->phy_oui == PHY_OUI_REALTEK) {
  1.1604 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1.1605 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1606 +			return PHY_ERROR;
  1.1607 +		}
  1.1608 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
  1.1609 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1610 +			return PHY_ERROR;
  1.1611 +		}
  1.1612 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
  1.1613 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1614 +			return PHY_ERROR;
  1.1615 +		}
  1.1616 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
  1.1617 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1618 +			return PHY_ERROR;
  1.1619 +		}
  1.1620 +		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
  1.1621 +			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
  1.1622 +			return PHY_ERROR;
  1.1623 +		}
  1.1624 +	}
  1.1625  	/* some phys clear out pause advertisment on reset, set it back */
  1.1626  	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
  1.1627  
  1.1628  	/* restart auto negotiation */
  1.1629 -	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1.1630 -	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
  1.1631 -	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
  1.1632 -		return PHY_ERROR;
  1.1633 +	if (np->autoneg == AUTONEG_ENABLE) {
  1.1634 +		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1.1635 +		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
  1.1636 +		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
  1.1637 +			return PHY_ERROR;
  1.1638 +		}
  1.1639  	}
  1.1640  
  1.1641  	return 0;
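
When autonegotiation is disabled, the code above derives the forced BMCR speed/duplex bits from the same advertisement mask it stores in np->fixed_mode. A userspace-compilable sketch of that mapping (MII constants copied from include/linux/mii.h; fixed_to_bmcr() is a hypothetical helper, not part of the driver):

    #include <stdint.h>

    #define ADVERTISE_10FULL   0x0040
    #define ADVERTISE_100HALF  0x0080
    #define ADVERTISE_100FULL  0x0100
    #define BMCR_FULLDPLX      0x0100
    #define BMCR_SPEED100      0x2000

    /* Mirror of the AUTONEG_DISABLE branch in phy_init() above. */
    static uint16_t fixed_to_bmcr(uint16_t adv)
    {
        uint16_t bmcr = 0;

        if (adv & (ADVERTISE_10FULL | ADVERTISE_100FULL))
            bmcr |= BMCR_FULLDPLX;   /* any full-duplex mode forces FDX */
        if (adv & (ADVERTISE_100HALF | ADVERTISE_100FULL))
            bmcr |= BMCR_SPEED100;   /* any 100 Mbit mode forces 100 */
        return bmcr;
    }
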
  1.1642 @@ -1127,80 +1978,118 @@ static int phy_init(struct net_device *d
  1.1643  
  1.1644  static void nv_start_rx(struct net_device *dev)
  1.1645  {
  1.1646 -	struct fe_priv *np = netdev_priv(dev);
  1.1647 +	struct fe_priv *np = get_nvpriv(dev);
  1.1648  	u8 __iomem *base = get_hwbase(dev);
  1.1649 -
  1.1650 -	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
  1.1651 +	u32 rx_ctrl = readl(base + NvRegReceiverControl);
  1.1652 +
  1.1653 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1654 +
  1.1655  	/* Already running? Stop it. */
  1.1656 -	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
  1.1657 -		writel(0, base + NvRegReceiverControl);
  1.1658 +	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
  1.1659 +		rx_ctrl &= ~NVREG_RCVCTL_START;
  1.1660 +		writel(rx_ctrl, base + NvRegReceiverControl);
  1.1661  		pci_push(base);
  1.1662  	}
  1.1663  	writel(np->linkspeed, base + NvRegLinkSpeed);
  1.1664  	pci_push(base);
  1.1665 -	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
  1.1666 +	rx_ctrl |= NVREG_RCVCTL_START;
  1.1667 +	if (np->mac_in_use)
  1.1668 +		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
  1.1669 +	writel(rx_ctrl, base + NvRegReceiverControl);
  1.1670  	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
  1.1671 -				dev->name, np->duplex, np->linkspeed);
  1.1672 +			dev->name, np->duplex, np->linkspeed);
  1.1673  	pci_push(base);
  1.1674  }
  1.1675  
  1.1676  static void nv_stop_rx(struct net_device *dev)
  1.1677  {
  1.1678 +	struct fe_priv *np = get_nvpriv(dev);
  1.1679  	u8 __iomem *base = get_hwbase(dev);
  1.1680 -
  1.1681 -	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
  1.1682 -	writel(0, base + NvRegReceiverControl);
  1.1683 +	u32 rx_ctrl = readl(base + NvRegReceiverControl);
  1.1684 +
  1.1685 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1686 +	if (!np->mac_in_use)
  1.1687 +		rx_ctrl &= ~NVREG_RCVCTL_START;
  1.1688 +	else
  1.1689 +		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
  1.1690 +	writel(rx_ctrl, base + NvRegReceiverControl);
  1.1691  	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
  1.1692  			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
  1.1693  			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
  1.1694  
  1.1695  	udelay(NV_RXSTOP_DELAY2);
  1.1696 -	writel(0, base + NvRegLinkSpeed);
  1.1697 +	if (!np->mac_in_use)
  1.1698 +		writel(0, base + NvRegLinkSpeed);
  1.1699  }
  1.1700  
  1.1701  static void nv_start_tx(struct net_device *dev)
  1.1702  {
  1.1703 +	struct fe_priv *np = get_nvpriv(dev);
  1.1704  	u8 __iomem *base = get_hwbase(dev);
  1.1705 -
  1.1706 -	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
  1.1707 -	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
  1.1708 +	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
  1.1709 +
  1.1710 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1711 +	tx_ctrl |= NVREG_XMITCTL_START;
  1.1712 +	if (np->mac_in_use)
  1.1713 +		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
  1.1714 +	writel(tx_ctrl, base + NvRegTransmitterControl);
  1.1715  	pci_push(base);
  1.1716  }
  1.1717  
  1.1718  static void nv_stop_tx(struct net_device *dev)
  1.1719  {
  1.1720 +	struct fe_priv *np = get_nvpriv(dev);
  1.1721  	u8 __iomem *base = get_hwbase(dev);
  1.1722 -
  1.1723 -	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
  1.1724 -	writel(0, base + NvRegTransmitterControl);
  1.1725 +	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
  1.1726 +
  1.1727 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1728 +	if (!np->mac_in_use)
  1.1729 +		tx_ctrl &= ~NVREG_XMITCTL_START;
  1.1730 +	else
  1.1731 +		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
  1.1732 +	writel(tx_ctrl, base + NvRegTransmitterControl);
  1.1733  	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
  1.1734  			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
  1.1735  			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
  1.1736  
  1.1737  	udelay(NV_TXSTOP_DELAY2);
  1.1738 -	writel(0, base + NvRegUnknownTransmitterReg);
  1.1739 +	if (!np->mac_in_use)
  1.1740 +		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
  1.1741  }
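
The mac_in_use checks above implement sharing the MAC with a sideband management unit: when the management unit also owns the engines, the driver never clears the START bits, it only toggles the RX/TX "path enable" bits to detach its own data path. A sketch of the stop-side policy (bit positions are placeholders, not the hardware values):

    #include <stdint.h>

    #define XMITCTL_START    (1u << 0)    /* placeholder for NVREG_XMITCTL_START */
    #define XMITCTL_PATH_EN  (1u << 24)   /* placeholder for NVREG_XMITCTL_TX_PATH_EN */

    /* mac_in_use != 0 means a management unit also owns the MAC engines. */
    static uint32_t tx_ctrl_on_stop(uint32_t tx_ctrl, int mac_in_use)
    {
        if (!mac_in_use)
            tx_ctrl &= ~XMITCTL_START;    /* sole owner: really stop the engine */
        else
            tx_ctrl |= XMITCTL_PATH_EN;   /* shared MAC: only detach our path */
        return tx_ctrl;
    }

nv_start_tx() does the inverse: it always sets START and, on a shared MAC, clears the path bit again.
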
  1.1742  
  1.1743  static void nv_txrx_reset(struct net_device *dev)
  1.1744  {
  1.1745 -	struct fe_priv *np = netdev_priv(dev);
  1.1746 +	struct fe_priv *np = get_nvpriv(dev);
  1.1747  	u8 __iomem *base = get_hwbase(dev);
  1.1748 -
  1.1749 -	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
  1.1750 +	unsigned int i;
  1.1751 +
  1.1752 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1753 +	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
  1.1754 +	for(i=0;i<10000;i++){
  1.1755 +		udelay(1);
  1.1756 +		if(readl(base+NvRegTxRxControl) & NVREG_TXRXCTL_IDLE)	
  1.1757 +			break;
  1.1758 +	}
  1.1759  	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
  1.1760  	pci_push(base);
  1.1761  	udelay(NV_TXRX_RESET_DELAY);
  1.1762 -	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
  1.1763  	pci_push(base);
  1.1764  }
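
nv_txrx_reset() now waits for the engines to drain before asserting the reset: up to 10000 polls, one microsecond apart, for NVREG_TXRXCTL_IDLE. The same bounded busy-wait, modelled as standalone C (a real caller would udelay(1) where the comment indicates):

    #include <stdint.h>

    #define TXRXCTL_IDLE  (1u << 4)   /* placeholder for NVREG_TXRXCTL_IDLE */

    static int wait_txrx_idle(const volatile uint32_t *txrxctl)
    {
        int i;

        for (i = 0; i < 10000; i++) {
            /* the driver sleeps udelay(1) per iteration, ~10 ms total */
            if (*txrxctl & TXRXCTL_IDLE)
                return 1;             /* engines idle: safe to reset */
        }
        return 0;                     /* timed out; reset proceeds anyway */
    }
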
  1.1765  
  1.1766  static void nv_mac_reset(struct net_device *dev)
  1.1767  {
  1.1768 -	struct fe_priv *np = netdev_priv(dev);
  1.1769 +	struct fe_priv *np = get_nvpriv(dev);
  1.1770  	u8 __iomem *base = get_hwbase(dev);
  1.1771 -
  1.1772 -	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
  1.1773 +	u32 temp1,temp2,temp3;
  1.1774 +
  1.1775 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.1776  	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
  1.1777 +
  1.1778 +	/* save registers since they will be cleared on reset */
  1.1779 +	temp1 = readl(base + NvRegMacAddrA);
  1.1780 +	temp2 = readl(base + NvRegMacAddrB);
  1.1781 +	temp3 = readl(base + NvRegTransmitPoll);
  1.1782 +
  1.1783  	pci_push(base);
  1.1784  	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
  1.1785  	pci_push(base);
  1.1786 @@ -1208,89 +2097,208 @@ static void nv_mac_reset(struct net_devi
  1.1787  	writel(0, base + NvRegMacReset);
  1.1788  	pci_push(base);
  1.1789  	udelay(NV_MAC_RESET_DELAY);
  1.1790 +
  1.1791 +	/* restore saved registers */
  1.1792 +	writel(temp1, base + NvRegMacAddrA);
  1.1793 +	writel(temp2, base + NvRegMacAddrB);
  1.1794 +	writel(temp3, base + NvRegTransmitPoll);
  1.1795 +
  1.1796  	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
  1.1797  	pci_push(base);
  1.1798  }
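
The MAC reset clobbers the station address and transmit-poll registers, hence the save/restore of NvRegMacAddrA/B and NvRegTransmitPoll around it. For reference, the 6-byte address is packed into those two words roughly like this (byte order as in the driver's MAC-write helper elsewhere in this file; treat the sketch as illustrative):

    #include <stdint.h>

    static void pack_mac(const uint8_t mac[6], uint32_t *addr_a, uint32_t *addr_b)
    {
        *addr_a = (uint32_t)mac[3] << 24 | (uint32_t)mac[2] << 16 |
                  (uint32_t)mac[1] << 8  | mac[0];
        *addr_b = (uint32_t)mac[5] << 8  | mac[4];
    }
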
  1.1799  
  1.1800 +#if NVVER < SLES9
  1.1801 +static int nv_ethtool_ioctl(struct net_device *dev, void *useraddr)
  1.1802 +{
  1.1803 +	struct fe_priv *np = get_nvpriv(dev);
  1.1804 +	u8 *base = get_hwbase(dev);
  1.1805 +	u32 ethcmd;
  1.1806 +
  1.1807 +	if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
  1.1808 +		return -EFAULT;
  1.1809 +
  1.1810 +	switch (ethcmd) {
  1.1811 +		case ETHTOOL_GDRVINFO:
  1.1812 +			{
  1.1813 +				struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
  1.1814 +				strcpy(info.driver, "forcedeth");
  1.1815 +				strcpy(info.version, FORCEDETH_VERSION);
  1.1816 +				strcpy(info.bus_info, pci_name(np->pci_dev));
  1.1817 +				if (copy_to_user(useraddr, &info, sizeof (info)))
  1.1818 +					return -EFAULT;
  1.1819 +				return 0;
  1.1820 +			}
  1.1821 +		case ETHTOOL_GLINK:
  1.1822 +			{
  1.1823 +				struct ethtool_value edata = { ETHTOOL_GLINK };
  1.1824 +
  1.1825 +				edata.data = !!netif_carrier_ok(dev);
  1.1826 +
  1.1827 +				if (copy_to_user(useraddr, &edata, sizeof(edata)))
  1.1828 +					return -EFAULT;
  1.1829 +				return 0;
  1.1830 +			}
  1.1831 +		case ETHTOOL_GWOL:
  1.1832 +			{
  1.1833 +				struct ethtool_wolinfo wolinfo;
  1.1834 +				memset(&wolinfo, 0, sizeof(wolinfo));
  1.1835 +				wolinfo.supported = WAKE_MAGIC;
  1.1836 +
  1.1837 +				spin_lock_irq(&np->lock);
  1.1838 +				if (np->wolenabled)
  1.1839 +					wolinfo.wolopts = WAKE_MAGIC;
  1.1840 +				spin_unlock_irq(&np->lock);
  1.1841 +
  1.1842 +				if (copy_to_user(useraddr, &wolinfo, sizeof(wolinfo)))
  1.1843 +					return -EFAULT;
  1.1844 +				return 0;
  1.1845 +			}
  1.1846 +		case ETHTOOL_SWOL:
  1.1847 +			{
  1.1848 +				struct ethtool_wolinfo wolinfo;
  1.1849 +				if (copy_from_user(&wolinfo, useraddr, sizeof(wolinfo)))
  1.1850 +					return -EFAULT;
  1.1851 +
  1.1852 +				spin_lock_irq(&np->lock);
  1.1853 +				if (wolinfo.wolopts == 0) {
  1.1854 +					writel(0, base + NvRegWakeUpFlags);
  1.1855 +					np->wolenabled = NV_WOL_DISABLED;
  1.1856 +				}
  1.1857 +				if (wolinfo.wolopts & WAKE_MAGIC) {
  1.1858 +					writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
  1.1859 +					np->wolenabled = NV_WOL_ENABLED;
  1.1860 +				}
  1.1861 +				spin_unlock_irq(&np->lock);
  1.1862 +				return 0;
  1.1863 +			}
  1.1864 +
  1.1865 +		default:
  1.1866 +			break;
  1.1867 +	}
  1.1868 +
  1.1869 +	return -EOPNOTSUPP;
  1.1870 +}
  1.1871 +
  1.1872  /*
  1.1873 - * nv_get_stats: dev->get_stats function
  1.1874 - * Get latest stats value from the nic.
  1.1875 - * Called with read_lock(&dev_base_lock) held for read -
  1.1876 - * only synchronized against unregister_netdevice.
  1.1877 + * nv_ioctl: dev->do_ioctl function
  1.1878 + * Called with rtnl_lock held.
  1.1879   */
  1.1880 -static struct net_device_stats *nv_get_stats(struct net_device *dev)
  1.1881 +static int nv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  1.1882  {
  1.1883 -	struct fe_priv *np = netdev_priv(dev);
  1.1884 -
  1.1885 -	/* It seems that the nic always generates interrupts and doesn't
  1.1886 -	 * accumulate errors internally. Thus the current values in np->stats
  1.1887 -	 * are already up to date.
  1.1888 -	 */
  1.1889 -	return &np->stats;
  1.1890 +	switch(cmd) {
  1.1891 +		case SIOCETHTOOL:
  1.1892 +			return nv_ethtool_ioctl(dev, rq->ifr_data);
  1.1893 +
  1.1894 +		default:
  1.1895 +			return -EOPNOTSUPP;
  1.1896 +	}
  1.1897  }
  1.1898 +#endif
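
On pre-SLES9 kernels the driver answers SIOCETHTOOL itself through nv_ethtool_ioctl() above instead of registering an ethtool_ops table. A minimal userspace caller for the ETHTOOL_GLINK sub-command (the interface name is an example):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* example interface */
        ifr.ifr_data = (char *)&edata;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
            perror("SIOCETHTOOL");
            close(fd);
            return 1;
        }
        printf("link is %s\n", edata.data ? "up" : "down");
        close(fd);
        return 0;
    }
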
  1.1899  
  1.1900  /*
  1.1901   * nv_alloc_rx: fill rx ring entries.
  1.1902   * Return 1 if the allocations for the skbs failed and the
  1.1903   * rx engine is without Available descriptors
  1.1904   */
  1.1905 -static int nv_alloc_rx(struct net_device *dev)
  1.1906 +static inline int nv_alloc_rx(struct net_device *dev)
  1.1907  {
  1.1908 -	struct fe_priv *np = netdev_priv(dev);
  1.1909 -	unsigned int refill_rx = np->refill_rx;
  1.1910 -	int nr;
  1.1911 -
  1.1912 -	while (np->cur_rx != refill_rx) {
  1.1913 -		struct sk_buff *skb;
  1.1914 -
  1.1915 -		nr = refill_rx % np->rx_ring_size;
  1.1916 -		if (np->rx_skbuff[nr] == NULL) {
  1.1917 -
  1.1918 -			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
  1.1919 -			if (!skb)
  1.1920 -				break;
  1.1921 -
  1.1922 +	struct fe_priv *np = get_nvpriv(dev);
  1.1923 +	struct ring_desc* less_rx;
  1.1924 +	struct sk_buff *skb;
  1.1925 +
  1.1926 +	less_rx = np->get_rx.orig;
  1.1927 +	if (less_rx-- == np->first_rx.orig)
  1.1928 +		less_rx = np->last_rx.orig;
  1.1929 +
  1.1930 +	while (np->put_rx.orig != less_rx) {
  1.1931 +		skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
  1.1932 +		if (skb) {
  1.1933  			skb->dev = dev;
  1.1934 -			np->rx_skbuff[nr] = skb;
  1.1935 +			np->put_rx_ctx->skb = skb;
  1.1936 +#if NVVER > FEDORA7
  1.1937 +			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
  1.1938 +					skb_tailroom(skb), PCI_DMA_FROMDEVICE);
  1.1939 +			np->put_rx_ctx->dma_len = skb_tailroom(skb);
  1.1940 +#else
  1.1941 +			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
  1.1942 +					skb->end-skb->data, PCI_DMA_FROMDEVICE);
  1.1943 +			np->put_rx_ctx->dma_len = skb->end-skb->data;
  1.1944 +#endif
  1.1945 +			np->put_rx.orig->PacketBuffer = cpu_to_le32(np->put_rx_ctx->dma);
  1.1946 +			wmb();
  1.1947 +			np->put_rx.orig->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
  1.1948 +			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
  1.1949 +				np->put_rx.orig = np->first_rx.orig;
  1.1950 +			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
  1.1951 +				np->put_rx_ctx = np->first_rx_ctx;
  1.1952  		} else {
  1.1953 -			skb = np->rx_skbuff[nr];
  1.1954 +			return 1;
  1.1955  		}
  1.1956 -		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
  1.1957 +	}
  1.1958 +	return 0;
  1.1959 +}
  1.1960 +
  1.1961 +static inline int nv_alloc_rx_optimized(struct net_device *dev)
  1.1962 +{
  1.1963 +	struct fe_priv *np = get_nvpriv(dev);
  1.1964 +	struct ring_desc_ex* less_rx;
  1.1965 +	struct sk_buff *skb;
  1.1966 +
  1.1967 +	less_rx = np->get_rx.ex;
  1.1968 +	if (less_rx-- == np->first_rx.ex)
  1.1969 +		less_rx = np->last_rx.ex;
  1.1970 +
  1.1971 +	while (np->put_rx.ex != less_rx) {
  1.1972 +		skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
  1.1973 +		if (skb) {
  1.1974 +			skb->dev = dev;
  1.1975 +			np->put_rx_ctx->skb = skb;
  1.1976 +#if NVVER > FEDORA7
  1.1977 +			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
  1.1978 +					skb_tailroom(skb), PCI_DMA_FROMDEVICE);
  1.1979 +			np->put_rx_ctx->dma_len = skb_tailroom(skb);
  1.1980 +#else
  1.1981 +			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
  1.1982  					skb->end-skb->data, PCI_DMA_FROMDEVICE);
  1.1983 -		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.1984 -			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
  1.1985 +			np->put_rx_ctx->dma_len = skb->end-skb->data;
  1.1986 +#endif
  1.1987 +			np->put_rx.ex->PacketBufferHigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
  1.1988 +			np->put_rx.ex->PacketBufferLow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;			
  1.1989  			wmb();
  1.1990 -			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
  1.1991 +			np->put_rx.ex->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
  1.1992 +			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
  1.1993 +				np->put_rx.ex = np->first_rx.ex;
  1.1994 +			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
  1.1995 +				np->put_rx_ctx = np->first_rx_ctx;
  1.1996  		} else {
  1.1997 -			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
  1.1998 -			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
  1.1999 -			wmb();
  1.2000 -			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
  1.2001 +			return 1;
  1.2002  		}
  1.2003 -		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
  1.2004 -					dev->name, refill_rx);
  1.2005 -		refill_rx++;
  1.2006 -	}
  1.2007 -	np->refill_rx = refill_rx;
  1.2008 -	if (np->cur_rx - refill_rx == np->rx_ring_size)
  1.2009 -		return 1;
  1.2010 +	}
  1.2011  	return 0;
  1.2012 +
  1.2013  }
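
Both allocators stop one slot short of the consumer (the "less_rx" sentinel above), so the put and get pointers are equal only when the ring is empty and full/empty never become ambiguous. The invariant, modelled with indices instead of wrapped pointers:

    #include <stddef.h>
    #include <assert.h>

    /* How many descriptors the producer may still fill before it would
     * collide with the consumer slot that is deliberately left free. */
    static size_t slots_fillable(size_t put, size_t get, size_t ring_size)
    {
        return (get + ring_size - put - 1) % ring_size;
    }

    int main(void)
    {
        assert(slots_fillable(0, 0, 128) == 127);   /* empty ring: all but one */
        assert(slots_fillable(127, 0, 128) == 0);   /* producer caught up: stop */
        return 0;
    }
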
  1.2014  
  1.2015  static void nv_do_rx_refill(unsigned long data)
  1.2016  {
  1.2017  	struct net_device *dev = (struct net_device *) data;
  1.2018 -	struct fe_priv *np = netdev_priv(dev);
  1.2019 -
  1.2020 +	struct fe_priv *np = get_nvpriv(dev);
  1.2021 +	int retcode;
  1.2022 +
  1.2023 +	spin_lock_irq(&np->timer_lock);
  1.2024  	if (!using_multi_irqs(dev)) {
  1.2025  		if (np->msi_flags & NV_MSI_X_ENABLED)
  1.2026  			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1.2027  		else
  1.2028 -			disable_irq(dev->irq);
  1.2029 +			disable_irq(np->pci_dev->irq);
  1.2030  	} else {
  1.2031  		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1.2032  	}
  1.2033 -	if (nv_alloc_rx(dev)) {
  1.2034 +
  1.2035 +	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2036 +		retcode = nv_alloc_rx(dev);
  1.2037 +	else
  1.2038 +		retcode = nv_alloc_rx_optimized(dev);
  1.2039 +	if (retcode) {
  1.2040  		spin_lock_irq(&np->lock);
  1.2041  		if (!np->in_shutdown)
  1.2042  			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
  1.2043 @@ -1300,66 +2308,96 @@ static void nv_do_rx_refill(unsigned lon
  1.2044  		if (np->msi_flags & NV_MSI_X_ENABLED)
  1.2045  			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1.2046  		else
  1.2047 -			enable_irq(dev->irq);
  1.2048 +			enable_irq(np->pci_dev->irq);
  1.2049  	} else {
  1.2050  		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1.2051  	}
  1.2052 +	spin_unlock_irq(&np->timer_lock);
  1.2053  }
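
If the refill fails for lack of memory, the work is re-armed through the oom_kick timer rather than retried inline. A sketch of how such a timer is wired to nv_do_rx_refill() with the 2.6.18-era timer API (illustrative fragment; field and constant names follow the driver, but this is not a copy of its probe code):

    /* at probe time */
    init_timer(&np->oom_kick);
    np->oom_kick.data = (unsigned long)dev;        /* handed back as 'data' */
    np->oom_kick.function = nv_do_rx_refill;       /* timer callback */

    /* on allocation failure, as above */
    mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
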
  1.2054  
  1.2055 -static void nv_init_rx(struct net_device *dev)
  1.2056 +static void nv_init_rx(struct net_device *dev) 
  1.2057  {
  1.2058 -	struct fe_priv *np = netdev_priv(dev);
  1.2059 +	struct fe_priv *np = get_nvpriv(dev);
  1.2060  	int i;
  1.2061  
  1.2062 -	np->cur_rx = np->rx_ring_size;
  1.2063 -	np->refill_rx = 0;
  1.2064 -	for (i = 0; i < np->rx_ring_size; i++)
  1.2065 -		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2066 +	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
  1.2067 +	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2068 +		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
  1.2069 +	else
  1.2070 +		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
  1.2071 +	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
  1.2072 +	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
  1.2073 +
  1.2074 +	for (i = 0; i < np->rx_ring_size; i++) {
  1.2075 +		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2076  			np->rx_ring.orig[i].FlagLen = 0;
  1.2077 -	        else
  1.2078 +			np->rx_ring.orig[i].PacketBuffer = 0;
  1.2079 +		} else {
  1.2080  			np->rx_ring.ex[i].FlagLen = 0;
  1.2081 +			np->rx_ring.ex[i].TxVlan = 0;
  1.2082 +			np->rx_ring.ex[i].PacketBufferHigh = 0;
  1.2083 +			np->rx_ring.ex[i].PacketBufferLow = 0;
  1.2084 +		}
  1.2085 +		np->rx_skb[i].skb = NULL;
  1.2086 +		np->rx_skb[i].dma = 0;
  1.2087 +	}
  1.2088  }
  1.2089  
  1.2090  static void nv_init_tx(struct net_device *dev)
  1.2091  {
  1.2092 -	struct fe_priv *np = netdev_priv(dev);
  1.2093 +	struct fe_priv *np = get_nvpriv(dev);
  1.2094  	int i;
  1.2095  
  1.2096 -	np->next_tx = np->nic_tx = 0;
  1.2097 +	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
  1.2098 +	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2099 +		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
  1.2100 +	else
  1.2101 +		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
  1.2102 +	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
  1.2103 +	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
  1.2104 +
  1.2105  	for (i = 0; i < np->tx_ring_size; i++) {
  1.2106 -		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2107 +		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2108  			np->tx_ring.orig[i].FlagLen = 0;
  1.2109 -	        else
  1.2110 +			np->tx_ring.orig[i].PacketBuffer = 0;
  1.2111 +		} else {
  1.2112  			np->tx_ring.ex[i].FlagLen = 0;
  1.2113 -		np->tx_skbuff[i] = NULL;
  1.2114 -		np->tx_dma[i] = 0;
  1.2115 +			np->tx_ring.ex[i].TxVlan = 0;
  1.2116 +			np->tx_ring.ex[i].PacketBufferHigh = 0;
  1.2117 +			np->tx_ring.ex[i].PacketBufferLow = 0;
  1.2118 +		}
  1.2119 +		np->tx_skb[i].skb = NULL;
  1.2120 +		np->tx_skb[i].dma = 0;
  1.2121  	}
  1.2122  }
  1.2123  
  1.2124  static int nv_init_ring(struct net_device *dev)
  1.2125  {
  1.2126 +	struct fe_priv *np = get_nvpriv(dev);
  1.2127  	nv_init_tx(dev);
  1.2128  	nv_init_rx(dev);
  1.2129 -	return nv_alloc_rx(dev);
  1.2130 +	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2131 +		return nv_alloc_rx(dev);
  1.2132 +	else
  1.2133 +		return nv_alloc_rx_optimized(dev);
  1.2134  }
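
nv_init_ring() dispatches on the descriptor version because the two layouts differ in size and addressing: the legacy descriptor carries one 32-bit buffer pointer, while the "ex" descriptor splits a 64-bit address across two words and adds the VLAN tag used by the optimized xmit path. The shapes, with field names and order as used throughout this patch:

    #include <stdint.h>

    struct ring_desc {              /* DESC_VER_1 / DESC_VER_2 */
        uint32_t PacketBuffer;      /* 32-bit little-endian DMA address */
        uint32_t FlagLen;           /* ownership/status flags + length */
    };

    struct ring_desc_ex {           /* DESC_VER_3, 64-bit capable */
        uint32_t PacketBufferHigh;
        uint32_t PacketBufferLow;
        uint32_t TxVlan;
        uint32_t FlagLen;
    };
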
  1.2135  
  1.2136  static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
  1.2137  {
  1.2138 -	struct fe_priv *np = netdev_priv(dev);
  1.2139 +	struct fe_priv *np = get_nvpriv(dev);
  1.2140  
  1.2141  	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
  1.2142 -		dev->name, skbnr);
  1.2143 -
  1.2144 -	if (np->tx_dma[skbnr]) {
  1.2145 -		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
  1.2146 -			       np->tx_dma_len[skbnr],
  1.2147 -			       PCI_DMA_TODEVICE);
  1.2148 -		np->tx_dma[skbnr] = 0;
  1.2149 -	}
  1.2150 -
  1.2151 -	if (np->tx_skbuff[skbnr]) {
  1.2152 -		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
  1.2153 -		np->tx_skbuff[skbnr] = NULL;
  1.2154 +			dev->name, skbnr);
  1.2155 +
  1.2156 +	if (np->tx_skb[skbnr].dma) {
  1.2157 +		pci_unmap_page(np->pci_dev, np->tx_skb[skbnr].dma,
  1.2158 +				np->tx_skb[skbnr].dma_len,
  1.2159 +				PCI_DMA_TODEVICE);
  1.2160 +		np->tx_skb[skbnr].dma = 0;
  1.2161 +	}
  1.2162 +	if (np->tx_skb[skbnr].skb) {
  1.2163 +		dev_kfree_skb_any(np->tx_skb[skbnr].skb);
  1.2164 +		np->tx_skb[skbnr].skb = NULL;
  1.2165  		return 1;
  1.2166  	} else {
  1.2167  		return 0;
  1.2168 @@ -1368,14 +2406,19 @@ static int nv_release_txskb(struct net_d
  1.2169  
  1.2170  static void nv_drain_tx(struct net_device *dev)
  1.2171  {
  1.2172 -	struct fe_priv *np = netdev_priv(dev);
  1.2173 +	struct fe_priv *np = get_nvpriv(dev);
  1.2174  	unsigned int i;
  1.2175  
  1.2176  	for (i = 0; i < np->tx_ring_size; i++) {
  1.2177 -		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2178 +		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2179  			np->tx_ring.orig[i].FlagLen = 0;
  1.2180 -		else
  1.2181 +			np->tx_ring.orig[i].PacketBuffer = 0;
  1.2182 +		} else {
  1.2183  			np->tx_ring.ex[i].FlagLen = 0;
  1.2184 +			np->tx_ring.ex[i].TxVlan = 0;
  1.2185 +			np->tx_ring.ex[i].PacketBufferHigh = 0;
  1.2186 +			np->tx_ring.ex[i].PacketBufferLow = 0;
  1.2187 +		}
  1.2188  		if (nv_release_txskb(dev, i))
  1.2189  			np->stats.tx_dropped++;
  1.2190  	}
  1.2191 @@ -1383,20 +2426,31 @@ static void nv_drain_tx(struct net_devic
  1.2192  
  1.2193  static void nv_drain_rx(struct net_device *dev)
  1.2194  {
  1.2195 -	struct fe_priv *np = netdev_priv(dev);
  1.2196 +	struct fe_priv *np = get_nvpriv(dev);
  1.2197  	int i;
  1.2198  	for (i = 0; i < np->rx_ring_size; i++) {
  1.2199 -		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2200 +		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2201  			np->rx_ring.orig[i].FlagLen = 0;
  1.2202 -		else
  1.2203 +			np->rx_ring.orig[i].PacketBuffer = 0;
  1.2204 +		} else {
  1.2205  			np->rx_ring.ex[i].FlagLen = 0;
  1.2206 +			np->rx_ring.ex[i].TxVlan = 0;
  1.2207 +			np->rx_ring.ex[i].PacketBufferHigh = 0;
  1.2208 +			np->rx_ring.ex[i].PacketBufferLow = 0;
  1.2209 +		}
  1.2210  		wmb();
  1.2211 -		if (np->rx_skbuff[i]) {
  1.2212 -			pci_unmap_single(np->pci_dev, np->rx_dma[i],
  1.2213 -						np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
  1.2214 -						PCI_DMA_FROMDEVICE);
  1.2215 -			dev_kfree_skb(np->rx_skbuff[i]);
  1.2216 -			np->rx_skbuff[i] = NULL;
  1.2217 +		if (np->rx_skb[i].skb) {
  1.2218 +#if NVVER > FEDORA7
  1.2219 +			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
  1.2220 +					(skb_end_pointer(np->rx_skb[i].skb) - np->rx_skb[i].skb->data),
  1.2221 +					PCI_DMA_FROMDEVICE);
  1.2222 +#else
  1.2223 +			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
  1.2224 +					np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
  1.2225 +					PCI_DMA_FROMDEVICE);
  1.2226 +#endif
  1.2227 +			dev_kfree_skb(np->rx_skb[i].skb);
  1.2228 +			np->rx_skb[i].skb = NULL;
  1.2229  		}
  1.2230  	}
  1.2231  }
  1.2232 @@ -1409,134 +2463,245 @@ static void drain_ring(struct net_device
  1.2233  
  1.2234  /*
  1.2235   * nv_start_xmit: dev->hard_start_xmit function
  1.2236 - * Called with netif_tx_lock held.
  1.2237 + * Called with dev->xmit_lock held.
  1.2238   */
  1.2239  static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1.2240  {
  1.2241 -	struct fe_priv *np = netdev_priv(dev);
  1.2242 +	struct fe_priv *np = get_nvpriv(dev);
  1.2243  	u32 tx_flags = 0;
  1.2244  	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
  1.2245  	unsigned int fragments = skb_shinfo(skb)->nr_frags;
  1.2246 -	unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
  1.2247 -	unsigned int start_nr = np->next_tx % np->tx_ring_size;
  1.2248  	unsigned int i;
  1.2249  	u32 offset = 0;
  1.2250  	u32 bcnt;
  1.2251  	u32 size = skb->len-skb->data_len;
  1.2252  	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  1.2253 -	u32 tx_flags_vlan = 0;
  1.2254 -
  1.2255 +	u32 empty_slots;
  1.2256 +	struct ring_desc* put_tx;
  1.2257 +	struct ring_desc* start_tx;
  1.2258 +	struct ring_desc* prev_tx;
  1.2259 +	struct nv_skb_map* prev_tx_ctx;
  1.2260 +
  1.2261 +	dprintk(KERN_DEBUG "%s:%s\n", dev->name, __FUNCTION__);
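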
  1.2262  	/* add fragments to entries count */
  1.2263  	for (i = 0; i < fragments; i++) {
  1.2264  		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
  1.2265 -			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  1.2266 -	}
  1.2267 -
  1.2268 -	spin_lock_irq(&np->lock);
  1.2269 -
  1.2270 -	if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
  1.2271 -		spin_unlock_irq(&np->lock);
  1.2272 -		netif_stop_queue(dev);
  1.2273 -		return NETDEV_TX_BUSY;
  1.2274 -	}
  1.2275 -
  1.2276 -	/* setup the header buffer */
  1.2277 -	do {
  1.2278 -		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  1.2279 -		nr = (nr + 1) % np->tx_ring_size;
  1.2280 -
  1.2281 -		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
  1.2282 -						PCI_DMA_TODEVICE);
  1.2283 -		np->tx_dma_len[nr] = bcnt;
  1.2284 -
  1.2285 -		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2286 -			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
  1.2287 -			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
  1.2288 -		} else {
  1.2289 -			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
  1.2290 -			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
  1.2291 -			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
  1.2292 -		}
  1.2293 -		tx_flags = np->tx_flags;
  1.2294 -		offset += bcnt;
  1.2295 -		size -= bcnt;
  1.2296 -	} while(size);
  1.2297 -
  1.2298 -	/* setup the fragments */
  1.2299 -	for (i = 0; i < fragments; i++) {
  1.2300 -		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1.2301 -		u32 size = frag->size;
  1.2302 -		offset = 0;
  1.2303 -
  1.2304 +			((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  1.2305 +	}
  1.2306 +
  1.2307 +	empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
  1.2308 +	if (likely(empty_slots > entries)) {
  1.2309 +
  1.2310 +		start_tx = put_tx = np->put_tx.orig;
  1.2311 +
  1.2312 +		/* setup the header buffer */
  1.2313  		do {
  1.2314 +			prev_tx = put_tx;
  1.2315 +			prev_tx_ctx = np->put_tx_ctx;
  1.2316  			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  1.2317 -			nr = (nr + 1) % np->tx_ring_size;
  1.2318 -
  1.2319 -			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
  1.2320 -						      PCI_DMA_TODEVICE);
  1.2321 -			np->tx_dma_len[nr] = bcnt;
  1.2322 -
  1.2323 -			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2324 -				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
  1.2325 -				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
  1.2326 -			} else {
  1.2327 -				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
  1.2328 -				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
  1.2329 -				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
  1.2330 -			}
  1.2331 +			np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
  1.2332 +					PCI_DMA_TODEVICE);
  1.2333 +			np->put_tx_ctx->dma_len = bcnt;
  1.2334 +			put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
  1.2335 +			put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
  1.2336 +
  1.2337 +			tx_flags = np->tx_flags;
  1.2338  			offset += bcnt;
  1.2339  			size -= bcnt;
  1.2340 -		} while (size);
  1.2341 -	}
  1.2342 -
  1.2343 -	/* set last fragment flag  */
  1.2344 -	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2345 -		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
  1.2346 -	} else {
  1.2347 -		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
  1.2348 -	}
  1.2349 -
  1.2350 -	np->tx_skbuff[nr] = skb;
  1.2351 +			if (unlikely(put_tx++ == np->last_tx.orig))
  1.2352 +				put_tx = np->first_tx.orig;
  1.2353 +			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
  1.2354 +				np->put_tx_ctx = np->first_tx_ctx;
  1.2355 +		} while(size);
  1.2356 +
  1.2357 +		/* setup the fragments */
  1.2358 +		for (i = 0; i < fragments; i++) {
  1.2359 +			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1.2360 +			u32 size = frag->size;
  1.2361 +			offset = 0;
  1.2362 +
  1.2363 +			do {
  1.2364 +				prev_tx = put_tx;
  1.2365 +				prev_tx_ctx = np->put_tx_ctx;
  1.2366 +				bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  1.2367 +
  1.2368 +				np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
  1.2369 +						PCI_DMA_TODEVICE);
  1.2370 +				np->put_tx_ctx->dma_len = bcnt;
  1.2371 +
  1.2372 +				put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
  1.2373 +				put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
  1.2374 +				offset += bcnt;
  1.2375 +				size -= bcnt;
  1.2376 +				if (unlikely(put_tx++ == np->last_tx.orig))
  1.2377 +					put_tx = np->first_tx.orig;
  1.2378 +				if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
  1.2379 +					np->put_tx_ctx = np->first_tx_ctx;
  1.2380 +			} while (size);
  1.2381 +		}
  1.2382 +
  1.2383 +		/* set last fragment flag  */
  1.2384 +		prev_tx->FlagLen |= cpu_to_le32(tx_flags_extra);
  1.2385 +
  1.2386 +		/* save skb in this slot's context area */
  1.2387 +		prev_tx_ctx->skb = skb;
  1.2388  
  1.2389  #ifdef NETIF_F_TSO
  1.2390 -	if (skb_is_gso(skb))
  1.2391 -		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  1.2392 -	else
  1.2393 -#endif
  1.2394 -	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
  1.2395 -
  1.2396 -	/* vlan tag */
  1.2397 -	if (np->vlangrp && vlan_tx_tag_present(skb)) {
  1.2398 -		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
  1.2399 -	}
  1.2400 -
  1.2401 -	/* set tx flags */
  1.2402 -	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2403 -		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
  1.2404 +#if NVVER > FEDORA5 
  1.2405 +		if (skb_shinfo(skb)->gso_size)
  1.2406 +			tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  1.2407 +#else
  1.2408 +		if (skb_shinfo(skb)->tso_size)
  1.2409 +			tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
  1.2410 +#endif
  1.2411 +		else
  1.2412 +#endif
  1.2413 +			tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
  1.2414 +
  1.2415 +		spin_lock_irq(&np->lock);
  1.2416 +
  1.2417 +		/* set tx flags */
  1.2418 +		start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
  1.2419 +		np->put_tx.orig = put_tx;
  1.2420 +
  1.2421 +		spin_unlock_irq(&np->lock);
  1.2422 +
  1.2423 +		dev->trans_start = jiffies;
  1.2424 +		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.2425 +		return NETDEV_TX_OK;
  1.2426  	} else {
  1.2427 -		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
  1.2428 -		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
  1.2429 -	}
  1.2430 -
  1.2431 -	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
  1.2432 -		dev->name, np->next_tx, entries, tx_flags_extra);
  1.2433 -	{
  1.2434 -		int j;
  1.2435 -		for (j=0; j<64; j++) {
  1.2436 -			if ((j%16) == 0)
  1.2437 -				dprintk("\n%03x:", j);
  1.2438 -			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
  1.2439 +		spin_lock_irq(&np->lock);
  1.2440 +		netif_stop_queue(dev);
  1.2441 +		np->stop_tx = 1;
  1.2442 +		spin_unlock_irq(&np->lock);
  1.2443 +		return NETDEV_TX_BUSY;
  1.2444 +	}
  1.2445 +}
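
The availability test computes the in-flight count as the put/get distance modulo the ring size and refuses the frame unless strictly more free slots remain than it needs. A compilable model with a worked case:

    #include <assert.h>

    static unsigned int empty_slots(int put, int get, int ring_size)
    {
        return ring_size - ((ring_size + (put - get)) % ring_size);
    }

    int main(void)
    {
        /* 256-entry ring, put at 10, get at 250: 16 in flight, 240 free */
        assert(empty_slots(10, 250, 256) == 240);
        assert(empty_slots(0, 0, 256) == 256);    /* idle ring */
        return 0;
    }
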
  1.2446 +
  1.2447 +static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
  1.2448 +{
  1.2449 +	struct fe_priv *np = get_nvpriv(dev);
  1.2450 +	u32 tx_flags = 0;
  1.2451 +	u32 tx_flags_extra;
  1.2452 +	unsigned int fragments = skb_shinfo(skb)->nr_frags;
  1.2453 +	unsigned int i;
  1.2454 +	u32 offset = 0;
  1.2455 +	u32 bcnt;
  1.2456 +	u32 size = skb->len-skb->data_len;
  1.2457 +	u32 empty_slots;
  1.2458 +	struct ring_desc_ex* put_tx;
  1.2459 +	struct ring_desc_ex* start_tx;
  1.2460 +	struct ring_desc_ex* prev_tx;
  1.2461 +	struct nv_skb_map* prev_tx_ctx;
  1.2462 +
  1.2463 +	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  1.2464 +
  1.2465 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.2466 +	/* add fragments to entries count */
  1.2467 +	for (i = 0; i < fragments; i++) {
  1.2468 +		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
  1.2469 +			((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
  1.2470 +	}
  1.2471 +
  1.2472 +	empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
  1.2473 +	if (likely(empty_slots > entries)) {
  1.2474 +
  1.2475 +		start_tx = put_tx = np->put_tx.ex;
  1.2476 +
  1.2477 +		/* setup the header buffer */
  1.2478 +		do {
  1.2479 +			prev_tx = put_tx;
  1.2480 +			prev_tx_ctx = np->put_tx_ctx;
  1.2481 +			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  1.2482 +			np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
  1.2483 +					PCI_DMA_TODEVICE);
  1.2484 +			np->put_tx_ctx->dma_len = bcnt;
  1.2485 +			put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
  1.2486 +			put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
  1.2487 +			put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
  1.2488 +
  1.2489 +			tx_flags = NV_TX2_VALID;
  1.2490 +			offset += bcnt;
  1.2491 +			size -= bcnt;
  1.2492 +			if (unlikely(put_tx++ == np->last_tx.ex))
  1.2493 +				put_tx = np->first_tx.ex;
  1.2494 +			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
  1.2495 +				np->put_tx_ctx = np->first_tx_ctx;
  1.2496 +		} while(size);
  1.2497 +		/* setup the fragments */
  1.2498 +		for (i = 0; i < fragments; i++) {
  1.2499 +			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1.2500 +			u32 size = frag->size;
  1.2501 +			offset = 0;
  1.2502 +
  1.2503 +			do {
  1.2504 +				prev_tx = put_tx;
  1.2505 +				prev_tx_ctx = np->put_tx_ctx;
  1.2506 +				bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
  1.2507 +
  1.2508 +				np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
  1.2509 +						PCI_DMA_TODEVICE);
  1.2510 +				np->put_tx_ctx->dma_len = bcnt;
  1.2511 +
  1.2512 +				put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
  1.2513 +				put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
  1.2514 +				put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
  1.2515 +				offset += bcnt;
  1.2516 +				size -= bcnt;
  1.2517 +				if (unlikely(put_tx++ == np->last_tx.ex))
  1.2518 +					put_tx = np->first_tx.ex;
  1.2519 +				if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
  1.2520 +					np->put_tx_ctx = np->first_tx_ctx;
  1.2521 +			} while (size);
  1.2522  		}
  1.2523 -		dprintk("\n");
  1.2524 -	}
  1.2525 -
  1.2526 -	np->next_tx += entries;
  1.2527 -
  1.2528 -	dev->trans_start = jiffies;
  1.2529 -	spin_unlock_irq(&np->lock);
  1.2530 -	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.2531 -	pci_push(get_hwbase(dev));
  1.2532 -	return NETDEV_TX_OK;
  1.2533 +
  1.2534 +		/* set last fragment flag  */
  1.2535 +		prev_tx->FlagLen |= cpu_to_le32(NV_TX2_LASTPACKET);
  1.2536 +
  1.2537 +		/* save skb in this slot's context area */
  1.2538 +		prev_tx_ctx->skb = skb;
  1.2539 +
  1.2540 +#ifdef NETIF_F_TSO
  1.2541 +#if NVVER > FEDORA5 
  1.2542 +		if (skb_shinfo(skb)->gso_size)
  1.2543 +			tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
  1.2544 +#else
  1.2545 +		if (skb_shinfo(skb)->tso_size)
  1.2546 +			tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
  1.2547 +#endif
  1.2548 +		else
  1.2549 +#endif
  1.2550 +			tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
  1.2551 +
  1.2552 +		/* vlan tag */
  1.2553 +		if (likely(!np->vlangrp)) {
  1.2554 +			start_tx->TxVlan = 0;
  1.2555 +		} else {
  1.2556 +			if (vlan_tx_tag_present(skb))
  1.2557 +				start_tx->TxVlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
  1.2558 +			else
  1.2559 +				start_tx->TxVlan = 0;
  1.2560 +		}
  1.2561 +
  1.2562 +		spin_lock_irq(&np->lock);
  1.2563 +
  1.2564 +		/* set tx flags */
  1.2565 +		start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
  1.2566 +		np->put_tx.ex = put_tx;
  1.2567 +
  1.2568 +		spin_unlock_irq(&np->lock);
  1.2569 +
  1.2570 +		dev->trans_start = jiffies;
  1.2571 +		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.2572 +		return NETDEV_TX_OK;
  1.2573 +
  1.2574 +	} else {
  1.2575 +		spin_lock_irq(&np->lock);
  1.2576 +		netif_stop_queue(dev);
  1.2577 +		np->stop_tx = 1;
  1.2578 +		spin_unlock_irq(&np->lock);
  1.2579 +		return NETDEV_TX_BUSY;
  1.2580 +	}
  1.2581  }
  1.2582  
  1.2583  /*
  1.2584 @@ -1544,30 +2709,26 @@ static int nv_start_xmit(struct sk_buff 
  1.2585   *
  1.2586   * Caller must own np->lock.
  1.2587   */
  1.2588 -static void nv_tx_done(struct net_device *dev)
  1.2589 +static inline void nv_tx_done(struct net_device *dev)
  1.2590  {
  1.2591 -	struct fe_priv *np = netdev_priv(dev);
  1.2592 +	struct fe_priv *np = get_nvpriv(dev);
  1.2593  	u32 Flags;
  1.2594 -	unsigned int i;
  1.2595 -	struct sk_buff *skb;
  1.2596 -
  1.2597 -	while (np->nic_tx != np->next_tx) {
  1.2598 -		i = np->nic_tx % np->tx_ring_size;
  1.2599 -
  1.2600 -		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2601 -			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
  1.2602 -		else
  1.2603 -			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
  1.2604 -
  1.2605 -		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
  1.2606 -					dev->name, np->nic_tx, Flags);
  1.2607 -		if (Flags & NV_TX_VALID)
  1.2608 -			break;
  1.2609 +	struct ring_desc* orig_get_tx = np->get_tx.orig;
  1.2610 +	struct ring_desc* put_tx = np->put_tx.orig;
  1.2611 +
  1.2612 +	dprintk(KERN_DEBUG "%s:%s\n", dev->name, __FUNCTION__);
  1.2613 +	while ((np->get_tx.orig != put_tx) &&
  1.2614 +			!((Flags = le32_to_cpu(np->get_tx.orig->FlagLen)) & NV_TX_VALID)) {
  1.2615 +		dprintk(KERN_DEBUG "%s: nv_tx_done:NVLAN tx done\n", dev->name);
  1.2616 +
  1.2617 +		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
  1.2618 +				np->get_tx_ctx->dma_len,
  1.2619 +				PCI_DMA_TODEVICE);
  1.2620 +		np->get_tx_ctx->dma = 0;
  1.2621 +
  1.2622  		if (np->desc_ver == DESC_VER_1) {
  1.2623  			if (Flags & NV_TX_LASTPACKET) {
  1.2624 -				skb = np->tx_skbuff[i];
  1.2625 -				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
  1.2626 -					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
  1.2627 +				if (Flags & NV_TX_ERROR) {
  1.2628  					if (Flags & NV_TX_UNDERFLOW)
  1.2629  						np->stats.tx_fifo_errors++;
  1.2630  					if (Flags & NV_TX_CARRIERLOST)
  1.2631 @@ -1575,14 +2736,15 @@ static void nv_tx_done(struct net_device
  1.2632  					np->stats.tx_errors++;
  1.2633  				} else {
  1.2634  					np->stats.tx_packets++;
  1.2635 -					np->stats.tx_bytes += skb->len;
  1.2636 +					np->stats.tx_bytes += np->get_tx_ctx->skb->len;
  1.2637  				}
  1.2638 +				dev_kfree_skb_any(np->get_tx_ctx->skb);
  1.2639 +				np->get_tx_ctx->skb = NULL;
  1.2640 +
  1.2641  			}
  1.2642  		} else {
  1.2643  			if (Flags & NV_TX2_LASTPACKET) {
  1.2644 -				skb = np->tx_skbuff[i];
  1.2645 -				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
  1.2646 -					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
  1.2647 +				if (Flags & NV_TX2_ERROR) {
  1.2648  					if (Flags & NV_TX2_UNDERFLOW)
  1.2649  						np->stats.tx_fifo_errors++;
  1.2650  					if (Flags & NV_TX2_CARRIERLOST)
  1.2651 @@ -1590,27 +2752,74 @@ static void nv_tx_done(struct net_device
  1.2652  					np->stats.tx_errors++;
  1.2653  				} else {
  1.2654  					np->stats.tx_packets++;
  1.2655 -					np->stats.tx_bytes += skb->len;
  1.2656 -				}
  1.2657 +					np->stats.tx_bytes += np->get_tx_ctx->skb->len;
  1.2658 +				}				
  1.2659 +				dev_kfree_skb_any(np->get_tx_ctx->skb);
  1.2660 +				np->get_tx_ctx->skb = NULL;
  1.2661  			}
  1.2662  		}
  1.2663 -		nv_release_txskb(dev, i);
  1.2664 -		np->nic_tx++;
  1.2665 -	}
  1.2666 -	if (np->next_tx - np->nic_tx < np->tx_limit_start)
  1.2667 +
  1.2668 +		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
  1.2669 +			np->get_tx.orig = np->first_tx.orig;
  1.2670 +		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
  1.2671 +			np->get_tx_ctx = np->first_tx_ctx;
  1.2672 +	}
  1.2673 +	if (unlikely((np->stop_tx == 1) && (np->get_tx.orig != orig_get_tx))) {
  1.2674 +		np->stop_tx = 0;
  1.2675  		netif_wake_queue(dev);
  1.2676 +	}
  1.2677 +}
  1.2678 +
  1.2679 +static inline void nv_tx_done_optimized(struct net_device *dev, int max_work)
  1.2680 +{
  1.2681 +	struct fe_priv *np = get_nvpriv(dev);
  1.2682 +	u32 Flags;
  1.2683 +	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
  1.2684 +	struct ring_desc_ex* put_tx = np->put_tx.ex;
  1.2685 +
  1.2686 +	while ((np->get_tx.ex != put_tx) &&
  1.2687 +			!((Flags = le32_to_cpu(np->get_tx.ex->FlagLen)) & NV_TX_VALID) &&
  1.2688 +			(max_work-- > 0)) {
  1.2689 +		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized:NVLAN tx done\n", dev->name);
  1.2690 +
  1.2691 +		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
  1.2692 +				np->get_tx_ctx->dma_len,
  1.2693 +				PCI_DMA_TODEVICE);
  1.2694 +		np->get_tx_ctx->dma = 0;
  1.2695 +
  1.2696 +		if (Flags & NV_TX2_LASTPACKET) {
  1.2697 +			if (!(Flags & NV_TX2_ERROR)) {
  1.2698 +				np->stats.tx_packets++;
  1.2699 +			}
  1.2700 +			dev_kfree_skb_any(np->get_tx_ctx->skb);
  1.2701 +			np->get_tx_ctx->skb = NULL;
  1.2702 +		}
  1.2703 +
  1.2704 +		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
  1.2705 +			np->get_tx.ex = np->first_tx.ex;
  1.2706 +		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
  1.2707 +			np->get_tx_ctx = np->first_tx_ctx;
  1.2708 +	}
  1.2709 +	if (unlikely((np->stop_tx == 1) && (np->get_tx.ex != orig_get_tx))) {
  1.2710 +		np->stop_tx = 0;
  1.2711 +		netif_wake_queue(dev);
  1.2712 +	}
  1.2713  }
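
nv_tx_done_optimized() caps each pass with max_work so one interrupt cannot reap an unbounded number of descriptors; nv_tx_timeout() below passes the full ring size when it wants everything flushed. The loop shape, with hypothetical desc_done()/reclaim() standing in for the flag test and skb teardown:

    /* Returns how many descriptors were reclaimed within the budget. */
    static int reclaim_tx(int get, int put, int ring_size, int max_work,
                          int (*desc_done)(int), void (*reclaim)(int))
    {
        int done = 0;

        while (get != put && desc_done(get) && max_work-- > 0) {
            reclaim(get);
            get = (get + 1) % ring_size;
            done++;
        }
        return done;
    }
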
  1.2714  
  1.2715  /*
  1.2716   * nv_tx_timeout: dev->tx_timeout function
  1.2717 - * Called with netif_tx_lock held.
  1.2718 + * Called with dev->xmit_lock held.
  1.2719 + *
  1.2720   */
  1.2721  static void nv_tx_timeout(struct net_device *dev)
  1.2722  {
  1.2723 -	struct fe_priv *np = netdev_priv(dev);
  1.2724 +	struct fe_priv *np = get_nvpriv(dev);
  1.2725  	u8 __iomem *base = get_hwbase(dev);
  1.2726  	u32 status;
  1.2727  
  1.2728 +	if (!netif_running(dev))
  1.2729 +		return;
  1.2730 +
  1.2731  	if (np->msi_flags & NV_MSI_X_ENABLED)
  1.2732  		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
  1.2733  	else
  1.2734 @@ -1621,9 +2830,15 @@ static void nv_tx_timeout(struct net_dev
  1.2735  	{
  1.2736  		int i;
  1.2737  
  1.2738 -		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
  1.2739 -				dev->name, (unsigned long)np->ring_addr,
  1.2740 -				np->next_tx, np->nic_tx);
  1.2741 +		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2742 +			printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n",
  1.2743 +					dev->name, (unsigned long)np->tx_ring.orig,
  1.2744 +					(unsigned long)np->get_tx.orig, (unsigned long)np->put_tx.orig);
  1.2745 +		} else {
  1.2746 +			printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n",
  1.2747 +					dev->name, (unsigned long)np->tx_ring.ex,
  1.2748 +					(unsigned long)np->get_tx.ex, (unsigned long)np->put_tx.ex);
  1.2749 +		}
  1.2750  		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
  1.2751  		for (i=0;i<=np->register_size;i+= 32) {
  1.2752  			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
  1.2753 @@ -1637,54 +2852,64 @@ static void nv_tx_timeout(struct net_dev
  1.2754  		for (i=0;i<np->tx_ring_size;i+= 4) {
  1.2755  			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2756  				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
  1.2757 -				       i,
  1.2758 -				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
  1.2759 -				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
  1.2760 -				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
  1.2761 -				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
  1.2762 -				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
  1.2763 -				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
  1.2764 -				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
  1.2765 -				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
  1.2766 +						i, 
  1.2767 +						le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
  1.2768 +						le32_to_cpu(np->tx_ring.orig[i].FlagLen),
  1.2769 +						le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
  1.2770 +						le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
  1.2771 +						le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
  1.2772 +						le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
  1.2773 +						le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
  1.2774 +						le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
  1.2775  			} else {
  1.2776  				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
  1.2777 -				       i,
  1.2778 -				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
  1.2779 -				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
  1.2780 -				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
  1.2781 -				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
  1.2782 -				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
  1.2783 -				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
  1.2784 -				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
  1.2785 -				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
  1.2786 -				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
  1.2787 -				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
  1.2788 -				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
  1.2789 -				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
  1.2790 +						i, 
  1.2791 +						le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
  1.2792 +						le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
  1.2793 +						le32_to_cpu(np->tx_ring.ex[i].FlagLen),
  1.2794 +						le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
  1.2795 +						le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
  1.2796 +						le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
  1.2797 +						le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
  1.2798 +						le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
  1.2799 +						le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
  1.2800 +						le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
  1.2801 +						le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
  1.2802 +						le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
  1.2803  			}
  1.2804  		}
  1.2805  	}
  1.2806  
  1.2807 +	nv_disable_irq(dev);
  1.2808  	spin_lock_irq(&np->lock);
  1.2809  
  1.2810  	/* 1) stop tx engine */
  1.2811  	nv_stop_tx(dev);
  1.2812  
  1.2813  	/* 2) check that the packets were not sent already: */
  1.2814 -	nv_tx_done(dev);
  1.2815 +	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2816 +		nv_tx_done(dev);
  1.2817 +	else
  1.2818 +		nv_tx_done_optimized(dev, np->tx_ring_size);
  1.2819  
  1.2820  	/* 3) if there are dead entries: clear everything */
  1.2821 -	if (np->next_tx != np->nic_tx) {
  1.2822 +	if (np->get_tx_ctx != np->put_tx_ctx) {
  1.2823  		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
  1.2824  		nv_drain_tx(dev);
  1.2825 -		np->next_tx = np->nic_tx = 0;
  1.2826 +		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.2827 +			np->get_tx.orig = np->put_tx.orig = np->first_tx.orig;
  1.2828 +		else
  1.2829 +			np->get_tx.ex = np->put_tx.ex = np->first_tx.ex;
  1.2830 +		np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx;
  1.2831  		setup_hw_rings(dev, NV_SETUP_TX_RING);
  1.2832 -		netif_wake_queue(dev);
  1.2833 -	}
  1.2834 -
  1.2835 +	}
  1.2836 +
  1.2837 +	netif_wake_queue(dev);
  1.2838  	/* 4) restart tx engine */
  1.2839  	nv_start_tx(dev);
  1.2840 +
  1.2841  	spin_unlock_irq(&np->lock);
  1.2842 +	nv_enable_irq(dev);
  1.2843  }
  1.2844  
  1.2845  /*
  1.2846 @@ -1705,7 +2930,7 @@ static int nv_getlen(struct net_device *
  1.2847  		hdrlen = ETH_HLEN;
  1.2848  	}
  1.2849  	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
  1.2850 -				dev->name, datalen, protolen, hdrlen);
  1.2851 +			dev->name, datalen, protolen, hdrlen);
  1.2852  	if (protolen > ETH_DATA_LEN)
  1.2853  		return datalen; /* Value in proto field not a len, no checks possible */
  1.2854  
  1.2855 @@ -1740,162 +2965,210 @@ static int nv_getlen(struct net_device *
  1.2856  	}
  1.2857  }
  1.2858  
  1.2859 -static void nv_rx_process(struct net_device *dev)
  1.2860 +static inline void nv_rx_process(struct net_device *dev)
  1.2861  {
  1.2862 -	struct fe_priv *np = netdev_priv(dev);
  1.2863 +	struct fe_priv *np = get_nvpriv(dev);
  1.2864  	u32 Flags;
  1.2865 -	u32 vlanflags = 0;
  1.2866 -
  1.2867 -	for (;;) {
  1.2868 -		struct sk_buff *skb;
  1.2869 -		int len;
  1.2870 -		int i;
  1.2871 -		if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
  1.2872 -			break;	/* we scanned the whole ring - do not continue */
  1.2873 -
  1.2874 -		i = np->cur_rx % np->rx_ring_size;
  1.2875 -		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.2876 -			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
  1.2877 -			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
  1.2878 -		} else {
  1.2879 -			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
  1.2880 -			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
  1.2881 -			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
  1.2882 -		}
  1.2883 -
  1.2884 -		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
  1.2885 -					dev->name, np->cur_rx, Flags);
  1.2886 -
  1.2887 -		if (Flags & NV_RX_AVAIL)
  1.2888 -			break;	/* still owned by hardware, */
  1.2889 -
  1.2890 -		/*
  1.2891 -		 * the packet is for us - immediately tear down the pci mapping.
  1.2892 -		 * TODO: check if a prefetch of the first cacheline improves
  1.2893 -		 * the performance.
  1.2894 -		 */
  1.2895 -		pci_unmap_single(np->pci_dev, np->rx_dma[i],
  1.2896 -				np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
  1.2897 +	struct sk_buff *skb;
  1.2898 +	int len;
  1.2899 +
  1.2900 +	dprintk("%s:%s\n",dev->name,__FUNCTION__);
  1.2901 +	while((np->get_rx.orig != np->put_rx.orig) &&
  1.2902 +			!((Flags = le32_to_cpu(np->get_rx.orig->FlagLen)) & NV_RX_AVAIL)) {
  1.2903 +
  1.2904 +		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
  1.2905 +				np->get_rx_ctx->dma_len,
  1.2906  				PCI_DMA_FROMDEVICE);
  1.2907  
  1.2908 +		skb = np->get_rx_ctx->skb;
  1.2909 +		np->get_rx_ctx->skb = NULL;
  1.2910 +
  1.2911  		{
  1.2912  			int j;
  1.2913  			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
  1.2914  			for (j=0; j<64; j++) {
  1.2915  				if ((j%16) == 0)
  1.2916  					dprintk("\n%03x:", j);
  1.2917 -				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
  1.2918 +				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
  1.2919  			}
  1.2920  			dprintk("\n");
  1.2921  		}
  1.2922 -		/* look at what we actually got: */
  1.2923 +
  1.2924  		if (np->desc_ver == DESC_VER_1) {
  1.2925 -			if (!(Flags & NV_RX_DESCRIPTORVALID))
  1.2926 -				goto next_pkt;
  1.2927 -
  1.2928 -			if (Flags & NV_RX_ERROR) {
  1.2929 -				if (Flags & NV_RX_MISSEDFRAME) {
  1.2930 -					np->stats.rx_missed_errors++;
  1.2931 -					np->stats.rx_errors++;
  1.2932 -					goto next_pkt;
  1.2933 -				}
  1.2934 -				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
  1.2935 -					np->stats.rx_errors++;
  1.2936 -					goto next_pkt;
  1.2937 -				}
  1.2938 -				if (Flags & NV_RX_CRCERR) {
  1.2939 -					np->stats.rx_crc_errors++;
  1.2940 -					np->stats.rx_errors++;
  1.2941 -					goto next_pkt;
  1.2942 -				}
  1.2943 -				if (Flags & NV_RX_OVERFLOW) {
  1.2944 -					np->stats.rx_over_errors++;
  1.2945 -					np->stats.rx_errors++;
  1.2946 -					goto next_pkt;
  1.2947 -				}
  1.2948 -				if (Flags & NV_RX_ERROR4) {
  1.2949 -					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
  1.2950 -					if (len < 0) {
  1.2951 +
  1.2952 +			if (likely(Flags & NV_RX_DESCRIPTORVALID)) {
  1.2953 +				len = Flags & LEN_MASK_V1;
  1.2954 +				if (unlikely(Flags & NV_RX_ERROR)) {
  1.2955 +					if (Flags & NV_RX_ERROR4) {
  1.2956 +						len = nv_getlen(dev, skb->data, len);
  1.2957 +						if (len < 0 || len > np->rx_buf_sz) {
  1.2958 +							np->stats.rx_errors++;
  1.2959 +							dev_kfree_skb(skb);
  1.2960 +							goto next_pkt;
  1.2961 +						}
  1.2962 +					}
  1.2963 +					/* framing errors are soft errors */
  1.2964 +					else if (Flags & NV_RX_FRAMINGERR) {
  1.2965 +						if (Flags & NV_RX_SUBSTRACT1) {
  1.2966 +							len--;
  1.2967 +						}
  1.2968 +					}
  1.2969 +					/* the rest are hard errors */
  1.2970 +					else {
  1.2971 +						if (Flags & NV_RX_MISSEDFRAME)
  1.2972 +							np->stats.rx_missed_errors++;
  1.2973 +						if (Flags & NV_RX_CRCERR)
  1.2974 +							np->stats.rx_crc_errors++;
  1.2975 +						if (Flags & NV_RX_OVERFLOW)
  1.2976 +							np->stats.rx_over_errors++;
  1.2977  						np->stats.rx_errors++;
  1.2978 +						dev_kfree_skb(skb);
  1.2979  						goto next_pkt;
  1.2980  					}
  1.2981  				}
  1.2982 -				/* framing errors are soft errors. */
  1.2983 -				if (Flags & NV_RX_FRAMINGERR) {
  1.2984 -					if (Flags & NV_RX_SUBSTRACT1) {
  1.2985 -						len--;
  1.2986 -					}
  1.2987 -				}
  1.2988 +			} else {
  1.2989 +				dev_kfree_skb(skb);
  1.2990 +				goto next_pkt;
  1.2991  			}
  1.2992  		} else {
  1.2993 -			if (!(Flags & NV_RX2_DESCRIPTORVALID))
  1.2994 -				goto next_pkt;
  1.2995 -
  1.2996 -			if (Flags & NV_RX2_ERROR) {
  1.2997 -				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
  1.2998 -					np->stats.rx_errors++;
  1.2999 -					goto next_pkt;
  1.3000 +			if (likely(Flags & NV_RX2_DESCRIPTORVALID)) {
  1.3001 +				len = Flags & LEN_MASK_V2;
  1.3002 +				if (unlikely(Flags & NV_RX2_ERROR)) {
  1.3003 +					if (Flags & NV_RX2_ERROR4) {
  1.3004 +						len = nv_getlen(dev, skb->data, len);
  1.3005 +						if (len < 0 || len > np->rx_buf_sz) {
  1.3006 +							np->stats.rx_errors++;
  1.3007 +							dev_kfree_skb(skb);
  1.3008 +							goto next_pkt;
  1.3009 +						}
  1.3010 +					}
  1.3011 +					/* framing errors are soft errors */
  1.3012 +					else if (Flags & NV_RX2_FRAMINGERR) {
  1.3013 +						if (Flags & NV_RX2_SUBSTRACT1) {
  1.3014 +							len--;
  1.3015 +						}
  1.3016 +					}
  1.3017 +					/* the rest are hard errors */
  1.3018 +					else {
  1.3019 +						if (Flags & NV_RX2_CRCERR)
  1.3020 +							np->stats.rx_crc_errors++;
  1.3021 +						if (Flags & NV_RX2_OVERFLOW)
  1.3022 +							np->stats.rx_over_errors++;
  1.3023 +						np->stats.rx_errors++;
  1.3024 +						dev_kfree_skb(skb);
  1.3025 +						goto next_pkt;
  1.3026 +					}
  1.3027  				}
  1.3028 -				if (Flags & NV_RX2_CRCERR) {
  1.3029 -					np->stats.rx_crc_errors++;
  1.3030 -					np->stats.rx_errors++;
  1.3031 -					goto next_pkt;
  1.3032 -				}
  1.3033 -				if (Flags & NV_RX2_OVERFLOW) {
  1.3034 -					np->stats.rx_over_errors++;
  1.3035 -					np->stats.rx_errors++;
  1.3036 -					goto next_pkt;
  1.3037 -				}
  1.3038 +				if (((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))
  1.3039 +					/* ip, and tcp or udp */
  1.3040 +					skb->ip_summed = CHECKSUM_UNNECESSARY;
  1.3041 +			} else {
  1.3042 +				dev_kfree_skb(skb);
  1.3043 +				goto next_pkt;
  1.3044 +			}
  1.3045 +		}
  1.3046 +
  1.3047 +		/* got a valid packet - forward it to the network core */
  1.3048 +		dprintk(KERN_DEBUG "%s: nv_rx_process:NVLAN rx done\n", dev->name);
  1.3049 +		skb_put(skb, len);
  1.3050 +		skb->protocol = eth_type_trans(skb, dev);
  1.3051 +		netif_rx(skb);
  1.3052 +		dev->last_rx = jiffies;
  1.3053 +		np->stats.rx_packets++;
  1.3054 +		np->stats.rx_bytes += len;
  1.3055 +next_pkt:
  1.3056 +		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
  1.3057 +			np->get_rx.orig = np->first_rx.orig;
  1.3058 +		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
  1.3059 +			np->get_rx_ctx = np->first_rx_ctx;
  1.3060 +	}
  1.3061 +}
  1.3062 +
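Both receive paths accept the hardware checksum only when the status field reports one of the two fully verified states. Factored out as a sketch (the helper name is invented; the macros are the patch's own):

	static inline int nv_rx_csum_ok(u32 flags)
	{
		u32 state = flags & NV_RX2_CHECKSUMMASK;

		/* only "ip and tcp ok" or "ip and udp ok" justify marking
		 * the skb CHECKSUM_UNNECESSARY */
		return state == NV_RX2_CHECKSUM_IP_TCP ||
		       state == NV_RX2_CHECKSUM_IP_UDP;
	}
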
  1.3063 +static inline int nv_rx_process_optimized(struct net_device *dev, int max_work)
  1.3064 +{
  1.3065 +	struct fe_priv *np = get_nvpriv(dev);
  1.3066 +	u32 Flags;
  1.3067 +	u32 vlanflags = 0;
  1.3068 +	u32 rx_processed_cnt = 0;
  1.3069 +	struct sk_buff *skb;
  1.3070 +	int len;
  1.3071 +
  1.3072 +	while((np->get_rx.ex != np->put_rx.ex) &&
  1.3073 +			!((Flags = le32_to_cpu(np->get_rx.ex->FlagLen)) & NV_RX2_AVAIL) &&
  1.3074 +			(rx_processed_cnt++ < max_work)) {
  1.3075 +
  1.3076 +		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
  1.3077 +				np->get_rx_ctx->dma_len,
  1.3078 +				PCI_DMA_FROMDEVICE);
  1.3079 +
  1.3080 +		skb = np->get_rx_ctx->skb;
  1.3081 +		np->get_rx_ctx->skb = NULL;
  1.3082 +
  1.3083 +		/* look at what we actually got: */
  1.3084 +		if (likely(Flags & NV_RX2_DESCRIPTORVALID)) {
  1.3085 +			len = Flags & LEN_MASK_V2;
  1.3086 +			if (unlikely(Flags & NV_RX2_ERROR)) {
  1.3087  				if (Flags & NV_RX2_ERROR4) {
  1.3088 -					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
  1.3089 -					if (len < 0) {
  1.3090 -						np->stats.rx_errors++;
  1.3091 +					len = nv_getlen(dev, skb->data, len);
  1.3092 +					if (len < 0 || len > np->rx_buf_sz) {
  1.3093 +						np->rx_len_errors++;
  1.3094 +						dev_kfree_skb(skb);
  1.3095  						goto next_pkt;
  1.3096  					}
  1.3097  				}
  1.3098  				/* framing errors are soft errors */
  1.3099 -				if (Flags & NV_RX2_FRAMINGERR) {
  1.3100 +				else if (Flags & NV_RX2_FRAMINGERR) {
  1.3101  					if (Flags & NV_RX2_SUBSTRACT1) {
  1.3102  						len--;
  1.3103  					}
  1.3104  				}
  1.3105 -			}
  1.3106 -			if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
  1.3107 -				Flags &= NV_RX2_CHECKSUMMASK;
  1.3108 -				if (Flags == NV_RX2_CHECKSUMOK1 ||
  1.3109 -				    Flags == NV_RX2_CHECKSUMOK2 ||
  1.3110 -				    Flags == NV_RX2_CHECKSUMOK3) {
  1.3111 -					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
  1.3112 -					np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
  1.3113 -				} else {
  1.3114 -					dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
  1.3115 +				/* the rest are hard errors */
  1.3116 +				else {
  1.3117 +					dev_kfree_skb(skb);
  1.3118 +					goto next_pkt;
  1.3119  				}
  1.3120  			}
  1.3121 +
  1.3122 +			if (likely(np->rx_csum)) {
  1.3123 +				if (likely(((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)))
  1.3124 +					/* ip, and tcp or udp */
  1.3125 +					skb->ip_summed = CHECKSUM_UNNECESSARY;
  1.3126 +			}
  1.3127 +			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized:NVLAN rx done\n", dev->name);
  1.3128 +
  1.3129 +			/* got a valid packet - forward it to the network core */
  1.3130 +			skb_put(skb, len);
  1.3131 +			skb->protocol = eth_type_trans(skb, dev);
  1.3132 +			prefetch(skb->data);
  1.3133 +
  1.3134 +			if (likely(!np->vlangrp)) {
  1.3135 +				netif_rx(skb);
  1.3136 +			} else {
  1.3137 +				vlanflags = le32_to_cpu(np->get_rx.ex->PacketBufferLow);
  1.3138 +				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT)
  1.3139 +					vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
  1.3140 +				else
  1.3141 +					netif_rx(skb);
  1.3142 +			}
  1.3143 +
  1.3144 +			dev->last_rx = jiffies;
  1.3145 +			np->stats.rx_packets++;
  1.3146 +			np->stats.rx_bytes += len;
  1.3147 +		} else {
  1.3148 +			dev_kfree_skb(skb);
  1.3149  		}
  1.3150 -		/* got a valid packet - forward it to the network core */
  1.3151 -		skb = np->rx_skbuff[i];
  1.3152 -		np->rx_skbuff[i] = NULL;
  1.3153 -
  1.3154 -		skb_put(skb, len);
  1.3155 -		skb->protocol = eth_type_trans(skb, dev);
  1.3156 -		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
  1.3157 -					dev->name, np->cur_rx, len, skb->protocol);
  1.3158 -		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
  1.3159 -			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
  1.3160 -		} else {
  1.3161 -			netif_rx(skb);
  1.3162 -		}
  1.3163 -		dev->last_rx = jiffies;
  1.3164 -		np->stats.rx_packets++;
  1.3165 -		np->stats.rx_bytes += len;
  1.3166  next_pkt:
  1.3167 -		np->cur_rx++;
  1.3168 -	}
  1.3169 +		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
  1.3170 +			np->get_rx.ex = np->first_rx.ex;
  1.3171 +		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
  1.3172 +			np->get_rx_ctx = np->first_rx_ctx;
  1.3173 +	}
  1.3174 +	return rx_processed_cnt;
  1.3175  }
  1.3176  
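The next_pkt epilogue in both receive loops uses a post-increment-with-wrap idiom to advance the descriptor and context cursors. Isolated for clarity (a sketch, not code from the patch):

	static inline void nv_advance_rx_cursor(struct fe_priv *np)
	{
		/* wrap back to the first slot once a cursor walks past the
		 * last ring entry */
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;
	}
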
  1.3177  static void set_bufsize(struct net_device *dev)
  1.3178  {
  1.3179 -	struct fe_priv *np = netdev_priv(dev);
  1.3180 +	struct fe_priv *np = get_nvpriv(dev);
  1.3181  
  1.3182  	if (dev->mtu <= ETH_DATA_LEN)
  1.3183  		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
  1.3184 @@ -1909,7 +3182,7 @@ static void set_bufsize(struct net_devic
  1.3185   */
  1.3186  static int nv_change_mtu(struct net_device *dev, int new_mtu)
  1.3187  {
  1.3188 -	struct fe_priv *np = netdev_priv(dev);
  1.3189 +	struct fe_priv *np = get_nvpriv(dev);
  1.3190  	int old_mtu;
  1.3191  
  1.3192  	if (new_mtu < 64 || new_mtu > np->pkt_limit)
  1.3193 @@ -1933,8 +3206,13 @@ static int nv_change_mtu(struct net_devi
  1.3194  		 * guessed, there is probably a simpler approach.
  1.3195  		 * Changing the MTU is a rare event, it shouldn't matter.
  1.3196  		 */
  1.3197 +		nv_disable_hw_interrupts(dev,np->irqmask);
  1.3198  		nv_disable_irq(dev);
  1.3199 +#if NVVER > FEDORA5
  1.3200  		netif_tx_lock_bh(dev);
  1.3201 +#else
  1.3202 +		spin_lock_bh(&dev->xmit_lock);
  1.3203 +#endif
  1.3204  		spin_lock(&np->lock);
  1.3205  		/* stop engines */
  1.3206  		nv_stop_rx(dev);
  1.3207 @@ -1953,7 +3231,7 @@ static int nv_change_mtu(struct net_devi
  1.3208  		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
  1.3209  		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
  1.3210  		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
  1.3211 -			base + NvRegRingSizes);
  1.3212 +				base + NvRegRingSizes);
  1.3213  		pci_push(base);
  1.3214  		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.3215  		pci_push(base);
  1.3216 @@ -1962,8 +3240,13 @@ static int nv_change_mtu(struct net_devi
  1.3217  		nv_start_rx(dev);
  1.3218  		nv_start_tx(dev);
  1.3219  		spin_unlock(&np->lock);
  1.3220 +#if NVVER > FEDORA5
  1.3221  		netif_tx_unlock_bh(dev);
  1.3222 +#else
  1.3223 +		spin_unlock_bh(&dev->xmit_lock);
  1.3224 +#endif
  1.3225  		nv_enable_irq(dev);
  1.3226 +		nv_enable_hw_interrupts(dev,np->irqmask);
  1.3227  	}
  1.3228  	return 0;
  1.3229  }
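The NVVER > FEDORA5 conditionals above, repeated in nv_set_mac_address and nv_do_nic_poll below, all guard the same rename: kernels newer than the FEDORA5 cut-off provide netif_tx_lock_bh(), while older ones expose dev->xmit_lock directly. A pair of assumed wrappers (not in the patch) would express the pattern once:

	static inline void nv_tx_lock_bh(struct net_device *dev)
	{
	#if NVVER > FEDORA5
		netif_tx_lock_bh(dev);
	#else
		spin_lock_bh(&dev->xmit_lock);
	#endif
	}

	static inline void nv_tx_unlock_bh(struct net_device *dev)
	{
	#if NVVER > FEDORA5
		netif_tx_unlock_bh(dev);
	#else
		spin_unlock_bh(&dev->xmit_lock);
	#endif
	}
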
  1.3230 @@ -1974,11 +3257,11 @@ static void nv_copy_mac_to_hw(struct net
  1.3231  	u32 mac[2];
  1.3232  
  1.3233  	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
  1.3234 -			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
  1.3235 +		(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
  1.3236  	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
  1.3237 -
  1.3238  	writel(mac[0], base + NvRegMacAddrA);
  1.3239  	writel(mac[1], base + NvRegMacAddrB);
  1.3240 +
  1.3241  }
  1.3242  
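nv_copy_mac_to_hw packs the six address bytes little-endian into two 32-bit registers. As a worked example with an illustrative address of 00:04:4B:12:34:56:

	mac[0] = 0x00 | (0x04 << 8) | (0x4B << 16) | (0x12 << 24);	/* 0x124B0400 */
	mac[1] = 0x34 | (0x56 << 8);					/* 0x00005634 */
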
  1.3243  /*
  1.3244 @@ -1987,17 +3270,22 @@ static void nv_copy_mac_to_hw(struct net
  1.3245   */
  1.3246  static int nv_set_mac_address(struct net_device *dev, void *addr)
  1.3247  {
  1.3248 -	struct fe_priv *np = netdev_priv(dev);
  1.3249 +	struct fe_priv *np = get_nvpriv(dev);
  1.3250  	struct sockaddr *macaddr = (struct sockaddr*)addr;
  1.3251  
  1.3252  	if(!is_valid_ether_addr(macaddr->sa_data))
  1.3253  		return -EADDRNOTAVAIL;
  1.3254  
  1.3255 +	dprintk("%s:%s\n",dev->name,__FUNCTION__);
  1.3256  	/* synchronized against open : rtnl_lock() held by caller */
  1.3257  	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
  1.3258  
  1.3259  	if (netif_running(dev)) {
  1.3260 +#if NVVER > FEDORA5
  1.3261  		netif_tx_lock_bh(dev);
  1.3262 +#else
  1.3263 +		spin_lock_bh(&dev->xmit_lock);
  1.3264 +#endif
  1.3265  		spin_lock_irq(&np->lock);
  1.3266  
  1.3267  		/* stop rx engine */
  1.3268 @@ -2009,7 +3297,11 @@ static int nv_set_mac_address(struct net
  1.3269  		/* restart rx engine */
  1.3270  		nv_start_rx(dev);
  1.3271  		spin_unlock_irq(&np->lock);
  1.3272 +#if NVVER > FEDORA5
  1.3273  		netif_tx_unlock_bh(dev);
  1.3274 +#else
  1.3275 +		spin_unlock_bh(&dev->xmit_lock);
  1.3276 +#endif
  1.3277  	} else {
  1.3278  		nv_copy_mac_to_hw(dev);
  1.3279  	}
  1.3280 @@ -2018,11 +3310,11 @@ static int nv_set_mac_address(struct net
  1.3281  
  1.3282  /*
  1.3283   * nv_set_multicast: dev->set_multicast function
  1.3284 - * Called with netif_tx_lock held.
  1.3285 + * Called with dev->xmit_lock held.
  1.3286   */
  1.3287  static void nv_set_multicast(struct net_device *dev)
  1.3288  {
  1.3289 -	struct fe_priv *np = netdev_priv(dev);
  1.3290 +	struct fe_priv *np = get_nvpriv(dev);
  1.3291  	u8 __iomem *base = get_hwbase(dev);
  1.3292  	u32 addr[2];
  1.3293  	u32 mask[2];
  1.3294 @@ -2032,7 +3324,7 @@ static void nv_set_multicast(struct net_
  1.3295  	memset(mask, 0, sizeof(mask));
  1.3296  
  1.3297  	if (dev->flags & IFF_PROMISC) {
  1.3298 -		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
  1.3299 +		dprintk(KERN_DEBUG "%s: Promiscuous mode enabled.\n", dev->name);
  1.3300  		pff |= NVREG_PFF_PROMISC;
  1.3301  	} else {
  1.3302  		pff |= NVREG_PFF_MYADDR;
  1.3303 @@ -2063,6 +3355,9 @@ static void nv_set_multicast(struct net_
  1.3304  			addr[1] = alwaysOn[1];
  1.3305  			mask[0] = alwaysOn[0] | alwaysOff[0];
  1.3306  			mask[1] = alwaysOn[1] | alwaysOff[1];
  1.3307 +		} else {
  1.3308 +			mask[0] = NVREG_MCASTMASKA_NONE;
  1.3309 +			mask[1] = NVREG_MCASTMASKB_NONE;
  1.3310  		}
  1.3311  	}
  1.3312  	addr[0] |= NVREG_MCASTADDRA_FORCE;
  1.3313 @@ -2075,15 +3370,16 @@ static void nv_set_multicast(struct net_
  1.3314  	writel(mask[1], base + NvRegMulticastMaskB);
  1.3315  	writel(pff, base + NvRegPacketFilterFlags);
  1.3316  	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
  1.3317 -		dev->name);
  1.3318 +			dev->name);
  1.3319  	nv_start_rx(dev);
  1.3320  	spin_unlock_irq(&np->lock);
  1.3321  }
  1.3322  
  1.3323  static void nv_update_pause(struct net_device *dev, u32 pause_flags)
  1.3324  {
  1.3325 -	struct fe_priv *np = netdev_priv(dev);
  1.3326 +	struct fe_priv *np = get_nvpriv(dev);
  1.3327  	u8 __iomem *base = get_hwbase(dev);
  1.3328 +	u32 pause_enable;
  1.3329  
  1.3330  	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
  1.3331  
  1.3332 @@ -2099,12 +3395,17 @@ static void nv_update_pause(struct net_d
  1.3333  	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
  1.3334  		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
  1.3335  		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
  1.3336 -			writel(NVREG_TX_PAUSEFRAME_ENABLE,  base + NvRegTxPauseFrame);
  1.3337 +			pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
  1.3338 +			if(np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
  1.3339 +				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
  1.3340 +			if(np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
  1.3341 +				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
  1.3342 +			writel(pause_enable ,  base + NvRegTxPauseFrame);
  1.3343  			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
  1.3344  			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
  1.3345  		} else {
  1.3346  			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
  1.3347 -			writel(regmisc, base + NvRegMisc1);
  1.3348 +			writel(regmisc, base + NvRegMisc1);
  1.3349  		}
  1.3350  	}
  1.3351  }
  1.3352 @@ -2122,7 +3423,7 @@ static void nv_update_pause(struct net_d
  1.3353   */
  1.3354  static int nv_update_linkspeed(struct net_device *dev)
  1.3355  {
  1.3356 -	struct fe_priv *np = netdev_priv(dev);
  1.3357 +	struct fe_priv *np = get_nvpriv(dev);
  1.3358  	u8 __iomem *base = get_hwbase(dev);
  1.3359  	int adv = 0;
  1.3360  	int lpa = 0;
  1.3361 @@ -2132,6 +3433,7 @@ static int nv_update_linkspeed(struct ne
  1.3362  	int mii_status;
  1.3363  	int retval = 0;
  1.3364  	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
  1.3365 +	u32 txrxFlags = 0 ;
  1.3366  
  1.3367  	/* BMSR_LSTATUS is latched, read it twice:
  1.3368  	 * we want the current value.
  1.3369 @@ -2148,7 +3450,7 @@ static int nv_update_linkspeed(struct ne
  1.3370  		goto set_speed;
  1.3371  	}
  1.3372  
  1.3373 -	if (np->autoneg == 0) {
  1.3374 +	if (np->autoneg == AUTONEG_DISABLE) {
  1.3375  		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
  1.3376  				dev->name, np->fixed_mode);
  1.3377  		if (np->fixed_mode & LPA_100FULL) {
  1.3378 @@ -2180,17 +3482,16 @@ static int nv_update_linkspeed(struct ne
  1.3379  	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
  1.3380  	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
  1.3381  	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
  1.3382 -				dev->name, adv, lpa);
  1.3383 -
  1.3384 +			dev->name, adv, lpa);
  1.3385  	retval = 1;
  1.3386  	if (np->gigabit == PHY_GIGABIT) {
  1.3387  		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
  1.3388  		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
  1.3389  
  1.3390  		if ((control_1000 & ADVERTISE_1000FULL) &&
  1.3391 -			(status_1000 & LPA_1000FULL)) {
  1.3392 +				(status_1000 & LPA_1000FULL)) {
  1.3393  			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
  1.3394 -				dev->name);
  1.3395 +					dev->name);
  1.3396  			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
  1.3397  			newdup = 1;
  1.3398  			goto set_speed;
  1.3399 @@ -2227,6 +3528,17 @@ set_speed:
  1.3400  	np->duplex = newdup;
  1.3401  	np->linkspeed = newls;
  1.3402  
  1.3403 +	/* The transmitter and receiver must be restarted for safe update */
  1.3404 +	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
  1.3405 +		txrxFlags |= NV_RESTART_TX;
  1.3406 +		nv_stop_tx(dev);
  1.3407 +	}
  1.3408 +	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
  1.3409 +		txrxFlags |= NV_RESTART_RX;
  1.3410 +		nv_stop_rx(dev);
  1.3411 +	}
  1.3412 +
  1.3413 +
  1.3414  	if (np->gigabit == PHY_GIGABIT) {
  1.3415  		phyreg = readl(base + NvRegRandomSeed);
  1.3416  		phyreg &= ~(0x3FF00);
  1.3417 @@ -2268,9 +3580,8 @@ set_speed:
  1.3418  			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
  1.3419  	}
  1.3420  	writel(txreg, base + NvRegTxWatermark);
  1.3421 -
  1.3422  	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
  1.3423 -		base + NvRegMisc1);
  1.3424 +			base + NvRegMisc1);
  1.3425  	pci_push(base);
  1.3426  	writel(np->linkspeed, base + NvRegLinkSpeed);
  1.3427  	pci_push(base);
  1.3428 @@ -2283,37 +3594,42 @@ set_speed:
  1.3429  			lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
  1.3430  
  1.3431  			switch (adv_pause) {
  1.3432 -			case (ADVERTISE_PAUSE_CAP):
  1.3433 -				if (lpa_pause & LPA_PAUSE_CAP) {
  1.3434 -					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
  1.3435 -					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
  1.3436 -						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
  1.3437 -				}
  1.3438 -				break;
  1.3439 -			case (ADVERTISE_PAUSE_ASYM):
  1.3440 -				if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
  1.3441 +				case (ADVERTISE_PAUSE_CAP):
  1.3442 +					if (lpa_pause & LPA_PAUSE_CAP) {
  1.3443 +						pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
  1.3444 +						if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
  1.3445 +							pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
  1.3446 +					}
  1.3447 +					break;
  1.3448 +				case (ADVERTISE_PAUSE_ASYM):
  1.3449 +					if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
  1.3450  				{
  1.3451  					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
  1.3452  				}
  1.3453 -				break;
  1.3454 -			case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
  1.3455 -				if (lpa_pause & LPA_PAUSE_CAP)
  1.3456 +					break;
  1.3457 +				case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
  1.3458 +					if (lpa_pause & LPA_PAUSE_CAP)
  1.3459  				{
  1.3460  					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
  1.3461  					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
  1.3462  						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
  1.3463  				}
  1.3464 -				if (lpa_pause == LPA_PAUSE_ASYM)
  1.3465 -				{
  1.3466 -					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
  1.3467 -				}
  1.3468 -				break;
  1.3469 +					if (lpa_pause == LPA_PAUSE_ASYM)
  1.3470 +					{
  1.3471 +						pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
  1.3472 +					}
  1.3473 +					break;
  1.3474  			}
  1.3475  		} else {
  1.3476  			pause_flags = np->pause_flags;
  1.3477  		}
  1.3478  	}
  1.3479  	nv_update_pause(dev, pause_flags);
  1.3480 +
  1.3481 +	if (txrxFlags & NV_RESTART_TX)
  1.3482 +		nv_start_tx(dev);
  1.3483 +	if (txrxFlags & NV_RESTART_RX)
  1.3484 +		nv_start_rx(dev);
  1.3485  
  1.3486  	return retval;
  1.3487  }
  1.3488 @@ -2341,7 +3657,7 @@ static void nv_link_irq(struct net_devic
  1.3489  	u32 miistat;
  1.3490  
  1.3491  	miistat = readl(base + NvRegMIIStatus);
  1.3492 -	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
  1.3493 +	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
  1.3494  	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
  1.3495  
  1.3496  	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
  1.3497 @@ -2349,15 +3665,19 @@ static void nv_link_irq(struct net_devic
  1.3498  	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
  1.3499  }
  1.3500  
  1.3501 +#if NVVER < FEDORA7
  1.3502  static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
  1.3503 +#else
  1.3504 +static irqreturn_t nv_nic_irq(int foo, void *data)
  1.3505 +#endif
  1.3506  {
  1.3507  	struct net_device *dev = (struct net_device *) data;
  1.3508 -	struct fe_priv *np = netdev_priv(dev);
  1.3509 +	struct fe_priv *np = get_nvpriv(dev);
  1.3510  	u8 __iomem *base = get_hwbase(dev);
  1.3511 -	u32 events;
  1.3512 +	u32 events,mask;
  1.3513  	int i;
  1.3514  
  1.3515 -	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
  1.3516 +	dprintk("%s:%s\n",dev->name,__FUNCTION__);
  1.3517  
  1.3518  	for (i=0; ; i++) {
  1.3519  		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
  1.3520 @@ -2369,7 +3689,8 @@ static irqreturn_t nv_nic_irq(int foo, v
  1.3521  		}
  1.3522  		pci_push(base);
  1.3523  		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
  1.3524 -		if (!(events & np->irqmask))
  1.3525 +		mask = readl(base + NvRegIrqMask);
  1.3526 +		if (!(events & mask))
  1.3527  			break;
  1.3528  
  1.3529  		spin_lock(&np->lock);
  1.3530 @@ -2397,11 +3718,11 @@ static irqreturn_t nv_nic_irq(int foo, v
  1.3531  		}
  1.3532  		if (events & (NVREG_IRQ_TX_ERR)) {
  1.3533  			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
  1.3534 -						dev->name, events);
  1.3535 +					dev->name, events);
  1.3536  		}
  1.3537  		if (events & (NVREG_IRQ_UNKNOWN)) {
  1.3538  			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
  1.3539 -						dev->name, events);
  1.3540 +					dev->name, events);
  1.3541  		}
  1.3542  		if (i > max_interrupt_work) {
  1.3543  			spin_lock(&np->lock);
  1.3544 @@ -2427,34 +3748,112 @@ static irqreturn_t nv_nic_irq(int foo, v
  1.3545  	return IRQ_RETVAL(i);
  1.3546  }
  1.3547  
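Each interrupt handler in this patch is declared under an NVVER guard because newer kernels dropped the struct pt_regs argument from the irq handler signature; NVVER < FEDORA7 selects the old three-argument form. The pattern, shown once in isolation with an invented handler name:

	#if NVVER < FEDORA7
	static irqreturn_t my_nic_irq(int irq, void *data, struct pt_regs *regs)
	#else
	static irqreturn_t my_nic_irq(int irq, void *data)
	#endif
	{
		struct net_device *dev = data;
		/* ... acknowledge and service the events ... */
		return IRQ_HANDLED;
	}
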
  1.3548 -static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
  1.3549 +#define TX_WORK_PER_LOOP  64
  1.3550 +#define RX_WORK_PER_LOOP  64
  1.3551 +#if NVVER < FEDORA7
  1.3552 +static irqreturn_t nv_nic_irq_optimized(int foo, void *data, struct pt_regs *regs)
  1.3553 +#else
  1.3554 +static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
  1.3555 +#endif
  1.3556  {
  1.3557  	struct net_device *dev = (struct net_device *) data;
  1.3558 -	struct fe_priv *np = netdev_priv(dev);
  1.3559 +	struct fe_priv *np = get_nvpriv(dev);
  1.3560 +	u8 __iomem *base = get_hwbase(dev);
  1.3561 +	u32 events,mask;
  1.3562 +	int i = 1;
  1.3563 +
  1.3564 +	do {
  1.3565 +		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
  1.3566 +			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
  1.3567 +			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
  1.3568 +		} else {
  1.3569 +			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
  1.3570 +			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
  1.3571 +		}
  1.3572 +
  1.3573 +		mask = readl(base + NvRegIrqMask);
  1.3574 +		if (events & mask) {
  1.3575 +
  1.3576 +			spin_lock(&np->lock);
  1.3577 +			nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
  1.3578 +			spin_unlock(&np->lock);
  1.3579 +
  1.3580 +			if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
  1.3581 +				if (unlikely(nv_alloc_rx_optimized(dev))) {
  1.3582 +					spin_lock(&np->lock);
  1.3583 +					if (!np->in_shutdown)
  1.3584 +						mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
  1.3585 +					spin_unlock(&np->lock);
  1.3586 +				}
  1.3587 +			}
  1.3588 +			if (unlikely(events & NVREG_IRQ_LINK)) {
  1.3589 +				spin_lock(&np->lock);
  1.3590 +				nv_link_irq(dev);
  1.3591 +				spin_unlock(&np->lock);
  1.3592 +			}
  1.3593 +			if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
  1.3594 +				spin_lock(&np->lock);
  1.3595 +				nv_linkchange(dev);
  1.3596 +				spin_unlock(&np->lock);
  1.3597 +				np->link_timeout = jiffies + LINK_TIMEOUT;
  1.3598 +			}
  1.3599 +			if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
  1.3600 +				spin_lock(&np->lock);
  1.3601 +				/* disable interrupts on the nic */
  1.3602 +				if (!(np->msi_flags & NV_MSI_X_ENABLED))
  1.3603 +					writel(0, base + NvRegIrqMask);
  1.3604 +				else
  1.3605 +					writel(np->irqmask, base + NvRegIrqMask);
  1.3606 +				pci_push(base);
  1.3607 +
  1.3608 +				if (!np->in_shutdown) {
  1.3609 +					np->nic_poll_irq = np->irqmask;
  1.3610 +					np->recover_error = 1;
  1.3611 +					mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
  1.3612 +				}
  1.3613 +				spin_unlock(&np->lock);
  1.3614 +				break;
  1.3615 +			}
  1.3616 +		} else
  1.3617 +			break;
  1.3618 +	} while (i++ <= max_interrupt_work);
  1.3620 +
  1.3621 +	return IRQ_RETVAL(i);
  1.3622 +}
  1.3623 +
  1.3624 +#if NVVER < FEDORA7
  1.3625 +static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
  1.3626 +#else
  1.3627 +static irqreturn_t nv_nic_irq_tx(int foo, void *data)
  1.3628 +#endif
  1.3629 +{
  1.3630 +	struct net_device *dev = (struct net_device *) data;
  1.3631 +	struct fe_priv *np = get_nvpriv(dev);
  1.3632  	u8 __iomem *base = get_hwbase(dev);
  1.3633  	u32 events;
  1.3634  	int i;
  1.3635 -
  1.3636 -	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
  1.3637 +	unsigned long flags;
  1.3638 +
  1.3639 +	dprintk("%s:%s\n",dev->name,__FUNCTION__);
  1.3640  
  1.3641  	for (i=0; ; i++) {
  1.3642  		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
  1.3643  		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
  1.3644 -		pci_push(base);
  1.3645  		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
  1.3646  		if (!(events & np->irqmask))
  1.3647  			break;
  1.3648  
  1.3649 -		spin_lock_irq(&np->lock);
  1.3650 -		nv_tx_done(dev);
  1.3651 -		spin_unlock_irq(&np->lock);
  1.3652 +		spin_lock_irqsave(&np->lock, flags);
  1.3653 +		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
  1.3654 +		spin_unlock_irqrestore(&np->lock, flags);
  1.3655  
  1.3656  		if (events & (NVREG_IRQ_TX_ERR)) {
  1.3657  			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
  1.3658 -						dev->name, events);
  1.3659 +					dev->name, events);
  1.3660  		}
  1.3661  		if (i > max_interrupt_work) {
  1.3662 -			spin_lock_irq(&np->lock);
  1.3663 +			spin_lock_irqsave(&np->lock, flags);
  1.3664  			/* disable interrupts on the nic */
  1.3665  			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
  1.3666  			pci_push(base);
  1.3667 @@ -2464,7 +3863,7 @@ static irqreturn_t nv_nic_irq_tx(int foo
  1.3668  				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
  1.3669  			}
  1.3670  			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
  1.3671 -			spin_unlock_irq(&np->lock);
  1.3672 +			spin_unlock_irqrestore(&np->lock, flags);
  1.3673  			break;
  1.3674  		}
  1.3675  
  1.3676 @@ -2474,34 +3873,39 @@ static irqreturn_t nv_nic_irq_tx(int foo
  1.3677  	return IRQ_RETVAL(i);
  1.3678  }
  1.3679  
  1.3680 +#if NVVER < FEDORA7
  1.3681  static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
  1.3682 +#else
  1.3683 +static irqreturn_t nv_nic_irq_rx(int foo, void *data)
  1.3684 +#endif
  1.3685  {
  1.3686  	struct net_device *dev = (struct net_device *) data;
  1.3687 -	struct fe_priv *np = netdev_priv(dev);
  1.3688 +	struct fe_priv *np = get_nvpriv(dev);
  1.3689  	u8 __iomem *base = get_hwbase(dev);
  1.3690  	u32 events;
  1.3691  	int i;
  1.3692 -
  1.3693 -	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
  1.3694 +	unsigned long flags;
  1.3695 +
  1.3696 +	dprintk("%s:%s\n",dev->name,__FUNCTION__);
  1.3697  
  1.3698  	for (i=0; ; i++) {
  1.3699  		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
  1.3700  		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
  1.3701 -		pci_push(base);
  1.3702  		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
  1.3703  		if (!(events & np->irqmask))
  1.3704  			break;
  1.3705  
  1.3706 -		nv_rx_process(dev);
  1.3707 -		if (nv_alloc_rx(dev)) {
  1.3708 -			spin_lock_irq(&np->lock);
  1.3709 -			if (!np->in_shutdown)
  1.3710 -				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
  1.3711 -			spin_unlock_irq(&np->lock);
  1.3712 +		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
  1.3713 +			if (unlikely(nv_alloc_rx_optimized(dev))) {
  1.3714 +				spin_lock_irqsave(&np->lock, flags);
  1.3715 +				if (!np->in_shutdown)
  1.3716 +					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
  1.3717 +				spin_unlock_irqrestore(&np->lock, flags);
  1.3718 +			}
  1.3719  		}
  1.3720  
  1.3721  		if (i > max_interrupt_work) {
  1.3722 -			spin_lock_irq(&np->lock);
  1.3723 +			spin_lock_irqsave(&np->lock, flags);
  1.3724  			/* disable interrupts on the nic */
  1.3725  			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
  1.3726  			pci_push(base);
  1.3727 @@ -2511,7 +3915,7 @@ static irqreturn_t nv_nic_irq_rx(int foo
  1.3728  				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
  1.3729  			}
  1.3730  			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
  1.3731 -			spin_unlock_irq(&np->lock);
  1.3732 +			spin_unlock_irqrestore(&np->lock, flags);
  1.3733  			break;
  1.3734  		}
  1.3735  
  1.3736 @@ -2521,24 +3925,33 @@ static irqreturn_t nv_nic_irq_rx(int foo
  1.3737  	return IRQ_RETVAL(i);
  1.3738  }
  1.3739  
  1.3740 +#if NVVER < FEDORA7
  1.3741  static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
  1.3742 +#else
  1.3743 +static irqreturn_t nv_nic_irq_other(int foo, void *data)
  1.3744 +#endif
  1.3745  {
  1.3746  	struct net_device *dev = (struct net_device *) data;
  1.3747 -	struct fe_priv *np = netdev_priv(dev);
  1.3748 +	struct fe_priv *np = get_nvpriv(dev);
  1.3749  	u8 __iomem *base = get_hwbase(dev);
  1.3750  	u32 events;
  1.3751  	int i;
  1.3752 -
  1.3753 -	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
  1.3754 +	unsigned long flags;
  1.3755 +
  1.3756 +	dprintk("%s:%s\n",dev->name,__FUNCTION__);
  1.3757  
  1.3758  	for (i=0; ; i++) {
  1.3759  		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
  1.3760  		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
  1.3761 -		pci_push(base);
  1.3762  		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
  1.3763  		if (!(events & np->irqmask))
  1.3764  			break;
  1.3765  
  1.3766 +		/* check tx in case we reached max loop limit in tx isr */
  1.3767 +		spin_lock_irqsave(&np->lock, flags);
  1.3768 +		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
  1.3769 +		spin_unlock_irqrestore(&np->lock, flags);
  1.3770 +
  1.3771  		if (events & NVREG_IRQ_LINK) {
  1.3772  			spin_lock_irq(&np->lock);
  1.3773  			nv_link_irq(dev);
  1.3774 @@ -2550,9 +3963,23 @@ static irqreturn_t nv_nic_irq_other(int 
  1.3775  			spin_unlock_irq(&np->lock);
  1.3776  			np->link_timeout = jiffies + LINK_TIMEOUT;
  1.3777  		}
  1.3778 +		if (events & NVREG_IRQ_RECOVER_ERROR) {
  1.3779 +			spin_lock_irq(&np->lock);
  1.3780 +			/* disable interrupts on the nic */
  1.3781 +			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
  1.3782 +			pci_push(base);
  1.3783 +
  1.3784 +			if (!np->in_shutdown) {
  1.3785 +				np->nic_poll_irq |= NVREG_IRQ_OTHER;
  1.3786 +				np->recover_error = 1;
  1.3787 +				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
  1.3788 +			}
  1.3789 +			spin_unlock_irq(&np->lock);
  1.3790 +			break;
  1.3791 +		}
  1.3792  		if (events & (NVREG_IRQ_UNKNOWN)) {
  1.3793  			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
  1.3794 -						dev->name, events);
  1.3795 +					dev->name, events);
  1.3796  		}
  1.3797  		if (i > max_interrupt_work) {
  1.3798  			spin_lock_irq(&np->lock);
  1.3799 @@ -2575,14 +4002,18 @@ static irqreturn_t nv_nic_irq_other(int 
  1.3800  	return IRQ_RETVAL(i);
  1.3801  }
  1.3802  
  1.3803 +#if NVVER < FEDORA7
  1.3804  static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
  1.3805 +#else
  1.3806 +static irqreturn_t nv_nic_irq_test(int foo, void *data)
  1.3807 +#endif
  1.3808  {
  1.3809  	struct net_device *dev = (struct net_device *) data;
  1.3810 -	struct fe_priv *np = netdev_priv(dev);
  1.3811 +	struct fe_priv *np = get_nvpriv(dev);
  1.3812  	u8 __iomem *base = get_hwbase(dev);
  1.3813  	u32 events;
  1.3814  
  1.3815 -	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
  1.3816 +	dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
  1.3817  
  1.3818  	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
  1.3819  		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
  1.3820 @@ -2605,6 +4036,7 @@ static irqreturn_t nv_nic_irq_test(int f
  1.3821  	return IRQ_RETVAL(1);
  1.3822  }
  1.3823  
  1.3824 +#ifdef CONFIG_PCI_MSI
  1.3825  static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
  1.3826  {
  1.3827  	u8 __iomem *base = get_hwbase(dev);
  1.3828 @@ -2630,12 +4062,15 @@ static void set_msix_vector_map(struct n
  1.3829  	}
  1.3830  	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
  1.3831  }
  1.3832 +#endif
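set_msix_vector_map is now compiled only under CONFIG_PCI_MSI, since it is reachable only from the MSI-X setup paths. As used in nv_request_irq below, it routes each interrupt cause to its own vector after the map registers are cleared:

	writel(0, base + NvRegMSIXMap0);
	writel(0, base + NvRegMSIXMap1);
	set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
	set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
	set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
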
  1.3833  
  1.3834  static int nv_request_irq(struct net_device *dev, int intr_test)
  1.3835  {
  1.3836  	struct fe_priv *np = get_nvpriv(dev);
  1.3837 +	int ret = 1;
  1.3838 +
  1.3839 +#if NVVER > SLES9
  1.3840  	u8 __iomem *base = get_hwbase(dev);
  1.3841 -	int ret = 1;
  1.3842  	int i;
  1.3843  
  1.3844  	if (np->msi_flags & NV_MSI_X_CAPABLE) {
  1.3845 @@ -2646,21 +4081,21 @@ static int nv_request_irq(struct net_dev
  1.3846  			np->msi_flags |= NV_MSI_X_ENABLED;
  1.3847  			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
  1.3848  				/* Request irq for rx handling */
  1.3849 -				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
  1.3850 +				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQ_FLAG, dev->name, dev) != 0) {
  1.3851  					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
  1.3852  					pci_disable_msix(np->pci_dev);
  1.3853  					np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.3854  					goto out_err;
  1.3855  				}
  1.3856  				/* Request irq for tx handling */
  1.3857 -				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
  1.3858 +				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQ_FLAG, dev->name, dev) != 0) {
  1.3859  					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
  1.3860  					pci_disable_msix(np->pci_dev);
  1.3861  					np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.3862  					goto out_free_rx;
  1.3863  				}
  1.3864  				/* Request irq for link and timer handling */
  1.3865 -				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
  1.3866 +				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQ_FLAG, dev->name, dev) != 0) {
  1.3867  					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
  1.3868  					pci_disable_msix(np->pci_dev);
  1.3869  					np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.3870 @@ -2669,15 +4104,19 @@ static int nv_request_irq(struct net_dev
  1.3871  				/* map interrupts to their respective vector */
  1.3872  				writel(0, base + NvRegMSIXMap0);
  1.3873  				writel(0, base + NvRegMSIXMap1);
  1.3874 +#ifdef CONFIG_PCI_MSI
  1.3875  				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
  1.3876  				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
  1.3877  				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
  1.3878 +#endif
  1.3879  			} else {
  1.3880  				/* Request irq for all interrupts */
  1.3881 -				if ((!intr_test &&
  1.3882 -				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
  1.3883 -				    (intr_test &&
  1.3884 -				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
  1.3885 +				if ((!intr_test && np->desc_ver == DESC_VER_3 &&
  1.3886 +							request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
  1.3887 +						(!intr_test && np->desc_ver != DESC_VER_3 &&
  1.3888 +						 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
  1.3889 +						(intr_test &&
  1.3890 +						 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0)) {
  1.3891  					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
  1.3892  					pci_disable_msix(np->pci_dev);
  1.3893  					np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.3894 @@ -2692,14 +4131,17 @@ static int nv_request_irq(struct net_dev
  1.3895  	}
  1.3896  	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
  1.3897  		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
  1.3898 -			pci_intx(np->pci_dev, 0);
  1.3899  			np->msi_flags |= NV_MSI_ENABLED;
  1.3900 -			if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
  1.3901 -			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
  1.3902 +			dev->irq = np->pci_dev->irq;
  1.3903 +			if ((!intr_test && np->desc_ver == DESC_VER_3 &&
  1.3904 +						request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
  1.3905 +					(!intr_test && np->desc_ver != DESC_VER_3 &&
  1.3906 +					 request_irq(np->pci_dev->irq, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
  1.3907 +					(intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0)) {
  1.3908  				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
  1.3909  				pci_disable_msi(np->pci_dev);
  1.3910 -				pci_intx(np->pci_dev, 1);
  1.3911  				np->msi_flags &= ~NV_MSI_ENABLED;
  1.3912 +				dev->irq = np->pci_dev->irq;
  1.3913  				goto out_err;
  1.3914  			}
  1.3915  
  1.3916 @@ -2710,22 +4152,124 @@ static int nv_request_irq(struct net_dev
  1.3917  			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
  1.3918  		}
  1.3919  	}
  1.3920 +#else
  1.3921 +#ifdef CONFIG_PCI_MSI
  1.3922 +	u8 __iomem *base = get_hwbase(dev);
  1.3923 +	int i;
  1.3924 +
  1.3925 +	if (np->msi_flags & NV_MSI_X_CAPABLE) {
  1.3926 +		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
  1.3927 +			np->msi_x_entry[i].entry = i;
  1.3928 +		}
  1.3929 +		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
  1.3930 +			np->msi_flags |= NV_MSI_X_ENABLED;
  1.3931 +			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
  1.3932 +				msi_alloc_vectors(np->pci_dev,(int *)np->msi_x_entry,2);
  1.3933 +				/* Request irq for rx handling */
  1.3934 +				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQ_FLAG, dev->name, dev) != 0) {
  1.3935 +					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
  1.3936 +					pci_disable_msi(np->pci_dev);
  1.3937 +					np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.3938 +					goto out_err;
  1.3939 +				}
  1.3940 +				/* Request irq for tx handling */
  1.3941 +				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQ_FLAG, dev->name, dev) != 0) {
  1.3942 +					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
  1.3943 +					pci_disable_msi(np->pci_dev);
  1.3944 +					np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.3945 +					goto out_free_rx;
  1.3946 +				}
  1.3947 +				/* Request irq for link and timer handling */
  1.3948 +				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQ_FLAG, dev->name, dev) != 0) {
  1.3949 +					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
  1.3950 +					pci_disable_msi(np->pci_dev);
  1.3951 +					np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.3952 +					goto out_free_tx;
  1.3953 +				}
  1.3954 +				/* map interrupts to their respective vector */
  1.3955 +				writel(0, base + NvRegMSIXMap0);
  1.3956 +				writel(0, base + NvRegMSIXMap1);
  1.3957 +#ifdef CONFIG_PCI_MSI
  1.3958 +				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
  1.3959 +				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
  1.3960 +				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
  1.3961 +#endif
  1.3962 +			} else {
  1.3963 +				/* Request irq for all interrupts */
  1.3964 +				if ((!intr_test && np->desc_ver == DESC_VER_3 &&
  1.3965 +							request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
  1.3966 +						(!intr_test && np->desc_ver != DESC_VER_3 &&
  1.3967 +						 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
  1.3968 +						(intr_test &&
  1.3969 +						 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0)) {
  1.3970 +					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
  1.3971 +					pci_disable_msi(np->pci_dev);
  1.3972 +					np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.3973 +					goto out_err;
  1.3974 +				}
  1.3975 +
  1.3976 +				/* map interrupts to vector 0 */
  1.3977 +				writel(0, base + NvRegMSIXMap0);
  1.3978 +				writel(0, base + NvRegMSIXMap1);
  1.3979 +			}
  1.3980 +		}
  1.3981 +	}
  1.3982 +	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
  1.3983 +
  1.3984 +		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
  1.3985 +			np->msi_flags |= NV_MSI_ENABLED;
  1.3986 +			dev->irq = np->pci_dev->irq;
  1.3987 +			if ((!intr_test && np->desc_ver == DESC_VER_3 &&
  1.3988 +						request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
  1.3989 +					(!intr_test && np->desc_ver != DESC_VER_3 &&
  1.3990 +					 request_irq(np->pci_dev->irq, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
  1.3991 +					(intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0)) {
  1.3992 +				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
  1.3993 +				pci_disable_msi(np->pci_dev);
  1.3994 +				np->msi_flags &= ~NV_MSI_ENABLED;
  1.3995 +				dev->irq = np->pci_dev->irq;
  1.3996 +				goto out_err;
  1.3997 +			}
  1.3998 +
  1.3999 +			/* map interrupts to vector 0 */
  1.4000 +			writel(0, base + NvRegMSIMap0);
  1.4001 +			writel(0, base + NvRegMSIMap1);
  1.4002 +			/* enable msi vector 0 */
  1.4003 +			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
  1.4004 +		}
  1.4005 +	}
  1.4006 +#endif
  1.4007 +#endif
  1.4008  	if (ret != 0) {
  1.4009 -		if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
  1.4010 -		    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
  1.4011 +		if ((!intr_test && np->desc_ver == DESC_VER_3 &&
  1.4012 +					request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, IRQ_FLAG, dev->name, dev) != 0) ||
  1.4013 +				(!intr_test && np->desc_ver != DESC_VER_3 &&
  1.4014 +				 request_irq(np->pci_dev->irq, &nv_nic_irq, IRQ_FLAG, dev->name, dev) != 0) ||
  1.4015 +				(intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQ_FLAG, dev->name, dev) != 0))
  1.4016  			goto out_err;
  1.4017  
  1.4018  	}
  1.4019  
  1.4020  	return 0;
  1.4021 +
  1.4022 +#if NVVER > SLES9
  1.4023  out_free_tx:
  1.4024  	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
  1.4025  out_free_rx:
  1.4026  	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
  1.4027 +#else
  1.4028 +#ifdef CONFIG_PCI_MSI
  1.4029 +out_free_tx:
  1.4030 +	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
  1.4031 +out_free_rx:
  1.4032 +	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
  1.4033 +#endif
  1.4034 +#endif
  1.4035  out_err:
  1.4036  	return 1;
  1.4037  }
  1.4038  
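Stripped of the version conditionals, nv_request_irq implements a three-stage fallback: MSI-X (with separate rx/tx/other vectors in throughput mode), then plain MSI, then the legacy INTx line. A sketch with invented helper names:

	static int nv_request_irq_sketch(struct net_device *dev, int intr_test)
	{
		if (nv_try_msix(dev, intr_test) == 0)	/* hypothetical helper */
			return 0;
		if (nv_try_msi(dev, intr_test) == 0)	/* hypothetical helper */
			return 0;
		/* last resort: request_irq() on the plain PCI irq line */
		return nv_try_intx(dev, intr_test);	/* hypothetical helper */
	}
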
  1.4039 +#if NVVER > SLES9
  1.4040  static void nv_free_irq(struct net_device *dev)
  1.4041  {
  1.4042  	struct fe_priv *np = get_nvpriv(dev);
  1.4043 @@ -2741,16 +4285,43 @@ static void nv_free_irq(struct net_devic
  1.4044  		free_irq(np->pci_dev->irq, dev);
  1.4045  		if (np->msi_flags & NV_MSI_ENABLED) {
  1.4046  			pci_disable_msi(np->pci_dev);
  1.4047 -			pci_intx(np->pci_dev, 1);
  1.4048  			np->msi_flags &= ~NV_MSI_ENABLED;
  1.4049  		}
  1.4050  	}
  1.4051  }
  1.4052 +#else
  1.4053 +static void nv_free_irq(struct net_device *dev)
  1.4054 +{
  1.4055 +	struct fe_priv *np = get_nvpriv(dev);
  1.4056 +
  1.4057 +#ifdef CONFIG_PCI_MSI
  1.4058 +	int i;
  1.4059 +
  1.4060 +	if (np->msi_flags & NV_MSI_X_ENABLED) {
  1.4061 +		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
  1.4062 +			free_irq(np->msi_x_entry[i].vector, dev);
  1.4063 +		}
  1.4064 +		pci_disable_msi(np->pci_dev);
  1.4065 +		np->msi_flags &= ~NV_MSI_X_ENABLED;
  1.4066 +	} else {
  1.4067 +		free_irq(np->pci_dev->irq, dev);
  1.4068 +
  1.4069 +		if (np->msi_flags & NV_MSI_ENABLED) {
  1.4070 +			pci_disable_msi(np->pci_dev);
  1.4071 +			np->msi_flags &= ~NV_MSI_ENABLED;
  1.4072 +		}
  1.4073 +	}
  1.4074 +#else
  1.4075 +	free_irq(np->pci_dev->irq, dev);
  1.4076 +#endif
  1.4077 +
  1.4078 +}
  1.4079 +#endif
  1.4080  
  1.4081  static void nv_do_nic_poll(unsigned long data)
  1.4082  {
  1.4083  	struct net_device *dev = (struct net_device *) data;
  1.4084 -	struct fe_priv *np = netdev_priv(dev);
  1.4085 +	struct fe_priv *np = get_nvpriv(dev);
  1.4086  	u8 __iomem *base = get_hwbase(dev);
  1.4087  	u32 mask = 0;
  1.4088  
  1.4089 @@ -2760,115 +4331,239 @@ static void nv_do_nic_poll(unsigned long
  1.4090  	 * nv_nic_irq because that may decide to do otherwise
  1.4091  	 */
  1.4092  
  1.4093 +	spin_lock_irq(&np->timer_lock);
  1.4094  	if (!using_multi_irqs(dev)) {
  1.4095  		if (np->msi_flags & NV_MSI_X_ENABLED)
  1.4096 -			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1.4097 +			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1.4098  		else
  1.4099 -			disable_irq_lockdep(dev->irq);
  1.4100 +			disable_irq(np->pci_dev->irq);
  1.4101  		mask = np->irqmask;
  1.4102  	} else {
  1.4103  		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
  1.4104 -			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1.4105 +			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1.4106  			mask |= NVREG_IRQ_RX_ALL;
  1.4107  		}
  1.4108  		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
  1.4109 -			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
  1.4110 +			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
  1.4111  			mask |= NVREG_IRQ_TX_ALL;
  1.4112  		}
  1.4113  		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
  1.4114 -			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
  1.4115 +			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
  1.4116  			mask |= NVREG_IRQ_OTHER;
  1.4117  		}
  1.4118  	}
  1.4119  	np->nic_poll_irq = 0;
  1.4120  
  1.4121 -	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
  1.4122 +	/* disable_irq() contains synchronize_irq(), thus no irq handler can run now */
  1.4123 +
  1.4124 +	if (np->recover_error) {
  1.4125 +		np->recover_error = 0;
  1.4126 +		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
  1.4127 +		if (netif_running(dev)) {
  1.4128 +#if NVVER > FEDORA5
  1.4129 +			netif_tx_lock_bh(dev);
  1.4130 +#else
  1.4131 +			spin_lock_bh(&dev->xmit_lock);
  1.4132 +#endif
  1.4133 +			spin_lock(&np->lock);
  1.4134 +			/* stop engines */
  1.4135 +			nv_stop_rx(dev);
  1.4136 +			nv_stop_tx(dev);
  1.4137 +			nv_txrx_reset(dev);
  1.4138 +			/* drain rx queue */
  1.4139 +			nv_drain_rx(dev);
  1.4140 +			nv_drain_tx(dev);
  1.4141 +			/* reinit driver view of the rx queue */
  1.4142 +			set_bufsize(dev);
  1.4143 +			if (nv_init_ring(dev)) {
  1.4144 +				if (!np->in_shutdown)
  1.4145 +					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
  1.4146 +			}
  1.4147 +			/* reinit nic view of the rx queue */
  1.4148 +			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
  1.4149 +			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
  1.4150 +			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
  1.4151 +					base + NvRegRingSizes);
  1.4152 +			pci_push(base);
  1.4153 +			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.4154 +			pci_push(base);
  1.4155 +
  1.4156 +			/* restart rx engine */
  1.4157 +			nv_start_rx(dev);
  1.4158 +			nv_start_tx(dev);
  1.4159 +			spin_unlock(&np->lock);
  1.4160 +#if NVVER > FEDORA5
  1.4161 +			netif_tx_unlock_bh(dev);
  1.4162 +#else
  1.4163 +			spin_unlock_bh(&dev->xmit_lock);
  1.4164 +#endif
  1.4165 +		}
  1.4166 +	}
  1.4167  
  1.4168  	writel(mask, base + NvRegIrqMask);
  1.4169  	pci_push(base);
  1.4170  
  1.4171  	if (!using_multi_irqs(dev)) {
  1.4172 -		nv_nic_irq(0, dev, NULL);
  1.4173 +		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.4174 +#if NVVER < FEDORA7
  1.4175 +			nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
  1.4176 +#else
  1.4177 +			nv_nic_irq((int) 0, (void *) data);
  1.4178 +#endif
  1.4179 +		else
  1.4180 +#if NVVER < FEDORA7
  1.4181 +			nv_nic_irq_optimized((int) 0, (void *) data, (struct pt_regs *) NULL);
  1.4182 +#else
  1.4183 +			nv_nic_irq_optimized((int) 0, (void *) data);
  1.4184 +#endif
  1.4185  		if (np->msi_flags & NV_MSI_X_ENABLED)
  1.4186 -			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1.4187 +			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
  1.4188  		else
  1.4189 -			enable_irq_lockdep(dev->irq);
  1.4190 +			enable_irq(np->pci_dev->irq);
  1.4191  	} else {
  1.4192  		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
  1.4193 -			nv_nic_irq_rx(0, dev, NULL);
  1.4194 -			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1.4195 +#if NVVER < FEDORA7
  1.4196 +			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
  1.4197 +#else
  1.4198 +			nv_nic_irq_rx((int) 0, (void *) data);
  1.4199 +#endif
  1.4200 +			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
  1.4201  		}
  1.4202  		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
  1.4203 -			nv_nic_irq_tx(0, dev, NULL);
  1.4204 -			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
  1.4205 +#if NVVER < FEDORA7
  1.4206 +			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
  1.4207 +#else
  1.4208 +			nv_nic_irq_tx((int) 0, (void *) data);
  1.4209 +#endif
  1.4210 +			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
  1.4211  		}
  1.4212  		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
  1.4213 -			nv_nic_irq_other(0, dev, NULL);
  1.4214 -			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
  1.4215 +#if NVVER < FEDORA7
  1.4216 +			nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
  1.4217 +#else
  1.4218 +			nv_nic_irq_other((int) 0, (void *) data);
  1.4219 +#endif
  1.4220 +			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
  1.4221  		}
  1.4222  	}
  1.4223 +	spin_unlock_irq(&np->timer_lock);
  1.4224  }
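/* Editor's note (assumption): the NVVER < FEDORA7 branches track the kernel
 * 2.6.19 change that dropped the pt_regs argument from interrupt handlers:
 *
 *     irqreturn_t handler(int irq, void *dev_id, struct pt_regs *regs); // pre-2.6.19
 *     irqreturn_t handler(int irq, void *dev_id);                       // 2.6.19+
 *
 * nv_do_nic_poll() invokes the ISRs directly, so the calls must match the
 * signature the driver registered with request_irq().
 */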
  1.4225  
  1.4226 +#if NVVER > RHES3
  1.4227  #ifdef CONFIG_NET_POLL_CONTROLLER
  1.4228  static void nv_poll_controller(struct net_device *dev)
  1.4229  {
  1.4230  	nv_do_nic_poll((unsigned long) dev);
  1.4231  }
  1.4232  #endif
  1.4233 +#else
  1.4234 +static void nv_poll_controller(struct net_device *dev)
  1.4235 +{
  1.4236 +	nv_do_nic_poll((unsigned long) dev);
  1.4237 +}
  1.4238 +#endif
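/* Editor's note (assumption): the duplicated body is deliberate. Mainline
 * kernels only provide dev->poll_controller under CONFIG_NET_POLL_CONTROLLER,
 * whereas on RHES3-class kernels the hook is assumed to exist (via vendor
 * netdump patches) without that config symbol, so the helper is built
 * unconditionally there.
 */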
  1.4239  
  1.4240  static void nv_do_stats_poll(unsigned long data)
  1.4241  {
  1.4242  	struct net_device *dev = (struct net_device *) data;
  1.4243 -	struct fe_priv *np = netdev_priv(dev);
  1.4244 +	struct fe_priv *np = get_nvpriv(dev);
  1.4245  	u8 __iomem *base = get_hwbase(dev);
  1.4246  
  1.4247 -	np->estats.tx_bytes += readl(base + NvRegTxCnt);
  1.4248 -	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
  1.4249 -	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
  1.4250 -	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
  1.4251 -	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
  1.4252 -	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
  1.4253 -	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
  1.4254 -	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
  1.4255 -	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
  1.4256 -	np->estats.tx_deferral += readl(base + NvRegTxDef);
  1.4257 -	np->estats.tx_packets += readl(base + NvRegTxFrame);
  1.4258 -	np->estats.tx_pause += readl(base + NvRegTxPause);
  1.4259 -	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
  1.4260 -	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
  1.4261 -	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
  1.4262 -	np->estats.rx_runt += readl(base + NvRegRxRunt);
  1.4263 -	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
  1.4264 -	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
  1.4265 -	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
  1.4266 -	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
  1.4267 -	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
  1.4268 -	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
  1.4269 -	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
  1.4270 -	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
  1.4271 -	np->estats.rx_bytes += readl(base + NvRegRxCnt);
  1.4272 -	np->estats.rx_pause += readl(base + NvRegRxPause);
  1.4273 -	np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
  1.4274 -	np->estats.rx_packets =
  1.4275 -		np->estats.rx_unicast +
  1.4276 -		np->estats.rx_multicast +
  1.4277 -		np->estats.rx_broadcast;
  1.4278 -	np->estats.rx_errors_total =
  1.4279 -		np->estats.rx_crc_errors +
  1.4280 -		np->estats.rx_over_errors +
  1.4281 -		np->estats.rx_frame_error +
  1.4282 -		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
  1.4283 -		np->estats.rx_late_collision +
  1.4284 -		np->estats.rx_runt +
  1.4285 -		np->estats.rx_frame_too_long;
  1.4286 -
  1.4287 -	if (!np->in_shutdown)
  1.4288 +	spin_lock_irq(&np->lock);
  1.4289 +
  1.4290 +	np->estats.tx_dropped = np->stats.tx_dropped;
  1.4291 +	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
  1.4292 +		np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
  1.4293 +		np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
  1.4294 +		np->estats.tx_bytes += readl(base + NvRegTxCnt);
  1.4295 +		np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
  1.4296 +		np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
  1.4297 +		np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
  1.4298 +		np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
  1.4299 +		np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
  1.4300 +		np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
  1.4301 +		np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
  1.4302 +		np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
  1.4303 +		np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
  1.4304 +		np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
  1.4305 +		np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
  1.4306 +		np->estats.rx_runt += readl(base + NvRegRxRunt);
  1.4307 +		np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
  1.4308 +		np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
  1.4309 +		np->estats.rx_length_error += readl(base + NvRegRxLenErr);
  1.4310 +		np->estats.rx_unicast += readl(base + NvRegRxUnicast);
  1.4311 +		np->estats.rx_multicast += readl(base + NvRegRxMulticast);
  1.4312 +		np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
  1.4313 +		np->estats.rx_packets = 
  1.4314 +			np->estats.rx_unicast + 
  1.4315 +			np->estats.rx_multicast + 
  1.4316 +			np->estats.rx_broadcast;
  1.4317 +		np->estats.rx_errors_total = 
  1.4318 +			np->estats.rx_crc_errors +
  1.4319 +			np->estats.rx_over_errors +
  1.4320 +			np->estats.rx_frame_error +
  1.4321 +			(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
  1.4322 +			np->estats.rx_late_collision +
  1.4323 +			np->estats.rx_runt +
  1.4324 +			np->estats.rx_frame_too_long +
  1.4325 +			np->rx_len_errors;
  1.4326 +
  1.4327 +		if (np->driver_data & DEV_HAS_STATISTICS_V2) {
  1.4328 +			np->estats.tx_deferral += readl(base + NvRegTxDef);
  1.4329 +			np->estats.tx_packets += readl(base + NvRegTxFrame);
  1.4330 +			np->estats.rx_bytes += readl(base + NvRegRxCnt);
  1.4331 +			np->estats.tx_pause += readl(base + NvRegTxPause);
  1.4332 +			np->estats.rx_pause += readl(base + NvRegRxPause);
  1.4333 +			np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
  1.4334 +		}
  1.4335 +
  1.4336 +		/* copy to net_device stats */
  1.4337 +		np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
  1.4338 +		np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
  1.4339 +		np->stats.tx_bytes = np->estats.tx_bytes;
  1.4340 +		np->stats.rx_crc_errors = np->estats.rx_crc_errors;
  1.4341 +		np->stats.rx_over_errors = np->estats.rx_over_errors;
  1.4342 +		np->stats.rx_packets = np->estats.rx_packets;
  1.4343 +		np->stats.rx_errors = np->estats.rx_errors_total;
  1.4344 +
  1.4345 +	} else {
  1.4346 +		np->estats.tx_packets = np->stats.tx_packets;
  1.4347 +		np->estats.tx_fifo_errors = np->stats.tx_fifo_errors;
  1.4348 +		np->estats.tx_carrier_errors = np->stats.tx_carrier_errors;
  1.4349 +		np->estats.tx_bytes = np->stats.tx_bytes;
  1.4350 +		np->estats.rx_bytes = np->stats.rx_bytes;
  1.4351 +		np->estats.rx_crc_errors = np->stats.rx_crc_errors;
  1.4352 +		np->estats.rx_over_errors = np->stats.rx_over_errors;
  1.4353 +		np->estats.rx_packets = np->stats.rx_packets;
  1.4354 +		np->estats.rx_errors_total = np->stats.rx_errors;
  1.4355 +	}
  1.4356 +
  1.4357 +	if (!np->in_shutdown && netif_running(dev))
  1.4358  		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
  1.4359 +	spin_unlock_irq(&np->lock);
  1.4360 +}
  1.4361 +
  1.4362 +/*
  1.4363 + * nv_get_stats: dev->get_stats function
  1.4364 + * Get latest stats value from the nic.
  1.4365 + * Called with read_lock(&dev_base_lock) held for read -
  1.4366 + * only synchronized against unregister_netdevice.
  1.4367 + */
  1.4368 +static struct net_device_stats *nv_get_stats(struct net_device *dev)
  1.4369 +{
  1.4370 +	struct fe_priv *np = get_nvpriv(dev);
  1.4371 +
  1.4372 +	/* It seems that the nic always generates interrupts and doesn't
  1.4373 +	 * accumulate errors internally. Thus the current values in np->stats
  1.4374 +	 * are already up to date.
  1.4375 +	 */
  1.4376 +	nv_do_stats_poll((unsigned long)dev);
  1.4377 +	return &np->stats;
  1.4378  }
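/* Usage sketch: per the comment above, this is the dev->get_stats hook, so in
 * nv_probe() one would expect (hedged, the wiring is not shown in this hunk):
 *
 *     dev->get_stats = nv_get_stats;
 *
 * Every stats read (ifconfig, /proc/net/dev) then triggers a fresh hardware
 * counter poll through nv_do_stats_poll().
 */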
  1.4379  
  1.4380  static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  1.4381  {
  1.4382 -	struct fe_priv *np = netdev_priv(dev);
  1.4383 +	struct fe_priv *np = get_nvpriv(dev);
  1.4384  	strcpy(info->driver, "forcedeth");
  1.4385  	strcpy(info->version, FORCEDETH_VERSION);
  1.4386  	strcpy(info->bus_info, pci_name(np->pci_dev));
  1.4387 @@ -2876,7 +4571,7 @@ static void nv_get_drvinfo(struct net_de
  1.4388  
  1.4389  static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
  1.4390  {
  1.4391 -	struct fe_priv *np = netdev_priv(dev);
  1.4392 +	struct fe_priv *np = get_nvpriv(dev);
  1.4393  	wolinfo->supported = WAKE_MAGIC;
  1.4394  
  1.4395  	spin_lock_irq(&np->lock);
  1.4396 @@ -2887,7 +4582,7 @@ static void nv_get_wol(struct net_device
  1.4397  
  1.4398  static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
  1.4399  {
  1.4400 -	struct fe_priv *np = netdev_priv(dev);
  1.4401 +	struct fe_priv *np = get_nvpriv(dev);
  1.4402  	u8 __iomem *base = get_hwbase(dev);
  1.4403  	u32 flags = 0;
  1.4404  
  1.4405 @@ -2907,7 +4602,7 @@ static int nv_set_wol(struct net_device 
  1.4406  
  1.4407  static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
  1.4408  {
  1.4409 -	struct fe_priv *np = netdev_priv(dev);
  1.4410 +	struct fe_priv *np = get_nvpriv(dev);
  1.4411  	int adv;
  1.4412  
  1.4413  	spin_lock_irq(&np->lock);
  1.4414 @@ -2926,15 +4621,15 @@ static int nv_get_settings(struct net_de
  1.4415  
  1.4416  	if (netif_carrier_ok(dev)) {
  1.4417  		switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
  1.4418 -		case NVREG_LINKSPEED_10:
  1.4419 -			ecmd->speed = SPEED_10;
  1.4420 -			break;
  1.4421 -		case NVREG_LINKSPEED_100:
  1.4422 -			ecmd->speed = SPEED_100;
  1.4423 -			break;
  1.4424 -		case NVREG_LINKSPEED_1000:
  1.4425 -			ecmd->speed = SPEED_1000;
  1.4426 -			break;
  1.4427 +			case NVREG_LINKSPEED_10:
  1.4428 +				ecmd->speed = SPEED_10;
  1.4429 +				break;
  1.4430 +			case NVREG_LINKSPEED_100:
  1.4431 +				ecmd->speed = SPEED_100;
  1.4432 +				break;
  1.4433 +			case NVREG_LINKSPEED_1000:
  1.4434 +				ecmd->speed = SPEED_1000;
  1.4435 +				break;
  1.4436  		}
  1.4437  		ecmd->duplex = DUPLEX_HALF;
  1.4438  		if (np->duplex)
  1.4439 @@ -2965,9 +4660,9 @@ static int nv_get_settings(struct net_de
  1.4440  		}
  1.4441  	}
  1.4442  	ecmd->supported = (SUPPORTED_Autoneg |
  1.4443 -		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
  1.4444 -		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
  1.4445 -		SUPPORTED_MII);
  1.4446 +			SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
  1.4447 +			SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
  1.4448 +			SUPPORTED_MII);
  1.4449  	if (np->gigabit == PHY_GIGABIT)
  1.4450  		ecmd->supported |= SUPPORTED_1000baseT_Full;
  1.4451  
  1.4452 @@ -2981,8 +4676,9 @@ static int nv_get_settings(struct net_de
  1.4453  
  1.4454  static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
  1.4455  {
  1.4456 -	struct fe_priv *np = netdev_priv(dev);
  1.4457 -
  1.4458 +	struct fe_priv *np = get_nvpriv(dev);
  1.4459 +
  1.4460 +	dprintk(KERN_DEBUG "%s: nv_set_settings\n", dev->name);
  1.4461  	if (ecmd->port != PORT_MII)
  1.4462  		return -EINVAL;
  1.4463  	if (ecmd->transceiver != XCVR_EXTERNAL)
  1.4464 @@ -2996,7 +4692,7 @@ static int nv_set_settings(struct net_de
  1.4465  		u32 mask;
  1.4466  
  1.4467  		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
  1.4468 -			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
  1.4469 +			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
  1.4470  		if (np->gigabit == PHY_GIGABIT)
  1.4471  			mask |= ADVERTISED_1000baseT_Full;
  1.4472  
  1.4473 @@ -3017,14 +4713,27 @@ static int nv_set_settings(struct net_de
  1.4474  
  1.4475  	netif_carrier_off(dev);
  1.4476  	if (netif_running(dev)) {
  1.4477 -		nv_disable_irq(dev);
  1.4478 +		nv_disable_hw_interrupts(dev, np->irqmask);
  1.4479 +#if NVVER > RHES3
  1.4480 +		synchronize_irq(np->pci_dev->irq);
  1.4481 +#else
  1.4482 +		synchronize_irq();
  1.4483 +#endif
  1.4484 +#if NVVER > FEDORA5
  1.4485  		netif_tx_lock_bh(dev);
  1.4486 +#else
  1.4487 +		spin_lock_bh(&dev->xmit_lock);
  1.4488 +#endif
  1.4489  		spin_lock(&np->lock);
  1.4490  		/* stop engines */
  1.4491  		nv_stop_rx(dev);
  1.4492  		nv_stop_tx(dev);
  1.4493  		spin_unlock(&np->lock);
  1.4494 +#if NVVER > FEDORA5
  1.4495  		netif_tx_unlock_bh(dev);
  1.4496 +#else
  1.4497 +		spin_unlock_bh(&dev->xmit_lock);
  1.4498 +#endif
  1.4499  	}
  1.4500  
  1.4501  	if (ecmd->autoneg == AUTONEG_ENABLE) {
  1.4502 @@ -3035,14 +4744,22 @@ static int nv_set_settings(struct net_de
  1.4503  		/* advertise only what has been requested */
  1.4504  		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
  1.4505  		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
  1.4506 -		if (ecmd->advertising & ADVERTISED_10baseT_Half)
  1.4507 +		if (ecmd->advertising & ADVERTISED_10baseT_Half) {
  1.4508  			adv |= ADVERTISE_10HALF;
  1.4509 -		if (ecmd->advertising & ADVERTISED_10baseT_Full)
  1.4510 +			np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
  1.4511 +		}
  1.4512 +		if (ecmd->advertising & ADVERTISED_10baseT_Full) {
  1.4513  			adv |= ADVERTISE_10FULL;
  1.4514 -		if (ecmd->advertising & ADVERTISED_100baseT_Half)
  1.4515 +			np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
  1.4516 +		}
  1.4517 +		if (ecmd->advertising & ADVERTISED_100baseT_Half) {
  1.4518  			adv |= ADVERTISE_100HALF;
  1.4519 -		if (ecmd->advertising & ADVERTISED_100baseT_Full)
  1.4520 +			np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
  1.4521 +		}
  1.4522 +		if (ecmd->advertising & ADVERTISED_100baseT_Full) {
  1.4523  			adv |= ADVERTISE_100FULL;
  1.4524 +			np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
  1.4525 +		}
  1.4526  		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
  1.4527  			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  1.4528  		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
  1.4529 @@ -3052,17 +4769,34 @@ static int nv_set_settings(struct net_de
  1.4530  		if (np->gigabit == PHY_GIGABIT) {
  1.4531  			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
  1.4532  			adv &= ~ADVERTISE_1000FULL;
  1.4533 -			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
  1.4534 +			if (ecmd->advertising & ADVERTISED_1000baseT_Full) {
  1.4535  				adv |= ADVERTISE_1000FULL;
  1.4536 +				np->speed_duplex = NV_SPEED_DUPLEX_1000_FULL_DUPLEX;
  1.4537 +			}
  1.4538  			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
  1.4539 +
  1.4540 +			if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full|ADVERTISED_1000baseT_Full))
  1.4541 +				np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
  1.4542 +		} else {
  1.4543 +			if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full))
  1.4544 +				np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
  1.4545  		}
  1.4546  
  1.4547  		if (netif_running(dev))
  1.4548  			printk(KERN_INFO "%s: link down.\n", dev->name);
  1.4549  		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1.4550 -		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
  1.4551 -		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
  1.4552 -
  1.4553 +		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
  1.4554 +			bmcr |= BMCR_ANENABLE;
  1.4555 +			/* reset the phy in order for settings to stick,
  1.4556 +			 * and cause autoneg to start */
  1.4557 +			if (phy_reset(dev, bmcr)) {
  1.4558 +				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
  1.4559 +				return -EINVAL;
  1.4560 +			}
  1.4561 +		} else {
  1.4562 +			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
  1.4563 +			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
  1.4564 +		}
  1.4565  	} else {
  1.4566  		int adv, bmcr;
  1.4567  
  1.4568 @@ -3070,14 +4804,22 @@ static int nv_set_settings(struct net_de
  1.4569  
  1.4570  		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
  1.4571  		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
  1.4572 -		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
  1.4573 +		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) {
  1.4574  			adv |= ADVERTISE_10HALF;
  1.4575 -		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
  1.4576 +			np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
  1.4577 +		}
  1.4578 +		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) {
  1.4579  			adv |= ADVERTISE_10FULL;
  1.4580 -		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
  1.4581 +			np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
  1.4582 +		}
  1.4583 +		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) {
  1.4584  			adv |= ADVERTISE_100HALF;
  1.4585 -		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
  1.4586 +			np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
  1.4587 +		}
  1.4588 +		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) {
  1.4589  			adv |= ADVERTISE_100FULL;
  1.4590 +			np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
  1.4591 +		}
  1.4592  		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
  1.4593  		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
  1.4594  			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  1.4595 @@ -3102,24 +4844,26 @@ static int nv_set_settings(struct net_de
  1.4596  			bmcr |= BMCR_FULLDPLX;
  1.4597  		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
  1.4598  			bmcr |= BMCR_SPEED100;
  1.4599 -		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
  1.4600  		if (np->phy_oui == PHY_OUI_MARVELL) {
  1.4601 -			/* reset the phy */
  1.4602 -			if (phy_reset(dev)) {
  1.4603 +			/* reset the phy in order for forced mode settings to stick */
  1.4604 +			if (phy_reset(dev, bmcr)) {
  1.4605  				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
  1.4606  				return -EINVAL;
  1.4607  			}
  1.4608 -		} else if (netif_running(dev)) {
  1.4609 -			/* Wait a bit and then reconfigure the nic. */
  1.4610 -			udelay(10);
  1.4611 -			nv_linkchange(dev);
  1.4612 +		} else {
  1.4613 +			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
  1.4614 +			if (netif_running(dev)) {
  1.4615 +				/* Wait a bit and then reconfigure the nic. */
  1.4616 +				udelay(10);
  1.4617 +				nv_linkchange(dev);
  1.4618 +			}
  1.4619  		}
  1.4620  	}
  1.4621  
  1.4622  	if (netif_running(dev)) {
  1.4623  		nv_start_rx(dev);
  1.4624  		nv_start_tx(dev);
  1.4625 -		nv_enable_irq(dev);
  1.4626 +		nv_enable_hw_interrupts(dev, np->irqmask);
  1.4627  	}
  1.4628  
  1.4629  	return 0;
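/* Editor's note: phy_reset() now takes the target BMCR value (compare the
 * removed phy_reset(dev) call above); presumably the reset helper writes
 * that BMCR itself, which is why the Marvell paths skip the explicit
 * mii_rw(dev, np->phyaddr, MII_BMCR, bmcr) that the other paths keep.
 */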
  1.4630 @@ -3129,13 +4873,13 @@ static int nv_set_settings(struct net_de
  1.4631  
  1.4632  static int nv_get_regs_len(struct net_device *dev)
  1.4633  {
  1.4634 -	struct fe_priv *np = netdev_priv(dev);
  1.4635 +	struct fe_priv *np = get_nvpriv(dev);
  1.4636  	return np->register_size;
  1.4637  }
  1.4638  
  1.4639  static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
  1.4640  {
  1.4641 -	struct fe_priv *np = netdev_priv(dev);
  1.4642 +	struct fe_priv *np = get_nvpriv(dev);
  1.4643  	u8 __iomem *base = get_hwbase(dev);
  1.4644  	u32 *rbuf = buf;
  1.4645  	int i;
  1.4646 @@ -3149,7 +4893,7 @@ static void nv_get_regs(struct net_devic
  1.4647  
  1.4648  static int nv_nway_reset(struct net_device *dev)
  1.4649  {
  1.4650 -	struct fe_priv *np = netdev_priv(dev);
  1.4651 +	struct fe_priv *np = get_nvpriv(dev);
  1.4652  	int ret;
  1.4653  
  1.4654  	if (np->autoneg) {
  1.4655 @@ -3158,19 +4902,36 @@ static int nv_nway_reset(struct net_devi
  1.4656  		netif_carrier_off(dev);
  1.4657  		if (netif_running(dev)) {
  1.4658  			nv_disable_irq(dev);
  1.4659 +#if NVVER > FEDORA5
  1.4660  			netif_tx_lock_bh(dev);
  1.4661 +#else
  1.4662 +			spin_lock_bh(&dev->xmit_lock);
  1.4663 +#endif
  1.4664  			spin_lock(&np->lock);
  1.4665  			/* stop engines */
  1.4666  			nv_stop_rx(dev);
  1.4667  			nv_stop_tx(dev);
  1.4668  			spin_unlock(&np->lock);
  1.4669 +#if NVVER > FEDORA5
  1.4670  			netif_tx_unlock_bh(dev);
  1.4671 +#else
  1.4672 +			spin_unlock_bh(&dev->xmit_lock);
  1.4673 +#endif
  1.4674  			printk(KERN_INFO "%s: link down.\n", dev->name);
  1.4675  		}
  1.4676  
  1.4677  		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1.4678 -		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
  1.4679 -		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
  1.4680 +		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
  1.4681 +			bmcr |= BMCR_ANENABLE;
  1.4682 +			/* reset the phy in order for settings to stick */
  1.4683 +			if (phy_reset(dev, bmcr)) {
  1.4684 +				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
  1.4685 +				return -EINVAL;
  1.4686 +			}
  1.4687 +		} else {
  1.4688 +			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
  1.4689 +			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
  1.4690 +		}
  1.4691  
  1.4692  		if (netif_running(dev)) {
  1.4693  			nv_start_rx(dev);
  1.4694 @@ -3185,19 +4946,9 @@ static int nv_nway_reset(struct net_devi
  1.4695  	return ret;
  1.4696  }
  1.4697  
  1.4698 -static int nv_set_tso(struct net_device *dev, u32 value)
  1.4699 -{
  1.4700 -	struct fe_priv *np = netdev_priv(dev);
  1.4701 -
  1.4702 -	if ((np->driver_data & DEV_HAS_CHECKSUM))
  1.4703 -		return ethtool_op_set_tso(dev, value);
  1.4704 -	else
  1.4705 -		return -EOPNOTSUPP;
  1.4706 -}
  1.4707 -
  1.4708  static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
  1.4709  {
  1.4710 -	struct fe_priv *np = netdev_priv(dev);
  1.4711 +	struct fe_priv *np = get_nvpriv(dev);
  1.4712  
  1.4713  	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
  1.4714  	ring->rx_mini_max_pending = 0;
  1.4715 @@ -3212,66 +4963,62 @@ static void nv_get_ringparam(struct net_
  1.4716  
  1.4717  static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
  1.4718  {
  1.4719 -	struct fe_priv *np = netdev_priv(dev);
  1.4720 +	struct fe_priv *np = get_nvpriv(dev);
  1.4721  	u8 __iomem *base = get_hwbase(dev);
  1.4722 -	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
  1.4723 +	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
  1.4724  	dma_addr_t ring_addr;
  1.4725  
  1.4726  	if (ring->rx_pending < RX_RING_MIN ||
  1.4727 -	    ring->tx_pending < TX_RING_MIN ||
  1.4728 -	    ring->rx_mini_pending != 0 ||
  1.4729 -	    ring->rx_jumbo_pending != 0 ||
  1.4730 -	    (np->desc_ver == DESC_VER_1 &&
  1.4731 -	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
  1.4732 -	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
  1.4733 -	    (np->desc_ver != DESC_VER_1 &&
  1.4734 -	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
  1.4735 -	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
  1.4736 +			ring->tx_pending < TX_RING_MIN ||
  1.4737 +			ring->rx_mini_pending != 0 ||
  1.4738 +			ring->rx_jumbo_pending != 0 ||
  1.4739 +			(np->desc_ver == DESC_VER_1 && 
  1.4740 +			 (ring->rx_pending > RING_MAX_DESC_VER_1 || 
  1.4741 +			  ring->tx_pending > RING_MAX_DESC_VER_1)) ||
  1.4742 +			(np->desc_ver != DESC_VER_1 && 
  1.4743 +			 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 
  1.4744 +			  ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
  1.4745  		return -EINVAL;
  1.4746  	}
  1.4747  
  1.4748  	/* allocate new rings */
  1.4749  	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.4750  		rxtx_ring = pci_alloc_consistent(np->pci_dev,
  1.4751 -					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
  1.4752 -					    &ring_addr);
  1.4753 +				sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
  1.4754 +				&ring_addr);
  1.4755  	} else {
  1.4756  		rxtx_ring = pci_alloc_consistent(np->pci_dev,
  1.4757 -					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
  1.4758 -					    &ring_addr);
  1.4759 -	}
  1.4760 -	rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
  1.4761 -	rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
  1.4762 -	tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
  1.4763 -	tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
  1.4764 -	tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
  1.4765 -	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
  1.4766 +				sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
  1.4767 +				&ring_addr);
  1.4768 +	}
  1.4769 +	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
  1.4770 +	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
  1.4771 +
  1.4772 +	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
  1.4773  		/* fall back to old rings */
  1.4774  		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.4775  			if(rxtx_ring)
  1.4776  				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
  1.4777 -						    rxtx_ring, ring_addr);
  1.4778 +						rxtx_ring, ring_addr);
  1.4779  		} else {
  1.4780  			if (rxtx_ring)
  1.4781  				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
  1.4782 -						    rxtx_ring, ring_addr);
  1.4783 +						rxtx_ring, ring_addr);
  1.4784  		}
  1.4785  		if (rx_skbuff)
  1.4786  			kfree(rx_skbuff);
  1.4787 -		if (rx_dma)
  1.4788 -			kfree(rx_dma);
  1.4789  		if (tx_skbuff)
  1.4790  			kfree(tx_skbuff);
  1.4791 -		if (tx_dma)
  1.4792 -			kfree(tx_dma);
  1.4793 -		if (tx_dma_len)
  1.4794 -			kfree(tx_dma_len);
  1.4795  		goto exit;
  1.4796  	}
  1.4797  
  1.4798  	if (netif_running(dev)) {
  1.4799  		nv_disable_irq(dev);
  1.4800 +#if NVVER > FEDORA5
  1.4801  		netif_tx_lock_bh(dev);
  1.4802 +#else
  1.4803 +		spin_lock_bh(&dev->xmit_lock);
  1.4804 +#endif
  1.4805  		spin_lock(&np->lock);
  1.4806  		/* stop engines */
  1.4807  		nv_stop_rx(dev);
  1.4808 @@ -3287,8 +5034,8 @@ static int nv_set_ringparam(struct net_d
  1.4809  	/* set new values */
  1.4810  	np->rx_ring_size = ring->rx_pending;
  1.4811  	np->tx_ring_size = ring->tx_pending;
  1.4812 -	np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
  1.4813 -	np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
  1.4814 +	np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
  1.4815 +	np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
  1.4816  	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.4817  		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
  1.4818  		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
  1.4819 @@ -3296,18 +5043,12 @@ static int nv_set_ringparam(struct net_d
  1.4820  		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
  1.4821  		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
  1.4822  	}
  1.4823 -	np->rx_skbuff = (struct sk_buff**)rx_skbuff;
  1.4824 -	np->rx_dma = (dma_addr_t*)rx_dma;
  1.4825 -	np->tx_skbuff = (struct sk_buff**)tx_skbuff;
  1.4826 -	np->tx_dma = (dma_addr_t*)tx_dma;
  1.4827 -	np->tx_dma_len = (unsigned int*)tx_dma_len;
  1.4828 +	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
  1.4829 +	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
  1.4830  	np->ring_addr = ring_addr;
  1.4831  
  1.4832 -	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
  1.4833 -	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
  1.4834 -	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
  1.4835 -	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
  1.4836 -	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
  1.4837 +	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
  1.4838 +	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
  1.4839  
  1.4840  	if (netif_running(dev)) {
  1.4841  		/* reinit driver view of the queues */
  1.4842 @@ -3321,7 +5062,7 @@ static int nv_set_ringparam(struct net_d
  1.4843  		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
  1.4844  		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
  1.4845  		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
  1.4846 -			base + NvRegRingSizes);
  1.4847 +				base + NvRegRingSizes);
  1.4848  		pci_push(base);
  1.4849  		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.4850  		pci_push(base);
  1.4851 @@ -3330,7 +5071,11 @@ static int nv_set_ringparam(struct net_d
  1.4852  		nv_start_rx(dev);
  1.4853  		nv_start_tx(dev);
  1.4854  		spin_unlock(&np->lock);
  1.4855 +#if NVVER > FEDORA5
  1.4856  		netif_tx_unlock_bh(dev);
  1.4857 +#else
  1.4858 +		spin_unlock_bh(&dev->xmit_lock);
  1.4859 +#endif
  1.4860  		nv_enable_irq(dev);
  1.4861  	}
  1.4862  	return 0;
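/* Editor's note: nv_set_ringparam() allocates the replacement rings and
 * nv_skb_map arrays before touching the live device; only once every
 * allocation has succeeded does it stop the engines and swap pointers under
 * np->lock, so an allocation failure leaves the old rings fully intact.
 */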
  1.4863 @@ -3340,7 +5085,7 @@ exit:
  1.4864  
  1.4865  static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
  1.4866  {
  1.4867 -	struct fe_priv *np = netdev_priv(dev);
  1.4868 +	struct fe_priv *np = get_nvpriv(dev);
  1.4869  
  1.4870  	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
  1.4871  	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
  1.4872 @@ -3349,13 +5094,13 @@ static void nv_get_pauseparam(struct net
  1.4873  
  1.4874  static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
  1.4875  {
  1.4876 -	struct fe_priv *np = netdev_priv(dev);
  1.4877 +	struct fe_priv *np = get_nvpriv(dev);
  1.4878  	int adv, bmcr;
  1.4879  
  1.4880  	if ((!np->autoneg && np->duplex == 0) ||
  1.4881 -	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
  1.4882 -		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
  1.4883 -		       dev->name);
  1.4884 +			(np->autoneg && !pause->autoneg && np->duplex == 0)) {
  1.4885 +		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 
  1.4886 +				dev->name);
  1.4887  		return -EINVAL;
  1.4888  	}
  1.4889  	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
  1.4890 @@ -3366,13 +5111,21 @@ static int nv_set_pauseparam(struct net_
  1.4891  	netif_carrier_off(dev);
  1.4892  	if (netif_running(dev)) {
  1.4893  		nv_disable_irq(dev);
  1.4894 +#if NVVER > FEDORA5
  1.4895  		netif_tx_lock_bh(dev);
  1.4896 +#else
  1.4897 +		spin_lock_bh(&dev->xmit_lock);
  1.4898 +#endif
  1.4899  		spin_lock(&np->lock);
  1.4900  		/* stop engines */
  1.4901  		nv_stop_rx(dev);
  1.4902  		nv_stop_tx(dev);
  1.4903  		spin_unlock(&np->lock);
  1.4904 +#if NVVER > FEDORA5
  1.4905  		netif_tx_unlock_bh(dev);
  1.4906 +#else
  1.4907 +		spin_unlock_bh(&dev->xmit_lock);
  1.4908 +#endif
  1.4909  	}
  1.4910  
  1.4911  	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
  1.4912 @@ -3420,31 +5173,26 @@ static int nv_set_pauseparam(struct net_
  1.4913  
  1.4914  static u32 nv_get_rx_csum(struct net_device *dev)
  1.4915  {
  1.4916 -	struct fe_priv *np = netdev_priv(dev);
  1.4917 -	return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
  1.4918 +	struct fe_priv *np = get_nvpriv(dev);
  1.4919 +	return (np->rx_csum) != 0;
  1.4920  }
  1.4921  
  1.4922  static int nv_set_rx_csum(struct net_device *dev, u32 data)
  1.4923  {
  1.4924 -	struct fe_priv *np = netdev_priv(dev);
  1.4925 +	struct fe_priv *np = get_nvpriv(dev);
  1.4926  	u8 __iomem *base = get_hwbase(dev);
  1.4927  	int retcode = 0;
  1.4928  
  1.4929  	if (np->driver_data & DEV_HAS_CHECKSUM) {
  1.4930  
  1.4931 -		if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
  1.4932 -		    (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
  1.4933 -			/* already set or unset */
  1.4934 -			return 0;
  1.4935 -		}
  1.4936 -
  1.4937  		if (data) {
  1.4938 +			np->rx_csum = 1;
  1.4939  			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
  1.4940 -		} else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
  1.4941 -			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
  1.4942  		} else {
  1.4943 -			printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
  1.4944 -			return -EINVAL;
  1.4945 +			np->rx_csum = 0;
  1.4946 +			/* vlan is dependent on rx checksum offload */
  1.4947 +			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
  1.4948 +				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
  1.4949  		}
  1.4950  
  1.4951  		if (netif_running(dev)) {
  1.4952 @@ -3459,39 +5207,85 @@ static int nv_set_rx_csum(struct net_dev
  1.4953  	return retcode;
  1.4954  }
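/* Editor's note: rx checksum state now lives in np->rx_csum instead of being
 * inferred from NVREG_TXRXCTL_RXCHECK, because the RXCHECK bit has to stay
 * set while VLAN stripping is active even when the user disables rx
 * checksumming (see the matching logic in nv_vlan_rx_register() below).
 */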
  1.4955  
  1.4956 -static int nv_set_tx_csum(struct net_device *dev, u32 data)
  1.4957 +#ifdef NETIF_F_TSO
  1.4958 +static int nv_set_tso(struct net_device *dev, u32 data)
  1.4959  {
  1.4960 -	struct fe_priv *np = netdev_priv(dev);
  1.4961 -
  1.4962 -	if (np->driver_data & DEV_HAS_CHECKSUM)
  1.4963 -		return ethtool_op_set_tx_hw_csum(dev, data);
  1.4964 -	else
  1.4965 -		return -EOPNOTSUPP;
  1.4966 +	struct fe_priv *np = get_nvpriv(dev);
  1.4967 +
  1.4968 +	if (np->driver_data & DEV_HAS_CHECKSUM) {
  1.4969 +#if NVVER < SUSE10
  1.4970 +		if (data) {
  1.4971 +			if (ethtool_op_get_sg(dev) == 0)
  1.4972 +				return -EINVAL;
  1.4973 +		}
  1.4974 +#endif
  1.4975 +		return ethtool_op_set_tso(dev, data);
  1.4976 +	} else
  1.4977 +		return -EINVAL;
  1.4978  }
  1.4979 +#endif
  1.4980  
  1.4981  static int nv_set_sg(struct net_device *dev, u32 data)
  1.4982  {
  1.4983 -	struct fe_priv *np = netdev_priv(dev);
  1.4984 -
  1.4985 +	struct fe_priv *np = get_nvpriv(dev);
  1.4986 +
  1.4987 +	if (np->driver_data & DEV_HAS_CHECKSUM) {
  1.4988 +#if NVVER < SUSE10
  1.4989 +		if (data) {
  1.4990 +			if (ethtool_op_get_tx_csum(dev) == 0)
  1.4991 +				return -EINVAL;
  1.4992 +		}
  1.4993 +#ifdef NETIF_F_TSO
  1.4994 +		if (!data)
  1.4995 +			/* set tso off */
  1.4996 +			nv_set_tso(dev, data);
  1.4997 +#endif
  1.4998 +#endif
  1.4999 +		return ethtool_op_set_sg(dev, data);
  1.5000 +	} else
  1.5001 +		return -EINVAL;
  1.5002 +}
  1.5003 +
  1.5004 +static int nv_set_tx_csum(struct net_device *dev, u32 data)
  1.5005 +{
  1.5006 +	struct fe_priv *np = get_nvpriv(dev);
  1.5007 +
  1.5008 +#if NVVER < SUSE10
  1.5009 +	/* set sg off if tx off */
  1.5010 +	if (!data)
  1.5011 +		nv_set_sg(dev, data);
  1.5012 +#endif
  1.5013  	if (np->driver_data & DEV_HAS_CHECKSUM)
  1.5014 -		return ethtool_op_set_sg(dev, data);
  1.5015 +#if NVVER > RHES4 
  1.5016 +		return ethtool_op_set_tx_hw_csum(dev, data);
  1.5017 +#else
  1.5018 +	{
  1.5019 +		if (data)
  1.5020 +			dev->features |= NETIF_F_IP_CSUM;
  1.5021 +		else
  1.5022 +			dev->features &= ~NETIF_F_IP_CSUM;
  1.5023 +		return 0;
  1.5024 +	}
  1.5025 +#endif
  1.5026  	else
  1.5027 -		return -EOPNOTSUPP;
  1.5028 +		return -EINVAL;
  1.5029  }
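/* Editor's note: on pre-SUSE10 kernels the three offloads form a dependency
 * chain, mirroring checks that later kernels enforce inside ethtool itself:
 *
 *     TSO  requires  scatter-gather  requires  tx checksum
 *
 * Disabling tx checksum therefore cascades through nv_set_sg() into
 * nv_set_tso(), and enabling a feature returns -EINVAL unless its
 * prerequisite is already on.
 */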
  1.5030  
  1.5031  static int nv_get_stats_count(struct net_device *dev)
  1.5032  {
  1.5033 -	struct fe_priv *np = netdev_priv(dev);
  1.5034 -
  1.5035 -	if (np->driver_data & DEV_HAS_STATISTICS)
  1.5036 -		return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
  1.5037 +	struct fe_priv *np = get_nvpriv(dev);
  1.5038 +
  1.5039 +	if (np->driver_data & DEV_HAS_STATISTICS_V1)
  1.5040 +		return NV_DEV_STATISTICS_V1_COUNT;
  1.5041 +	else if (np->driver_data & DEV_HAS_STATISTICS_V2)
  1.5042 +		return NV_DEV_STATISTICS_V2_COUNT;
  1.5043  	else
  1.5044 -		return 0;
  1.5045 +		return NV_DEV_STATISTICS_SW_COUNT;
  1.5046  }
  1.5047  
  1.5048  static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
  1.5049  {
  1.5050 -	struct fe_priv *np = netdev_priv(dev);
  1.5051 +	struct fe_priv *np = get_nvpriv(dev);
  1.5052  
  1.5053  	/* update stats */
  1.5054  	nv_do_stats_poll((unsigned long)dev);
  1.5055 @@ -3501,7 +5295,7 @@ static void nv_get_ethtool_stats(struct 
  1.5056  
  1.5057  static int nv_self_test_count(struct net_device *dev)
  1.5058  {
  1.5059 -	struct fe_priv *np = netdev_priv(dev);
  1.5060 +	struct fe_priv *np = get_nvpriv(dev);
  1.5061  
  1.5062  	if (np->driver_data & DEV_HAS_TEST_EXTENDED)
  1.5063  		return NV_TEST_COUNT_EXTENDED;
  1.5064 @@ -3511,7 +5305,7 @@ static int nv_self_test_count(struct net
  1.5065  
  1.5066  static int nv_link_test(struct net_device *dev)
  1.5067  {
  1.5068 -	struct fe_priv *np = netdev_priv(dev);
  1.5069 +	struct fe_priv *np = get_nvpriv(dev);
  1.5070  	int mii_status;
  1.5071  
  1.5072  	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
  1.5073 @@ -3554,7 +5348,7 @@ static int nv_register_test(struct net_d
  1.5074  
  1.5075  static int nv_interrupt_test(struct net_device *dev)
  1.5076  {
  1.5077 -	struct fe_priv *np = netdev_priv(dev);
  1.5078 +	struct fe_priv *np = get_nvpriv(dev);
  1.5079  	u8 __iomem *base = get_hwbase(dev);
  1.5080  	int ret = 1;
  1.5081  	int testcnt;
  1.5082 @@ -3583,7 +5377,7 @@ static int nv_interrupt_test(struct net_
  1.5083  	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
  1.5084  
  1.5085  	/* wait for at least one interrupt */
  1.5086 -	msleep(100);
  1.5087 +	nv_msleep(100);
  1.5088  
  1.5089  	spin_lock_irq(&np->lock);
  1.5090  
  1.5091 @@ -3617,7 +5411,7 @@ static int nv_interrupt_test(struct net_
  1.5092  
  1.5093  static int nv_loopback_test(struct net_device *dev)
  1.5094  {
  1.5095 -	struct fe_priv *np = netdev_priv(dev);
  1.5096 +	struct fe_priv *np = get_nvpriv(dev);
  1.5097  	u8 __iomem *base = get_hwbase(dev);
  1.5098  	struct sk_buff *tx_skb, *rx_skb;
  1.5099  	dma_addr_t test_dma_addr;
  1.5100 @@ -3629,6 +5423,8 @@ static int nv_loopback_test(struct net_d
  1.5101  	u32 misc1_flags = 0;
  1.5102  	int ret = 1;
  1.5103  
  1.5104 +	dprintk(KERN_DEBUG "%s:%s\n", dev->name, __FUNCTION__);
  1.5105 +
  1.5106  	if (netif_running(dev)) {
  1.5107  		nv_disable_irq(dev);
  1.5108  		filter_flags = readl(base + NvRegPacketFilterFlags);
  1.5109 @@ -3649,7 +5445,7 @@ static int nv_loopback_test(struct net_d
  1.5110  	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
  1.5111  	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
  1.5112  	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
  1.5113 -		base + NvRegRingSizes);
  1.5114 +			base + NvRegRingSizes);
  1.5115  	pci_push(base);
  1.5116  
  1.5117  	/* restart rx engine */
  1.5118 @@ -3662,8 +5458,13 @@ static int nv_loopback_test(struct net_d
  1.5119  	pkt_data = skb_put(tx_skb, pkt_len);
  1.5120  	for (i = 0; i < pkt_len; i++)
  1.5121  		pkt_data[i] = (u8)(i & 0xff);
  1.5122 +#if NVVER > FEDORA7
  1.5123  	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
  1.5124 -				       tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
  1.5125 +			skb_tailroom(tx_skb), PCI_DMA_FROMDEVICE);
  1.5126 +#else
  1.5127 +	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
  1.5128 +			tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
  1.5129 +#endif
  1.5130  
  1.5131  	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.5132  		np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
  1.5133 @@ -3676,7 +5477,7 @@ static int nv_loopback_test(struct net_d
  1.5134  	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.5135  	pci_push(get_hwbase(dev));
  1.5136  
  1.5137 -	msleep(500);
  1.5138 +	nv_msleep(500);
  1.5139  
  1.5140  	/* check for rx of the packet */
  1.5141  	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.5142 @@ -3699,18 +5500,18 @@ static int nv_loopback_test(struct net_d
  1.5143  		}
  1.5144  	}
  1.5145  
  1.5146 -	if (ret) {
  1.5147 +	if (ret) {		
  1.5148  		if (len != pkt_len) {
  1.5149  			ret = 0;
  1.5150 -			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
  1.5151 -				dev->name, len, pkt_len);
  1.5152 +			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 
  1.5153 +					dev->name, len, pkt_len);
  1.5154  		} else {
  1.5155 -			rx_skb = np->rx_skbuff[0];
  1.5156 +			rx_skb = np->rx_skb[0].skb;
  1.5157  			for (i = 0; i < pkt_len; i++) {
  1.5158  				if (rx_skb->data[i] != (u8)(i & 0xff)) {
  1.5159  					ret = 0;
  1.5160 -					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
  1.5161 -						dev->name, i);
  1.5162 +					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 
  1.5163 +							dev->name, i);
  1.5164  					break;
  1.5165  				}
  1.5166  			}
  1.5167 @@ -3719,9 +5520,15 @@ static int nv_loopback_test(struct net_d
  1.5168  		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
  1.5169  	}
  1.5170  
  1.5171 +#if NVVER > FEDORA7
  1.5172  	pci_unmap_page(np->pci_dev, test_dma_addr,
  1.5173 -		       tx_skb->end-tx_skb->data,
  1.5174 -		       PCI_DMA_TODEVICE);
  1.5175 +			skb_tailroom(tx_skb),
  1.5176 +			PCI_DMA_TODEVICE);
  1.5177 +#else
  1.5178 +	pci_unmap_page(np->pci_dev, test_dma_addr,
  1.5179 +			tx_skb->end-tx_skb->data,
  1.5180 +			PCI_DMA_TODEVICE);
  1.5181 +#endif
  1.5182  	dev_kfree_skb_any(tx_skb);
  1.5183  
  1.5184  	/* stop engines */
  1.5185 @@ -3743,11 +5550,13 @@ static int nv_loopback_test(struct net_d
  1.5186  
  1.5187  static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
  1.5188  {
  1.5189 -	struct fe_priv *np = netdev_priv(dev);
  1.5190 +	struct fe_priv *np = get_nvpriv(dev);
  1.5191  	u8 __iomem *base = get_hwbase(dev);
  1.5192  	int result;
  1.5193  	memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
  1.5194  
  1.5195 +	dprintk(KERN_DEBUG "%s:%s\n", dev->name, __FUNCTION__);
  1.5196 +
  1.5197  	if (!nv_link_test(dev)) {
  1.5198  		test->flags |= ETH_TEST_FL_FAILED;
  1.5199  		buffer[0] = 1;
  1.5200 @@ -3756,7 +5565,11 @@ static void nv_self_test(struct net_devi
  1.5201  	if (test->flags & ETH_TEST_FL_OFFLINE) {
  1.5202  		if (netif_running(dev)) {
  1.5203  			netif_stop_queue(dev);
  1.5204 +#if NVVER > FEDORA5
  1.5205  			netif_tx_lock_bh(dev);
  1.5206 +#else
  1.5207 +			spin_lock_bh(&dev->xmit_lock);
  1.5208 +#endif
  1.5209  			spin_lock_irq(&np->lock);
  1.5210  			nv_disable_hw_interrupts(dev, np->irqmask);
  1.5211  			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
  1.5212 @@ -3772,7 +5585,11 @@ static void nv_self_test(struct net_devi
  1.5213  			nv_drain_rx(dev);
  1.5214  			nv_drain_tx(dev);
  1.5215  			spin_unlock_irq(&np->lock);
  1.5216 +#if NVVER > FEDORA5
  1.5217  			netif_tx_unlock_bh(dev);
  1.5218 +#else
  1.5219 +			spin_unlock_bh(&dev->xmit_lock);
  1.5220 +#endif
  1.5221  		}
  1.5222  
  1.5223  		if (!nv_register_test(dev)) {
  1.5224 @@ -3806,7 +5623,7 @@ static void nv_self_test(struct net_devi
  1.5225  			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
  1.5226  			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
  1.5227  			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
  1.5228 -				base + NvRegRingSizes);
  1.5229 +					base + NvRegRingSizes);
  1.5230  			pci_push(base);
  1.5231  			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.5232  			pci_push(base);
  1.5233 @@ -3822,12 +5639,12 @@ static void nv_self_test(struct net_devi
  1.5234  static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
  1.5235  {
  1.5236  	switch (stringset) {
  1.5237 -	case ETH_SS_STATS:
  1.5238 -		memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
  1.5239 -		break;
  1.5240 -	case ETH_SS_TEST:
  1.5241 -		memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
  1.5242 -		break;
  1.5243 +		case ETH_SS_STATS:
  1.5244 +			memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
  1.5245 +			break;
  1.5246 +		case ETH_SS_TEST:
  1.5247 +			memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
  1.5248 +			break;
  1.5249  	}
  1.5250  }
  1.5251  
  1.5252 @@ -3841,9 +5658,11 @@ static struct ethtool_ops ops = {
  1.5253  	.get_regs_len = nv_get_regs_len,
  1.5254  	.get_regs = nv_get_regs,
  1.5255  	.nway_reset = nv_nway_reset,
  1.5256 +#if NVVER < NVNEW
  1.5257 +#if NVVER > SUSE10
  1.5258  	.get_perm_addr = ethtool_op_get_perm_addr,
  1.5259 -	.get_tso = ethtool_op_get_tso,
  1.5260 -	.set_tso = nv_set_tso,
  1.5261 +#endif
  1.5262 +#endif
  1.5263  	.get_ringparam = nv_get_ringparam,
  1.5264  	.set_ringparam = nv_set_ringparam,
  1.5265  	.get_pauseparam = nv_get_pauseparam,
  1.5266 @@ -3854,6 +5673,10 @@ static struct ethtool_ops ops = {
  1.5267  	.set_tx_csum = nv_set_tx_csum,
  1.5268  	.get_sg = ethtool_op_get_sg,
  1.5269  	.set_sg = nv_set_sg,
  1.5270 +#ifdef NETIF_F_TSO
  1.5271 +	.get_tso = ethtool_op_get_tso,
  1.5272 +	.set_tso = nv_set_tso,
  1.5273 +#endif
  1.5274  	.get_strings = nv_get_strings,
  1.5275  	.get_stats_count = nv_get_stats_count,
  1.5276  	.get_ethtool_stats = nv_get_ethtool_stats,
  1.5277 @@ -3873,10 +5696,14 @@ static void nv_vlan_rx_register(struct n
  1.5278  	if (grp) {
  1.5279  		/* enable vlan on MAC */
  1.5280  		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
  1.5281 +		/* vlan is dependent on rx checksum */
  1.5282 +		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
  1.5283  	} else {
  1.5284  		/* disable vlan on MAC */
  1.5285  		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
  1.5286  		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
  1.5287 +		if (!np->rx_csum)
  1.5288 +			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
  1.5289  	}
  1.5290  
  1.5291  	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
  1.5292 @@ -3889,26 +5716,72 @@ static void nv_vlan_rx_kill_vid(struct n
  1.5293  	/* nothing to do */
  1.5294  };
  1.5295  
  1.5296 +/* The mgmt unit and driver use a semaphore to access the phy during init */
  1.5297 +static int nv_mgmt_acquire_sema(struct net_device *dev)
  1.5298 +{
  1.5299 +	u8 __iomem *base = get_hwbase(dev);
  1.5300 +	int i;
  1.5301 +	u32 tx_ctrl, mgmt_sema;
  1.5302 +
  1.5303 +	for (i = 0; i < 10; i++) {
  1.5304 +		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
  1.5305 +		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) {
  1.5306 +			dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is free\n");
  1.5307 +			break;
  1.5308 +		}
  1.5309 +		nv_msleep(500);
  1.5310 +	}
  1.5311 +
  1.5312 +	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) {
  1.5313 +		dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is not free\n");
  1.5314 +		return 0;
  1.5315 +	}
  1.5316 +
  1.5317 +	for (i = 0; i < 2; i++) {
  1.5318 +		tx_ctrl = readl(base + NvRegTransmitterControl);
  1.5319 +		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
  1.5320 +		writel(tx_ctrl, base + NvRegTransmitterControl);
  1.5321 +
  1.5322 +		/* verify that semaphore was acquired */
  1.5323 +		tx_ctrl = readl(base + NvRegTransmitterControl);
  1.5324 +		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
  1.5325 +				((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
  1.5326 +			dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: acquired sema\n");
  1.5327 +			return 1;
  1.5328 +		} else
  1.5329 +			udelay(50);
  1.5330 +	}
  1.5331 +
  1.5332 +	dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: exit\n");
  1.5333 +	return 0;
  1.5334 +}
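/* A minimal usage sketch (assumption; out_error is a hypothetical label):
 * the probe path would take the semaphore before PHY init whenever a
 * sideband management unit shares the PHY, roughly:
 *
 *     if (np->mac_in_use && !nv_mgmt_acquire_sema(dev))
 *             goto out_error;
 */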
  1.5335 +
  1.5336  static int nv_open(struct net_device *dev)
  1.5337  {
  1.5338 -	struct fe_priv *np = netdev_priv(dev);
  1.5339 +	struct fe_priv *np = get_nvpriv(dev);
  1.5340  	u8 __iomem *base = get_hwbase(dev);
  1.5341  	int ret = 1;
  1.5342 +	u32 tx_ctrl;
  1.5343  	int oom, i;
  1.5344  
  1.5345  	dprintk(KERN_DEBUG "nv_open: begin\n");
  1.5346  
  1.5347 -	/* 1) erase previous misconfiguration */
  1.5348 +	/* erase previous misconfiguration */
  1.5349  	if (np->driver_data & DEV_HAS_POWER_CNTRL)
  1.5350  		nv_mac_reset(dev);
  1.5351 -	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
  1.5352 +	/* stop adapter: ignored, 4.3 seems to be overkill */
  1.5353  	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
  1.5354  	writel(0, base + NvRegMulticastAddrB);
  1.5355 -	writel(0, base + NvRegMulticastMaskA);
  1.5356 -	writel(0, base + NvRegMulticastMaskB);
  1.5357 +	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
  1.5358 +	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
  1.5359  	writel(0, base + NvRegPacketFilterFlags);
  1.5360  
  1.5361 -	writel(0, base + NvRegTransmitterControl);
  1.5362 +	if (np->mac_in_use) {
  1.5363 +		tx_ctrl = readl(base + NvRegTransmitterControl);
  1.5364 +		tx_ctrl &= ~NVREG_XMITCTL_START;
  1.5365 +	} else
  1.5366 +		tx_ctrl = 0;
  1.5367 +	writel(tx_ctrl, base + NvRegTransmitterControl);
  1.5368  	writel(0, base + NvRegReceiverControl);
  1.5369  
  1.5370  	writel(0, base + NvRegAdapterControl);
  1.5371 @@ -3916,26 +5789,23 @@ static int nv_open(struct net_device *de
  1.5372  	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
  1.5373  		writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
  1.5374  
  1.5375 -	/* 2) initialize descriptor rings */
  1.5376 +	/* initialize descriptor rings */
  1.5377  	set_bufsize(dev);
  1.5378  	oom = nv_init_ring(dev);
  1.5379  
  1.5380  	writel(0, base + NvRegLinkSpeed);
  1.5381 -	writel(0, base + NvRegUnknownTransmitterReg);
  1.5382 +	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
  1.5383  	nv_txrx_reset(dev);
  1.5384  	writel(0, base + NvRegUnknownSetupReg6);
  1.5385  
  1.5386  	np->in_shutdown = 0;
  1.5387  
  1.5388 -	/* 3) set mac address */
  1.5389 -	nv_copy_mac_to_hw(dev);
  1.5390 -
  1.5391 -	/* 4) give hw rings */
  1.5392 +	/* give hw rings */
  1.5393  	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
  1.5394  	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
  1.5395 -		base + NvRegRingSizes);
  1.5396 -
  1.5397 -	/* 5) continue setup */
  1.5398 +			base + NvRegRingSizes);
  1.5399 +
  1.5400 +	/* continue setup */
  1.5401  	writel(np->linkspeed, base + NvRegLinkSpeed);
  1.5402  	if (np->desc_ver == DESC_VER_1)
  1.5403  		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
  1.5404 @@ -3949,11 +5819,11 @@ static int nv_open(struct net_device *de
  1.5405  			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
  1.5406  			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
  1.5407  
  1.5408 -	writel(0, base + NvRegUnknownSetupReg4);
  1.5409 +	writel(0, base + NvRegMIIMask);
  1.5410  	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
  1.5411 -	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
  1.5412 -
  1.5413 -	/* 6) continue setup */
  1.5414 +	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
  1.5415 +
  1.5416 +	/* continue setup */
  1.5417  	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
  1.5418  	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
  1.5419  	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
  1.5420 @@ -3976,7 +5846,7 @@ static int nv_open(struct net_device *de
  1.5421  	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
  1.5422  			base + NvRegAdapterControl);
  1.5423  	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
  1.5424 -	writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
  1.5425 +	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
  1.5426  	if (np->wolenabled)
  1.5427  		writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
  1.5428  
  1.5429 @@ -3990,7 +5860,7 @@ static int nv_open(struct net_device *de
  1.5430  
  1.5431  	nv_disable_hw_interrupts(dev, np->irqmask);
  1.5432  	pci_push(base);
  1.5433 -	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
  1.5434 +	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
  1.5435  	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
  1.5436  	pci_push(base);
  1.5437  
  1.5438 @@ -4004,8 +5874,8 @@ static int nv_open(struct net_device *de
  1.5439  	spin_lock_irq(&np->lock);
  1.5440  	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
  1.5441  	writel(0, base + NvRegMulticastAddrB);
  1.5442 -	writel(0, base + NvRegMulticastMaskA);
  1.5443 -	writel(0, base + NvRegMulticastMaskB);
  1.5444 +	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
  1.5445 +	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
  1.5446  	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
  1.5447  	/* One manual link speed update: Interrupts are enabled, future link
  1.5448  	 * speed changes cause interrupts and are handled by nv_link_irq().
  1.5449 @@ -4013,7 +5883,7 @@ static int nv_open(struct net_device *de
  1.5450  	{
  1.5451  		u32 miistat;
  1.5452  		miistat = readl(base + NvRegMIIStatus);
  1.5453 -		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
  1.5454 +		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
  1.5455  		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
  1.5456  	}
  1.5457  	/* set linkspeed to invalid value, thus force nv_update_linkspeed
  1.5458 @@ -4026,15 +5896,14 @@ static int nv_open(struct net_device *de
  1.5459  	if (ret) {
  1.5460  		netif_carrier_on(dev);
  1.5461  	} else {
  1.5462 -		printk("%s: no link during initialization.\n", dev->name);
  1.5463 +		dprintk(KERN_DEBUG "%s: no link during initialization.\n", dev->name);
  1.5464  		netif_carrier_off(dev);
  1.5465  	}
  1.5466  	if (oom)
  1.5467  		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
  1.5468  
  1.5469  	/* start statistics timer */
  1.5470 -	if (np->driver_data & DEV_HAS_STATISTICS)
  1.5471 -		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
  1.5472 +	mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
  1.5473  
  1.5474  	spin_unlock_irq(&np->lock);
  1.5475  
  1.5476 @@ -4046,13 +5915,19 @@ out_drain:
  1.5477  
  1.5478  static int nv_close(struct net_device *dev)
  1.5479  {
  1.5480 -	struct fe_priv *np = netdev_priv(dev);
  1.5481 +	struct fe_priv *np = get_nvpriv(dev);
  1.5482  	u8 __iomem *base;
  1.5483  
  1.5484 +	dprintk(KERN_DEBUG "nv_close: begin\n");
  1.5485  	spin_lock_irq(&np->lock);
  1.5486  	np->in_shutdown = 1;
  1.5487  	spin_unlock_irq(&np->lock);
  1.5488 -	synchronize_irq(dev->irq);
  1.5489 +
  1.5490 +#if NVVER > RHES3
  1.5491 +	synchronize_irq(np->pci_dev->irq);
  1.5492 +#else
  1.5493 +	synchronize_irq();
  1.5494 +#endif
  1.5495  
  1.5496  	del_timer_sync(&np->oom_kick);
  1.5497  	del_timer_sync(&np->nic_poll);
  1.5498 @@ -4079,12 +5954,6 @@ static int nv_close(struct net_device *d
  1.5499  	if (np->wolenabled)
  1.5500  		nv_start_rx(dev);
  1.5501  
  1.5502 -	/* special op: write back the misordered MAC address - otherwise
  1.5503 -	 * the next nv_probe would see a wrong address.
  1.5504 -	 */
  1.5505 -	writel(np->orig_mac[0], base + NvRegMacAddrA);
  1.5506 -	writel(np->orig_mac[1], base + NvRegMacAddrB);
  1.5507 -
  1.5508  	/* FIXME: power down nic */
  1.5509  
  1.5510  	return 0;
  1.5511 @@ -4097,16 +5966,21 @@ static int __devinit nv_probe(struct pci
  1.5512  	unsigned long addr;
  1.5513  	u8 __iomem *base;
  1.5514  	int err, i;
  1.5515 -	u32 powerstate;
  1.5516 -
  1.5517 +	u32 powerstate, phystate_orig = 0, phystate, txreg,reg,mii_status;
  1.5518 +	int phyinitialized = 0;
  1.5519 +
  1.5520 +	/* modify network device class id */	
  1.5521 +	quirk_nforce_network_class(pci_dev);
  1.5522  	dev = alloc_etherdev(sizeof(struct fe_priv));
  1.5523  	err = -ENOMEM;
  1.5524  	if (!dev)
  1.5525  		goto out;
  1.5526  
  1.5527 -	np = netdev_priv(dev);
  1.5528 +	dprintk(KERN_DEBUG "%s:%s\n", dev->name, __FUNCTION__);
  1.5529 +	np = get_nvpriv(dev);
  1.5530  	np->pci_dev = pci_dev;
  1.5531  	spin_lock_init(&np->lock);
  1.5532 +	spin_lock_init(&np->timer_lock);
  1.5533  	SET_MODULE_OWNER(dev);
  1.5534  	SET_NETDEV_DEV(dev, &pci_dev->dev);
  1.5535  
  1.5536 @@ -4133,7 +6007,9 @@ static int __devinit nv_probe(struct pci
  1.5537  	if (err < 0)
  1.5538  		goto out_disable;
  1.5539  
  1.5540 -	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
  1.5541 +	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
  1.5542 +		np->register_size = NV_PCI_REGSZ_VER3;
  1.5543 +	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
  1.5544  		np->register_size = NV_PCI_REGSZ_VER2;
  1.5545  	else
  1.5546  		np->register_size = NV_PCI_REGSZ_VER1;
  1.5547 @@ -4143,8 +6019,8 @@ static int __devinit nv_probe(struct pci
  1.5548  	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
  1.5549  		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
  1.5550  				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
  1.5551 -				pci_resource_len(pci_dev, i),
  1.5552 -				pci_resource_flags(pci_dev, i));
  1.5553 +				(long)pci_resource_len(pci_dev, i),
  1.5554 +				(long)pci_resource_flags(pci_dev, i));
  1.5555  		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
  1.5556  				pci_resource_len(pci_dev, i) >= np->register_size) {
  1.5557  			addr = pci_resource_start(pci_dev, i);
  1.5558 @@ -4153,7 +6029,7 @@ static int __devinit nv_probe(struct pci
  1.5559  	}
  1.5560  	if (i == DEVICE_COUNT_RESOURCE) {
  1.5561  		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
  1.5562 -					pci_name(pci_dev));
  1.5563 +				pci_name(pci_dev));
  1.5564  		goto out_relreg;
  1.5565  	}
  1.5566  
  1.5567 @@ -4168,15 +6044,17 @@ static int __devinit nv_probe(struct pci
  1.5568  		if (dma_64bit) {
  1.5569  			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
  1.5570  				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
  1.5571 -				       pci_name(pci_dev));
  1.5572 +						pci_name(pci_dev));
  1.5573  			} else {
  1.5574  				dev->features |= NETIF_F_HIGHDMA;
  1.5575  				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
  1.5576  			}
  1.5577 +#if NVVER > RHES3
  1.5578  			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
  1.5579  				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
  1.5580 -				       pci_name(pci_dev));
  1.5581 +						pci_name(pci_dev));
  1.5582  			}
  1.5583 +#endif
  1.5584  		}
  1.5585  	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
  1.5586  		/* packet format 2: supports jumbo frames */
  1.5587 @@ -4191,21 +6069,43 @@ static int __devinit nv_probe(struct pci
  1.5588  	np->pkt_limit = NV_PKTLIMIT_1;
  1.5589  	if (id->driver_data & DEV_HAS_LARGEDESC)
  1.5590  		np->pkt_limit = NV_PKTLIMIT_2;
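          +	/* clamp the mtu module parameter to what this chip supports */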
  1.5591 +	if (mtu > np->pkt_limit) {
  1.5592 +		printk(KERN_INFO "forcedeth: MTU value of %d is too large. Setting to maximum value of %d\n",
  1.5593 +				mtu, np->pkt_limit);
  1.5594 +		dev->mtu = np->pkt_limit;
  1.5595 +	} else {
  1.5596 +		dev->mtu = mtu;
  1.5597 +	}
  1.5598  
  1.5599  	if (id->driver_data & DEV_HAS_CHECKSUM) {
  1.5600 -		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
  1.5601 -		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
  1.5602 +		if (rx_checksum_offload) {
  1.5603 +			np->rx_csum = 1;
  1.5604 +			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
  1.5605 +		}
  1.5606 +
   1.5607 +		if (tx_checksum_offload)
   1.5608 +#if NVVER > RHES4
   1.5609 +			dev->features |= NETIF_F_HW_CSUM;
   1.5610 +#else
   1.5611 +			dev->features |= NETIF_F_IP_CSUM;
  1.5612 +#endif
  1.5613 +
  1.5614 +		if (scatter_gather)
  1.5615 +			dev->features |= NETIF_F_SG;
  1.5616  #ifdef NETIF_F_TSO
  1.5617 -		dev->features |= NETIF_F_TSO;
  1.5618 -#endif
  1.5619 - 	}
  1.5620 +		if (tso_offload)
  1.5621 +			dev->features |= NETIF_F_TSO;
  1.5622 +#endif
  1.5623 +	}
  1.5624  
  1.5625  	np->vlanctl_bits = 0;
  1.5626 -	if (id->driver_data & DEV_HAS_VLAN) {
  1.5627 +	if (id->driver_data & DEV_HAS_VLAN && tagging_8021pq) {
  1.5628  		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
  1.5629  		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
  1.5630  		dev->vlan_rx_register = nv_vlan_rx_register;
  1.5631  		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
  1.5632 +		/* vlan needs rx checksum support, so force it */
  1.5633 +		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
  1.5634  	}
  1.5635  
  1.5636  	np->msi_flags = 0;
  1.5637 @@ -4216,11 +6116,27 @@ static int __devinit nv_probe(struct pci
  1.5638  		np->msi_flags |= NV_MSI_X_CAPABLE;
  1.5639  	}
  1.5640  
  1.5641 -	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
  1.5642 -	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
  1.5643 -		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
  1.5644 -	}
  1.5645 -
  1.5646 +	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
  1.5647 +	if (rx_flow_control == NV_RX_FLOW_CONTROL_ENABLED)
  1.5648 +		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
  1.5649 +	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
  1.5650 +			(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)||
  1.5651 +			(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3))
  1.5652 +	{
  1.5653 +		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
  1.5654 +		if (tx_flow_control == NV_TX_FLOW_CONTROL_ENABLED)
  1.5655 +			np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
  1.5656 +	}
  1.5657 +	if (autoneg == AUTONEG_ENABLE) {
  1.5658 +		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
  1.5659 +	} else if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX) {
   1.5660 +		printk(KERN_INFO "forcedeth: speed_duplex of 1000 full cannot be enabled if autoneg is disabled\n");
  1.5661 +		goto out_relreg;
  1.5662 +	}
  1.5663 +
  1.5664 +	/* save phy config */
  1.5665 +	np->autoneg = autoneg;
  1.5666 +	np->speed_duplex = speed_duplex;
  1.5667  
  1.5668  	err = -ENOMEM;
  1.5669  	np->base = ioremap(addr, np->register_size);
  1.5670 @@ -4228,51 +6144,86 @@ static int __devinit nv_probe(struct pci
  1.5671  		goto out_relreg;
  1.5672  	dev->base_addr = (unsigned long)np->base;
  1.5673  
  1.5674 +	/* stop engines */
  1.5675 +	nv_stop_rx(dev);
  1.5676 +	nv_stop_tx(dev);
  1.5677 +	nv_txrx_reset(dev);
  1.5678 +
  1.5679  	dev->irq = pci_dev->irq;
  1.5680  
  1.5681 -	np->rx_ring_size = RX_RING_DEFAULT;
  1.5682 -	np->tx_ring_size = TX_RING_DEFAULT;
  1.5683 -	np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
  1.5684 -	np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
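          +	/* clamp the user-supplied ring sizes to the per-descriptor-version maximum */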
  1.5685 +	if (np->desc_ver == DESC_VER_1) {
  1.5686 +		if (rx_ring_size > RING_MAX_DESC_VER_1) {
  1.5687 +			printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
  1.5688 +					rx_ring_size, RING_MAX_DESC_VER_1);
  1.5689 +			rx_ring_size = RING_MAX_DESC_VER_1;
  1.5690 +		}
  1.5691 +		if (tx_ring_size > RING_MAX_DESC_VER_1) {
  1.5692 +			printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
  1.5693 +					tx_ring_size, RING_MAX_DESC_VER_1);
  1.5694 +			tx_ring_size = RING_MAX_DESC_VER_1;
  1.5695 +		}
  1.5696 +	} else {
  1.5697 +		if (rx_ring_size > RING_MAX_DESC_VER_2_3) {
  1.5698 +			printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
  1.5699 +					rx_ring_size, RING_MAX_DESC_VER_2_3);
  1.5700 +			rx_ring_size = RING_MAX_DESC_VER_2_3;
  1.5701 +		}
  1.5702 +		if (tx_ring_size > RING_MAX_DESC_VER_2_3) {
  1.5703 +			printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
  1.5704 +					tx_ring_size, RING_MAX_DESC_VER_2_3);
  1.5705 +			tx_ring_size = RING_MAX_DESC_VER_2_3;
  1.5706 +		}
  1.5707 +	}
  1.5708 +	np->rx_ring_size = rx_ring_size;
  1.5709 +	np->tx_ring_size = tx_ring_size;
  1.5710 +	np->tx_limit_stop = tx_ring_size - TX_LIMIT_DIFFERENCE;
  1.5711 +	np->tx_limit_start = tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
  1.5712  
  1.5713  	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
  1.5714  		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
  1.5715 -					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
  1.5716 -					&np->ring_addr);
  1.5717 +				sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
  1.5718 +				&np->ring_addr);
  1.5719  		if (!np->rx_ring.orig)
  1.5720  			goto out_unmap;
  1.5721  		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
  1.5722  	} else {
  1.5723  		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
  1.5724 -					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
  1.5725 -					&np->ring_addr);
  1.5726 +				sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
  1.5727 +				&np->ring_addr);
  1.5728  		if (!np->rx_ring.ex)
  1.5729  			goto out_unmap;
  1.5730  		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
  1.5731  	}
  1.5732 -	np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
  1.5733 -	np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
  1.5734 -	np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
  1.5735 -	np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
  1.5736 -	np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
  1.5737 -	if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
  1.5738 +	np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
  1.5739 +	np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
  1.5740 +	if (!np->rx_skb || !np->tx_skb)
  1.5741  		goto out_freering;
  1.5742 -	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
  1.5743 -	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
  1.5744 -	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
  1.5745 -	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
  1.5746 -	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
  1.5747 +	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
  1.5748 +	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
  1.5749  
  1.5750  	dev->open = nv_open;
  1.5751  	dev->stop = nv_close;
  1.5752 -	dev->hard_start_xmit = nv_start_xmit;
  1.5753 +	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
  1.5754 +		dev->hard_start_xmit = nv_start_xmit;
  1.5755 +	else
  1.5756 +		dev->hard_start_xmit = nv_start_xmit_optimized;
  1.5757  	dev->get_stats = nv_get_stats;
  1.5758  	dev->change_mtu = nv_change_mtu;
  1.5759  	dev->set_mac_address = nv_set_mac_address;
  1.5760  	dev->set_multicast_list = nv_set_multicast;
  1.5761 +
  1.5762 +#if NVVER < SLES9
  1.5763 +	dev->do_ioctl = nv_ioctl;
  1.5764 +#endif
  1.5765 +
  1.5766 +#if NVVER > RHES3
  1.5767  #ifdef CONFIG_NET_POLL_CONTROLLER
  1.5768  	dev->poll_controller = nv_poll_controller;
  1.5769  #endif
  1.5770 +#else
  1.5771 +	dev->poll_controller = nv_poll_controller;
  1.5772 +#endif
  1.5773 +
  1.5774  	SET_ETHTOOL_OPS(dev, &ops);
  1.5775  	dev->tx_timeout = nv_tx_timeout;
  1.5776  	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
  1.5777 @@ -4284,23 +6235,45 @@ static int __devinit nv_probe(struct pci
  1.5778  	np->orig_mac[0] = readl(base + NvRegMacAddrA);
  1.5779  	np->orig_mac[1] = readl(base + NvRegMacAddrB);
  1.5780  
  1.5781 -	dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
  1.5782 -	dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
  1.5783 -	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
  1.5784 -	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
  1.5785 -	dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
  1.5786 -	dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
  1.5787 +	/* check the workaround bit for correct mac address order */
  1.5788 +	txreg = readl(base + NvRegTransmitPoll);
  1.5789 +	if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
  1.5790 +			(id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
  1.5791 +		/* mac address is already in correct order */
  1.5792 +		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
  1.5793 +		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
  1.5794 +		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
  1.5795 +		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
  1.5796 +		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
  1.5797 +		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
  1.5798 +	} else {
  1.5799 +		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
  1.5800 +		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
  1.5801 +		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
  1.5802 +		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
  1.5803 +		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
  1.5804 +		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
   1.5805 +		/* set the permanent address to the corrected order as well */
  1.5806 +		np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
  1.5807 +			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
  1.5808 +		np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
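          +		/* mark the address as corrected so later probes and the
          +		 * resume path read it in the right order */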
  1.5809 +		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
  1.5810 +	}
  1.5811 +#if NVVER > SUSE10
  1.5812  	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
  1.5813  
  1.5814 -	if (!is_valid_ether_addr(dev->perm_addr)) {
   1.5815 +	if (!is_valid_ether_addr(dev->perm_addr)) {
   1.5816 +#else
   1.5817 +	if (!is_valid_ether_addr(dev->dev_addr)) {
   1.5818 +#endif
  1.5819  		/*
  1.5820  		 * Bad mac address. At least one bios sets the mac address
  1.5821  		 * to 01:23:45:67:89:ab
  1.5822  		 */
  1.5823  		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
  1.5824 -			pci_name(pci_dev),
  1.5825 -			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
  1.5826 -			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
  1.5827 +				pci_name(pci_dev),
  1.5828 +				dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
  1.5829 +				dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
  1.5830  		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
  1.5831  		dev->dev_addr[0] = 0x00;
  1.5832  		dev->dev_addr[1] = 0x00;
  1.5833 @@ -4311,10 +6284,12 @@ static int __devinit nv_probe(struct pci
  1.5834  	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
  1.5835  			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
  1.5836  			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
  1.5837 +	/* set mac address */
  1.5838 +	nv_copy_mac_to_hw(dev);
  1.5839  
  1.5840  	/* disable WOL */
  1.5841  	writel(0, base + NvRegWakeUpFlags);
  1.5842 -	np->wolenabled = 0;
  1.5843 +	np->wolenabled = wol;
  1.5844  
  1.5845  	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
  1.5846  		u8 revision_id;
  1.5847 @@ -4324,8 +6299,8 @@ static int __devinit nv_probe(struct pci
  1.5848  		powerstate = readl(base + NvRegPowerState2);
  1.5849  		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
  1.5850  		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
  1.5851 -		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
  1.5852 -		    revision_id >= 0xA3)
  1.5853 +					id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
  1.5854 +				revision_id >= 0xA3)
  1.5855  			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
  1.5856  		writel(powerstate, base + NvRegPowerState2);
  1.5857  	}
  1.5858 @@ -4356,6 +6331,41 @@ static int __devinit nv_probe(struct pci
  1.5859  		np->need_linktimer = 0;
  1.5860  	}
  1.5861  
  1.5862 +	/* clear phy state and temporarily halt phy interrupts */
  1.5863 +	writel(0, base + NvRegMIIMask);
  1.5864 +	phystate = readl(base + NvRegAdapterControl);
  1.5865 +	if (phystate & NVREG_ADAPTCTL_RUNNING) {
  1.5866 +		phystate_orig = 1;
  1.5867 +		phystate &= ~NVREG_ADAPTCTL_RUNNING;
  1.5868 +		writel(phystate, base + NvRegAdapterControl);
  1.5869 +	}
  1.5870 +	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
  1.5871 +
  1.5872 +	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
  1.5873 +		/* management unit running on the mac? */
  1.5874 +		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
  1.5875 +			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
  1.5876 +			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
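          +			/* wait up to 5 seconds for the mgmt unit to release the semaphore */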
  1.5877 +			for (i = 0; i < 5000; i++) {
  1.5878 +				nv_msleep(1);
  1.5879 +				if (nv_mgmt_acquire_sema(dev)) {
  1.5880 +					/* management unit setup the phy already? */
  1.5881 +					if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
  1.5882 +							NVREG_XMITCTL_SYNC_PHY_INIT) {
   1.5883 +						if (np->mac_in_use) {
   1.5884 +							/* phy was initialized by the mgmt unit */
  1.5885 +							phyinitialized = 1;
  1.5886 +							dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
  1.5887 +						}
  1.5888 +					} else {
  1.5889 +						/* we need to init the phy */
  1.5890 +					}
  1.5891 +					break;
  1.5892 +				}
  1.5893 +			}
  1.5894 +		}
  1.5895 +	}
  1.5896 +
  1.5897  	/* find a suitable phy */
  1.5898  	for (i = 1; i <= 32; i++) {
  1.5899  		int id1, id2;
  1.5900 @@ -4372,27 +6382,46 @@ static int __devinit nv_probe(struct pci
  1.5901  		if (id2 < 0 || id2 == 0xffff)
  1.5902  			continue;
  1.5903  
  1.5904 +		np->phy_model = id2 & PHYID2_MODEL_MASK;
  1.5905  		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
  1.5906  		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
  1.5907  		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
  1.5908 -			pci_name(pci_dev), id1, id2, phyaddr);
  1.5909 +				pci_name(pci_dev), id1, id2, phyaddr);
  1.5910  		np->phyaddr = phyaddr;
  1.5911  		np->phy_oui = id1 | id2;
  1.5912  		break;
  1.5913  	}
  1.5914  	if (i == 33) {
  1.5915  		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
  1.5916 -		       pci_name(pci_dev));
  1.5917 +				pci_name(pci_dev));
  1.5918  		goto out_error;
  1.5919  	}
  1.5920  
  1.5921 -	/* reset it */
  1.5922 -	phy_init(dev);
  1.5923 +	if (!phyinitialized) {		
  1.5924 +		/* reset it */
  1.5925 +		phy_init(dev);
  1.5926 +		np->autoneg = autoneg;
  1.5927 +	} else {
  1.5928 +		/* see if it is a gigabit phy */
  1.5929 +		mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
  1.5930 +		if (mii_status & PHY_GIGABIT) {
  1.5931 +			np->gigabit = PHY_GIGABIT;
  1.5932 +		}
  1.5933 +		reg = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
   1.5934 +		np->autoneg = (reg & BMCR_ANENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
   1.5935 +		if (np->autoneg == AUTONEG_DISABLE) {
  1.5936 +			reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
  1.5937 +			np->fixed_mode = reg;
  1.5938 +		}
  1.5939 +	}
  1.5940 +
   1.5941 +	if (np->phy_oui == PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor == 0x108E && np->pci_dev->subsystem_device == 0x6676) {
  1.5942 +		nv_LED_on(dev);
  1.5943 +	}
  1.5944  
  1.5945  	/* set default link speed settings */
  1.5946  	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
  1.5947  	np->duplex = 0;
  1.5948 -	np->autoneg = 1;
  1.5949  
  1.5950  	err = register_netdev(dev);
  1.5951  	if (err) {
  1.5952 @@ -4406,6 +6435,8 @@ static int __devinit nv_probe(struct pci
  1.5953  	return 0;
  1.5954  
  1.5955  out_error:
  1.5956 +	if (phystate_orig)
  1.5957 +		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
  1.5958  	pci_set_drvdata(pci_dev, NULL);
  1.5959  out_freering:
  1.5960  	free_rings(dev);
  1.5961 @@ -4421,11 +6452,32 @@ out:
  1.5962  	return err;
  1.5963  }
  1.5964  
  1.5965 +#ifdef CONFIG_PM
  1.5966 +static void nv_set_low_speed(struct net_device *dev);
  1.5967 +#endif
  1.5968  static void __devexit nv_remove(struct pci_dev *pci_dev)
  1.5969  {
  1.5970  	struct net_device *dev = pci_get_drvdata(pci_dev);
  1.5971 -
  1.5972 +	struct fe_priv *np = get_nvpriv(dev);
  1.5973 +	u8 __iomem *base = get_hwbase(dev);
  1.5974 +	u32 tx_ctrl;
  1.5975 +
   1.5976 +	if (np->phy_oui == PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor == 0x108E && np->pci_dev->subsystem_device == 0x6676) {
  1.5977 +		nv_LED_off(dev);
  1.5978 +	}
  1.5979  	unregister_netdev(dev);
  1.5980 +	/* special op: write back the misordered MAC address - otherwise
  1.5981 +	 * the next nv_probe would see a wrong address.
  1.5982 +	 */
  1.5983 +	writel(np->orig_mac[0], base + NvRegMacAddrA);
  1.5984 +	writel(np->orig_mac[1], base + NvRegMacAddrB);
  1.5985 +
  1.5986 +	/* relinquish control of the semaphore */
   1.5987 +	if (np->mac_in_use) {
  1.5988 +		tx_ctrl = readl(base + NvRegTransmitterControl);
  1.5989 +		tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_MASK;
  1.5990 +		writel(tx_ctrl, base + NvRegTransmitterControl);
  1.5991 +	}
  1.5992  
  1.5993  	/* free all structures */
  1.5994  	free_rings(dev);
  1.5995 @@ -4467,90 +6519,436 @@ static struct pci_device_id pci_tbl[] = 
  1.5996  	},
  1.5997  	{	/* CK804 Ethernet Controller */
  1.5998  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
  1.5999 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
  1.6000 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
  1.6001  	},
  1.6002  	{	/* CK804 Ethernet Controller */
  1.6003  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
  1.6004 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
  1.6005 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
  1.6006  	},
  1.6007  	{	/* MCP04 Ethernet Controller */
  1.6008  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
  1.6009 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
  1.6010 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
  1.6011  	},
  1.6012  	{	/* MCP04 Ethernet Controller */
  1.6013  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
  1.6014 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
  1.6015 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
  1.6016  	},
  1.6017  	{	/* MCP51 Ethernet Controller */
  1.6018  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
  1.6019 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
  1.6020 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
  1.6021  	},
  1.6022  	{	/* MCP51 Ethernet Controller */
  1.6023  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
  1.6024 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
  1.6025 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
  1.6026  	},
  1.6027  	{	/* MCP55 Ethernet Controller */
  1.6028  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
  1.6029 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6030 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
  1.6031  	},
  1.6032  	{	/* MCP55 Ethernet Controller */
  1.6033  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
  1.6034 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6035 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
  1.6036  	},
  1.6037  	{	/* MCP61 Ethernet Controller */
  1.6038  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
  1.6039 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6040 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6041  	},
  1.6042  	{	/* MCP61 Ethernet Controller */
  1.6043  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
  1.6044 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6045 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6046  	},
  1.6047  	{	/* MCP61 Ethernet Controller */
  1.6048  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
  1.6049 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6050 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6051  	},
  1.6052  	{	/* MCP61 Ethernet Controller */
  1.6053  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
  1.6054 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6055 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6056  	},
  1.6057  	{	/* MCP65 Ethernet Controller */
  1.6058  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
  1.6059 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6060 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6061  	},
  1.6062  	{	/* MCP65 Ethernet Controller */
  1.6063  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
  1.6064 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6065 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6066  	},
  1.6067  	{	/* MCP65 Ethernet Controller */
  1.6068  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
  1.6069 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6070 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6071  	},
  1.6072  	{	/* MCP65 Ethernet Controller */
  1.6073  		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
  1.6074 -		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
  1.6075 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6076 +	},
  1.6077 +	{	/* MCP67 Ethernet Controller */
  1.6078 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
  1.6079 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6080 +	},
  1.6081 +	{	/* MCP67 Ethernet Controller */
  1.6082 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
  1.6083 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6084 +	},
  1.6085 +	{	/* MCP67 Ethernet Controller */
  1.6086 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
  1.6087 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6088 +	},
  1.6089 +	{	/* MCP67 Ethernet Controller */
  1.6090 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
  1.6091 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6092 +	},
  1.6093 +	{	/* MCP73 Ethernet Controller */
  1.6094 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
  1.6095 +		.driver_data =  DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6096 +	},
  1.6097 +	{	/* MCP73 Ethernet Controller */
  1.6098 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
  1.6099 +		.driver_data =  DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6100 +	},
  1.6101 +	{	/* MCP73 Ethernet Controller */
  1.6102 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
  1.6103 +		.driver_data =  DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6104 +	},
  1.6105 +	{	/* MCP73 Ethernet Controller */
  1.6106 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
  1.6107 +		.driver_data =  DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6108 +	},
  1.6109 +	{	/* MCP77 Ethernet Controller */
  1.6110 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
  1.6111 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6112 +	},
  1.6113 +	{	/* MCP77 Ethernet Controller */
  1.6114 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
  1.6115 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6116 +	},
  1.6117 +	{	/* MCP77 Ethernet Controller */
  1.6118 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
  1.6119 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6120 +	},
  1.6121 +	{	/* MCP77 Ethernet Controller */
  1.6122 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
  1.6123 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6124 +	},
  1.6125 +	{	/* MCP79 Ethernet Controller */
  1.6126 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
  1.6127 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6128 +	},
  1.6129 +	{	/* MCP79 Ethernet Controller */
  1.6130 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
  1.6131 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6132 +	},
  1.6133 +	{	/* MCP79 Ethernet Controller */
  1.6134 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
  1.6135 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6136 +	},
  1.6137 +	{	/* MCP79 Ethernet Controller */
  1.6138 +		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
  1.6139 +		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
  1.6140  	},
  1.6141  	{0,},
  1.6142  };
  1.6143  
  1.6144 -static struct pci_driver driver = {
  1.6145 +#ifdef CONFIG_PM
  1.6146 +static void nv_set_low_speed(struct net_device *dev)
  1.6147 +{
  1.6148 +	struct fe_priv *np = get_nvpriv(dev);
  1.6149 +	int adv = 0;
  1.6150 +	int lpa = 0;
  1.6151 +	int adv_lpa, bmcr, tries = 0;
  1.6152 +	int mii_status;
  1.6153 +	u32 control_1000;
  1.6154 +
  1.6155 +	if (np->autoneg == 0 || ((np->linkspeed & 0xFFF) != NVREG_LINKSPEED_1000))
  1.6156 +		return;
  1.6157 +
  1.6158 +	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
  1.6159 +	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
  1.6160 +	control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
  1.6161 +
  1.6162 +	adv_lpa = lpa & adv;
  1.6163 +
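          +	/* fall back to the lowest speed that both link partners advertise */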
  1.6164 +	if ((adv_lpa & LPA_10FULL) || (adv_lpa & LPA_10HALF)) {
  1.6165 +		adv &= ~(ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF);
  1.6166 +		control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
   1.6167 +		printk(KERN_INFO "forcedeth %s: set low speed to 10 Mbps\n", dev->name);
  1.6168 +	} else if ((adv_lpa & LPA_100FULL) || (adv_lpa & LPA_100HALF)) {
  1.6169 +		control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
   1.6170 +	} else
  1.6171 +		return;
  1.6172 +
  1.6173 +	/* set new advertisements */
  1.6174 +	mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
  1.6175 +	mii_rw(dev, np->phyaddr, MII_CTRL1000, control_1000);
  1.6176 +
  1.6177 +	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1.6178 +	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
  1.6179 +		bmcr |= BMCR_ANENABLE;
  1.6180 +		/* reset the phy in order for settings to stick,
  1.6181 +		 * and cause autoneg to start */
  1.6182 +		if (phy_reset(dev, bmcr)) {
  1.6183 +			printk(KERN_INFO "%s: phy reset failed\n", dev->name);
  1.6184 +			return;
  1.6185 +		}
  1.6186 +	} else {
  1.6187 +		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
  1.6188 +		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
  1.6189 +	}
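          +	/* BMSR latches link-status changes; read it twice so the
          +	 * second read reflects the current autoneg state */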
  1.6190 +	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
  1.6191 +	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
  1.6192 +	while (!(mii_status & BMSR_ANEGCOMPLETE)) {
  1.6193 +		nv_msleep(100);
  1.6194 +		mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
  1.6195 +		if (tries++ > 50)
  1.6196 +			break;
  1.6197 +	}
  1.6198 +
  1.6199 +	nv_update_linkspeed(dev);
  1.6200 +
  1.6201 +	return;
  1.6202 +}
  1.6203 +
  1.6204 +static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
  1.6205 +{
  1.6206 +	struct net_device *dev = pci_get_drvdata(pdev);
  1.6207 +	struct fe_priv *np = get_nvpriv(dev);
  1.6208 +	u8 __iomem *base = get_hwbase(dev);
  1.6209 +	int i;
  1.6210 +	u32 tx_ctrl;
  1.6211 +
  1.6212 +	dprintk(KERN_INFO "forcedeth: nv_suspend\n");
  1.6213 +
  1.6214 +	/* MCP55:save msix table */
   1.6215 +	if ((pdev->device == PCI_DEVICE_ID_NVIDIA_NVENET_14) || (pdev->device == PCI_DEVICE_ID_NVIDIA_NVENET_15))
  1.6216 +	{
   1.6217 +		unsigned long phys_addr;
   1.6218 +		void __iomem *base_addr;
   1.6219 +		void __iomem *base;
   1.6220 +		unsigned int bir, len;
   1.6221 +		unsigned int i;
  1.6222 +		int pos;
  1.6223 +		u32 table_offset;
  1.6224 +
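          +		/* the low bits of the table offset select the BAR (BIR);
          +		 * the remainder is the table's offset within that BAR */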
  1.6225 +		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
   1.6226 +		pci_read_config_dword(pdev, pos + 0x04, &table_offset);
  1.6227 +		bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
  1.6228 +		table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
  1.6229 +		phys_addr = pci_resource_start(pdev, bir) + table_offset;
  1.6230 +		np->msix_pa_addr = phys_addr;
  1.6231 +		len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
  1.6232 +		base_addr = ioremap_nocache(phys_addr, len);
  1.6233 +
   1.6234 +		for (i = 0; i < NV_MSI_X_MAX_VECTORS; i++) {
   1.6235 +			base = base_addr + i * PCI_MSIX_ENTRY_SIZE;
   1.6236 +			np->nvmsg[i].address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
   1.6237 +			np->nvmsg[i].address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
  1.6238 +			np->nvmsg[i].data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
  1.6239 +		}
  1.6240 +
  1.6241 +		iounmap(base_addr);
  1.6242 +	}
  1.6243 +
  1.6244 +	nv_update_linkspeed(dev);
  1.6245 +
  1.6246 +	if (netif_running(dev)) {
  1.6247 +		netif_device_detach(dev);
  1.6248 +		/* bring down the adapter */
  1.6249 +		nv_close(dev);
  1.6250 +	}
  1.6251 +
  1.6252 +	/* relinquish control of the semaphore */
   1.6253 +	if (np->mac_in_use) {
  1.6254 +		tx_ctrl = readl(base + NvRegTransmitterControl);
  1.6255 +		tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_MASK;
  1.6256 +		writel(tx_ctrl, base + NvRegTransmitterControl);
  1.6257 +	}
  1.6258 +
  1.6259 +	/* set phy to a lower speed to conserve power */
   1.6260 +	if ((lowpowerspeed == NV_LOW_POWER_ENABLED) && !np->mac_in_use)
  1.6261 +		nv_set_low_speed(dev);
  1.6262 +
  1.6263 +#if NVVER > RHES4
  1.6264 +	pci_save_state(pdev);
  1.6265 +#else
  1.6266 +	pci_save_state(pdev,np->pci_state);
  1.6267 +#endif
   1.6268 +	np->saved_nvregphyinterface = readl(base + NvRegPhyInterface);
   1.6269 +	for (i = 0; i < 64; i++) {
   1.6270 +		pci_read_config_dword(pdev, i * 4, &np->saved_config_space[i]);
   1.6271 +	}
  1.6272 +#if NVVER > RHES4
  1.6273 +	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
  1.6274 +#else
  1.6275 +	pci_enable_wake(pdev, state, np->wolenabled);
  1.6276 +#endif
  1.6277 +	pci_disable_device(pdev);
  1.6278 +
  1.6279 +#if NVVER > RHES4
  1.6280 +	pci_set_power_state(pdev, pci_choose_state(pdev, state));
  1.6281 +#else
  1.6282 +	pci_set_power_state(pdev, state);
  1.6283 +#endif
  1.6284 +
  1.6285 +	return 0;
  1.6286 +}
  1.6287 +
  1.6288 +static int nv_resume(struct pci_dev *pdev)
  1.6289 +{
  1.6290 +	struct net_device *dev = pci_get_drvdata(pdev);
  1.6291 +	int rc = 0;
  1.6292 +	struct fe_priv *np = get_nvpriv(dev);
  1.6293 +	u8 __iomem *base = get_hwbase(dev);
  1.6294 +	int i;
  1.6295 +	int err;
  1.6296 +	u32 txreg; 
  1.6297 +
  1.6298 +	dprintk(KERN_INFO "forcedeth: nv_resume\n");
  1.6299 +
  1.6300 +	pci_set_power_state(pdev, PCI_D0);
  1.6301 +#if NVVER > RHES4
  1.6302 +	pci_restore_state(pdev);
  1.6303 +#else
  1.6304 +	pci_restore_state(pdev,np->pci_state);
  1.6305 +#endif
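          +	/* restore the raw PCI config space saved in nv_suspend */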
   1.6306 +	for (i = 0; i < 64; i++) {
   1.6307 +		pci_write_config_dword(pdev, i * 4, np->saved_config_space[i]);
   1.6308 +	}
  1.6309 +	err = pci_enable_device(pdev); 
  1.6310 +	if (err) {
  1.6311 +		printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
  1.6312 +				err, pci_name(pdev));
  1.6313 +	}
  1.6314 +	pci_set_master(pdev);
  1.6315 +
  1.6316 +	txreg = readl(base + NvRegTransmitPoll);
  1.6317 +	txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
  1.6318 +	writel(txreg, base + NvRegTransmitPoll);
   1.6319 +	writel(np->saved_nvregphyinterface, base + NvRegPhyInterface);
  1.6320 +	writel(np->orig_mac[0], base + NvRegMacAddrA);
  1.6321 +	writel(np->orig_mac[1], base + NvRegMacAddrB);
  1.6322 +
  1.6323 +	/* MCP55:restore msix table */
   1.6324 +	if ((pdev->device == PCI_DEVICE_ID_NVIDIA_NVENET_14) || (pdev->device == PCI_DEVICE_ID_NVIDIA_NVENET_15))
  1.6325 +	{
   1.6326 +		unsigned long phys_addr;
   1.6327 +		void __iomem *base_addr;
   1.6328 +		void __iomem *base;
   1.6329 +		unsigned int len;
   1.6330 +		unsigned int i;
  1.6331 +
  1.6332 +		len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
  1.6333 +		phys_addr = np->msix_pa_addr;
  1.6334 +		base_addr = ioremap_nocache(phys_addr, len);
   1.6335 +		for (i = 0; i < NV_MSI_X_MAX_VECTORS; i++) {
   1.6336 +			base = base_addr + i * PCI_MSIX_ENTRY_SIZE;
   1.6337 +			writel(np->nvmsg[i].address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
   1.6338 +			writel(np->nvmsg[i].address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
   1.6339 +			writel(np->nvmsg[i].data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
  1.6340 +		}
  1.6341 +
  1.6342 +		iounmap(base_addr);
  1.6343 +	}
  1.6344 +
   1.6345 +	if (np->mac_in_use) {
   1.6346 +		/* take control of the semaphore */
   1.6347 +		for (i = 0; i < 5000; i++) {
   1.6348 +			if (nv_mgmt_acquire_sema(dev))
  1.6349 +				break;
  1.6350 +			nv_msleep(1);
  1.6351 +		}
  1.6352 +	}
  1.6353 +
   1.6354 +	if (lowpowerspeed == NV_LOW_POWER_ENABLED) {
  1.6355 +		/* re-initialize the phy */
  1.6356 +		phy_init(dev);
  1.6357 +		udelay(10);
  1.6358 +	}
  1.6359 +
  1.6360 +	/* bring up the adapter */
   1.6361 +	if (netif_running(dev)) {
  1.6362 +		rc = nv_open(dev);
  1.6363 +	}
  1.6364 +	netif_device_attach(dev);
  1.6365 +
  1.6366 +	return rc;
  1.6367 +}
  1.6368 +
  1.6369 +#endif /* CONFIG_PM */
  1.6370 +static struct pci_driver nv_eth_driver = {
  1.6371  	.name = "forcedeth",
  1.6372  	.id_table = pci_tbl,
  1.6373  	.probe = nv_probe,
  1.6374  	.remove = __devexit_p(nv_remove),
  1.6375 +#ifdef CONFIG_PM
  1.6376 +	.suspend	= nv_suspend,
  1.6377 +	.resume		= nv_resume,
  1.6378 +#endif
  1.6379  };
  1.6380  
  1.6381 +#ifdef CONFIG_PM
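          +/* on shutdown, put each forcedeth NIC into its suspended, wake-enabled state */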
  1.6382 +static int nv_reboot_handler(struct notifier_block *nb, unsigned long event, void *p)
  1.6383 +{
  1.6384 +	struct pci_dev *pdev = NULL;
  1.6385 +	pm_message_t state = { PM_EVENT_SUSPEND };
  1.6386 +
  1.6387 +	switch (event)
  1.6388 +	{
  1.6389 +		case SYS_POWER_OFF:
  1.6390 +		case SYS_HALT:
  1.6391 +		case SYS_DOWN:
  1.6392 +#if NVVER < FEDORA7
  1.6393 +			while ((pdev = pci_find_device(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, pdev)) != NULL) {
  1.6394 +#else
  1.6395 +			while ((pdev = pci_get_device(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, pdev)) != NULL) {
  1.6396 +#endif
  1.6397 +				if (pci_dev_driver(pdev) == &nv_eth_driver) {
  1.6398 +					nv_suspend(pdev, state);
  1.6399 +				}
  1.6400 +			}
  1.6401 +	}
  1.6402 +
  1.6403 +	return NOTIFY_DONE;
  1.6404 +}
  1.6405 +
  1.6406 +/*
  1.6407 + * Reboot notification
  1.6408 + */
   1.6409 +struct notifier_block nv_reboot_notifier =
   1.6410 +{
   1.6411 +	notifier_call:	nv_reboot_handler,
   1.6412 +	next:		NULL,
   1.6413 +	priority:	0
   1.6414 +};
  1.6415 +#endif
  1.6416  
  1.6417  static int __init init_nic(void)
  1.6418  {
  1.6419 +	int status;
  1.6420  	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
  1.6421 -	return pci_module_init(&driver);
   1.6422 +	DPRINTK(DRV, KERN_DEBUG, "forcedeth:%s\n", DRV_DATE);
  1.6423 +#if NVVER > FEDORA7
  1.6424 +	status = pci_register_driver(&nv_eth_driver);
  1.6425 +#else
  1.6426 +	status = pci_module_init(&nv_eth_driver);
  1.6427 +#endif
  1.6428 +#ifdef CONFIG_PM
  1.6429 +	if (status >= 0)
  1.6430 +		register_reboot_notifier(&nv_reboot_notifier);
  1.6431 +#endif
  1.6432 +	return status;
  1.6433  }
  1.6434  
  1.6435  static void __exit exit_nic(void)
  1.6436  {
  1.6437 -	pci_unregister_driver(&driver);
  1.6438 +#ifdef CONFIG_PM
  1.6439 +	unregister_reboot_notifier(&nv_reboot_notifier);
  1.6440 +#endif
  1.6441 +	pci_unregister_driver(&nv_eth_driver);
  1.6442  }
  1.6443  
  1.6444 +#if NVVER > SLES9
  1.6445 +module_param(debug, int, 0);
  1.6446 +module_param(lowpowerspeed, int, 0);
   1.6447 +MODULE_PARM_DESC(lowpowerspeed, "Low Power State Link Speed is enabled by setting to 1 and disabled by setting to 0.");
  1.6448  module_param(max_interrupt_work, int, 0);
  1.6449  MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
  1.6450  module_param(optimization_mode, int, 0);
  1.6451 @@ -4561,12 +6959,84 @@ module_param(msi, int, 0);
  1.6452  MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
  1.6453  module_param(msix, int, 0);
  1.6454  MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
  1.6455 +
  1.6456 +module_param(speed_duplex, int, 0);
  1.6457 +MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
  1.6458 +module_param(autoneg, int, 0);
  1.6459 +MODULE_PARM_DESC(autoneg, "PHY autonegotiate is enabled by setting to 1 and disabled by setting to 0.");
  1.6460 +module_param(scatter_gather, int, 0);
  1.6461 +MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
  1.6462 +module_param(tso_offload, int, 0);
  1.6463 +MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
  1.6464 +module_param(mtu, int, 0);
  1.6465 +MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
  1.6466 +module_param(tx_checksum_offload, int, 0);
  1.6467 +MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
  1.6468 +module_param(rx_checksum_offload, int, 0);
  1.6469 +MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
  1.6470 +module_param(tx_ring_size, int, 0);
  1.6471 +MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
  1.6472 +module_param(rx_ring_size, int, 0);
  1.6473 +MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
  1.6474 +module_param(tx_flow_control, int, 0);
  1.6475 +MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
  1.6476 +module_param(rx_flow_control, int, 0);
  1.6477 +MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
  1.6478  module_param(dma_64bit, int, 0);
  1.6479  MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
  1.6480 -
  1.6481 +module_param(wol, int, 0);
  1.6482 +MODULE_PARM_DESC(wol, "Wake-On-Lan is enabled by setting to 1 and disabled by setting to 0.");
  1.6483 +module_param(tagging_8021pq, int, 0);
  1.6484 +MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
  1.6485 +#else
  1.6486 +MODULE_PARM(debug, "i");
  1.6487 +MODULE_PARM(lowpowerspeed, "i");
   1.6488 +MODULE_PARM_DESC(lowpowerspeed, "Low Power State Link Speed is enabled by setting to 1 and disabled by setting to 0.");
  1.6489 +MODULE_PARM(max_interrupt_work, "i");
  1.6490 +MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
  1.6491 +MODULE_PARM(optimization_mode, "i");
  1.6492 +MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
  1.6493 +MODULE_PARM(poll_interval, "i");
  1.6494 +MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
  1.6495 +#ifdef CONFIG_PCI_MSI
  1.6496 +MODULE_PARM(msi, "i");
  1.6497 +MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
  1.6498 +MODULE_PARM(msix, "i");
  1.6499 +MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
  1.6500 +#endif
  1.6501 +MODULE_PARM(speed_duplex, "i");
  1.6502 +MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
  1.6503 +MODULE_PARM(autoneg, "i");
  1.6504 +MODULE_PARM_DESC(autoneg, "PHY autonegotiate is enabled by setting to 1 and disabled by setting to 0.");
  1.6505 +MODULE_PARM(scatter_gather, "i");
  1.6506 +MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
  1.6507 +MODULE_PARM(tso_offload, "i");
  1.6508 +MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
  1.6509 +MODULE_PARM(mtu, "i");
  1.6510 +MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
  1.6511 +MODULE_PARM(tx_checksum_offload, "i");
  1.6512 +MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
  1.6513 +MODULE_PARM(rx_checksum_offload, "i");
  1.6514 +MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
  1.6515 +MODULE_PARM(tx_ring_size, "i");
  1.6516 +MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
  1.6517 +MODULE_PARM(rx_ring_size, "i");
  1.6518 +MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
  1.6519 +MODULE_PARM(tx_flow_control, "i");
  1.6520 +MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
  1.6521 +MODULE_PARM(rx_flow_control, "i");
  1.6522 +MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
  1.6523 +MODULE_PARM(dma_64bit, "i");
  1.6524 +MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
  1.6525 +MODULE_PARM(wol, "i");
  1.6526 +MODULE_PARM_DESC(wol, "Wake-On-Lan is enabled by setting to 1 and disabled by setting to 0.");
  1.6527 +MODULE_PARM(tagging_8021pq, "i");
  1.6528 +MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
  1.6529 +#endif
  1.6530  MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
  1.6531  MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
  1.6532  MODULE_LICENSE("GPL");
  1.6533 +MODULE_VERSION(FORCEDETH_VERSION);
  1.6534  
  1.6535  MODULE_DEVICE_TABLE(pci, pci_tbl);
  1.6536