ia64/linux-2.6.18-xen.hg

changeset 847:ad4d307bf9ce

net sfc: Update sfc and sfc_resource driver to latest release

...and update sfc_netfront, sfc_netback, sfc_netutil for any API changes

sfc_netback: Fix asymmetric use of SFC buffer table alloc and free
sfc_netback: Clean up if no SFC accel device found
sfc_netback: Gracefully handle case where page grant fails
sfc_netback: Disable net acceleration if the physical link goes down
sfc_netfront: Less verbose error messages, more verbose counters for
rx discard errors
sfc_netfront: Gracefully handle case where SFC netfront fails during
initialisation

Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 31 11:59:10 2009 +0100 (2009-03-31)
parents 89d9c025b46c
children ab1d4fbbe4bf
files drivers/net/sfc/Kconfig drivers/net/sfc/Makefile drivers/net/sfc/Module.symvers drivers/net/sfc/bitfield.h drivers/net/sfc/debugfs.c drivers/net/sfc/driverlink.c drivers/net/sfc/driverlink_api.h drivers/net/sfc/efx.c drivers/net/sfc/efx.h drivers/net/sfc/enum.h drivers/net/sfc/ethtool.c drivers/net/sfc/falcon.c drivers/net/sfc/falcon.h drivers/net/sfc/falcon_hwdefs.h drivers/net/sfc/falcon_io.h drivers/net/sfc/falcon_xmac.c drivers/net/sfc/i2c-direct.c drivers/net/sfc/kernel_compat.c drivers/net/sfc/kernel_compat.h drivers/net/sfc/mdio_10g.c drivers/net/sfc/mtd.c drivers/net/sfc/net_driver.h drivers/net/sfc/null_phy.c drivers/net/sfc/pm8358_phy.c drivers/net/sfc/rx.c drivers/net/sfc/rx.h drivers/net/sfc/selftest.c drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h drivers/net/sfc/sfc_resource/ci/efhw/common.h drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h drivers/net/sfc/sfc_resource/ci/efhw/eventq.h drivers/net/sfc/sfc_resource/ci/efhw/falcon.h drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h drivers/net/sfc/sfc_resource/ci/efhw/iopage.h drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h drivers/net/sfc/sfc_resource/ci/efrm/private.h drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h drivers/net/sfc/sfc_resource/driver_object.c drivers/net/sfc/sfc_resource/driverlink_new.c drivers/net/sfc/sfc_resource/efx_vi_shm.c drivers/net/sfc/sfc_resource/eventq.c drivers/net/sfc/sfc_resource/falcon.c drivers/net/sfc/sfc_resource/falcon_mac.c drivers/net/sfc/sfc_resource/filter_resource.c drivers/net/sfc/sfc_resource/iobufset_resource.c drivers/net/sfc/sfc_resource/iopage.c drivers/net/sfc/sfc_resource/kernel_compat.c drivers/net/sfc/sfc_resource/kernel_compat.h drivers/net/sfc/sfc_resource/linux_resource_internal.h drivers/net/sfc/sfc_resource/nic.c drivers/net/sfc/sfc_resource/resource_driver.c drivers/net/sfc/sfc_resource/resources.c drivers/net/sfc/sfc_resource/vi_resource_alloc.c drivers/net/sfc/sfc_resource/vi_resource_event.c drivers/net/sfc/sfc_resource/vi_resource_flush.c drivers/net/sfc/sfc_resource/vi_resource_manager.c drivers/net/sfc/sfe4001.c drivers/net/sfc/tenxpress.c drivers/net/sfc/tx.c drivers/net/sfc/txc43128_phy.c drivers/net/sfc/workarounds.h drivers/net/sfc/xfp_phy.c drivers/xen/sfc_netback/accel.c drivers/xen/sfc_netback/accel_solarflare.c drivers/xen/sfc_netback/accel_xenbus.c drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h drivers/xen/sfc_netback/ci/efhw/common.h drivers/xen/sfc_netback/ci/efhw/common_sysdep.h drivers/xen/sfc_netback/ci/efhw/efhw_types.h drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h drivers/xen/sfc_netback/ci/efhw/iopage_types.h drivers/xen/sfc_netback/ci/efrm/nic_table.h drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h drivers/xen/sfc_netback/ci/tools/log.h drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h drivers/xen/sfc_netfront/accel.h drivers/xen/sfc_netfront/accel_debugfs.c 
drivers/xen/sfc_netfront/accel_msg.c drivers/xen/sfc_netfront/accel_netfront.c drivers/xen/sfc_netfront/accel_vi.c drivers/xen/sfc_netfront/ef_vi_falcon.h drivers/xen/sfc_netfront/etherfabric/ef_vi.h drivers/xen/sfc_netfront/falcon_event.c drivers/xen/sfc_netfront/falcon_vi.c drivers/xen/sfc_netfront/sysdep.h
line diff
     1.1 --- a/drivers/net/sfc/Kconfig	Tue Mar 31 11:49:12 2009 +0100
     1.2 +++ b/drivers/net/sfc/Kconfig	Tue Mar 31 11:59:10 2009 +0100
     1.3 @@ -2,6 +2,7 @@ config SFC
     1.4  	tristate "Solarflare Solarstorm SFC4000 support"
     1.5  	depends on PCI && INET
     1.6  	select MII
     1.7 +	select CRC32
     1.8  	help
     1.9  	  This driver supports 10-gigabit Ethernet cards based on
    1.10  	  the Solarflare Communications Solarstorm SFC4000 controller.
    1.11 @@ -28,8 +29,7 @@ config SFC_MTD
    1.12  	  new boot ROM to the NIC.
    1.13  
    1.14  config SFC_RESOURCE
    1.15 -	depends on SFC && X86
    1.16 -	tristate "Solarflare Solarstorm SFC4000 resource driver"
    1.17 -	help
    1.18 -	  This module provides the SFC resource manager driver.
    1.19 -
    1.20 +        depends on SFC && X86
    1.21 +        tristate "Solarflare Solarstorm SFC4000 resource driver"
    1.22 +        help
    1.23 +          This module provides the SFC resource manager driver.
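
The added "select CRC32" is needed because the driver calls the kernel CRC32 library directly (efx.c includes <linux/crc32.h>, as the efx.c hunk below shows); without it, a configuration with SFC enabled but CRC32 unset would fail to link. A minimal sketch of the usual reason a NIC driver needs CRC32, with an invented helper name:

    #include <linux/crc32.h>

    /* Illustrative sketch only: hash a multicast MAC address into a
     * filter-table bit. */
    static unsigned int example_mcast_hash(const unsigned char *addr)
    {
            u32 crc = crc32_le(~0, addr, 6 /* ETH_ALEN */);
            return crc & 0xff;      /* index into a 256-bit hash table */
    }
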
     2.1 --- a/drivers/net/sfc/Makefile	Tue Mar 31 11:49:12 2009 +0100
     2.2 +++ b/drivers/net/sfc/Makefile	Tue Mar 31 11:59:10 2009 +0100
     2.3 @@ -1,43 +1,13 @@
     2.4 -
     2.5 -# Final objects
     2.6 -sfc_o = sfc.o
     2.7 -sfc_mtd_o = sfc_mtd.o
     2.8 +sfc-y			+= efx.o falcon.o tx.o rx.o mentormac.o falcon_gmac.o \
     2.9 +			   falcon_xmac.o alaska.o i2c-direct.o selftest.o \
    2.10 +			   driverlink.o	ethtool.o xfp_phy.o mdio_10g.o \
    2.11 +			   txc43128_phy.o tenxpress.o lm87_support.o boards.o \
    2.12 +			   sfe4001.o pm8358_phy.o null_phy.o kernel_compat.o
    2.13 +sfc-$(CONFIG_SFC_DEBUGFS) += debugfs.o
    2.14 +obj-$(CONFIG_SFC)	+= sfc.o
    2.15  
    2.16 -# Constituent objects
    2.17 -sfc_elements_o :=
    2.18 -sfc_elements_o += efx.o
    2.19 -sfc_elements_o += falcon.o
    2.20 -sfc_elements_o += tx.o
    2.21 -sfc_elements_o += rx.o
    2.22 -sfc_elements_o += mentormac.o
    2.23 -sfc_elements_o += falcon_gmac.o
    2.24 -sfc_elements_o += falcon_xmac.o
    2.25 -sfc_elements_o += alaska.o
    2.26 -sfc_elements_o += i2c-direct.o
    2.27 -sfc_elements_o += selftest.o
    2.28 -sfc_elements_o += driverlink.o
    2.29 -ifeq ($(CONFIG_SFC_DEBUGFS),y)
    2.30 -sfc_elements_o += debugfs.o
    2.31 -endif
    2.32 -sfc_elements_o += ethtool.o
    2.33 -sfc_elements_o += xfp_phy.o
    2.34 -sfc_elements_o += mdio_10g.o
    2.35 -sfc_elements_o += txc43128_phy.o
    2.36 -sfc_elements_o += tenxpress.o
    2.37 -sfc_elements_o += lm87_support.o
    2.38 -sfc_elements_o += boards.o
    2.39 -sfc_elements_o += sfe4001.o
    2.40 -sfc_elements_o += pm8358_phy.o
    2.41 -sfc_elements_o += null_phy.o
    2.42 -sfc_elements_o += phy.o
    2.43 -sfc_elements_o += kernel_compat.o
    2.44 -
    2.45 -sfc_mtd_elements_o := mtd.o
    2.46 -
    2.47 -obj-$(CONFIG_SFC) += $(sfc_o)
    2.48 -obj-$(CONFIG_SFC_MTD) += $(sfc_mtd_o)
    2.49 -
    2.50 -sfc-objs = $(sfc_elements_o)
    2.51 -sfc_mtd-objs = $(sfc_mtd_elements_o)
    2.52 +sfc_mtd-y = mtd.o
    2.53 +obj-$(CONFIG_SFC_MTD)	+= sfc_mtd.o
    2.54  
    2.55  obj-$(CONFIG_SFC_RESOURCE) += sfc_resource/
    2.56 +
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/drivers/net/sfc/Module.symvers	Tue Mar 31 11:59:10 2009 +0100
     3.3 @@ -0,0 +1,6 @@
     3.4 +0x2e5e77fa	efx_dl_unregister_driver	drivers/net/sfc/sfc	EXPORT_SYMBOL
     3.5 +0x4ac7afe9	efx_dl_schedule_reset	drivers/net/sfc/sfc	EXPORT_SYMBOL
     3.6 +0xbb52ca8e	efx_dl_register_driver_api_ver_1	drivers/net/sfc/sfc	EXPORT_SYMBOL
     3.7 +0x278552f6	efx_dl_register_callbacks	drivers/net/sfc/sfc	EXPORT_SYMBOL
     3.8 +0xc4414515	efx_dl_get_nic	drivers/net/sfc/sfc	EXPORT_SYMBOL
     3.9 +0x42cae6c4	efx_dl_unregister_callbacks	drivers/net/sfc/sfc	EXPORT_SYMBOL
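
Each Module.symvers entry is a tab-separated record: the CRC of the exported symbol's signature, the symbol name, the exporting module's path, and the export type. modpost uses the CRCs for ABI checks when dependent modules (sfc_resource and the netback/netfront accelerators) are built against this driverlink API. A hedged sketch of where such a line comes from (placeholder symbol, not real driver code):

    #include <linux/module.h>

    int efx_dl_example(void)        /* placeholder, for illustration */
    {
            return 0;
    }
    /* EXPORT_SYMBOL() is what emits a Module.symvers line
     * (CRC, name, module path, export type) at build time. */
    EXPORT_SYMBOL(efx_dl_example);
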
     4.1 --- a/drivers/net/sfc/bitfield.h	Tue Mar 31 11:49:12 2009 +0100
     4.2 +++ b/drivers/net/sfc/bitfield.h	Tue Mar 31 11:59:10 2009 +0100
     4.3 @@ -55,11 +55,6 @@
     4.4  #define EFX_DWORD_3_LBN 96
     4.5  #define EFX_DWORD_3_WIDTH 32
     4.6  
     4.7 -#define EFX_BYTE  1
     4.8 -#define EFX_WORD  2
     4.9 -#define EFX_DWORD 4
    4.10 -#define EFX_OWORD 8
    4.11 -
    4.12  /* Specified attribute (e.g. LBN) of the specified field */
    4.13  #define EFX_VAL(field, attribute) field ## _ ## attribute
    4.14  /* Low bit number of the specified field */
    4.15 @@ -505,7 +500,7 @@ typedef union efx_oword {
    4.16  #endif
    4.17  
    4.18  #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
    4.19 -	if (FALCON_REV(efx) == FALCON_REV_B0) {			   \
    4.20 +	if (FALCON_REV(efx) >= FALCON_REV_B0) {			   \
    4.21  		EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
    4.22  	} else { \
    4.23  		EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
    4.24 @@ -513,7 +508,7 @@ typedef union efx_oword {
    4.25  } while (0)
    4.26  
    4.27  #define EFX_QWORD_FIELD_VER(efx, qword, field)	\
    4.28 -	(FALCON_REV(efx) == FALCON_REV_B0 ?	\
    4.29 +	(FALCON_REV(efx) >= FALCON_REV_B0 ?	\
    4.30  	 EFX_QWORD_FIELD((qword), field##_B0) :	\
    4.31  	 EFX_QWORD_FIELD((qword), field##_A1))
    4.32  
    4.33 @@ -527,18 +522,4 @@ typedef union efx_oword {
    4.34  			  ~((u64) 0) : ~((u32) 0))
    4.35  #define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
    4.36  
    4.37 -/*
    4.38 - * Determine if a DMA address is over the 4GB threshold
    4.39 - *
    4.40 - * Defined in a slightly tortuous way to avoid compiler warnings.
    4.41 - */
    4.42 -static inline int efx_is_over_4gb(dma_addr_t address)
    4.43 -{
    4.44 -	if (DMA_ADDR_T_WIDTH > 32)
    4.45 -		return (((u64) address) >> 32) ? 1 : 0;
    4.46 -	else
    4.47 -		/* Can never be true */
    4.48 -		return 0;
    4.49 -}
    4.50 -
    4.51  #endif /* EFX_BITFIELD_H */
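
The two "==" to ">=" changes make the revision-keyed field selectors choose the B0 field layout on B0 and any later silicon, rather than on exactly B0. Reduced to plain C (macro name invented for illustration):

    /* Pick the B0 register layout on B0 and all later revisions,
     * instead of only when the revision is exactly B0. */
    #define EXAMPLE_FIELD_VER(rev, b0_val, a1_val) \
            ((rev) >= FALCON_REV_B0 ? (b0_val) : (a1_val))
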
     5.1 --- a/drivers/net/sfc/debugfs.c	Tue Mar 31 11:49:12 2009 +0100
     5.2 +++ b/drivers/net/sfc/debugfs.c	Tue Mar 31 11:59:10 2009 +0100
     5.3 @@ -27,9 +27,6 @@
     5.4  
     5.5  #include <linux/module.h>
     5.6  #include <linux/pci.h>
     5.7 -/* For out-of-tree builds we always need procfs, if only for a compatibility
     5.8 - * symlink.
     5.9 - */
    5.10  #include <linux/proc_fs.h>
    5.11  #include <linux/dcache.h>
    5.12  #include <linux/seq_file.h>
    5.13 @@ -38,13 +35,6 @@
    5.14  #include "debugfs.h"
    5.15  #include "falcon.h"
    5.16  
    5.17 -/* EFX_USE_DEBUGFS is defined by kernel_compat.h so we can't decide whether to
    5.18 - * include this earlier.
    5.19 - */
    5.20 -#ifdef EFX_USE_DEBUGFS
    5.21 -#include <linux/debugfs.h>
    5.22 -#endif
    5.23 -
    5.24  #ifndef PRIu64
    5.25  #	if (BITS_PER_LONG == 64)
    5.26  #		define PRIu64 "lu"
    5.27 @@ -53,8 +43,6 @@
    5.28  #	endif
    5.29  #endif
    5.30  
    5.31 -#ifndef EFX_USE_DEBUGFS
    5.32 -
    5.33  static void efx_debugfs_remove(struct proc_dir_entry *entry)
    5.34  {
    5.35  	if (entry)
    5.36 @@ -65,7 +53,6 @@ static void efx_debugfs_remove(struct pr
    5.37  #define debugfs_create_dir proc_mkdir
    5.38  #define debugfs_create_symlink proc_symlink
    5.39  
    5.40 -#endif /* !EFX_USE_DEBUGFS */
    5.41  
    5.42  /* Parameter definition bound to a structure - each file has one of these */
    5.43  struct efx_debugfs_bound_param {
    5.44 @@ -87,25 +74,6 @@ static struct dentry *efx_debug_cards;
    5.45  
    5.46  /* Sequential file interface to bound parameters */
    5.47  
    5.48 -#if defined(EFX_USE_DEBUGFS)
    5.49 -
    5.50 -static int efx_debugfs_seq_show(struct seq_file *file, void *v)
    5.51 -{
    5.52 -	struct efx_debugfs_bound_param *binding =
    5.53 -		(struct efx_debugfs_bound_param *)file->private;
    5.54 -
    5.55 -	return binding->param->reader(file,
    5.56 -				      binding->structure +
    5.57 -				      binding->param->offset);
    5.58 -}
    5.59 -
    5.60 -static int efx_debugfs_open(struct inode *inode, struct file *file)
    5.61 -{
    5.62 -	return single_open(file, efx_debugfs_seq_show, inode->i_private);
    5.63 -}
    5.64 -
    5.65 -#else /* EFX_NOT_UPSTREAM && !EFX_USE_DEBUGFS */
    5.66 -
    5.67  static int efx_debugfs_seq_show(struct seq_file *file, void *v)
    5.68  {
    5.69  	struct proc_dir_entry *entry = (struct proc_dir_entry *)file->private;
    5.70 @@ -124,8 +92,6 @@ static int efx_debugfs_open(struct inode
    5.71  	return single_open(file, efx_debugfs_seq_show, PROC_I(inode)->pde);
    5.72  }
    5.73  
    5.74 -#endif /* !EFX_NOT_UPSTREAM || EFX_USE_DEBUGFS */
    5.75 -
    5.76  
    5.77  static struct file_operations efx_debugfs_file_ops = {
    5.78  	.owner   = THIS_MODULE,
    5.79 @@ -136,42 +102,11 @@ static struct file_operations efx_debugf
    5.80  };
    5.81  
    5.82  
    5.83 -#if defined(EFX_USE_DEBUGFS)
    5.84 -
    5.85 -/**
    5.86 - * efx_fini_debugfs_child - remove a named child of a debugfs directory
    5.87 - * @dir:		Directory
    5.88 - * @name:		Name of child
    5.89 - *
    5.90 - * This removes the named child from the directory, if it exists.
    5.91 - */
    5.92 -void efx_fini_debugfs_child(struct dentry *dir, const char *name)
    5.93 -{
    5.94 -	struct qstr child_name;
    5.95 -	struct dentry *child;
    5.96 -
    5.97 -	child_name.len = strlen(name);
    5.98 -	child_name.name = name;
    5.99 -	child_name.hash = full_name_hash(child_name.name, child_name.len);
   5.100 -	child = d_lookup(dir, &child_name);
   5.101 -	if (child) {
   5.102 -		/* If it's a "regular" file, free its parameter binding */
   5.103 -		if (S_ISREG(child->d_inode->i_mode))
   5.104 -			kfree(child->d_inode->i_private);
   5.105 -		debugfs_remove(child);
   5.106 -		dput(child);
   5.107 -	}
   5.108 -}
   5.109 -
   5.110 -#else /* EFX_NOT_UPSTREAM && !EFX_USE_DEBUGFS */
   5.111 -
   5.112  void efx_fini_debugfs_child(struct proc_dir_entry *dir, const char *name)
   5.113  {
   5.114  	remove_proc_entry(name, dir);
   5.115  }
   5.116  
   5.117 -#endif /* !EFX_NOT_UPSTREAM || EFX_USE_DEBUGFS */
   5.118 -
   5.119  /*
   5.120   * Remove a debugfs directory.
   5.121   *
   5.122 @@ -283,22 +218,6 @@ static int efx_init_debugfs_files(struct
   5.123  
   5.124  	while (param->name) {
   5.125  		struct dentry *entry;
   5.126 -#if defined(EFX_USE_DEBUGFS)
   5.127 -		struct efx_debugfs_bound_param *binding;
   5.128 -
   5.129 -		binding = kmalloc(sizeof(*binding), GFP_KERNEL);
   5.130 -		if (!binding)
   5.131 -			goto err;
   5.132 -		binding->param = param;
   5.133 -		binding->structure = structure;
   5.134 -
   5.135 -		entry = debugfs_create_file(param->name, S_IRUGO, parent,
   5.136 -					    binding, &efx_debugfs_file_ops);
   5.137 -		if (!entry) {
   5.138 -			kfree(binding);
   5.139 -			goto err;
   5.140 -		}
   5.141 -#else
   5.142  		entry = create_proc_entry(param->name, S_IRUGO, parent);
   5.143  		if (!entry)
   5.144  			goto err;
   5.145 @@ -314,7 +233,6 @@ static int efx_init_debugfs_files(struct
   5.146  		entry->proc_fops = &efx_debugfs_file_ops;
   5.147  		smp_wmb();
   5.148  		entry->read_proc = (read_proc_t *) structure;
   5.149 -#endif
   5.150  
   5.151  		param++;
   5.152  	}
   5.153 @@ -392,7 +310,6 @@ void efx_fini_debugfs_netdev(struct net_
   5.154  static struct efx_debugfs_parameter efx_debugfs_port_parameters[] = {
   5.155  	EFX_NAMED_PARAMETER(enabled, struct efx_nic, port_enabled,
   5.156  			    int, efx_debugfs_read_int),
   5.157 -	EFX_INT_PARAMETER(struct efx_nic, net_dev_registered),
   5.158  	EFX_INT_PARAMETER(struct efx_nic, rx_checksum_enabled),
   5.159  	EFX_ATOMIC_PARAMETER(struct efx_nic, netif_stop_count),
   5.160  	EFX_INT_PARAMETER(struct efx_nic, link_up),
   5.161 @@ -668,6 +585,14 @@ static struct efx_debugfs_parameter efx_
   5.162  	EFX_INT_PARAMETER(struct efx_channel, rx_alloc_level),
   5.163  	EFX_INT_PARAMETER(struct efx_channel, rx_alloc_push_pages),
   5.164  	EFX_INT_PARAMETER(struct efx_channel, rx_alloc_pop_pages),
   5.165 +	EFX_UINT_PARAMETER(struct efx_channel, ssr.n_merges),
   5.166 +	EFX_UINT_PARAMETER(struct efx_channel, ssr.n_bursts),
   5.167 +	EFX_UINT_PARAMETER(struct efx_channel, ssr.n_slow_start),
   5.168 +	EFX_UINT_PARAMETER(struct efx_channel, ssr.n_misorder),
   5.169 +	EFX_UINT_PARAMETER(struct efx_channel, ssr.n_too_many),
   5.170 +	EFX_UINT_PARAMETER(struct efx_channel, ssr.n_new_stream),
   5.171 +	EFX_UINT_PARAMETER(struct efx_channel, ssr.n_drop_idle),
   5.172 +	EFX_UINT_PARAMETER(struct efx_channel, ssr.n_drop_closed),
   5.173  	{NULL},
   5.174  };
   5.175  
   5.176 @@ -882,11 +807,7 @@ void efx_fini_debugfs_nic(struct efx_nic
   5.177  int efx_init_debugfs(void)
   5.178  {
   5.179  	/* Create top-level directory */
   5.180 -#if defined(EFX_USE_DEBUGFS)
   5.181 -	efx_debug_root = debugfs_create_dir("sfc", NULL);
   5.182 -#else
   5.183  	efx_debug_root = proc_mkdir("sfc", proc_root_driver);
   5.184 -#endif
   5.185  	if (!efx_debug_root)
   5.186  		goto err;
   5.187  
   5.188 @@ -895,11 +816,6 @@ int efx_init_debugfs(void)
   5.189  	if (!efx_debug_cards)
   5.190  		goto err;
   5.191  
   5.192 -#if defined(EFX_USE_DEBUGFS)
   5.193 -	/* Create compatibility sym-link */
   5.194 -	if (!proc_symlink("sfc", proc_root_driver, "/sys/kernel/debug/sfc"))
   5.195 -		goto err;
   5.196 -#endif
   5.197  	return 0;
   5.198  
   5.199   err:
   5.200 @@ -914,9 +830,7 @@ int efx_init_debugfs(void)
   5.201   */
   5.202  void efx_fini_debugfs(void)
   5.203  {
   5.204 -#if defined(EFX_USE_DEBUGFS)
   5.205  	remove_proc_entry("sfc", proc_root_driver);
   5.206 -#endif
   5.207  	debugfs_remove(efx_debug_cards);
   5.208  	efx_debug_cards = NULL;
   5.209  	debugfs_remove(efx_debug_root);
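
With the dual debugfs/procfs support stripped out, this file now always backs the "sfc" debug directory with procfs, aliasing the debugfs-style names to their proc equivalents. A minimal sketch of the aliasing idea, assuming only the 2.6.18 procfs API:

    #include <linux/proc_fs.h>

    /* Sketch: let debugfs-style calls compile against procfs. */
    #define debugfs_create_dir     proc_mkdir
    #define debugfs_create_symlink proc_symlink

    static void example_remove(struct proc_dir_entry *entry)
    {
            if (entry)
                    remove_proc_entry(entry->name, entry->parent);
    }
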
     6.1 --- a/drivers/net/sfc/driverlink.c	Tue Mar 31 11:49:12 2009 +0100
     6.2 +++ b/drivers/net/sfc/driverlink.c	Tue Mar 31 11:59:10 2009 +0100
     6.3 @@ -140,20 +140,15 @@ void efx_dl_unregister_driver(struct efx
     6.4  	printk(KERN_INFO "Efx driverlink unregistering %s driver\n",
     6.5  		 driver->name);
     6.6  
     6.7 -	/* Acquire lock.  We can't return failure, so have to use
     6.8 -	 * down() instead of down_interruptible()
     6.9 -	 */
    6.10 +	/* Acquire lock.  We can't return failure */
    6.11  	mutex_lock(&efx_driverlink_lock);
    6.12  
    6.13 -	/* Remove all devices claimed by the driver */
    6.14  	list_for_each_entry_safe(efx_handle, efx_handle_n,
    6.15  				 &driver->device_list, driver_node)
    6.16  		efx_dl_del_device(&efx_handle->efx_dev);
    6.17  
    6.18 -	/* Remove driver from driver list */
    6.19  	list_del(&driver->node);
    6.20  
    6.21 -	/* Release lock */
    6.22  	mutex_unlock(&efx_driverlink_lock);
    6.23  }
    6.24  EXPORT_SYMBOL(efx_dl_unregister_driver);
    6.25 @@ -252,22 +247,14 @@ int efx_dl_register_nic(struct efx_nic *
    6.26   * To avoid a branch point on the fast-path, the callbacks are always
    6.27   * implemented - they are never NULL.
    6.28   */
    6.29 -#if defined(EFX_USE_FASTCALL)
    6.30  static enum efx_veto fastcall
    6.31 -#else
    6.32 -static enum efx_veto
    6.33 -#endif
    6.34  efx_dummy_tx_packet_callback(struct efx_dl_device *efx_dev, struct sk_buff *skb)
    6.35  {
    6.36  	/* Never veto the packet */
    6.37  	return EFX_ALLOW_PACKET;
    6.38  }
    6.39  
    6.40 -#if defined(EFX_USE_FASTCALL)
    6.41  static enum efx_veto fastcall
    6.42 -#else
    6.43 -static enum efx_veto
    6.44 -#endif
    6.45  efx_dummy_rx_packet_callback(struct efx_dl_device *efx_dev,
    6.46  			     const char *pkt_buf, int len)
    6.47  {
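
The dummy tx_packet/rx_packet callbacks exist so the data path can always make the indirect call instead of first testing for NULL: an unregistered slot simply points at a no-op that never vetoes. A standalone sketch of the pattern (types simplified, not the real driverlink structs):

    /* Sketch of the "callbacks are never NULL" pattern. */
    enum veto { ALLOW_PACKET, VETO_PACKET };

    static enum veto dummy_tx(void *dev, void *skb)
    {
            return ALLOW_PACKET;            /* never veto */
    }

    struct callbacks {
            enum veto (*tx_packet)(void *dev, void *skb);
    };

    /* Slots default to the dummy, so the fast path can call
     * cb->tx_packet() unconditionally, with no NULL-check branch. */
    static struct callbacks cb = { .tx_packet = dummy_tx };
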
     7.1 --- a/drivers/net/sfc/driverlink_api.h	Tue Mar 31 11:49:12 2009 +0100
     7.2 +++ b/drivers/net/sfc/driverlink_api.h	Tue Mar 31 11:59:10 2009 +0100
     7.3 @@ -29,13 +29,8 @@
     7.4  #define EFX_DRIVERLINK_API_H
     7.5  
     7.6  #include <linux/list.h> /* for struct list_head */
     7.7 -#if !defined(EFX_USE_FASTCALL)
     7.8 -	#include <linux/version.h>
     7.9 -	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
    7.10 -		#define EFX_USE_FASTCALL yes
    7.11 -		#include <linux/linkage.h>
    7.12 -	#endif
    7.13 -#endif
    7.14 +#include <linux/linkage.h>
    7.15 +#define EFX_USE_FASTCALL yes
    7.16  
    7.17  /**
    7.18   * DOC: Efx driverlink API
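
With this tree pinned to 2.6.18, the LINUX_VERSION_CODE probe was dead weight: fastcall is available on every kernel before 2.6.20, so the header now defines EFX_USE_FASTCALL unconditionally. The annotation requests register-based argument passing on x86-32 and expands to nothing where that is already the default; its effect on a callback declaration, sketched:

    #include <linux/linkage.h>

    /* fastcall is regparm(3) on i386, empty elsewhere. */
    enum efx_veto fastcall example_rx_cb(struct efx_dl_device *efx_dev,
                                         const char *pkt_hdr, int pkt_len);
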
    7.19 @@ -327,25 +322,25 @@ enum efx_dl_falcon_resource_flags {
    7.20   *	The sfc driver will provide the appropriate lock semantics for
    7.21   *	the underlying hardware.
    7.22   * @buffer_table_min: First available buffer table entry
    7.23 - * @buffer_table_max: Last available buffer table entry + 1
    7.24 + * @buffer_table_lim: Last available buffer table entry + 1
    7.25   * @evq_timer_min: First available event queue with timer
    7.26 - * @evq_timer_max: Last available event queue with timer + 1
    7.27 + * @evq_timer_lim: Last available event queue with timer + 1
    7.28   * @evq_int_min: First available event queue with interrupt
    7.29 - * @evq_int_max: Last available event queue with interrupt + 1
    7.30 + * @evq_int_lim: Last available event queue with interrupt + 1
    7.31   * @rxq_min: First available RX queue
    7.32 - * @rxq_max: Last available RX queue + 1
    7.33 + * @rxq_lim: Last available RX queue + 1
    7.34   * @txq_min: First available TX queue
    7.35 - * @txq_max: Last available TX queue + 1
    7.36 + * @txq_lim: Last available TX queue + 1
    7.37   * @flags: Hardware variation flags
    7.38   */
    7.39  struct efx_dl_falcon_resources {
    7.40  	struct efx_dl_device_info hdr;
    7.41  	spinlock_t *biu_lock;
    7.42 -	unsigned buffer_table_min, buffer_table_max;
    7.43 -	unsigned evq_timer_min, evq_timer_max;
    7.44 -	unsigned evq_int_min, evq_int_max;
    7.45 -	unsigned rxq_min, rxq_max;
    7.46 -	unsigned txq_min, txq_max;
    7.47 +	unsigned buffer_table_min, buffer_table_lim;
    7.48 +	unsigned evq_timer_min, evq_timer_lim;
    7.49 +	unsigned evq_int_min, evq_int_lim;
    7.50 +	unsigned rxq_min, rxq_lim;
    7.51 +	unsigned txq_min, txq_lim;
    7.52  	enum efx_dl_falcon_resource_flags flags;
    7.53  };
    7.54  
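
The rename from *_max to *_lim makes the half-open convention explicit: each resource window is [min, lim), with lim one past the last usable entry, so consumers can size or iterate a window without off-by-one adjustments. A hedged fragment against the struct above:

    /* Half-open window [min, lim): the count is simply lim - min. */
    static unsigned example_count_rxqs(const struct efx_dl_falcon_resources *res)
    {
            return res->rxq_lim - res->rxq_min;
    }
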
    7.55 @@ -426,13 +421,8 @@ struct efx_dl_callbacks {
    7.56  	 * may have multiple TX queues, running in parallel, please avoid
     7.57  	 * the need for locking if at all possible.
    7.58  	 */
    7.59 -#if defined(EFX_USE_FASTCALL)
    7.60  	enum efx_veto fastcall (*tx_packet) (struct efx_dl_device *efx_dev,
    7.61  					     struct sk_buff *skb);
    7.62 -#else
    7.63 -	enum efx_veto (*tx_packet) (struct efx_dl_device *efx_dev,
    7.64 -				    struct sk_buff *skb);
    7.65 -#endif
    7.66  
    7.67  	/*
    7.68  	 * rx_packet - Packet received.
    7.69 @@ -457,13 +447,8 @@ struct efx_dl_callbacks {
    7.70  	 * allows for lockless operation between receive channels, so
    7.71  	 * please avoid the need for locking if at all possible.
    7.72  	 */
    7.73 -#if defined(EFX_USE_FASTCALL)
    7.74  	enum efx_veto fastcall (*rx_packet) (struct efx_dl_device *efx_dev,
    7.75  					     const char *pkt_hdr, int pkt_len);
    7.76 -#else
    7.77 -	enum efx_veto (*rx_packet) (struct efx_dl_device *efx_dev,
    7.78 -				    const char *pkt_hdr, int pkt_len);
    7.79 -#endif
    7.80  
    7.81  	/*
    7.82  	 * link_change - Link status change.
     8.1 --- a/drivers/net/sfc/efx.c	Tue Mar 31 11:49:12 2009 +0100
     8.2 +++ b/drivers/net/sfc/efx.c	Tue Mar 31 11:59:10 2009 +0100
     8.3 @@ -36,7 +36,6 @@
     8.4  #include <linux/in.h>
     8.5  #include <linux/crc32.h>
     8.6  #include <linux/ethtool.h>
     8.7 -#include <asm/uaccess.h>
     8.8  #include "net_driver.h"
     8.9  #include "gmii.h"
    8.10  #include "driverlink.h"
    8.11 @@ -93,13 +92,16 @@ const char *efx_phy_type_names[] = {
    8.12  
    8.13  const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
    8.14  const char *efx_reset_type_names[] = {
    8.15 -	[RESET_TYPE_INVISIBLE]    = "INVISIBLE",
    8.16 -	[RESET_TYPE_ALL]          = "ALL",
    8.17 -	[RESET_TYPE_WORLD]        = "WORLD",
    8.18 -	[RESET_TYPE_DISABLE]      = "DISABLE",
    8.19 -	[RESET_TYPE_MONITOR]      = "MONITOR",
    8.20 -	[RESET_TYPE_INT_ERROR]    = "INT_ERROR",
    8.21 -	[RESET_TYPE_RX_RECOVERY]  = "RX_RECOVERY",
    8.22 +	[RESET_TYPE_INVISIBLE]     = "INVISIBLE",
    8.23 +	[RESET_TYPE_ALL]           = "ALL",
    8.24 +	[RESET_TYPE_WORLD]         = "WORLD",
    8.25 +	[RESET_TYPE_DISABLE]       = "DISABLE",
    8.26 +	[RESET_TYPE_MONITOR]       = "MONITOR",
    8.27 +	[RESET_TYPE_INT_ERROR]     = "INT_ERROR",
    8.28 +	[RESET_TYPE_RX_RECOVERY]   = "RX_RECOVERY",
    8.29 +	[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
    8.30 +	[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
    8.31 +	[RESET_TYPE_TX_SKIP]       = "TX_SKIP",
    8.32  };
    8.33  
    8.34  const unsigned int efx_nic_state_max = STATE_MAX;
    8.35 @@ -113,6 +115,12 @@ const char *efx_nic_state_names[] = {
    8.36  
    8.37  #define EFX_MAX_MTU (9 * 1024)
    8.38  
    8.39 +/* RX slow fill workqueue. If memory allocation fails in the fast path,
    8.40 + * a work item is pushed onto this work queue to retry the allocation later,
     8.41 + * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
     8.42 + * workqueue, there is nothing to be gained in making it per NIC.
    8.43 + */
    8.44 +static struct workqueue_struct *refill_workqueue;
    8.45  
    8.46  /**************************************************************************
    8.47   *
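
The refill workqueue is now a single module-level queue rather than per-NIC state; efx_schedule_slow_fill() (added further down) queues a per-RX-queue delayed work item on it whenever buffer allocation fails in the fast path. A minimal lifecycle sketch using the 2.6.18 workqueue API (queue name assumed):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int example_init(void)
    {
            example_wq = create_workqueue("sfc_refill");
            return example_wq ? 0 : -ENOMEM;
    }

    static void example_exit(void)
    {
            flush_workqueue(example_wq);    /* let pending refills finish */
            destroy_workqueue(example_wq);
    }
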
    8.48 @@ -121,6 +129,16 @@ const char *efx_nic_state_names[] = {
    8.49   *************************************************************************/
    8.50  
    8.51  /*
    8.52 + * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
    8.53 + *
    8.54 + * This sets the default for new devices.  It can be controlled later
    8.55 + * using ethtool.
    8.56 + */
    8.57 +static int lro = 1;
    8.58 +module_param(lro, int, 0644);
    8.59 +MODULE_PARM_DESC(lro, "Large receive offload acceleration");
    8.60 +
    8.61 +/*
    8.62   * Use separate channels for TX and RX events
    8.63   *
    8.64   * Set this to 1 to use separate channels for TX and RX. It allows us to
    8.65 @@ -150,7 +168,7 @@ static unsigned int monitor_reset = 1;
    8.66  /* This controls whether or not the driver will initialise devices
    8.67   * with invalid MAC addresses stored in the EEPROM or flash.  If true,
    8.68   * such devices will be initialised with a random locally-generated
    8.69 - * MAC address.  This allows for loading the efx_mtd driver to
    8.70 + * MAC address.  This allows for loading the sfc_mtd driver to
    8.71   * reprogram the flash, even if the flash contents (including the MAC
    8.72   * address) have previously been erased.
    8.73   */
    8.74 @@ -182,11 +200,7 @@ static unsigned int tx_irq_mod_usec = 15
    8.75   */
    8.76  static unsigned int allow_load_on_failure;
    8.77  
    8.78 -/* Set to 1 to enable the use of Message-Signalled Interrupts (MSI).
    8.79 - * MSI will not work on some motherboards due to limitations of the
    8.80 - * chipset, so the default is off.
    8.81 - *
    8.82 - * This is the highest capability interrupt mode to use
    8.83 +/* This is the first interrupt mode to try out of:
    8.84   * 0 => MSI-X
    8.85   * 1 => MSI
    8.86   * 2 => legacy
    8.87 @@ -203,10 +217,8 @@ static unsigned int onload_offline_selft
    8.88   * i.e. the number of CPUs among which we may distribute simultaneous
    8.89   * interrupt handling.
    8.90   *
    8.91 - * Cards without MSI-X will only target one CPU
    8.92 - *
    8.93 - * Default (0) means to use all CPUs in the system.  This parameter
    8.94 - * can be set using "rss_cpus=xxx" when loading the module.
    8.95 + * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
    8.96 + * The default (0) means to assign an interrupt to each package (level II cache)
    8.97   */
    8.98  static unsigned int rss_cpus;
    8.99  module_param(rss_cpus, uint, 0444);
   8.100 @@ -222,6 +234,13 @@ static void efx_remove_port(struct efx_n
   8.101  static void efx_fini_napi(struct efx_nic *efx);
   8.102  static void efx_fini_channels(struct efx_nic *efx);
   8.103  
   8.104 +#define EFX_ASSERT_RESET_SERIALISED(efx)		\
   8.105 +	do {						\
   8.106 +		if ((efx->state == STATE_RUNNING) ||	\
   8.107 +		    (efx->state == STATE_RESETTING))	\
   8.108 +			ASSERT_RTNL();			\
   8.109 +	} while (0)
   8.110 +
   8.111  /**************************************************************************
   8.112   *
   8.113   * Event queue processing
   8.114 @@ -253,6 +272,7 @@ static inline int efx_process_channel(st
   8.115  		channel->rx_pkt = NULL;
   8.116  	}
   8.117  
   8.118 +	efx_flush_lro(channel);
   8.119  	efx_rx_strategy(channel);
   8.120  
   8.121  	/* Refill descriptor rings as necessary */
   8.122 @@ -288,19 +308,11 @@ static inline void efx_channel_processed
   8.123   * NAPI guarantees serialisation of polls of the same device, which
   8.124   * provides the guarantee required by efx_process_channel().
   8.125   */
   8.126 -#if !defined(EFX_HAVE_OLD_NAPI)
   8.127 -static int efx_poll(struct napi_struct *napi, int budget)
   8.128 -{
   8.129 -	struct efx_channel *channel =
   8.130 -		container_of(napi, struct efx_channel, napi_str);
   8.131 -	struct net_device *napi_dev = channel->napi_dev;
   8.132 -#else
   8.133  static int efx_poll(struct net_device *napi, int *budget_ret)
   8.134  {
   8.135  	struct net_device *napi_dev = napi;
   8.136  	struct efx_channel *channel = napi_dev->priv;
   8.137  	int budget = min(napi_dev->quota, *budget_ret);
   8.138 -#endif
   8.139  	int unused;
   8.140  	int rx_packets;
   8.141  
   8.142 @@ -309,10 +321,8 @@ static int efx_poll(struct net_device *n
   8.143  
   8.144  	unused = efx_process_channel(channel, budget);
   8.145  	rx_packets = (budget - unused);
   8.146 -#if defined(EFX_HAVE_OLD_NAPI)
   8.147  	napi_dev->quota -= rx_packets;
   8.148  	*budget_ret -= rx_packets;
   8.149 -#endif
   8.150  
   8.151  	if (rx_packets < budget) {
   8.152  		/* There is no race here; although napi_disable() will
   8.153 @@ -324,11 +334,7 @@ static int efx_poll(struct net_device *n
   8.154  		efx_channel_processed(channel);
   8.155  	}
   8.156  
   8.157 -#if !defined(EFX_HAVE_OLD_NAPI)
   8.158 -	return rx_packets;
   8.159 -#else
   8.160  	return (rx_packets >= budget);
   8.161 -#endif
   8.162  }
   8.163  
   8.164  /* Process the eventq of the specified channel immediately on this CPU
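
Only the pre-2.6.24 NAPI scheme survives here: the poll function hangs off a net_device, consumes dev->quota against a caller-supplied *budget, and returns nonzero while work remains (the napi_struct variant removed above belongs to later mainline kernels). A standalone sketch of that contract, packet processing elided:

    #include <linux/netdevice.h>

    static int example_poll(struct net_device *dev, int *budget)
    {
            int limit = min(dev->quota, *budget);
            int done;

            done = 0;       /* process up to 'limit' packets here, count them */

            dev->quota -= done;
            *budget    -= done;

            if (done < limit) {
                    netif_rx_complete(dev); /* finished: re-enable interrupts */
                    return 0;
            }
            return 1;                       /* more work: poll again */
    }
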
   8.165 @@ -387,8 +393,6 @@ static int efx_init_eventq(struct efx_ch
   8.166  {
   8.167  	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
   8.168  
   8.169 -	ASSERT_RTNL();
   8.170 -
   8.171  	/* Initialise fields */
   8.172  	channel->eventq_read_ptr = 0;
   8.173  
   8.174 @@ -399,8 +403,6 @@ static void efx_fini_eventq(struct efx_c
   8.175  {
   8.176  	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
   8.177  
   8.178 -	ASSERT_RTNL();
   8.179 -
   8.180  	falcon_fini_eventq(channel);
   8.181  }
   8.182  
   8.183 @@ -429,7 +431,7 @@ static void efx_calc_rx_buffer_params(st
   8.184  	       EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
   8.185  	       efx->type->rx_buffer_padding);
   8.186  
   8.187 -	/* Page-based allocation page-order */
   8.188 +	/* Calculate page-order */
   8.189  	for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
   8.190  		;
   8.191  
   8.192 @@ -573,9 +575,7 @@ static void efx_stop_channel(struct efx_
   8.193  	/* Wait for any NAPI processing to complete */
   8.194  	napi_disable(&channel->napi_str);
   8.195  
   8.196 -	/* Ensure that any worker threads have exited or will be
   8.197 -	 * no-ops.
   8.198 -	 */
   8.199 +	/* Ensure that any worker threads have exited or will be no-ops */
   8.200  	efx_for_each_channel_rx_queue(rx_queue, channel) {
   8.201  		spin_lock_bh(&rx_queue->add_lock);
   8.202  		spin_unlock_bh(&rx_queue->add_lock);
   8.203 @@ -588,7 +588,8 @@ static void efx_fini_channels(struct efx
   8.204  	struct efx_tx_queue *tx_queue;
   8.205  	struct efx_rx_queue *rx_queue;
   8.206  
   8.207 -	ASSERT_RTNL();
   8.208 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.209 +	BUG_ON(efx->port_enabled);
   8.210  
   8.211  	efx_for_each_channel(channel, efx) {
   8.212  		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
   8.213 @@ -624,6 +625,11 @@ static void efx_remove_channel(struct ef
   8.214  	channel->used_flags = 0;
   8.215  }
   8.216  
   8.217 +void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
   8.218 +{
   8.219 +	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
   8.220 +}
   8.221 +
   8.222  /**************************************************************************
   8.223   *
   8.224   * Port handling
   8.225 @@ -636,12 +642,13 @@ static void efx_remove_channel(struct ef
   8.226   */
   8.227  static void efx_link_status_changed(struct efx_nic *efx)
   8.228  {
   8.229 -	unsigned long flags __attribute__ ((unused));
   8.230  	int carrier_ok;
   8.231  
   8.232 -	/* Ensure no link status notifications get sent to the OS after the net
   8.233 -	 * device has been unregistered. */
   8.234 -	if (!efx->net_dev_registered)
   8.235 +	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
   8.236 +	 * that no events are triggered between unregister_netdev() and the
   8.237 +	 * driver unloading. A more general condition is that NETDEV_CHANGE
   8.238 +	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
   8.239 +	if (!netif_running(efx->net_dev))
   8.240  		return;
   8.241  
   8.242  	carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
   8.243 @@ -685,46 +692,50 @@ static void efx_link_status_changed(stru
   8.244  			 (efx->loopback_mode ? " LOOPBACK]" : ""),
   8.245  			 (efx->promiscuous ? " [PROMISC]" : ""));
   8.246  	} else {
   8.247 -		EFX_INFO(efx, "link down\n");
   8.248 +		EFX_INFO(efx, "link down%s\n",
   8.249 +			 efx->phy_powered ? "" : " [OFF]");
   8.250  	}
   8.251  
   8.252  }
   8.253  
   8.254 -/* This call reinitialises the MAC to pick up new PHY settings
   8.255 - * To call from a context that cannot sleep use reconfigure_work work item
   8.256 - * For on_disabled=1 the caller must be serialised against efx_reset,
   8.257 - * ideally by holding the rtnl lock.
   8.258 - */
   8.259 -void efx_reconfigure_port(struct efx_nic *efx, int on_disabled)
   8.260 +/* This call reinitialises the MAC to pick up new PHY settings. The
   8.261 + * caller must hold the mac_lock */
   8.262 +static void __efx_reconfigure_port(struct efx_nic *efx)
   8.263  {
   8.264 -	mutex_lock(&efx->mac_lock);
   8.265 -
   8.266 -	EFX_LOG(efx, "reconfiguring MAC from PHY settings\n");
   8.267 -
   8.268 -	if (on_disabled)
   8.269 -		ASSERT_RTNL();
   8.270 -	else if (!efx->port_enabled)
   8.271 -		goto out;
   8.272 +	WARN_ON(!mutex_is_locked(&efx->mac_lock));
   8.273 +
   8.274 +	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
   8.275 +		raw_smp_processor_id());
   8.276  
   8.277  	efx->mac_op->reconfigure(efx);
   8.278  
   8.279 -out:
   8.280  	/* Inform kernel of loss/gain of carrier */
   8.281  	efx_link_status_changed(efx);
   8.282 -
   8.283 +}
   8.284 +
   8.285 +/* Reinitialise the MAC to pick up new PHY settings, even if the port is
   8.286 + * disabled. */
   8.287 +void efx_reconfigure_port(struct efx_nic *efx)
   8.288 +{
   8.289 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.290 +
   8.291 +	mutex_lock(&efx->mac_lock);
   8.292 +	__efx_reconfigure_port(efx);
   8.293  	mutex_unlock(&efx->mac_lock);
   8.294  }
   8.295  
   8.296 +/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
   8.297 + * we don't efx_reconfigure_port() if the port is disabled. Care is taken
   8.298 + * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
   8.299  static void efx_reconfigure_work(struct work_struct *data)
   8.300  {
   8.301  	struct efx_nic *efx = container_of(data, struct efx_nic,
   8.302  					   reconfigure_work);
   8.303  
   8.304 -	EFX_LOG(efx, "MAC reconfigure executing on CPU %d\n",
   8.305 -		raw_smp_processor_id());
   8.306 -
   8.307 -	/* Reinitialise MAC to activate new PHY parameters */
   8.308 -	efx_reconfigure_port(efx, 0);
   8.309 +	mutex_lock(&efx->mac_lock);
   8.310 +	if (efx->port_enabled)
   8.311 +		__efx_reconfigure_port(efx);
   8.312 +	mutex_unlock(&efx->mac_lock);
   8.313  }
   8.314  
   8.315  static int efx_probe_port(struct efx_nic *efx)
   8.316 @@ -789,47 +800,34 @@ static int efx_init_port(struct efx_nic 
   8.317  	return 0;
   8.318  }
   8.319  
   8.320 -/* Allow efx_reconfigure_port() to run, and propagate delayed changes
   8.321 - * to the promiscuous flag to the MAC if needed */
   8.322 +/* Allow efx_reconfigure_port() to be scheduled, and close the window
   8.323 + * between efx_stop_port and efx_flush_all whereby a previously scheduled
   8.324 + * efx_reconfigure_port() may have been cancelled */
   8.325  static void efx_start_port(struct efx_nic *efx)
   8.326  {
   8.327  	EFX_LOG(efx, "start port\n");
   8.328 -	ASSERT_RTNL();
   8.329 -
   8.330  	BUG_ON(efx->port_enabled);
   8.331  
   8.332  	mutex_lock(&efx->mac_lock);
   8.333  	efx->port_enabled = 1;
   8.334 +	__efx_reconfigure_port(efx);
   8.335  	mutex_unlock(&efx->mac_lock);
   8.336 -
   8.337 -	if (efx->net_dev_registered) {
   8.338 -		int promiscuous;
   8.339 -
   8.340 -		netif_tx_lock_bh(efx->net_dev);
   8.341 -		promiscuous = (efx->net_dev->flags & IFF_PROMISC) ? 1 : 0;
   8.342 -		if (efx->promiscuous != promiscuous) {
   8.343 -			efx->promiscuous = promiscuous;
   8.344 -			queue_work(efx->workqueue, &efx->reconfigure_work);
   8.345 -		}
   8.346 -		netif_tx_unlock_bh(efx->net_dev);
   8.347 -	}
   8.348  }
   8.349  
   8.350 -/* Prevents efx_reconfigure_port() from executing, and prevents
   8.351 +/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
   8.352   * efx_set_multicast_list() from scheduling efx_reconfigure_work.
   8.353   * efx_reconfigure_work can still be scheduled via NAPI processing
   8.354   * until efx_flush_all() is called */
   8.355  static void efx_stop_port(struct efx_nic *efx)
   8.356  {
   8.357  	EFX_LOG(efx, "stop port\n");
   8.358 -	ASSERT_RTNL();
   8.359  
   8.360  	mutex_lock(&efx->mac_lock);
   8.361  	efx->port_enabled = 0;
   8.362  	mutex_unlock(&efx->mac_lock);
   8.363  
   8.364  	/* Serialise against efx_set_multicast_list() */
   8.365 -	if (efx->net_dev_registered) {
   8.366 +	if (NET_DEV_REGISTERED(efx)) {
   8.367  		netif_tx_lock_bh(efx->net_dev);
   8.368  		netif_tx_unlock_bh(efx->net_dev);
   8.369  	}
   8.370 @@ -868,6 +866,7 @@ static void efx_remove_port(struct efx_n
   8.371  static int efx_init_io(struct efx_nic *efx)
   8.372  {
   8.373  	struct pci_dev *pci_dev = efx->pci_dev;
   8.374 +	dma_addr_t dma_mask = efx->type->max_dma_mask;
   8.375  	int rc;
   8.376  
   8.377  	EFX_LOG(efx, "initialising I/O\n");
   8.378 @@ -886,20 +885,18 @@ static int efx_init_io(struct efx_nic *e
   8.379  	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
   8.380  	 * masks event though they reject 46 bit masks.
   8.381  	 */
   8.382 -	efx->dma_mask = efx->type->max_dma_mask;
   8.383 -	while (efx->dma_mask > 0x7fffffffUL) {
   8.384 -		if (pci_dma_supported(pci_dev, efx->dma_mask) &&
   8.385 -		    ((rc = pci_set_dma_mask(pci_dev, efx->dma_mask)) == 0))
   8.386 +	while (dma_mask > 0x7fffffffUL) {
   8.387 +		if (pci_dma_supported(pci_dev, dma_mask) &&
   8.388 +		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
   8.389  			break;
   8.390 -		efx->dma_mask >>= 1;
   8.391 +		dma_mask >>= 1;
   8.392  	}
   8.393  	if (rc) {
   8.394  		EFX_ERR(efx, "could not find a suitable DMA mask\n");
   8.395  		goto fail2;
   8.396  	}
   8.397 -	EFX_LOG(efx, "using DMA mask %llx\n",
   8.398 -		(unsigned long long)efx->dma_mask);
   8.399 -	rc = pci_set_consistent_dma_mask(pci_dev, efx->dma_mask);
   8.400 +	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
   8.401 +	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
   8.402  	if (rc) {
   8.403  		/* pci_set_consistent_dma_mask() is not *allowed* to
   8.404  		 * fail with a mask that pci_set_dma_mask() accepted,
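
Keeping the DMA mask in a local works because nothing needs it after probe; the NETIF_F_HIGHDMA decision that used to read efx->dma_mask is also gone, as a later hunk shows. The probing loop halves the addressable range until the platform accepts it, never going below 31 bits; the pattern in isolation (2.6.18 PCI DMA API):

    #include <linux/pci.h>

    /* Sketch: walk a DMA mask down until the platform accepts it. */
    static int example_set_dma_mask(struct pci_dev *pdev, u64 mask)
    {
            int rc = -EIO;

            while (mask > 0x7fffffffUL) {
                    if (pci_dma_supported(pdev, mask) &&
                        (rc = pci_set_dma_mask(pdev, mask)) == 0)
                            break;
                    mask >>= 1;             /* drop one address bit, retry */
            }
            return rc;
    }
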
   8.405 @@ -912,13 +909,7 @@ static int efx_init_io(struct efx_nic *e
   8.406  	/* Get memory base address */
   8.407  	efx->membase_phys = pci_resource_start(efx->pci_dev,
   8.408  					       efx->type->mem_bar);
   8.409 -#if !defined(EFX_HAVE_MSIX_TABLE_RESERVED)
   8.410  	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
   8.411 -#else
   8.412 -	if (!request_mem_region(efx->membase_phys, efx->type->mem_map_size,
   8.413 -				"sfc"))
   8.414 -		rc = -EIO;
   8.415 -#endif
   8.416  	if (rc) {
   8.417  		EFX_ERR(efx, "request for memory BAR failed\n");
   8.418  		rc = -EIO;
   8.419 @@ -960,11 +951,7 @@ static void efx_fini_io(struct efx_nic *
   8.420  	}
   8.421  
   8.422  	if (efx->membase_phys) {
   8.423 -#if !defined(EFX_HAVE_MSIX_TABLE_RESERVED)
   8.424  		pci_release_region(efx->pci_dev, efx->type->mem_bar);
   8.425 -#else
   8.426 -		release_mem_region(efx->membase_phys, efx->type->mem_map_size);
   8.427 -#endif
   8.428  		efx->membase_phys = 0UL;
   8.429  	}
   8.430  
   8.431 @@ -972,33 +959,44 @@ static void efx_fini_io(struct efx_nic *
   8.432  }
   8.433  
   8.434  /* Probe the number and type of interrupts we are able to obtain. */
   8.435 -static int efx_probe_interrupts(struct efx_nic *efx)
   8.436 +static void efx_probe_interrupts(struct efx_nic *efx)
   8.437  {
   8.438 +	int max_channel = efx->type->phys_addr_channels - 1;
   8.439  	struct msix_entry xentries[EFX_MAX_CHANNELS];
   8.440  	int rc, i;
   8.441  
   8.442 -	/* Select number of used RSS queues */
   8.443 -	/* TODO: Can we react to CPU hotplug? */
   8.444 -	if (rss_cpus == 0)
   8.445 -		rss_cpus = num_online_cpus();
   8.446 -
   8.447 -	efx->rss_queues = 1;
   8.448  	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
   8.449 -		unsigned int max_channel = efx->type->phys_addr_channels - 1;
   8.450 -
   8.451  		BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
   8.452 -		efx->rss_queues = min(max_channel + 1, rss_cpus);
   8.453 +
   8.454 +		if (rss_cpus == 0) {
   8.455 +#ifdef topology_core_siblings
   8.456 +			cpumask_t core_mask;
   8.457 +			int cpu;
   8.458 +
   8.459 +			cpus_clear(core_mask);
   8.460 +			efx->rss_queues = 0;
   8.461 +			for_each_online_cpu(cpu) {
   8.462 +				if (!cpu_isset(cpu, core_mask)) {
   8.463 +					++efx->rss_queues;
   8.464 +					cpus_or(core_mask, core_mask,
   8.465 +						topology_core_siblings(cpu));
   8.466 +				}
   8.467 +			}
   8.468 +#else
   8.469 +			efx->rss_queues = num_online_cpus();
   8.470 +#endif
   8.471 +		} else {
   8.472 +			efx->rss_queues = rss_cpus;
   8.473 +		}
   8.474 +
   8.475 +		/* Limit the number of rss queues appropriately */
   8.476 +		efx->rss_queues = min(efx->rss_queues, max_channel + 1);
   8.477  		efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
   8.478 -	}
   8.479 -
   8.480 -	/* Determine how many RSS queues we can use, and mark channels
   8.481 -	 * with the appropriate interrupt state */
   8.482 -	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
   8.483 -		/* Build MSI request structure */
   8.484 +
   8.485 +		/* Request maximum number of MSI interrupts, and fill out
    8.486 +	 * the channel interrupt information from the allowed allocation */
   8.487  		for (i = 0; i < efx->rss_queues; i++)
   8.488  			xentries[i].entry = i;
   8.489 -
   8.490 -		/* Request maximum number of MSI interrupts */
   8.491  		rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
   8.492  		if (rc > 0) {
   8.493  			EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
   8.494 @@ -1006,6 +1004,7 @@ static int efx_probe_interrupts(struct e
   8.495  			rc = pci_enable_msix(efx->pci_dev, xentries,
   8.496  					     efx->rss_queues);
   8.497  		}
   8.498 +
   8.499  		if (rc == 0) {
   8.500  			for (i = 0; i < efx->rss_queues; i++) {
   8.501  				efx->channel[i].has_interrupt = 1;
   8.502 @@ -1033,13 +1032,12 @@ static int efx_probe_interrupts(struct e
   8.503  
   8.504  	/* Assume legacy interrupts */
   8.505  	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
   8.506 +		efx->rss_queues = 1;
   8.507  		/* Every channel is interruptible */
   8.508  		for (i = 0; i < EFX_MAX_CHANNELS; i++)
   8.509  			efx->channel[i].has_interrupt = 1;
   8.510  		efx->legacy_irq = efx->pci_dev->irq;
   8.511  	}
   8.512 -
   8.513 -	return 0;
   8.514  }
   8.515  
   8.516  static void efx_remove_interrupts(struct efx_nic *efx)
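
The retry above leans on pci_enable_msix()'s three-way return: 0 means all requested vectors were granted, a negative value is a hard error, and a positive value means the request failed but that many vectors could be granted, so the driver immediately retries with the smaller count. Sketched in isolation:

    #include <linux/pci.h>

    /* Sketch: ask for n MSI-X vectors, settle for what is available.
     * The caller must already have filled in entries[i].entry. */
    static int example_enable_msix(struct pci_dev *pdev,
                                   struct msix_entry *entries, int n)
    {
            int rc = pci_enable_msix(pdev, entries, n);

            if (rc > 0)                     /* only rc vectors available */
                    rc = pci_enable_msix(pdev, entries, rc);
            return rc;                      /* 0 on success, -errno on error */
    }
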
   8.517 @@ -1059,7 +1057,7 @@ static void efx_remove_interrupts(struct
   8.518  /* Select number of used resources
   8.519   * Should be called after probe_interrupts()
   8.520   */
   8.521 -static int efx_select_used(struct efx_nic *efx)
   8.522 +static void efx_select_used(struct efx_nic *efx)
   8.523  {
   8.524  	struct efx_tx_queue *tx_queue;
   8.525  	struct efx_rx_queue *rx_queue;
   8.526 @@ -1096,7 +1094,6 @@ static int efx_select_used(struct efx_ni
   8.527  			rx_queue++;
   8.528  		}
   8.529  	}
   8.530 -	return 0;
   8.531  }
   8.532  
   8.533  static int efx_probe_nic(struct efx_nic *efx)
   8.534 @@ -1112,29 +1109,22 @@ static int efx_probe_nic(struct efx_nic 
   8.535  
   8.536  	/* Determine the number of channels and RX queues by trying to hook
   8.537  	 * in MSI-X interrupts. */
   8.538 -	rc = efx_probe_interrupts(efx);
   8.539 -	if (rc)
   8.540 -		goto fail2;
   8.541 +	efx_probe_interrupts(efx);
   8.542  
   8.543  	/* Determine number of RX queues and TX queues */
   8.544 -	rc = efx_select_used(efx);
   8.545 -	if (rc)
   8.546 -		goto fail3;
   8.547 +	efx_select_used(efx);
   8.548  
   8.549  	/* Register debugfs entries */
   8.550  	rc = efx_init_debugfs_nic(efx);
   8.551  	if (rc)
   8.552 -		goto fail4;
   8.553 +		goto fail2;
   8.554  	/* Initialise the interrupt moderation settings */
   8.555  	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
   8.556  
   8.557  	return 0;
   8.558  
   8.559 - fail4:
   8.560 -	/* fall-thru */
   8.561 - fail3:
   8.562 + fail2:
   8.563  	efx_remove_interrupts(efx);
   8.564 - fail2:
   8.565  	falcon_remove_nic(efx);
   8.566   fail1:
   8.567  	return rc;
   8.568 @@ -1190,21 +1180,23 @@ static int efx_probe_all(struct efx_nic 
   8.569   fail3:
   8.570  	efx_for_each_channel(channel, efx)
   8.571  		efx_remove_channel(channel);
   8.572 +	efx_remove_port(efx);
   8.573   fail2:
   8.574 -	efx_remove_port(efx);
   8.575 +	efx_remove_nic(efx);
   8.576   fail1:
   8.577  	return rc;
   8.578  }
   8.579  
   8.580  /* Called after previous invocation(s) of efx_stop_all, restarts the
   8.581 - * port, kernel transmit queue, NAPI processing and hardware interrupts.
   8.582 + * port, kernel transmit queue, NAPI processing and hardware interrupts,
   8.583 + * and ensures that the port is scheduled to be reconfigured.
   8.584   * This function is safe to call multiple times when the NIC is in any
   8.585   * state. */
   8.586  static void efx_start_all(struct efx_nic *efx)
   8.587  {
   8.588  	struct efx_channel *channel;
   8.589  
   8.590 -	ASSERT_RTNL();
   8.591 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.592  
   8.593  	/* Check that it is appropriate to restart the interface. All
   8.594  	 * of these flags are safe to read under just the rtnl lock */
   8.595 @@ -1212,7 +1204,7 @@ static void efx_start_all(struct efx_nic
   8.596  		return;
   8.597  	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
   8.598  		return;
   8.599 -	if (efx->net_dev_registered && !netif_running(efx->net_dev))
   8.600 +	if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
   8.601  		return;
   8.602  
   8.603  	/* Mark the port as enabled so port reconfigurations can start, then
   8.604 @@ -1227,8 +1219,7 @@ static void efx_start_all(struct efx_nic
   8.605  
   8.606  	/* Start hardware monitor if we're in RUNNING */
   8.607  	if (efx->state == STATE_RUNNING)
   8.608 -		queue_delayed_work(efx->workqueue, &efx->monitor_work,
   8.609 -				   efx_monitor_interval);
   8.610 +		queue_work(efx->workqueue, &efx->monitor_work);
   8.611  }
   8.612  
   8.613  /* Flush all delayed work. Should only be called when no more delayed work
   8.614 @@ -1236,24 +1227,6 @@ static void efx_start_all(struct efx_nic
   8.615   * since we're holding the rtnl_lock at this point. */
   8.616  static void efx_flush_all(struct efx_nic *efx)
   8.617  {
   8.618 -#if defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
   8.619 -	struct efx_rx_queue *rx_queue;
   8.620 -
   8.621 -	/* Make sure the hardware monitor is stopped */
   8.622 -	cancel_delayed_work_sync(&efx->monitor_work);
   8.623 -
   8.624 -	/* Ensure that all RX slow refills are complete. */
   8.625 -	efx_for_each_rx_queue(rx_queue, efx) {
   8.626 -		cancel_delayed_work_sync(&rx_queue->work);
   8.627 -	}
   8.628 -#endif
   8.629 -
   8.630 -#if defined(EFX_USE_CANCEL_WORK_SYNC)
   8.631 -	/* Stop scheduled port reconfigurations */
   8.632 -	cancel_work_sync(&efx->reconfigure_work);
   8.633 -#endif
   8.634 -
   8.635 -#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
   8.636  	/* Ensure that the hardware monitor and asynchronous port
   8.637  	 * reconfigurations are complete, which are the only two consumers
   8.638  	 * of efx->workqueue. Since the hardware monitor runs on a long period,
   8.639 @@ -1270,9 +1243,8 @@ static void efx_flush_all(struct efx_nic
   8.640  
   8.641  	/* efx_rx_work will disarm if !channel->enabled, so we can just
   8.642  	 * flush the refill workqueue twice as well. */
   8.643 -	flush_workqueue(efx->refill_workqueue);
   8.644 -	flush_workqueue(efx->refill_workqueue);
   8.645 -#endif
   8.646 +	flush_workqueue(refill_workqueue);
   8.647 +	flush_workqueue(refill_workqueue);
   8.648  }
   8.649  
   8.650  /* Quiesce hardware and software without bringing the link down.
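
With no cancel_delayed_work_sync() on 2.6.18, efx_flush_all() flushes the refill workqueue twice: the first flush waits out any instance already running, which may legitimately requeue itself once more; the second flush catches that requeued instance, which then sees the channel disabled and does not rearm. A minimal sketch of the self-rearming shape that makes a single flush insufficient (2.6.18 work API, with its void * handler):

    #include <linux/workqueue.h>

    static int example_stop;                /* set by the caller before flushing */
    static struct work_struct example_work;

    static void example_fn(void *data)
    {
            if (!example_stop)
                    schedule_work(&example_work);   /* may rearm once more */
    }

    static void example_start(void)
    {
            INIT_WORK(&example_work, example_fn, NULL); /* 3-arg, pre-2.6.20 */
            schedule_work(&example_work);
    }
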
   8.651 @@ -1284,7 +1256,7 @@ static void efx_stop_all(struct efx_nic 
   8.652  {
   8.653  	struct efx_channel *channel;
   8.654  
   8.655 -	ASSERT_RTNL();
   8.656 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.657  
   8.658  	/* port_enabled can be read safely under the rtnl lock */
   8.659  	if (!efx->port_enabled)
   8.660 @@ -1298,20 +1270,27 @@ static void efx_stop_all(struct efx_nic 
   8.661  		if (channel->irq)
   8.662  			synchronize_irq(channel->irq);
   8.663  
   8.664 -	/* Stop all synchronous port reconfigurations. */
   8.665 -	efx_stop_port(efx);
   8.666 -
   8.667  	/* Stop all NAPI processing and synchronous rx refills */
   8.668  	efx_for_each_channel(channel, efx)
   8.669  		efx_stop_channel(channel);
   8.670  
   8.671 +	/* Stop all asynchronous port reconfigurations. Since all
   8.672 +	 * event processing has already been stopped, there is no
    8.673 +	 * window to lose phy events */
   8.674 +	efx_stop_port(efx);
   8.675 +
   8.676  	/* Flush reconfigure_work, refill_workqueue, monitor_work */
   8.677  	efx_flush_all(efx);
   8.678  
   8.679 +	/* Isolate the MAC from the TX and RX engines, so that queue
   8.680 +	 * flushes will complete in a timely fashion. */
   8.681 +	falcon_deconfigure_mac_wrapper(efx);
   8.682 +	falcon_drain_tx_fifo(efx);
   8.683 +
   8.684  	/* Stop the kernel transmit interface late, so the watchdog
   8.685  	 * timer isn't ticking over the flush */
   8.686  	efx_stop_queue(efx);
   8.687 -	if (efx->net_dev_registered) {
   8.688 +	if (NET_DEV_REGISTERED(efx)) {
   8.689  		netif_tx_lock_bh(efx->net_dev);
   8.690  		netif_tx_unlock_bh(efx->net_dev);
   8.691  	}
   8.692 @@ -1360,16 +1339,15 @@ static int efx_run_selftests(struct efx_
   8.693  	return rc;
   8.694  }
   8.695  
    8.696 +/* A convenience function to safely flush all the queues */
   8.697  int efx_flush_queues(struct efx_nic *efx)
   8.698  {
   8.699  	int rc;
   8.700  
   8.701 -	ASSERT_RTNL();
   8.702 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.703  
   8.704  	efx_stop_all(efx);
   8.705  
   8.706 -	/* We can't just flush the tx queues because the event queues
   8.707 -	 * may contain tx completions from that queue. Just flush everything */
   8.708  	efx_fini_channels(efx);
   8.709  	rc = efx_init_channels(efx);
   8.710  	if (rc) {
   8.711 @@ -1394,7 +1372,7 @@ void efx_init_irq_moderation(struct efx_
   8.712  	struct efx_tx_queue *tx_queue;
   8.713  	struct efx_rx_queue *rx_queue;
   8.714  
   8.715 -	ASSERT_RTNL();
   8.716 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.717  
   8.718  	efx_for_each_tx_queue(tx_queue, efx)
   8.719  		tx_queue->channel->irq_moderation = tx_usecs;
   8.720 @@ -1413,20 +1391,13 @@ void efx_init_irq_moderation(struct efx_
   8.721   * efx_reconfigure_port via the mac_lock */
   8.722  static void efx_monitor(struct work_struct *data)
   8.723  {
   8.724 -#if !defined(EFX_NEED_WORK_API_WRAPPERS)
   8.725 -	struct efx_nic *efx = container_of(data, struct efx_nic,
   8.726 -					   monitor_work.work);
   8.727 -#else
   8.728  	struct efx_nic *efx = container_of(data, struct efx_nic,
   8.729  					   monitor_work);
   8.730 -#endif
   8.731  	int rc = 0;
   8.732  
   8.733  	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
   8.734  		  raw_smp_processor_id());
   8.735  
   8.736 -
   8.737 -#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
   8.738  	/* Without cancel_delayed_work_sync(), we have to make sure that
   8.739  	 * we don't rearm when port_enabled == 0 */
   8.740  	mutex_lock(&efx->mac_lock);
   8.741 @@ -1436,19 +1407,6 @@ static void efx_monitor(struct work_stru
   8.742  	}
   8.743  
   8.744  	rc = efx->mac_op->check_hw(efx);
   8.745 -#else
   8.746 -	/* If the mac_lock is already held then it is likely a port
   8.747 -	 * reconfiguration is already in place, which will likely do
   8.748 -	 * most of the work of check_hw() anyway. */
   8.749 -	if (!mutex_trylock(&efx->mac_lock)) {
   8.750 -		queue_delayed_work(efx->workqueue, &efx->monitor_work,
   8.751 -				   efx_monitor_interval);
   8.752 -		return;
   8.753 -	}
   8.754 -
   8.755 -	if (efx->port_enabled)
   8.756 -		rc = efx->mac_op->check_hw(efx);
   8.757 -#endif
   8.758  	mutex_unlock(&efx->mac_lock);
   8.759  
   8.760  	if (rc) {
   8.761 @@ -1478,24 +1436,11 @@ static void efx_monitor(struct work_stru
   8.762  static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
   8.763  {
   8.764  	struct efx_nic *efx = net_dev->priv;
   8.765 -	int rc;
   8.766 -
   8.767 -	ASSERT_RTNL();
   8.768 -
   8.769 -	switch (cmd) {
   8.770 -	case SIOCGMIIPHY:
   8.771 -	case SIOCGMIIREG:
   8.772 -		rc = generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
   8.773 -		break;
   8.774 -	case SIOCSMIIREG:
   8.775 -		rc = generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
   8.776 -		efx_reconfigure_port(efx, 0);
   8.777 -		break;
   8.778 -	default:
   8.779 -		rc = -EOPNOTSUPP;
   8.780 -	}
   8.781 -
   8.782 -	return rc;
   8.783 +
   8.784 +	if (!in_interrupt())
    8.785 +		EFX_ASSERT_RESET_SERIALISED(efx);
   8.786 +
   8.787 +	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
   8.788  }
   8.789  
   8.790  /**************************************************************************
   8.791 @@ -1512,8 +1457,6 @@ static int efx_init_napi(struct efx_nic 
   8.792  	struct efx_channel *channel;
   8.793  	int rc;
   8.794  
   8.795 -	ASSERT_RTNL();
   8.796 -
   8.797  	/* Allocate the NAPI dev for the port */
   8.798  	efx->net_dev = alloc_etherdev(0);
   8.799  	if (!efx->net_dev) {
   8.800 @@ -1523,29 +1466,15 @@ static int efx_init_napi(struct efx_nic 
   8.801  	efx->net_dev->priv = efx;
   8.802  	efx->mii.dev = efx->net_dev;
   8.803  
   8.804 -	/* Set features based on module parameters and DMA mask.
   8.805 -	 * Enable DMA to ZONE_HIGHMEM if the NIC can access all memory
   8.806 -	 * directly.  This only has an effect on 32-bit systems and
   8.807 -	 * PAE on x86 limits memory to 64GB so 40 bits is plenty to
   8.808 -	 * address everything.  If the device can't address 40 bits
   8.809 -	 * then it's safest to turn NETIF_F_HIGHDMA off because this
   8.810 -	 * might be a PAE system with more than 4G of RAM and a 32-bit
   8.811 -	 * NIC.  The use of EFX_DMA_MASK is to eliminate compiler
   8.812 -	 * warnings on platforms where dma_addr_t is 32-bit.  We
   8.813 -	 * assume that in those cases we can access all memory
   8.814 -	 * directly if our DMA mask is all ones. */
   8.815 -	efx->net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
   8.816 -	if (efx->dma_mask >= EFX_DMA_MASK(DMA_40BIT_MASK))
   8.817 -		efx->net_dev->features |= NETIF_F_HIGHDMA;
   8.818 +	efx->net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
   8.819 +				   NETIF_F_HIGHDMA);
   8.820 +	efx->lro_enabled = lro;
   8.821  
   8.822  	/* Copy MAC address */
   8.823  	memcpy(&efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
   8.824  
   8.825  	/* Allocate the per channel devs */
   8.826  	efx_for_each_channel(channel, efx) {
   8.827 -#if !defined(EFX_HAVE_OLD_NAPI)
   8.828 -		channel->napi_dev = efx->net_dev;
   8.829 -#else
   8.830  		channel->napi_dev = alloc_etherdev(0);
   8.831  		if (!channel->napi_dev) {
   8.832  			rc = -ENOMEM;
   8.833 @@ -1553,7 +1482,11 @@ static int efx_init_napi(struct efx_nic 
   8.834  		}
   8.835  		channel->napi_dev->priv = channel;
   8.836  		atomic_set(&channel->napi_dev->refcnt, 1);
   8.837 -#endif
   8.838 +
   8.839 +		/* Initialise LRO/SSR */
   8.840 +		rc = efx_ssr_init(&channel->ssr, efx);
   8.841 +		if (rc)
   8.842 +			goto err;
   8.843  	}
   8.844  
   8.845  	return 0;
   8.846 @@ -1567,16 +1500,15 @@ static void efx_fini_napi(struct efx_nic
   8.847  {
   8.848  	struct efx_channel *channel;
   8.849  
   8.850 -	ASSERT_RTNL();
   8.851 -
   8.852  	efx_for_each_channel(channel, efx) {
   8.853 +		/* Fini LRO/SSR */
   8.854 +		efx_ssr_fini(&channel->ssr);
   8.855 +
   8.856  		/* Finish per channel NAPI */
   8.857 -#if defined(EFX_HAVE_OLD_NAPI)
   8.858  		if (channel->napi_dev) {
   8.859  			channel->napi_dev->priv = NULL;
   8.860  			free_netdev(channel->napi_dev);
   8.861  		}
   8.862 -#endif
   8.863  		channel->napi_dev = NULL;
   8.864  	}
   8.865  
   8.866 @@ -1621,10 +1553,11 @@ static void efx_netpoll(struct net_devic
   8.867  static int efx_net_open(struct net_device *net_dev)
   8.868  {
   8.869  	struct efx_nic *efx = net_dev->priv;
   8.870 -	ASSERT_RTNL();
   8.871 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.872  
   8.873  	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
   8.874  		raw_smp_processor_id());
   8.875 +
   8.876  	efx_start_all(efx);
   8.877  	return 0;
   8.878  }
   8.879 @@ -1641,7 +1574,7 @@ static int efx_net_stop(struct net_devic
   8.880  	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
   8.881  		raw_smp_processor_id());
   8.882  
   8.883 -	/* Stop device and flush all the channels */
   8.884 +	/* Stop the device and flush all the channels */
   8.885  	efx_stop_all(efx);
   8.886  	efx_fini_channels(efx);
   8.887  	rc = efx_init_channels(efx);
   8.888 @@ -1651,9 +1584,7 @@ static int efx_net_stop(struct net_devic
   8.889  	return 0;
   8.890  }
   8.891  
   8.892 -/* Context: process, dev_base_lock held, non-blocking.
   8.893 - * Statistics are taken directly from the MAC.
   8.894 - */
   8.895 +/* Context: process, dev_base_lock held, non-blocking. */
   8.896  static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
   8.897  {
   8.898  	struct efx_nic *efx = net_dev->priv;
   8.899 @@ -1662,23 +1593,27 @@ static struct net_device_stats *efx_net_
   8.900  
   8.901  	if (!spin_trylock(&efx->stats_lock))
   8.902  		return stats;
   8.903 -	if (efx->state == STATE_RUNNING)
   8.904 +	if (efx->state == STATE_RUNNING) {
   8.905  		efx->mac_op->update_stats(efx);
   8.906 +		falcon_update_nic_stats(efx);
   8.907 +	}
   8.908  	spin_unlock(&efx->stats_lock);
   8.909  
   8.910  	stats->rx_packets = mac_stats->rx_packets;
   8.911  	stats->tx_packets = mac_stats->tx_packets;
   8.912  	stats->rx_bytes = mac_stats->rx_bytes;
   8.913  	stats->tx_bytes = mac_stats->tx_bytes;
   8.914 -	stats->tx_errors = mac_stats->tx_bad;
   8.915  	stats->multicast = mac_stats->rx_multicast;
   8.916  	stats->collisions = mac_stats->tx_collision;
   8.917 -	stats->rx_length_errors = mac_stats->rx_gtjumbo;
   8.918 -	stats->rx_over_errors = mac_stats->rx_overflow;
   8.919 +	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
   8.920 +				   mac_stats->rx_length_error);
   8.921 +	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
   8.922  	stats->rx_crc_errors = mac_stats->rx_bad;
   8.923  	stats->rx_frame_errors = mac_stats->rx_align_error;
   8.924 -	stats->rx_fifo_errors = 0;
   8.925 +	stats->rx_fifo_errors = mac_stats->rx_overflow;
   8.926  	stats->rx_missed_errors = mac_stats->rx_missed;
   8.927 +	stats->tx_window_errors = mac_stats->tx_late_collision;
   8.928 +
   8.929  	stats->rx_errors = (stats->rx_length_errors +
   8.930  			    stats->rx_over_errors +
   8.931  			    stats->rx_crc_errors +
   8.932 @@ -1686,11 +1621,8 @@ static struct net_device_stats *efx_net_
   8.933  			    stats->rx_fifo_errors +
   8.934  			    stats->rx_missed_errors +
   8.935  			    mac_stats->rx_symbol_error);
   8.936 -	stats->tx_aborted_errors = 0;
   8.937 -	stats->tx_carrier_errors = 0;
   8.938 -	stats->tx_fifo_errors = 0;
   8.939 -	stats->tx_heartbeat_errors = 0;
   8.940 -	stats->tx_window_errors = 0;
   8.941 +	stats->tx_errors = (stats->tx_window_errors +
   8.942 +			    mac_stats->tx_bad);
   8.943  
   8.944  	return stats;
   8.945  }
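
Because get_stats is called with dev_base_lock held and must not block, the
driver takes stats_lock with spin_trylock() and, on contention, simply serves
the previous snapshot instead of sleeping. A self-contained userspace analogue
of that non-blocking reader pattern (a pthread mutex stands in for the
spinlock; all names here are illustrative only):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
	static int snapshot = 42;	/* last published counter value */

	static int read_stats(void)
	{
		/* Contended: return the stale snapshot rather than block. */
		if (pthread_mutex_trylock(&stats_lock) != 0)
			return snapshot;
		snapshot++;		/* stand-in for refreshing counters */
		pthread_mutex_unlock(&stats_lock);
		return snapshot;
	}

	int main(void)
	{
		printf("%d\n", read_stats());
		return 0;
	}
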
   8.946 @@ -1715,7 +1647,7 @@ static int efx_change_mtu(struct net_dev
   8.947  	struct efx_nic *efx = net_dev->priv;
   8.948  	int rc = 0;
   8.949  
   8.950 -	ASSERT_RTNL();
   8.951 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.952  
   8.953  	if (new_mtu > EFX_MAX_MTU)
   8.954  		return -EINVAL;
   8.955 @@ -1738,15 +1670,11 @@ static int efx_change_mtu(struct net_dev
   8.956  	if (rc)
   8.957  		goto fail;
   8.958  
   8.959 -	/* Reconfigure the MAC */
   8.960 -	efx_reconfigure_port(efx, 1);
   8.961 -
   8.962  	/* Notify driverlink client of new MTU */
   8.963  	EFX_DL_CALLBACK(efx, mtu_changed, new_mtu);
   8.964  
   8.965 + out:
   8.966  	efx_start_all(efx);
   8.967 -
   8.968 - out:
   8.969  	return rc;
   8.970  
   8.971   fail:
   8.972 @@ -1760,7 +1688,7 @@ static int efx_set_mac_address(struct ne
   8.973  	struct sockaddr *addr = data;
   8.974  	char *new_addr = addr->sa_data;
   8.975  
   8.976 -	ASSERT_RTNL();
   8.977 +	EFX_ASSERT_RESET_SERIALISED(efx);
   8.978  
   8.979  	if (!is_valid_ether_addr(new_addr)) {
   8.980  		DECLARE_MAC_BUF(mac);
   8.981 @@ -1772,7 +1700,7 @@ static int efx_set_mac_address(struct ne
   8.982  	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
   8.983  
   8.984  	/* Reconfigure the MAC */
   8.985 -	efx_reconfigure_port(efx, 1);
   8.986 +	efx_reconfigure_port(efx);
   8.987  
   8.988  	return 0;
   8.989  }
   8.990 @@ -1783,7 +1711,6 @@ static void efx_set_multicast_list(struc
   8.991  	struct efx_nic *efx = net_dev->priv;
   8.992  	struct dev_mc_list *mc_list = net_dev->mc_list;
   8.993  	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
   8.994 -	unsigned long flags __attribute__ ((unused));
   8.995  	int promiscuous;
   8.996  	u32 crc;
   8.997  	int bit;
   8.998 @@ -1792,10 +1719,11 @@ static void efx_set_multicast_list(struc
   8.999  	/* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
  8.1000  	promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
  8.1001  	if (efx->promiscuous != promiscuous) {
  8.1002 -		if (efx->port_enabled) {
  8.1003 -			efx->promiscuous = promiscuous;
  8.1004 +		efx->promiscuous = promiscuous;
  8.1005 +		/* Close the window between efx_stop_port() and efx_flush_all()
  8.1006 +		 * by only queuing work when the port is enabled. */
  8.1007 +		if (efx->port_enabled)
  8.1008  			queue_work(efx->workqueue, &efx->reconfigure_work);
  8.1009 -		}
  8.1010  	}
  8.1011  
  8.1012  	/* Build multicast hash table */
  8.1013 @@ -1805,8 +1733,8 @@ static void efx_set_multicast_list(struc
  8.1014  		memset(mc_hash, 0x00, sizeof(*mc_hash));
  8.1015  		for (i = 0; i < net_dev->mc_count; i++) {
  8.1016  			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
  8.1017 -			bit = (crc & ((1 << EFX_MCAST_HASH_BITS) - 1));
  8.1018 -			set_bit_le(bit, (void *)mc_hash);
  8.1019 +			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
  8.1020 +			set_bit_le(bit, mc_hash->byte);
  8.1021  			mc_list = mc_list->next;
  8.1022  		}
  8.1023  	}
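
The hash bit is now simply the low bits of the little-endian Ethernet CRC,
with EFX_MCAST_HASH_ENTRIES assumed to be 256 (the broadcast bit 0xff used by
falcon_set_multicast_hash() below requires at least that many). A standalone
illustration using a local re-implementation of ether_crc_le() (init ~0,
reflected polynomial 0xedb88320, no final inversion); for the broadcast
address this yields 0xbe2612ff, hence hash bit 0xff:

	#include <stdio.h>
	#include <stdint.h>

	#define EFX_MCAST_HASH_ENTRIES 256	/* assumed value */

	static uint32_t ether_crc_le(int len, const uint8_t *data)
	{
		uint32_t crc = 0xffffffff;
		int i;

		while (len--) {
			crc ^= *data++;
			for (i = 0; i < 8; i++)
				crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
		}
		return crc;
	}

	int main(void)
	{
		const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		uint32_t crc = ether_crc_le(6, bcast);

		printf("crc=0x%08x bit=0x%02x\n",
		       crc, crc & (EFX_MCAST_HASH_ENTRIES - 1));
		return 0;
	}
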
  8.1024 @@ -1836,6 +1764,21 @@ static struct notifier_block efx_netdev_
  8.1025  	.notifier_call = efx_netdev_event,
  8.1026  };
  8.1027  
  8.1028 +/* Prior to Linux 2.6.24, the bonding driver may call change_mtu()
  8.1029 + * without holding the RTNL, unlike all other callers.  We try to
  8.1030 + * mitigate the risk of a race with other reconfiguration using
  8.1031 + * rtnl_trylock(), but we cannot eliminate it completely.
  8.1032 + */
  8.1033 +static int efx_locked_change_mtu(struct net_device *net_dev, int new_mtu)
  8.1034 +{
  8.1035 +	int must_unlock = rtnl_trylock();
  8.1036 +	int rc = efx_change_mtu(net_dev, new_mtu);
  8.1037 +	if (must_unlock)
  8.1038 +		rtnl_unlock();
  8.1039 +	return rc;
  8.1040 +}
  8.1041 +#define efx_change_mtu efx_locked_change_mtu
  8.1042 +
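
Because the #define follows the wrapper, every later use of the plain name, in
particular the net_device method assignment elsewhere in this file, resolves to
the locked variant with no further edits. Illustration (the assignment line is
hypothetical and not part of this hunk):

	/* After the #define above, this: */
	net_dev->change_mtu = efx_change_mtu;
	/* preprocesses to: */
	net_dev->change_mtu = efx_locked_change_mtu;
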
  8.1043  static int efx_register_netdev(struct efx_nic *efx)
  8.1044  {
  8.1045  	struct net_device *net_dev = efx->net_dev;
  8.1046 @@ -1861,8 +1804,6 @@ static int efx_register_netdev(struct ef
  8.1047  	/* Always start with carrier off; PHY events will detect the link */
  8.1048  	netif_carrier_off(efx->net_dev);
  8.1049  
  8.1050 -	BUG_ON(efx->net_dev_registered);
  8.1051 -
  8.1052  	/* Clear MAC statistics */
  8.1053  	efx->mac_op->update_stats(efx);
  8.1054  	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
  8.1055 @@ -1882,28 +1823,11 @@ static int efx_register_netdev(struct ef
  8.1056  		return rc;
  8.1057  	}
  8.1058  
  8.1059 -	/* Allow link change notifications to be sent to the operating
  8.1060 -	 * system.  The must happen after register_netdev so that
  8.1061 -	 * there are no outstanding link changes if that call fails.
  8.1062 -	 * It must happen before efx_reconfigure_port so that the
  8.1063 -	 * initial state of the link is reported. */
  8.1064 -	mutex_lock(&efx->mac_lock);
  8.1065 -	efx->net_dev_registered = 1;
  8.1066 -	mutex_unlock(&efx->mac_lock);
  8.1067 -
  8.1068 -	/* Safety net: in case we don't get a PHY event */
  8.1069 -	rtnl_lock();
  8.1070 -	efx_reconfigure_port(efx, 1);
  8.1071 -	rtnl_unlock();
  8.1072 -
  8.1073 -	EFX_LOG(efx, "registered\n");
  8.1074 -
  8.1075  	return 0;
  8.1076  }
  8.1077  
  8.1078  static void efx_unregister_netdev(struct efx_nic *efx)
  8.1079  {
  8.1080 -	int was_registered = efx->net_dev_registered;
  8.1081  	struct efx_tx_queue *tx_queue;
  8.1082  
  8.1083  	if (!efx->net_dev)
  8.1084 @@ -1911,22 +1835,13 @@ static void efx_unregister_netdev(struct
  8.1085  
  8.1086  	BUG_ON(efx->net_dev->priv != efx);
  8.1087  
  8.1088 -	/* SFC Bug 5356: Ensure that no more link status notifications get
  8.1089 -	 * sent to the stack.  Bad things happen if there's an
  8.1090 -	 * outstanding notification after the net device is freed, but
  8.1091 -	 * they only get flushed out by unregister_netdev, not by
  8.1092 -	 * free_netdev. */
  8.1093 -	mutex_lock(&efx->mac_lock);
  8.1094 -	efx->net_dev_registered = 0;
  8.1095 -	mutex_unlock(&efx->mac_lock);
  8.1096 -
  8.1097  	/* Free up any skbs still remaining. This has to happen before
  8.1098  	 * we try to unregister the netdev as running their destructors
  8.1099  	 * may be needed to get the device ref. count to 0. */
  8.1100  	efx_for_each_tx_queue(tx_queue, efx)
  8.1101  		efx_release_tx_buffers(tx_queue);
  8.1102  
  8.1103 -	if (was_registered) {
  8.1104 +	if (NET_DEV_REGISTERED(efx)) {
  8.1105  		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
  8.1106  		efx_fini_debugfs_netdev(efx->net_dev);
  8.1107  		unregister_netdev(efx->net_dev);
  8.1108 @@ -1939,16 +1854,12 @@ static void efx_unregister_netdev(struct
  8.1109   *
  8.1110   **************************************************************************/
  8.1111  
  8.1112 -/* This suspends the device (and acquires the suspend lock) without
  8.1113 - * flushing the descriptor queues.  It is included for the convenience
  8.1114 - * of the driverlink layer.
  8.1115 - */
  8.1116 +/* Serialise access to the driverlink callbacks by quiescing event processing
  8.1117 + * (without flushing the descriptor queues) and acquiring the rtnl_lock */
  8.1118  void efx_suspend(struct efx_nic *efx)
  8.1119  {
  8.1120  	EFX_LOG(efx, "suspending operations\n");
  8.1121  
  8.1122 -	down(&efx->suspend_lock);
  8.1123 -
  8.1124  	rtnl_lock();
  8.1125  	efx_stop_all(efx);
  8.1126  }
  8.1127 @@ -1959,8 +1870,6 @@ void efx_resume(struct efx_nic *efx)
  8.1128  
  8.1129  	efx_start_all(efx);
  8.1130  	rtnl_unlock();
  8.1131 -
  8.1132 -	up(&efx->suspend_lock);
  8.1133  }
  8.1134  
  8.1135  /* The final hardware and software finalisation before reset.
  8.1136 @@ -1970,7 +1879,7 @@ static int efx_reset_down(struct efx_nic
  8.1137  {
  8.1138  	int rc;
  8.1139  
  8.1140 -	ASSERT_RTNL();
  8.1141 +	EFX_ASSERT_RESET_SERIALISED(efx);
  8.1142  
  8.1143  	rc = efx->mac_op->get_settings(efx, ecmd);
  8.1144  	if (rc) {
  8.1145 @@ -1996,10 +1905,6 @@ static int efx_reset_up(struct efx_nic *
  8.1146  	if (rc)
  8.1147  		goto fail1;
  8.1148  
  8.1149 -	/* In an INVISIBLE_RESET there might not be a link state transition,
  8.1150 -	 * so we push the multicast list here. */
  8.1151 -	falcon_set_multicast_hash(efx);
  8.1152 -
  8.1153  	/* Restore MAC and PHY settings. */
  8.1154  	rc = efx->mac_op->set_settings(efx, ecmd);
  8.1155  	if (rc) {
  8.1156 @@ -2021,36 +1926,30 @@ static int efx_reset_up(struct efx_nic *
  8.1157   *
  8.1158   * This function will sleep.  You cannot reset from within an atomic
  8.1159   * state; use efx_schedule_reset() instead.
  8.1160 + *
  8.1161 + * Grabs the dl_reset_lock and, to serialise with the kernel interfaces,
  8.1162 + * the rtnl_lock.
  8.1163   */
  8.1164  static int efx_reset(struct efx_nic *efx)
  8.1165  {
  8.1166  	struct ethtool_cmd ecmd;
  8.1167 -	unsigned long flags __attribute__ ((unused));
  8.1168  	enum reset_type method = efx->reset_pending;
  8.1169  	int rc;
  8.1170  
  8.1171 +	/* Notify driverlink clients of imminent reset. */
  8.1172  	efx_dl_reset_lock();
  8.1173 -
  8.1174 -	rc = down_interruptible(&efx->suspend_lock);
  8.1175 -	if (rc) {
  8.1176 -		EFX_ERR(efx, "reset aborted by signal\n");
  8.1177 -		goto unlock_dl_lock;
  8.1178 -	}
  8.1179 -
  8.1180 -	/* We've got suspend_lock, which means we can only be in
  8.1181 -	 * STATE_RUNNING or STATE_FINI. Don't clear
  8.1182 -	 * efx->reset_pending, since this flag indicates that we
  8.1183 -	 * should retry device initialisation.
  8.1184 -	 */
  8.1185 +	efx_dl_reset_suspend(efx);
  8.1186 +
  8.1187 +	/* Serialise with kernel interfaces */
  8.1188 +	rtnl_lock();
  8.1189 +
  8.1190 +	/* If we're not RUNNING then don't reset. Leave the reset_pending
  8.1191 +	 * flag set so that efx_pci_probe_main will be retried */
  8.1192  	if (efx->state != STATE_RUNNING) {
  8.1193  		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
  8.1194 -		goto unlock_suspend_lock;
  8.1195 +		goto unlock_rtnl;
  8.1196  	}
  8.1197  
  8.1198 -	/* Notify driverlink clients of imminent reset. */
  8.1199 -	efx_dl_reset_suspend(efx);
  8.1200 -	rtnl_lock();
  8.1201 -
  8.1202  	efx->state = STATE_RESETTING;
  8.1203  	EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
  8.1204  
  8.1205 @@ -2104,15 +2003,16 @@ static int efx_reset(struct efx_nic *efx
  8.1206  		goto fail5;
  8.1207  
  8.1208  	mutex_unlock(&efx->mac_lock);
  8.1209 -	efx_reconfigure_port(efx, 1);
  8.1210  	EFX_LOG(efx, "reset complete\n");
  8.1211  
  8.1212  	efx->state = STATE_RUNNING;
  8.1213  	efx_start_all(efx);
  8.1214  
  8.1215 + unlock_rtnl:
  8.1216  	rtnl_unlock();
  8.1217 -
  8.1218 -	goto notify;
  8.1219 +	efx_dl_reset_resume(efx, 1);
  8.1220 +	efx_dl_reset_unlock();
  8.1221 +	return 0;
  8.1222  
  8.1223   fail5:
  8.1224   fail4:
  8.1225 @@ -2122,22 +2022,13 @@ static int efx_reset(struct efx_nic *efx
  8.1226  	EFX_ERR(efx, "has been disabled\n");
  8.1227  	efx->state = STATE_DISABLED;
  8.1228  
  8.1229 -	/* Remove the net_dev */
  8.1230  	mutex_unlock(&efx->mac_lock);
  8.1231  	rtnl_unlock();
  8.1232 +	/* Remove the net_dev */
  8.1233  	efx_unregister_netdev(efx);
  8.1234  	efx_fini_port(efx);
  8.1235 -
  8.1236 - notify:
  8.1237 -	/* Notify driverlink clients of completed reset */
  8.1238 -	efx_dl_reset_resume(efx, (rc == 0));
  8.1239 -
  8.1240 - unlock_suspend_lock:
  8.1241 -	up(&efx->suspend_lock);
  8.1242 -
  8.1243 - unlock_dl_lock:
  8.1244 +	efx_dl_reset_resume(efx, 0);
  8.1245  	efx_dl_reset_unlock();
  8.1246 -
  8.1247  	return rc;
  8.1248  }
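
Read together with the failure path, the serialisation order is symmetric: the
driverlink reset lock brackets the whole operation, and the RTNL is dropped
before clients are resumed. A skeleton of that ordering (not a drop-in
function; the reset body is elided):

	static int efx_reset_order_sketch(struct efx_nic *efx)
	{
		int ok;

		efx_dl_reset_lock();		/* 1. block driverlink re-entry */
		efx_dl_reset_suspend(efx);	/* 2. quiesce driverlink clients */
		rtnl_lock();			/* 3. serialise with kernel entry points */

		ok = 1;				/* ... perform the reset ... */

		rtnl_unlock();			/* 4. release in reverse order */
		efx_dl_reset_resume(efx, ok);	/* 5. report the outcome to clients */
		efx_dl_reset_unlock();
		return 0;
	}
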
  8.1249  
  8.1250 @@ -2170,6 +2061,7 @@ void efx_schedule_reset(struct efx_nic *
  8.1251  	case RESET_TYPE_RX_RECOVERY:
  8.1252  	case RESET_TYPE_RX_DESC_FETCH:
  8.1253  	case RESET_TYPE_TX_DESC_FETCH:
  8.1254 +	case RESET_TYPE_TX_SKIP:
  8.1255  		method = RESET_TYPE_INVISIBLE;
  8.1256  		break;
  8.1257  	default:
  8.1258 @@ -2185,11 +2077,7 @@ void efx_schedule_reset(struct efx_nic *
  8.1259  
  8.1260  	efx->reset_pending = method;
  8.1261  
  8.1262 -#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
  8.1263  	queue_work(efx->reset_workqueue, &efx->reset_work);
  8.1264 -#else
  8.1265 -	queue_work(efx->workqueue, &efx->reset_work);
  8.1266 -#endif
  8.1267  }
  8.1268  
  8.1269  /**************************************************************************
  8.1270 @@ -2198,23 +2086,12 @@ void efx_schedule_reset(struct efx_nic *
  8.1271   *
  8.1272   **************************************************************************/
  8.1273  
  8.1274 -enum efx_type_index {
  8.1275 -	EFX_TYPE_FALCON_A = 0,
  8.1276 -	EFX_TYPE_FALCON_B = 1,
  8.1277 -};
  8.1278 -
  8.1279 -static struct efx_nic_type *efx_nic_types[] = {
  8.1280 -	[EFX_TYPE_FALCON_A] = &falcon_a_nic_type,
  8.1281 -	[EFX_TYPE_FALCON_B] = &falcon_b_nic_type,
  8.1282 -};
  8.1283 -
  8.1284 -
  8.1285  /* PCI device ID table */
  8.1286  static struct pci_device_id efx_pci_table[] __devinitdata = {
  8.1287 -	{EFX_VENDID_SFC, FALCON_A_P_DEVID, PCI_ANY_ID, PCI_ANY_ID,
  8.1288 -	 0, 0, EFX_TYPE_FALCON_A},
  8.1289 -	{EFX_VENDID_SFC, FALCON_B_P_DEVID, PCI_ANY_ID, PCI_ANY_ID,
  8.1290 -	 0, 0, EFX_TYPE_FALCON_B},
  8.1291 +	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
  8.1292 +	 .driver_data = (unsigned long) &falcon_a_nic_type},
  8.1293 +	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
  8.1294 +	 .driver_data = (unsigned long) &falcon_b_nic_type},
  8.1295  	{0}			/* end of list */
  8.1296  };
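
PCI_DEVICE() is shorthand for the vendor/device pair with wildcarded subsystem
IDs, so these entries match exactly what the removed open-coded initialisers
matched; only driver_data changes meaning, from an enum index to a pointer to
the NIC type. The macro expands approximately as in <linux/pci.h>:

	#define PCI_DEVICE(vend, dev) \
		.vendor = (vend), .device = (dev), \
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
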
  8.1297  
  8.1298 @@ -2275,7 +2152,7 @@ static struct efx_board efx_dummy_board_
  8.1299  /* This zeroes out and then fills in the invariants in a struct
  8.1300   * efx_nic (including all sub-structures).
  8.1301   */
  8.1302 -static int efx_init_struct(struct efx_nic *efx, enum efx_type_index type,
  8.1303 +static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
  8.1304  			   struct pci_dev *pci_dev)
  8.1305  {
  8.1306  	struct efx_channel *channel;
  8.1307 @@ -2288,7 +2165,6 @@ static int efx_init_struct(struct efx_ni
  8.1308  	spin_lock_init(&efx->biu_lock);
  8.1309  	spin_lock_init(&efx->phy_lock);
  8.1310  	mutex_init(&efx->spi_lock);
  8.1311 -	sema_init(&efx->suspend_lock, 1);
  8.1312  	INIT_WORK(&efx->reset_work, efx_reset_work);
  8.1313  	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
  8.1314  	efx->pci_dev = pci_dev;
  8.1315 @@ -2333,7 +2209,7 @@ static int efx_init_struct(struct efx_ni
  8.1316  		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
  8.1317  	}
  8.1318  
  8.1319 -	efx->type = efx_nic_types[type];
  8.1320 +	efx->type = type;
  8.1321  
  8.1322  	/* Sanity-check NIC type */
  8.1323  	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
  8.1324 @@ -2352,63 +2228,39 @@ static int efx_init_struct(struct efx_ni
  8.1325  	/* Higher numbered interrupt modes are less capable! */
  8.1326  	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
  8.1327  				  interrupt_mode);
  8.1328 -#if defined(EFX_NEED_DUMMY_MSIX)
  8.1329 -	if (efx->interrupt_mode == EFX_INT_MODE_MSIX)
  8.1330 -		efx->interrupt_mode = EFX_INT_MODE_MSI;
  8.1331 -#endif
  8.1332 -
  8.1333 -	/* Tasks that can fail are last */
  8.1334 -	efx->refill_workqueue = create_workqueue("sfc_refill");
  8.1335 -	if (!efx->refill_workqueue) {
  8.1336 -		rc = -ENOMEM;
  8.1337 -		goto fail1;
  8.1338 -	}
  8.1339  
  8.1340  	efx->workqueue = create_singlethread_workqueue("sfc_work");
  8.1341  	if (!efx->workqueue) {
  8.1342  		rc = -ENOMEM;
  8.1343 -		goto fail2;
  8.1344 +		goto fail1;
  8.1345  	}
  8.1346  
  8.1347 -#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
  8.1348  	efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
  8.1349  	if (!efx->reset_workqueue) {
  8.1350  		rc = -ENOMEM;
  8.1351 -		goto fail3;
  8.1352 +		goto fail2;
  8.1353  	}
  8.1354 -#endif
  8.1355  
  8.1356  	return 0;
  8.1357  
  8.1358 -#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
  8.1359 - fail3:
  8.1360 + fail2:
  8.1361  	destroy_workqueue(efx->workqueue);
  8.1362  	efx->workqueue = NULL;
  8.1363 -#endif
  8.1364 -
  8.1365 - fail2:
  8.1366 -	destroy_workqueue(efx->refill_workqueue);
  8.1367 -	efx->refill_workqueue = NULL;
  8.1368 +
  8.1369   fail1:
  8.1370  	return rc;
  8.1371  }
  8.1372  
  8.1373  static void efx_fini_struct(struct efx_nic *efx)
  8.1374  {
  8.1375 -#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
  8.1376  	if (efx->reset_workqueue) {
  8.1377  		destroy_workqueue(efx->reset_workqueue);
  8.1378  		efx->reset_workqueue = NULL;
  8.1379  	}
  8.1380 -#endif
  8.1381  	if (efx->workqueue) {
  8.1382  		destroy_workqueue(efx->workqueue);
  8.1383  		efx->workqueue = NULL;
  8.1384  	}
  8.1385 -	if (efx->refill_workqueue) {
  8.1386 -		destroy_workqueue(efx->refill_workqueue);
  8.1387 -		efx->refill_workqueue = NULL;
  8.1388 -	}
  8.1389  }
  8.1390  
  8.1391  /**************************************************************************
  8.1392 @@ -2422,7 +2274,7 @@ static void efx_fini_struct(struct efx_n
  8.1393   */
  8.1394  static void efx_pci_remove_main(struct efx_nic *efx)
  8.1395  {
  8.1396 -	ASSERT_RTNL();
  8.1397 +	EFX_ASSERT_RESET_SERIALISED(efx);
  8.1398  
  8.1399  	/* Skip everything if we never obtained a valid membase */
  8.1400  	if (!efx->membase)
  8.1401 @@ -2456,43 +2308,29 @@ static void efx_pci_remove(struct pci_de
  8.1402  	/* Unregister driver from driverlink layer */
  8.1403  	efx_dl_unregister_nic(efx);
  8.1404  
  8.1405 -	/* Mark the NIC as fini under both suspend_lock and
  8.1406 -	 * rtnl_lock */
  8.1407 -	down(&efx->suspend_lock);
  8.1408 +	/* Mark the NIC as fini, then stop the interface */
  8.1409  	rtnl_lock();
  8.1410  	efx->state = STATE_FINI;
  8.1411 -	up(&efx->suspend_lock);
  8.1412 -
  8.1413 -	if (efx->membase) {
  8.1414 -		/* Stop the NIC. Since we're in STATE_FINI, this
  8.1415 -		 * won't be reversed. */
  8.1416 -		if (efx->net_dev_registered)
  8.1417 -			dev_close(efx->net_dev);
  8.1418 -
  8.1419 -		/* Release the rtnl lock. Any queued efx_resets()
  8.1420 -		 * can now return early [we're in STATE_FINI]. */
  8.1421 -		rtnl_unlock();
  8.1422 -
  8.1423 -		efx_unregister_netdev(efx);
  8.1424 -		efx_fini_debugfs_channels(efx);
  8.1425 -
  8.1426 -		/* Wait for any scheduled resets to complete. No more will be
  8.1427 -		 * scheduled from this point because efx_stop_all() has been
  8.1428 -		 * called, we are no longer registered with driverlink, and
  8.1429 -		 * the net_device's have been removed. */
  8.1430 -#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
  8.1431 -		flush_workqueue(efx->reset_workqueue);
  8.1432 -#else
  8.1433 -		flush_workqueue(efx->workqueue);
  8.1434 -#endif
  8.1435 -
  8.1436 -		/* Fini and remove all the software state */
  8.1437 -		rtnl_lock();
  8.1438 -		efx_pci_remove_main(efx);
  8.1439 -	}
  8.1440 -
  8.1441 +	dev_close(efx->net_dev);
  8.1442 +
  8.1443 +	/* Allow any queued efx_resets() to complete */
  8.1444  	rtnl_unlock();
  8.1445  
  8.1446 +	if (efx->membase == NULL)
  8.1447 +		goto out;
  8.1448 +
  8.1449 +	efx_unregister_netdev(efx);
  8.1450 +	efx_fini_debugfs_channels(efx);
  8.1451 +
  8.1452 +	/* Wait for any scheduled resets to complete. No more will be
  8.1453 +	 * scheduled from this point because efx_stop_all() has been
  8.1454 +	 * called, we are no longer registered with driverlink, and
  8.1455 +	 * the net_devices have been removed. */
  8.1456 +	flush_workqueue(efx->reset_workqueue);
  8.1457 +
  8.1458 +	efx_pci_remove_main(efx);
  8.1459 +
  8.1460 +out:
  8.1461  	efx_fini_io(efx);
  8.1462  	EFX_LOG(efx, "shutdown successful\n");
  8.1463  
  8.1464 @@ -2593,7 +2431,7 @@ static int __devinit efx_pci_probe(struc
  8.1465  				   const struct pci_device_id *entry)
  8.1466  {
  8.1467  	struct efx_nic *efx;
  8.1468 -	enum efx_type_index type = entry->driver_data;
  8.1469 +	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
  8.1470  	int i, rc;
  8.1471  
  8.1472  	/* Allocate and initialise a struct efx_nic */
  8.1473 @@ -2614,50 +2452,27 @@ static int __devinit efx_pci_probe(struc
  8.1474  	if (rc)
  8.1475  		goto fail3;
  8.1476  
  8.1477 -	/* From this point on we begin to expose the driver to the OS
  8.1478 -	 * to varying degrees, so lets grab the suspend_lock and
  8.1479 -	 * rtnl_lock to serialise against efx_reset() and
  8.1480 -	 * friends. efx->state is not STATE_RUNNING yet, but we don't
  8.1481 -	 * want these tasks to fail, just to block until we drop the
  8.1482 -	 * lock
  8.1483 -	 */
  8.1484 -	rc = down_interruptible(&efx->suspend_lock);
  8.1485 -	if (rc) {
  8.1486 -		EFX_ERR(efx, "suspend interrupted - aborting\n");
  8.1487 -		goto fail4;
  8.1488 -	}
  8.1489 -
  8.1490 -	rtnl_lock();
  8.1491 -
  8.1492 -	/* Probe, initialise and start everything. Run self-test */
  8.1493 +	/* No serialisation is required with the reset path because
  8.1494 +	 * we're in STATE_INIT. */
  8.1495  	for (i = 0; i < 5; i++) {
  8.1496  		rc = efx_pci_probe_main(efx);
  8.1497  		if (rc == 0)
  8.1498  			break;
  8.1499  
  8.1500 -		/* Retry if a recoverably reset event has been scheduled */
  8.1501 -		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
  8.1502 -		    (efx->reset_pending != RESET_TYPE_ALL))
  8.1503 -			goto fail5;
  8.1504 -
  8.1505  		/* Serialise against efx_reset(). No more resets will be
  8.1506  		 * scheduled since efx_stop_all() has been called, and we
  8.1507  		 * have not and never have been registered with either
  8.1508  		 * the rtnetlink or driverlink layers. */
  8.1509 -		rtnl_unlock();
  8.1510 -		up(&efx->suspend_lock);
  8.1511 -
  8.1512 -#if defined(EFX_USE_CANCEL_WORK_SYNC)
  8.1513 -		cancel_work_sync(&efx->reset_work);
  8.1514 -#else
  8.1515  		flush_workqueue(efx->reset_workqueue);
  8.1516 -#endif
  8.1517 -
  8.1518 -		down(&efx->suspend_lock);
  8.1519 -		rtnl_lock();
  8.1520 +
  8.1521 +		/* Retry if a recoverably reset event has been scheduled */
  8.1522 +		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
  8.1523 +		    (efx->reset_pending != RESET_TYPE_ALL))
  8.1524 +			goto fail4;
  8.1525  
  8.1526  		efx->reset_pending = RESET_TYPE_NONE;
  8.1527 -	};
  8.1528 +	}
  8.1529 +
  8.1530  	if (rc) {
  8.1531  		EFX_ERR(efx, "Could not reset NIC\n");
  8.1532  		goto fail5;
  8.1533 @@ -2671,16 +2486,14 @@ static int __devinit efx_pci_probe(struc
  8.1534  	/* Switch to the running state before we expose the device to
  8.1535  	 * the OS.  This is to ensure that the initial gathering of
  8.1536  	 * MAC stats succeeds. */
  8.1537 +	rtnl_lock();
  8.1538  	efx->state = STATE_RUNNING;
  8.1539 -
  8.1540  	rtnl_unlock();
  8.1541  
  8.1542  	rc = efx_register_netdev(efx);
  8.1543  	if (rc)
  8.1544  		goto fail7;
  8.1545  
  8.1546 -	up(&efx->suspend_lock);
  8.1547 -
  8.1548  	EFX_LOG(efx, "initialisation successful\n");
  8.1549  
  8.1550  	/* Register with driverlink layer */
  8.1551 @@ -2691,18 +2504,12 @@ static int __devinit efx_pci_probe(struc
  8.1552  	return 0;
  8.1553  
  8.1554   fail8:
  8.1555 -	down(&efx->suspend_lock);
  8.1556  	efx_unregister_netdev(efx);
  8.1557   fail7:
  8.1558 -	/* Re-acquire the rtnl lock around pci_remove_main() */
  8.1559 -	rtnl_lock();
  8.1560  	efx_fini_debugfs_channels(efx);
  8.1561   fail6:
  8.1562  	efx_pci_remove_main(efx);
  8.1563   fail5:
  8.1564 -	/* Drop the locks before fini */
  8.1565 -	rtnl_unlock();
  8.1566 -	up(&efx->suspend_lock);
  8.1567   fail4:
  8.1568  	efx_fini_io(efx);
  8.1569   fail3:
  8.1570 @@ -2749,6 +2556,12 @@ static int __init efx_init_module(void)
  8.1571  	if (rc)
  8.1572  		goto err_notifier;
  8.1573  
  8.1574 +	refill_workqueue = create_workqueue("sfc_refill");
  8.1575 +	if (!refill_workqueue) {
  8.1576 +		rc = -ENOMEM;
  8.1577 +		goto err_refill;
  8.1578 +	}
  8.1579 +
  8.1580  	rc = pci_register_driver(&efx_pci_driver);
  8.1581  	if (rc < 0)
  8.1582  		goto err_pci;
  8.1583 @@ -2756,6 +2569,8 @@ static int __init efx_init_module(void)
  8.1584  	return 0;
  8.1585  
  8.1586   err_pci:
  8.1587 +	destroy_workqueue(refill_workqueue);
  8.1588 + err_refill:
  8.1589  	unregister_netdevice_notifier(&efx_netdev_notifier);
  8.1590   err_notifier:
  8.1591  	efx_fini_debugfs();
  8.1592 @@ -2768,6 +2583,7 @@ static void __exit efx_exit_module(void)
  8.1593  	printk(KERN_INFO "Solarflare NET driver unloading\n");
  8.1594  
  8.1595  	pci_unregister_driver(&efx_pci_driver);
  8.1596 +	destroy_workqueue(refill_workqueue);
  8.1597  	unregister_netdevice_notifier(&efx_netdev_notifier);
  8.1598  	efx_fini_debugfs();
  8.1599  
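
Note the change of scope here: the per-NIC efx->refill_workqueue removed from
efx_init_struct() above is replaced by a single module-wide queue, created once
at load, destroyed at unload, and shared by every NIC the driver binds. A
sketch of the intended consumer (the call site is hypothetical; the RX refill
path is not part of this excerpt), matching the efx_schedule_slow_fill()
prototype added to efx.h below:

	static struct workqueue_struct *refill_workqueue;	/* module scope */

	void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
	{
		queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
	}
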
     9.1 --- a/drivers/net/sfc/efx.h	Tue Mar 31 11:49:12 2009 +0100
     9.2 +++ b/drivers/net/sfc/efx.h	Tue Mar 31 11:59:10 2009 +0100
     9.3 @@ -43,30 +43,19 @@ extern void efx_stop_queue(struct efx_ni
     9.4  extern void efx_wake_queue(struct efx_nic *efx);
     9.5  
     9.6  /* RX */
     9.7 -#if defined(EFX_USE_FASTCALL)
     9.8  extern void fastcall efx_xmit_done(struct efx_tx_queue *tx_queue,
     9.9  				   unsigned int index);
    9.10 -#else
    9.11 -extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
    9.12 -#endif
    9.13 -#if defined(EFX_USE_FASTCALL)
    9.14  extern void fastcall efx_rx_packet(struct efx_rx_queue *rx_queue,
    9.15  				   unsigned int index, unsigned int len,
    9.16  				   int checksummed, int discard);
    9.17 -#else
    9.18 -extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
    9.19 -			  unsigned int len, int checksummed, int discard);
    9.20 -#endif
    9.21 -extern void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
    9.22 -			       struct efx_rx_buffer *rx_buf);
    9.23 +extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
    9.24  
    9.25  /* Channels */
    9.26  extern void efx_process_channel_now(struct efx_channel *channel);
    9.27  extern int efx_flush_queues(struct efx_nic *efx);
    9.28  
    9.29  /* Ports */
    9.30 -extern void efx_reconfigure_port(struct efx_nic *efx,
    9.31 -				 int on_disabled);
    9.32 +extern void efx_reconfigure_port(struct efx_nic *efx);
    9.33  
    9.34  /* Global */
    9.35  extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
    9.36 @@ -91,13 +80,8 @@ static inline void efx_schedule_channel(
    9.37  		  channel->channel, raw_smp_processor_id());
    9.38  	channel->work_pending = 1;
    9.39  
    9.40 -#if defined(EFX_HAVE_OLD_NAPI)
    9.41  	if (!test_and_set_bit(__LINK_STATE_RX_SCHED, &channel->napi_dev->state))
    9.42  		__netif_rx_schedule(channel->napi_dev);
    9.43 -#else
    9.44 -	netif_rx_schedule(channel->napi_dev, &channel->napi_str);
    9.45 -#endif
    9.46  }
    9.47  
    9.48 -
    9.49  #endif /* EFX_EFX_H */
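
With the EFX_USE_FASTCALL conditionals gone, the fastcall annotation is kept
unconditionally; it is harmless where the calling convention does not exist.
On a 2.6.18-era kernel it is defined per architecture, approximately as one of:

	/* i386: pass up to three arguments in registers */
	#define fastcall __attribute__((regparm(3)))

	/* architectures without the convention: expands to nothing */
	#define fastcall
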
    10.1 --- a/drivers/net/sfc/enum.h	Tue Mar 31 11:49:12 2009 +0100
    10.2 +++ b/drivers/net/sfc/enum.h	Tue Mar 31 11:59:10 2009 +0100
    10.3 @@ -98,6 +98,9 @@ enum efx_loopback_mode {
    10.4   * @RESET_TYPE_MONITOR: reset due to hardware monitor
    10.5   * @RESET_TYPE_INT_ERROR: reset due to internal error
    10.6   * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
    10.7 + * @RESET_TYPE_RX_DESC_FETCH: PCIe error during RX descriptor fetch
    10.8 + * @RESET_TYPE_TX_DESC_FETCH: PCIe error during TX descriptor fetch
    10.9 + * @RESET_TYPE_TX_SKIP: hardware completed empty TX descriptors
   10.10   */
   10.11  enum reset_type {
   10.12  	RESET_TYPE_NONE = -1,
   10.13 @@ -111,6 +114,7 @@ enum reset_type {
   10.14  	RESET_TYPE_RX_RECOVERY,
   10.15  	RESET_TYPE_RX_DESC_FETCH,
   10.16  	RESET_TYPE_TX_DESC_FETCH,
   10.17 +	RESET_TYPE_TX_SKIP,
   10.18  	RESET_TYPE_MAX,
   10.19  };
   10.20  
    11.1 --- a/drivers/net/sfc/ethtool.c	Tue Mar 31 11:49:12 2009 +0100
    11.2 +++ b/drivers/net/sfc/ethtool.c	Tue Mar 31 11:59:10 2009 +0100
    11.3 @@ -28,7 +28,6 @@
    11.4  #include <linux/netdevice.h>
    11.5  #include <linux/ethtool.h>
    11.6  #include <linux/rtnetlink.h>
    11.7 -#include <asm/uaccess.h>
    11.8  #include "net_driver.h"
    11.9  #include "selftest.h"
   11.10  #include "efx.h"
   11.11 @@ -200,8 +199,15 @@ int efx_ethtool_get_settings(struct net_
   11.12  			     struct ethtool_cmd *ecmd)
   11.13  {
   11.14  	struct efx_nic *efx = net_dev->priv;
   11.15 +	int rc;
   11.16  
   11.17 -	return efx->mac_op->get_settings(efx, ecmd);
   11.18 +	if (!in_interrupt())
    11.19 +		mutex_lock(&efx->mac_lock);
   11.20 +	rc = efx->mac_op->get_settings(efx, ecmd);
   11.21 +	if (!in_interrupt())
    11.22 +		mutex_unlock(&efx->mac_lock);
   11.23 +
   11.24 +	return rc;
   11.25  }
   11.26  
   11.27  /* This must be called with rtnl_lock held. */
   11.28 @@ -211,14 +217,13 @@ int efx_ethtool_set_settings(struct net_
   11.29  	struct efx_nic *efx = net_dev->priv;
   11.30  	int rc;
   11.31  
   11.32 +	mutex_lock(&efx->mac_lock);
   11.33  	rc = efx->mac_op->set_settings(efx, ecmd);
   11.34 -	if (rc)
   11.35 -		return rc;
   11.36 +	mutex_unlock(&efx->mac_lock);
   11.37 +	if (!rc)
   11.38 +		efx_reconfigure_port(efx);
   11.39  
   11.40 -	/* Push the settings to the MAC */
   11.41 -	efx_reconfigure_port(efx, 0);
   11.42 -
   11.43 -	return 0;
   11.44 +	return rc;
   11.45  }
   11.46  
   11.47  static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
   11.48 @@ -418,7 +423,6 @@ static void efx_ethtool_get_stats(struct
   11.49  				  struct ethtool_stats *stats
   11.50  				  __attribute__ ((unused)), u64 *data)
   11.51  {
   11.52 -	unsigned long flags __attribute__ ((unused));
   11.53  	struct efx_nic *efx = net_dev->priv;
   11.54  	struct efx_mac_stats *mac_stats = &efx->mac_stats;
   11.55  	struct efx_ethtool_stat *stat;
   11.56 @@ -429,7 +433,6 @@ static void efx_ethtool_get_stats(struct
   11.57  
   11.58  	/* Update MAC and NIC statistics */
   11.59  	net_dev->get_stats(net_dev);
   11.60 -	falcon_update_nic_stats(efx);
   11.61  
   11.62  	/* Fill detailed statistics buffer */
   11.63  	for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
   11.64 @@ -461,7 +464,6 @@ static int efx_ethtool_set_tx_csum(struc
   11.65  	if (rc)
   11.66  		return rc;
   11.67  
   11.68 -
   11.69  	efx_flush_queues(efx);
   11.70  
   11.71  	return 0;
   11.72 @@ -668,14 +670,14 @@ static int efx_ethtool_set_pauseparam(st
   11.73  	flow_control |= pause->autoneg ? EFX_FC_AUTO : 0;
   11.74  
   11.75  	/* Try to push the pause parameters */
   11.76 +	mutex_lock(&efx->mac_lock);
   11.77  	rc = efx->mac_op->set_pause(efx, flow_control);
   11.78 -	if (rc)
   11.79 -		return rc;
   11.80 +	mutex_unlock(&efx->mac_lock);
   11.81  
   11.82 -	/* Push the settings to the MAC */
   11.83 -	efx_reconfigure_port(efx, 0);
   11.84 +	if (!rc)
   11.85 +		efx_reconfigure_port(efx);
   11.86  
   11.87 -	return 0;
   11.88 +	return rc;
   11.89  }
   11.90  
   11.91  static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
   11.92 @@ -689,7 +691,6 @@ static void efx_ethtool_get_pauseparam(s
   11.93  }
   11.94  
   11.95  
   11.96 -#if defined(EFX_USE_ETHTOOL_GET_PERM_ADDR)
   11.97  static int efx_ethtool_op_get_perm_addr(struct net_device *net_dev,
   11.98  					struct ethtool_perm_addr *addr,
   11.99  					u8 *data)
  11.100 @@ -700,7 +701,6 @@ static int efx_ethtool_op_get_perm_addr(
  11.101  
  11.102  	return 0;
  11.103  }
  11.104 -#endif
  11.105  
  11.106  struct ethtool_ops efx_ethtool_ops = {
  11.107  	.get_settings		= efx_ethtool_get_settings,
  11.108 @@ -718,17 +718,11 @@ struct ethtool_ops efx_ethtool_ops = {
  11.109  	.set_tx_csum		= efx_ethtool_set_tx_csum,
  11.110  	.get_sg			= ethtool_op_get_sg,
  11.111  	.set_sg			= ethtool_op_set_sg,
  11.112 -#if defined(EFX_USE_ETHTOOL_FLAGS)
  11.113 -	.get_flags		= ethtool_op_get_flags,
  11.114 -	.set_flags		= ethtool_op_set_flags,
  11.115 -#endif
  11.116  	.self_test_count	= efx_ethtool_self_test_count,
  11.117  	.self_test		= efx_ethtool_self_test,
  11.118  	.get_strings		= efx_ethtool_get_strings,
  11.119  	.phys_id		= efx_ethtool_phys_id,
  11.120  	.get_stats_count	= efx_ethtool_get_stats_count,
  11.121  	.get_ethtool_stats	= efx_ethtool_get_stats,
  11.122 -#if defined(EFX_USE_ETHTOOL_GET_PERM_ADDR)
  11.123  	.get_perm_addr          = efx_ethtool_op_get_perm_addr,
  11.124 -#endif
  11.125  };
    12.1 --- a/drivers/net/sfc/falcon.c	Tue Mar 31 11:49:12 2009 +0100
    12.2 +++ b/drivers/net/sfc/falcon.c	Tue Mar 31 11:59:10 2009 +0100
    12.3 @@ -25,8 +25,7 @@
    12.4   ****************************************************************************
    12.5   */
    12.6  
    12.7 -#include <asm/io.h>
    12.8 -#include <asm/bitops.h>
    12.9 +#include <linux/bitops.h>
   12.10  #include <linux/delay.h>
   12.11  #include <linux/pci.h>
   12.12  #include <linux/module.h>
   12.13 @@ -51,20 +50,29 @@
   12.14   * present in SFE400X evaluation boards
   12.15   */
   12.16  
   12.17 +/**
   12.18 + * struct falcon_nic_data - Falcon NIC state
   12.19 + * @tx_dc_entries: Number of entries in each TX queue descriptor cache
   12.20 + * @rx_dc_entries: Number of entries in each RX queue descriptor cache
   12.21 + * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
   12.22 + * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
   12.23 + * @old_loopback_mode: Previous loopback mode used in deconfigure_mac_wrapper
   12.24 + * @external_sram_cfg: Size and number of banks of external SRAM
   12.25 + * @pci_dev2: The secondary PCI device if present
   12.26 + * @resources: Driverlink parameters
   12.27 + */
   12.28  struct falcon_nic_data {
   12.29 -	/* Number of entries in each TX queue descriptor cache. */
   12.30  	unsigned tx_dc_entries;
   12.31 -	/* Number of entries in each RX queue descriptor cache. */
   12.32  	unsigned rx_dc_entries;
   12.33 -	/* Base address in SRAM of TX queue descriptor caches. */
   12.34  	unsigned tx_dc_base;
   12.35 -	/* Base address in SRAM of RX queue descriptor caches. */
   12.36  	unsigned rx_dc_base;
   12.37  
   12.38 -	/* Previous loopback mode used in deconfigure_mac_wrapper */
   12.39  	enum efx_loopback_mode old_loopback_mode;
   12.40  
   12.41 -	/* Driverlink parameters */
   12.42 +	struct pci_dev *pci_dev2;
   12.43 +
   12.44 +	int external_sram_cfg;
   12.45 +
   12.46  	struct efx_dl_falcon_resources resources;
   12.47  };
   12.48  
   12.49 @@ -150,21 +158,23 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "R
   12.50  #endif
   12.51  
   12.52  /* TX DMA length mask (13-bit) */
   12.53 -#define FALCON_TX_DMA_MASK (8192 - 1)
   12.54 -
   12.55 -/* Alignment of special buffers (4KB) */
   12.56 -#define FALCON_BUF_ALIGN 4096
   12.57 +#define FALCON_TX_DMA_MASK (4096 - 1)
   12.58 +
   12.59 +/* Size and alignment of special buffers (4KB) */
   12.60 +#define FALCON_BUF_SIZE 4096
   12.61  
   12.62  /* Dummy SRAM size code */
   12.63  #define SRM_NB_BSZ_ONCHIP_ONLY (-1)
   12.64  
   12.65  /* Be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */
   12.66 -#define PCI_EXP_DEVCAP_PWR_VAL_LBN	(18)
   12.67 -/* This field takes up bits 26 and 27. */
   12.68 -#define PCI_EXP_DEVCAP_PWR_SCL_LBN	(26)
   12.69 -#define PCI_EXP_LNKSTA_LNK_WID		(0x3f0)
   12.70 -#define PCI_EXP_LNKSTA_LNK_WID_LBN	(4)
   12.71 -
   12.72 +#define PCI_EXP_DEVCAP_PWR_VAL_LBN	18
   12.73 +#define PCI_EXP_DEVCAP_PWR_SCL_LBN	26
   12.74 +#define PCI_EXP_DEVCTL_PAYLOAD_LBN	5
   12.75 +#define PCI_EXP_LNKSTA_LNK_WID		0x3f0
   12.76 +#define PCI_EXP_LNKSTA_LNK_WID_LBN	4
   12.77 +
   12.78 +#define FALCON_IS_DUAL_FUNC(efx)		\
   12.79 +	(FALCON_REV(efx) < FALCON_REV_B0)
   12.80  
   12.81  /**************************************************************************
   12.82   *
   12.83 @@ -284,16 +294,24 @@ static struct efx_i2c_bit_operations fal
   12.84   *
   12.85   *************************************************************************/
   12.86  
   12.87 -/* Adds the relevant entries to the full-mode buffer table. */
   12.88 +/*
   12.89 + * Initialise a Falcon special buffer
   12.90 + *
   12.91 + * This will define a buffer (previously allocated via
   12.92 + * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
   12.93 + * it to be used for event queues, descriptor rings etc.
   12.94 + */
   12.95  static int
   12.96 -falcon_pin_special_buffer_full(struct efx_nic *efx,
   12.97 -			       struct efx_special_buffer *buffer)
   12.98 +falcon_init_special_buffer(struct efx_nic *efx,
   12.99 +			   struct efx_special_buffer *buffer)
  12.100  {
  12.101  	efx_qword_t buf_desc;
  12.102  	int index;
  12.103  	dma_addr_t dma_addr;
  12.104  	int i;
  12.105  
  12.106 +	EFX_BUG_ON_PARANOID(!buffer->addr);
  12.107 +
  12.108  	/* Write buffer descriptors to NIC */
  12.109  	for (i = 0; i < buffer->entries; i++) {
  12.110  		index = buffer->index + i;
  12.111 @@ -311,15 +329,18 @@ falcon_pin_special_buffer_full(struct ef
  12.112  	return 0;
  12.113  }
  12.114  
  12.115 -/* Clears the relevant entries from the buffer table */
  12.116 +/* Unmaps a buffer from Falcon and clears the buffer table entries */
  12.117  static void
  12.118 -falcon_clear_special_buffer_full(struct efx_nic *efx,
  12.119 -				 struct efx_special_buffer *buffer)
  12.120 +falcon_fini_special_buffer(struct efx_nic *efx,
  12.121 +			   struct efx_special_buffer *buffer)
  12.122  {
  12.123  	efx_oword_t buf_tbl_upd;
  12.124  	unsigned int start = buffer->index;
  12.125  	unsigned int end = (buffer->index + buffer->entries - 1);
  12.126  
  12.127 +	if (!buffer->entries)
  12.128 +		return;
  12.129 +
  12.130  	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
  12.131  		buffer->index, buffer->index + buffer->entries - 1);
  12.132  
  12.133 @@ -337,11 +358,8 @@ falcon_clear_special_buffer_full(struct 
  12.134   * This allocates memory for a new buffer, clears it and allocates a
  12.135   * new buffer ID range.  It does not write into Falcon's buffer table.
  12.136   *
  12.137 - * This call will allocate 4kB buffers, since Falcon can't use 8kB
  12.138 - * buffers for event queues and descriptor rings.  It will always
  12.139 - * allocate an even number of 4kB buffers, since when we're in
  12.140 - * half-entry mode for the buffer table we can only deal with pairs of
  12.141 - * buffers.
  12.142 + * This call will allocate 4KB buffers, since Falcon can't use 8KB
  12.143 + * buffers for event queues and descriptor rings.
  12.144   */
  12.145  static int falcon_alloc_special_buffer(struct efx_nic *efx,
  12.146  				       struct efx_special_buffer *buffer,
  12.147 @@ -349,8 +367,7 @@ static int falcon_alloc_special_buffer(s
  12.148  {
  12.149  	struct falcon_nic_data *nic_data = efx->nic_data;
  12.150  
  12.151 -	/* Round size up to an 8kB boundary (i.e. pairs of 4kB buffers) */
  12.152 -	len = (len + 8192 - 1) & ~(8192 - 1);
  12.153 +	len = ALIGN(len, FALCON_BUF_SIZE);
  12.154  
  12.155  	/* Allocate buffer as consistent PCI DMA space */
  12.156  	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
  12.157 @@ -358,8 +375,8 @@ static int falcon_alloc_special_buffer(s
  12.158  	if (!buffer->addr)
  12.159  		return -ENOMEM;
  12.160  	buffer->len = len;
  12.161 -	buffer->entries = len / 4096;
  12.162 -	BUG_ON(buffer->dma_addr & (FALCON_BUF_ALIGN - 1));
  12.163 +	buffer->entries = len / FALCON_BUF_SIZE;
  12.164 +	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
  12.165  
  12.166  	/* All zeros is a potentially valid event so memset to 0xff */
  12.167  	memset(buffer->addr, 0xff, len);
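
ALIGN() rounds up to the next multiple of a power-of-two boundary, so the old
open-coded 8kB rounding collapses to one line at the new 4kB granularity. A
standalone demonstration of the arithmetic, using a local copy of the macro
(equivalent to the kernel's for power-of-two alignments):

	#include <stdio.h>

	#define FALCON_BUF_SIZE 4096
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		printf("%d\n", ALIGN(5000, FALCON_BUF_SIZE));	/* 8192 */
		printf("%d\n", ALIGN(4096, FALCON_BUF_SIZE));	/* 4096 */
		printf("%d\n", ALIGN(1, FALCON_BUF_SIZE));	/* 4096 */
		return 0;
	}
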
  12.168 @@ -377,34 +394,6 @@ static int falcon_alloc_special_buffer(s
  12.169  	return 0;
  12.170  }
  12.171  
  12.172 -/*
  12.173 - * Initialise a Falcon special buffer
  12.174 - *
  12.175 - * This will define a buffer (previously allocated via
  12.176 - * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
  12.177 - * it to be used for event queues, descriptor rings etc.
  12.178 - */
  12.179 -static int falcon_init_special_buffer(struct efx_nic *efx,
  12.180 -				      struct efx_special_buffer *buffer)
  12.181 -{
  12.182 -	EFX_BUG_ON_PARANOID(!buffer->addr);
  12.183 -
  12.184 -	/* Write buffer descriptors to NIC */
  12.185 -	return falcon_pin_special_buffer_full(efx, buffer);
  12.186 -}
  12.187 -
  12.188 -/* Unmaps a buffer from Falcon and clears the buffer table
  12.189 - * entries */
  12.190 -static void falcon_fini_special_buffer(struct efx_nic *efx,
  12.191 -				       struct efx_special_buffer *buffer)
  12.192 -{
  12.193 -
  12.194 -	if (!buffer->entries)
  12.195 -		return;
  12.196 -
  12.197 -	falcon_clear_special_buffer_full(efx, buffer);
  12.198 -}
  12.199 -
  12.200  /* Release the buffer memory. */
  12.201  static void falcon_free_special_buffer(struct efx_nic *efx,
  12.202  				       struct efx_special_buffer *buffer)
  12.203 @@ -487,11 +476,7 @@ static inline void falcon_notify_tx_desc
  12.204   * descriptor in the hardware TX descriptor ring (in host memory), and
  12.205   * write a doorbell.
  12.206   */
  12.207 -#if defined(EFX_USE_FASTCALL)
  12.208  void fastcall falcon_push_buffers(struct efx_tx_queue *tx_queue)
  12.209 -#else
  12.210 -void falcon_push_buffers(struct efx_tx_queue *tx_queue)
  12.211 -#endif
  12.212  {
  12.213  
  12.214  	struct efx_tx_buffer *buffer;
  12.215 @@ -603,8 +588,7 @@ static int falcon_flush_tx_queue(struct 
  12.216  	falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
  12.217  	msleep(FALCON_FLUSH_TIMEOUT);
  12.218  
  12.219 -	/* If the NIC is resetting then don't bother checking */
  12.220 -	if (EFX_WORKAROUND_7803(efx) || (efx->state == STATE_RESETTING))
  12.221 +	if (EFX_WORKAROUND_7803(efx))
  12.222  		return 0;
  12.223  
  12.224  	/* Look for a flush completed event */
  12.225 @@ -707,11 +691,7 @@ static inline void falcon_build_rx_desc(
  12.226  /* This writes to the RX_DESC_WPTR register for the specified receive
  12.227   * descriptor ring.
  12.228   */
  12.229 -#if defined(EFX_USE_FASTCALL)
  12.230  void fastcall falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
  12.231 -#else
  12.232 -void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
  12.233 -#endif
  12.234  {
  12.235  	efx_dword_t reg;
  12.236  	unsigned write_ptr;
  12.237 @@ -799,8 +779,7 @@ static int falcon_flush_rx_queue(struct 
  12.238  	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
  12.239  	msleep(FALCON_FLUSH_TIMEOUT);
  12.240  
  12.241 -	/* If the NIC is resetting then don't bother checking */
  12.242 -	if (EFX_WORKAROUND_7803(efx) || (efx->state == STATE_RESETTING))
  12.243 +	if (EFX_WORKAROUND_7803(efx))
  12.244  		return 0;
  12.245  
  12.246  	/* Look for a flush completed event */
  12.247 @@ -863,8 +842,10 @@ void falcon_fini_rx(struct efx_rx_queue 
  12.248  			continue;
  12.249  		break;
  12.250  	}
  12.251 -	if (rc)
  12.252 +	if (rc) {
  12.253  		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
  12.254 +		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
  12.255 +	}
  12.256  
  12.257  	/* Remove RX descriptor ring from card */
  12.258  	EFX_ZERO_OWORD(rx_desc_ptr);
  12.259 @@ -897,11 +878,7 @@ void falcon_remove_rx(struct efx_rx_queu
  12.260   * whereas channel->eventq_read_ptr contains the index of the "next to
  12.261   * read" event.
  12.262   */
  12.263 -#if defined(EFX_USE_FASTCALL)
  12.264  void fastcall falcon_eventq_read_ack(struct efx_channel *channel)
  12.265 -#else
  12.266 -void falcon_eventq_read_ack(struct efx_channel *channel)
  12.267 -#endif
  12.268  {
  12.269  	efx_dword_t reg;
  12.270  	struct efx_nic *efx = channel->efx;
  12.271 @@ -947,10 +924,10 @@ static inline void falcon_handle_tx_even
  12.272  		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
  12.273  		tx_queue = &efx->tx_queue[tx_ev_q_label];
  12.274  
  12.275 -		if (efx->net_dev_registered)
  12.276 +		if (NET_DEV_REGISTERED(efx))
  12.277  			netif_tx_lock(efx->net_dev);
  12.278  		falcon_notify_tx_desc(tx_queue);
  12.279 -		if (efx->net_dev_registered)
  12.280 +		if (NET_DEV_REGISTERED(efx))
  12.281  			netif_tx_unlock(efx->net_dev);
  12.282  	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
  12.283  		   EFX_WORKAROUND_10727(efx)) {
  12.284 @@ -1290,11 +1267,7 @@ static void falcon_handle_driver_event(s
  12.285  	}
  12.286  }
  12.287  
  12.288 -#if defined(EFX_USE_FASTCALL)
  12.289  int fastcall falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
  12.290 -#else
  12.291 -int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
  12.292 -#endif
  12.293  {
  12.294  	unsigned int read_ptr;
  12.295  	efx_qword_t event, *p_event;
  12.296 @@ -1555,6 +1528,7 @@ static inline void falcon_irq_ack_a1(str
  12.297   */
  12.298  static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
  12.299  {
  12.300 +	struct falcon_nic_data *nic_data = efx->nic_data;
  12.301  	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
  12.302  	efx_oword_t fatal_intr;
  12.303  	int error, mem_perr;
  12.304 @@ -1581,8 +1555,8 @@ static irqreturn_t falcon_fatal_interrup
  12.305  
  12.306  	/* Disable DMA bus mastering on both devices */
  12.307  	pci_disable_device(efx->pci_dev);
  12.308 -	if (efx->type->is_dual_func)
  12.309 -		pci_disable_device(efx->pci_dev2);
  12.310 +	if (FALCON_IS_DUAL_FUNC(efx))
  12.311 +		pci_disable_device(nic_data->pci_dev2);
  12.312  
  12.313  	if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
  12.314  		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
  12.315 @@ -1603,13 +1577,9 @@ out:
  12.316   * interrupts are disabled, to allow for correct semantics of
  12.317   * efx_suspend() and efx_resume().
  12.318   */
  12.319 -#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
  12.320 -static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
  12.321 -#else
  12.322  static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id,
  12.323  					      struct pt_regs *regs
  12.324  					      __attribute__ ((unused)))
  12.325 -#endif
  12.326  {
  12.327  	struct efx_nic *efx = (struct efx_nic *)dev_id;
  12.328  	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
  12.329 @@ -1647,13 +1617,9 @@ static irqreturn_t falcon_legacy_interru
  12.330  }
  12.331  
  12.332  
  12.333 -#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
  12.334 -static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
  12.335 -#else
  12.336  static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id,
  12.337  					      struct pt_regs *regs
  12.338  					      __attribute__ ((unused)))
  12.339 -#endif
  12.340  {
  12.341  	struct efx_nic *efx = (struct efx_nic *)dev_id;
  12.342  	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
  12.343 @@ -1710,13 +1676,9 @@ static irqreturn_t falcon_legacy_interru
  12.344   * interrupts are disabled, to allow for correct semantics of
  12.345   * efx_suspend() and efx_resume().
  12.346   */
  12.347 -#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
  12.348 -static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
  12.349 -#else
  12.350  static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id,
  12.351  					struct pt_regs *regs
  12.352  					__attribute__ ((unused)))
  12.353 -#endif
  12.354  {
  12.355  	struct efx_channel *channel = (struct efx_channel *)dev_id;
  12.356  	struct efx_nic *efx = channel->efx;
  12.357 @@ -1746,7 +1708,6 @@ static void falcon_setup_rss_indir_table
  12.358  {
  12.359  	int i = 0;
  12.360  	unsigned long offset;
  12.361 -	unsigned long flags __attribute__ ((unused));
  12.362  	efx_dword_t dword;
  12.363  
  12.364  	if (FALCON_REV(efx) < FALCON_REV_B0)
  12.365 @@ -1976,8 +1937,6 @@ falcon_spi_write(const struct efx_spi_de
  12.366  void falcon_drain_tx_fifo(struct efx_nic *efx)
  12.367  {
  12.368  	efx_oword_t temp;
  12.369 -	efx_oword_t mcast_reg0;
  12.370 -	efx_oword_t mcast_reg1;
  12.371  	int count;
  12.372  
  12.373  	if (FALCON_REV(efx) < FALCON_REV_B0)
  12.374 @@ -1995,9 +1954,6 @@ void falcon_drain_tx_fifo(struct efx_nic
  12.375  	EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
  12.376  	falcon_write(efx, &temp, MAC0_CTRL_REG_KER);
  12.377  
  12.378 -	falcon_read(efx, &mcast_reg0, MAC_MCAST_HASH_REG0_KER);
  12.379 -	falcon_read(efx, &mcast_reg1, MAC_MCAST_HASH_REG1_KER);
  12.380 -
  12.381  	/* Reset the MAC and EM block. */
  12.382  	falcon_read(efx, &temp, GLB_CTL_REG_KER);
  12.383  	EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
  12.384 @@ -2025,10 +1981,6 @@ void falcon_drain_tx_fifo(struct efx_nic
  12.385  
  12.386  	spin_unlock(&efx->stats_lock);
  12.387  
  12.388 -	/* Restore the multicast hash registers. */
  12.389 -	falcon_write(efx, &mcast_reg0, MAC_MCAST_HASH_REG0_KER);
  12.390 -	falcon_write(efx, &mcast_reg1, MAC_MCAST_HASH_REG1_KER);
  12.391 -
  12.392  	/* If we've reset the EM block and the link is up, then
  12.393  	 * we'll have to kick the XAUI link so the PHY can recover */
  12.394  	if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
  12.395 @@ -2053,6 +2005,12 @@ void falcon_deconfigure_mac_wrapper(stru
  12.396  	 * draining the TX fifo and resetting. */
  12.397  	changing_loopback = (efx->loopback_mode != nic_data->old_loopback_mode);
  12.398  	nic_data->old_loopback_mode = efx->loopback_mode;
  12.399 +
  12.400 +	if (EFX_WORKAROUND_11667(efx) && (efx->phy_type == PHY_TYPE_10XPRESS)) {
  12.401 +		if (changing_loopback)
  12.402 +			return;
  12.403 +	}
  12.404 +
  12.405  	if (changing_loopback || !efx->link_up)
  12.406  		falcon_drain_tx_fifo(efx);
  12.407  }
  12.408 @@ -2074,8 +2032,7 @@ void falcon_reconfigure_mac_wrapper(stru
  12.409  	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
  12.410  	 * as advertised.  Disable to ensure packets are not
  12.411  	 * indefinitely held and TX queue can be flushed at any point
  12.412 -	 * while the link is down.
  12.413 -	 */
  12.414 +	 * while the link is down. */
  12.415  	EFX_POPULATE_OWORD_5(reg,
  12.416  			     MAC_XOFF_VAL, 0xffff /* max pause time */,
  12.417  			     MAC_BCAD_ACPT, 1,
  12.418 @@ -2091,12 +2048,12 @@ void falcon_reconfigure_mac_wrapper(stru
  12.419  
  12.420  	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
  12.421  
  12.422 -	/*
  12.423 -	 * Transmission of pause frames when RX crosses the threshold is
  12.424 +	/* Restore the multicast hash registers. */
  12.425 +	falcon_set_multicast_hash(efx);
  12.426 +
  12.427 +	/* Transmission of pause frames when RX crosses the threshold is
  12.428  	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
  12.429 -	 *
  12.430 -	 * Action on receipt of pause frames is controller by XM_DIS_FCNTL
  12.431 -	 */
  12.432 +	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
  12.433  	tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
  12.434  	falcon_read(efx, &reg, RX_CFG_REG_KER);
  12.435  	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
  12.436 @@ -2291,7 +2248,6 @@ static int falcon_mdio_read(struct net_d
  12.437  	unsigned int phy_10g = phy_id & FALCON_PHY_ID_10G;
  12.438  	efx_oword_t reg;
  12.439  	int value = -1;
  12.440 -	unsigned long flags __attribute__ ((unused));
  12.441  
  12.442  	if (phy_addr == PHY_ADDR_INVALID)
  12.443  		return -1;
  12.444 @@ -2373,7 +2329,7 @@ static void falcon_init_mdio(struct mii_
  12.445  	gmii->mdio_read = falcon_mdio_read;
  12.446  	gmii->mdio_write = falcon_mdio_write;
  12.447  	gmii->phy_id_mask = FALCON_PHY_ID_MASK;
  12.448 -	gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_DEV_ADR)) - 1);
  12.449 +	gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
  12.450  }
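
The reg_num_mask line above also shows the usual way this driver derives a bitfield mask: shift 1 left by the field's _WIDTH and subtract 1, instead of hard-coding a constant. As a standalone sketch (efx_field_mask() is a hypothetical helper):

	/* All-ones mask for an n-bit field, e.g. width 5 -> 0x1f.
	 * Valid for 0 < width < BITS_PER_LONG. */
	static inline unsigned long efx_field_mask(unsigned int width)
	{
		return (1UL << width) - 1;
	}
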
  12.451  
  12.452  static int falcon_probe_gmac_port(struct efx_nic *efx)
  12.453 @@ -2493,17 +2449,16 @@ void falcon_remove_port(struct efx_nic *
  12.454  
  12.455  void falcon_set_multicast_hash(struct efx_nic *efx)
  12.456  {
  12.457 -	union efx_multicast_hash falcon_mc_hash;
  12.458 +	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
  12.459  
  12.460  	/* Broadcast packets go through the multicast hash filter.
  12.461  	 * ether_crc_le() of the broadcast address is 0xbe2612ff
  12.462 -	 * so we always add bit 0xff to the mask we are given.
  12.463 +	 * so we always add bit 0xff to the mask.
  12.464  	 */
  12.465 -	memcpy(&falcon_mc_hash, &efx->multicast_hash, sizeof(falcon_mc_hash));
  12.466 -	set_bit_le(0xff, (void *)&falcon_mc_hash);
  12.467 -
  12.468 -	falcon_write(efx, &falcon_mc_hash.oword[0], MAC_MCAST_HASH_REG0_KER);
  12.469 -	falcon_write(efx, &falcon_mc_hash.oword[1], MAC_MCAST_HASH_REG1_KER);
  12.470 +	set_bit_le(0xff, mc_hash->byte);
  12.471 +
  12.472 +	falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
  12.473 +	falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
  12.474  }
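
The bit index 0xff is not arbitrary: the filter is a 256-bucket hash indexed by the low 8 bits of the little-endian Ethernet CRC of the destination address, and ether_crc_le() of ff:ff:ff:ff:ff:ff is 0xbe2612ff, whose low byte is 0xff. A sketch of how an address maps to a bucket (illustrative; addr and mc_hash stand in for the driver's own locals):

	/* Map a destination MAC to its bucket in the 256-bit hash. */
	u32 crc = ether_crc_le(ETH_ALEN, addr);	/* <linux/crc32.h> */
	unsigned int bucket = crc & 0xff;	/* 256 buckets */
	set_bit_le(bucket, mc_hash->byte);
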
  12.475  
  12.476  /**************************************************************************
  12.477 @@ -2587,6 +2542,7 @@ out:
  12.478   * context and is allowed to sleep. */
  12.479  int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
  12.480  {
  12.481 +	struct falcon_nic_data *nic_data = efx->nic_data;
  12.482  	efx_oword_t glb_ctl_reg_ker;
  12.483  	int rc;
  12.484  
  12.485 @@ -2601,8 +2557,8 @@ int falcon_reset_hw(struct efx_nic *efx,
  12.486  				"function prior to hardware reset\n");
  12.487  			goto fail1;
  12.488  		}
  12.489 -		if (efx->type->is_dual_func) {
  12.490 -			rc = pci_save_state(efx->pci_dev2);
  12.491 +		if (FALCON_IS_DUAL_FUNC(efx)) {
  12.492 +			rc = pci_save_state(nic_data->pci_dev2);
  12.493  			if (rc) {
  12.494  				EFX_ERR(efx, "failed to backup PCI state of "
  12.495  					"secondary function prior to "
  12.496 @@ -2635,8 +2591,8 @@ int falcon_reset_hw(struct efx_nic *efx,
  12.497  
  12.498  	/* Restore PCI configuration if needed */
  12.499  	if (method == RESET_TYPE_WORLD) {
  12.500 -		if (efx->type->is_dual_func) {
  12.501 -			rc = pci_restore_state(efx->pci_dev2);
  12.502 +		if (FALCON_IS_DUAL_FUNC(efx)) {
  12.503 +			rc = pci_restore_state(nic_data->pci_dev2);
  12.504  			if (rc) {
  12.505  				EFX_ERR(efx, "failed to restore PCI config for "
  12.506  					"the secondary function\n");
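
FALCON_IS_DUAL_FUNC() replaces the old efx->type->is_dual_func flag throughout this patch, treating the dual PCI function as a property of the silicon revision rather than of the type table. Its definition is outside these hunks; a plausible sketch, assuming only Falcon rev A parts are dual-function:

	/* Assumed definition: rev A silicon exposes two PCI
	 * functions, rev B0 and later expose one. */
	#define FALCON_IS_DUAL_FUNC(efx) (FALCON_REV(efx) < FALCON_REV_B0)
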
  12.507 @@ -2686,19 +2642,21 @@ fail6:
  12.508   */
  12.509  static int falcon_reset_sram(struct efx_nic *efx)
  12.510  {
  12.511 +	struct falcon_nic_data *nic_data = efx->nic_data;
  12.512  	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
  12.513  	int count, onchip, sram_cfg_val;
  12.514  
  12.515  	/* Set the SRAM wake/sleep GPIO appropriately. */
  12.516 -	onchip = (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY);
  12.517 +	onchip = (nic_data->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY);
  12.518  	falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
  12.519  	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
  12.520  	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, onchip ? 1 : 0);
  12.521  	falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
  12.522  
  12.523  	/* Initiate SRAM reset */
  12.524 -	sram_cfg_val = (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) ?
  12.525 -		0 : efx->external_sram_cfg;
  12.526 +	sram_cfg_val = nic_data->external_sram_cfg;
  12.527 +	if (nic_data->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY)
  12.528 +		sram_cfg_val = 0;
  12.529  
  12.530  	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
  12.531  			     SRAM_OOB_BT_INIT_EN, 1,
  12.532 @@ -2761,11 +2719,12 @@ static void falcon_spi_device_init(struc
  12.533  /* Extract non-volatile configuration */
  12.534  static int falcon_probe_nvconfig(struct efx_nic *efx)
  12.535  {
  12.536 -	int rc;
  12.537 +	struct falcon_nic_data *nic_data = efx->nic_data;
  12.538  	struct falcon_nvconfig *nvconfig;
  12.539  	struct efx_spi_device *spi;
  12.540  	size_t offset, len;
  12.541  	int magic_num, struct_ver, board_rev, onchip_sram;
  12.542 +	int rc;
  12.543  
  12.544  	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
  12.545  
  12.546 @@ -2831,18 +2790,18 @@ static int falcon_probe_nvconfig(struct 
   12.547  	 * automatically but might have been reset since boot.
  12.548  	 */
  12.549  	if (onchip_sram) {
  12.550 -		efx->external_sram_cfg = SRM_NB_BSZ_ONCHIP_ONLY;
  12.551 +		nic_data->external_sram_cfg = SRM_NB_BSZ_ONCHIP_ONLY;
  12.552  	} else {
  12.553 -		efx->external_sram_cfg =
  12.554 -		    EFX_OWORD_FIELD(nvconfig->srm_cfg_reg,
  12.555 -				    SRM_NUM_BANKS_AND_BANK_SIZE);
  12.556 -		WARN_ON(efx->external_sram_cfg == SRM_NB_BSZ_RESERVED);
  12.557 +		nic_data->external_sram_cfg =
  12.558 +			EFX_OWORD_FIELD(nvconfig->srm_cfg_reg,
  12.559 +					SRM_NUM_BANKS_AND_BANK_SIZE);
  12.560 +		WARN_ON(nic_data->external_sram_cfg == SRM_NB_BSZ_RESERVED);
  12.561  		/* Replace invalid setting with the smallest defaults */
  12.562 -		if (efx->external_sram_cfg == SRM_NB_BSZ_DEFAULT)
  12.563 -			efx->external_sram_cfg = SRM_NB_BSZ_1BANKS_2M;
  12.564 +		if (nic_data->external_sram_cfg == SRM_NB_BSZ_DEFAULT)
  12.565 +			nic_data->external_sram_cfg = SRM_NB_BSZ_1BANKS_2M;
  12.566  	}
  12.567  	EFX_LOG(efx, "external_sram_cfg=%d (>=0 is external)\n",
  12.568 -		efx->external_sram_cfg);
  12.569 +		nic_data->external_sram_cfg);
  12.570  
  12.571   out:
  12.572  	kfree(nvconfig);
  12.573 @@ -2892,31 +2851,35 @@ static int falcon_dimension_resources(st
  12.574  	 */
  12.575  	switch (FALCON_REV(efx)) {
  12.576  	case FALCON_REV_A1:
  12.577 -		res->rxq_min = res->txq_min = 16;
  12.578 -		res->evq_int_min = res->evq_int_max = 4;
  12.579 +		res->rxq_min = 16;
  12.580 +		res->txq_min = 16;
  12.581 +		res->evq_int_min = 4;
  12.582 +		res->evq_int_lim = 5;
  12.583  		res->evq_timer_min = 5;
  12.584 -		res->evq_timer_max = 4096;
  12.585 +		res->evq_timer_lim = 4096;
  12.586  		internal_dcs_entries = 8192;
  12.587  		break;
  12.588  	case FALCON_REV_B0:
  12.589  	default:
  12.590 -		res->rxq_min = res->txq_min = res->evq_int_min = 0;
  12.591 -		res->evq_int_max = 64;
  12.592 +		res->rxq_min = 0;
  12.593 +		res->txq_min = 0;
  12.594 +		res->evq_int_min = 0;
  12.595 +		res->evq_int_lim = 64;
  12.596  		res->evq_timer_min = 64;
  12.597 -		res->evq_timer_max = 4096;
  12.598 +		res->evq_timer_lim = 4096;
  12.599  		internal_dcs_entries = 4096;
  12.600  		break;
  12.601  	}
  12.602  
  12.603  	buffer_entry_bytes = 8;
  12.604  
  12.605 -	if (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) {
  12.606 -		res->rxq_max = internal_dcs_entries / nic_data->rx_dc_entries;
  12.607 -		res->txq_max = internal_dcs_entries / nic_data->tx_dc_entries;
  12.608 +	if (nic_data->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) {
  12.609 +		res->rxq_lim = internal_dcs_entries / nic_data->rx_dc_entries;
  12.610 +		res->txq_lim = internal_dcs_entries / nic_data->tx_dc_entries;
  12.611  		/* Prog model says 8K entries for buffer table in internal
  12.612  		 * mode.  But does this not depend on full/half mode?
  12.613  		 */
  12.614 -		res->buffer_table_max = 8192;
  12.615 +		res->buffer_table_lim = 8192;
  12.616  		nic_data->tx_dc_base = 0x130000;
  12.617  		nic_data->rx_dc_base = 0x100000;
  12.618  	} else {
  12.619 @@ -2925,7 +2888,7 @@ static int falcon_dimension_resources(st
  12.620  		/* Determine how much SRAM we have to play with.  We have
  12.621  		 * to fit buffer table and descriptor caches in.
  12.622  		 */
  12.623 -		switch (efx->external_sram_cfg) {
  12.624 +		switch (nic_data->external_sram_cfg) {
  12.625  		case SRM_NB_BSZ_1BANKS_2M:
  12.626  		default:
  12.627  			sram_bytes = 2 * 1024 * 1024;
  12.628 @@ -2953,17 +2916,17 @@ static int falcon_dimension_resources(st
  12.629  		max_vnics = sram_bytes / vnic_bytes;
  12.630  		for (n_vnics = 1; n_vnics < res->evq_timer_min + max_vnics;)
  12.631  			n_vnics *= 2;
  12.632 -		res->rxq_max = n_vnics;
  12.633 -		res->txq_max = n_vnics;
  12.634 +		res->rxq_lim = n_vnics;
  12.635 +		res->txq_lim = n_vnics;
  12.636  
  12.637  		dcs = n_vnics * nic_data->tx_dc_entries * 8;
  12.638  		nic_data->tx_dc_base = sram_bytes - dcs;
  12.639  		dcs = n_vnics * nic_data->rx_dc_entries * 8;
  12.640  		nic_data->rx_dc_base = nic_data->tx_dc_base - dcs;
  12.641 -		res->buffer_table_max = nic_data->rx_dc_base / 8;
  12.642 +		res->buffer_table_lim = nic_data->rx_dc_base / 8;
  12.643  	}
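
The n_vnics doubling loop a few lines up rounds evq_timer_min + max_vnics up to the next power of two. On kernels that provide <linux/log2.h> the same result could be written as below; the open-coded loop simply avoids that dependency:

	/* Equivalent to the doubling loop, assuming <linux/log2.h>. */
	n_vnics = roundup_pow_of_two(res->evq_timer_min + max_vnics);
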
  12.644  
  12.645 -	if (efx->type->is_dual_func)
  12.646 +	if (FALCON_IS_DUAL_FUNC(efx))
  12.647  		res->flags |= EFX_DL_FALCON_DUAL_FUNC;
  12.648  
  12.649  	if (EFX_INT_MODE_USE_MSI(efx))
  12.650 @@ -2982,7 +2945,6 @@ static int falcon_probe_nic_variant(stru
  12.651  	falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
  12.652  	efx->is_asic = EFX_OWORD_FIELD(altera_build, VER_ALL) == 0;
  12.653  
  12.654 -#if !defined(EFX_USE_PCI_DEV_REVISION)
  12.655  	{
  12.656  		int rc;
  12.657  		rc = pci_read_config_byte(efx->pci_dev, PCI_CLASS_REVISION,
  12.658 @@ -2990,7 +2952,7 @@ static int falcon_probe_nic_variant(stru
  12.659  		if (rc)
  12.660  			return rc;
  12.661  	}
  12.662 -#endif
  12.663 +
  12.664  	switch (FALCON_REV(efx)) {
  12.665  	case FALCON_REV_A0:
  12.666  	case 0xff:
  12.667 @@ -3189,24 +3151,28 @@ int falcon_probe_nic(struct efx_nic *efx
  12.668  	efx->i2c.sda = 1;
  12.669  	efx->i2c.scl = 1;
  12.670  
  12.671 +	/* Allocate storage for hardware specific data */
  12.672 +	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
  12.673 +	efx->nic_data = (void *) nic_data;
  12.674 +
  12.675  	/* Determine number of ports etc. */
  12.676  	rc = falcon_probe_nic_variant(efx);
  12.677  	if (rc)
  12.678  		goto fail1;
  12.679  
  12.680  	/* Probe secondary function if expected */
  12.681 -	if (efx->type->is_dual_func) {
  12.682 +	if (FALCON_IS_DUAL_FUNC(efx)) {
  12.683  		struct pci_dev *dev = pci_dev_get(efx->pci_dev);
  12.684  
  12.685  		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
  12.686  					     dev))) {
  12.687  			if (dev->bus == efx->pci_dev->bus &&
  12.688  			    dev->devfn == efx->pci_dev->devfn + 1) {
  12.689 -				efx->pci_dev2 = dev;
  12.690 +				nic_data->pci_dev2 = dev;
  12.691  				break;
  12.692  			}
  12.693  		}
  12.694 -		if (!efx->pci_dev2) {
  12.695 +		if (!nic_data->pci_dev2) {
  12.696  			EFX_ERR(efx, "failed to find secondary function\n");
  12.697  			rc = -ENODEV;
  12.698  			goto fail2;
  12.699 @@ -3244,11 +3210,6 @@ int falcon_probe_nic(struct efx_nic *efx
  12.700  		efx->mii.phy_id = 2;
  12.701  	}
  12.702  
  12.703 -	/* Decide how many resources we can allocate, to ourselves
  12.704 -	 * and to driverlink clients */
  12.705 -	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
  12.706 -	efx->nic_data = (void *) nic_data;
  12.707 -
  12.708  	rc = falcon_dimension_resources(efx);
  12.709  	if (rc)
  12.710  		goto fail6;
  12.711 @@ -3256,21 +3217,21 @@ int falcon_probe_nic(struct efx_nic *efx
  12.712  	return 0;
  12.713  
  12.714   fail6:
  12.715 -	kfree(nic_data);
  12.716 -	efx->nic_data = efx->dl_info = NULL;
  12.717 +	efx->dl_info = NULL;
  12.718   fail5:
  12.719  	falcon_remove_spi_devices(efx);
  12.720  	falcon_free_buffer(efx, &efx->irq_status);
  12.721   fail4:
  12.722  	/* fall-thru */
  12.723   fail3:
  12.724 -	if (efx->pci_dev2) {
  12.725 -		pci_dev_put(efx->pci_dev2);
  12.726 -		efx->pci_dev2 = NULL;
  12.727 +	if (nic_data->pci_dev2) {
  12.728 +		pci_dev_put(nic_data->pci_dev2);
  12.729 +		nic_data->pci_dev2 = NULL;
  12.730  	}
  12.731   fail2:
  12.732  	/* fall-thru */
  12.733   fail1:
  12.734 +	kfree(efx->nic_data);
  12.735  	return rc;
  12.736  }
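
Moving the kzalloc() of nic_data to the top of falcon_probe_nic() simplifies the unwind: every failure label can now fall through to a single kfree(efx->nic_data) at fail1, instead of each path freeing it separately. The shape of the idiom as a sketch (probe_step() is a hypothetical stand-in, and the NULL check is shown for completeness):

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = probe_step(efx);	/* hypothetical probe stage */
	if (rc)
		goto fail;
	return 0;

 fail:
	kfree(efx->nic_data);
	efx->nic_data = NULL;
	return rc;
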
  12.737  
  12.738 @@ -3336,7 +3297,7 @@ static void falcon_init_ack_repl_timer(s
  12.739  				  &pcie_ctrl_stat_reg);
  12.740  	pcie_devicectrl = (u16) EFX_EXTRACT_DWORD(pcie_ctrl_stat_reg, 0, 15);
  12.741  	tlp_size = ((PCI_EXP_DEVCTL_PAYLOAD & pcie_devicectrl) >>
  12.742 -		    ffs(PCI_EXP_DEVCTL_PAYLOAD));
  12.743 +		    PCI_EXP_DEVCTL_PAYLOAD_LBN);
  12.744  	EFX_WARN_ON_PARANOID(tlp_size > 3); /* => 1024 bytes */
  12.745  	tlp_ack_factor = &tlp_ack_factor_lut[tlp_size & 0x3];
  12.746  	tlp_size_decoded = tlp_ack_factor->tlp;
  12.747 @@ -3450,6 +3411,7 @@ static void falcon_fini_pcie_core(struct
  12.748   */
  12.749  int falcon_init_nic(struct efx_nic *efx)
  12.750  {
  12.751 +	struct falcon_nic_data *nic_data = efx->nic_data;
  12.752  	struct falcon_nic_data *data;
  12.753  	efx_oword_t temp;
  12.754  	unsigned thresh;
  12.755 @@ -3469,7 +3431,7 @@ int falcon_init_nic(struct efx_nic *efx)
  12.756  	/* Use on-chip SRAM if needed.
  12.757  	 */
  12.758  	falcon_read(efx, &temp, NIC_STAT_REG);
  12.759 -	if (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY)
  12.760 +	if (nic_data->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY)
  12.761  		EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
  12.762  	else
  12.763  		EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 0);
  12.764 @@ -3623,9 +3585,7 @@ void falcon_fini_nic(struct efx_nic *efx
  12.765  
  12.766  void falcon_remove_nic(struct efx_nic *efx)
  12.767  {
  12.768 -	/* Tear down the private nic state, and the driverlink nic params */
  12.769 -	kfree(efx->nic_data);
  12.770 -	efx->nic_data = efx->dl_info = NULL;
  12.771 +	struct falcon_nic_data *nic_data = efx->nic_data;
  12.772  
  12.773  	falcon_remove_spi_devices(efx);
  12.774  	falcon_free_buffer(efx, &efx->irq_status);
  12.775 @@ -3634,10 +3594,14 @@ void falcon_remove_nic(struct efx_nic *e
  12.776  	(void) falcon_reset_hw(efx, RESET_TYPE_ALL);
  12.777  
  12.778  	/* Release the second function after the reset */
  12.779 -	if (efx->pci_dev2) {
  12.780 -		pci_dev_put(efx->pci_dev2);
  12.781 -		efx->pci_dev2 = NULL;
  12.782 +	if (nic_data->pci_dev2) {
  12.783 +		pci_dev_put(nic_data->pci_dev2);
  12.784 +		nic_data->pci_dev2 = NULL;
  12.785  	}
  12.786 +
  12.787 +	/* Tear down the private nic state, and the driverlink nic params */
  12.788 +	kfree(efx->nic_data);
  12.789 +	efx->nic_data = efx->dl_info = NULL;
  12.790  }
  12.791  
  12.792  void falcon_update_nic_stats(struct efx_nic *efx)
  12.793 @@ -3657,7 +3621,6 @@ void falcon_update_nic_stats(struct efx_
  12.794   */
  12.795  
  12.796  struct efx_nic_type falcon_a_nic_type = {
  12.797 -	.is_dual_func = 1,
  12.798  	.mem_bar = 2,
  12.799  	.mem_map_size = 0x20000,
  12.800  	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
  12.801 @@ -3679,7 +3642,6 @@ struct efx_nic_type falcon_a_nic_type = 
  12.802  };
  12.803  
  12.804  struct efx_nic_type falcon_b_nic_type = {
  12.805 -	.is_dual_func = 0,
  12.806  	.mem_bar = 2,
  12.807  	/* Map everything up to and including the RSS indirection
  12.808  	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
    13.1 --- a/drivers/net/sfc/falcon.h	Tue Mar 31 11:49:12 2009 +0100
    13.2 +++ b/drivers/net/sfc/falcon.h	Tue Mar 31 11:59:10 2009 +0100
    13.3 @@ -28,8 +28,6 @@
    13.4  #ifndef EFX_FALCON_H
    13.5  #define EFX_FALCON_H
    13.6  
    13.7 -#include <asm/io.h>
    13.8 -#include <linux/spinlock.h>
    13.9  #include "net_driver.h"
   13.10  
   13.11  /*
   13.12 @@ -42,11 +40,7 @@ enum falcon_revision {
   13.13  	FALCON_REV_B0 = 2,
   13.14  };
   13.15  
   13.16 -#if defined(EFX_USE_PCI_DEV_REVISION)
   13.17 -#define FALCON_REV(efx) ((efx)->pci_dev->revision)
   13.18 -#else
   13.19  #define FALCON_REV(efx) ((efx)->revision)
   13.20 -#endif
   13.21  
   13.22  extern struct efx_nic_type falcon_a_nic_type;
   13.23  extern struct efx_nic_type falcon_b_nic_type;
   13.24 @@ -63,46 +57,29 @@ extern int falcon_probe_tx(struct efx_tx
   13.25  extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
   13.26  extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
   13.27  extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
   13.28 -#if defined(EFX_USE_FASTCALL)
   13.29  extern void fastcall falcon_push_buffers(struct efx_tx_queue *tx_queue);
   13.30 -#else
   13.31 -extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
   13.32 -#endif
   13.33  
   13.34  /* RX data path */
   13.35  extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
   13.36  extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
   13.37  extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
   13.38  extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
   13.39 -#if defined(EFX_USE_FASTCALL)
   13.40  extern void fastcall falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
   13.41 -#else
   13.42 -extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
   13.43 -#endif
   13.44  
   13.45  /* Event data path */
   13.46  extern int falcon_probe_eventq(struct efx_channel *channel);
   13.47  extern int falcon_init_eventq(struct efx_channel *channel);
   13.48  extern void falcon_fini_eventq(struct efx_channel *channel);
   13.49  extern void falcon_remove_eventq(struct efx_channel *channel);
   13.50 -#if defined(EFX_USE_FASTCALL)
   13.51  extern int fastcall falcon_process_eventq(struct efx_channel *channel,
   13.52  					  int *rx_quota);
   13.53 -#else
   13.54 -extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
   13.55 -#endif
   13.56 -#if defined(EFX_USE_FASTCALL)
   13.57  extern void fastcall falcon_eventq_read_ack(struct efx_channel *channel);
   13.58 -#else
   13.59 -extern void falcon_eventq_read_ack(struct efx_channel *channel);
   13.60 -#endif
   13.61  
   13.62  /* Ports */
   13.63  extern int falcon_probe_port(struct efx_nic *efx);
   13.64  extern void falcon_remove_port(struct efx_nic *efx);
   13.65  
   13.66  /* MAC/PHY */
   13.67 -extern void falcon_check_xaui_link_up(struct efx_nic *efx);
   13.68  extern int falcon_xaui_link_ok(struct efx_nic *efx);
   13.69  extern int falcon_dma_stats(struct efx_nic *efx,
   13.70  			    unsigned int done_offset);
    14.1 --- a/drivers/net/sfc/falcon_hwdefs.h	Tue Mar 31 11:49:12 2009 +0100
    14.2 +++ b/drivers/net/sfc/falcon_hwdefs.h	Tue Mar 31 11:59:10 2009 +0100
    14.3 @@ -894,6 +894,15 @@
    14.4  #define XM_DIS_FCNTL_WIDTH 1
    14.5  
    14.6  /* XGMAC pause time count register */
    14.7 +/* XGMAC management interrupt mask register */
    14.8 +#define XM_MGT_INT_MSK_REG_MAC_B0 0x5
    14.9 +#define XM_MSK_PRMBLE_ERR_LBN 2
   14.10 +#define XM_MSK_PRMBLE_ERR_WIDTH 1
   14.11 +#define XM_MSK_RMTFLT_LBN 1
   14.12 +#define XM_MSK_RMTFLT_WIDTH 1
   14.13 +#define XM_MSK_LCLFLT_LBN 0
   14.14 +#define XM_MSK_LCLFLT_WIDTH 1
   14.15 +
   14.16  #define XM_PAUSE_TIME_REG_MAC 0x9
   14.17  #define XM_TX_PAUSE_CNT_LBN 16
   14.18  #define XM_TX_PAUSE_CNT_WIDTH 16
   14.19 @@ -922,6 +931,15 @@
   14.20  #define XX_PWRDNC_EN_LBN 14
   14.21  #define XX_PWRDNC_EN_WIDTH 1
   14.22  #define XX_PWRDNB_EN_LBN 13
   14.23 +/* XGMAC management interrupt status register */
   14.24 +#define XM_MGT_INT_REG_MAC_B0 0x0f
   14.25 +#define XM_PRMBLE_ERR 2
   14.26 +#define XM_PRMBLE_WIDTH 1
   14.27 +#define XM_RMTFLT_LBN 1
   14.28 +#define XM_RMTFLT_WIDTH 1
   14.29 +#define XM_LCLFLT_LBN 0
   14.30 +#define XM_LCLFLT_WIDTH 1
   14.31 +
   14.32  #define XX_PWRDNB_EN_WIDTH 1
   14.33  #define XX_PWRDNA_EN_LBN 12
   14.34  #define XX_PWRDNA_EN_WIDTH 1
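
The new XM_MGT_INT definitions follow this header's _LBN/_WIDTH convention: each field is named by its lowest bit number and its width, and the bitfield macros token-paste those suffixes onto the field name. Roughly how a field is extracted (a conceptual sketch; the real macros in bitfield.h also handle 128-bit owords and endianness):

	/* Conceptual expansion of EFX_DWORD_FIELD(reg, XM_RMTFLT). */
	#define EXTRACT_FIELD(val, field) \
		(((val) >> field##_LBN) & ((1U << field##_WIDTH) - 1))
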
    15.1 --- a/drivers/net/sfc/falcon_io.h	Tue Mar 31 11:49:12 2009 +0100
    15.2 +++ b/drivers/net/sfc/falcon_io.h	Tue Mar 31 11:59:10 2009 +0100
    15.3 @@ -28,8 +28,9 @@
    15.4  #ifndef EFX_FALCON_IO_H
    15.5  #define EFX_FALCON_IO_H
    15.6  
    15.7 +#include <linux/io.h>
    15.8 +#include <linux/spinlock.h>
    15.9  #include "net_driver.h"
   15.10 -#include "falcon.h"
   15.11  
   15.12  /**************************************************************************
   15.13   *
    16.1 --- a/drivers/net/sfc/falcon_xmac.c	Tue Mar 31 11:49:12 2009 +0100
    16.2 +++ b/drivers/net/sfc/falcon_xmac.c	Tue Mar 31 11:59:10 2009 +0100
    16.3 @@ -236,6 +236,43 @@ int falcon_reset_xaui(struct efx_nic *ef
    16.4  	return rc;
    16.5  }
    16.6  
    16.7 +static int falcon_xgmii_status(struct efx_nic *efx)
    16.8 +{
    16.9 +	efx_dword_t reg;
   16.10 +
   16.11 +	if (FALCON_REV(efx) < FALCON_REV_B0)
   16.12 +		return 1;
   16.13 +
   16.14 +	/* The ISR latches, so clear it and re-read */
   16.15 +	efx->mac_op->mac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
   16.16 +	efx->mac_op->mac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
   16.17 +
   16.18 +	if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
   16.19 +	    EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
   16.20 +		EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
   16.21 +		return 0;
   16.22 +	}
   16.23 +
   16.24 +	return 1;
   16.25 +}
   16.26 +
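
falcon_xgmii_status() above reads the management interrupt register twice because the fault bits latch: the first read returns, and clears, anything latched since the last read, so only the second read reflects the live fault state. In sketch form (read_status() is a hypothetical stand-in for mac_readl on XM_MGT_INT_REG_MAC_B0):

	read_status(efx, &reg);	/* flush stale latched faults */
	read_status(efx, &reg);	/* observe the current state */
	fault = EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
		EFX_DWORD_FIELD(reg, XM_RMTFLT);
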
   16.27 +static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
   16.28 +{
   16.29 +	efx_dword_t reg;
   16.30 +
   16.31 +	if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
   16.32 +		return;
   16.33 +
   16.34 +	/* Flush the ISR */
   16.35 +	if (enable)
   16.36 +		efx->mac_op->mac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
   16.37 +
   16.38 +	EFX_POPULATE_DWORD_2(reg,
   16.39 +			     XM_MSK_RMTFLT, !enable,
   16.40 +			     XM_MSK_LCLFLT, !enable);
   16.41 +	efx->mac_op->mac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0);
   16.42 +}
   16.43 +
   16.44  static int falcon_init_xmac(struct efx_nic *efx)
   16.45  {
   16.46  	int rc;
   16.47 @@ -257,6 +294,7 @@ static int falcon_init_xmac(struct efx_n
   16.48  	if (rc)
   16.49  		goto fail2;
   16.50  
   16.51 +	falcon_mask_status_intr(efx, 1);
   16.52  	return 0;
   16.53  
   16.54   fail2:
   16.55 @@ -269,9 +307,7 @@ static int falcon_init_xmac(struct efx_n
   16.56  int falcon_xaui_link_ok(struct efx_nic *efx)
   16.57  {
   16.58  	efx_dword_t reg;
   16.59 -	int align_done;
   16.60 -	int sync_status;
   16.61 -	int link_ok = 0;
   16.62 +	int align_done, sync_status, link_ok = 0;
   16.63  
   16.64  	/* If we're in internal loopback, then the link is up.
   16.65  	 * The A1 FPGA/4G has RX and TX XAUI wired together, so the link is up.
   16.66 @@ -292,13 +328,25 @@ int falcon_xaui_link_ok(struct efx_nic *
   16.67  	EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
   16.68  	EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
   16.69  	EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
   16.70 +	efx->mac_op->mac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
   16.71  
   16.72 -	efx->mac_op->mac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
   16.73 +	/* If the link is up, then check the phy side of the xaui link
    16.74 + * (error conditions from the wire side propagate back through
   16.75 +	 * the phy to the xaui side). */
   16.76 +	if (efx->link_up && link_ok) {
   16.77 +		int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
   16.78 +		if (has_phyxs)
   16.79 +			link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
   16.80 +	}
   16.81 +
   16.82 +	/* If the PHY and XAUI links are up, then check the mac's xgmii
   16.83 +	 * fault state */
   16.84 +	if (efx->link_up && link_ok)
   16.85 +		link_ok = falcon_xgmii_status(efx);
   16.86  
   16.87  	return link_ok;
   16.88  }
   16.89  
   16.90 -/* Do most of the heavy lifting of falcon_reconfigure_xmac */
   16.91  static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
   16.92  {
   16.93  	unsigned int max_frame_len;
   16.94 @@ -367,7 +415,7 @@ static void falcon_reconfigure_xmac_core
   16.95  	efx->mac_op->mac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
   16.96  
   16.97  	/* Handle B0 FPGA loopback where RAMBUS XGXS block not present */
   16.98 -	if (FALCON_REV(efx) == FALCON_REV_B0 && !efx->is_asic) {
   16.99 +	if (FALCON_REV(efx) >= FALCON_REV_B0 && !efx->is_asic) {
  16.100  		int xgmii_loopback =
  16.101  			(efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
  16.102  
  16.103 @@ -378,7 +426,6 @@ static void falcon_reconfigure_xmac_core
  16.104  	}
  16.105  }
  16.106  
  16.107 -/* Do most of the heavy lifting of falcon_reconfigure_xmac */
  16.108  static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
  16.109  {
  16.110  	efx_dword_t reg;
  16.111 @@ -387,7 +434,7 @@ static void falcon_reconfigure_xgxs_core
  16.112  	int xgmii_loopback =
  16.113  		(efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
  16.114  
  16.115 -	if (FALCON_REV(efx) == FALCON_REV_B0 && !efx->is_asic)
  16.116 +	if (FALCON_REV(efx) >= FALCON_REV_B0 && !efx->is_asic)
  16.117  		/* RAMBUS XGXS block is not present */
  16.118  		return;
  16.119  
  16.120 @@ -442,52 +489,46 @@ static void falcon_reconfigure_xgxs_core
  16.121  }
  16.122  
  16.123  
  16.124 -/* Sometimes the XAUI link between Falcon and XFP fails to come up. The state
  16.125 - * of the link is checked during phy_reconfigure(). After XAIU is reset then
  16.126 - * the MAC must be reconfigured.
  16.127 - */
  16.128 -#define MAX_XAUI_TRIES (5)	/* It's never been seen to take more than 2 */
  16.129 -
  16.130 -void falcon_check_xaui_link_up(struct efx_nic *efx)
   16.131 +/* Try to bring the Falcon side of the Falcon-Phy XAUI link back up
   16.132 + * if it fails to come up. Bash it until it does. */
  16.133 +static int falcon_check_xaui_link_up(struct efx_nic *efx)
  16.134  {
  16.135  	int max_tries, tries;
  16.136 -	tries = EFX_WORKAROUND_5147(efx) ? MAX_XAUI_TRIES : 1;
  16.137 +	tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
  16.138  	max_tries = tries;
  16.139  
  16.140  	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
  16.141  	    (efx->phy_type == PHY_TYPE_NONE) ||
  16.142  	    !efx->phy_powered)
  16.143 -		return;
  16.144 +		return 0;
  16.145  
  16.146  	while (tries) {
  16.147  		if (falcon_xaui_link_ok(efx))
  16.148 -			return;
  16.149 +			return 1;
  16.150  
  16.151  		EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
  16.152  			__func__, tries);
  16.153  		(void) falcon_reset_xaui(efx);
  16.154 -		/* Cannot use full reconfigure. Need to avoid recursion */
  16.155 -
  16.156 -		/* Give the poor thing time to sort itself out: if we retry
  16.157 -		 * too fast it will never train. */
  16.158  		udelay(200);
  16.159 -
  16.160 -		falcon_reconfigure_xgxs_core(efx);
  16.161 -
  16.162  		tries--;
  16.163  	}
  16.164  
  16.165 -	EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
  16.166 +	EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
  16.167  		max_tries);
  16.168 +	return 0;
  16.169  }
  16.170  
  16.171  static void falcon_reconfigure_xmac(struct efx_nic *efx)
  16.172  {
  16.173 +	int xaui_link_ok;
  16.174 +
  16.175 +	falcon_mask_status_intr(efx, 0);
  16.176 +
  16.177 +	/* Deconfigure the mac wrapper, draining the tx fifo if necessary */
  16.178  	falcon_deconfigure_mac_wrapper(efx);
  16.179  
  16.180 -	/* In internal loopback modes disable transmit */
  16.181 +	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
  16.182  	efx->tx_disabled = LOOPBACK_INTERNAL(efx);
  16.183 -
  16.184  	efx->phy_op->reconfigure(efx);
  16.185  
  16.186  	falcon_reconfigure_xgxs_core(efx);
  16.187 @@ -496,8 +537,11 @@ static void falcon_reconfigure_xmac(stru
  16.188  	/* Reconfigure MAC wrapper */
  16.189  	falcon_reconfigure_mac_wrapper(efx);
  16.190  
  16.191 -	/* Ensure XAUI link is up - might repeat reconfigure_xmac_core */
  16.192 -	falcon_check_xaui_link_up(efx);
  16.193 +	/* Ensure XAUI link is up */
  16.194 +	xaui_link_ok = falcon_check_xaui_link_up(efx);
  16.195 +
  16.196 +	if (xaui_link_ok && efx->link_up)
  16.197 +		falcon_mask_status_intr(efx, 1);
  16.198  }
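
The reconfigure path now brackets the whole operation with the fault interrupt masked, re-arming it only once both the XAUI link and the logical link check out; a fault latched mid-reconfigure would otherwise fire spuriously. Condensed, the shape is:

	falcon_mask_status_intr(efx, 0);	/* quiesce fault interrupt */
	/* ... drain, reconfigure PHY, XGXS and MAC wrapper ... */
	if (falcon_check_xaui_link_up(efx) && efx->link_up)
		falcon_mask_status_intr(efx, 1);	/* re-arm when healthy */
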
  16.199  
  16.200  static void falcon_fini_xmac(struct efx_nic *efx)
  16.201 @@ -576,29 +620,28 @@ static void falcon_update_stats_xmac(str
  16.202  
  16.203  static int falcon_check_xmac(struct efx_nic *efx)
  16.204  {
  16.205 -	unsigned link_ok, phyxs_ok = 1;
  16.206 -	unsigned has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
  16.207 -
  16.208 -	/* Check the remote XAUI link status */
  16.209 -	link_ok = falcon_xaui_link_ok(efx);
  16.210 +	unsigned xaui_link_ok;
  16.211 +	int rc;
  16.212  
  16.213  	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
  16.214 +	    (efx->phy_type == PHY_TYPE_NONE) ||
  16.215  	    !efx->phy_powered)
  16.216  		return 0;
  16.217  
  16.218 -	if (link_ok && has_phyxs && !LOOPBACK_INTERNAL(efx)) {
  16.219 -		/* Does the PHYXS think we have lane sync? */
  16.220 -		phyxs_ok = mdio_clause45_phyxgxs_lane_sync(efx);
  16.221 -	}
  16.222 +	falcon_mask_status_intr(efx, 0);
  16.223 +	xaui_link_ok = falcon_xaui_link_ok(efx);
  16.224  
  16.225 -	if (EFX_WORKAROUND_5147(efx) && (!link_ok || !phyxs_ok)) {
  16.226 +	if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
  16.227  		(void) falcon_reset_xaui(efx);
  16.228 -		falcon_reconfigure_xgxs_core(efx);
  16.229 -	}
  16.230  
  16.231  	/* Call the PHY check_hw routine */
  16.232 -	efx->phy_op->check_hw(efx);
  16.233 -	return 0;
  16.234 +	rc = efx->phy_op->check_hw(efx);
  16.235 +
  16.236 +	/* Unmask interrupt if everything was (and still is) ok */
  16.237 +	if (xaui_link_ok && efx->link_up)
  16.238 +		falcon_mask_status_intr(efx, 1);
  16.239 +
  16.240 +	return rc;
  16.241  }
  16.242  
  16.243  /* Simulate a PHY event */
  16.244 @@ -659,12 +702,10 @@ static int falcon_xmac_set_pause(struct 
  16.245  	reset = ((flow_control & EFX_FC_TX) &&
  16.246  		 !(efx->flow_control & EFX_FC_TX));
  16.247  	if (EFX_WORKAROUND_11482(efx) && reset) {
  16.248 -		if (FALCON_REV(efx) == FALCON_REV_B0) {
  16.249 +		if (FALCON_REV(efx) >= FALCON_REV_B0) {
  16.250  			/* Recover by resetting the EM block */
  16.251 -			mutex_lock(&efx->mac_lock);
  16.252  			if (efx->link_up)
  16.253  				falcon_drain_tx_fifo(efx);
  16.254 -			mutex_unlock(&efx->mac_lock);
  16.255  		} else {
  16.256  			/* Schedule a reset to recover */
  16.257  			efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
    17.1 --- a/drivers/net/sfc/i2c-direct.c	Tue Mar 31 11:49:12 2009 +0100
    17.2 +++ b/drivers/net/sfc/i2c-direct.c	Tue Mar 31 11:59:10 2009 +0100
    17.3 @@ -25,13 +25,13 @@
    17.4   ****************************************************************************
    17.5   */
    17.6  
    17.7 -#include <asm/io.h>
    17.8  #include <linux/delay.h>
    17.9  #include "net_driver.h"
   17.10  #include "i2c-direct.h"
   17.11  
   17.12 -/* EEPROM access via I2C
   17.13 - * data (SDA) and clock (SCL) line read/writes
   17.14 +/*
   17.15 + * I2C data (SDA) and clock (SCL) line read/writes with appropriate
   17.16 + * delays.
   17.17   */
   17.18  
   17.19  static inline void setsda(struct efx_i2c_interface *i2c, int state)
   17.20 @@ -79,7 +79,7 @@ static inline void i2c_release(struct ef
   17.21  {
   17.22  	EFX_WARN_ON_PARANOID(!i2c->scl);
   17.23  	EFX_WARN_ON_PARANOID(!i2c->sda);
   17.24 -	/* Just in case */
   17.25 +	/* Devices may time out if operations do not end */
   17.26  	setscl(i2c, 1);
   17.27  	setsda(i2c, 1);
   17.28  	EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
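
i2c_release() parks both lines high, the I2C bus idle state; a line left low would wedge other devices on the bus. For contrast, a start condition is SDA falling while SCL is held high, which with these helpers would look like the sketch below (the file's real start/stop helpers sit outside this hunk):

	/* START: SDA falls while SCL is high (illustrative). */
	setscl(i2c, 1);
	setsda(i2c, 0);
	setscl(i2c, 0);	/* claim the bus */
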
    18.1 --- a/drivers/net/sfc/kernel_compat.c	Tue Mar 31 11:49:12 2009 +0100
    18.2 +++ b/drivers/net/sfc/kernel_compat.c	Tue Mar 31 11:59:10 2009 +0100
    18.3 @@ -25,8 +25,6 @@
    18.4   ****************************************************************************
    18.5   */
    18.6  
    18.7 -#define EFX_IN_KCOMPAT_C 1
    18.8 -
    18.9  #include "net_driver.h"
   18.10  #include <linux/mii.h>
   18.11  #include <linux/ethtool.h>
   18.12 @@ -47,514 +45,22 @@
   18.13  
   18.14  /**************************************************************************
   18.15   *
   18.16 - * GMII-friendly versions of mii_ethtool_[gs]set
   18.17 - *
   18.18 - **************************************************************************
   18.19 - *
   18.20 - * Kernels prior to 2.6.12 don't support GMII PHYs via
   18.21 - * mii_ethtool_gset and mii_ethtool_sset.  These are those functions
   18.22 - * taken from a 2.6.12 kernel tree, with the tests for
   18.23 - * mii->supports_gmii removed (since that field doesn't exist in older
   18.24 - * kernels).
   18.25 - *
   18.26 - */
   18.27 -
   18.28 -#ifdef EFX_NEED_MII_ETHTOOL_FIX
   18.29 -int efx_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
   18.30 -{
   18.31 -	struct net_device *dev = mii->dev;
   18.32 -	u32 advert, bmcr, lpa, nego;
   18.33 -	u32 advert2 = 0, bmcr2 = 0, lpa2 = 0;
   18.34 -
   18.35 -	ecmd->supported =
   18.36 -	    (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
   18.37 -	     SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
   18.38 -	     SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
   18.39 -	ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
   18.40 -
   18.41 -	/* only supports twisted-pair */
   18.42 -	ecmd->port = PORT_MII;
   18.43 -
   18.44 -	/* only supports internal transceiver */
   18.45 -	ecmd->transceiver = XCVR_INTERNAL;
   18.46 -
   18.47 -	/* this isn't fully supported at higher layers */
   18.48 -	ecmd->phy_address = mii->phy_id;
   18.49 -
   18.50 -	ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
   18.51 -	advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
   18.52 -	advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
   18.53 -
   18.54 -	if (advert & ADVERTISE_10HALF)
   18.55 -		ecmd->advertising |= ADVERTISED_10baseT_Half;
   18.56 -	if (advert & ADVERTISE_10FULL)
   18.57 -		ecmd->advertising |= ADVERTISED_10baseT_Full;
   18.58 -	if (advert & ADVERTISE_100HALF)
   18.59 -		ecmd->advertising |= ADVERTISED_100baseT_Half;
   18.60 -	if (advert & ADVERTISE_100FULL)
   18.61 -		ecmd->advertising |= ADVERTISED_100baseT_Full;
   18.62 -	if (advert2 & ADVERTISE_1000HALF)
   18.63 -		ecmd->advertising |= ADVERTISED_1000baseT_Half;
   18.64 -	if (advert2 & ADVERTISE_1000FULL)
   18.65 -		ecmd->advertising |= ADVERTISED_1000baseT_Full;
   18.66 -
   18.67 -	bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
   18.68 -	lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
   18.69 -	bmcr2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
   18.70 -	lpa2 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
   18.71 -	if (bmcr & BMCR_ANENABLE) {
   18.72 -		ecmd->advertising |= ADVERTISED_Autoneg;
   18.73 -		ecmd->autoneg = AUTONEG_ENABLE;
   18.74 -
   18.75 -		nego = mii_nway_result(advert & lpa);
   18.76 -		if ((bmcr2 & (ADVERTISE_1000HALF | ADVERTISE_1000FULL)) &
   18.77 -		    (lpa2 >> 2))
   18.78 -			ecmd->speed = SPEED_1000;
   18.79 -		else if (nego == LPA_100FULL || nego == LPA_100HALF)
   18.80 -			ecmd->speed = SPEED_100;
   18.81 -		else
   18.82 -			ecmd->speed = SPEED_10;
   18.83 -		if ((lpa2 & LPA_1000FULL) || nego == LPA_100FULL ||
   18.84 -		    nego == LPA_10FULL) {
   18.85 -			ecmd->duplex = DUPLEX_FULL;
   18.86 -			mii->full_duplex = 1;
   18.87 -		} else {
   18.88 -			ecmd->duplex = DUPLEX_HALF;
   18.89 -			mii->full_duplex = 0;
   18.90 -		}
   18.91 -	} else {
   18.92 -		ecmd->autoneg = AUTONEG_DISABLE;
   18.93 -
   18.94 -		ecmd->speed = ((bmcr & BMCR_SPEED1000 &&
   18.95 -				(bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 :
   18.96 -			       (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10);
   18.97 -		ecmd->duplex =
   18.98 -			(bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
   18.99 -	}
  18.100 -
  18.101 -	/* ignore maxtxpkt, maxrxpkt for now */
  18.102 -
  18.103 -	return 0;
  18.104 -}
  18.105 -
  18.106 -int efx_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
  18.107 -{
  18.108 -	struct net_device *dev = mii->dev;
  18.109 -
  18.110 -	if (ecmd->speed != SPEED_10 &&
  18.111 -	    ecmd->speed != SPEED_100 &&
  18.112 -	    ecmd->speed != SPEED_1000)
  18.113 -		return -EINVAL;
  18.114 -	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
  18.115 -		return -EINVAL;
  18.116 -	if (ecmd->port != PORT_MII)
  18.117 -		return -EINVAL;
  18.118 -	if (ecmd->transceiver != XCVR_INTERNAL)
  18.119 -		return -EINVAL;
  18.120 -	if (ecmd->phy_address != mii->phy_id)
  18.121 -		return -EINVAL;
  18.122 -	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
  18.123 -		return -EINVAL;
  18.124 -
  18.125 -	/* ignore supported, maxtxpkt, maxrxpkt */
  18.126 -
  18.127 -	if (ecmd->autoneg == AUTONEG_ENABLE) {
  18.128 -		u32 bmcr, advert, tmp;
  18.129 -		u32 advert2 = 0, tmp2 = 0;
  18.130 -
  18.131 -		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
  18.132 -					  ADVERTISED_10baseT_Full |
  18.133 -					  ADVERTISED_100baseT_Half |
  18.134 -					  ADVERTISED_100baseT_Full |
  18.135 -					  ADVERTISED_1000baseT_Half |
  18.136 -					  ADVERTISED_1000baseT_Full)) == 0)
  18.137 -			return -EINVAL;
  18.138 -
  18.139 -		/* advertise only what has been requested */
  18.140 -		advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
  18.141 -		tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
  18.142 -		advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
  18.143 -		tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
  18.144 -		if (ecmd->advertising & ADVERTISED_10baseT_Half)
  18.145 -			tmp |= ADVERTISE_10HALF;
  18.146 -		if (ecmd->advertising & ADVERTISED_10baseT_Full)
  18.147 -			tmp |= ADVERTISE_10FULL;
  18.148 -		if (ecmd->advertising & ADVERTISED_100baseT_Half)
  18.149 -			tmp |= ADVERTISE_100HALF;
  18.150 -		if (ecmd->advertising & ADVERTISED_100baseT_Full)
  18.151 -			tmp |= ADVERTISE_100FULL;
  18.152 -		if (ecmd->advertising & ADVERTISED_1000baseT_Half)
  18.153 -			tmp2 |= ADVERTISE_1000HALF;
  18.154 -		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
  18.155 -			tmp2 |= ADVERTISE_1000FULL;
  18.156 -		if (advert != tmp) {
  18.157 -			mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
  18.158 -			mii->advertising = tmp;
  18.159 -		}
  18.160 -		if (advert2 != tmp2)
  18.161 -			mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2);
  18.162 -
  18.163 -		/* turn on autonegotiation, and force a renegotiate */
  18.164 -		bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
  18.165 -		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
  18.166 -		mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
  18.167 -
  18.168 -		mii->force_media = 0;
  18.169 -	} else {
  18.170 -		u32 bmcr, tmp;
  18.171 -
  18.172 -		/* turn off auto negotiation, set speed and duplexity */
  18.173 -		bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
  18.174 -		tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
  18.175 -			       BMCR_SPEED1000 | BMCR_FULLDPLX);
  18.176 -		if (ecmd->speed == SPEED_1000)
  18.177 -			tmp |= BMCR_SPEED1000;
  18.178 -		else if (ecmd->speed == SPEED_100)
  18.179 -			tmp |= BMCR_SPEED100;
  18.180 -		if (ecmd->duplex == DUPLEX_FULL) {
  18.181 -			tmp |= BMCR_FULLDPLX;
  18.182 -			mii->full_duplex = 1;
  18.183 -		} else {
  18.184 -			mii->full_duplex = 0;
  18.185 -		}
  18.186 -		if (bmcr != tmp)
  18.187 -			mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
  18.188 -
  18.189 -		mii->force_media = 1;
  18.190 -	}
  18.191 -	return 0;
  18.192 -}
  18.193 -#endif /* NEED_EFX_MII_ETHTOOL_GSET */
  18.194 -
  18.195 -/**************************************************************************
  18.196 - *
  18.197 - * unregister_netdevice_notifier : Has a race before 2.6.17
  18.198 - *
  18.199 - **************************************************************************
  18.200 - *
  18.201 - */
  18.202 -
  18.203 -#ifdef EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX
  18.204 -/**
  18.205 - * efx_unregister_netdevice_notifier - fixed unregister_netdevice_notifier
  18.206 - * @nb:		notifier to unregister
  18.207 - *
  18.208 - * unregister_netdevice_notifier() does not wait for the notifier
  18.209 - * to be unused before 2.6.17.  This wrapper fixes that.
  18.210 - */
  18.211 -int efx_unregister_netdevice_notifier(struct notifier_block *nb)
  18.212 -{
  18.213 -	int res;
  18.214 -
  18.215 -	res = unregister_netdevice_notifier(nb);
  18.216 -	/* Ensure any outstanding calls complete. */
  18.217 -	rtnl_lock();
  18.218 -	rtnl_unlock();
  18.219 -	return res;
  18.220 -}
  18.221 -#endif /* NEED_EFX_UNREGISTER_NETDEVICE_NOTIFIER */
  18.222 -
  18.223 -/**************************************************************************
  18.224 - *
  18.225 - * IOMMU-locking versions of pci_[un]map_single and
  18.226 - * pci_{alloc,free}_consistent.  See SFC bug 4560.
  18.227 - *
  18.228 - **************************************************************************
  18.229 - *
  18.230 - */
  18.231 -#ifdef EFX_NEED_IOMMU_LOCK
  18.232 -
  18.233 -/*
  18.234 - * efx_use_iommu_lock - IOMMU lock use control
  18.235 - *
  18.236 - * If set to 1, the driver will attempt to mitigate the race condition
  18.237 - * bug around IOMMU accesses in some 2.6 kernels.  If set to 2, the
  18.238 - * driver will use the lock even if it thinks it doesn't need to.
  18.239 - * Note that this is only a best-effort attempt; in particular, we
  18.240 - * cannot do anything about other drivers touching the IOMMU.
  18.241 - */
  18.242 -static unsigned int efx_use_iommu_lock = 1;
  18.243 -EXPORT_SYMBOL(efx_use_iommu_lock);
  18.244 -
  18.245 -/*
  18.246 - * efx_iommu_lock - lock around IOMMU accesses
  18.247 - *
  18.248 - * This spinlock should be held while calling functions that access
  18.249 - * the IOMMU if efx_use_iommu_lock is >= 2.  The efx_pci_*()
  18.250 - * functions do this where possible.
  18.251 - */
  18.252 -static spinlock_t efx_iommu_lock = SPIN_LOCK_UNLOCKED;
  18.253 -EXPORT_SYMBOL(efx_iommu_lock);
  18.254 -
  18.255 -/* Don't use the IOMMU lock if the device can access the whole of memory */
  18.256 -#define EFX_DMA_CONSISTENT(_efx)			\
  18.257 -	(((_efx)->dma_mask >> PAGE_SHIFT) >= max_pfn)
  18.258 -/**
  18.259 - * efx_pci_map_single - map buffer for DMA, under IOMMU lock
  18.260 - * @pci:		PCI device
  18.261 - * @ptr:		Buffer
  18.262 - * @size:		Buffer length
  18.263 - * @direction:		DMA direction
  18.264 - *
  18.265 - * Wrapper for pci_map_single that uses efx_iommu_lock if necessary.
  18.266 - */
  18.267 -dma_addr_t efx_pci_map_single(struct pci_dev *pci, void *ptr, size_t size,
  18.268 -			      int direction)
  18.269 -{
  18.270 -	struct efx_nic *efx = pci_get_drvdata(pci);
  18.271 -	unsigned long flags __attribute__ ((unused));
  18.272 -	dma_addr_t dma_addr;
  18.273 -
  18.274 -	if (unlikely((efx_use_iommu_lock &&
  18.275 -		      (!EFX_NO_IOMMU) &&
  18.276 -		      (!EFX_DMA_CONSISTENT(efx))) ||
  18.277 -		     efx_use_iommu_lock >= 2)) {
  18.278 -		spin_lock_irqsave(&efx_iommu_lock, flags);
  18.279 -		dma_addr = pci_map_single(pci, ptr, size, direction);
  18.280 -		spin_unlock_irqrestore(&efx_iommu_lock, flags);
  18.281 -	} else {
  18.282 -		dma_addr = pci_map_single(pci, ptr, size, direction);
  18.283 -	}
  18.284 -	return dma_addr;
  18.285 -}
  18.286 -
  18.287 -/**
  18.288 - * efx_pci_unmap_single - unmap buffer for DMA, under IOMMU lock
  18.289 - * @pci:		PCI device
  18.290 - * @dma_addr:		DMA address
  18.291 - * @size:		Buffer length
  18.292 - * @direction:		DMA direction
  18.293 - *
  18.294 - * Wrapper for pci_unmap_single that uses efx_iommu_lock if necessary.
  18.295 - */
  18.296 -void efx_pci_unmap_single(struct pci_dev *pci, dma_addr_t dma_addr,
  18.297 -			  size_t size, int direction)
  18.298 -{
  18.299 -	struct efx_nic *efx = pci_get_drvdata(pci);
  18.300 -	unsigned long flags __attribute__ ((unused));
  18.301 -
  18.302 -	if (unlikely((efx_use_iommu_lock &&
  18.303 -		      (!EFX_NO_IOMMU) &&
  18.304 -		      (!EFX_DMA_CONSISTENT(efx))) ||
  18.305 -		     efx_use_iommu_lock >= 2)) {
  18.306 -		spin_lock_irqsave(&efx_iommu_lock, flags);
  18.307 -		pci_unmap_single(pci, dma_addr, size, direction);
  18.308 -		spin_unlock_irqrestore(&efx_iommu_lock, flags);
  18.309 -	} else {
  18.310 -		pci_unmap_single(pci, dma_addr, size, direction);
  18.311 -	}
  18.312 -}
  18.313 -
  18.314 -/**
  18.315 - * efx_pci_alloc_consistent - allocate DMA-consistent buffer, under IOMMU lock
  18.316 - * @pci:		PCI device
  18.317 - * @size:		Buffer length
  18.318 - * @dma_addr:		DMA address
  18.319 - *
  18.320 - * Wrapper for pci_alloc_consistent that uses efx_iommu_lock if necessary.
  18.321 - *
  18.322 - * Bugs: Currently this can't use the spinlock because
  18.323 - *	pci_alloc_consistent may block.
  18.324 - */
  18.325 -void *efx_pci_alloc_consistent(struct pci_dev *pci, size_t size,
  18.326 -			       dma_addr_t *dma_addr)
  18.327 -{
  18.328 -	return pci_alloc_consistent(pci, size, dma_addr);
  18.329 -}
  18.330 -
  18.331 -/**
  18.332 - * efx_pci_free_consistent - free DMA-consistent buffer, under IOMMU lock
  18.333 - * @pci:		PCI device
  18.334 - * @size:		Buffer length
  18.335 - * @ptr:		Buffer
  18.336 - * @dma_addr:		DMA address
  18.337 - *
  18.338 - * Wrapper for pci_free_consistent that uses efx_iommu_lock if necessary.
  18.339 - */
  18.340 -void efx_pci_free_consistent(struct pci_dev *pci, size_t size, void *ptr,
  18.341 -			     dma_addr_t dma_addr)
  18.342 -{
  18.343 -	struct efx_nic *efx = pci_get_drvdata(pci);
  18.344 -	unsigned long flags __attribute__ ((unused));
  18.345 -
  18.346 -	if (unlikely((efx_use_iommu_lock &&
  18.347 -		      (!EFX_NO_IOMMU) &&
  18.348 -		      (!EFX_DMA_CONSISTENT(efx))) ||
  18.349 -		     efx_use_iommu_lock >= 2)) {
  18.350 -		spin_lock_irqsave(&efx_iommu_lock, flags);
  18.351 -		pci_free_consistent(pci, size, ptr, dma_addr);
  18.352 -		spin_unlock_irqrestore(&efx_iommu_lock, flags);
  18.353 -	} else {
  18.354 -		pci_free_consistent(pci, size, ptr, dma_addr);
  18.355 -	}
  18.356 -}
  18.357 -
  18.358 -module_param(efx_use_iommu_lock, uint, 0644);
  18.359 -MODULE_PARM_DESC(efx_use_iommu_lock, "Enable lock for bug in free_iommu");
  18.360 -
  18.361 -#endif
  18.362 -
  18.363 -#ifdef EFX_NEED_COMPOUND_PAGE_FIX
  18.364 -
  18.365 -void efx_compound_page_destructor(struct page *page)
  18.366 -{
  18.367 -	/* Fake up page state to keep __free_pages happy */
  18.368 -	set_page_count(page, 1);
  18.369 -	page[1].mapping = NULL;
  18.370 -
  18.371 -	__free_pages(page, (unsigned long)page[1].index);
  18.372 -}
  18.373 -
  18.374 -#endif /* NEED_COMPOUND_PAGE_FIX */
  18.375 -
  18.376 -/**************************************************************************
  18.377 - *
  18.378 - * print_hex_dump, taken from lib/hexdump.c.
  18.379 - *
  18.380 - **************************************************************************
  18.381 - *
  18.382 - */
  18.383 -#ifdef EFX_NEED_HEX_DUMP
  18.384 -
  18.385 -#define hex_asc(x)	"0123456789abcdef"[x]
  18.386 -#define isascii(c) (((unsigned char)(c))<=0x7f)
  18.387 -
  18.388 -static void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
  18.389 -			       int groupsize, char *linebuf, size_t linebuflen,
  18.390 -			       int ascii)
  18.391 -{
  18.392 -        const u8 *ptr = buf;
  18.393 -        u8 ch;
  18.394 -        int j, lx = 0;
  18.395 -        int ascii_column;
  18.396 -
  18.397 -        if (rowsize != 16 && rowsize != 32)
  18.398 -                rowsize = 16;
  18.399 -
  18.400 -        if (!len)
  18.401 -                goto nil;
  18.402 -        if (len > rowsize)              /* limit to one line at a time */
  18.403 -                len = rowsize;
  18.404 -        if ((len % groupsize) != 0)     /* no mixed size output */
  18.405 -                groupsize = 1;
  18.406 -
  18.407 -        switch (groupsize) {
  18.408 -        case 8: {
  18.409 -                const u64 *ptr8 = buf;
  18.410 -                int ngroups = len / groupsize;
  18.411 -
  18.412 -                for (j = 0; j < ngroups; j++)
  18.413 -                        lx += scnprintf(linebuf + lx, linebuflen - lx,
  18.414 -				"%16.16llx ", (unsigned long long)*(ptr8 + j));
  18.415 -                ascii_column = 17 * ngroups + 2;
  18.416 -                break;
  18.417 -        }
  18.418 -
  18.419 -        case 4: {
  18.420 -                const u32 *ptr4 = buf;
  18.421 -                int ngroups = len / groupsize;
  18.422 -
  18.423 -                for (j = 0; j < ngroups; j++)
  18.424 -                        lx += scnprintf(linebuf + lx, linebuflen - lx,
  18.425 -				"%8.8x ", *(ptr4 + j));
  18.426 -                ascii_column = 9 * ngroups + 2;
  18.427 -                break;
  18.428 -        }
  18.429 -
  18.430 -        case 2: {
  18.431 -                const u16 *ptr2 = buf;
  18.432 -                int ngroups = len / groupsize;
  18.433 -
  18.434 -                for (j = 0; j < ngroups; j++)
  18.435 -                        lx += scnprintf(linebuf + lx, linebuflen - lx,
  18.436 -				"%4.4x ", *(ptr2 + j));
  18.437 -                ascii_column = 5 * ngroups + 2;
  18.438 -                break;
  18.439 -        }
  18.440 -
  18.441 -        default:
  18.442 -                for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
  18.443 -                     j++) {
  18.444 -                        ch = ptr[j];
  18.445 -                        linebuf[lx++] = hex_asc(ch >> 4);
  18.446 -                        linebuf[lx++] = hex_asc(ch & 0x0f);
  18.447 -                        linebuf[lx++] = ' ';
  18.448 -                }
  18.449 -                ascii_column = 3 * rowsize + 2;
  18.450 -                break;
  18.451 -        }
  18.452 -        if (!ascii)
  18.453 -                goto nil;
  18.454 -
  18.455 -        while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
  18.456 -                linebuf[lx++] = ' ';
  18.457 -	/* Removed is_print() check */
  18.458 -        for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
  18.459 -                linebuf[lx++] = isascii(ptr[j]) ? ptr[j] : '.';
  18.460 -nil:
  18.461 -        linebuf[lx++] = '\0';
  18.462 -}
  18.463 -
  18.464 -void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
  18.465 -		    int rowsize, int groupsize,
  18.466 -		    const void *buf, size_t len, int ascii)
  18.467 -{
  18.468 -        const u8 *ptr = buf;
  18.469 -        int i, linelen, remaining = len;
  18.470 -        char linebuf[200];
  18.471 -
  18.472 -        if (rowsize != 16 && rowsize != 32)
  18.473 -                rowsize = 16;
  18.474 -
  18.475 -        for (i = 0; i < len; i += rowsize) {
  18.476 -                linelen = min(remaining, rowsize);
  18.477 -                remaining -= rowsize;
  18.478 -                hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
  18.479 -				   linebuf, sizeof(linebuf), ascii);
  18.480 -
  18.481 -                switch (prefix_type) {
  18.482 -                case DUMP_PREFIX_ADDRESS:
  18.483 -                        printk("%s%s%*p: %s\n", level, prefix_str,
  18.484 -			       (int)(2 * sizeof(void *)), ptr + i, linebuf);
  18.485 -                        break;
  18.486 -                case DUMP_PREFIX_OFFSET:
  18.487 -                        printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
  18.488 -                        break;
  18.489 -                default:
  18.490 -                        printk("%s%s%s\n", level, prefix_str, linebuf);
  18.491 -                        break;
  18.492 -                }
  18.493 -        }
  18.494 -}
  18.495 -
  18.496 -#endif /* EFX_NEED_HEX_DUMP */
  18.497 -
  18.498 -/**************************************************************************
  18.499 - *
  18.500   * print_mac, from net/ethernet/eth.c in v2.6.24
  18.501   *
  18.502   **************************************************************************
  18.503   *
  18.504   */
  18.505 -#ifdef EFX_NEED_PRINT_MAC
  18.506  char *print_mac(char *buf, const u8 *addr)
  18.507  {
  18.508          sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
  18.509                  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
  18.510          return buf;
  18.511  }
  18.512 -#endif /* EFX_NEED_PRINT_MAC */
  18.513  
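
With its #ifdef removed, print_mac() is now built unconditionally. A typical call site formats into a caller-supplied buffer sized for the canonical xx:xx:xx:xx:xx:xx form (a sketch; net_dev stands in for whatever device is at hand):

	char mac[18];	/* "xx:xx:xx:xx:xx:xx" + NUL */
	printk(KERN_INFO "MAC address %s\n",
	       print_mac(mac, net_dev->dev_addr));
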
  18.514  #ifdef EFX_NEED_CSUM_TCPUDP_NOFOLD
  18.515 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
  18.516 -__wsum
  18.517 -csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
  18.518 -		    unsigned short proto, __wsum sum)
  18.519 -#else
  18.520  __wsum
  18.521  csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr,
  18.522  		    unsigned short len, unsigned short proto, __wsum sum)
  18.523 -#endif
  18.524  {
  18.525  	unsigned long result;
  18.526  
  18.527 @@ -570,85 +76,3 @@ csum_tcpudp_nofold (unsigned long saddr,
  18.528  
  18.529  }
  18.530  #endif /* EFX_NEED_CSUM_TCPUDP_NOFOLD */
  18.531 -
  18.532 -#ifdef EFX_NEED_RANDOM_ETHER_ADDR
  18.533 -/* Generate random MAC address */
  18.534 -void efx_random_ether_addr(uint8_t *addr) {
  18.535 -        get_random_bytes (addr, ETH_ALEN);
  18.536 -	addr [0] &= 0xfe;       /* clear multicast bit */
  18.537 -	addr [0] |= 0x02;       /* set local assignment bit (IEEE802) */
  18.538 -}
  18.539 -#endif /* EFX_NEED_RANDOM_ETHER_ADDR */
  18.540 -
  18.541 -#ifdef EFX_NEED_MSECS_TO_JIFFIES
  18.542 -/*
  18.543 - * When we convert to jiffies then we interpret incoming values
  18.544 - * the following way:
  18.545 - *
  18.546 - * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
  18.547 - *
  18.548 - * - 'too large' values [that would result in larger than
  18.549 - *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
  18.550 - *
  18.551 - * - all other values are converted to jiffies by either multiplying
  18.552 - *   the input value by a factor or dividing it with a factor
  18.553 - *
  18.554 - * We must also be careful about 32-bit overflows.
  18.555 - */
  18.556 -#ifndef MSEC_PER_SEC
  18.557 -#define MSEC_PER_SEC	1000L
  18.558 -#endif
  18.559 -unsigned long msecs_to_jiffies(const unsigned int m)
  18.560 -{
  18.561 -	/*
  18.562 -	 * Negative value, means infinite timeout:
  18.563 -	 */
  18.564 -	if ((int)m < 0)
  18.565 -		return MAX_JIFFY_OFFSET;
  18.566 -
  18.567 -#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
  18.568 -	/*
  18.569 -	 * HZ is equal to or smaller than 1000, and 1000 is a nice
  18.570 -	 * round multiple of HZ, divide with the factor between them,
  18.571 -	 * but round upwards:
  18.572 -	 */
  18.573 -	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
  18.574 -#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
  18.575 -	/*
  18.576 -	 * HZ is larger than 1000, and HZ is a nice round multiple of
  18.577 -	 * 1000 - simply multiply with the factor between them.
  18.578 -	 *
  18.579 -	 * But first make sure the multiplication result cannot
  18.580 -	 * overflow:
  18.581 -	 */
  18.582 -	if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
  18.583 -		return MAX_JIFFY_OFFSET;
  18.584 -
  18.585 -	return m * (HZ / MSEC_PER_SEC);
  18.586 -#else
  18.587 -	/*
  18.588 -	 * Generic case - multiply, round and divide. But first
  18.589 -	 * check that if we are doing a net multiplication, that
  18.590 -	 * we wouldnt overflow:
  18.591 -	 */
  18.592 -	if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
  18.593 -		return MAX_JIFFY_OFFSET;
  18.594 -
  18.595 -	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
  18.596 -#endif
  18.597 -}
  18.598 -#endif /* EFX_NEED_MSECS_TO_JIFFIES */
  18.599 -
  18.600 -#ifdef EFX_NEED_MSLEEP
  18.601 -/**
  18.602 - * msleep - sleep safely even with waitqueue interruptions
  18.603 - * @msecs: Time in milliseconds to sleep for
  18.604 - */
  18.605 -void msleep(unsigned int msecs)
  18.606 -{
  18.607 -	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
  18.608 -
  18.609 -	while (timeout)
  18.610 -		timeout = schedule_timeout_uninterruptible(timeout);
  18.611 -}
  18.612 -#endif
    19.1 --- a/drivers/net/sfc/kernel_compat.h	Tue Mar 31 11:49:12 2009 +0100
    19.2 +++ b/drivers/net/sfc/kernel_compat.h	Tue Mar 31 11:59:10 2009 +0100
    19.3 @@ -37,406 +37,32 @@
    19.4  #include <linux/interrupt.h>
    19.5  #include <linux/skbuff.h>
    19.6  #include <linux/netdevice.h>
    19.7 -
    19.8 -#include "extraversion.h"
    19.9 +#include <linux/rtnetlink.h>
   19.10  
   19.11  /*
   19.12   * Kernel backwards compatibility
   19.13   *
   19.14 - * This file provides macros that enable the driver to be compiled on
   19.15 - * any kernel from 2.6.9 onward (plus SLES 9 2.6.5), without requiring
   19.16 - * explicit version tests scattered throughout the code.
   19.17 - */
   19.18 -
   19.19 -/**************************************************************************
   19.20 - *
   19.21 - * Version/config/architecture tests to set feature flags
   19.22 - *
   19.23 - **************************************************************************
   19.24 - *
   19.25 - * NOTE: For simplicity, these initial version tests cover kernel.org
   19.26 - * releases only.  Backported features in "enterprise" kernels are
   19.27 - * handled further down.
   19.28 + * This file provides macros to facilitate backporting the driver.
   19.29   */
   19.30  
   19.31 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) &&	 \
   19.32 -	!(defined(EFX_DIST_SUSE) &&			 \
   19.33 -	  LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5) && \
   19.34 -	  EFX_DIST_KVER_LEVEL_1 == 7)
   19.35 -	#error "This kernel version is now unsupported"
   19.36 -#endif
   19.37 -
   19.38 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)
   19.39 -	#define EFX_NEED_RANDOM_ETHER_ADDR yes
   19.40 -#endif
   19.41 -
   19.42 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
   19.43 -	#define EFX_NEED_I2C_CLASS_HWMON yes
   19.44 -	#define EFX_NEED_IF_MII yes
   19.45 -	#define EFX_NEED_MSLEEP yes
   19.46 -	#define EFX_NEED_MSECS_TO_JIFFIES yes
   19.47 -#endif
   19.48 -
   19.49 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,8)
   19.50 -	#define EFX_USE_MTD_ERASE_FAIL_ADDR yes
   19.51 -#else
   19.52 -	#define EFX_NEED_MTD_ERASE_CALLBACK yes
   19.53 -	#define EFX_NEED_DUMMY_PCI_DISABLE_MSI yes
   19.54 -	#define EFX_NEED_DUMMY_MSIX yes
   19.55 -#endif
   19.56 -
   19.57 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
   19.58 -	#define EFX_NEED_BYTEORDER_TYPES yes
   19.59 -#endif
   19.60 -
   19.61 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
   19.62 -	#define EFX_NEED_MMIOWB yes
   19.63 -	#define EFX_NEED_PCI_SAVE_RESTORE_WRAPPERS yes
   19.64 -#endif
   19.65 -
   19.66 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
   19.67 -	#define EFX_NEED_DUMMY_SUPPORTS_GMII yes
   19.68 -	#define EFX_NEED_MII_CONSTANTS yes
   19.69 -	#define EFX_NEED_MII_ETHTOOL_FIX yes
   19.70 -	#define EFX_HAVE_MSIX_TABLE_RESERVED yes
   19.71 -#endif
   19.72 -
   19.73 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
   19.74 -	#define EFX_NEED_SCHEDULE_TIMEOUT_INTERRUPTIBLE yes
   19.75 -	#define EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE yes
   19.76 -	#define EFX_NEED_GFP_T yes
   19.77 -	#define EFX_NEED_KZALLOC yes
   19.78 -#endif
   19.79 -
   19.80 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
   19.81 -	#define EFX_NEED_SETUP_TIMER yes
   19.82 -	#ifdef CONFIG_HUGETLB_PAGE
   19.83 -		#define EFX_USE_COMPOUND_PAGES yes
   19.84 -	#endif
   19.85 -#else
   19.86 -	#define EFX_USE_COMPOUND_PAGES yes
   19.87 -#endif
   19.88 -
   19.89 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
   19.90 -	#define EFX_NEED_MUTEX yes
   19.91 -	#define EFX_NEED_SAFE_LISTS yes
   19.92 -	#ifdef EFX_USE_COMPOUND_PAGES
   19.93 -		#define EFX_NEED_COMPOUND_PAGE_FIX yes
   19.94 -	#endif
   19.95 -#endif
   19.96 -
   19.97 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
   19.98 -	#define EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX yes
   19.99 -	#define EFX_NEED_DEV_NOTICE yes
  19.100 +#ifdef __ia64__
  19.101 +	/* csum_tcpudp_nofold() is extern but not exported */
  19.102 +	#define EFX_NEED_CSUM_TCPUDP_NOFOLD yes
  19.103  #endif
  19.104 -
  19.105 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
  19.106 -	#define EFX_NEED_IRQF_FLAGS yes
  19.107 -	#define EFX_NEED_NETDEV_ALLOC_SKB yes
  19.108 -	/* Fedora backported 2.6.18 netdevice.h changes */
  19.109 -	#ifndef NETIF_F_GSO
  19.110 -		#define EFX_NEED_NETIF_TX_LOCK yes
  19.111 -	#endif
  19.112 -#else
  19.113 -	#define EFX_USE_MTD_WRITESIZE yes
  19.114 -#endif
  19.115 -
  19.116 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
  19.117 -	#define EFX_NEED_IRQ_HANDLER_T yes
  19.118 -	#define EFX_HAVE_IRQ_HANDLER_REGS yes
  19.119 -#endif
  19.120 -
  19.121 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
  19.122 -	#define EFX_NEED_WORK_API_WRAPPERS yes
  19.123 -	#define EFX_USE_FASTCALL yes
  19.124 -	#define EFX_NEED_CSUM_UNFOLDED yes
  19.125 -#endif
  19.126 -
  19.127 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
  19.128 -	/*
  19.129 -	 * debugfs was introduced earlier, but only supports sym-links
  19.130 -	 * from 2.6.21
  19.131 -	 */
  19.132 -	#ifdef CONFIG_DEBUG_FS
  19.133 -		#define EFX_USE_DEBUGFS yes
  19.134 -	#endif
  19.135 -#endif
  19.136 -
  19.137 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
  19.138 -	#define EFX_NEED_SKB_HEADER_MACROS yes
  19.139 -	#define EFX_NEED_HEX_DUMP yes
  19.140 -#else
  19.141 -	#define EFX_USE_CANCEL_WORK_SYNC yes
  19.142 -#endif
  19.143 -
  19.144 -#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,22)
  19.145 -	#define EFX_NEED_HEX_DUMP_CONST_FIX yes
  19.146 -#endif
  19.147 -
  19.148 -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) && \
  19.149 -    (LINUX_VERSION_CODE <  KERNEL_VERSION(2,6,23))
  19.150 -	#define EFX_USE_ETHTOOL_GET_PERM_ADDR yes
  19.151 -#endif
  19.152 -
  19.153 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
  19.154 -	#ifdef __ia64__
  19.155 -		/* csum_tcpudp_nofold() is extern but not exported */
  19.156 -		#define EFX_NEED_CSUM_TCPUDP_NOFOLD yes
  19.157 -	#endif
  19.158 -#else
  19.159 -	#define EFX_USE_PCI_DEV_REVISION yes
  19.160 -	#define EFX_USE_CANCEL_DELAYED_WORK_SYNC yes
  19.161 -#endif
  19.162 -
  19.163 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
  19.164 -	#define EFX_HAVE_OLD_NAPI yes
  19.165 -	#define EFX_NEED_GENERIC_LRO yes
  19.166 -	#define EFX_NEED_PRINT_MAC yes
  19.167 -#else
  19.168 -	#define EFX_USE_ETHTOOL_FLAGS yes
  19.169 -#endif
  19.170 -
  19.171 -/*
  19.172 - * SFC Bug 4560: Some kernels leak IOMMU entries under heavy load.  Use a
  19.173 - * spinlock to serialise access where possible to alleviate the
  19.174 - * problem.
  19.175 - *
  19.176 - * NB. The following definition is duplicated in
  19.177 - * the char driver.  Please keep in sync.
  19.178 - */
  19.179 -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && \
  19.180 -     defined(__x86_64__) && defined(CONFIG_SMP))
  19.181 -	#define EFX_NEED_IOMMU_LOCK yes
  19.182 -	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
  19.183 -		#if defined(CONFIG_GART_IOMMU)
  19.184 -			#define EFX_NO_IOMMU no_iommu
  19.185 -		#else
  19.186 -			#define EFX_NO_IOMMU 1
  19.187 -		#endif
  19.188 -	#else
  19.189 -		#define EFX_NO_IOMMU 0
  19.190 -	#endif
  19.191 -#endif
  19.192 -
  19.193  #ifdef CONFIG_PPC64
  19.194  	/* __raw_writel and friends are broken on ppc64 */
  19.195  	#define EFX_NEED_RAW_READ_AND_WRITE_FIX yes
  19.196  #endif
  19.197  
  19.198 -/**************************************************************************
  19.199 - *
  19.200 - * Exceptions for backported features
  19.201 - *
  19.202 - **************************************************************************
  19.203 - */
  19.204 -
  19.205 -/* RHEL4 */
  19.206 -#if defined(EFX_DIST_RHEL) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)
  19.207 -	#if EFX_DIST_KVER_LEVEL_1 >= 22
  19.208 -		/* linux-2.6.9-mmiowb.patch */
  19.209 -		#undef EFX_NEED_MMIOWB
  19.210 -	#endif
  19.211 -	#if EFX_DIST_KVER_LEVEL_1 >= 34
  19.212 -		/* linux-2.6.9-net-mii-update.patch */
  19.213 -		#undef EFX_NEED_DUMMY_SUPPORTS_GMII
  19.214 -		#undef EFX_NEED_MII_CONSTANTS
  19.215 -		#undef EFX_NEED_MII_ETHTOOL_FIX
  19.216 -		/* linux-2.6.9-gfp_t-typedef.patch */
  19.217 -		#undef EFX_NEED_GFP_T
  19.218 -		/* linux-2.6.9-slab-update.patch */
  19.219 -		#undef EFX_NEED_KZALLOC
  19.220 -	#endif
  19.221 -	#if EFX_DIST_KVER_LEVEL_1 >= 55
  19.222 -		/* linux-2.6.18-sata-update.patch (ported from 2.6.18->2.6.9) */
  19.223 -		#undef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
  19.224 -		#undef EFX_NEED_IRQ_HANDLER_T
  19.225 -	#endif
  19.226 -#endif
  19.227 -
  19.228 -/* RHEL5 */
  19.229 -#if defined(EFX_DIST_RHEL) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
  19.230 -	#if EFX_DIST_KVER_LEVEL_1 >= 53
  19.231 -		/* linux-2.6.18-sata-update.patch */
  19.232 -		#undef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
  19.233 -		#undef EFX_NEED_IRQ_HANDLER_T
  19.234 -	#endif
  19.235 -#endif
  19.236 -
  19.237 -#if defined(EFX_DIST_RHEL)
  19.238 -	#if (LINUX_VERSION_CODE != KERNEL_VERSION(2,6,9)) && \
  19.239 -	     (LINUX_VERSION_CODE != KERNEL_VERSION(2,6,18))
  19.240 -		#error "Unknown Red Hat Enterprise kernel version"
  19.241 -	#endif
  19.242 -#endif
  19.243 -
  19.244 -/* SLES9 */
  19.245 -#if defined(EFX_DIST_SUSE) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5) && \
  19.246 -	EFX_DIST_KVER_LEVEL_1 == 7
  19.247 -	#if EFX_DIST_KVER_LEVEL_2 >= 139
  19.248 -		#undef EFX_NEED_MMIOWB
  19.249 -	#endif
  19.250 -	#if EFX_DIST_KVER_LEVEL_2 >= 191
  19.251 -		#undef EFX_NEED_MSLEEP
  19.252 -		#undef EFX_NEED_MSECS_TO_JIFFIES
  19.253 -	#endif
  19.254 -	#if EFX_DIST_KVER_LEVEL_2 >= 244
  19.255 -		#undef EFX_NEED_BYTEORDER_TYPES
  19.256 -	#endif
  19.257 -	#if EFX_DIST_KVER_LEVEL_2 >= 252
  19.258 -		#undef EFX_NEED_KZALLOC
  19.259 -	#endif
  19.260 -#endif
  19.261 -
  19.262 -/**************************************************************************
  19.263 - *
  19.264 - * Definitions of missing constants, types, functions and macros
  19.265 - *
  19.266 - **************************************************************************
  19.267 - *
  19.268 - */
  19.269 -
  19.270 -#ifndef DMA_40BIT_MASK
  19.271 -	#define DMA_40BIT_MASK	0x000000ffffffffffULL
  19.272 -#endif
  19.273 -
  19.274 -#ifndef spin_trylock_irqsave
  19.275 -	#define spin_trylock_irqsave(lock, flags)	\
  19.276 -	({						\
  19.277 -		local_irq_save(flags);			\
  19.278 -		spin_trylock(lock) ?			\
  19.279 -		1 : ({local_irq_restore(flags); 0;});	\
  19.280 -	})
  19.281 -#endif
  19.282 -
  19.283 -#ifndef raw_smp_processor_id
  19.284 -	#define raw_smp_processor_id() (current_thread_info()->cpu)
  19.285 -#endif
  19.286 -
  19.287 -#ifndef NETIF_F_LRO
  19.288 -	#define NETIF_F_LRO 0
  19.289 -#endif
  19.290 -
  19.291 -/* Cope with small changes in PCI constants between minor kernel revisions */
  19.292 -#if PCI_X_STATUS != 4
  19.293 -	#undef PCI_X_STATUS
  19.294 -	#define PCI_X_STATUS 4
  19.295 -	#undef PCI_X_STATUS_MAX_SPLIT
  19.296 -	#define PCI_X_STATUS_MAX_SPLIT 0x03800000
  19.297 -#endif
  19.298 -
  19.299 -#ifndef PCI_EXP_LNKSTA
  19.300 -	#define PCI_EXP_LNKSTA		18	    /* Link Status */
  19.301 -#endif
  19.302 +typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *);
  19.303  
  19.304 -/* Used for struct pt_regs */
  19.305 -#ifndef regs_return_value
  19.306 -	#if defined(__x86_64__)
  19.307 -		#define regs_return_value(regs) ((regs)->rax)
  19.308 -	#elif defined(__i386__)
  19.309 -		#define regs_return_value(regs) ((regs)->eax)
  19.310 -	#elif defined(__ia64__)
  19.311 -		#define regs_return_value(regs) ((regs)->r8)
  19.312 -	#else
  19.313 -		#error "Need definition for regs_return_value()"
  19.314 -	#endif
  19.315 -#endif
  19.316 -
  19.317 -#ifndef __GFP_COMP
  19.318 -	#define __GFP_COMP 0
  19.319 -#endif
  19.320 -
  19.321 -#ifndef __iomem
  19.322 -	#define __iomem
  19.323 -#endif
  19.324 -
  19.325 -#ifndef NET_IP_ALIGN
  19.326 -	#define NET_IP_ALIGN 2
  19.327 -#endif
  19.328 -
  19.329 -#ifndef PCI_CAP_ID_EXP
  19.330 -#define PCI_CAP_ID_EXP		0x10    /* PCI Express */
  19.331 -#endif
  19.332 -
  19.333 -#ifndef PCI_EXP_FLAGS
  19.334 -#define PCI_EXP_FLAGS           2           /* Capabilities register */
  19.335 -#define PCI_EXP_FLAGS_TYPE      0x00f0      /* Capability version */
  19.336 -#define PCI_EXP_TYPE_ENDPOINT   0x0         /* Express Endpoint */
  19.337 -#define PCI_EXP_TYPE_LEG_END    0x1         /* Legacy Endpoint */
  19.338 -#define PCI_EXP_TYPE_ROOT_PORT  0x4         /* Root Port */
  19.339 -#endif
  19.340 -
  19.341 -#ifndef PCI_EXP_DEVCAP
  19.342 -#define PCI_EXP_DEVCAP          4           /* Device capabilities */
  19.343 -#define PCI_EXP_DEVCAP_PAYLOAD  0x07        /* Max_Payload_Size */
  19.344 -#define PCI_EXP_DEVCAP_PWR_VAL  0x3fc0000   /* Slot Power Limit Value */
  19.345 -#define PCI_EXP_DEVCAP_PWR_SCL  0xc000000   /* Slot Power Limit Scale */
  19.346 -#endif
  19.347 -
  19.348 -#ifndef PCI_EXP_DEVCTL
  19.349 -#define PCI_EXP_DEVCTL          8           /* Device Control */
  19.350 -#define PCI_EXP_DEVCTL_PAYLOAD  0x00e0      /* Max_Payload_Size */
  19.351 -#define PCI_EXP_DEVCTL_READRQ   0x7000      /* Max_Read_Request_Size */
  19.352 -#endif
  19.353 -
  19.354 -#ifndef PCI_EXP_LNKSTA
  19.355 -#define PCI_EXP_LNKSTA		18	    /* Link Status */
  19.356 -#endif
  19.357 +#define skb_mac_header(skb)	(skb)->mac.raw
  19.358 +#define skb_network_header(skb) (skb)->nh.raw
  19.359 +#define eth_hdr(skb)		((struct ethhdr *)skb_mac_header(skb))
  19.360 +#define tcp_hdr(skb)		(skb)->h.th
  19.361 +#define ip_hdr(skb)		(skb)->nh.iph
  19.362 +#define skb_tail_pointer(skb)   (skb)->tail
  19.363  
  19.364 -#ifndef NETDEV_TX_OK
  19.365 -	#define NETDEV_TX_OK 0
  19.366 -#endif
  19.367 -
  19.368 -#ifndef NETDEV_TX_BUSY
  19.369 -	#define NETDEV_TX_BUSY 1
  19.370 -#endif
  19.371 -
  19.372 -#ifndef __force
  19.373 -	#define __force
  19.374 -#endif
  19.375 -
  19.376 -#if ! defined(for_each_cpu_mask) && ! defined(CONFIG_SMP)
  19.377 -	#define for_each_cpu_mask(cpu, mask)            \
  19.378 -		for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
  19.379 -#endif
  19.380 -
  19.381 -/**************************************************************************/
  19.382 -
  19.383 -#ifdef EFX_NEED_IRQ_HANDLER_T
  19.384 -	typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *);
  19.385 -#endif
  19.386 -
  19.387 -#ifdef EFX_NEED_I2C_CLASS_HWMON
  19.388 -	#define I2C_CLASS_HWMON (1<<0)
  19.389 -#endif
  19.390 -
  19.391 -#ifdef EFX_NEED_MII_CONSTANTS
  19.392 -	#define MII_CTRL1000		0x09
  19.393 -	#define MII_STAT1000		0x0a
  19.394 -	#define BMCR_SPEED1000		0x0040
  19.395 -	#define ADVERTISE_PAUSE_ASYM	0x0800
  19.396 -	#define ADVERTISE_PAUSE_CAP	0x0400
  19.397 -	#define LPA_PAUSE_ASYM		0x0800
  19.398 -	#define LPA_PAUSE_CAP		0x0400
  19.399 -	#define ADVERTISE_1000FULL	0x0200
  19.400 -	#define ADVERTISE_1000HALF	0x0100
  19.401 -	#define LPA_1000FULL		0x0800
  19.402 -	#define LPA_1000HALF		0x0400
  19.403 -#endif
  19.404 -
  19.405 -#ifdef EFX_NEED_DUMMY_SUPPORTS_GMII
  19.406 -	#include <linux/mii.h>
  19.407 -	/* Ugly; redirect nonexistent new field to an old unused field */
  19.408 -	#undef supports_gmii
  19.409 -	#define supports_gmii full_duplex
  19.410 -#endif
  19.411 -
  19.412 -#ifdef EFX_NEED_SKB_HEADER_MACROS
  19.413 -	#define skb_mac_header(skb)	(skb)->mac.raw
  19.414 -	#define skb_network_header(skb) (skb)->nh.raw
  19.415 -	#define tcp_hdr(skb)		(skb)->h.th
  19.416 -	#define ip_hdr(skb)		(skb)->nh.iph
  19.417 -	#define skb_tail_pointer(skb)   (skb)->tail
  19.418 -#endif
  19.419  
  19.420  #ifdef EFX_NEED_RAW_READ_AND_WRITE_FIX
  19.421  	#include <asm/io.h>
  19.422 @@ -499,427 +125,60 @@
  19.423  	#define __raw_readq efx_raw_readq
  19.424  #endif
  19.425  
  19.426 -#ifdef EFX_NEED_SCHEDULE_TIMEOUT_INTERRUPTIBLE
  19.427 -	static inline signed long
  19.428 -	schedule_timeout_interruptible(signed long timeout)
  19.429 -	{
  19.430 -		set_current_state(TASK_INTERRUPTIBLE);
  19.431 -		return schedule_timeout(timeout);
  19.432 -	}
  19.433 -#endif
  19.434 -
  19.435 -#ifdef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
  19.436 -	static inline signed long
  19.437 -	schedule_timeout_uninterruptible(signed long timeout)
  19.438 -	{
  19.439 -		set_current_state(TASK_UNINTERRUPTIBLE);
  19.440 -		return schedule_timeout(timeout);
  19.441 -	}
  19.442 -#endif
  19.443 -
  19.444 -#ifdef EFX_NEED_MMIOWB
  19.445 -	#if defined(__i386__) || defined(__x86_64__)
  19.446 -		#define mmiowb()
  19.447 -	#elif defined(__ia64__)
  19.448 -		#ifndef ia64_mfa
  19.449 -			#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
  19.450 -		#endif
  19.451 -		#define mmiowb ia64_mfa
  19.452 -	#else
  19.453 -		#error "Need definition for mmiowb()"
  19.454 -	#endif
  19.455 -#endif
  19.456 -
  19.457 -#ifdef EFX_NEED_KZALLOC
  19.458 -	static inline void *kzalloc(size_t size, int flags)
  19.459 -	{
  19.460 -		void *buf = kmalloc(size, flags);
  19.461 -		if (buf)
  19.462 -			memset(buf, 0,size);
  19.463 -		return buf;
  19.464 -	}
  19.465 -#endif
  19.466 -
  19.467 -#ifdef EFX_NEED_SETUP_TIMER
  19.468 -	static inline void setup_timer(struct timer_list * timer,
  19.469 -				       void (*function)(unsigned long),
  19.470 -				       unsigned long data)
  19.471 -	{
  19.472 -		timer->function = function;
  19.473 -		timer->data = data;
  19.474 -		init_timer(timer);
  19.475 -	}
  19.476 -#endif
  19.477 -
  19.478 -#ifdef EFX_NEED_MUTEX
  19.479 -	#define EFX_DEFINE_MUTEX(x) DECLARE_MUTEX(x)
  19.480 -	#undef DEFINE_MUTEX
  19.481 -	#define DEFINE_MUTEX EFX_DEFINE_MUTEX
  19.482 -
  19.483 -	#define efx_mutex semaphore
  19.484 -	#undef mutex
  19.485 -	#define mutex efx_mutex
  19.486 -
  19.487 -	#define efx_mutex_init(x) init_MUTEX(x)
  19.488 -	#undef mutex_init
  19.489 -	#define mutex_init efx_mutex_init
  19.490 -
  19.491 -	#define efx_mutex_destroy(x) do { } while(0)
  19.492 -	#undef mutex_destroy
  19.493 -	#define mutex_destroy efx_mutex_destroy
  19.494 -
  19.495 -	#define efx_mutex_lock(x) down(x)
  19.496 -	#undef mutex_lock
  19.497 -	#define mutex_lock efx_mutex_lock
  19.498 -
  19.499 -	#define efx_mutex_lock_interruptible(x) down_interruptible(x)
  19.500 -	#undef mutex_lock_interruptible
  19.501 -	#define mutex_lock_interruptible efx_mutex_lock_interruptible
  19.502 -
  19.503 -	#define efx_mutex_unlock(x) up(x)
  19.504 -	#undef mutex_unlock
  19.505 -	#define mutex_unlock efx_mutex_unlock
  19.506 -
  19.507 -	#define efx_mutex_trylock(x) (!down_trylock(x))
  19.508 -	#undef mutex_trylock
  19.509 -	#define mutex_trylock efx_mutex_trylock
  19.510 -
  19.511 -	static inline int efx_mutex_is_locked(struct efx_mutex *m)
  19.512 -	{
  19.513 -		/* NB. This is quite inefficient, but it's the best we
  19.514 -		 * can do with the semaphore API. */
  19.515 -		if ( down_trylock(m) )
  19.516 -			return 1;
  19.517 -		/* Undo the effect of down_trylock. */
  19.518 -		up(m);
  19.519 -		return 0;
  19.520 -	}
  19.521 -	#undef mutex_is_locked
  19.522 -	#define mutex_is_locked efx_mutex_is_locked
  19.523 -#endif
  19.524 -
  19.525 -#ifndef NETIF_F_GSO
  19.526 -	#define efx_gso_size tso_size
  19.527 -	#undef gso_size
  19.528 -	#define gso_size efx_gso_size
  19.529 -	#define efx_gso_segs tso_segs
  19.530 -	#undef gso_segs
  19.531 -	#define gso_segs efx_gso_segs
  19.532 -#endif
  19.533 -
  19.534 -#ifdef EFX_NEED_IRQF_FLAGS
  19.535 -	#ifdef SA_PROBEIRQ
  19.536 -		#define IRQF_PROBE_SHARED  SA_PROBEIRQ
  19.537 -	#else
  19.538 -		#define IRQF_PROBE_SHARED  0
  19.539 -	#endif
  19.540 -	#define IRQF_SHARED	   SA_SHIRQ
  19.541 -#endif
  19.542 -
  19.543 -#ifdef EFX_NEED_NETDEV_ALLOC_SKB
  19.544 -	#ifndef NET_SKB_PAD
  19.545 -		#define NET_SKB_PAD 16
  19.546 -	#endif
  19.547 -
  19.548 -	static inline
  19.549 -	struct sk_buff *netdev_alloc_skb(struct net_device *dev,
  19.550 -					 unsigned int length)
  19.551 -	{
  19.552 -		struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD,
  19.553 -						GFP_ATOMIC | __GFP_COLD);
  19.554 -		if (likely(skb)) {
  19.555 -			skb_reserve(skb, NET_SKB_PAD);
  19.556 -			skb->dev = dev;
  19.557 -		}
  19.558 -		return skb;
  19.559 -	}
  19.560 -#endif
  19.561 -
  19.562 -#ifdef EFX_NEED_NETIF_TX_LOCK
  19.563 -	static inline void netif_tx_lock(struct net_device *dev)
  19.564 -	{
  19.565 -		spin_lock(&dev->xmit_lock);
  19.566 -		dev->xmit_lock_owner = smp_processor_id();
  19.567 -	}
  19.568 -	static inline void netif_tx_lock_bh(struct net_device *dev)
  19.569 -	{
  19.570 -		spin_lock_bh(&dev->xmit_lock);
  19.571 -		dev->xmit_lock_owner = smp_processor_id();
  19.572 -	}
  19.573 -	static inline void netif_tx_unlock_bh(struct net_device *dev)
  19.574 -	{
  19.575 -		dev->xmit_lock_owner = -1;
  19.576 -		spin_unlock_bh(&dev->xmit_lock);
  19.577 -	}
  19.578 -	static inline void netif_tx_unlock(struct net_device *dev)
  19.579 -	{
  19.580 -		dev->xmit_lock_owner = -1;
  19.581 -		spin_unlock(&dev->xmit_lock);
  19.582 -	}
  19.583 -#endif
  19.584 -
  19.585 -#ifdef EFX_NEED_CSUM_UNFOLDED
  19.586 -	typedef u32 __wsum;
  19.587 -	#define csum_unfold(x) ((__force __wsum) x)
  19.588 -#endif
  19.589 -
  19.590 -#ifdef EFX_NEED_HEX_DUMP
  19.591 -	enum {
  19.592 -		DUMP_PREFIX_NONE,
  19.593 -		DUMP_PREFIX_ADDRESS,
  19.594 -		DUMP_PREFIX_OFFSET
  19.595 -	};
  19.596 -#endif
  19.597 -
  19.598 -#ifdef EFX_NEED_PRINT_MAC
  19.599 -	#define DECLARE_MAC_BUF(var) char var[18] __attribute__((unused))
  19.600 -#endif
  19.601 -
  19.602 -#ifdef EFX_NEED_GFP_T
  19.603 -	typedef unsigned int gfp_t;
  19.604 -#endif
  19.605 -
  19.606 -#ifdef EFX_NEED_SAFE_LISTS
  19.607 -	#define list_for_each_entry_safe_reverse(pos, n, head, member)	     \
  19.608 -		for (pos = list_entry((head)->prev, typeof(*pos), member),   \
  19.609 -		     n = list_entry(pos->member.prev, typeof(*pos), member); \
  19.610 -		     &pos->member != (head);				     \
  19.611 -		     pos = n,						     \
  19.612 -		     n = list_entry(n->member.prev, typeof(*n), member))
  19.613 -#endif
  19.614 -
  19.615 -#ifdef EFX_NEED_DEV_NOTICE
  19.616 -	#define dev_notice dev_warn
  19.617 -#endif
  19.618 -
  19.619 -#ifdef EFX_NEED_IF_MII
  19.620 -	#include <linux/mii.h>
  19.621 -	static inline struct mii_ioctl_data *efx_if_mii ( struct ifreq *rq ) {
  19.622 -		return ( struct mii_ioctl_data * ) &rq->ifr_ifru;
  19.623 -	}
  19.624 -	#undef if_mii
  19.625 -	#define if_mii efx_if_mii
  19.626 -#endif
  19.627 +typedef u32 __wsum;
  19.628 +#define csum_unfold(x) ((__force __wsum) x)
  19.629  
  19.630 -#ifdef EFX_NEED_MTD_ERASE_CALLBACK
  19.631 -	#include <linux/mtd/mtd.h>
  19.632 -	static inline void efx_mtd_erase_callback(struct erase_info *instr) {
  19.633 -		if ( instr->callback )
  19.634 -			instr->callback ( instr );
  19.635 -	}
  19.636 -	#undef mtd_erase_callback
  19.637 -	#define mtd_erase_callback efx_mtd_erase_callback
  19.638 -#endif
  19.639 -
  19.640 -#ifdef EFX_NEED_DUMMY_PCI_DISABLE_MSI
  19.641 -	#include <linux/pci.h>
  19.642 -	static inline void dummy_pci_disable_msi ( struct pci_dev *dev ) {
  19.643 -		/* Do nothing */
  19.644 -	}
  19.645 -	#undef pci_disable_msi
  19.646 -	#define pci_disable_msi dummy_pci_disable_msi
  19.647 -#endif
  19.648 -
  19.649 -#ifdef EFX_NEED_DUMMY_MSIX
  19.650 -	struct msix_entry {
  19.651 -		u16 	vector;	/* kernel uses to write allocated vector */
  19.652 -		u16	entry;	/* driver uses to specify entry, OS writes */
  19.653 -	};
  19.654 -	static inline int pci_enable_msix(struct pci_dev* dev,
  19.655 -					  struct msix_entry *entries, int nvec)
  19.656 -		{return -1;}
  19.657 -	static inline void pci_disable_msix(struct pci_dev *dev) { /* Do nothing */}
  19.658 -#endif
  19.659 -
  19.660 -#ifdef EFX_NEED_BYTEORDER_TYPES
  19.661 -	typedef __u16 __be16;
  19.662 -	typedef __u32 __be32;
  19.663 -	typedef __u64 __be64;
  19.664 -	typedef __u16 __le16;
  19.665 -	typedef __u32 __le32;
  19.666 -	typedef __u64 __le64;
  19.667 -#endif
  19.668 -
  19.669 -/**************************************************************************
  19.670 - *
  19.671 - * Missing functions provided by kernel_compat.c
  19.672 - *
  19.673 - **************************************************************************
  19.674 - *
  19.675 - */
  19.676 -#ifdef EFX_NEED_RANDOM_ETHER_ADDR
  19.677 -	extern void efx_random_ether_addr(uint8_t *addr);
  19.678 -	#ifndef EFX_IN_KCOMPAT_C
  19.679 -		#undef random_ether_addr
  19.680 -		#define random_ether_addr efx_random_ether_addr
  19.681 -	#endif
  19.682 -#endif
  19.683 -
  19.684 -#ifdef EFX_NEED_MII_ETHTOOL_FIX
  19.685 -	extern int efx_mii_ethtool_gset(struct mii_if_info *mii,
  19.686 -					struct ethtool_cmd *ecmd);
  19.687 -	extern int efx_mii_ethtool_sset(struct mii_if_info *mii,
  19.688 -					struct ethtool_cmd *ecmd);
  19.689 -	#ifndef EFX_IN_KCOMPAT_C
  19.690 -		#undef mii_ethtool_gset
  19.691 -		#define mii_ethtool_gset efx_mii_ethtool_gset
  19.692 -		#undef mii_ethtool_sset
  19.693 -		#define mii_ethtool_sset efx_mii_ethtool_sset
  19.694 -	#endif
  19.695 -#endif
  19.696 -
  19.697 -#ifdef EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX
  19.698 -	extern int efx_unregister_netdevice_notifier(struct notifier_block *nb);
  19.699 -	#ifndef EFX_IN_KCOMPAT_C
  19.700 -		#undef unregister_netdevice_notifier
  19.701 -		#define unregister_netdevice_notifier \
  19.702 -				efx_unregister_netdevice_notifier
  19.703 -	#endif
  19.704 -#endif
  19.705 -
  19.706 -#ifdef EFX_NEED_IOMMU_LOCK
  19.707 -	extern dma_addr_t efx_pci_map_single(struct pci_dev *pci, void *ptr,
  19.708 -					     size_t size, int direction);
  19.709 -	extern void efx_pci_unmap_single(struct pci_dev *pci,
  19.710 -					 dma_addr_t dma_addr, size_t size,
  19.711 -					 int direction);
  19.712 -	extern void * efx_pci_alloc_consistent(struct pci_dev *pci,
  19.713 -					       size_t size,
  19.714 -					       dma_addr_t *dma_addr);
  19.715 -	extern void efx_pci_free_consistent(struct pci_dev *pci,
  19.716 -					    size_t size, void *ptr,
  19.717 -					    dma_addr_t dma_addr);
  19.718 -	#ifndef EFX_IN_KCOMPAT_C
  19.719 -		#undef pci_map_single
  19.720 -		#undef pci_unmap_single
  19.721 -		#undef pci_alloc_consistent
  19.722 -		#undef pci_free_consistent
  19.723 -		#define pci_map_single efx_pci_map_single
  19.724 -		#define pci_unmap_single efx_pci_unmap_single
  19.725 -		#define pci_alloc_consistent efx_pci_alloc_consistent
  19.726 -		#define pci_free_consistent efx_pci_free_consistent
  19.727 -	#endif
  19.728 -#endif
  19.729 -
  19.730 -#ifdef EFX_NEED_PRINT_MAC
  19.731 -	extern char *print_mac(char *buf, const u8 *addr);
  19.732 -#endif
  19.733 -
  19.734 -#ifdef EFX_NEED_COMPOUND_PAGE_FIX
  19.735 -	extern void efx_compound_page_destructor(struct page *page);
  19.736 -#endif
  19.737 +#define DECLARE_MAC_BUF(var) char var[18] __attribute__((unused))
  19.738 +extern char *print_mac(char *buf, const u8 *addr);
  19.739  
  19.740 -#ifdef EFX_NEED_HEX_DUMP
  19.741 -	extern void
  19.742 -	print_hex_dump(const char *level, const char *prefix_str,
  19.743 -		       int prefix_type, int rowsize, int groupsize,
  19.744 -		       const void *buf, size_t len, int ascii);
  19.745 -#endif
  19.746 -
  19.747 -#ifdef EFX_NEED_MSECS_TO_JIFFIES
  19.748 -	extern unsigned long msecs_to_jiffies(const unsigned int m);
  19.749 -#endif
  19.750 -
  19.751 -#ifdef EFX_NEED_MSLEEP
  19.752 -	extern void msleep(unsigned int msecs);
  19.753 -#endif
  19.754 -
  19.755 -/**************************************************************************
  19.756 - *
  19.757 - * Wrappers to fix bugs and parameter changes
  19.758 - *
  19.759 - **************************************************************************
  19.760 - *
  19.761 +/**
  19.762 + * queue_delayed_work() in pre-2.6.20 kernels can't rearm a work item
  19.763 + * from inside its own work function, so instead do a rather hacky sleep
  19.764   */
  19.765 -
  19.766 -#ifdef EFX_NEED_PCI_SAVE_RESTORE_WRAPPERS
  19.767 -	#define pci_save_state(_dev)					\
  19.768 -		pci_save_state(_dev, (_dev)->saved_config_space)
  19.769 -
  19.770 -	#define pci_restore_state(_dev)					\
  19.771 -		pci_restore_state(_dev, (_dev)->saved_config_space)
  19.772 -#endif
  19.773 -
  19.774 -#ifdef EFX_NEED_WORK_API_WRAPPERS
  19.775 -	/**
  19.776 -	 * queue_delayed_work in pre 2.6.20 can't rearm from inside
  19.777 -	 * the work member. So instead do a rather hacky sleep
  19.778 -	 */
  19.779 -	#define delayed_work work_struct
  19.780 -	#define INIT_DELAYED_WORK INIT_WORK
  19.781 -
  19.782 -	static int inline efx_queue_delayed_work(struct workqueue_struct *wq,
  19.783 -						 struct work_struct *work,
  19.784 -						 unsigned long delay)
  19.785 -	{
  19.786 -		if (unlikely(delay > 0))
  19.787 -			schedule_timeout_uninterruptible(delay);
  19.788 -		return queue_work(wq, work);
  19.789 -	}
  19.790 -	#define queue_delayed_work efx_queue_delayed_work
  19.791 -
  19.792 -	/**
  19.793 -	 * The old and new work-function prototypes just differ
  19.794 -	 * in the type of the pointer returned, so it's safe
  19.795 -	 * to cast between the prototypes.
  19.796 -	 */
  19.797 -	typedef void (*efx_old_work_func_t)(void *p);
  19.798 +#define delayed_work work_struct
  19.799 +#define INIT_DELAYED_WORK INIT_WORK
  19.800  
  19.801 -	#undef INIT_WORK
  19.802 -	#define INIT_WORK(_work, _func)					\
  19.803 -		do {							\
  19.804 -			INIT_LIST_HEAD(&(_work)->entry);		\
  19.805 -			(_work)->pending = 0;				\
  19.806 -			PREPARE_WORK((_work),				\
  19.807 -				     (efx_old_work_func_t) (_func),	\
  19.808 -				     (_work));				\
  19.809 -		} while (0)
  19.810 -#endif
  19.811 -
  19.812 -#ifdef EFX_HAVE_OLD_NAPI
  19.813 -	#define napi_str napi_dev[0]
  19.814 -
  19.815 -	static inline void netif_napi_add(struct net_device *dev,
  19.816 -					  struct net_device *dummy,
  19.817 -					  int (*poll) (struct net_device *,
  19.818 -						       int *),
  19.819 -					  int weight)
  19.820 -	{
  19.821 -		dev->weight = weight;
  19.822 -		dev->poll = poll;
  19.823 -	}
  19.824 -
  19.825 -	#define napi_enable netif_poll_enable
  19.826 -	#define napi_disable netif_poll_disable
  19.827 +static inline int efx_queue_delayed_work(struct workqueue_struct *wq,
  19.828 +					 struct work_struct *work,
  19.829 +					 unsigned long delay)
  19.830 +{
  19.831 +	if (unlikely(delay > 0))
  19.832 +		schedule_timeout_uninterruptible(delay);
  19.833 +	return queue_work(wq, work);
  19.834 +}
  19.835 +#define queue_delayed_work efx_queue_delayed_work
  19.836  
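
Because the compat wrapper above sleeps for the whole delay and only then queues the work, it must be called from process context; on 2.6.20+ the real queue_delayed_work() returns immediately instead. A hypothetical call site (example_rearm_monitor is illustrative, not a driver function):

/* Under the wrapper this blocks for HZ jiffies, then queues; on newer
 * kernels the work simply runs about a second later.  delayed_work is
 * #defined to work_struct above, so one type serves both cases. */
static void example_rearm_monitor(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	queue_delayed_work(wq, work, HZ);	/* ~1 second */
}
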
  19.837 -	#define netif_rx_complete(dev, dummy) netif_rx_complete(dev)
  19.838 -#endif
  19.839 +/**
  19.840 + * The old and new work-function prototypes just differ
  19.841 + * in the type of the pointer passed, so it's safe
  19.842 + * to cast between the prototypes.
  19.843 + */
  19.844 +typedef void (*efx_old_work_func_t)(void *p);
  19.845  
  19.846 -#ifdef EFX_NEED_COMPOUND_PAGE_FIX
  19.847 -	static inline
  19.848 -	struct page *efx_alloc_pages(gfp_t flags, unsigned int order)
  19.849 -	{
  19.850 -		struct page *p = alloc_pages(flags, order);
  19.851 -		if ((flags & __GFP_COMP) && (p != NULL) && (order > 0))
  19.852 -			p[1].mapping = (void *)efx_compound_page_destructor;
  19.853 -		return p;
  19.854 -	}
  19.855 -	#undef alloc_pages
  19.856 -	#define alloc_pages efx_alloc_pages
  19.857 +#undef INIT_WORK
  19.858 +#define INIT_WORK(_work, _func)					\
  19.859 +	do {							\
  19.860 +		INIT_LIST_HEAD(&(_work)->entry);		\
  19.861 +		(_work)->pending = 0;				\
  19.862 +		PREPARE_WORK((_work),				\
  19.863 +			     (efx_old_work_func_t) (_func),	\
  19.864 +			     (_work));				\
  19.865 +	} while (0)
  19.866  
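
The redefined INIT_WORK() hands the work item itself to the handler, so a handler written against the modern prototype works on both APIs once cast. A sketch under that assumption (struct example_ctx and example_work_handler are hypothetical):

struct example_ctx {
	struct work_struct work;
	int pending_events;
};

/* Modern-style handler: the INIT_WORK() above casts it to the old
 * void (*)(void *) prototype and passes the work item as the argument,
 * so container_of() resolves the owning context either way. */
static void example_work_handler(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, work);

	ctx->pending_events = 0;	/* deferred processing goes here */
}
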
  19.867 -	static inline
  19.868 -	void efx_free_pages(struct page *p, unsigned int order)
  19.869 -	{
  19.870 -		if ((order > 0) && (page_count(p) == 1))
  19.871 -			p[1].mapping = NULL;
  19.872 -		__free_pages(p, order);
  19.873 -	}
  19.874 -	#define __free_pages efx_free_pages
  19.875 -#endif
  19.876 +#define napi_str napi_dev[0]
  19.877  
  19.878 -#ifdef EFX_NEED_HEX_DUMP_CONST_FIX
  19.879 -	#define print_hex_dump(v,s,t,r,g,b,l,a) \
  19.880 -		print_hex_dump((v),(s),(t),(r),(g),(void*)(b),(l),(a))
  19.881 -#endif
  19.882 +static inline void netif_napi_add(struct net_device *dev,
  19.883 +				  struct net_device *dummy,
  19.884 +				  int (*poll) (struct net_device *, int *),
  19.885 +				  int weight)
  19.886 +{
  19.887 +	dev->weight = weight;
  19.888 +	dev->poll = poll;
  19.889 +}
  19.890 +
  19.891 +#define napi_enable netif_poll_enable
  19.892 +#define napi_disable netif_poll_disable
  19.893 +
  19.894 +#define netif_rx_complete(dev, dummy) netif_rx_complete(dev)
  19.895  
  19.896  #endif /* EFX_KERNEL_COMPAT_H */
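
For reference, the pre-2.6.24 NAPI interface that these macros map onto polls through the net device itself and passes the budget by pointer. A schematic poll function under that API (example_poll and its bookkeeping are illustrative only):

/* Old NAPI: consume up to min(*budget, dev->quota) packets, charge
 * both counters, and return non-zero while work remains. */
static int example_poll(struct net_device *dev, int *budget)
{
	int done = 0;			/* packets processed this pass */

	/* ... process up to min(*budget, dev->quota) RX events ... */

	*budget -= done;
	dev->quota -= done;
	if (done && dev->quota == 0)
		return 1;		/* more work: poll again later */
	netif_rx_complete(dev, NULL);	/* macro above drops the 2nd arg */
	return 0;			/* done; driver re-enables its IRQs */
}
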
    20.1 --- a/drivers/net/sfc/mdio_10g.c	Tue Mar 31 11:49:12 2009 +0100
    20.2 +++ b/drivers/net/sfc/mdio_10g.c	Tue Mar 31 11:59:10 2009 +0100
    20.3 @@ -106,8 +106,8 @@ static int mdio_clause45_check_mmd(struc
    20.4  }
    20.5  
    20.6  /* This ought to be ridiculous overkill. We expect it to fail rarely */
    20.7 -#define MDIO45_RESET_TIME	HZ
    20.8 -#define MDIO45_RESET_ITERS	(100)
    20.9 +#define MDIO45_RESET_TIME	1000 /* ms */
   20.10 +#define MDIO45_RESET_ITERS	100
   20.11  
   20.12  int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
   20.13  				  unsigned int mmd_mask)
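
With MDIO45_RESET_TIME now in milliseconds, the poll interval falls out as TIME/ITERS = 10ms. A sketch of how a wait loop would presumably consume the two constants (example_wait_reset and mmds_still_resetting are hypothetical helpers, not the driver's code):

/* 1000ms budget spread over 100 polls: msleep(10) per iteration */
static int example_wait_reset(struct efx_nic *efx, unsigned int mmd_mask)
{
	int spins = MDIO45_RESET_ITERS;

	while (spins--) {
		if (!mmds_still_resetting(efx, mmd_mask))	/* hypothetical */
			return 0;
		msleep(MDIO45_RESET_TIME / MDIO45_RESET_ITERS);
	}
	return -ETIMEDOUT;
}
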
    21.1 --- a/drivers/net/sfc/mtd.c	Tue Mar 31 11:49:12 2009 +0100
    21.2 +++ b/drivers/net/sfc/mtd.c	Tue Mar 31 11:59:10 2009 +0100
    21.3 @@ -300,9 +300,7 @@ out:
    21.4  		erase->state = MTD_ERASE_DONE;
    21.5  	} else {
    21.6  		erase->state = MTD_ERASE_FAILED;
    21.7 -#if defined(EFX_USE_MTD_ERASE_FAIL_ADDR)
    21.8  		erase->fail_addr = 0xffffffff;
    21.9 -#endif
   21.10  	}
   21.11  	mtd_erase_callback(erase);
   21.12  	return rc;
   21.13 @@ -437,9 +435,7 @@ static __devinit int efx_mtd_register(st
   21.14  
   21.15  	efx_mtd->mtd.size = spi->size;
   21.16  	efx_mtd->mtd.erasesize = spi->erase_size;
   21.17 -#if defined(EFX_USE_MTD_WRITESIZE)
   21.18  	efx_mtd->mtd.writesize = 1;
   21.19 -#endif
   21.20  	if (snprintf(efx_mtd->name, sizeof(efx_mtd->name),
   21.21  		     "%s %s", efx->name, type_name) >=
   21.22  	    sizeof(efx_mtd->name))
    22.1 --- a/drivers/net/sfc/net_driver.h	Tue Mar 31 11:49:12 2009 +0100
    22.2 +++ b/drivers/net/sfc/net_driver.h	Tue Mar 31 11:59:10 2009 +0100
    22.3 @@ -50,13 +50,8 @@
    22.4  #include "driverlink.h"
    22.5  #include "i2c-direct.h"
    22.6  
    22.7 -	#ifndef EFX_USE_DEBUGFS
    22.8 -		/* Sick, but we have no other use for dentry */
    22.9 -		#define dentry proc_dir_entry
   22.10 -	#endif
   22.11 -
   22.12 -#define EFX_MAX_LRO_DESCRIPTORS 8
   22.13 -#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
   22.14 +/* Sick, but we have no other use for dentry */
   22.15 +#define dentry proc_dir_entry
   22.16  
   22.17  /**************************************************************************
   22.18   *
   22.19 @@ -66,7 +61,7 @@
   22.20  #ifndef EFX_DRIVER_NAME
   22.21  #define EFX_DRIVER_NAME	"sfc"
   22.22  #endif
   22.23 -#define EFX_DRIVER_VERSION	"2.2.0101"
   22.24 +#define EFX_DRIVER_VERSION	"2.2.0204"
   22.25  
   22.26  #ifdef EFX_ENABLE_DEBUG
   22.27  #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
   22.28 @@ -76,11 +71,15 @@
   22.29  #define EFX_WARN_ON_PARANOID(x) do {} while (0)
   22.30  #endif
   22.31  
   22.32 +#define NET_DEV_REGISTERED(efx)					\
   22.33 +	((efx)->net_dev &&					\
   22.34 +	 ((efx)->net_dev->reg_state == NETREG_REGISTERED))
   22.35 +
   22.36  /* Include net device name in log messages if it has been registered.
   22.37   * Use efx->name not efx->net_dev->name so that races with (un)registration
   22.38   * are harmless.
   22.39   */
   22.40 -#define NET_DEV_NAME(efx) ((efx)->net_dev_registered ? (efx)->name : "")
   22.41 +#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
   22.42  
   22.43  /* Un-rate-limited logging */
   22.44  #define EFX_ERR(efx, fmt, args...) \
   22.45 @@ -336,6 +335,83 @@ struct efx_buffer {
   22.46  };
   22.47  
   22.48  
   22.49 +
   22.50 +/**
   22.51 + * struct efx_ssr_conn - Connection state for Soft Segment Reassembly (SSR) aka LRO
   22.52 + * @link: Link for hash table and free list.
   22.53 + * @active_link: Link for active_conns list
   22.54 + * @saddr: Source IP address
   22.55 + * @daddr: Destination IP address
   22.56 + * @source: Source TCP port number
   22.57 + * @dest: Destination TCP port number
   22.58 + * @n_in_order_pkts: Number of in-order packets we've seen with payload.
   22.59 + * @next_seq: Next in-order sequence number.
   22.60 + * @last_pkt_jiffies: Time we last saw a packet on this connection.
   22.61 + * @skb: The SKB we are currently holding.
   22.62 + *	If %NULL, then all following fields are undefined.
   22.63 + * @skb_tail: The tail of the frag_list of SKBs we're holding.
   22.64 + *	Only valid after at least one merge.
   22.65 + * @eh: The ethernet header of the skb we are holding.
   22.66 + * @iph: The IP header of the skb we are holding.
   22.67 + * @th: The TCP header of the skb we are holding.
   22.68 + * @th_last: The TCP header of the last packet merged.
   22.69 + */
   22.70 +struct efx_ssr_conn {
   22.71 +	struct list_head link;
   22.72 +	struct list_head active_link;
   22.73 +	unsigned saddr, daddr;
   22.74 +	unsigned short source, dest;
   22.75 +	unsigned n_in_order_pkts;
   22.76 +	unsigned next_seq;
   22.77 +	unsigned long last_pkt_jiffies;
   22.78 +	struct sk_buff *skb;
   22.79 +	struct sk_buff *skb_tail;
   22.80 +	struct ethhdr *eh;
   22.81 +	struct iphdr *iph;
   22.82 +	struct tcphdr *th;
   22.83 +	struct tcphdr *th_last;
   22.84 +};
   22.85 +
   22.86 +/**
   22.87 + * struct efx_ssr_state - Port state for Soft Segment Reassembly (SSR) aka LRO
   22.88 + * @efx: The associated NIC.
   22.89 + * @conns_mask: Number of hash buckets - 1.
   22.90 + * @conns: Hash buckets for tracked connections.
   22.91 + * @conns_n: Length of linked list for each hash bucket.
   22.92 + * @active_conns: Connections that are holding a packet.
   22.93 + *	Connections are self-linked when not in this list.
   22.94 + * @free_conns: Free efx_ssr_conn instances.
   22.95 + * @last_purge_jiffies: The value of jiffies last time we purged idle
   22.96 + *	connections.
   22.97 + * @n_merges: Number of packets absorbed by SSR.
   22.98 + * @n_bursts: Number of bursts spotted by SSR.
   22.99 + * @n_slow_start: Number of packets not merged because the connection may
  22.100 + *	be in slow-start.
  22.101 + * @n_misorder: Number of out-of-order packets seen in tracked streams.
  22.102 + * @n_too_many: Incremented when we're trying to track too many streams.
  22.103 + * @n_new_stream: Number of distinct streams we've tracked.
  22.104 + * @n_drop_idle: Number of streams discarded because they went idle.
  22.105 + * @n_drop_closed: Number of streams that have seen a FIN or RST.
  22.106 + */
  22.107 +struct efx_ssr_state {
  22.108 +	struct efx_nic *efx;
  22.109 +	unsigned conns_mask;
  22.110 +	struct list_head *conns;
  22.111 +	unsigned *conns_n;
  22.112 +	struct list_head active_conns;
  22.113 +	struct list_head free_conns;
  22.114 +	unsigned long last_purge_jiffies;
  22.115 +	unsigned n_merges;
  22.116 +	unsigned n_bursts;
  22.117 +	unsigned n_slow_start;
  22.118 +	unsigned n_misorder;
  22.119 +	unsigned n_too_many;
  22.120 +	unsigned n_new_stream;
  22.121 +	unsigned n_drop_idle;
  22.122 +	unsigned n_drop_closed;
  22.123 +};
  22.124 +
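
Since @conns_mask is one less than a power-of-2 table size, bucket selection reduces to a single mask. A hedged sketch of folding the 4-tuple into a bucket index (example_ssr_bucket and its XOR fold are illustrative; the driver's actual hash function is not part of this hunk):

/* Masking is equivalent to modulo because the table size is a power
 * of 2; conns_n[] then bounds the chain walk via lro_chain_max. */
static unsigned example_ssr_bucket(const struct efx_ssr_state *st,
				   unsigned saddr, unsigned daddr,
				   unsigned short source, unsigned short dest)
{
	unsigned h = saddr ^ daddr ^ ((unsigned)source << 16 | dest);

	return h & st->conns_mask;
}
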
  22.125 +
  22.126  /* Flags for channel->used_flags */
  22.127  #define EFX_USED_BY_RX 1
  22.128  #define EFX_USED_BY_TX 2
  22.129 @@ -371,6 +447,7 @@ enum efx_rx_alloc_method {
  22.130   * @last_eventq_read_ptr: Last event queue read pointer value.
  22.131   * @eventq_magic: Event queue magic value for driver-generated test events
  22.132   * @debug_dir: debugfs directory
  22.133 + * @ssr: LRO/SSR state
  22.134   * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
  22.135   *	and diagnostic counters
  22.136   * @rx_alloc_push_pages: RX allocation method currently in use for pushing
  22.137 @@ -395,9 +472,6 @@ struct efx_channel {
  22.138  	unsigned int has_interrupt;
  22.139  	unsigned int irq_moderation;
  22.140  	struct net_device *napi_dev;
  22.141 -#if !defined(EFX_HAVE_OLD_NAPI)
  22.142 -	struct napi_struct napi_str;
  22.143 -#endif
  22.144  	struct work_struct reset_work;
  22.145  	int work_pending;
  22.146  	struct efx_special_buffer eventq;
  22.147 @@ -409,6 +483,7 @@ struct efx_channel {
  22.148  	struct dentry *debug_dir;
  22.149  #endif
  22.150  
  22.151 +	struct efx_ssr_state ssr;
  22.152  	int rx_alloc_level;
  22.153  	int rx_alloc_push_pages;
  22.154  	int rx_alloc_pop_pages;
  22.155 @@ -533,10 +608,10 @@ enum phy_type {
  22.156  #define EFX_ISCLAUSE45(efx) ((efx)->phy_type != PHY_TYPE_1G_ALASKA)
  22.157  
  22.158  enum nic_state {
  22.159 -	STATE_INIT = 0,      /* suspend_lock always held */
  22.160 +	STATE_INIT = 0,
  22.161  	STATE_RUNNING = 1,
  22.162  	STATE_FINI = 2,
  22.163 -	STATE_RESETTING = 3, /* suspend_lock always held */
  22.164 +	STATE_RESETTING = 3, /* rtnl_lock always held */
  22.165  	STATE_DISABLED = 4,
  22.166  	STATE_MAX,
  22.167  };
  22.168 @@ -548,10 +623,10 @@ enum nic_state {
  22.169   * This is the equivalent of NET_IP_ALIGN [which controls the alignment
  22.170   * of the skb->head for hardware DMA].
  22.171   */
  22.172 -#ifdef __ia64__
  22.173 -#define EFX_PAGE_IP_ALIGN 2
  22.174 +#if defined(__i386__) || defined(__x86_64__)
  22.175 +#define EFX_PAGE_IP_ALIGN 0
  22.176  #else
  22.177 -#define EFX_PAGE_IP_ALIGN 0
  22.178 +#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
  22.179  #endif
  22.180  
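
A minimal sketch of where this constant is applied when preparing a receive buffer (the skb path below is an assumption for illustration, not lifted from rx.c):

/* Offset the payload so the IP header after the 14-byte Ethernet
 * header lands on a 4-byte boundary, as NET_IP_ALIGN does for
 * skb-based receive.  x86 handles the unaligned load cheaply, so it
 * keeps 0 and leaves the DMA start cacheline-aligned instead. */
static void example_align_rx_buffer(struct sk_buff *skb)
{
	skb_reserve(skb, EFX_PAGE_IP_ALIGN);
}
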
  22.181  /*
  22.182 @@ -578,14 +653,14 @@ enum efx_fc_type {
  22.183   * @mac_writel: Write dword to MAC register
  22.184   * @mac_readl: Read dword from a MAC register
  22.185   * @init: Initialise MAC and PHY
  22.186 - * @reconfigure: Reconfigure MAC and PHY (e.g. for new link parameters)
  22.187 + * @reconfigure: Reconfigure MAC and PHY. Serialised by the mac_lock
  22.188   * @update_stats: Update statistics
  22.189   * @fini: Shut down MAC and PHY
  22.190 - * @check_hw: Check hardware
  22.191 + * @check_hw: Check hardware. Serialised by the mac_lock
  22.192   * @fake_phy_event: Simulate a PHY event on a port
  22.193 - * @get_settings: Get ethtool settings
  22.194 - * @set_settings: Set ethtool settings
  22.195 - * @set_pause: Set pause parameters
  22.196 + * @get_settings: Get ethtool settings. Serialised by the mac_lock
  22.197 + * @set_settings: Set ethtool settings. Serialised by the mac_lock
  22.198 + * @set_pause: Set pause parameters. Serialised by the mac_lock
  22.199   */
  22.200  struct efx_mac_operations {
  22.201  	void (*mac_writel) (struct efx_nic *efx,
  22.202 @@ -711,8 +786,8 @@ struct efx_mac_stats {
  22.203  
  22.204  /* An Efx multicast filter hash */
  22.205  union efx_multicast_hash {
  22.206 -	u8 byte[EFX_MCAST_HASH_ENTRIES / sizeof(u8)];
  22.207 -	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t)];
  22.208 +	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
  22.209 +	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
  22.210  };
  22.211  
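
Both arrays are sized in bits, one bit per hash entry; the old expressions divided by sizeof() alone and so over-allocated by a factor of eight. A worked instance, assuming a hypothetical 256-entry hash and a 16-byte efx_oword_t:

/* EFX_MCAST_HASH_ENTRIES == 256 (assumed for illustration):
 *   byte[]  : 256 / 8      == 32 bytes  (256 bits)
 *   oword[] : 256 / 16 / 8 == 2 owords  (256 bits)
 * Setting the bit a multicast address hashed to: */
static void example_set_mcast_bit(union efx_multicast_hash *hash,
				  unsigned bit)
{
	hash->byte[bit / 8] |= 1 << (bit % 8);
}
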
  22.212  /* Efx Error condition statistics */
  22.213 @@ -732,12 +807,9 @@ struct efx_nic_errors {
  22.214   * struct efx_nic - an Efx NIC
  22.215   * @name: Device name (net device name or bus id before net device registered)
  22.216   * @pci_dev: The PCI device
  22.217 - * @pci_dev2: The secondary PCI device if present
  22.218   * @type: Controller type attributes
  22.219 - * @dma_mask: DMA mask
  22.220   * @legacy_irq: IRQ number
  22.221   * @workqueue: Workqueue for resets, port reconfigures and the HW monitor
  22.222 - * @refill_workqueue: RX refill workqueue
  22.223   * @reset_work: Scheduled reset workitem
  22.224   * @monitor_work: Hardware monitor workitem
  22.225   * @membase_phys: Memory BAR value as physical address
  22.226 @@ -746,15 +818,10 @@ struct efx_nic_errors {
  22.227   * @interrupt_mode: Interrupt mode
  22.228   * @is_asic: Is ASIC (else FPGA)
  22.229   * @is_10g: Is set to 10G (else 1G)
  22.230 - * @external_sram_cfg: Size and number of banks of external SRAM
  22.231   * @i2c: I2C interface
  22.232   * @board_info: Board-level information
  22.233 - * @state: Device state flag. Can only be manipulated when both
  22.234 - *	suspend_lock and rtnl_lock are held.  Can be read when
  22.235 - *	either is held.
  22.236 + * @state: Device state flag. Serialised by the rtnl_lock.
  22.237   * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
  22.238 - * @suspend_lock: Device suspend lock.  This must not be acquired with
  22.239 - *	rtnl_lock held.
  22.240   * @tx_queue: TX DMA queues
  22.241   * @rx_queue: RX DMA queues
  22.242   * @channel: Channels
  22.243 @@ -774,23 +841,21 @@ struct efx_nic_errors {
  22.244   * @spi_lock: SPI bus lock
  22.245   * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
  22.246   * @nic_data: Hardware-dependent state
  22.247 - * @mac_lock: MAC access lock. Protects efx->port_enabled/net_dev_registered
  22.248 - *            and efx_reconfigure_port()
  22.249 + * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and
  22.250 + *	efx_reconfigure_port()
  22.251   * @port_enabled: Port enabled indicator.
  22.252 - *	Serialises efx_stop_all and efx_start_all with kernel interfaces.
  22.253 - *	Safe to read under the rtnl_lock, mac_lock, or netif_tx_lock, but
  22.254 - *	all three must be held to modify it.
  22.255 - * @net_dev_registered: Port is registered with operating system.
  22.256 + *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
  22.257 + *	efx_reconfigure_work() with kernel interfaces. Safe to read under any
  22.258 + *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
  22.259 + *	be held to modify it.
  22.260   * @port_initialized: Port initialized?
  22.261   * @net_dev: Operating system network device. Consider holding the rtnl lock
  22.262   * @rx_checksum_enabled: RX checksumming enabled
  22.263   * @netif_stop_count: Port stop count
  22.264   * @netif_stop_lock: Port stop lock
  22.265 - * @mac_stats: MAC statistics
  22.266 - * @stats: Net device statistics.
  22.267 - *	Hardware-specific code fills in @mac_stats, which provides a
  22.268 - *	detailed breakdown.  Generic code aggregates these statistics
  22.269 - *	into a standard &struct net_device_stats.
  22.270 + * @mac_stats: MAC statistics. These include all statistics the MACs
  22.271 + *	can provide.  Generic code converts these into a standard
  22.272 + *	&struct net_device_stats.
  22.273   * @stats_buffer: DMA buffer for statistics
  22.274   * @stats_lock: Statistics update lock
  22.275   * @mac_op: MAC interface
  22.276 @@ -829,21 +894,14 @@ struct efx_nic_errors {
  22.277  struct efx_nic {
  22.278  	char name[IFNAMSIZ];
  22.279  	struct pci_dev *pci_dev;
  22.280 -	struct pci_dev *pci_dev2;
  22.281 -#if !defined(EFX_USE_PCI_DEV_REVISION)
  22.282  	u8 revision;
  22.283 -#endif
  22.284  	const struct efx_nic_type *type;
  22.285 -	dma_addr_t dma_mask;
  22.286  	int legacy_irq;
  22.287  	struct workqueue_struct *workqueue;
  22.288 -#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
  22.289  	/* Since we can't use cancel_delayed_work_sync(), efx_reset() has to
  22.290  	 * flush efx->workqueue to serialise against efx_reconfigure_port()
  22.291  	 * and efx_monitor(), so it can't also run on that workqueue */
  22.292  	struct workqueue_struct *reset_workqueue;
  22.293 -#endif
  22.294 -	struct workqueue_struct *refill_workqueue;
  22.295  	struct work_struct reset_work;
  22.296  	struct delayed_work monitor_work;
  22.297  	unsigned long membase_phys;
  22.298 @@ -852,7 +910,6 @@ struct efx_nic {
  22.299  	enum efx_int_mode interrupt_mode;
  22.300  	unsigned int is_asic:1;
  22.301  	unsigned int is_10g:1;
  22.302 -	int external_sram_cfg;
  22.303  
  22.304  	struct efx_i2c_interface i2c;
  22.305  	struct efx_board board_info;
  22.306 @@ -860,8 +917,6 @@ struct efx_nic {
  22.307  	enum nic_state state;
  22.308  	enum reset_type reset_pending;
  22.309  
  22.310 -	struct semaphore suspend_lock;
  22.311 -
  22.312  	struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
  22.313  	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
  22.314  	struct efx_channel channel[EFX_MAX_CHANNELS];
  22.315 @@ -885,11 +940,11 @@ struct efx_nic {
  22.316  
  22.317  	struct mutex mac_lock;
  22.318  	int port_enabled;
  22.319 -	int net_dev_registered;
  22.320  
  22.321  	int port_initialized;
  22.322  	struct net_device *net_dev;
  22.323  	int rx_checksum_enabled;
  22.324 +	int lro_enabled;
  22.325  
  22.326  	atomic_t netif_stop_count;
  22.327  	spinlock_t netif_stop_lock;
  22.328 @@ -942,7 +997,6 @@ struct efx_nic {
  22.329  
  22.330  /**
  22.331   * struct efx_nic_type - Efx device type definition
  22.332 - * @is_dual_func: Is dual-function (else single-function)
  22.333   * @mem_bar: Memory BAR number
  22.334   * @mem_map_size: Memory BAR mapped size
  22.335   * @txd_ptr_tbl_base: TX descriptor ring base address
  22.336 @@ -965,7 +1019,6 @@ struct efx_nic {
  22.337   *	descriptors
  22.338   */
  22.339  struct efx_nic_type {
  22.340 -	unsigned int is_dual_func;
  22.341  	unsigned int mem_bar;
  22.342  	unsigned int mem_map_size;
  22.343  	unsigned int txd_ptr_tbl_base;
    23.1 --- a/drivers/net/sfc/null_phy.c	Tue Mar 31 11:49:12 2009 +0100
    23.2 +++ b/drivers/net/sfc/null_phy.c	Tue Mar 31 11:59:10 2009 +0100
    23.3 @@ -33,10 +33,8 @@ static int falcon_null_phy_check_hw(stru
    23.4  	int link_ok = falcon_xaui_link_ok(efx);
    23.5  
    23.6  	/* Generate PHY event that a PHY would have generated */
    23.7 -	if (link_ok != efx->link_up) {
    23.8 -		efx->link_up = link_ok;
    23.9 +	if (link_ok != efx->link_up)
   23.10  		efx->mac_op->fake_phy_event(efx);
   23.11 -	}
   23.12  
   23.13  	return 0;
   23.14  }
   23.15 @@ -46,7 +44,7 @@ static void falcon_null_phy_reconfigure(
   23.16  	/* CX4 is always 10000FD only */
   23.17  	efx->link_options = GM_LPA_10000FULL;
   23.18  
   23.19 -	falcon_null_phy_check_hw(efx);
   23.20 +	efx->link_up = falcon_xaui_link_ok(efx);
   23.21  }
   23.22  
   23.23  struct efx_phy_operations falcon_null_phy_ops = {
    24.1 --- a/drivers/net/sfc/pm8358_phy.c	Tue Mar 31 11:49:12 2009 +0100
    24.2 +++ b/drivers/net/sfc/pm8358_phy.c	Tue Mar 31 11:59:10 2009 +0100
    24.3 @@ -162,10 +162,8 @@ static int pm8358_phy_check_hw(struct ef
    24.4  	int rc = 0;
    24.5  	int link_up = pm8358_link_ok(efx);
    24.6  	/* Simulate a PHY event if link state has changed */
    24.7 -	if (link_up != efx->link_up) {
    24.8 -		efx->link_up = link_up;
    24.9 +	if (link_up != efx->link_up)
   24.10  		efx->mac_op->fake_phy_event(efx);
   24.11 -	}
   24.12  
   24.13  	return rc;
   24.14  }
    25.1 --- a/drivers/net/sfc/rx.c	Tue Mar 31 11:49:12 2009 +0100
    25.2 +++ b/drivers/net/sfc/rx.c	Tue Mar 31 11:59:10 2009 +0100
    25.3 @@ -84,7 +84,7 @@ static int rx_alloc_method = RX_ALLOC_ME
    25.4  #define RX_ALLOC_LEVEL_LRO 0x2000
    25.5  #define RX_ALLOC_LEVEL_MAX 0x3000
    25.6  #define RX_ALLOC_FACTOR_LRO 1
    25.7 -#define RX_ALLOC_FACTOR_SKB -2
    25.8 +#define RX_ALLOC_FACTOR_SKB (-2)
    25.9  
   25.10  /* This is the percentage fill level below which new RX descriptors
   25.11   * will be added to the RX descriptor ring.
   25.12 @@ -284,8 +284,8 @@ static inline void efx_free_rx_buffer(st
   25.13  	}
   25.14  }
   25.15  
   25.16 -inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
   25.17 -			       struct efx_rx_buffer *rx_buf)
   25.18 +static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
   25.19 +				      struct efx_rx_buffer *rx_buf)
   25.20  {
   25.21  	/* Unmap for DMA */
   25.22  	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
   25.23 @@ -364,7 +364,7 @@ static int __efx_fast_push_rx_descriptor
   25.24  		  rx_queue->added_count - rx_queue->removed_count);
   25.25  
   25.26   out:
   25.27 -	/* Send write pointer to card.  */
   25.28 +	/* Send write pointer to card. */
   25.29  	falcon_notify_rx_desc(rx_queue);
   25.30  
   25.31  	/* If the fast fill is running inside from the refill tasklet, then
   25.32 @@ -399,8 +399,7 @@ void efx_fast_push_rx_descriptors(struct
   25.33  		 * that work is immediately pending to free some memory
   25.34  		 * (e.g. an RX event or TX completion)
   25.35  		 */
   25.36 -		queue_delayed_work(rx_queue->efx->refill_workqueue,
   25.37 -				   &rx_queue->work, 0);
   25.38 +		efx_schedule_slow_fill(rx_queue, 0);
   25.39  	}
   25.40  }
   25.41  
   25.42 @@ -409,11 +408,7 @@ void efx_rx_work(struct work_struct *dat
   25.43  	struct efx_rx_queue *rx_queue;
   25.44  	int rc;
   25.45  
   25.46 -#if !defined(EFX_NEED_WORK_API_WRAPPERS)
   25.47 -	rx_queue = container_of(data, struct efx_rx_queue, work.work);
   25.48 -#else
   25.49  	rx_queue = container_of(data, struct efx_rx_queue, work);
   25.50 -#endif
   25.51  
   25.52  	if (unlikely(!rx_queue->channel->enabled))
   25.53  		return;
   25.54 @@ -425,10 +420,8 @@ void efx_rx_work(struct work_struct *dat
   25.55  	/* Push new RX descriptors, allowing at least 1 jiffy for
   25.56  	 * the kernel to free some more memory. */
   25.57  	rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
   25.58 -	if (rc) {
   25.59 -		queue_delayed_work(rx_queue->efx->refill_workqueue,
   25.60 -				   &rx_queue->work, 1);
   25.61 -	}
   25.62 +	if (rc)
   25.63 +		efx_schedule_slow_fill(rx_queue, 1);
   25.64  }
   25.65  
   25.66  static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
   25.67 @@ -513,14 +506,9 @@ static inline struct sk_buff *efx_rx_mk_
   25.68  	return skb;
   25.69  }
   25.70  
   25.71 -#if defined(EFX_USE_FASTCALL)
   25.72  void fastcall efx_rx_packet(struct efx_rx_queue *rx_queue,
   25.73  			    unsigned int index, unsigned int len,
   25.74  			    int checksummed, int discard)
   25.75 -#else
   25.76 -void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
   25.77 -		   unsigned int len, int checksummed, int discard)
   25.78 -#endif
   25.79  {
   25.80  	struct efx_nic *efx = rx_queue->efx;
   25.81  	struct efx_rx_buffer *rx_buf;
   25.82 @@ -587,6 +575,7 @@ void __efx_rx_packet(struct efx_channel 
   25.83  	struct efx_nic *efx = channel->efx;
   25.84  	enum efx_veto veto;
   25.85  	struct sk_buff *skb;
   25.86 +	int lro = efx->lro_enabled;
   25.87  
   25.88  	/* If we're in loopback test, then pass the packet directly to the
   25.89  	 * loopback layer, and free the rx_buf here
   25.90 @@ -616,8 +605,15 @@ void __efx_rx_packet(struct efx_channel 
   25.91  	 * changed, then flush the LRO state.
   25.92  	 */
   25.93  	if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
   25.94 +		efx_flush_lro(channel);
   25.95  		channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
   25.96  	}
   25.97 +	if (likely(checksummed && lro)) {
   25.98 +		if (efx_ssr(&channel->ssr, rx_buf)) {
   25.99 +			channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
  25.100 +			goto done;
  25.101 +		}
  25.102 +	}
  25.103  
  25.104  	/* Allow callback to veto the packet */
  25.105  	veto = EFX_DL_CALLBACK(efx, rx_packet, rx_buf->data, rx_buf->len);
  25.106 @@ -668,7 +664,7 @@ void efx_rx_strategy(struct efx_channel 
  25.107  	enum efx_rx_alloc_method method = rx_alloc_method;
  25.108  
  25.109  	/* Only makes sense to use page based allocation if LRO is enabled */
  25.110 -	if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
  25.111 +	if (!(channel->efx->lro_enabled)) {
  25.112  		method = RX_ALLOC_METHOD_SKB;
  25.113  	} else if (method == RX_ALLOC_METHOD_AUTO) {
  25.114  		/* Constrain the rx_alloc_level */
  25.115 @@ -725,8 +721,6 @@ int efx_init_rx_queue(struct efx_rx_queu
  25.116  
  25.117  	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
  25.118  
  25.119 -	ASSERT_RTNL();
  25.120 -
  25.121  	/* Initialise ptr fields */
  25.122  	rx_queue->added_count = 0;
  25.123  	rx_queue->notified_count = 0;
  25.124 @@ -754,8 +748,6 @@ void efx_fini_rx_queue(struct efx_rx_que
  25.125  
  25.126  	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
  25.127  
  25.128 -	ASSERT_RTNL();
  25.129 -
  25.130  	/* Flush RX queue and remove descriptor ring */
  25.131  	falcon_fini_rx(rx_queue);
  25.132  
  25.133 @@ -788,6 +780,12 @@ void efx_remove_rx_queue(struct efx_rx_q
  25.134  	rx_queue->used = 0;
  25.135  }
  25.136  
  25.137 +/* Flush LRO/SSR state for the given channel */
  25.138 +void efx_flush_lro(struct efx_channel *channel)
  25.139 +{
  25.140 +	efx_ssr_end_of_burst(&channel->ssr);
  25.141 +}
  25.142 +
  25.143  
  25.144  module_param(rx_alloc_method, int, 0644);
  25.145  MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
  25.146 @@ -796,3 +794,441 @@ module_param(rx_refill_threshold, uint, 
  25.147  MODULE_PARM_DESC(rx_refill_threshold,
  25.148  		 "RX descriptor ring fast/slow fill threshold (%)");
  25.149  
  25.150 +
  25.151 +
  25.152 +/* Size of the LRO hash table.  Must be a power of 2.  A larger table
  25.153 + * means we can accelerate a larger number of streams.
  25.154 + */
  25.155 +static unsigned lro_table_size = 128;
  25.156 +module_param(lro_table_size, uint, 0644);
  25.157 +MODULE_PARM_DESC(lro_table_size,
  25.158 +		 "Size of the LRO hash table.  Must be a power of 2");
  25.159 +
  25.160 +/* Maximum length of a hash chain.  If chains get too long then the lookup
  25.161 + * time increases and may exceed the benefit of LRO.
  25.162 + */
  25.163 +static unsigned lro_chain_max = 20;
  25.164 +module_param(lro_chain_max, uint, 0644);
  25.165 +MODULE_PARM_DESC(lro_chain_max,
  25.166 +		 "Maximum length of chains in the LRO hash table");
  25.167 +
  25.168 +
  25.169 +/* Maximum time (in jiffies) that a connection can be idle before its LRO
  25.170 + * state is discarded.
  25.171 + */
  25.172 +static unsigned lro_idle_jiffies = HZ / 10 + 1;	/* 100ms */
  25.173 +module_param(lro_idle_jiffies, uint, 0644);
  25.174 +MODULE_PARM_DESC(lro_idle_jiffies, "Time (in jiffies) after which an"
  25.175 +		 " idle connection's LRO state is discarded");
  25.176 +
  25.177 +
  25.178 +/* Number of packets with payload that must arrive in-order before a
  25.179 + * connection is eligible for LRO.  The idea is we should avoid coalescing
  25.180 + * segments when the sender is in slow-start because reducing the ACK rate
  25.181 + * can damage performance.
  25.182 + */
  25.183 +static unsigned lro_slow_start_packets = 20;
  25.184 +module_param(lro_slow_start_packets, uint, 0644);
  25.185 +MODULE_PARM_DESC(lro_slow_start_packets, "Number of packets that must "
  25.186 +		 "pass in-order before starting LRO.");
  25.187 +
  25.188 +
  25.189 +int efx_ssr_init(struct efx_ssr_state *st, struct efx_nic *efx)
  25.190 +{
  25.191 +	unsigned i;
  25.192 +	st->conns_mask = lro_table_size - 1;
  25.193 +	if ((st->conns_mask + 1) & st->conns_mask) {
  25.194 +		EFX_ERR(efx, "lro_table_size(=%u) must be a power of 2\n",
  25.195 +			lro_table_size);
  25.196 +		return -EINVAL;
  25.197 +	}
  25.198 +	st->efx = efx;
  25.199 +	st->conns = kmalloc((st->conns_mask + 1)
  25.200 +			    * sizeof(st->conns[0]), GFP_KERNEL);
  25.201 +	if (st->conns == NULL)
  25.202 +		return -ENOMEM;
  25.203 +	st->conns_n = kmalloc((st->conns_mask + 1)
  25.204 +			      * sizeof(st->conns_n[0]), GFP_KERNEL);
  25.205 +	if (st->conns_n == NULL) {
  25.206 +		kfree(st->conns);
  25.207 +		st->conns = NULL;
  25.208 +		return -ENOMEM;
  25.209 +	}
  25.210 +	for (i = 0; i <= st->conns_mask; ++i) {
  25.211 +		INIT_LIST_HEAD(&st->conns[i]);
  25.212 +		st->conns_n[i] = 0;
  25.213 +	}
  25.214 +	INIT_LIST_HEAD(&st->active_conns);
  25.215 +	INIT_LIST_HEAD(&st->free_conns);
  25.216 +	return 0;
  25.217 +}
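
The power-of-2 check in efx_ssr_init() above is the mask form of a standard bit trick. A minimal standalone sketch of the same test (the helper name is illustrative, not part of the patch):

	#include <assert.h>

	/* For x > 0, (x & (x - 1)) == 0 exactly when x is a power of two:
	 * a power of two has a single set bit, and x - 1 clears it. */
	static int is_power_of_two(unsigned x)
	{
		return x != 0 && (x & (x - 1)) == 0;
	}

	int main(void)
	{
		assert(is_power_of_two(128));	/* default lro_table_size */
		assert(!is_power_of_two(100));	/* would be rejected */
		/* efx_ssr_init() applies the same test via the mask:
		 * conns_mask = size - 1, so (mask + 1) & mask is non-zero
		 * iff size is not a power of two. */
		return 0;
	}
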
  25.218 +
  25.219 +/* Drop the given connection, and add it to the free list */
  25.220 +static inline void efx_ssr_drop(struct efx_ssr_state *st,
  25.221 +				struct efx_ssr_conn *c, unsigned conn_hash)
  25.222 +{
  25.223 +	EFX_BUG_ON_PARANOID(c->skb);
  25.224 +	EFX_BUG_ON_PARANOID(st->conns_n[conn_hash] <= 0);
  25.225 +	--st->conns_n[conn_hash];
  25.226 +	list_del(&c->link);
  25.227 +	list_add(&c->link, &st->free_conns);
  25.228 +}
  25.229 +
  25.230 +void efx_ssr_fini(struct efx_ssr_state *st)
  25.231 +{
  25.232 +	struct efx_ssr_conn *c;
  25.233 +	unsigned i;
  25.234 +
  25.235 +	/* Return cleanly if efx_ssr_init() has not been called. */
  25.236 +	if (st->conns == NULL)
  25.237 +		return;
  25.238 +
  25.239 +	EFX_BUG_ON_PARANOID(!list_empty(&st->active_conns));
  25.240 +
  25.241 +	for (i = 0; i <= st->conns_mask; ++i)
  25.242 +		while (!list_empty(&st->conns[i])) {
  25.243 +			c = list_entry(st->conns[i].prev,
  25.244 +				       struct efx_ssr_conn, link);
  25.245 +			efx_ssr_drop(st, c, i);
  25.246 +		}
  25.247 +
  25.248 +	while (!list_empty(&st->free_conns)) {
  25.249 +		c = list_entry(st->free_conns.prev, struct efx_ssr_conn, link);
  25.250 +		list_del(&c->link);
  25.251 +		EFX_BUG_ON_PARANOID(c->skb);
  25.252 +		kfree(c);
  25.253 +	}
  25.254 +
  25.255 +	kfree(st->conns_n);
  25.256 +	kfree(st->conns);
  25.257 +	st->conns = NULL;
  25.258 +}
  25.259 +
  25.260 +/* Recalculate the IP header checksum and deliver the skb to the OS */
  25.261 +static void efx_ssr_deliver(struct efx_ssr_state *st, struct efx_ssr_conn *c)
  25.262 +{
  25.263 +	struct efx_nic *efx = st->efx;
  25.264 +	int veto, len;
  25.265 +
  25.266 +	EFX_BUG_ON_PARANOID(!c->skb);
  25.267 +
  25.268 +	++st->n_bursts;
  25.269 +
  25.270 +	/* Finish off packet munging and recalculate IP header checksum. */
  25.271 +	c->iph->tot_len = htons(c->iph->tot_len);
  25.272 +	c->iph->check = 0;
  25.273 +	c->iph->check = ip_fast_csum((u8 *) c->iph, c->iph->ihl);
  25.274 +
  25.275 +	len = c->skb->len + ((char *)c->iph - (char *)c->eh);
  25.276 +	c->skb->truesize = len + sizeof(struct sk_buff);
  25.277 +
  25.278 +	c->th->window = c->th_last->window;
  25.279 +	c->th->ack_seq = c->th_last->ack_seq;
  25.280 +	if (c->th->doff == c->th_last->doff) {
  25.281 +		/* Copy TCP options (take care to avoid going negative). */
  25.282 +		len = ((c->th->doff - 5) & 0xf) << 2u;
  25.283 +		memcpy(c->th + 1, c->th_last + 1, len);
  25.284 +	}
  25.285 +
  25.286 +	/* Allow callback to veto the packet. */
  25.287 +	veto = EFX_DL_CALLBACK(efx, rx_packet, (char *)c->eh, len);
  25.288 +	if (unlikely(veto)) {
  25.289 +		EFX_LOG(efx, "RX vetoed by driverlink %s driver\n",
  25.290 +			efx->dl_cb_dev.rx_packet->driver->name);
  25.291 +		dev_kfree_skb_any(c->skb);
  25.292 +	} else {
  25.293 +		netif_receive_skb(c->skb);
  25.294 +	}
  25.295 +
  25.296 +	c->skb = NULL;
  25.297 +	list_del_init(&c->active_link);
  25.298 +}
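
The option-copy length in efx_ssr_deliver() is derived from the TCP data-offset field, which counts 32-bit words. A small worked check of that arithmetic (illustrative values only):

	#include <assert.h>

	int main(void)
	{
		/* th->doff counts 32-bit words in the TCP header; 5 words is
		 * the bare header, so options occupy (doff - 5) * 4 bytes.
		 * The "& 0xf" keeps the result sane should doff be below 5. */
		assert((((5 - 5) & 0xf) << 2) == 0);	/* no options */
		assert((((8 - 5) & 0xf) << 2) == 12);	/* 12 option bytes */
		assert((((15 - 5) & 0xf) << 2) == 40);	/* maximum options */
		return 0;
	}
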
  25.299 +
  25.300 +/* Stop tracking connections that have gone idle in order to keep hash
  25.301 + * chains short.
  25.302 + */
  25.303 +static void efx_ssr_purge_idle(struct efx_ssr_state *st, unsigned now)
  25.304 +{
  25.305 +	struct efx_ssr_conn *c;
  25.306 +	unsigned i;
  25.307 +
  25.308 +	EFX_BUG_ON_PARANOID(!list_empty(&st->active_conns));
  25.309 +
  25.310 +	st->last_purge_jiffies = now;
  25.311 +	for (i = 0; i <= st->conns_mask; ++i) {
  25.312 +		if (list_empty(&st->conns[i]))
  25.313 +			continue;
  25.314 +
  25.315 +		c = list_entry(st->conns[i].prev, struct efx_ssr_conn, link);
  25.316 +		if (now - c->last_pkt_jiffies > lro_idle_jiffies) {
  25.317 +			++st->n_drop_idle;
  25.318 +			efx_ssr_drop(st, c, i);
  25.319 +		}
  25.320 +	}
  25.321 +}
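
The idle test above compares raw jiffies with an unsigned subtraction, which stays correct across counter wrap as long as the real interval is under half the counter range. A minimal sketch with a 32-bit counter (the values are illustrative):

	#include <assert.h>

	int main(void)
	{
		/* "now - then" yields the elapsed ticks even when the
		 * counter has wrapped between the two samples. */
		unsigned then = 0xfffffff0u;	/* shortly before wrap */
		unsigned now  = 0x00000010u;	/* shortly after wrap  */
		assert(now - then == 0x20u);	/* 32 ticks elapsed    */
		/* efx_ssr_purge_idle() compares this difference against
		 * lro_idle_jiffies in exactly the same way. */
		return 0;
	}
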
  25.322 +
  25.323 +/* Push held skbs down into network stack.
  25.324 + * Only called when active list is non-empty.
  25.325 + */
  25.326 +void __efx_ssr_end_of_burst(struct efx_ssr_state *st)
  25.327 +{
  25.328 +	struct efx_ssr_conn *c;
  25.329 +	unsigned j;
  25.330 +
  25.331 +	EFX_BUG_ON_PARANOID(list_empty(&st->active_conns));
  25.332 +
  25.333 +	do {
  25.334 +		c = list_entry(st->active_conns.next, struct efx_ssr_conn,
  25.335 +			       active_link);
  25.336 +		EFX_BUG_ON_PARANOID(!c->skb);
  25.337 +		efx_ssr_deliver(st, c);
  25.338 +	} while (!list_empty(&st->active_conns));
  25.339 +
  25.340 +	j = jiffies;
  25.341 +	if (unlikely(j != st->last_purge_jiffies))
  25.342 +		efx_ssr_purge_idle(st, j);
  25.343 +}
  25.344 +
  25.345 +/* Merge a received segment into the connection's held skb.
  25.346 + * Returns non-zero if the held skb should now be delivered.
  25.347 + */
  25.348 +static inline int
  25.349 +efx_ssr_merge(struct efx_ssr_state *st, struct efx_ssr_conn *c,
  25.350 +	      struct tcphdr *th, int data_length)
  25.351 +{
  25.352 +	/* Increase lengths appropriately */
  25.353 +	c->skb->len += data_length;
  25.354 +	c->skb->data_len += data_length;
  25.355 +
  25.356 +	/*
  25.357 +	 * Keep track of max MSS seen and store in
  25.358 +	 * gso_size for kernel to use
  25.359 +	 */
  25.360 +	if (data_length > skb_shinfo(c->skb)->gso_size)
  25.361 +		skb_shinfo(c->skb)->gso_size = data_length;
  25.362 +
  25.363 +	/* Update the connection state flags */
  25.364 +	c->iph->tot_len += data_length;
  25.365 +	c->th->psh |= th->psh;
  25.366 +	c->th_last = th;
  25.367 +	++st->n_merges;
  25.368 +
  25.369 +	/* Pass packet up now if another segment could overflow the IP
  25.370 +	 * length.
  25.371 +	 */
  25.372 +	return (c->skb->len > 65536 - 9200);
  25.373 +}
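
The delivery threshold in efx_ssr_merge() keeps the merged length clear of the 16-bit iph->tot_len field. The patch does not document the 9200 constant, so reading it as a safe upper bound on one more jumbo-frame segment is an assumption; the arithmetic, spelled out:

	#include <assert.h>

	int main(void)
	{
		const unsigned ip_len_max  = 65535;	/* 16-bit iph->tot_len */
		const unsigned max_segment = 9200;	/* assumed jumbo-MSS bound */

		/* The threshold used above: */
		assert(65536 - max_segment == 56336);
		/* A held skb at the threshold plus one more maximum-size
		 * segment sits at the edge of the 16-bit length field: */
		assert(56336 + max_segment == ip_len_max + 1);
		return 0;
	}
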
  25.374 +
  25.375 +static inline void
  25.376 +efx_ssr_start(struct efx_ssr_state *st, struct efx_ssr_conn *c,
  25.377 +	      struct tcphdr *th, int data_length)
  25.378 +{
  25.379 +	/* Initialise gso_size appropriately */
  25.380 +	skb_shinfo(c->skb)->gso_size = data_length;
  25.381 +
  25.382 +	/* Mangle header fields for later processing */
  25.383 +	c->iph->tot_len = ntohs(c->iph->tot_len);
  25.384 +
  25.385 +	/* Move this connection to the head of the active list */
  25.386 +	list_del(&c->active_link);
  25.387 +	list_add(&c->active_link, &st->active_conns);
  25.388 +}
  25.389 +
  25.390 +static inline int
  25.391 +efx_ssr_conn_page(struct efx_ssr_state *st, struct efx_ssr_conn *c,
  25.392 +		  struct efx_rx_buffer *rx_buf, struct tcphdr *th,
  25.393 +		  int hdr_length, int data_length)
  25.394 +{
  25.395 +	if (likely(c->skb)) {
  25.396 +		struct skb_frag_struct *frag;
  25.397 +		frag = skb_shinfo(c->skb)->frags;
  25.398 +		frag += skb_shinfo(c->skb)->nr_frags;
  25.399 +		frag->page = rx_buf->page;
  25.400 +		frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_length;
  25.401 +		frag->size = data_length;
  25.402 +		++skb_shinfo(c->skb)->nr_frags;
  25.403 +		rx_buf->page = NULL;
  25.404 +
  25.405 +		if (efx_ssr_merge(st, c, th, data_length) ||
  25.406 +		    (skb_shinfo(c->skb)->nr_frags == MAX_SKB_FRAGS))
  25.407 +			efx_ssr_deliver(st, c);
  25.408 +
  25.409 +		return 1;
  25.410 +	} else {
  25.411 +		c->skb = efx_rx_mk_skb(rx_buf, st->efx, hdr_length);
  25.412 +		if (unlikely(c->skb == NULL))
  25.413 +			return 0;
  25.414 +
  25.415 +		c->eh = eth_hdr(c->skb);
  25.416 +		c->iph = (struct iphdr *)c->skb->data;
  25.417 +		c->th = (struct tcphdr *)((u8 *) c->iph + c->iph->ihl * 4);
  25.418 +		c->th_last = c->th;
  25.419 +
  25.420 +		efx_ssr_start(st, c, th, data_length);
  25.421 +
  25.422 +		return 1;
  25.423 +	}
  25.424 +}
  25.425 +
  25.426 +static inline void
  25.427 +efx_ssr_conn_skb(struct efx_ssr_state *st, struct efx_ssr_conn *c,
  25.428 +		 struct efx_rx_buffer *rx_buf, struct ethhdr *eh,
  25.429 +		 struct iphdr *iph, struct tcphdr *th, int data_length)
  25.430 +{
  25.431 +	/* Transfer ownership of the rx_buf->skb to the LRO chain */
  25.432 +	struct sk_buff *skb = rx_buf->skb;
  25.433 +	rx_buf->skb = NULL;
  25.434 +
  25.435 +	if (likely(c->skb)) {
  25.436 +		/* Remove the headers */
  25.437 +		skb_pull(skb, skb->len - data_length);
  25.438 +
  25.439 +		/* Tack the new skb onto the head skb's frag_list. */
  25.440 +		EFX_BUG_ON_PARANOID(skb->next);
  25.441 +		if (!skb_shinfo(c->skb)->frag_list)
  25.442 +			skb_shinfo(c->skb)->frag_list = skb;
  25.443 +		else
  25.444 +			c->skb_tail->next = skb;
  25.445 +		c->skb_tail = skb;
  25.446 +
  25.447 +		if (efx_ssr_merge(st, c, th, data_length))
  25.448 +			efx_ssr_deliver(st, c);
  25.449 +	} else {
  25.450 +		c->skb = skb;
  25.451 +		c->eh = eh;
  25.452 +		c->iph = iph;
  25.453 +		c->th = th;
  25.454 +		c->th_last = th;
  25.455 +
  25.456 +		efx_ssr_start(st, c, th, data_length);
  25.457 +	}
  25.458 +}
  25.459 +
  25.460 +/* Process SKB and decide whether to dispatch it to the stack now or
  25.461 + * later.
  25.462 + */
  25.463 +int efx_ssr(struct efx_ssr_state *st, struct efx_rx_buffer *rx_buf)
  25.464 +{
  25.466 +	int eh_proto, data_length, hdr_length, dont_merge;
  25.467 +	struct efx_ssr_conn *c;
  25.468 +	struct ethhdr *eh;
  25.469 +	struct iphdr *iph;
  25.470 +	struct tcphdr *th;
  25.471 +	unsigned th_seq, conn_hash, pkt_length;
  25.472 +
  25.473 +	/* This code does not handle VLAN-tagged frames */
  25.474 +	/* Find the IP header. The ethernet header is always at rx_buf->data */
  25.475 +	eh = (struct ethhdr *)rx_buf->data;
  25.476 +	if (rx_buf->page) {
  25.477 +		eh_proto = eh->h_proto;
  25.478 +		iph = (struct iphdr *)(eh + 1);
  25.479 +	} else {
  25.480 +		/* The skb head is at the IP header */
  25.481 +		eh_proto = rx_buf->skb->protocol;
  25.482 +		iph = (struct iphdr *)rx_buf->skb->data;
  25.483 +	}
  25.484 +
  25.485 +	/* We're not interested if it isn't TCP over IPv4, or if fragmented. */
  25.486 +	if ((eh_proto - htons(ETH_P_IP)) |
  25.487 +	    (iph->protocol - IPPROTO_TCP) |
  25.488 +	    (iph->frag_off & htons(IP_MF | IP_OFFSET)))
  25.489 +		return 0;
  25.490 +
  25.491 +	/* Locate the TCP header */
  25.492 +	th = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
  25.493 +	hdr_length = (u8 *) th + th->doff * 4 - (u8 *) eh;
  25.494 +	/* Cope with padding after IP header */
  25.495 +	pkt_length = ntohs(iph->tot_len) + (u8 *)iph - (u8 *)eh;
  25.496 +	rx_buf->len = min(pkt_length, rx_buf->len);
  25.497 +	data_length = rx_buf->len - hdr_length;
  25.498 +	th_seq = ntohl(th->seq);
  25.499 +	dont_merge = ((data_length <= 0)
  25.500 +		      | th->urg | th->syn | th->rst | th->fin);
  25.501 +
  25.502 +	/* Very cheap and crude hash. */
  25.503 +	conn_hash = (th->source ^ th->dest) & st->conns_mask;
  25.504 +
  25.505 +	list_for_each_entry(c, &st->conns[conn_hash], link) {
  25.506 +		if ((c->saddr - iph->saddr) | (c->daddr - iph->daddr) |
  25.507 +		    (c->source - th->source) | (c->dest - th->dest))
  25.508 +			continue;
  25.509 +
  25.510 +		/* Re-insert at head of list to reduce lookup time. */
  25.511 +		list_del(&c->link);
  25.512 +		list_add(&c->link, &st->conns[conn_hash]);
  25.513 +
  25.514 +		if (unlikely(th_seq - c->next_seq)) {
  25.515 +			/* Out-of-order, so start counting again. */
  25.516 +			if (c->skb)
  25.517 +				efx_ssr_deliver(st, c);
  25.518 +			c->n_in_order_pkts = 0;
  25.519 +			c->next_seq = th_seq + data_length;
  25.520 +			++st->n_misorder;
  25.521 +			return 0;
  25.522 +		}
  25.523 +		c->next_seq = th_seq + data_length;
  25.524 +		c->last_pkt_jiffies = jiffies;
  25.525 +
  25.526 +		if (c->n_in_order_pkts < lro_slow_start_packets) {
  25.527 +			/* May be in slow-start, so don't merge. */
  25.528 +			++st->n_slow_start;
  25.529 +			++c->n_in_order_pkts;
  25.530 +			return 0;
  25.531 +		}
  25.532 +
  25.533 +		if (unlikely(dont_merge)) {
  25.534 +			if (c->skb)
  25.535 +				efx_ssr_deliver(st, c);
  25.536 +			if (th->fin || th->rst) {
  25.537 +				++st->n_drop_closed;
  25.538 +				efx_ssr_drop(st, c, conn_hash);
  25.539 +			}
  25.540 +			return 0;
  25.541 +		}
  25.542 +
  25.543 +		if (rx_buf->page) {
  25.544 +			return efx_ssr_conn_page(st, c, rx_buf, th, hdr_length,
  25.545 +						 data_length);
  25.546 +		} else {
  25.547 +			efx_ssr_conn_skb(st, c, rx_buf, eh, iph, th,
  25.548 +					 data_length);
  25.549 +			return 1;
  25.550 +		}
  25.551 +	}
  25.552 +
  25.553 +	/* We're not yet tracking this connection. */
  25.554 +	if (dont_merge)
  25.555 +		return 0;
  25.556 +
  25.557 +	if (st->conns_n[conn_hash] >= lro_chain_max) {
  25.558 +		++st->n_too_many;
  25.559 +		return 0;
  25.560 +	}
  25.561 +
  25.562 +	if (!list_empty(&st->free_conns)) {
  25.563 +		c = list_entry(st->free_conns.next, struct efx_ssr_conn, link);
  25.564 +		list_del(&c->link);
  25.565 +	} else {
  25.566 +		c = kmalloc(sizeof(*c), GFP_ATOMIC);
  25.567 +		if (c == NULL)
  25.568 +			return 0;
  25.569 +		c->skb = NULL;
  25.570 +		INIT_LIST_HEAD(&c->active_link);
  25.571 +	}
  25.572 +
  25.573 +	/* Create the connection tracking data */
  25.574 +	++st->conns_n[conn_hash];
  25.575 +	list_add(&c->link, &st->conns[conn_hash]);
  25.576 +	c->saddr = iph->saddr;
  25.577 +	c->daddr = iph->daddr;
  25.578 +	c->source = th->source;
  25.579 +	c->dest = th->dest;
  25.580 +	c->next_seq = th_seq + data_length;
  25.581 +	c->n_in_order_pkts = 0;
  25.582 +	EFX_BUG_ON_PARANOID(c->skb);
  25.583 +	++st->n_new_stream;
  25.584 +	return 0;
  25.585 +}
  25.586 +
  25.587 +
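
Taken together, the new entry points imply a per-burst calling pattern: efx_ssr() per packet, then a flush at end of burst. A sketch of that pattern — the example_* helpers are hypothetical; only efx_ssr(), channel->ssr and efx_flush_lro() come from this patch:

	/* Hypothetical helpers for illustration only -- not part of the patch. */
	static struct efx_rx_buffer *example_next_rx_buffer(struct efx_channel *c);
	static void example_deliver_normally(struct efx_channel *c,
					     struct efx_rx_buffer *rx_buf);

	static void example_channel_poll(struct efx_channel *channel)
	{
		struct efx_rx_buffer *rx_buf;

		while ((rx_buf = example_next_rx_buffer(channel)) != NULL) {
			/* efx_ssr() returns non-zero if it absorbed the buffer
			 * into a held LRO skb.  (The real driver also gates
			 * this on checksummed && efx->lro_enabled.) */
			if (!efx_ssr(&channel->ssr, rx_buf))
				example_deliver_normally(channel, rx_buf);
		}

		/* End of burst: push any held skbs up the stack so latency
		 * stays bounded between interrupts. */
		efx_flush_lro(channel);
	}
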
    26.1 --- a/drivers/net/sfc/rx.h	Tue Mar 31 11:49:12 2009 +0100
    26.2 +++ b/drivers/net/sfc/rx.h	Tue Mar 31 11:59:10 2009 +0100
    26.3 @@ -26,6 +26,7 @@
    26.4  #ifndef EFX_RX_H
    26.5  #define EFX_RX_H
    26.6  
    26.7 +#include <linux/skbuff.h>
    26.8  #include "net_driver.h"
    26.9  
   26.10  
   26.11 @@ -34,6 +35,7 @@ void efx_remove_rx_queue(struct efx_rx_q
   26.12  int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
   26.13  void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
   26.14  
   26.15 +void efx_flush_lro(struct efx_channel *channel);
   26.16  void efx_rx_strategy(struct efx_channel *channel);
   26.17  void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
   26.18  void efx_rx_work(struct work_struct *data);
   26.19 @@ -41,4 +43,19 @@ void __efx_rx_packet(struct efx_channel 
   26.20  		     struct efx_rx_buffer *rx_buf, int checksummed);
   26.21  
   26.22  
   26.23 +
   26.24 +extern int efx_ssr_init(struct efx_ssr_state *st, struct efx_nic *efx);
   26.25 +extern void efx_ssr_fini(struct efx_ssr_state *st);
   26.26 +
   26.27 +extern void __efx_ssr_end_of_burst(struct efx_ssr_state *st);
   26.28 +extern int efx_ssr(struct efx_ssr_state *st, struct efx_rx_buffer *rx_buf);
   26.29 +
   26.30 +
   26.31 +static inline void efx_ssr_end_of_burst(struct efx_ssr_state *st)
   26.32 +{
   26.33 +	if (!list_empty(&st->active_conns))
   26.34 +		__efx_ssr_end_of_burst(st);
   26.35 +}
   26.36 +
   26.37 +
   26.38  #endif /* EFX_RX_H */
    27.1 --- a/drivers/net/sfc/selftest.c	Tue Mar 31 11:49:12 2009 +0100
    27.2 +++ b/drivers/net/sfc/selftest.c	Tue Mar 31 11:59:10 2009 +0100
    27.3 @@ -35,7 +35,6 @@
    27.4  #include <linux/in.h>
    27.5  #include <linux/udp.h>
    27.6  #include <linux/rtnetlink.h>
    27.7 -#include <asm/io.h>
    27.8  #include "net_driver.h"
    27.9  #include "ethtool.h"
   27.10  #include "efx.h"
   27.11 @@ -245,8 +244,7 @@ static int efx_test_eventq_irq(struct ef
   27.12  
   27.13  		if (channel->eventq_magic == magic)
   27.14  			goto eventq_ok;
   27.15 -	}
   27.16 -	while (++count < 2);
   27.17 +	} while (++count < 2);
   27.18  
   27.19  	EFX_ERR(channel->efx, "channel %d timed out in %ld jiffies waiting for"
   27.20  		" event queue\n", channel->channel, jiffies - j_start);
   27.21 @@ -471,9 +469,6 @@ static int efx_test_loopback(struct efx_
   27.22  			     struct efx_tx_queue *tx_queue,
   27.23  			     struct efx_loopback_self_tests *lb_tests)
   27.24  {
   27.25 -#if !defined(EFX_HAVE_OLD_NAPI)
   27.26 -	struct efx_channel *channel;
   27.27 -#endif
   27.28  	struct efx_selftest_state *state = efx->loopback_selftest;
   27.29  	struct efx_loopback_payload *payload;
   27.30  	struct sk_buff *skb;
   27.31 @@ -513,17 +508,8 @@ static int efx_test_loopback(struct efx_
   27.32  		udelay(10);
   27.33  	}
   27.34  
   27.35 -#if !defined(EFX_HAVE_OLD_NAPI)
   27.36 -	/* NAPI polling is not enabled, so process channels synchronously */
   27.37 -	schedule_timeout_uninterruptible(HZ / 50);
   27.38 -	efx_for_each_channel_with_interrupt(channel, efx) {
   27.39 -		if (channel->work_pending)
   27.40 -			efx_process_channel_now(channel);
   27.41 -	}
   27.42 -#else
   27.43  	/* Allow time for processing */
   27.44  	schedule_timeout_uninterruptible(HZ / 10);
   27.45 -#endif
   27.46  
   27.47  	if (state->flush)
   27.48  		goto out3;
   27.49 @@ -625,7 +611,7 @@ static int efx_test_loopbacks(struct efx
   27.50  	struct ethtool_cmd ecmd, ecmd_loopback;
   27.51  	struct efx_tx_queue *tx_queue;
   27.52  	enum efx_loopback_mode old_mode, mode;
   27.53 -	int old_powered, count, rc = 0;
   27.54 +	int old_powered, count, rc = 0, link_up;
   27.55  	int retry = EFX_WORKAROUND_8909(efx);
   27.56  
   27.57  	/* Get current PHY settings */
   27.58 @@ -663,7 +649,7 @@ static int efx_test_loopbacks(struct efx
   27.59  		state->flush = 1;
   27.60  		efx->phy_powered = 1;
   27.61  		efx->loopback_mode = mode;
   27.62 -		efx_reconfigure_port(efx, 0);
   27.63 +		efx_reconfigure_port(efx);
   27.64  
   27.65  		/* Wait for the PHY to signal the link is up */
   27.66  		count = 0;
   27.67 @@ -677,11 +663,21 @@ static int efx_test_loopbacks(struct efx
   27.68  			/* Wait for PHY events to be processed */
   27.69  			flush_workqueue(efx->workqueue);
   27.70  			rmb();
   27.71 -		} while ((++count < 20) && !efx->link_up);
   27.72 +
   27.73 +			/* efx->link_up can be 1 even if the XAUI link is down
   27.74 +			 * (bug5762). Usually it's not worth bothering with the
   27.75 +			 * difference, but for selftests, we need that extra
   27.76 +			 * guarantee that the link is really, really up.
   27.77 +			 */
   27.78 +			link_up = efx->link_up;
   27.79 +			if (EFX_IS10G(efx) && !falcon_xaui_link_ok(efx))
   27.80 +				link_up = 0;
   27.81 +
   27.82 +		} while ((++count < 20) && !link_up);
   27.83  
   27.84  		/* The link should now be up. If it isn't, there is no point
   27.85  		 * in attempting a loopback test */
   27.86 -		if (!efx->link_up) {
   27.87 +		if (!link_up) {
   27.88  			EFX_ERR(efx, "loopback %s never came up\n",
   27.89  				LOOPBACK_MODE(efx));
   27.90  			rc = -EIO;
   27.91 @@ -712,7 +708,7 @@ fail:
   27.92  
   27.93  			state->flush = 1;
   27.94  			efx->loopback_mode = first;
   27.95 -			efx_reconfigure_port(efx, 0);
   27.96 +			efx_reconfigure_port(efx);
   27.97  
   27.98  			retry = rc = 0;
   27.99  			--mode;
  27.100 @@ -750,8 +746,6 @@ int efx_online_test(struct efx_nic *efx,
  27.101  	struct efx_channel *channel;
  27.102  	int rc = 0;
  27.103  
  27.104 -	ASSERT_RTNL();
  27.105 -
  27.106  	EFX_LOG(efx, "performing online self-tests\n");
  27.107  
  27.108  	rc |= efx_test_interrupts(efx, tests);
  27.109 @@ -779,8 +773,6 @@ int efx_offline_test(struct efx_nic *efx
  27.110  	struct efx_selftest_state *state;
  27.111  	int rc = 0;
  27.112  
  27.113 -	ASSERT_RTNL();
  27.114 -
  27.115  	EFX_LOG(efx, "performing offline self-tests\n");
  27.116  
  27.117  	/* Create a selftest_state structure to hold state for the test */
    28.1 --- a/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h	Tue Mar 31 11:49:12 2009 +0100
    28.2 +++ b/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h	Tue Mar 31 11:59:10 2009 +0100
    28.3 @@ -70,22 +70,23 @@
    28.4  #define efhw_nic_close_hardware(nic) \
    28.5  	((nic)->efhw_func->close_hardware(nic))
    28.6  
    28.7 -#define efhw_nic_init_hardware(nic, ev_handlers, mac_addr) \
    28.8 -	((nic)->efhw_func->init_hardware((nic), (ev_handlers), (mac_addr)))
    28.9 +#define efhw_nic_init_hardware(nic, ev_handlers, mac_addr, non_irq_evq) \
   28.10 +	((nic)->efhw_func->init_hardware((nic), (ev_handlers), (mac_addr), \
   28.11 +					 (non_irq_evq)))
   28.12  
   28.13  /*-------------- Interrupt support  ------------ */
   28.14  /** Handle interrupt.  Return 0 if not handled, 1 if handled. */
   28.15  #define efhw_nic_interrupt(nic) \
   28.16  	((nic)->efhw_func->interrupt(nic))
   28.17  
   28.18 -#define efhw_nic_interrupt_enable(nic, index) \
   28.19 -	((nic)->efhw_func->interrupt_enable(nic, index))
   28.20 +#define efhw_nic_interrupt_enable(nic) \
   28.21 +	((nic)->efhw_func->interrupt_enable(nic))
   28.22  
   28.23 -#define efhw_nic_interrupt_disable(nic, index) \
   28.24 -	((nic)->efhw_func->interrupt_disable(nic, index))
   28.25 +#define efhw_nic_interrupt_disable(nic) \
   28.26 +	((nic)->efhw_func->interrupt_disable(nic))
   28.27  
   28.28 -#define efhw_nic_set_interrupt_moderation(nic, index, val) \
   28.29 -	((nic)->efhw_func->set_interrupt_moderation(nic, index, val))
   28.30 +#define efhw_nic_set_interrupt_moderation(nic, val) \
   28.31 +	((nic)->efhw_func->set_interrupt_moderation(nic, val))
   28.32  
   28.33  /*-------------- Event support  ------------ */
   28.34  
    29.1 --- a/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h	Tue Mar 31 11:49:12 2009 +0100
    29.2 +++ b/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h	Tue Mar 31 11:59:10 2009 +0100
    29.3 @@ -197,7 +197,7 @@ union __u64to32 {
    29.4  };
    29.5  
    29.6  static inline void
    29.7 -falcon_write_ddd_d(efhw_ioaddr_t kva,
    29.8 +falcon_write_ddd_d(volatile char __iomem *kva,
    29.9  		   uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3)
   29.10  {
   29.11  	writel(d0, kva + 0);
   29.12 @@ -207,7 +207,7 @@ falcon_write_ddd_d(efhw_ioaddr_t kva,
   29.13  	writel(d3, kva + 12);
   29.14  }
   29.15  
   29.16 -static inline void falcon_write_q(efhw_ioaddr_t kva, uint64_t q)
   29.17 +static inline void falcon_write_q(volatile char __iomem *kva, uint64_t q)
   29.18  {
   29.19  	union __u64to32 u;
   29.20  	u.u64 = q;
   29.21 @@ -217,7 +217,7 @@ static inline void falcon_write_q(efhw_i
   29.22  	writel(u.s.b, kva + 4);
   29.23  }
   29.24  
   29.25 -static inline void falcon_read_q(efhw_ioaddr_t addr, uint64_t *q0)
   29.26 +static inline void falcon_read_q(volatile char __iomem *addr, uint64_t *q0)
   29.27  {
   29.28  	/* It is essential that we read dword0 first, so that
   29.29  	 * the shadow register is updated with the latest value
   29.30 @@ -232,14 +232,14 @@ static inline void falcon_read_q(efhw_io
   29.31  }
   29.32  
   29.33  static inline void
   29.34 -falcon_write_qq(efhw_ioaddr_t kva, uint64_t q0, uint64_t q1)
   29.35 +falcon_write_qq(volatile char __iomem *kva, uint64_t q0, uint64_t q1)
   29.36  {
   29.37  	writeq(q0, kva + 0);
   29.38  	falcon_write_q(kva + 8, q1);
   29.39  }
   29.40  
   29.41  static inline void
   29.42 -falcon_read_qq(efhw_ioaddr_t addr, uint64_t *q0, uint64_t *q1)
   29.43 +falcon_read_qq(volatile char __iomem *addr, uint64_t *q0, uint64_t *q1)
   29.44  {
   29.45  	falcon_read_q(addr, q0);
   29.46  	*q1 = readq(addr + 8);
   29.47 @@ -390,9 +390,6 @@ static inline int falcon_rx_dma_page_off
   29.48  /* Falcon nails down the event queue mappings */
   29.49  #define FALCON_EVQ_KERNEL0   (0)	/* hardwired for net driver */
   29.50  #define FALCON_EVQ_CHAR      (4)	/* char driver's event queue      */
   29.51 -#define FALCON_EVQ_NONIRQ    (5)	/* char driver's non interrupting
   29.52 -					   queue. Subsequent queues are
   29.53 -					   available for user apps */
   29.54  
   29.55  /* reserved by the drivers */
   29.56  #define FALCON_EVQ_TBL_RESERVED	   (8)
   29.57 @@ -411,7 +408,8 @@ static inline int falcon_rx_dma_page_off
   29.58   *
   29.59   *---------------------------------------------------------------------------*/
   29.60  
   29.61 -static inline void falcon_deadbeef(efhw_ioaddr_t efhw_kva, unsigned what)
   29.62 +static inline void
   29.63 +falcon_deadbeef(volatile char __iomem *efhw_kva, unsigned what)
   29.64  {
   29.65  	writel(what, efhw_kva + 0x300);
   29.66  	mmiowb();
    30.1 --- a/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h	Tue Mar 31 11:49:12 2009 +0100
    30.2 +++ b/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h	Tue Mar 31 11:59:10 2009 +0100
    30.3 @@ -190,8 +190,6 @@ efx_vi_filter_stop(struct efx_vi_state *
    30.4  /*! Constants for the type field in efx_vi_hw_resource */
    30.5  #define EFX_VI_HW_RESOURCE_TXDMAQ    0x0	/* PFN of TX DMA Q */
    30.6  #define EFX_VI_HW_RESOURCE_RXDMAQ    0x1	/* PFN of RX DMA Q */
    30.7 -#define EFX_VI_HW_RESOURCE_TXBELL    0x2	/* PFN of TX Doorbell (EF1) */
    30.8 -#define EFX_VI_HW_RESOURCE_RXBELL    0x3	/* PFN of RX Doorbell (EF1) */
    30.9  #define EFX_VI_HW_RESOURCE_EVQTIMER  0x4	/* Address of event q timer */
   30.10  
   30.11  /* Address of event q pointer (EF1) */
   30.12 @@ -229,7 +227,6 @@ struct efx_vi_hw_resource {
   30.13   * Metadata concerning the list of hardware resource mappings
   30.14   */
   30.15  struct efx_vi_hw_resource_metadata {
   30.16 -	int version;
   30.17  	int evq_order;
   30.18  	int evq_offs;
   30.19  	int evq_capacity;
    31.1 --- a/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h	Tue Mar 31 11:49:12 2009 +0100
    31.2 +++ b/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h	Tue Mar 31 11:59:10 2009 +0100
    31.3 @@ -38,12 +38,6 @@
    31.4  #ifndef __CI_DRIVER_RESOURCE_LINUX_RESOURCE__
    31.5  #define __CI_DRIVER_RESOURCE_LINUX_RESOURCE__
    31.6  
    31.7 -#ifndef __linux__
    31.8 -# error Silly
    31.9 -#endif
   31.10 -#ifndef __KERNEL__
   31.11 -# error Silly
   31.12 -#endif
   31.13  
   31.14  #include <ci/efhw/efhw_types.h>
   31.15  #include <linux/interrupt.h>
    32.1 --- a/drivers/net/sfc/sfc_resource/ci/efhw/common.h	Tue Mar 31 11:49:12 2009 +0100
    32.2 +++ b/drivers/net/sfc/sfc_resource/ci/efhw/common.h	Tue Mar 31 11:59:10 2009 +0100
    32.3 @@ -56,10 +56,6 @@ typedef union {
    32.4  		uint32_t a;
    32.5  		uint32_t b;
    32.6  	} opaque;
    32.7 -	struct {
    32.8 -		uint32_t code;
    32.9 -		uint32_t status;
   32.10 -	} ev1002;
   32.11  } efhw_event_t;
   32.12  
   32.13  /* Flags for TX/RX queues */
    33.1 --- a/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h	Tue Mar 31 11:49:12 2009 +0100
    33.2 +++ b/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h	Tue Mar 31 11:59:10 2009 +0100
    33.3 @@ -52,7 +52,7 @@
    33.4  
    33.5  /* Linux kernel also does not provide PRIx32... Sigh. */
    33.6  #define PRIx32 "x"
    33.7 -
    33.8 + 
    33.9  #ifdef __ia64__
   33.10  # define PRIx64 "lx"
   33.11  #else
    34.1 --- a/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h	Tue Mar 31 11:49:12 2009 +0100
    34.2 +++ b/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h	Tue Mar 31 11:59:10 2009 +0100
    34.3 @@ -45,14 +45,6 @@
    34.4  
    34.5  /*--------------------------------------------------------------------
    34.6   *
    34.7 - * hardware limits used in the types
    34.8 - *
    34.9 - *--------------------------------------------------------------------*/
   34.10 -
   34.11 -#define EFHW_KEVENTQ_MAX    8
   34.12 -
   34.13 -/*--------------------------------------------------------------------
   34.14 - *
   34.15   * forward type declarations
   34.16   *
   34.17   *--------------------------------------------------------------------*/
   34.18 @@ -72,7 +64,7 @@ struct efhw_buffer_table_allocation{
   34.19  
   34.20  struct eventq_resource_hardware {
   34.21  	/*!iobuffer allocated for eventq - can be larger than eventq */
   34.22 -	efhw_iopages_t iobuff;
   34.23 +	struct efhw_iopages iobuff;
   34.24  	unsigned iobuff_off;
   34.25  	struct efhw_buffer_table_allocation buf_tbl_alloc;
   34.26  	int capacity;		/*!< capacity of event queue */
   34.27 @@ -85,7 +77,7 @@ struct eventq_resource_hardware {
   34.28   *--------------------------------------------------------------------*/
   34.29  
   34.30  struct efhw_keventq {
   34.31 -	volatile int lock;
   34.32 +	int lock;
   34.33  	caddr_t evq_base;
   34.34  	int32_t evq_ptr;
   34.35  	uint32_t evq_mask;
   34.36 @@ -115,7 +107,7 @@ struct efhw_func_ops {
   34.37  	/*! initialise all hardware functional units */
   34.38  	int (*init_hardware) (struct efhw_nic *nic,
   34.39  			      struct efhw_ev_handler *,
   34.40 -			      const uint8_t *mac_addr);
   34.41 +			      const uint8_t *mac_addr, int non_irq_evq);
   34.42  
   34.43    /*-------------- Interrupt support  ------------ */
   34.44  
   34.45 @@ -130,17 +122,17 @@ struct efhw_func_ops {
   34.46  	 */
   34.47  	int (*interrupt) (struct efhw_nic *nic);
   34.48  
   34.49 -	/*! Enable given interrupt mask for the given IRQ unit */
   34.50 -	void (*interrupt_enable) (struct efhw_nic *nic, uint idx);
   34.51 +	/*! Enable the interrupt */
   34.52 +	void (*interrupt_enable) (struct efhw_nic *nic);
   34.53  
   34.54 -	/*! Disable given interrupt mask for the given IRQ unit */
   34.55 -	void (*interrupt_disable) (struct efhw_nic *nic, uint idx);
   34.56 +	/*! Disable the interrupt */
   34.57 +	void (*interrupt_disable) (struct efhw_nic *nic);
   34.58  
   34.59  	/*! Set interrupt moderation strategy for the given IRQ unit
   34.60  	 ** val is in usec
   34.61  	 */
   34.62  	void (*set_interrupt_moderation)(struct efhw_nic *nic,
   34.63 -					 uint idx, uint val);
   34.64 +					 uint val);
   34.65  
   34.66    /*-------------- Event support  ------------ */
   34.67  
   34.68 @@ -255,8 +247,8 @@ struct efhw_device_type {
   34.69  
   34.70  /*! */
   34.71  struct efhw_nic {
   34.72 -	/*! zero base index in efrm_nic_table.nic array */
   34.73 -	volatile int index;
   34.74 +	/*! zero base index in efrm_nic_tablep->nic array */
   34.75 +	int index;
   34.76  	int ifindex;		/*!< OS level nic index */
   34.77  #ifdef HAS_NET_NAMESPACE
   34.78  	struct net *nd_net;
   34.79 @@ -283,7 +275,7 @@ struct efhw_nic {
   34.80  	/* hardware resources */
   34.81  
   34.82  	/*! I/O address of the start of the bar */
   34.83 -	efhw_ioaddr_t bar_ioaddr;
   34.84 +	volatile char __iomem *bar_ioaddr;
   34.85  
   34.86  	/*! Bar number of control aperture. */
   34.87  	unsigned ctr_ap_bar;
   34.88 @@ -312,14 +304,17 @@ struct efhw_nic {
   34.89  	void (*irq_handler) (struct efhw_nic *, int unit);
   34.90  
   34.91  	/*! event queues per driver */
   34.92 -	struct efhw_keventq evq[EFHW_KEVENTQ_MAX];
   34.93 +	struct efhw_keventq interrupting_evq;
   34.94  
   34.95  /* for marking when we are not using an IRQ unit
   34.96        - 0 is a valid offset to an IRQ unit on EF1! */
   34.97  #define EFHW_IRQ_UNIT_UNUSED  0xffff
   34.98 -	/*! interrupt unit in use  */
   34.99 -	unsigned int irq_unit[EFHW_KEVENTQ_MAX];
  34.100 -	efhw_iopage_t irq_iobuff;	/*!<  Falcon SYSERR interrupt */
  34.101 +	/*! interrupt unit in use for the interrupting event queue  */
  34.102 +	unsigned int irq_unit;
  34.103 +
  34.104 +	struct efhw_keventq non_interrupting_evq;
  34.105 +
  34.106 +	struct efhw_iopage irq_iobuff;	/*!<  Falcon SYSERR interrupt */
  34.107  
  34.108  	/* The new driverlink infrastructure. */
  34.109  	struct efx_dl_device *net_driver_dev;
    35.1 --- a/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h	Tue Mar 31 11:49:12 2009 +0100
    35.2 +++ b/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h	Tue Mar 31 11:59:10 2009 +0100
    35.3 @@ -47,10 +47,9 @@ extern int efhw_keventq_poll(struct efhw
    35.4  
    35.5  /*! Callbacks for handling events. */
    35.6  struct efhw_ev_handler {
    35.7 -	void (*wakeup_fn)(struct efhw_nic *nic, efhw_event_t *ev);
    35.8 -	void (*timeout_fn)(struct efhw_nic *nic, efhw_event_t *ev);
    35.9 -	void (*sw_fn)(struct efhw_nic *nic, efhw_event_t *ev);
   35.10 -	void (*dmaq_flushed_fn) (struct efhw_nic *, int, int);
   35.11 +	void (*wakeup_fn)(struct efhw_nic *nic, unsigned);
   35.12 +	void (*timeout_fn)(struct efhw_nic *nic, unsigned);
   35.13 +	void (*dmaq_flushed_fn) (struct efhw_nic *, unsigned, int);
   35.14  };
   35.15  
   35.16  extern int efhw_keventq_ctor(struct efhw_nic *, int instance,
    36.1 --- a/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h	Tue Mar 31 11:49:12 2009 +0100
    36.2 +++ b/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h	Tue Mar 31 11:59:10 2009 +0100
    36.3 @@ -71,11 +71,6 @@ extern int
    36.4  falcon_handle_char_event(struct efhw_nic *nic,
    36.5  			 struct efhw_ev_handler *h, efhw_event_t *evp);
    36.6  
    36.7 -/*! map event queue instance space (0,1,2,..) onto event queue
    36.8 -  number. This function takes into account the allocation rules for
    36.9 -  the underlying driver model */
   36.10 -extern int falcon_idx_to_evq(struct efhw_nic *nic, uint idx);
   36.11 -
   36.12  /*! Acknowledge to HW that processing is complete on a given event queue */
   36.13  extern void falcon_nic_evq_ack(struct efhw_nic *nic, uint evq,	/* evq id */
   36.14  			       uint rptr,	/* new read pointer update */
    37.1 --- a/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h	Tue Mar 31 11:49:12 2009 +0100
    37.2 +++ b/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h	Tue Mar 31 11:59:10 2009 +0100
    37.3 @@ -50,6 +50,10 @@
    37.4  #error Unknown endianness
    37.5  #endif
    37.6  
    37.7 +#ifndef __iomem
    37.8 +#define __iomem
    37.9 +#endif
   37.10 +
   37.11  #ifndef mmiowb
   37.12  	#if defined(__i386__) || defined(__x86_64__)
   37.13  		#define mmiowb()
   37.14 @@ -63,10 +67,8 @@
   37.15  	#endif
   37.16  #endif
   37.17  
   37.18 -typedef char *efhw_ioaddr_t;
   37.19 -
   37.20  #ifndef readq
   37.21 -static inline uint64_t __readq(void __iomem *addr)
   37.22 +static inline uint64_t __readq(volatile void __iomem *addr)
   37.23  {
   37.24  	return *(volatile uint64_t *)addr;
   37.25  }
   37.26 @@ -74,7 +76,7 @@ static inline uint64_t __readq(void __io
   37.27  #endif
   37.28  
   37.29  #ifndef writeq
   37.30 -static inline void __writeq(uint64_t v, void __iomem *addr)
   37.31 +static inline void __writeq(uint64_t v, volatile void __iomem *addr)
   37.32  {
   37.33  	*(volatile uint64_t *)addr = v;
   37.34  }
    38.1 --- a/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h	Tue Mar 31 11:49:12 2009 +0100
    38.2 +++ b/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h	Tue Mar 31 11:59:10 2009 +0100
    38.3 @@ -48,11 +48,11 @@
    38.4   *
    38.5   *--------------------------------------------------------------------*/
    38.6  
    38.7 -extern int efhw_iopage_alloc(struct efhw_nic *, efhw_iopage_t *p);
    38.8 -extern void efhw_iopage_free(struct efhw_nic *, efhw_iopage_t *p);
    38.9 +extern int efhw_iopage_alloc(struct efhw_nic *, struct efhw_iopage *p);
   38.10 +extern void efhw_iopage_free(struct efhw_nic *, struct efhw_iopage *p);
   38.11  
   38.12 -extern int efhw_iopages_alloc(struct efhw_nic *, efhw_iopages_t *p,
   38.13 +extern int efhw_iopages_alloc(struct efhw_nic *, struct efhw_iopages *p,
   38.14  			      unsigned order);
   38.15 -extern void efhw_iopages_free(struct efhw_nic *, efhw_iopages_t *p);
   38.16 +extern void efhw_iopages_free(struct efhw_nic *, struct efhw_iopages *p);
   38.17  
   38.18  #endif /* __CI_DRIVER_RESOURCE_IOPAGE_H__ */
    39.1 --- a/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h	Tue Mar 31 11:49:12 2009 +0100
    39.2 +++ b/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h	Tue Mar 31 11:59:10 2009 +0100
    39.3 @@ -3,7 +3,8 @@
    39.4   *          resource management for Xen backend, OpenOnload, etc
    39.5   *           (including support for SFE4001 10GBT NIC)
    39.6   *
    39.7 - * This file provides efhw_page_t and efhw_iopage_t for Linux kernel.
    39.8 + * This file provides struct efhw_page and struct efhw_iopage for Linux
    39.9 + * kernel.
   39.10   *
   39.11   * Copyright 2005-2007: Solarflare Communications Inc,
   39.12   *                      9501 Jeronimo Road, Suite 250,
   39.13 @@ -38,77 +39,83 @@
   39.14  #ifndef __CI_EFHW_IOPAGE_LINUX_H__
   39.15  #define __CI_EFHW_IOPAGE_LINUX_H__
   39.16  
   39.17 +#include <linux/version.h>
   39.18  #include <linux/gfp.h>
   39.19 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
   39.20  #include <linux/hardirq.h>
   39.21 +#else
   39.22 +#include <asm/hardirq.h>
   39.23 +#endif
   39.24 +#include <linux/errno.h>
   39.25  #include <ci/efhw/debug.h>
   39.26  
   39.27  /*--------------------------------------------------------------------
   39.28   *
   39.29 - * efhw_page_t: A single page of memory.  Directly mapped in the driver,
   39.30 - * and can be mapped to userlevel.
   39.31 + * struct efhw_page: A single page of memory.  Directly mapped in the
   39.32 + * driver, and can be mapped to userlevel.
   39.33   *
   39.34   *--------------------------------------------------------------------*/
   39.35  
   39.36 -typedef struct {
   39.37 +struct efhw_page {
   39.38  	unsigned long kva;
   39.39 -} efhw_page_t;
   39.40 +};
   39.41  
   39.42 -static inline int efhw_page_alloc(efhw_page_t *p)
   39.43 +static inline int efhw_page_alloc(struct efhw_page *p)
   39.44  {
   39.45  	p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
   39.46  	return p->kva ? 0 : -ENOMEM;
   39.47  }
   39.48  
   39.49 -static inline int efhw_page_alloc_zeroed(efhw_page_t *p)
   39.50 +static inline int efhw_page_alloc_zeroed(struct efhw_page *p)
   39.51  {
   39.52  	p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
   39.53  	return p->kva ? 0 : -ENOMEM;
   39.54  }
   39.55  
   39.56 -static inline void efhw_page_free(efhw_page_t *p)
   39.57 +static inline void efhw_page_free(struct efhw_page *p)
   39.58  {
   39.59  	free_page(p->kva);
   39.60  	EFHW_DO_DEBUG(memset(p, 0, sizeof(*p)));
   39.61  }
   39.62  
   39.63 -static inline char *efhw_page_ptr(efhw_page_t *p)
   39.64 +static inline char *efhw_page_ptr(struct efhw_page *p)
   39.65  {
   39.66  	return (char *)p->kva;
   39.67  }
   39.68  
   39.69 -static inline unsigned efhw_page_pfn(efhw_page_t *p)
   39.70 +static inline unsigned efhw_page_pfn(struct efhw_page *p)
   39.71  {
   39.72  	return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
   39.73  }
   39.74  
   39.75 -static inline void efhw_page_mark_invalid(efhw_page_t *p)
   39.76 +static inline void efhw_page_mark_invalid(struct efhw_page *p)
   39.77  {
   39.78  	p->kva = 0;
   39.79  }
   39.80  
   39.81 -static inline int efhw_page_is_valid(efhw_page_t *p)
   39.82 +static inline int efhw_page_is_valid(struct efhw_page *p)
   39.83  {
   39.84  	return p->kva != 0;
   39.85  }
   39.86  
   39.87 -static inline void efhw_page_init_from_va(efhw_page_t *p, void *va)
   39.88 +static inline void efhw_page_init_from_va(struct efhw_page *p, void *va)
   39.89  {
   39.90  	p->kva = (unsigned long)va;
   39.91  }
   39.92  
   39.93  /*--------------------------------------------------------------------
   39.94   *
   39.95 - * efhw_iopage_t: A single page of memory.  Directly mapped in the driver,
   39.96 + * struct efhw_iopage: A single page of memory.  Directly mapped in the driver,
   39.97   * and can be mapped to userlevel.  Can also be accessed by the NIC.
   39.98   *
   39.99   *--------------------------------------------------------------------*/
  39.100  
  39.101 -typedef struct {
  39.102 -	efhw_page_t p;
  39.103 +struct efhw_iopage {
  39.104 +	struct efhw_page p;
  39.105  	dma_addr_t dma_addr;
  39.106 -} efhw_iopage_t;
  39.107 +};
  39.108  
  39.109 -static inline dma_addr_t efhw_iopage_dma_addr(efhw_iopage_t *p)
  39.110 +static inline dma_addr_t efhw_iopage_dma_addr(struct efhw_iopage *p)
  39.111  {
  39.112  	return p->dma_addr;
  39.113  }
  39.114 @@ -120,9 +127,9 @@ static inline dma_addr_t efhw_iopage_dma
  39.115  
  39.116  /*--------------------------------------------------------------------
  39.117   *
  39.118 - * efhw_iopages_t: A set of pages that are contiguous in physical memory.
  39.119 - * Directly mapped in the driver, and can be mapped to userlevel.  Can also
  39.120 - * be accessed by the NIC.
  39.121 + * struct efhw_iopages: A set of pages that are contiguous in physical
  39.122 + * memory.  Directly mapped in the driver, and can be mapped to userlevel.
  39.123 + * Can also be accessed by the NIC.
  39.124   *
  39.125   * NB. The O/S may be unwilling to allocate many, or even any of these.  So
  39.126   * only use this type where the NIC really needs a physically contiguous
  39.127 @@ -130,44 +137,44 @@ static inline dma_addr_t efhw_iopage_dma
  39.128   *
  39.129   *--------------------------------------------------------------------*/
  39.130  
  39.131 -typedef struct {
  39.132 +struct efhw_iopages {
  39.133  	caddr_t kva;
  39.134  	unsigned order;
  39.135  	dma_addr_t dma_addr;
  39.136 -} efhw_iopages_t;
  39.137 +};
  39.138  
  39.139 -static inline caddr_t efhw_iopages_ptr(efhw_iopages_t *p)
  39.140 +static inline caddr_t efhw_iopages_ptr(struct efhw_iopages *p)
  39.141  {
  39.142  	return p->kva;
  39.143  }
  39.144  
  39.145 -static inline unsigned efhw_iopages_pfn(efhw_iopages_t *p)
  39.146 +static inline unsigned efhw_iopages_pfn(struct efhw_iopages *p)
  39.147  {
  39.148  	return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
  39.149  }
  39.150  
  39.151 -static inline dma_addr_t efhw_iopages_dma_addr(efhw_iopages_t *p)
  39.152 +static inline dma_addr_t efhw_iopages_dma_addr(struct efhw_iopages *p)
  39.153  {
  39.154  	return p->dma_addr;
  39.155  }
  39.156  
  39.157 -static inline unsigned efhw_iopages_size(efhw_iopages_t *p)
  39.158 +static inline unsigned efhw_iopages_size(struct efhw_iopages *p)
  39.159  {
  39.160  	return 1u << (p->order + PAGE_SHIFT);
  39.161  }
  39.162  
  39.163 -/* efhw_iopage_t <-> efhw_iopages_t conversions for handling physically
  39.164 - * contiguous allocations in iobufsets for iSCSI.  This allows the
  39.165 - * essential information about contiguous allocations from
  39.166 - * efhw_iopages_alloc() to be saved away in the efhw_iopage_t array in an
  39.167 - * iobufset.  (Changing the iobufset resource to use a union type would
  39.168 +/* struct efhw_iopage <-> struct efhw_iopages conversions for handling
  39.169 + * physically contiguous allocations in iobufsets for iSCSI.  This allows
  39.170 + * the essential information about contiguous allocations from
  39.171 + * efhw_iopages_alloc() to be saved away in the struct efhw_iopage array in
  39.172 + * an iobufset.  (Changing the iobufset resource to use a union type would
  39.173   * involve a lot of code changes, and make the iobufset's metadata larger
  39.174   * which could be bad as it's supposed to fit into a single page on some
  39.175   * platforms.)
  39.176   */
  39.177  static inline void
  39.178 -efhw_iopage_init_from_iopages(efhw_iopage_t *iopage,
  39.179 -			    efhw_iopages_t *iopages, unsigned pageno)
  39.180 +efhw_iopage_init_from_iopages(struct efhw_iopage *iopage,
  39.181 +			      struct efhw_iopages *iopages, unsigned pageno)
  39.182  {
  39.183  	iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages))
  39.184  	    + (pageno * PAGE_SIZE);
  39.185 @@ -176,8 +183,8 @@ efhw_iopage_init_from_iopages(efhw_iopag
  39.186  }
  39.187  
  39.188  static inline void
  39.189 -efhw_iopages_init_from_iopage(efhw_iopages_t *iopages,
  39.190 -			    efhw_iopage_t *iopage, unsigned order)
  39.191 +efhw_iopages_init_from_iopage(struct efhw_iopages *iopages,
  39.192 +			      struct efhw_iopage *iopage, unsigned order)
  39.193  {
  39.194  	iopages->kva = (caddr_t) efhw_iopage_ptr(iopage);
  39.195  	EFHW_ASSERT(iopages->kva);
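
The conversion helpers above let one physically contiguous allocation be recorded page-by-page, as the iobufset code needs. A minimal sketch of that use, assuming the efhw_iopages_alloc() prototype from iopage.h earlier in this change (error handling trimmed):

	/* Populate an array of struct efhw_iopage from one contiguous
	 * allocation of 2^order pages.  "nic" and "bufs" are assumed to
	 * be set up by the caller; bufs must hold at least 2^order entries. */
	static int example_fill_iopages(struct efhw_nic *nic,
					struct efhw_iopage *bufs, unsigned order)
	{
		struct efhw_iopages pages;
		unsigned i, n = 1u << order;
		int rc;

		rc = efhw_iopages_alloc(nic, &pages, order);
		if (rc < 0)
			return rc;

		for (i = 0; i < n; ++i)
			efhw_iopage_init_from_iopages(&bufs[i], &pages, i);

		return 0;
	}
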
    40.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h	Tue Mar 31 11:49:12 2009 +0100
    40.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h	Tue Mar 31 11:59:10 2009 +0100
    40.3 @@ -69,16 +69,16 @@ extern int efrm_driver_unregister_nic(st
    40.4   *--------------------------------------------------------------------*/
    40.5  
    40.6  struct vi_resource_dimensions {
    40.7 -	unsigned evq_int_min, evq_int_max;
    40.8 -	unsigned evq_timer_min, evq_timer_max;
    40.9 -	unsigned rxq_min, rxq_max;
   40.10 -	unsigned txq_min, txq_max;
   40.11 +	unsigned evq_int_min, evq_int_lim;
   40.12 +	unsigned evq_timer_min, evq_timer_lim;
   40.13 +	unsigned rxq_min, rxq_lim;
   40.14 +	unsigned txq_min, txq_lim;
   40.15  };
   40.16  
   40.17  /*! Initialise resources */
   40.18  extern int
   40.19  efrm_resources_init(const struct vi_resource_dimensions *,
   40.20 -		    int buffer_table_min, int buffer_table_max);
   40.21 +		    int buffer_table_min, int buffer_table_lim);
   40.22  
   40.23  /*! Tear down resources */
   40.24  extern void efrm_resources_fini(void);
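
The rename from *_max to *_lim reads as a switch to half-open ranges; the patch does not say so explicitly, so this is an assumption. A one-line sketch of the assumed convention:

	/* Assumed convention behind the _lim rename: [min, lim) is half-open,
	 * so lim is one past the last valid index and the count is lim - min. */
	static unsigned example_queue_count(unsigned min, unsigned lim)
	{
		return lim - min;	/* e.g. rxq range [4, 12) -> 8 queues */
	}
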
    41.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h	Tue Mar 31 11:49:12 2009 +0100
    41.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h	Tue Mar 31 11:59:10 2009 +0100
    41.3 @@ -55,7 +55,7 @@ struct iobufset_resource {
    41.4  	unsigned int n_bufs;
    41.5  	unsigned int pages_per_contiguous_chunk;
    41.6  	unsigned order;
    41.7 -	efhw_iopage_t bufs[1];
    41.8 +	struct efhw_iopage bufs[1];
    41.9  	/*!< up to n_bufs can follow this, so this must be the last member */
   41.10  };
   41.11  
    42.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h	Tue Mar 31 11:49:12 2009 +0100
    42.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h	Tue Mar 31 11:59:10 2009 +0100
    42.3 @@ -62,21 +62,21 @@ struct efrm_nic_table {
    42.4  };
    42.5  
    42.6  /* Resource driver structures used by other drivers as well */
    42.7 -extern struct efrm_nic_table efrm_nic_table;
    42.8 +extern struct efrm_nic_table *efrm_nic_tablep;
    42.9  
   42.10  static inline void efrm_nic_table_hold(void)
   42.11  {
   42.12 -	atomic_inc(&efrm_nic_table.ref_count);
   42.13 +	atomic_inc(&efrm_nic_tablep->ref_count);
   42.14  }
   42.15  
   42.16  static inline void efrm_nic_table_rele(void)
   42.17  {
   42.18 -	atomic_dec(&efrm_nic_table.ref_count);
   42.19 +	atomic_dec(&efrm_nic_tablep->ref_count);
   42.20  }
   42.21  
   42.22  static inline int efrm_nic_table_held(void)
   42.23  {
   42.24 -	return (atomic_read(&efrm_nic_table.ref_count) != 0);
   42.25 +	return (atomic_read(&efrm_nic_tablep->ref_count) != 0);
   42.26  }
   42.27  
   42.28  /* Run code block _x multiple times with variable nic set to each
   42.29 @@ -86,13 +86,13 @@ static inline int efrm_nic_table_held(vo
   42.30  	for ((_nic_i) = (efrm_nic_table_hold(), 0);			\
   42.31  	     (_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0);	\
   42.32  	     (_nic_i)++)						\
   42.33 -		if (((_nic) = efrm_nic_table.nic[_nic_i]))
   42.34 +		if (((_nic) = efrm_nic_tablep->nic[_nic_i]))
   42.35  
   42.36  #define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic)			\
   42.37  	for ((_i) = (efrm_nic_table_hold(), 0);				\
   42.38  	     (_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0);	\
   42.39  	     ++(_i))							\
   42.40 -		if (((_nic) = efrm_nic_table.nic[_i]) &&		\
   42.41 +		if (((_nic) = efrm_nic_tablep->nic[_i]) &&		\
   42.42  		    efrm_nic_set_read((_set), (_i)))
   42.43  
   42.44  #endif /* __CI_EFRM_NIC_TABLE_H__ */
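
EFRM_FOR_EACH_NIC takes the table reference on entry and drops it when iteration finishes, so callers need no explicit hold/rele pair. A minimal usage sketch (the counting body is illustrative only):

	/* Walk every registered NIC; the macro handles the table reference. */
	static void example_count_nics(void)
	{
		struct efhw_nic *nic;
		int nic_i, count = 0;

		EFRM_FOR_EACH_NIC(nic_i, nic)
			++count;

		EFRM_TRACE("%s: %d NIC(s) registered", __FUNCTION__, count);
	}
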
    43.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/private.h	Tue Mar 31 11:49:12 2009 +0100
    43.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/private.h	Tue Mar 31 11:59:10 2009 +0100
    43.3 @@ -93,7 +93,15 @@ efrm_kfifo_id_ctor(struct kfifo **ids_ou
    43.4  	unsigned int i;
    43.5  	struct kfifo *ids;
    43.6  	unsigned char *buffer;
    43.7 +#ifndef TCP_CHIMNEY_SUPPORT
    43.8  	unsigned int size = roundup_pow_of_two((limit - base) * sizeof(int));
    43.9 +#else
   43.10 +        /* ### TODO - Linux kfifos really are a power of two, but
   43.11 +                      sysdep_ci2linux uses ci_fifo2s, which only hold
   43.12 +                      2^n - 1 entries.  We must double the buffer size,
   43.13 +                      not add one, because a ci_fifo2 must be a power of two. */
   43.14 +	unsigned int size = roundup_pow_of_two((limit - base) * 2 * sizeof(int));
   43.15 +#endif
   43.16  
   43.17  	EFRM_ASSERT(base <= limit);
   43.18  	buffer = vmalloc(size);
    44.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h	Tue Mar 31 11:49:12 2009 +0100
    44.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h	Tue Mar 31 11:59:10 2009 +0100
    44.3 @@ -48,7 +48,7 @@
    44.4   * level.
    44.5   ***********************************************************************/
    44.6  
    44.7 -typedef struct efrm_resource_handle_s {
    44.8 +typedef struct {
    44.9  	uint32_t handle;
   44.10  } efrm_resource_handle_t;
   44.11  
    45.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h	Tue Mar 31 11:49:12 2009 +0100
    45.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h	Tue Mar 31 11:59:10 2009 +0100
    45.3 @@ -41,14 +41,8 @@
    45.4  /* Spinlocks are defined in efhw/sysdep.h */
    45.5  #include <ci/efhw/sysdep.h>
    45.6  
    45.7 -#if defined(__linux__) && defined(__KERNEL__)
    45.8  
    45.9  # include <ci/efrm/sysdep_linux.h>
   45.10  
   45.11 -#else
   45.12 -
   45.13 -# include <ci/efrm/sysdep_ci2linux.h>
   45.14 -
   45.15 -#endif
   45.16  
   45.17  #endif /* __CI_EFRM_SYSDEP_H__ */
    46.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h	Tue Mar 31 11:49:12 2009 +0100
    46.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h	Tue Mar 31 11:59:10 2009 +0100
    46.3 @@ -50,7 +50,11 @@
    46.4  #include <linux/workqueue.h>
    46.5  #include <linux/gfp.h>
    46.6  #include <linux/slab.h>
    46.7 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
    46.8  #include <linux/hardirq.h>
    46.9 +#else
   46.10 +#include <asm/hardirq.h>
   46.11 +#endif
   46.12  #include <linux/kernel.h>
   46.13  #include <linux/if_ether.h>
   46.14  #include <linux/completion.h>
   46.15 @@ -61,6 +65,21 @@
   46.16  #include <linux/log2.h>
   46.17  #endif
   46.18  
   46.19 +
   46.20 +/********************************************************************
   46.21 + *
   46.22 + * Utility functions
   46.23 + *
   46.24 + ********************************************************************/
   46.25 +
   46.26 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
   46.27 +static inline unsigned long __attribute_const__ roundup_pow_of_two(unsigned long x)
   46.28 +{
   46.29 +        return (1UL << fls(x - 1));
   46.30 +}
   46.31 +#endif
   46.32 +
   46.33 +
   46.34  /********************************************************************
   46.35   *
   46.36   * List API
    47.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h	Tue Mar 31 11:49:12 2009 +0100
    47.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h	Tue Mar 31 11:59:10 2009 +0100
    47.3 @@ -168,4 +168,12 @@ uint32_t efrm_vi_rm_rxq_bytes(struct vi_
    47.4  uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs
    47.5  			      /*,struct efhw_nic *nic */ );
    47.6  
    47.7 +
    47.8 +/* Fill [out_vi_data] with the information needed to initialise a VI.
    47.9 + * [out_vi_data] must point to at least VI_MAPPINGS_SIZE bytes.
   47.10 + */
   47.11 +extern void efrm_vi_resource_mappings(struct vi_resource *, int nic_i,
   47.12 +				      void *out_vi_data);
   47.13 +
   47.14 +
   47.15  #endif /* __CI_EFRM_VI_RESOURCE_H__ */
    48.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h	Tue Mar 31 11:49:12 2009 +0100
    48.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h	Tue Mar 31 11:59:10 2009 +0100
    48.3 @@ -67,11 +67,7 @@ struct vi_resource_evq_info {
    48.4  	struct vi_resource *evq_virs;
    48.5  };
    48.6  
    48.7 -#ifdef __ci_ul_driver__
    48.8 -#define EFRM_VI_USE_WORKQUEUE 0
    48.9 -#else
   48.10  #define EFRM_VI_USE_WORKQUEUE 1
   48.11 -#endif
   48.12  
   48.13  /*! Global information for the VI resource manager. */
   48.14  struct vi_resource_manager {
   48.15 @@ -115,7 +111,7 @@ struct vi_resource_manager {
   48.16  struct vi_resource_nic_info {
   48.17  	struct eventq_resource_hardware evq_pages;
   48.18  #if defined(__CI_HARDWARE_CONFIG_FALCON__)
   48.19 -	efhw_iopages_t dmaq_pages[EFRM_VI_RM_DMA_QUEUE_COUNT];
   48.20 +	struct efhw_iopages dmaq_pages[EFRM_VI_RM_DMA_QUEUE_COUNT];
   48.21  #endif
   48.22  };
   48.23  
    49.1 --- a/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h	Tue Mar 31 11:49:12 2009 +0100
    49.2 +++ b/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h	Tue Mar 31 11:59:10 2009 +0100
    49.3 @@ -70,14 +70,17 @@ efrm_eventq_base(struct vi_resource *vir
    49.4  				 hw->iobuff_off);
    49.5  }
    49.6  
    49.7 -/*! Wakeup handler, see efhw_ev_handler_t for prototype */
    49.8 -extern void efrm_handle_wakeup_event(struct efhw_nic *nic, efhw_event_t *ev);
    49.9 +/*! Wakeup handler */
   49.10 +extern void efrm_handle_wakeup_event(struct efhw_nic *nic, unsigned id);
   49.11  
   49.12 -/*! Timeout handler, see efhw_ev_handler_t for prototype */
   49.13 -extern void efrm_handle_timeout_event(struct efhw_nic *nic, efhw_event_t *ev);
   49.14 +/*! Timeout handler */
   49.15 +extern void efrm_handle_timeout_event(struct efhw_nic *nic, unsigned id);
   49.16  
   49.17 -/*! DMA flush handler, see efhw_ev_handler_t for prototype */
   49.18 -extern void efrm_handle_dmaq_flushed(struct efhw_nic *nic, int instance,
   49.19 +/*! DMA flush handler */
   49.20 +extern void efrm_handle_dmaq_flushed(struct efhw_nic *nic, unsigned id,
   49.21  				   int rx_flush);
   49.22  
   49.23 +/*! SRAM update handler */
   49.24 +extern void efrm_handle_sram_event(struct efhw_nic *nic);
   49.25 +
   49.26  #endif /* __CI_EFRM_VI_RESOURCE_PRIVATE_H__ */
    50.1 --- a/drivers/net/sfc/sfc_resource/driver_object.c	Tue Mar 31 11:49:12 2009 +0100
    50.2 +++ b/drivers/net/sfc/sfc_resource/driver_object.c	Tue Mar 31 11:59:10 2009 +0100
    50.3 @@ -48,10 +48,10 @@
    50.4  */
    50.5  
    50.6  #define efrm_driver_lock(irqlock_state) \
    50.7 -	spin_lock_irqsave(&efrm_nic_table.lock, irqlock_state)
    50.8 +	spin_lock_irqsave(&efrm_nic_tablep->lock, irqlock_state)
    50.9  
   50.10  #define efrm_driver_unlock(irqlock_state)		\
   50.11 -	spin_unlock_irqrestore(&efrm_nic_table.lock,	\
   50.12 +	spin_unlock_irqrestore(&efrm_nic_tablep->lock,	\
   50.13  			       irqlock_state);
   50.14  
   50.15  /* These routines are all methods on the architecturally singleton
   50.16 @@ -63,8 +63,9 @@
   50.17  */
   50.18  
   50.19  /*! Exported driver state */
   50.20 -struct efrm_nic_table efrm_nic_table;
   50.21 -EXPORT_SYMBOL(efrm_nic_table);
   50.22 +static struct efrm_nic_table efrm_nic_table;
   50.23 +struct efrm_nic_table *efrm_nic_tablep;
   50.24 +EXPORT_SYMBOL(efrm_nic_tablep);
   50.25  
   50.26  /* Internal table with resource managers.
   50.27   * We'd like to not export it, but we are still using efrm_rm_table
   50.28 @@ -75,10 +76,8 @@ EXPORT_SYMBOL(efrm_rm_table);
   50.29  
   50.30  int efrm_driver_ctor(void)
   50.31  {
   50.32 -	memset(&efrm_nic_table, 0, sizeof(efrm_nic_table));
   50.33 -	memset(&efrm_rm_table, 0, sizeof(efrm_rm_table));
   50.34 -
   50.35 -	spin_lock_init(&efrm_nic_table.lock);
    50.36 +	efrm_nic_tablep = &efrm_nic_table;
   50.37 +	spin_lock_init(&efrm_nic_tablep->lock);
   50.38  
   50.39  	EFRM_TRACE("%s: driver created", __FUNCTION__);
   50.40  	return 0;
   50.41 @@ -88,7 +87,10 @@ int efrm_driver_dtor(void)
   50.42  {
   50.43  	EFRM_ASSERT(!efrm_nic_table_held());
   50.44  
   50.45 -	spin_lock_destroy(&efrm_nic_table.lock);
   50.46 +	spin_lock_destroy(&efrm_nic_tablep->lock);
   50.47 +	memset(&efrm_nic_table, 0, sizeof(efrm_nic_table));
   50.48 +	memset(&efrm_rm_table, 0, sizeof(efrm_rm_table));
   50.49 +
   50.50  	EFRM_TRACE("%s: driver deleted", __FUNCTION__);
   50.51  	return 0;
   50.52  }
   50.53 @@ -108,21 +110,21 @@ int efrm_driver_register_nic(struct efhw
   50.54  		goto done;
   50.55  	}
   50.56  
   50.57 -	if (efrm_nic_table.nic_count == EFHW_MAX_NR_DEVS) {
   50.58 +	if (efrm_nic_tablep->nic_count == EFHW_MAX_NR_DEVS) {
   50.59  		EFRM_WARN("%s: filled up NIC table size %d", __FUNCTION__,
   50.60  			  EFHW_MAX_NR_DEVS);
   50.61  		rc = -E2BIG;
   50.62  		goto done;
   50.63  	}
   50.64  
   50.65 -	EFRM_ASSERT(efrm_nic_table.nic[nic_index] == NULL);
   50.66 -	efrm_nic_table.nic[nic_index] = nic;
   50.67 +	EFRM_ASSERT(efrm_nic_tablep->nic[nic_index] == NULL);
   50.68 +	efrm_nic_tablep->nic[nic_index] = nic;
   50.69  	nic->index = nic_index;
   50.70  
   50.71 -	if (efrm_nic_table.a_nic == NULL)
   50.72 -		efrm_nic_table.a_nic = nic;
   50.73 +	if (efrm_nic_tablep->a_nic == NULL)
   50.74 +		efrm_nic_tablep->a_nic = nic;
   50.75  
   50.76 -	efrm_nic_table.nic_count++;
   50.77 +	efrm_nic_tablep->nic_count++;
   50.78  	efrm_driver_unlock(lock_flags);
   50.79  	return rc;
   50.80  
   50.81 @@ -147,24 +149,24 @@ int efrm_driver_unregister_nic(struct ef
   50.82  		goto done;
   50.83  	}
   50.84  
   50.85 -	EFRM_ASSERT(efrm_nic_table.nic[nic_index] == nic);
   50.86 +	EFRM_ASSERT(efrm_nic_tablep->nic[nic_index] == nic);
   50.87  
   50.88  	nic->index = -1;
   50.89 -	efrm_nic_table.nic[nic_index] = NULL;
   50.90 +	efrm_nic_tablep->nic[nic_index] = NULL;
   50.91  
   50.92 -	--efrm_nic_table.nic_count;
   50.93 +	--efrm_nic_tablep->nic_count;
   50.94  
   50.95 -	if (efrm_nic_table.a_nic == nic) {
   50.96 -		if (efrm_nic_table.nic_count == 0) {
   50.97 -			efrm_nic_table.a_nic = NULL;
   50.98 +	if (efrm_nic_tablep->a_nic == nic) {
   50.99 +		if (efrm_nic_tablep->nic_count == 0) {
  50.100 +			efrm_nic_tablep->a_nic = NULL;
  50.101  		} else {
  50.102  			for (nic_index = 0; nic_index < EFHW_MAX_NR_DEVS;
  50.103  			     nic_index++) {
  50.104 -				if (efrm_nic_table.nic[nic_index] != NULL)
  50.105 -					efrm_nic_table.a_nic =
  50.106 -					    efrm_nic_table.nic[nic_index];
  50.107 +				if (efrm_nic_tablep->nic[nic_index] != NULL)
  50.108 +					efrm_nic_tablep->a_nic =
  50.109 +					    efrm_nic_tablep->nic[nic_index];
  50.110  			}
  50.111 -			EFRM_ASSERT(efrm_nic_table.a_nic);
  50.112 +			EFRM_ASSERT(efrm_nic_tablep->a_nic);
  50.113  		}
  50.114  	}
  50.115  
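The NIC table is now exported as a pointer, efrm_nic_tablep, to a private
static struct rather than as the struct itself, so all external users go
through one level of indirection that efrm_driver_ctor() wires up.  The dtor
also re-zeroes both tables now, giving a module reload a clean start.  A
minimal sketch of the new access pattern, assuming the ctor has run:

	struct efhw_nic *any_nic(void)
	{
		return efrm_nic_tablep->a_nic;	/* was efrm_nic_table.a_nic */
	}
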
    51.1 --- a/drivers/net/sfc/sfc_resource/driverlink_new.c	Tue Mar 31 11:49:12 2009 +0100
    51.2 +++ b/drivers/net/sfc/sfc_resource/driverlink_new.c	Tue Mar 31 11:59:10 2009 +0100
    51.3 @@ -66,21 +66,20 @@ init_vi_resource_dimensions(struct vi_re
    51.4  			    const struct efx_dl_falcon_resources *res)
    51.5  {
    51.6  	rd->evq_timer_min = res->evq_timer_min;
    51.7 -	rd->evq_timer_max = res->evq_timer_max;
    51.8 +	rd->evq_timer_lim = res->evq_timer_lim;
    51.9  	rd->evq_int_min = res->evq_int_min;
   51.10 -	rd->evq_int_max = res->evq_int_max;
   51.11 +	rd->evq_int_lim = res->evq_int_lim;
   51.12  	rd->rxq_min = res->rxq_min;
   51.13 -	rd->rxq_max = res->rxq_max;
   51.14 +	rd->rxq_lim = res->rxq_lim;
   51.15  	rd->txq_min = res->txq_min;
   51.16 -	rd->txq_max = res->txq_max;
   51.17 +	rd->txq_lim = res->txq_lim;
   51.18  	EFRM_TRACE
   51.19  	    ("Using evq_int(%d-%d) evq_timer(%d-%d) RXQ(%d-%d) TXQ(%d-%d)",
   51.20 -	     res->evq_int_min, res->evq_int_max, res->evq_timer_min,
   51.21 -	     res->evq_timer_max, res->rxq_min, res->rxq_max, res->txq_min,
   51.22 -	     res->txq_max);
   51.23 +	     res->evq_int_min, res->evq_int_lim, res->evq_timer_min,
   51.24 +	     res->evq_timer_lim, res->rxq_min, res->rxq_lim, res->txq_min,
   51.25 +	     res->txq_lim);
   51.26  }
   51.27  
   51.28 -#if defined(EFX_NOT_UPSTREAM)
   51.29  /* We have a module parameter that can tell us to only load the char driver
   51.30   * for 1 NIC (if there are multiple NICs in the system), and if so which one.
   51.31   * This tells us the PCI bus and slot of the NIC to load for, or -1 to just
   51.32 @@ -98,7 +97,6 @@ module_param(only_NIC, uint, 0444);
   51.33  MODULE_PARM_DESC(only_NIC,
   51.34  		 "Initialise sfc_resource driver for one NIC only, "
   51.35  		 "with specified PCI bus and slot");
   51.36 -#endif
   51.37  
   51.38  static int
   51.39  efrm_dl_probe(struct efx_dl_device *efrm_dev,
   51.40 @@ -112,16 +110,14 @@ efrm_dl_probe(struct efx_dl_device *efrm
   51.41  	struct pci_dev *dev;
   51.42  	struct efhw_nic *nic;
   51.43  	unsigned probe_flags = 0;
   51.44 +	int non_irq_evq;
   51.45  	int rc;
   51.46  
   51.47  	efrm_dev->priv = NULL;
   51.48  
   51.49 -	efx_dl_for_each_device_info_matching(dev_info, EFX_DL_FALCON_RESOURCES,
   51.50 -					     struct efx_dl_falcon_resources,
   51.51 -					     hdr, res) {
   51.52 -		/* break out, leaving res pointing at the falcon resources */
   51.53 -		break;
   51.54 -	}
   51.55 +	efx_dl_search_device_info(dev_info, EFX_DL_FALCON_RESOURCES,
   51.56 +				  struct efx_dl_falcon_resources,
   51.57 +				  hdr, res);
   51.58  
   51.59  	if (res == NULL) {
   51.60  		EFRM_ERR("%s: Unable to find falcon driverlink resources",
   51.61 @@ -132,25 +128,25 @@ efrm_dl_probe(struct efx_dl_device *efrm
   51.62  	if (res->flags & EFX_DL_FALCON_USE_MSI)
   51.63  		probe_flags |= NIC_FLAG_TRY_MSI;
   51.64  
   51.65 +#if defined(EFX_NOT_UPSTREAM)
   51.66 +	if (only_NIC != -1 &&
   51.67 +	    (efrm_dev->pci_dev->bus->number !=
   51.68 +	     ((only_NIC >> 8) & 0xFFFF)
   51.69 +	     || PCI_SLOT(efrm_dev->pci_dev->devfn) !=
   51.70 +	     (only_NIC & 0xFF))) {
    51.71 +		EFRM_NOTICE("Hiding char device %x:%x",
    51.72 +			    efrm_dev->pci_dev->bus->number,
    51.73 +			    PCI_SLOT(efrm_dev->pci_dev->devfn));
    51.74 +		return -ENODEV;
   51.75 +	}
   51.76 +#endif
    51.77 +
   51.78  	dev = efrm_dev->pci_dev;
   51.79  	if (res->flags & EFX_DL_FALCON_DUAL_FUNC) {
   51.80  		unsigned vendor = dev->vendor;
   51.81  		EFRM_ASSERT(dev->bus != NULL);
   51.82  		dev = NULL;
   51.83  
   51.84 -#if defined(EFX_NOT_UPSTREAM)
   51.85 -		if (only_NIC != -1 &&
   51.86 -		    (efrm_dev->pci_dev->bus->number !=
   51.87 -		     ((only_NIC >> 8) & 0xFFFF)
   51.88 -		     || PCI_SLOT(efrm_dev->pci_dev->devfn) !=
   51.89 -		     (only_NIC & 0xFF))) {
   51.90 -			EFRM_NOTICE("Hiding char device %x:%x",
   51.91 -				    efrm_dev->pci_dev->bus->number,
   51.92 -				    PCI_SLOT(efrm_dev->pci_dev->devfn));
   51.93 -			return -ENODEV;
   51.94 -		}
   51.95 -#endif
   51.96 -
   51.97  		while ((dev = pci_get_device(vendor, FALCON_S_DEVID, dev))
   51.98  		       != NULL) {
   51.99  			EFRM_ASSERT(dev->bus != NULL);
  51.100 @@ -174,10 +170,14 @@ efrm_dl_probe(struct efx_dl_device *efrm
  51.101  
  51.102  	init_vi_resource_dimensions(&res_dim, res);
  51.103  
  51.104 +	EFRM_ASSERT(res_dim.evq_timer_lim > res_dim.evq_timer_min);
  51.105 +	res_dim.evq_timer_lim--;
  51.106 +	non_irq_evq = res_dim.evq_timer_lim;
  51.107 +
  51.108  	rc = efrm_nic_add(dev, probe_flags, net_dev->dev_addr, &lnic,
  51.109  			  res->biu_lock,
  51.110 -			  res->buffer_table_min, res->buffer_table_max,
  51.111 -			  &res_dim);
  51.112 +			  res->buffer_table_min, res->buffer_table_lim,
  51.113 +			  non_irq_evq, &res_dim);
  51.114  	if (rc != 0)
  51.115  		return rc;
  51.116  
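Three related changes in the probe path: the open-coded device-info loop
becomes efx_dl_search_device_info(); the only_NIC filter moves ahead of the
DUAL_FUNC branch so single-function NICs can be hidden too; and the resource
fields are renamed from *_max to *_lim.  Judging by the arithmetic here, a
lim is an exclusive upper bound ("one past the end") rather than an
inclusive maximum.  A sketch of the carve-out, assuming lim > min as
asserted above:

	/* usable timer evqs are [evq_timer_min, evq_timer_lim)... */
	res_dim.evq_timer_lim--;		/* ...minus the top one, */
	non_irq_evq = res_dim.evq_timer_lim;	/* reserved for non-IRQ events */
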
    52.1 --- a/drivers/net/sfc/sfc_resource/efx_vi_shm.c	Tue Mar 31 11:49:12 2009 +0100
    52.2 +++ b/drivers/net/sfc/sfc_resource/efx_vi_shm.c	Tue Mar 31 11:59:10 2009 +0100
    52.3 @@ -298,7 +298,7 @@ efx_vi_dma_map_pages(struct efx_vi_state
    52.4  		/* TODO do we need to get_page() here ? */
    52.5  
    52.6  		dma_addr = pci_map_page
    52.7 -		    (linux_efhw_nic(efrm_nic_table.nic[efx_state->nic_index])->
    52.8 +		    (linux_efhw_nic(efrm_nic_tablep->nic[efx_state->nic_index])->
    52.9  		     pci_dev, pages[i], 0, PAGE_SIZE, PCI_DMA_TODEVICE);
   52.10  
   52.11  		efrm_buffer_table_set(&dm_state->bt_handle, i, dma_addr,
   52.12 @@ -399,7 +399,7 @@ efx_vi_dma_unmap_pages(struct efx_vi_sta
   52.13  
   52.14  	for (i = 0; i < dm_state->n_pages; ++i)
   52.15  		pci_unmap_page(linux_efhw_nic
   52.16 -			(efrm_nic_table.nic[efx_state->nic_index])->pci_dev,
   52.17 +			(efrm_nic_tablep->nic[efx_state->nic_index])->pci_dev,
   52.18  			dm_state->dma_addrs[i], PAGE_SIZE, PCI_DMA_TODEVICE);
   52.19  
   52.20  	kfree(dm_state->dma_addrs);
   52.21 @@ -547,7 +547,7 @@ efx_vi_hw_resource_get_phys(struct efx_v
   52.22  {
   52.23  	struct efx_vi_state *efx_state = vih;
   52.24  	int i, ni = efx_state->nic_index;
   52.25 -	struct linux_efhw_nic *lnic = linux_efhw_nic(efrm_nic_table.nic[ni]);
   52.26 +	struct linux_efhw_nic *lnic = linux_efhw_nic(efrm_nic_tablep->nic[ni]);
   52.27  	unsigned long phys = lnic->ctr_ap_pci_addr;
   52.28  	struct efrm_resource *ep_res = &efx_state->vi_res->rs;
   52.29  	unsigned ep_mmap_bytes;
   52.30 @@ -555,11 +555,9 @@ efx_vi_hw_resource_get_phys(struct efx_v
   52.31  	if (*length < EFX_VI_HW_RESOURCE_MAXSIZE)
   52.32  		return -EINVAL;
   52.33  
   52.34 -	mdata->version = 0;
   52.35 -
   52.36 -	mdata->nic_arch = efrm_nic_table.nic[ni]->devtype.arch;
   52.37 -	mdata->nic_variant = efrm_nic_table.nic[ni]->devtype.variant;
   52.38 -	mdata->nic_revision = efrm_nic_table.nic[ni]->devtype.revision;
   52.39 +	mdata->nic_arch = efrm_nic_tablep->nic[ni]->devtype.arch;
   52.40 +	mdata->nic_variant = efrm_nic_tablep->nic[ni]->devtype.variant;
   52.41 +	mdata->nic_revision = efrm_nic_tablep->nic[ni]->devtype.revision;
   52.42  
   52.43  	mdata->evq_order =
   52.44  	    efx_state->vi_res->nic_info[ni].evq_pages.iobuff.order;
   52.45 @@ -634,9 +632,6 @@ efx_vi_hw_resource_get_phys(struct efx_v
   52.46  		(unsigned long)efx_state->vi_res->nic_info[ni].
   52.47  			dmaq_pages[EFRM_VI_RM_DMA_QUEUE_RX].kva;
   52.48  
   52.49 -	/* NB EFX_VI_HW_RESOURCE_TXBELL not used on Falcon */
   52.50 -	/* NB EFX_VI_HW_RESOURCE_RXBELL not used on Falcon */
   52.51 -
   52.52  	i++;
   52.53  	hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQTIMER;
   52.54  	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
   52.55 @@ -648,7 +643,7 @@ efx_vi_hw_resource_get_phys(struct efx_v
   52.56  	/* NB EFX_VI_HW_RESOURCE_EVQPTR not used on Falcon */
   52.57  
   52.58  	i++;
   52.59 -	switch (efrm_nic_table.nic[ni]->devtype.variant) {
   52.60 +	switch (efrm_nic_tablep->nic[ni]->devtype.variant) {
   52.61  	case 'A':
   52.62  		hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR;
   52.63  		hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
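
Besides the switch to efrm_nic_tablep, this file drops the write of
mdata->version, suggesting the metadata version field went away in the API
update.  The DMA paths keep a strict map/unmap symmetry; a sketch of the
invariant, assuming a single page mapped for device reads:

	static dma_addr_t map_one(struct pci_dev *pdev, struct page *pg)
	{
		return pci_map_page(pdev, pg, 0, PAGE_SIZE, PCI_DMA_TODEVICE);
	}

	static void unmap_one(struct pci_dev *pdev, dma_addr_t dma)
	{
		/* must mirror the map: same length, same direction */
		pci_unmap_page(pdev, dma, PAGE_SIZE, PCI_DMA_TODEVICE);
	}
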
    53.1 --- a/drivers/net/sfc/sfc_resource/eventq.c	Tue Mar 31 11:49:12 2009 +0100
    53.2 +++ b/drivers/net/sfc/sfc_resource/eventq.c	Tue Mar 31 11:59:10 2009 +0100
    53.3 @@ -170,7 +170,7 @@ void
    53.4  efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
    53.5  			   efhw_event_t *evp)
    53.6  {
    53.7 -	int instance = (int)FALCON_EVENT_RX_FLUSH_Q_ID(evp);
    53.8 +	unsigned instance = (unsigned)FALCON_EVENT_RX_FLUSH_Q_ID(evp);
    53.9  	EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance);
   53.10  
   53.11  	if (!h->dmaq_flushed_fn) {
   53.12 @@ -185,24 +185,28 @@ void
   53.13  efhw_handle_wakeup_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
   53.14  			 efhw_event_t *evp)
   53.15  {
   53.16 +	unsigned instance = (unsigned)FALCON_EVENT_WAKE_EVQ_ID(evp);
   53.17 +
   53.18  	if (!h->wakeup_fn) {
   53.19  		EFHW_WARN("%s: no handler registered", __FUNCTION__);
   53.20  		return;
   53.21  	}
   53.22  
   53.23 -	h->wakeup_fn(nic, evp);
   53.24 +	h->wakeup_fn(nic, instance);
   53.25  }
   53.26  
   53.27  void
   53.28  efhw_handle_timeout_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
   53.29  			  efhw_event_t *evp)
   53.30  {
   53.31 +	unsigned instance = (unsigned)FALCON_EVENT_WAKE_EVQ_ID(evp);
   53.32 +
   53.33  	if (!h->timeout_fn) {
   53.34  		EFHW_WARN("%s: no handler registered", __FUNCTION__);
   53.35  		return;
   53.36  	}
   53.37  
   53.38 -	h->timeout_fn(nic, evp);
   53.39 +	h->timeout_fn(nic, instance);
   53.40  }
   53.41  
   53.42  /**********************************************************************
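
Note that both the wakeup and timeout paths decode the queue id with
FALCON_EVENT_WAKE_EVQ_ID(), so the two event types evidently carry the id in
the same field.  With the decode done here, the efrm handlers declared in
vi_resource_private.h plug straight into the handler table; a hedged sketch
of the wiring, assuming struct efhw_ev_handler's members have the matching
function-pointer types:

	struct efhw_ev_handler h = {
		.wakeup_fn	 = efrm_handle_wakeup_event,
		.timeout_fn	 = efrm_handle_timeout_event,
		.dmaq_flushed_fn = efrm_handle_dmaq_flushed,
	};
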
    54.1 --- a/drivers/net/sfc/sfc_resource/falcon.c	Tue Mar 31 11:49:12 2009 +0100
    54.2 +++ b/drivers/net/sfc/sfc_resource/falcon.c	Tue Mar 31 11:59:10 2009 +0100
    54.3 @@ -45,14 +45,11 @@
    54.4   *
    54.5   *---------------------------------------------------------------------------*/
    54.6  
    54.7 -/* on for debug builds */
    54.8 -#ifndef NDEBUG
    54.9 -#  define FALCON_FULL_FILTER_CACHE 1	/* complete SW shadow of filter tbl */
   54.10 -#  define FALCON_VERIFY_FILTERS    0
   54.11 -#else /* Also adds duplicate filter check */
   54.12 -#  define FALCON_FULL_FILTER_CACHE 1	/* keep this on for some security */
   54.13 -#  define FALCON_VERIFY_FILTERS    0
   54.14 -#endif
   54.15 +/* Keep a software copy of the filter table and check for duplicates. */
   54.16 +#define FALCON_FULL_FILTER_CACHE 1
   54.17 +
   54.18 +/* Read filters back from the hardware to detect corruption. */
   54.19 +#define FALCON_VERIFY_FILTERS    0
   54.20  
   54.21  /* options */
   54.22  #define RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL 8	/* default search limit */
   54.23 @@ -73,11 +70,7 @@
   54.24   *
   54.25   *---------------------------------------------------------------------------*/
   54.26  
   54.27 -#ifndef __KERNEL__
   54.28 -#define _DEBUG_SYM_ extern
   54.29 -#else
   54.30  #define _DEBUG_SYM_ static inline
   54.31 -#endif
   54.32  
   54.33   /*----------------------------------------------------------------------------
   54.34    *
   54.35 @@ -208,6 +201,7 @@ static void
   54.36  		EFHW_ASSERT(!rss_b0);
   54.37  		break;
   54.38  	case 'B':
   54.39 +	case 'C':
   54.40  		v4 |= scat_b0 << __DW4(SCATTER_EN_1_B0_LBN);
   54.41  		v4 |= rss_b0 << __DW4(RSS_EN_1_B0_LBN);
   54.42  		break;
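
A new Falcon silicon revision, 'C', is recognised throughout this file; in
every switch keyed on devtype.variant it falls through to the B0
programming.  The shape of the change, with hypothetical helpers standing in
for the per-revision register writes:

	switch (nic->devtype.variant) {
	case 'A':
		setup_a1(nic);		/* hypothetical A1 path */
		break;
	case 'B':
	case 'C':			/* rev C programmed like B0 for now */
		setup_b0(nic);		/* hypothetical B0 path */
		break;
	default:
		EFHW_ASSERT(0);
	}
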
   54.43 @@ -407,7 +401,7 @@ falcon_dmaq_tx_q_init(struct efhw_nic *n
   54.44  	uint index, desc_type;
   54.45  	uint64_t val1, val2, val3;
   54.46  	ulong offset;
   54.47 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
   54.48 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
   54.49  
   54.50  	/* Q attributes */
   54.51  	int iscsi_hdig_en = ((flags & EFHW_VI_ISCSI_TX_HDIG_EN) != 0);
   54.52 @@ -471,6 +465,7 @@ falcon_dmaq_tx_q_init(struct efhw_nic *n
   54.53  
   54.54  	switch (nic->devtype.variant) {
   54.55  	case 'B':
   54.56 +	case 'C':
   54.57  		__DW3CHCK(TX_NON_IP_DROP_DIS_B0_LBN,
   54.58  			  TX_NON_IP_DROP_DIS_B0_WIDTH);
   54.59  		__DW3CHCK(TX_IP_CHKSM_DIS_B0_LBN, TX_IP_CHKSM_DIS_B0_WIDTH);
   54.60 @@ -523,7 +518,7 @@ falcon_dmaq_rx_q_init(struct efhw_nic *n
   54.61  	uint i, desc_type = 1;
   54.62  	uint64_t val1, val2, val3;
   54.63  	ulong offset;
   54.64 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
   54.65 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
   54.66  
   54.67  	/* Q attributes */
   54.68  #if BUG5762_WORKAROUND
   54.69 @@ -613,7 +608,7 @@ static void falcon_dmaq_tx_q_disable(str
   54.70  	FALCON_LOCK_DECL;
   54.71  	uint64_t val1, val2, val3;
   54.72  	ulong offset;
   54.73 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
   54.74 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
   54.75  
   54.76  	/* initialise the TX descriptor queue pointer table */
   54.77  
   54.78 @@ -646,7 +641,7 @@ static void falcon_dmaq_rx_q_disable(str
   54.79  	FALCON_LOCK_DECL;
   54.80  	uint64_t val1, val2, val3;
   54.81  	ulong offset;
   54.82 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
   54.83 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
   54.84  
   54.85  	/* initialise the TX descriptor queue pointer table */
   54.86  	offset = falcon_dma_rx_q_offset(nic, dmaq);
   54.87 @@ -749,8 +744,8 @@ static inline void
   54.88  {
   54.89  	/* programming the half table needs to be done in pairs. */
   54.90  	uint64_t entry, val, shift;
   54.91 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
   54.92 -	efhw_ioaddr_t offset;
   54.93 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
   54.94 +	volatile char __iomem *offset;
   54.95  
   54.96  	EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_LBN == BUF_ADR_HBUF_EVEN_LBN + 32);
   54.97  	EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_LBN ==
   54.98 @@ -774,9 +769,9 @@ static inline void
   54.99  	val &= ~(((uint64_t) 0xffffffff) << shift);
  54.100  	val |= (entry << shift);
  54.101  
  54.102 -	EFHW_TRACE("%s[%x]: " ci_dma_addr_fmt ":%x:%" PRIx64 "->%x = %"
  54.103 -		   PRIx64, __FUNCTION__, buffer_id, dma_addr, own_id, entry,
  54.104 -		   (unsigned)(offset - efhw_kva), val);
  54.105 +	EFHW_TRACE("%s[%x]: %lx:%x:%" PRIx64 "->%x = %"
  54.106 +		   PRIx64, __FUNCTION__, buffer_id, (unsigned long) dma_addr,
  54.107 +		   own_id, entry, (unsigned)(offset - efhw_kva), val);
  54.108  
  54.109  	/* Falcon requires that access to this register is serialised */
  54.110  	falcon_write_q(offset, val);
  54.111 @@ -811,9 +806,9 @@ static inline void
  54.112  			       dma_addr_t dma_addr, uint bufsz,
  54.113  			       uint region, int own_id, int buffer_id)
  54.114  {
  54.115 -	efhw_ioaddr_t offset;
  54.116 +	volatile char __iomem *offset;
  54.117  	uint64_t entry;
  54.118 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.119 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.120  
  54.121  	EFHW_ASSERT(region < FALCON_REGION_NUM);
  54.122  
  54.123 @@ -826,9 +821,9 @@ static inline void
  54.124  	entry = falcon_nic_buffer_table_entry64_mk(dma_addr, bufsz, region,
  54.125  						   own_id);
  54.126  
  54.127 -	EFHW_TRACE("%s[%x]: " ci_dma_addr_fmt
  54.128 -		   ":bufsz=%x:region=%x:ownid=%x",
  54.129 -		   __FUNCTION__, buffer_id, dma_addr, bufsz, region, own_id);
  54.130 +	EFHW_TRACE("%s[%x]: %lx:bufsz=%x:region=%x:ownid=%x",
  54.131 +		   __FUNCTION__, buffer_id, (unsigned long) dma_addr, bufsz,
  54.132 +		   region, own_id);
  54.133  
  54.134  	EFHW_TRACE("%s: BUF[%x]:NIC[%x]->%" PRIx64,
  54.135  		   __FUNCTION__, buffer_id,
  54.136 @@ -870,7 +865,7 @@ static inline void
  54.137  static inline void _falcon_nic_buffer_table_commit(struct efhw_nic *nic)
  54.138  {
  54.139  	/* MUST be called holding the FALCON_LOCK */
  54.140 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.141 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.142  	uint64_t cmd;
  54.143  
  54.144  	EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST);
  54.145 @@ -900,9 +895,9 @@ static inline void
  54.146  	uint64_t cmd;
  54.147  	uint64_t start_id = buffer_id;
  54.148  	uint64_t end_id = buffer_id + num - 1;
  54.149 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.150 -
  54.151 -	efhw_ioaddr_t offset = (efhw_kva + BUF_TBL_UPD_REG_OFST);
  54.152 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.153 +
  54.154 +	volatile char __iomem *offset = (efhw_kva + BUF_TBL_UPD_REG_OFST);
  54.155  
  54.156  	EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST);
  54.157  
  54.158 @@ -962,13 +957,10 @@ static inline void falcon_nic_srm_upd_ev
  54.159  	 * updates */
  54.160  
  54.161  	FALCON_LOCK_DECL;
  54.162 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.163 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.164  
  54.165  	EFHW_BUILD_ASSERT(SRM_UPD_EVQ_REG_OFST == SRM_UPD_EVQ_REG_KER_OFST);
  54.166  
  54.167 -	EFHW_ASSERT((evq == FALCON_EVQ_KERNEL0) || (evq == FALCON_EVQ_CHAR) ||
  54.168 -		    (evq == FALCON_EVQ_NONIRQ));
  54.169 -
  54.170  	__DWCHCK(SRM_UPD_EVQ_ID_LBN, SRM_UPD_EVQ_ID_WIDTH);
  54.171  	__RANGECHCK(evq, SRM_UPD_EVQ_ID_WIDTH);
  54.172  
  54.173 @@ -991,7 +983,7 @@ falcon_nic_evq_ptr_tbl(struct efhw_nic *
  54.174  	FALCON_LOCK_DECL;
  54.175  	uint i, val;
  54.176  	ulong offset;
  54.177 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.178 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.179  
  54.180  	/* size must be one of the various options, otherwise we assert */
  54.181  	for (i = 0; i < N_EVENTQ_SIZES; i++) {
  54.182 @@ -1045,7 +1037,7 @@ falcon_nic_evq_ack(struct efhw_nic *nic,
  54.183  {
  54.184  	uint val;
  54.185  	ulong offset;
  54.186 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.187 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.188  
  54.189  	EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
  54.190  
  54.191 @@ -1078,38 +1070,13 @@ falcon_nic_evq_ack(struct efhw_nic *nic,
  54.192  	mmiowb();
  54.193  }
  54.194  
  54.195 -/*----------------------------------------------------------------------------
  54.196 - *
  54.197 - * Helper for evq mapping
  54.198 - *
  54.199 - * idx = 0 && char   => hw eventq[4]
  54.200 - * idx = 0 && net    => hw eventq[0]
  54.201 - *   0 < idx < 5     => hw eventq[idx]  (5 is non-interrupting)
  54.202 - *
  54.203 - *
  54.204 - *---------------------------------------------------------------------------*/
  54.205 -
  54.206 -int falcon_idx_to_evq(struct efhw_nic *nic, uint idx)
  54.207 -{
  54.208 -	EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
  54.209 -	EFHW_ASSERT(idx <= FALCON_EVQ_NONIRQ);
  54.210 -	return (idx > 0) ? idx : FALCON_EVQ_CHAR;
  54.211 -}
  54.212 -
  54.213 -static inline int falcon_evq_is_interrupting(struct efhw_nic *nic, uint idx)
  54.214 -{
  54.215 -	EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
  54.216 -	EFHW_ASSERT(idx <= FALCON_EVQ_NONIRQ);
  54.217 -
  54.218 -	/* only the first CHAR driver event queue is interrupting */
  54.219 -	return (idx == FALCON_EVQ_CHAR);
  54.220 -}
  54.221 +/*---------------------------------------------------------------------------*/
  54.222  
  54.223  static inline void
  54.224  falcon_drv_ev(struct efhw_nic *nic, uint64_t data, uint qid)
  54.225  {
  54.226  	FALCON_LOCK_DECL;
  54.227 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.228 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.229  
  54.230  	/* send an event from one driver to the other */
  54.231  	EFHW_BUILD_ASSERT(DRV_EV_REG_KER_OFST == DRV_EV_REG_OFST);
  54.232 @@ -1133,7 +1100,7 @@ falcon_timer_cmd(struct efhw_nic *nic,
  54.233  	FALCON_LOCK_DECL;
  54.234  	uint val;
  54.235  	ulong offset;
  54.236 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.237 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.238  
  54.239  	EFHW_BUILD_ASSERT(TIMER_VAL_LBN == 0);
  54.240  
  54.241 @@ -1184,7 +1151,7 @@ void falcon_nic_pace(struct efhw_nic *ni
  54.242  	   Pacing only available on the virtual interfaces
  54.243  	 */
  54.244  	FALCON_LOCK_DECL;
  54.245 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.246 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.247  	ulong offset;
  54.248  
  54.249  	if (pace > 20)
  54.250 @@ -1200,6 +1167,7 @@ void falcon_nic_pace(struct efhw_nic *ni
  54.251  		offset += (dmaq - TX_PACE_TBL_FIRST_QUEUE_A1) * 16;
  54.252  		break;
  54.253  	case 'B':
  54.254 +	case 'C':
  54.255  		/* Would be nice to assert this, but as dmaq is unsigned and
  54.256  		 * TX_PACE_TBL_FIRST_QUEUE_B0 is 0, it makes no sense
  54.257  		 * EFHW_ASSERT(dmaq >= TX_PACE_TBL_FIRST_QUEUE_B0);
  54.258 @@ -1232,8 +1200,8 @@ void falcon_nic_pace(struct efhw_nic *ni
  54.259  static void falcon_nic_handle_fatal_int(struct efhw_nic *nic)
  54.260  {
  54.261  	FALCON_LOCK_DECL;
  54.262 -	efhw_ioaddr_t offset;
  54.263 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.264 +	volatile char __iomem *offset;
  54.265 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.266  	uint64_t val;
  54.267  
  54.268  	offset = (efhw_kva + FATAL_INTR_REG_OFST);
  54.269 @@ -1264,8 +1232,8 @@ static void falcon_nic_interrupt_hw_enab
  54.270  {
  54.271  	FALCON_LOCK_DECL;
  54.272  	uint val;
  54.273 -	efhw_ioaddr_t offset;
  54.274 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.275 +	volatile char __iomem *offset;
  54.276 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.277  
  54.278  	EFHW_BUILD_ASSERT(DRV_INT_EN_CHAR_WIDTH == 1);
  54.279  
  54.280 @@ -1288,8 +1256,8 @@ static void falcon_nic_interrupt_hw_enab
  54.281  static void falcon_nic_interrupt_hw_disable(struct efhw_nic *nic)
  54.282  {
  54.283  	FALCON_LOCK_DECL;
  54.284 -	efhw_ioaddr_t offset;
  54.285 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.286 +	volatile char __iomem *offset;
  54.287 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.288  
  54.289  	EFHW_BUILD_ASSERT(SRAM_PERR_INT_KER_WIDTH == 1);
  54.290  	EFHW_BUILD_ASSERT(DRV_INT_EN_KER_LBN == 0);
  54.291 @@ -1312,13 +1280,12 @@ static void falcon_nic_interrupt_hw_disa
  54.292  	FALCON_LOCK_UNLOCK(nic);
  54.293  }
  54.294  
  54.295 -#ifndef __ci_ul_driver__
  54.296  
  54.297  static void falcon_nic_irq_addr_set(struct efhw_nic *nic, dma_addr_t dma_addr)
  54.298  {
  54.299  	FALCON_LOCK_DECL;
  54.300 -	efhw_ioaddr_t offset;
  54.301 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.302 +	volatile char __iomem *offset;
  54.303 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.304  
  54.305  	offset = (efhw_kva + INT_ADR_REG_CHAR_OFST);
  54.306  
  54.307 @@ -1332,7 +1299,6 @@ static void falcon_nic_irq_addr_set(stru
  54.308  	FALCON_LOCK_UNLOCK(nic);
  54.309  }
  54.310  
  54.311 -#endif
  54.312  
  54.313  
  54.314  /*--------------------------------------------------------------------
  54.315 @@ -1345,7 +1311,7 @@ void
  54.316  falcon_nic_set_rx_usr_buf_size(struct efhw_nic *nic, int usr_buf_bytes)
  54.317  {
  54.318  	FALCON_LOCK_DECL;
  54.319 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.320 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.321  	uint64_t val, val2, usr_buf_size = usr_buf_bytes / 32;
  54.322  	int rubs_lbn, rubs_width, roec_lbn;
  54.323  
  54.324 @@ -1361,6 +1327,7 @@ falcon_nic_set_rx_usr_buf_size(struct ef
  54.325  		roec_lbn = RX_OWNERR_CTL_A1_LBN;
  54.326  		break;
  54.327  	case 'B':
  54.328 +	case 'C':
  54.329  		rubs_lbn = RX_USR_BUF_SIZE_B0_LBN;
  54.330  		rubs_width = RX_USR_BUF_SIZE_B0_WIDTH;
  54.331  		roec_lbn = RX_OWNERR_CTL_B0_LBN;
  54.332 @@ -1392,7 +1359,7 @@ falcon_nic_rx_filter_ctl_get(struct efhw
  54.333  			     uint32_t *tcp_wild,
  54.334  			     uint32_t *udp_full, uint32_t *udp_wild)
  54.335  {
  54.336 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.337 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.338  	FALCON_LOCK_DECL;
  54.339  	uint64_t val;
  54.340  
  54.341 @@ -1420,7 +1387,7 @@ falcon_nic_rx_filter_ctl_set(struct efhw
  54.342  			     uint32_t udp_full, uint32_t udp_wild)
  54.343  {
  54.344  	uint64_t val, val2;
  54.345 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.346 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.347  	FALCON_LOCK_DECL;
  54.348  
  54.349  	EFHW_ASSERT(tcp_full < nic->filter_tbl_size);
  54.350 @@ -1480,7 +1447,7 @@ EXPORT_SYMBOL(falcon_nic_rx_filter_ctl_s
  54.351  _DEBUG_SYM_ void falcon_nic_tx_cfg(struct efhw_nic *nic, int unlocked)
  54.352  {
  54.353  	FALCON_LOCK_DECL;
  54.354 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.355 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.356  	uint64_t val1, val2;
  54.357  
  54.358  	EFHW_BUILD_ASSERT(TX_CFG_REG_OFST == TX_CFG_REG_KER_OFST);
  54.359 @@ -1516,7 +1483,7 @@ EXPORT_SYMBOL(falcon_nic_rx_filter_ctl_s
  54.360  static void falcon_nic_pace_cfg(struct efhw_nic *nic)
  54.361  {
  54.362  	FALCON_LOCK_DECL;
  54.363 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.364 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.365  	unsigned offset = 0;
  54.366  	uint64_t val;
  54.367  
  54.368 @@ -1527,6 +1494,7 @@ static void falcon_nic_pace_cfg(struct e
  54.369  	switch (nic->devtype.variant) {
  54.370  	case 'A':  offset = TX_PACE_REG_A1_OFST;  break;
  54.371  	case 'B':  offset = TX_PACE_REG_B0_OFST;  break;
  54.372 +	case 'C':  offset = TX_PACE_REG_B0_OFST;  break;
  54.373  	default:   EFHW_ASSERT(0);                break;
  54.374  	}
  54.375  	falcon_write_qq(efhw_kva + offset, val, 0);
  54.376 @@ -1697,15 +1665,6 @@ host_ipfilter_cache_check_not(uint nic, 
  54.377   *
  54.378   *---------------------------------------------------------------------------*/
  54.379  
  54.380 -#ifdef __ci_ul_driver__
  54.381 -
  54.382 -static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable)
  54.383 -{
  54.384 -	EFHW_ERR("%s: not implemented for ul driver", __FUNCTION__);
  54.385 -	return -EOPNOTSUPP;
  54.386 -}
  54.387 -
  54.388 -#else
  54.389  
  54.390  static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable)
  54.391  {
  54.392 @@ -1727,14 +1686,13 @@ static int falcon_nic_init_irq_channel(s
  54.393  		falcon_nic_irq_addr_set(nic, 0);
  54.394  	}
  54.395  
  54.396 -	EFHW_TRACE("%s: " ci_dma_addr_fmt " %sable", __FUNCTION__,
  54.397 -		   efhw_iopage_dma_addr(&nic->irq_iobuff), enable ?
  54.398 -			"en" : "dis");
  54.399 +	EFHW_TRACE("%s: %lx %sable", __FUNCTION__,
  54.400 +		   (unsigned long) efhw_iopage_dma_addr(&nic->irq_iobuff),
  54.401 +		   enable ? "en" : "dis");
  54.402  
  54.403  	return 0;
  54.404  }
  54.405  
  54.406 -#endif
  54.407  
  54.408  static void falcon_nic_close_hardware(struct efhw_nic *nic)
  54.409  {
  54.410 @@ -1747,14 +1705,10 @@ static void falcon_nic_close_hardware(st
  54.411  	EFHW_NOTICE("%s:", __FUNCTION__);
  54.412  }
  54.413  
  54.414 -#ifdef __ci_ul_driver__
  54.415 -extern
  54.416 -#else
  54.417  static
  54.418 -#endif
  54.419  int falcon_nic_get_mac_config(struct efhw_nic *nic)
  54.420  {
  54.421 -	efhw_ioaddr_t efhw_kva = nic->bar_ioaddr;
  54.422 +	volatile char __iomem *efhw_kva = nic->bar_ioaddr;
  54.423  	int is_mac_type_1g;
  54.424  	uint32_t strap, altera;
  54.425  	uint64_t rx_cfg, r;
  54.426 @@ -1811,6 +1765,23 @@ int falcon_nic_get_mac_config(struct efh
  54.427  		}
  54.428  #endif
  54.429  		break;
  54.430 +	case 'C':
  54.431 +		/* Treat like B0 for now, but without the RX FIFO size check
  54.432 +		 * (don't need it, and RX_CFG_REG will likely change soon
  54.433 +		 * anyway).
  54.434 +		 */
  54.435 +		is_mac_type_1g = (0 != (strap & 2));
  54.436 +#if FALCON_MAC_SET_TYPE_BY_SPEED
  54.437 +		/* Check the selected strap pins against the MAC speed -
  54.438 +		 * and adjust if necessary.
  54.439 +		 */
  54.440 +		{
  54.441 +			int speed;
  54.442 +			speed = readl(efhw_kva + MAC0_CTRL_REG_OFST) & 0x3;
  54.443 +			is_mac_type_1g = (speed <= 2);
  54.444 +		}
  54.445 +#endif
  54.446 +		break;
  54.447  	default:
  54.448  		EFHW_ASSERT(0);
  54.449  		is_mac_type_1g = 0;
  54.450 @@ -1834,7 +1805,7 @@ int falcon_nic_get_mac_config(struct efh
  54.451  static int
  54.452  falcon_nic_init_hardware(struct efhw_nic *nic,
  54.453  			 struct efhw_ev_handler *ev_handlers,
  54.454 -			 const uint8_t *mac_addr)
  54.455 +			 const uint8_t *mac_addr, int non_irq_evq)
  54.456  {
  54.457  	int rc;
  54.458  
  54.459 @@ -1868,7 +1839,7 @@ falcon_nic_init_hardware(struct efhw_nic
  54.460  	   IFDEF FALCON's can be removed from
  54.461  	   nic.c:efhw_nic_allocate_common_hardware_resources()
  54.462  	 */
  54.463 -	nic->irq_unit[0] = INT_EN_REG_CHAR_OFST;
  54.464 +	nic->irq_unit = INT_EN_REG_CHAR_OFST;
  54.465  
  54.466  	/*****************************************************************
  54.467  	 * The rest of this function deals with initialization of the NICs
  54.468 @@ -1877,7 +1848,7 @@ falcon_nic_init_hardware(struct efhw_nic
  54.469  
  54.470  	/* char driver grabs SRM events onto the non interrupting
  54.471  	 * event queue */
  54.472 -	falcon_nic_srm_upd_evq(nic, FALCON_EVQ_NONIRQ);
  54.473 +	falcon_nic_srm_upd_evq(nic, non_irq_evq);
  54.474  
  54.475  	/* RXDP tweaks */
  54.476  
  54.477 @@ -1906,19 +1877,19 @@ falcon_nic_init_hardware(struct efhw_nic
  54.478  				     RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD);
  54.479  
  54.480  	if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) {
  54.481 -		rc = efhw_keventq_ctor(nic, FALCON_EVQ_CHAR, &nic->evq[0],
  54.482 -				       ev_handlers);
  54.483 +		rc = efhw_keventq_ctor(nic, FALCON_EVQ_CHAR,
  54.484 +				       &nic->interrupting_evq, ev_handlers);
  54.485  		if (rc < 0) {
  54.486  			EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d",
  54.487  				 __FUNCTION__, rc, FALCON_EVQ_CHAR);
  54.488  			return rc;
  54.489  		}
  54.490  	}
  54.491 -	rc = efhw_keventq_ctor(nic, FALCON_EVQ_NONIRQ,
  54.492 -			       &nic->evq[FALCON_EVQ_NONIRQ], NULL);
  54.493 +	rc = efhw_keventq_ctor(nic, non_irq_evq,
  54.494 +			       &nic->non_interrupting_evq, NULL);
  54.495  	if (rc < 0) {
  54.496  		EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d",
  54.497 -			 __FUNCTION__, rc, FALCON_EVQ_NONIRQ);
  54.498 +			 __FUNCTION__, rc, non_irq_evq);
  54.499  		return rc;
  54.500  	}
  54.501  
  54.502 @@ -1938,11 +1909,12 @@ falcon_nic_init_hardware(struct efhw_nic
  54.503   *--------------------------------------------------------------------*/
  54.504  
  54.505  static void
  54.506 -falcon_nic_interrupt_enable(struct efhw_nic *nic, unsigned idx)
  54.507 +falcon_nic_interrupt_enable(struct efhw_nic *nic)
  54.508  {
  54.509 -	int evq;
  54.510 -
  54.511 -	if (idx || (nic->flags & NIC_FLAG_NO_INTERRUPT))
  54.512 +	struct efhw_keventq *q;
  54.513 +	unsigned rdptr;
  54.514 +
  54.515 +	if (nic->flags & NIC_FLAG_NO_INTERRUPT)
  54.516  		return;
  54.517  
  54.518  	/* Enable driver interrupts */
  54.519 @@ -1950,32 +1922,26 @@ falcon_nic_interrupt_enable(struct efhw_
  54.520  	falcon_nic_interrupt_hw_enable(nic);
  54.521  
  54.522  	/* An interrupting eventq must start of day ack its read pointer */
  54.523 -	evq = falcon_idx_to_evq(nic, idx);
  54.524 -
  54.525 -	if (falcon_evq_is_interrupting(nic, evq)) {
  54.526 -		struct efhw_keventq *q = &nic->evq[idx];
  54.527 -		unsigned rdptr =
  54.528 -		    EFHW_EVENT_OFFSET(q, q, 1) / sizeof(efhw_event_t);
  54.529 -		falcon_nic_evq_ack(nic, evq, rdptr, false);
  54.530 -		EFHW_NOTICE("%s: ACK evq[%d]:%x", __FUNCTION__, evq, rdptr);
  54.531 -	}
  54.532 +	q = &nic->interrupting_evq;
  54.533 +	rdptr = EFHW_EVENT_OFFSET(q, q, 1) / sizeof(efhw_event_t);
  54.534 +	falcon_nic_evq_ack(nic, FALCON_EVQ_CHAR, rdptr, false);
  54.535 +	EFHW_NOTICE("%s: ACK evq[%d]:%x", __FUNCTION__,
  54.536 +		    FALCON_EVQ_CHAR, rdptr);
  54.537  }
  54.538  
  54.539 -static void falcon_nic_interrupt_disable(struct efhw_nic *nic, uint idx)
  54.540 +static void falcon_nic_interrupt_disable(struct efhw_nic *nic)
  54.541  {
  54.542  	/* NB. No need to check for NIC_FLAG_NO_INTERRUPT, as
  54.543  	 ** falcon_nic_interrupt_hw_disable() will do it. */
  54.544 -	if (idx)
  54.545 -		return;
  54.546  	falcon_nic_interrupt_hw_disable(nic);
  54.547  }
  54.548  
  54.549  static void
  54.550 -falcon_nic_set_interrupt_moderation(struct efhw_nic *nic, uint idx,
  54.551 +falcon_nic_set_interrupt_moderation(struct efhw_nic *nic,
  54.552  				    uint32_t val)
  54.553  {
  54.554 -	falcon_timer_cmd(nic, falcon_idx_to_evq(nic, idx),
  54.555 -			 TIMER_MODE_INT_HLDOFF, val / 5);
  54.556 +	falcon_timer_cmd(nic, FALCON_EVQ_CHAR, TIMER_MODE_INT_HLDOFF,
  54.557 +			 val / 5);
  54.558  }
  54.559  
  54.560  static inline void legacy_irq_ack(struct efhw_nic *nic)
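
With exactly one interrupting event queue per NIC (FALCON_EVQ_CHAR), the idx
parameter disappears from the interrupt entry points and the
is-it-interrupting test above goes away with it.  A sketch of the simplified
call sites:

	falcon_nic_interrupt_enable(nic);		/* was (nic, idx)      */
	falcon_nic_interrupt_disable(nic);		/* was (nic, idx)      */
	falcon_nic_set_interrupt_moderation(nic, val);	/* was (nic, idx, val) */
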
  54.561 @@ -1992,7 +1958,7 @@ static inline void legacy_irq_ack(struct
  54.562  
  54.563  static int falcon_nic_interrupt(struct efhw_nic *nic)
  54.564  {
  54.565 -	volatile uint32_t *syserr_ptr =
  54.566 +	uint32_t *syserr_ptr =
  54.567  	    (uint32_t *) efhw_iopage_ptr(&nic->irq_iobuff);
  54.568  	int handled = 0;
  54.569  	int done_ack = 0;
  54.570 @@ -2102,7 +2068,7 @@ static void falcon_nic_sw_event(struct e
  54.571  void
  54.572  falcon_nic_ipfilter_ctor(struct efhw_nic *nic)
  54.573  {
  54.574 -	if (nic->devtype.variant == 'B' && nic->fpga_version)
  54.575 +	if (nic->devtype.variant >= 'B' && nic->fpga_version)
  54.576  		nic->filter_tbl_size = 8 * 1024;
  54.577  	else
  54.578  		nic->filter_tbl_size = 16 * 1024;
  54.579 @@ -2276,10 +2242,6 @@ static void falcon_nic_ipfilter_clear(st
  54.580  
  54.581  static inline void falcon_nic_buffer_table_lazy_commit(struct efhw_nic *nic)
  54.582  {
  54.583 -#if defined(__ci_ul_driver__)
  54.584 -	if (!(nic->options & NIC_OPT_EFTEST))
  54.585 -		return;
  54.586 -#endif
  54.587  
  54.588  	/* Do nothing if operating in synchronous mode. */
  54.589  	if (!nic->irq_handler)
  54.590 @@ -2291,10 +2253,6 @@ static inline void falcon_nic_buffer_tab
  54.591  	FALCON_LOCK_DECL;
  54.592  	int count = 0, rc = 0;
  54.593  
  54.594 -#if defined(__ci_ul_driver__)
  54.595 -	if (!(nic->options & NIC_OPT_EFTEST))
  54.596 -		return;
  54.597 -#endif
  54.598  
  54.599  	/* We can be called here early days */
  54.600  	if (!nic->irq_handler)
  54.601 @@ -2316,9 +2274,9 @@ static inline void falcon_nic_buffer_tab
  54.602  		 * upcalls into the core driver */
  54.603  		struct efhw_ev_handler handler;
  54.604  		memset(&handler, 0, sizeof(handler));
  54.605 -		nic->evq[FALCON_EVQ_NONIRQ].ev_handlers = &handler;
  54.606 -		rc = efhw_keventq_poll(nic, &nic->evq[FALCON_EVQ_NONIRQ]);
  54.607 -		nic->evq[FALCON_EVQ_NONIRQ].ev_handlers = NULL;
  54.608 +		nic->non_interrupting_evq.ev_handlers = &handler;
  54.609 +		rc = efhw_keventq_poll(nic, &nic->non_interrupting_evq);
  54.610 +		nic->non_interrupting_evq.ev_handlers = NULL;
  54.611  
  54.612  		if (rc < 0) {
  54.613  			EFHW_ERR("%s: poll ERROR (%d:%d) ***** ",
  54.614 @@ -2353,10 +2311,6 @@ void falcon_nic_buffer_table_confirm(str
  54.615  	   an event or DMA queue */
  54.616  	FALCON_LOCK_DECL;
  54.617  
  54.618 -#if defined(__ci_ul_driver__)
  54.619 -	if (!(nic->options & NIC_OPT_EFTEST))
  54.620 -		return;
  54.621 -#endif
  54.622  
  54.623  	/* Do nothing if operating in synchronous mode. */
  54.624  	if (!nic->irq_handler)
  54.625 @@ -2463,7 +2417,7 @@ falcon_check_for_bogus_tx_dma_wptr(struc
  54.626  	FALCON_LOCK_DECL;
  54.627  	uint64_t val_low64, val_high64;
  54.628  	uint64_t size, hwptr, swptr, val;
  54.629 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.630 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.631  	ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
  54.632  
  54.633  	/* Falcon requires 128 bit atomic access for this register */
  54.634 @@ -2500,7 +2454,7 @@ void falcon_clobber_tx_dma_ptrs(struct e
  54.635  {
  54.636  	FALCON_LOCK_DECL;
  54.637  	uint64_t val_low64, val_high64;
  54.638 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.639 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.640  	ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
  54.641  
  54.642  	EFHW_WARN("Recovering stuck TXQ[%d]", dmaq);
  54.643 @@ -2523,7 +2477,7 @@ static inline int
  54.644  __falcon_really_flush_tx_dma_channel(struct efhw_nic *nic, uint dmaq)
  54.645  {
  54.646  	FALCON_LOCK_DECL;
  54.647 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.648 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.649  	uint val;
  54.650  
  54.651  	EFHW_BUILD_ASSERT(TX_FLUSH_DESCQ_REG_KER_OFST ==
  54.652 @@ -2557,7 +2511,7 @@ static inline int
  54.653  	FALCON_LOCK_DECL;
  54.654  	uint64_t val_low64, val_high64;
  54.655  	uint64_t enable, flush_pending;
  54.656 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.657 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.658  	ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
  54.659  
  54.660  	/* Falcon requires 128 bit atomic access for this register */
  54.661 @@ -2606,7 +2560,7 @@ static int
  54.662  __falcon_really_flush_rx_dma_channel(struct efhw_nic *nic, uint dmaq)
  54.663  {
  54.664  	FALCON_LOCK_DECL;
  54.665 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.666 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.667  	uint val;
  54.668  
  54.669  	EFHW_BUILD_ASSERT(RX_FLUSH_DESCQ_REG_KER_OFST ==
  54.670 @@ -2634,7 +2588,7 @@ static inline int
  54.671  {
  54.672  	FALCON_LOCK_DECL;
  54.673  	uint64_t val;
  54.674 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
  54.675 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
  54.676  	ulong offset = falcon_dma_rx_q_offset(nic, dmaq);
  54.677  
  54.678  	/* Falcon requires 128 bit atomic access for this register */
  54.679 @@ -2678,17 +2632,13 @@ falcon_handle_char_event(struct efhw_nic
  54.680  	case TX_DESCQ_FLS_DONE_EV_DECODE:
  54.681  		EFHW_TRACE("TX[%d] flushed",
  54.682  			   (int)FALCON_EVENT_TX_FLUSH_Q_ID(ev));
  54.683 -#if !defined(__ci_ul_driver__)
  54.684  		efhw_handle_txdmaq_flushed(nic, h, ev);
  54.685 -#endif
  54.686  		break;
  54.687  
  54.688  	case RX_DESCQ_FLS_DONE_EV_DECODE:
  54.689  		EFHW_TRACE("RX[%d] flushed",
  54.690  			   (int)FALCON_EVENT_TX_FLUSH_Q_ID(ev));
  54.691 -#if !defined(__ci_ul_driver__)
  54.692  		efhw_handle_rxdmaq_flushed(nic, h, ev);
  54.693 -#endif
  54.694  		break;
  54.695  
  54.696  	case SRM_UPD_DONE_EV_DECODE:
  54.697 @@ -2698,16 +2648,16 @@ falcon_handle_char_event(struct efhw_nic
  54.698  		break;
  54.699  
  54.700  	case EVQ_INIT_DONE_EV_DECODE:
  54.701 -		EFHW_TRACE("EVQ INIT");
  54.702 +		EFHW_TRACE("%sEVQ INIT", "");
  54.703  		break;
  54.704  
  54.705  	case WAKE_UP_EV_DECODE:
  54.706 -		EFHW_TRACE("WAKE UP");
  54.707 +		EFHW_TRACE("%sWAKE UP", "");
  54.708  		efhw_handle_wakeup_event(nic, h, ev);
  54.709  		break;
  54.710  
  54.711  	case TIMER_EV_DECODE:
  54.712 -		EFHW_TRACE("TIMER");
  54.713 +		EFHW_TRACE("%sTIMER", "");
  54.714  		efhw_handle_timeout_event(nic, h, ev);
  54.715  		break;
  54.716  
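Two mechanical idioms recur through falcon.c: the opaque efhw_ioaddr_t
typedef is expanded to volatile char __iomem * so that sparse can check I/O
address-space usage, and ci_dma_addr_fmt is replaced by casting dma_addr_t
to unsigned long for %lx, since dma_addr_t may be 32 or 64 bits wide.  The
odd-looking EFHW_TRACE("%sTIMER", "") form presumably keeps a variadic
trace macro happy when there is no real argument.  A sketch of the idioms,
with the register offset taken from the rev-C code above:

	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
	uint32_t speed = readl(efhw_kva + MAC0_CTRL_REG_OFST) & 0x3;
	dma_addr_t dma = 0;	/* hypothetical value */

	/* portable trace of a dma_addr_t, whatever its width */
	EFHW_TRACE("%s: %lx speed=%x", __FUNCTION__, (unsigned long)dma, speed);
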
    55.1 --- a/drivers/net/sfc/sfc_resource/falcon_mac.c	Tue Mar 31 11:49:12 2009 +0100
    55.2 +++ b/drivers/net/sfc/sfc_resource/falcon_mac.c	Tue Mar 31 11:59:10 2009 +0100
    55.3 @@ -65,7 +65,7 @@
    55.4  /*! Get MAC current address - i.e not necessarily the one in the EEPROM */
    55.5  static inline void mentormac_get_mac_addr(struct efhw_nic *nic)
    55.6  {
    55.7 -	efhw_ioaddr_t mac_kva;
    55.8 +	volatile char __iomem *mac_kva;
    55.9  	uint val1, val2;
   55.10  
   55.11  	MENTOR_MAC_ASSERT_VALID();
   55.12 @@ -131,7 +131,7 @@ static inline void mentormac_get_mac_add
   55.13  static inline void GDACT10mac_get_mac_addr(struct efhw_nic *nic)
   55.14  {
   55.15  	uint val1, val2;
   55.16 -	efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
   55.17 +	volatile char __iomem *efhw_kva = EFHW_KVA(nic);
   55.18  	FALCON_LOCK_DECL;
   55.19  
   55.20  	GDACT10_MAC_ASSERT_VALID();
    56.1 --- a/drivers/net/sfc/sfc_resource/filter_resource.c	Tue Mar 31 11:49:12 2009 +0100
    56.2 +++ b/drivers/net/sfc/sfc_resource/filter_resource.c	Tue Mar 31 11:59:10 2009 +0100
    56.3 @@ -234,7 +234,7 @@ int
    56.4  	EFRM_ASSERT(frs);
    56.5  	EFRM_ASSERT(frs->pt);
    56.6  
    56.7 -	if (efrm_nic_table.a_nic->devtype.variant >= 'B') {
    56.8 +	if (efrm_nic_tablep->a_nic->devtype.variant >= 'B') {
    56.9  		/* Scatter setting must match the setting for
   56.10  		 * the corresponding RX queue */
   56.11  		if (!(frs->pt->flags & EFHW_VI_JUMBO_EN))
    57.1 --- a/drivers/net/sfc/sfc_resource/iobufset_resource.c	Tue Mar 31 11:49:12 2009 +0100
    57.2 +++ b/drivers/net/sfc/sfc_resource/iobufset_resource.c	Tue Mar 31 11:59:10 2009 +0100
    57.3 @@ -58,7 +58,7 @@ struct iobufset_resource_manager *efrm_i
    57.4  static inline size_t iobsrs_size(int no_pages)
    57.5  {
    57.6  	return offsetof(struct iobufset_resource, bufs) +
    57.7 -	    no_pages * sizeof(efhw_iopage_t);
    57.8 +	    no_pages * sizeof(struct efhw_iopage);
    57.9  }
   57.10  
   57.11  void efrm_iobufset_resource_free(struct iobufset_resource *rs)
   57.12 @@ -74,21 +74,21 @@ void efrm_iobufset_resource_free(struct 
   57.13  		efrm_buffer_table_free(&rs->buf_tbl_alloc);
   57.14  
   57.15  	/* see comment on call to efhw_iopage_alloc in the alloc routine above
   57.16 -	   for discussion on use of efrm_nic_table.a_nic here */
   57.17 -	EFRM_ASSERT(efrm_nic_table.a_nic);
   57.18 +	   for discussion on use of efrm_nic_tablep->a_nic here */
   57.19 +	EFRM_ASSERT(efrm_nic_tablep->a_nic);
   57.20  	if (rs->order == 0) {
   57.21  		for (i = 0; i < rs->n_bufs; ++i)
   57.22 -			efhw_iopage_free(efrm_nic_table.a_nic, &rs->bufs[i]);
   57.23 +			efhw_iopage_free(efrm_nic_tablep->a_nic, &rs->bufs[i]);
   57.24  	} else {
   57.25  		/* it is important that this is executed in increasing page
   57.26  		 * order because some implementations of
   57.27  		 * efhw_iopages_init_from_iopage() assume this */
   57.28  		for (i = 0; i < rs->n_bufs;
   57.29  		     i += rs->pages_per_contiguous_chunk) {
   57.30 -			efhw_iopages_t iopages;
   57.31 +			struct efhw_iopages iopages;
   57.32  			efhw_iopages_init_from_iopage(&iopages, &rs->bufs[i],
   57.33  						    rs->order);
   57.34 -			efhw_iopages_free(efrm_nic_table.a_nic, &iopages);
   57.35 +			efhw_iopages_free(efrm_nic_tablep->a_nic, &iopages);
   57.36  		}
   57.37  	}
   57.38  
   57.39 @@ -126,7 +126,7 @@ efrm_iobufset_resource_alloc(int32_t n_p
   57.40  	EFRM_RESOURCE_ASSERT_VALID(&vi_evq->rs, 0);
   57.41  	EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_evq->rs.rs_handle) ==
   57.42  		    EFRM_RESOURCE_VI);
   57.43 -	EFRM_ASSERT(efrm_nic_table.a_nic);
   57.44 +	EFRM_ASSERT(efrm_nic_tablep->a_nic);
   57.45  
   57.46  	/* allocate the resource data structure. */
   57.47  	object_size = iobsrs_size(n_pages);
   57.48 @@ -186,11 +186,11 @@ efrm_iobufset_resource_alloc(int32_t n_p
   57.49  			/* due to bug2426 we have to specifiy a NIC when
   57.50  			 * allocating a DMAable page, which is a bit messy.
   57.51  			 * For now we assume that if the page is suitable
   57.52 -			 * (e.g. DMAable) by one nic (efrm_nic_table.a_nic),
   57.53 +			 * (e.g. DMAable) by one nic (efrm_nic_tablep->a_nic),
   57.54  			 * it is suitable for all NICs.
   57.55  			 * XXX I bet that breaks in Solaris.
   57.56  			 */
   57.57 -			rc = efhw_iopage_alloc(efrm_nic_table.a_nic,
   57.58 +			rc = efhw_iopage_alloc(efrm_nic_tablep->a_nic,
   57.59  					     &iobrs->bufs[i]);
   57.60  			if (rc < 0) {
   57.61  				EFRM_ERR("%s: failed (rc %d) to allocate "
   57.62 @@ -199,7 +199,7 @@ efrm_iobufset_resource_alloc(int32_t n_p
   57.63  			}
   57.64  		}
   57.65  	} else {
   57.66 -		efhw_iopages_t iopages;
   57.67 +		struct efhw_iopages iopages;
   57.68  		unsigned j;
   57.69  
   57.70  		/* make sure iobufs are in a known state in case we don't
   57.71 @@ -209,7 +209,7 @@ efrm_iobufset_resource_alloc(int32_t n_p
   57.72  
   57.73  		for (i = 0; i < iobrs->n_bufs;
   57.74  		     i += iobrs->pages_per_contiguous_chunk) {
   57.75 -			rc = efhw_iopages_alloc(efrm_nic_table.a_nic,
   57.76 +			rc = efhw_iopages_alloc(efrm_nic_tablep->a_nic,
   57.77  						&iopages, iobrs->order);
   57.78  			if (rc < 0) {
   57.79  				EFRM_ERR("%s: failed (rc %d) to allocate "
   57.80 @@ -277,16 +277,16 @@ fail5:
   57.81  	i = iobrs->n_bufs;
   57.82  fail4:
   57.83  	/* see comment on call to efhw_iopage_alloc above for a discussion
   57.84 -	 * on use of efrm_nic_table.a_nic here */
   57.85 +	 * on use of efrm_nic_tablep->a_nic here */
   57.86  	if (iobrs->order == 0) {
   57.87  		while (i--) {
   57.88 -			efhw_iopage_t *page = &iobrs->bufs[i];
   57.89 -			efhw_iopage_free(efrm_nic_table.a_nic, page);
   57.90 +			struct efhw_iopage *page = &iobrs->bufs[i];
   57.91 +			efhw_iopage_free(efrm_nic_tablep->a_nic, page);
   57.92  		}
   57.93  	} else {
   57.94  		unsigned int j;
   57.95  		for (j = 0; j < i; j += iobrs->pages_per_contiguous_chunk) {
   57.96 -			efhw_iopages_t iopages;
   57.97 +			struct efhw_iopages iopages;
   57.98  
   57.99  			EFRM_ASSERT(j % iobrs->pages_per_contiguous_chunk
  57.100  				    == 0);
  57.101 @@ -296,7 +296,7 @@ fail4:
  57.102  			efhw_iopages_init_from_iopage(&iopages,
  57.103  						      &iobrs->bufs[j],
  57.104  						      iobrs->order);
  57.105 -			efhw_iopages_free(efrm_nic_table.a_nic, &iopages);
  57.106 +			efhw_iopages_free(efrm_nic_tablep->a_nic, &iopages);
  57.107  		}
  57.108  	}
  57.109  	efrm_vi_resource_release(iobrs->evq);
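
The chunked (order > 0) free path must walk chunks in increasing page order
because, per the comment above, some implementations of
efhw_iopages_init_from_iopage() rebuild the chunk descriptor from its first
page.  A condensed sketch of the loop, assuming pages_per_contiguous_chunk
divides n_bufs:

	for (i = 0; i < rs->n_bufs; i += rs->pages_per_contiguous_chunk) {
		struct efhw_iopages chunk;

		efhw_iopages_init_from_iopage(&chunk, &rs->bufs[i], rs->order);
		efhw_iopages_free(efrm_nic_tablep->a_nic, &chunk);
	}
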
    58.1 --- a/drivers/net/sfc/sfc_resource/iopage.c	Tue Mar 31 11:49:12 2009 +0100
    58.2 +++ b/drivers/net/sfc/sfc_resource/iopage.c	Tue Mar 31 11:59:10 2009 +0100
    58.3 @@ -34,7 +34,7 @@
    58.4  #include "kernel_compat.h"
    58.5  #include <ci/efhw/common_sysdep.h> /* for dma_addr_t */
    58.6  
    58.7 -int efhw_iopage_alloc(struct efhw_nic *nic, efhw_iopage_t *p)
    58.8 +int efhw_iopage_alloc(struct efhw_nic *nic, struct efhw_iopage *p)
    58.9  {
   58.10  	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
   58.11  	dma_addr_t handle;
   58.12 @@ -55,7 +55,7 @@ int efhw_iopage_alloc(struct efhw_nic *n
   58.13  	return 0;
   58.14  }
   58.15  
   58.16 -void efhw_iopage_free(struct efhw_nic *nic, efhw_iopage_t *p)
   58.17 +void efhw_iopage_free(struct efhw_nic *nic, struct efhw_iopage *p)
   58.18  {
   58.19  	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
   58.20  	EFHW_ASSERT(efhw_page_is_valid(&p->p));
   58.21 @@ -64,7 +64,9 @@ void efhw_iopage_free(struct efhw_nic *n
   58.22  				 efhw_iopage_ptr(p), p->dma_addr);
   58.23  }
   58.24  
   58.25 -int efhw_iopages_alloc(struct efhw_nic *nic, efhw_iopages_t *p, unsigned order)
   58.26 +int
   58.27 +efhw_iopages_alloc(struct efhw_nic *nic, struct efhw_iopages *p,
   58.28 +		   unsigned order)
   58.29  {
   58.30  	unsigned bytes = 1u << (order + PAGE_SHIFT);
   58.31  	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
   58.32 @@ -91,7 +93,7 @@ int efhw_iopages_alloc(struct efhw_nic *
   58.33  	return 0;
   58.34  }
   58.35  
   58.36 -void efhw_iopages_free(struct efhw_nic *nic, efhw_iopages_t *p)
   58.37 +void efhw_iopages_free(struct efhw_nic *nic, struct efhw_iopages *p)
   58.38  {
   58.39  	unsigned bytes = 1u << (p->order + PAGE_SHIFT);
   58.40  	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
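
These helpers wrap coherent DMA allocations against the NIC's PCI device;
the size arithmetic for the multi-page case is visible above.  A hedged
sketch of the allocation core (field names follow usage elsewhere in the
changeset; the exact body is an assumption):

	unsigned bytes = 1u << (order + PAGE_SHIFT);
	dma_addr_t handle;
	void *kva;

	kva = pci_alloc_consistent(lnic->pci_dev, bytes, &handle);
	if (kva == NULL)
		return -ENOMEM;
	p->kva = kva;		/* kernel virtual address of the buffer */
	p->dma_addr = handle;	/* bus address for NIC DMA */
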
    59.1 --- a/drivers/net/sfc/sfc_resource/kernel_compat.c	Tue Mar 31 11:49:12 2009 +0100
    59.2 +++ b/drivers/net/sfc/sfc_resource/kernel_compat.c	Tue Mar 31 11:59:10 2009 +0100
    59.3 @@ -49,22 +49,6 @@
    59.4  
    59.5  
    59.6  
    59.7 -/* I admit that it's a bit ugly going straight to the field, but it
    59.8 - * seems easiest given that get_page followed by put_page on a page
    59.9 - * with PG_reserved set will increment the ref count on 2.6.14 and
   59.10 - * below, but not 2.6.15.  Also, RedHat have hidden put_page_testzero
   59.11 - * in a header file which produces warnings when compiled.  This
   59.12 - * doesn't agree with our use of -Werror.
   59.13 - */
   59.14 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5)
   59.15 -# define page_count_field(pg)  ((pg)->count)
   59.16 -#else
   59.17 -# define page_count_field(pg)  ((pg)->_count)
   59.18 -#endif
   59.19 -
   59.20 -#define inc_page_count(page)   atomic_inc(&page_count_field(page))
   59.21 -#define dec_page_count(page)   atomic_dec(&page_count_field(page))
   59.22 -
   59.23  /* Bug 5531: set_page_count doesn't work if the new page count is an
   59.24   * expression. */
   59.25  #define ci_set_page_count(page, n) set_page_count(page, (n))
   59.26 @@ -170,15 +154,6 @@
   59.27     *    Hon    - The PG_compound bit is honoured by munmap.
   59.28     *
   59.29     *                 OS      A       B       C       D
   59.30 -   * 2.4.18                  NotDef  NU      resv    NotHon
   59.31 -   * 2.4.29                  NotDef  NU      resv    NotHon
   59.32 -   * 2.4.20-31.9     rhl9    NotDef  NU      resv    NotHon
   59.33 -   *
   59.34 -   * 2.4.21-4.EL     rhel3   Comp    NU      resv    Hon
   59.35 -   * 2.4.21-15.EL    rhel3   Comp    NU      resv    Hon
   59.36 -   * 2.4.21-32.EL    rhel3   Comp    NU      resv    Hon
   59.37 -   * 2.4.21-40.EL    rhel3   Comp    NU      resv    Hon
   59.38 -   *
   59.39     * 2.6.0                   Comp    NU      resv    NotHon
   59.40     *
   59.41     * 2.6.5-7.97      sles9   OptInv  NU      resv    NotHon
   59.42 @@ -209,73 +184,40 @@
   59.43     * to one on all the sub-pages.  The SLES 9 range are affected, as
   59.44     * are kernels built without CONFIG_MMU defined.
   59.45     *
   59.46 -   * Possible strategies for multi-page allocations:
   59.47 -   *
   59.48 -   * EFRM_MMAP_USE_COMPOUND
   59.49 -   * 1. Allocate a compound page.  Reference counting should then work
   59.50 -   *    on the whole allocation.  This is a good theory, but is broken
   59.51 -   *    by bug/feature D (above).
   59.52 +   * On all kernel versions, we just allocate a compound page.
   59.53 +   * Reference counting should then work on the whole allocation but
   59.54 +   * is broken by bug/feature D (above) on old kernels.
   59.55     *
   59.56 -   * EFRM_MMAP_USE_SPLIT
   59.57 -   * 2. Convert the multi-page allocation to many single page
   59.58 -   *    allocations.  This involves incrementing the reference counts
   59.59 -   *    and clearing PG_compound on all the pages (including the
   59.60 -   *    first).  The references should be released _after_ calling
   59.61 -   *    pci_free_consistent so that that call doesn't release the
   59.62 -   *    memory.
   59.63 -   *
   59.64 -   * EFRM_MMAP_USE_INCREMENT
   59.65 -   * 3. Increment the reference count on all the pages after
   59.66 -   *    allocating and decrement them again before freeing.  This gets
   59.67 -   *    round the zero reference count problem.  It doesn't handle the
   59.68 -   *    case where someone else is holding a reference to one of our
   59.69 -   *    pages when we free the pages, but we think VM_IO stops this
   59.70 -   *    from happening.
    59.71 +   * EFRM_MMAP_USE_SPLIT
    59.72 +   *
   59.73 +   *    On old kernels, we convert the multi-page allocation to many
   59.74 +   *    single page allocations.  This involves incrementing the
   59.75 +   *    reference counts and clearing PG_compound on all the pages
   59.76 +   *    (including the first).  Given that the affected kernels are
   59.77 +   *    inconsistent about the initial reference counts on high order
   59.78 +   *    page allocations, we reinitialise the reference counts instead
   59.79 +   *    of incrementing them.  The references are released _after_
   59.80 +   *    calling pci_free_consistent so that that call doesn't release
   59.81 +   *    the memory.
   59.82     */
   59.83  
   59.84 -/* Should we use strategy 1?  This can be forced on us by the OS. */
   59.85 -#if defined(PG_compound)
   59.86 -#define EFRM_MMAP_USE_COMPOUND 1
   59.87 -#else
   59.88 -#define EFRM_MMAP_USE_COMPOUND 0
   59.89 -#endif
   59.90 -
   59.91 -/* Should we use strategy 2?  This can be used even if strategy 1 is
   59.92 - * used. */
   59.93 +/* Should we split each multi-page allocation into single page
   59.94 + * allocations? */
   59.95  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
   59.96  #define EFRM_MMAP_USE_SPLIT    1
   59.97  #else
   59.98  #define EFRM_MMAP_USE_SPLIT    0
   59.99  #endif
  59.100  
  59.101 -/* Should we use strategy 3?  There's no point doing this if either
  59.102 - * strategy 1 or strategy 2 is used. */
  59.103 -#if !EFRM_MMAP_USE_COMPOUND && !EFRM_MMAP_USE_SPLIT
  59.104 -#error "We shouldn't have to use this strategy."
  59.105 -#define EFRM_MMAP_USE_INCREMENT 1
  59.106 -#else
  59.107 -#define EFRM_MMAP_USE_INCREMENT 0
  59.108 -#endif
  59.109 -
  59.110 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
  59.111 -#define EFRM_MMAP_RESET_REFCNT  1
  59.112 -#else
  59.113 -#define EFRM_MMAP_RESET_REFCNT  0
  59.114 -#endif
  59.115 -
  59.116  /* NB. 2.6.17 has renamed SetPageCompound to __SetPageCompound and
  59.117   * ClearPageCompound to __ClearPageCompound. */
  59.118 -#if ((defined(PageCompound)        !=  defined(PG_compound)) ||	\
  59.119 -     (defined(SetPageCompound)     !=  defined(PG_compound) &&	\
  59.120 -      defined(__SetPageCompound)   !=  defined(PG_compound)) ||	\
  59.121 -     (defined(ClearPageCompound)   !=  defined(PG_compound) &&	\
  59.122 -      defined(__ClearPageCompound) !=  defined(PG_compound)) ||	\
  59.123 -     (defined(__GFP_COMP)          && !defined(PG_compound)))
  59.124 +#if ( ( !defined(PageCompound) ) ||					\
  59.125 +      ( !defined(PG_compound) ) ||					\
  59.126 +      ( !defined(SetPageCompound)   && !defined(__SetPageCompound) ) ||	\
  59.127 +      ( !defined(ClearPageCompound) && !defined(__ClearPageCompound) ) )
  59.128  #error Mismatch of defined page-flags.
  59.129  #endif
  59.130  
  59.131 -extern int use_pci_alloc;	/* Use pci_alloc_consistent to alloc iopages */
  59.132 -
  59.133  /****************************************************************************
  59.134   *
  59.135   * allocate a buffer suitable for DMA to/from the NIC
  59.136 @@ -286,7 +228,7 @@ static inline void pci_mmap_pages_hack_a
  59.137  {
  59.138  	unsigned pfn = __pa(kva) >> PAGE_SHIFT;
  59.139  	struct page *start_pg = pfn_to_page(pfn);
  59.140 -#if !defined(NDEBUG) || EFRM_MMAP_USE_SPLIT
  59.141 +#if EFRM_MMAP_USE_SPLIT
  59.142  	struct page *end_pg = start_pg + (1 << order);
  59.143  	struct page *pg;
  59.144  #endif
  59.145 @@ -308,45 +250,8 @@ static inline void pci_mmap_pages_hack_a
  59.146  
  59.147  	/* Check the page count and PG_compound bit. */
  59.148  #ifndef NDEBUG
  59.149 -#  if defined(PG_compound)
  59.150 -	EFRM_ASSERT(PageCompound(start_pg) == EFRM_MMAP_USE_COMPOUND);
  59.151 -#  endif
  59.152 +	EFRM_ASSERT(PageCompound(start_pg) == 1);
  59.153  	EFRM_ASSERT(page_count(start_pg) == 1);
  59.154 -
  59.155 -	{
  59.156 -		/* Some kernels have the page count field hold (ref_count-1)
  59.157 -		 * rather than (ref_count).  This is so that decrementing the
  59.158 -		 * reference count to "zero" causes the internal value to change
  59.159 -		 * from 0 to -1 which sets the carry flag.  Other kernels store
  59.160 -		 * the real reference count value in the obvious way.  We handle
  59.161 -		 * this here by reading the reference count field of the first
  59.162 -		 * page, which is always 1. */
  59.163 -		int pg_count_zero;
  59.164 -		pg_count_zero = atomic_read(&page_count_field(start_pg)) - 1;
  59.165 -		for (pg = start_pg + 1; pg < end_pg; pg++) {
  59.166 -			int pg_count;
  59.167 -#  if defined(PG_compound)
  59.168 -			EFRM_ASSERT(PageCompound(pg) == EFRM_MMAP_USE_COMPOUND);
  59.169 -#  endif
  59.170 -
  59.171 -			/* Bug 5450: Some kernels initialise the page count
  59.172 -			 * to one for pages other than the first and some
  59.173 -			 * leave it at zero.  We allow either behaviour
  59.174 -			 * here, but disallow anything strange.  Newer
  59.175 -			 * kernels only define set_page_count in an
  59.176 -			 * internal header file, so we have to make do with
  59.177 -			 * incrementing and decrementing the reference
  59.178 -			 * count.  Fortunately, those kernels don't set the
  59.179 -			 * reference count to one on all the pages. */
  59.180 -			pg_count = atomic_read(&page_count_field(pg));
  59.181 -#  if EFRM_MMAP_RESET_REFCNT
  59.182 -			if (pg_count != pg_count_zero)
  59.183 -				EFRM_ASSERT(pg_count == pg_count_zero + 1);
  59.184 -#  else
  59.185 -			EFRM_ASSERT(pg_count == pg_count_zero);
  59.186 -#  endif
  59.187 -		}
  59.188 -	}
  59.189  #endif
  59.190  
  59.191  	/* Split the multi-page allocation if necessary. */
  59.192 @@ -354,50 +259,33 @@ static inline void pci_mmap_pages_hack_a
  59.193  	for (pg = start_pg; pg < end_pg; pg++) {
  59.194  
  59.195  		/* This is no longer a compound page. */
  59.196 -#  if EFRM_MMAP_USE_COMPOUND
  59.197  		ClearPageCompound(pg);
  59.198  		EFRM_ASSERT(PageCompound(pg) == 0);
  59.199 -#  endif
  59.200  
  59.201  #  ifndef NDEBUG
  59.202  		{
  59.203  			int pg_count = page_count(pg);
  59.204  			/* Bug 5450: The page count can be zero or one here. */
  59.205 -			if (pg == start_pg) {
  59.206 +			if (pg == start_pg)
  59.207  				EFRM_ASSERT(pg_count == 1);
  59.208 -			} else {
  59.209 -#    if EFRM_MMAP_RESET_REFCNT
  59.210 -				if (pg_count != 0)
  59.211 -					EFRM_ASSERT(pg_count == 1);
  59.212 -#    else
  59.213 -				EFRM_ASSERT(pg_count == 0);
  59.214 -#    endif
  59.215 -			}
  59.216 +			else if (pg_count != 0)
  59.217 +				EFRM_ASSERT(pg_count == 1);
  59.218  		}
  59.219  #  endif
  59.220  
  59.221 -		/* Get a reference which will be released after the pages have
  59.222 -		 * been passed back to pci_free_consistent. */
  59.223 -#  if EFRM_MMAP_RESET_REFCNT
  59.224 -		/* Bug 5450: Reset the reference count since the count might
  59.225 -		 * already be 1. */
  59.226 +		/* Get a reference which will be released after the
  59.227 +		 * pages have been passed back to pci_free_consistent.
  59.228 +		 * Reset the page count instead of incrementing it
  59.229 +		 * because old kernels are inconsistent about
  59.230 +		 * initialising the reference count. */
  59.231  		ci_set_page_count(pg, (pg == start_pg) ? 2 : 1);
  59.232 -#  else
  59.233 -		get_page(pg);
  59.234 -#  endif
  59.235  	}
  59.236  #endif
  59.237 -
  59.238 -	/* Fudge the reference count if necessary. */
  59.239 -#if EFRM_MMAP_USE_INCREMENT
  59.240 -	for (pg = start_pg; pg < end_pg; pg++)
  59.241 -		inc_page_count(pg);
  59.242 -#endif
  59.243  }
  59.244  
  59.245  static inline void pci_mmap_pages_hack_before_free(caddr_t kva, unsigned order)
  59.246  {
  59.247 -#if EFRM_MMAP_USE_INCREMENT || !defined(NDEBUG)
  59.248 +#if !defined(NDEBUG)
  59.249  	/* Drop the references taken in pci_mmap_pages_hack_after_alloc */
  59.250  	unsigned pfn = __pa(kva) >> PAGE_SHIFT;
  59.251  	struct page *start_pg = pfn_to_page(pfn);
  59.252 @@ -412,15 +300,9 @@ static inline void pci_mmap_pages_hack_b
  59.253  	if (PageReserved(start_pg))
  59.254  		return;
  59.255  
  59.256 -#  if EFRM_MMAP_USE_INCREMENT
  59.257 -	for (pg = start_pg; pg < end_pg; pg++)
  59.258 -		dec_page_count(pg);
  59.259 -#  endif
  59.260 -
  59.261 -#if !defined(NDEBUG)
  59.262  	EFRM_ASSERT(page_count(start_pg) == 1+EFRM_MMAP_USE_SPLIT);
  59.263  
  59.264 -#  if EFRM_MMAP_USE_COMPOUND && !EFRM_MMAP_USE_SPLIT
  59.265 +#  if !EFRM_MMAP_USE_SPLIT
  59.266  	for (pg = start_pg; pg < end_pg; pg++)
  59.267  		EFRM_ASSERT(PageCompound(pg));
  59.268  #  else
  59.269 @@ -432,7 +314,6 @@ static inline void pci_mmap_pages_hack_b
  59.270  		EFRM_ASSERT(page_count(pg) == exp_pg_count);
  59.271  	}
  59.272  #  endif
  59.273 -#endif
  59.274  
  59.275  #endif
  59.276  }
  59.277 @@ -483,52 +364,23 @@ void dump_iopage_counts(void)
  59.278  void *efrm_dma_alloc_coherent(struct device *dev, size_t size,
  59.279  			      dma_addr_t *dma_addr, int flag)
  59.280  {
  59.281 -	struct pci_dev *pci_dev;
  59.282  	void *ptr;
  59.283  	unsigned order;
  59.284 -	EFRM_IOMMU_DECL;
  59.285  
  59.286  	order = __ffs(size/PAGE_SIZE);
  59.287  	EFRM_ASSERT(size == (PAGE_SIZE<<order));
  59.288  
  59.289 +	/* Can't take a spinlock here since the allocation can
  59.290 +	 * block. */
  59.291 +	ptr = dma_alloc_coherent(dev, size, dma_addr, flag);
  59.292 +	if (ptr == NULL)
  59.293 +		return ptr;
  59.294 +
  59.295  	/* NB. The caller may well set __GFP_COMP.  However we can't
  59.296  	 * rely on this working on older kernels.  2.6.9 only acts on
  59.297  	 * __GFP_COMP if CONFIG_HUGETLB_PAGE is defined.  If the flag
  59.298  	 * did have an effect then PG_compound will be set on the
  59.299  	 * pages. */
  59.300 -
  59.301 -	if (use_pci_alloc) {
  59.302 -		/* Can't take a spinlock here since the allocation can
  59.303 -		 * block. */
  59.304 -		ptr = dma_alloc_coherent(dev, size, dma_addr, flag);
  59.305 -		if (ptr == NULL)
  59.306 -			return ptr;
  59.307 -	} else {
  59.308 -#ifdef CONFIG_SWIOTLB		/* BUG1340 */
  59.309 -		if (swiotlb) {
  59.310 -			EFRM_ERR("%s: This kernel is using DMA bounce "
  59.311 -				 "buffers.  Please upgrade kernel to "
  59.312 -				 "linux2.6 or reduce the amount of RAM "
  59.313 -				 "with mem=XXX.", __FUNCTION__);
  59.314 -			return NULL;
  59.315 -		}
  59.316 -#endif
  59.317 -		ptr = (void *)__get_free_pages(flag, order);
  59.318 -
  59.319 -		if (ptr == NULL)
  59.320 -			return NULL;
  59.321 -
  59.322 -		EFRM_IOMMU_LOCK();
  59.323 -		pci_dev = container_of(dev, struct pci_dev, dev);
  59.324 -		*dma_addr = pci_map_single(pci_dev, ptr, size,
  59.325 -					   PCI_DMA_BIDIRECTIONAL);
  59.326 -		EFRM_IOMMU_UNLOCK();
  59.327 -		if (pci_dma_mapping_error(*dma_addr)) {
  59.328 -			free_pages((unsigned long)ptr, order);
  59.329 -			return NULL;
  59.330 -		}
  59.331 -	}
  59.332 -
  59.333  #ifndef CONFIG_IA64
  59.334  	pci_mmap_pages_hack_after_alloc(ptr, order);
  59.335  #endif
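A minimal caller sketch (hypothetical function; a valid struct pci_dev is assumed).  Note the size must be a power-of-two number of pages to satisfy the assertion above:

	/* Sketch: allocate and release a four-page coherent DMA buffer. */
	static int example_dma_buffer(struct pci_dev *pci_dev)
	{
		dma_addr_t dma;
		void *buf;

		buf = efrm_dma_alloc_coherent(&pci_dev->dev, 4 * PAGE_SIZE,
					      &dma, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
		/* ... DMA to/from buf ... */
		efrm_dma_free_coherent(&pci_dev->dev, 4 * PAGE_SIZE, buf, dma);
		return 0;
	}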
  59.336 @@ -547,9 +399,7 @@ void *efrm_dma_alloc_coherent(struct dev
  59.337  void efrm_dma_free_coherent(struct device *dev, size_t size,
  59.338  			    void *ptr, dma_addr_t dma_addr)
  59.339  {
  59.340 -	struct pci_dev *pci_dev;
  59.341  	unsigned order;
  59.342 -	EFRM_IOMMU_DECL;
  59.343  
  59.344  	order = __ffs(size/PAGE_SIZE);
  59.345  	EFRM_ASSERT(size == (PAGE_SIZE<<order));
  59.346 @@ -564,19 +414,7 @@ void efrm_dma_free_coherent(struct devic
  59.347  #ifndef CONFIG_IA64
  59.348  	pci_mmap_pages_hack_before_free(ptr, order);
  59.349  #endif
  59.350 -	if (use_pci_alloc) {
  59.351 -		EFRM_IOMMU_LOCK();
  59.352 -		dma_free_coherent(dev, size, ptr, dma_addr);
  59.353 -		EFRM_IOMMU_UNLOCK();
  59.354 -	} else {
  59.355 -		pci_dev = container_of(dev, struct pci_dev, dev);
  59.356 -		EFRM_IOMMU_LOCK();
  59.357 -		efrm_pci_unmap_single(pci_dev, dma_addr, size,
  59.358 -				      PCI_DMA_BIDIRECTIONAL);
  59.359 -		EFRM_IOMMU_UNLOCK();
  59.360 -
  59.361 -		free_pages((unsigned long)ptr, order);
  59.362 -	}
  59.363 +	dma_free_coherent(dev, size, ptr, dma_addr);
  59.364  
  59.365  #ifndef CONFIG_IA64
  59.366  	pci_mmap_pages_hack_after_free(ptr, order);
    60.1 --- a/drivers/net/sfc/sfc_resource/kernel_compat.h	Tue Mar 31 11:49:12 2009 +0100
    60.2 +++ b/drivers/net/sfc/sfc_resource/kernel_compat.h	Tue Mar 31 11:59:10 2009 +0100
    60.3 @@ -40,9 +40,12 @@
    60.4  #define DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H
    60.5  
    60.6  #include <linux/version.h>
    60.7 +#include <linux/moduleparam.h>
    60.8 +#include <linux/sched.h>
    60.9 +#include <asm/io.h>
   60.10 +#include <linux/pci.h>
   60.11  
   60.12  /********* wait_for_completion_timeout() ********************/
   60.13 -#include <linux/sched.h>
   60.14  
   60.15  /* RHEL_RELEASE_CODE from linux/version.h is only defined for 2.6.9-55EL
   60.16   * UTS_RELEASE is unfortunately unusable
   60.17 @@ -51,7 +54,7 @@
   60.18  #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)) && \
   60.19  	!defined(RHEL_RELEASE_CODE)
   60.20  
   60.21 -static inline unsigned long fastcall __sched
   60.22 +static inline unsigned long fastcall
   60.23  efrm_wait_for_completion_timeout(struct completion *x, unsigned long timeout)
   60.24  {
   60.25  	might_sleep();
   60.26 @@ -87,125 +90,24 @@ out:
   60.27  
   60.28  #endif
   60.29  
   60.30 -/********* pci_map_*() ********************/
   60.31 -
   60.32 -#include <linux/pci.h>
   60.33 -
   60.34 -/* Bug 4560: Some kernels leak IOMMU entries under heavy load.  Use a
   60.35 - * spinlock to serialise access where possible to alleviate the
   60.36 - * problem.
   60.37 - *
   60.38 - * NB. This is duplicated in the net driver.  Please keep in sync. */
   60.39 -#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) && \
   60.40 -     (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)) && \
   60.41 -      defined(__x86_64__) && defined(CONFIG_SMP))
   60.42 -
   60.43 -#define EFRM_HAVE_IOMMU_LOCK 1
   60.44 -
   60.45 -#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5)) &&	\
   60.46 -      defined(CONFIG_SUSE_KERNEL))
   60.47 -#define EFRM_NEED_ALTERNATE_MAX_PFN 1
   60.48 -#endif
   60.49 -
   60.50 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
   60.51 -#if defined(CONFIG_GART_IOMMU)
   60.52 -#define EFRM_NO_IOMMU no_iommu
   60.53 -#else
   60.54 -#define EFRM_NO_IOMMU 1
   60.55 -#endif
   60.56 -#else
   60.57 -#define EFRM_NO_IOMMU 0
   60.58 -#endif
   60.59 +/********* io mapping ********************/
   60.60  
   60.61 -/* Set to 0 if we should never use the lock.  Set to 1 if we should
   60.62 - * automatically determine if we should use the lock.  Set to 2 if we
   60.63 - * should always use the lock. */
   60.64 -extern unsigned int efx_use_iommu_lock;
   60.65 -/* Defined in the net driver. */
   60.66 -extern spinlock_t efx_iommu_lock;
   60.67 -/* Non-zero if there is a card which needs the lock. */
   60.68 -extern int efrm_need_iommu_lock;
   60.69 -
   60.70 -/* The IRQ state is needed if the lock is being used.  The flag is
   60.71 - * cached to ensure that every lock is followed by an unlock, even
   60.72 - * if the global flag changes in the middle of the operation. */
   60.73 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,9)
   60.74  
   60.75 -#define EFRM_IOMMU_DECL				\
   60.76 -	unsigned long efx_iommu_irq_state = 0;	\
   60.77 -	int efx_iommu_using_lock;
   60.78 -#define EFRM_IOMMU_LOCK()						\
   60.79 -	do {								\
   60.80 -		efx_iommu_using_lock = (efx_use_iommu_lock &&		\
   60.81 -					(efrm_need_iommu_lock ||	\
   60.82 -					 efx_use_iommu_lock >= 2));	\
   60.83 -		if (efx_iommu_using_lock)				\
   60.84 -		spin_lock_irqsave(&efx_iommu_lock, efx_iommu_irq_state);\
   60.85 -	} while (0)
   60.86 -#define EFRM_IOMMU_UNLOCK()						\
   60.87 -	do {								\
   60.88 -		if (efx_iommu_using_lock)				\
   60.89 -		spin_unlock_irqrestore(&efx_iommu_lock,			\
   60.90 -				       efx_iommu_irq_state);		\
   60.91 -	} while (0)
   60.92 +  #ifndef __iomem
   60.93 +  #define __iomem
   60.94 +  #endif
   60.95  
   60.96 -#else /* defined(__x86_64__) && defined(CONFIG_SMP) */
   60.97 -
   60.98 -#define EFRM_HAVE_IOMMU_LOCK 0
   60.99 -#define EFRM_IOMMU_DECL
  60.100 -#define EFRM_IOMMU_LOCK()    do {} while (0)
  60.101 -#define EFRM_IOMMU_UNLOCK()  do {} while (0)
  60.102 +  static inline void efrm_iounmap(volatile void __iomem *addr)
  60.103 +  {
  60.104 +	  iounmap((void __iomem *)addr);
  60.105 +  }
  60.106 +  #define iounmap(arg) efrm_iounmap(arg)
  60.107  
  60.108  #endif
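The wrapper appears to exist because iounmap() on these old kernels does not accept a volatile-qualified pointer; the cast inside efrm_iounmap() absorbs the qualifier once, so call sites stay clean.  A hypothetical call site:

	/* Sketch: unmap a volatile-qualified BAR mapping with no cast at
	 * the call site; on <= 2.6.9 this resolves to efrm_iounmap(). */
	static void example_unmap_bar(volatile char __iomem *ioaddr)
	{
		iounmap(ioaddr);
	}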
  60.109  
  60.110 -static inline dma_addr_t efrm_pci_map_single(struct pci_dev *hwdev, void *ptr,
  60.111 -					     size_t size, int direction)
  60.112 -{
  60.113 -	dma_addr_t dma_addr;
  60.114 -	EFRM_IOMMU_DECL;
  60.115 -
  60.116 -	EFRM_IOMMU_LOCK();
  60.117 -	dma_addr = pci_map_single(hwdev, ptr, size, direction);
  60.118 -	EFRM_IOMMU_UNLOCK();
  60.119 -
  60.120 -	return dma_addr;
  60.121 -}
  60.122 -
  60.123 -static inline void efrm_pci_unmap_single(struct pci_dev *hwdev,
  60.124 -					 dma_addr_t dma_addr, size_t size,
  60.125 -					 int direction)
  60.126 -{
  60.127 -	EFRM_IOMMU_DECL;
  60.128 -
  60.129 -	EFRM_IOMMU_LOCK();
  60.130 -	pci_unmap_single(hwdev, dma_addr, size, direction);
  60.131 -	EFRM_IOMMU_UNLOCK();
  60.132 -}
  60.133  
  60.134 -static inline dma_addr_t efrm_pci_map_page(struct pci_dev *hwdev,
  60.135 -					   struct page *page,
  60.136 -					   unsigned long offset, size_t size,
  60.137 -					   int direction)
  60.138 -{
  60.139 -	dma_addr_t dma_addr;
  60.140 -	EFRM_IOMMU_DECL;
  60.141 -
  60.142 -	EFRM_IOMMU_LOCK();
  60.143 -	dma_addr = pci_map_page(hwdev, page, offset, size, direction);
  60.144 -	EFRM_IOMMU_UNLOCK();
  60.145 -
  60.146 -	return dma_addr;
  60.147 -}
  60.148 -
  60.149 -static inline void efrm_pci_unmap_page(struct pci_dev *hwdev,
  60.150 -				       dma_addr_t dma_addr, size_t size,
  60.151 -				       int direction)
  60.152 -{
  60.153 -	EFRM_IOMMU_DECL;
  60.154 -
  60.155 -	EFRM_IOMMU_LOCK();
  60.156 -	pci_unmap_page(hwdev, dma_addr, size, direction);
  60.157 -	EFRM_IOMMU_UNLOCK();
  60.158 -}
  60.159 +/********* Memory allocation *************/
  60.160  
  60.161  #ifndef IN_KERNEL_COMPAT_C
  60.162  #  ifndef __GFP_COMP
  60.163 @@ -216,6 +118,9 @@ static inline void efrm_pci_unmap_page(s
  60.164  #  endif
  60.165  #endif
  60.166  
  60.167 +
  60.168 +/********* pci_map_*() ********************/
  60.169 +
  60.170  extern void *efrm_dma_alloc_coherent(struct device *dev, size_t size,
  60.171  				     dma_addr_t *dma_addr, int flag);
  60.172  
  60.173 @@ -236,4 +141,11 @@ static inline void efrm_pci_free_consist
  60.174  	efrm_dma_free_coherent(&hwdev->dev, size, ptr, dma_addr);
  60.175  }
  60.176  
  60.177 +
  60.178 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8))
  60.179 +static inline void efrm_pci_disable_msi(struct pci_dev *dev) {}
  60.180 +#undef pci_disable_msi
  60.181 +#define pci_disable_msi efrm_pci_disable_msi
  60.182 +#endif
  60.183 +
  60.184  #endif /* DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H */
    61.1 --- a/drivers/net/sfc/sfc_resource/linux_resource_internal.h	Tue Mar 31 11:49:12 2009 +0100
    61.2 +++ b/drivers/net/sfc/sfc_resource/linux_resource_internal.h	Tue Mar 31 11:59:10 2009 +0100
    61.3 @@ -65,7 +65,8 @@ extern void efrm_driverlink_unregister(v
    61.4  extern int
    61.5  efrm_nic_add(struct pci_dev *dev, unsigned int opts, const uint8_t *mac_addr,
    61.6  	     struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock,
    61.7 -	     int bt_min, int bt_max, const struct vi_resource_dimensions *);
    61.8 +	     int bt_min, int bt_max, int non_irq_evq,
    61.9 +	     const struct vi_resource_dimensions *);
   61.10  extern void efrm_nic_del(struct linux_efhw_nic *);
   61.11  
   61.12  
    62.1 --- a/drivers/net/sfc/sfc_resource/nic.c	Tue Mar 31 11:49:12 2009 +0100
    62.2 +++ b/drivers/net/sfc/sfc_resource/nic.c	Tue Mar 31 11:59:10 2009 +0100
    62.3 @@ -71,6 +71,17 @@ int efhw_device_type_init(struct efhw_de
    62.4  			return 0;
    62.5  		}
    62.6  		break;
    62.7 +	case 0x7777:
    62.8 +		dt->arch = EFHW_ARCH_FALCON;
    62.9 +		dt->variant = 'C';
   62.10 +		switch (class_revision) {
   62.11 +		case 0:
   62.12 +			dt->revision = 0;
   62.13 +			break;
   62.14 +		default:
   62.15 +			return 0;
   62.16 +		}
   62.17 +		break;
   62.18  	default:
   62.19  		return 0;
   62.20  	}
   62.21 @@ -92,8 +103,6 @@ int efhw_device_type_init(struct efhw_de
   62.22  void efhw_nic_init(struct efhw_nic *nic, unsigned flags, unsigned options,
   62.23  		   struct efhw_device_type dev_type)
   62.24  {
   62.25 -	int i;
   62.26 -
   62.27  	nic->devtype = dev_type;
   62.28  	nic->flags = flags;
   62.29  	nic->options = options;
   62.30 @@ -102,8 +111,7 @@ void efhw_nic_init(struct efhw_nic *nic,
   62.31  	nic->reg_lock = &nic->the_reg_lock;
   62.32  	nic->mtu = 1500 + ETH_HLEN;
   62.33  
   62.34 -	for (i = 0; i < EFHW_KEVENTQ_MAX; i++)
   62.35 -		nic->irq_unit[i] = EFHW_IRQ_UNIT_UNUSED;
   62.36 +	nic->irq_unit = EFHW_IRQ_UNIT_UNUSED;
   62.37  
   62.38  	switch (nic->devtype.arch) {
   62.39  	case EFHW_ARCH_FALCON:
   62.40 @@ -118,6 +126,7 @@ void efhw_nic_init(struct efhw_nic *nic,
   62.41  			nic->ctr_ap_bar = FALCON_S_CTR_AP_BAR;
   62.42  			break;
   62.43  		case 'B':
   62.44 +		case 'C':
   62.45  			nic->flags |= NIC_FLAG_NO_INTERRUPT;
   62.46  			nic->ctr_ap_bar = FALCON_P_CTR_AP_BAR;
   62.47  			break;
   62.48 @@ -135,18 +144,14 @@ void efhw_nic_init(struct efhw_nic *nic,
   62.49  
   62.50  void efhw_nic_close_interrupts(struct efhw_nic *nic)
   62.51  {
   62.52 -	int i;
   62.53 -
   62.54  	EFHW_ASSERT(nic);
   62.55  	if (!efhw_nic_have_hw(nic))
   62.56  		return;
   62.57  
   62.58  	EFHW_ASSERT(efhw_nic_have_hw(nic));
   62.59  
   62.60 -	for (i = 0; i < EFHW_KEVENTQ_MAX; i++) {
   62.61 -		if (nic->irq_unit[i] != EFHW_IRQ_UNIT_UNUSED)
   62.62 -			efhw_nic_interrupt_disable(nic, i);
   62.63 -	}
   62.64 +	if (nic->irq_unit != EFHW_IRQ_UNIT_UNUSED)
   62.65 +		efhw_nic_interrupt_disable(nic);
   62.66  }
   62.67  
   62.68  void efhw_nic_dtor(struct efhw_nic *nic)
   62.69 @@ -156,7 +161,6 @@ void efhw_nic_dtor(struct efhw_nic *nic)
   62.70  	/* Check that we have functional units because the software only
   62.71  	 * driver doesn't initialise anything hardware related any more */
   62.72  
   62.73 -#ifndef __ci_ul_driver__
   62.74  	/* close interrupts is called first because the act of deregistering
   62.75  	   the driver could cause this driver to change from master to slave
   62.76  	   and hence the implicit interrupt mappings would be wrong */
   62.77 @@ -168,19 +172,14 @@ void efhw_nic_dtor(struct efhw_nic *nic)
   62.78  		efhw_nic_close_hardware(nic);
   62.79  	}
   62.80  	EFHW_TRACE("%s: functional units ... done", __FUNCTION__);
   62.81 -#endif
   62.82  
   62.83  	/* destroy event queues */
   62.84  	EFHW_TRACE("%s: event queues ... ", __FUNCTION__);
   62.85  
   62.86 -#ifndef __ci_ul_driver__
   62.87 -	{
   62.88 -		int i;
   62.89 -		for (i = 0; i < EFHW_KEVENTQ_MAX; ++i)
   62.90 -			if (nic->evq[i].evq_mask)
   62.91 -				efhw_keventq_dtor(nic, &nic->evq[i]);
   62.92 -	}
   62.93 -#endif
   62.94 +	if (nic->interrupting_evq.evq_mask)
   62.95 +		efhw_keventq_dtor(nic, &nic->interrupting_evq);
   62.96 +	if (nic->non_interrupting_evq.evq_mask)
   62.97 +		efhw_keventq_dtor(nic, &nic->non_interrupting_evq);
   62.98  
   62.99  	EFHW_TRACE("%s: event queues ... done", __FUNCTION__);
  62.100  
    63.1 --- a/drivers/net/sfc/sfc_resource/resource_driver.c	Tue Mar 31 11:49:12 2009 +0100
    63.2 +++ b/drivers/net/sfc/sfc_resource/resource_driver.c	Tue Mar 31 11:59:10 2009 +0100
    63.3 @@ -45,15 +45,6 @@
    63.4  #include <ci/efrm/vi_resource_private.h>
    63.5  #include <ci/efrm/driver_private.h>
    63.6  
    63.7 -#if EFRM_HAVE_IOMMU_LOCK
    63.8 -#ifdef EFRM_NEED_ALTERNATE_MAX_PFN
    63.9 -extern unsigned long blk_max_pfn;
   63.10 -#define max_pfn blk_max_pfn
   63.11 -#else
   63.12 -#include <linux/bootmem.h>
   63.13 -#endif
   63.14 -#endif
   63.15 -
   63.16  MODULE_AUTHOR("Solarflare Communications");
   63.17  MODULE_LICENSE("GPL");
   63.18  
   63.19 @@ -63,11 +54,6 @@ static struct efhw_ev_handler ev_handler
   63.20  	.dmaq_flushed_fn = efrm_handle_dmaq_flushed,
   63.21  };
   63.22  
   63.23 -#if EFRM_HAVE_IOMMU_LOCK
   63.24 -int efrm_need_iommu_lock;
   63.25 -EXPORT_SYMBOL(efrm_need_iommu_lock);
   63.26 -#endif
   63.27 -
   63.28  const int max_hardware_init_repeats = 10;
   63.29  
   63.30  /*--------------------------------------------------------------------
   63.31 @@ -76,7 +62,6 @@ const int max_hardware_init_repeats = 10
   63.32   *
   63.33   *--------------------------------------------------------------------*/
   63.34  /* See docs/notes/pci_alloc_consistent */
   63.35 -int use_pci_alloc = 1;		/* Use pci_alloc_consistent to alloc iopages */
   63.36  static int do_irq = 1;		/* enable interrupts */
   63.37  
   63.38  #if defined(CONFIG_X86_XEN)
   63.39 @@ -94,9 +79,6 @@ module_param(irq_moderation, int, S_IRUG
   63.40  MODULE_PARM_DESC(irq_moderation, "IRQ moderation in usec");
   63.41  module_param(nic_options, int, S_IRUGO);
   63.42  MODULE_PARM_DESC(nic_options, "Nic options -- see efhw_types.h");
   63.43 -module_param(use_pci_alloc, int, S_IRUGO);
   63.44 -MODULE_PARM_DESC(use_pci_alloc, "Use pci_alloc_consistent to alloc iopages "
   63.45 -		 "(autodetected by kernel version)");
   63.46  module_param(efx_vi_eventq_size, int, S_IRUGO);
   63.47  MODULE_PARM_DESC(efx_vi_eventq_size,
   63.48  		 "Size of event queue allocated by efx_vi library");
   63.49 @@ -176,7 +158,6 @@ static int efrm_nic_buffer_table_alloc(s
   63.50  {
   63.51  	int capacity;
   63.52  	int page_order;
   63.53 -	int i;
   63.54  	int rc;
   63.55  
   63.56  	/* Choose queue size. */
   63.57 @@ -189,16 +170,19 @@ static int efrm_nic_buffer_table_alloc(s
   63.58  		} else if (capacity & nic->evq_sizes)
   63.59  			break;
   63.60  	}
   63.61 -	for (i = 0; i < EFHW_KEVENTQ_MAX; ++i) {
   63.62 -		nic->evq[i].hw.capacity = capacity;
   63.63 -		nic->evq[i].hw.buf_tbl_alloc.base = (unsigned)-1;
   63.64 -	}
   63.65 +
   63.66 +	nic->interrupting_evq.hw.capacity = capacity;
   63.67 +	nic->interrupting_evq.hw.buf_tbl_alloc.base = (unsigned)-1;
   63.68 +
   63.69 +	nic->non_interrupting_evq.hw.capacity = capacity;
   63.70 +	nic->non_interrupting_evq.hw.buf_tbl_alloc.base = (unsigned)-1;
   63.71  
   63.72  	/* allocate buffer table entries to map onto the iobuffer */
   63.73  	page_order = get_order(capacity * sizeof(efhw_event_t));
   63.74  	if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) {
   63.75  		rc = efrm_buffer_table_alloc(page_order,
   63.76 -					     &nic->evq[0].hw.buf_tbl_alloc);
   63.77 +					     &nic->interrupting_evq
   63.78 +					     .hw.buf_tbl_alloc);
   63.79  		if (rc < 0) {
   63.80  			EFRM_WARN
   63.81  			    ("%s: failed (%d) to alloc %d buffer table entries",
   63.82 @@ -207,7 +191,7 @@ static int efrm_nic_buffer_table_alloc(s
   63.83  		}
   63.84  	}
   63.85  	rc = efrm_buffer_table_alloc(page_order,
   63.86 -				     &nic->evq[FALCON_EVQ_NONIRQ].hw.
   63.87 +				     &nic->non_interrupting_evq.hw.
   63.88  				     buf_tbl_alloc);
   63.89  	if (rc < 0) {
   63.90  		EFRM_WARN
   63.91 @@ -223,16 +207,17 @@ static int efrm_nic_buffer_table_alloc(s
   63.92   */
   63.93  static void efrm_nic_buffer_table_free(struct efhw_nic *nic)
   63.94  {
   63.95 -	int i;
   63.96 -	for (i = 0; i <= FALCON_EVQ_NONIRQ; i++)
   63.97 -		if (nic->evq[i].hw.buf_tbl_alloc.base != (unsigned)-1)
   63.98 -			efrm_buffer_table_free(&nic->evq[i].hw.buf_tbl_alloc);
   63.99 -
  63.100 +	if (nic->interrupting_evq.hw.buf_tbl_alloc.base != (unsigned)-1)
  63.101 +		efrm_buffer_table_free(&nic->interrupting_evq.hw
  63.102 +				       .buf_tbl_alloc);
  63.103 +	if (nic->non_interrupting_evq.hw.buf_tbl_alloc.base != (unsigned)-1)
  63.104 +		efrm_buffer_table_free(&nic->non_interrupting_evq
  63.105 +				       .hw.buf_tbl_alloc);
  63.106  }
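Both branches test buf_tbl_alloc.base against (unsigned)-1, the "never allocated" sentinel stored by efrm_nic_buffer_table_alloc() above.  A hypothetical helper naming the idiom (names invented for illustration):

	#define EXAMPLE_BT_BASE_INVALID ((unsigned)-1)

	/* Sketch: true if a buffer-table allocation actually happened. */
	static inline int example_bt_alloc_valid(unsigned base)
	{
		return base != EXAMPLE_BT_BASE_INVALID;
	}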
  63.107  
  63.108  static int iomap_bar(struct linux_efhw_nic *lnic, size_t len)
  63.109  {
  63.110 -	efhw_ioaddr_t ioaddr;
  63.111 +	volatile char __iomem *ioaddr;
  63.112  
  63.113  	ioaddr = ioremap_nocache(lnic->ctr_ap_pci_addr, len);
  63.114  	if (ioaddr == 0)
  63.115 @@ -345,12 +330,10 @@ linux_efrm_nic_ctor(struct linux_efhw_ni
  63.116  void linux_efrm_nic_dtor(struct linux_efhw_nic *lnic)
  63.117  {
  63.118  	struct efhw_nic *nic = &lnic->nic;
  63.119 -	efhw_ioaddr_t bar_ioaddr = nic->bar_ioaddr;
  63.120 +	volatile char __iomem *bar_ioaddr = nic->bar_ioaddr;
  63.121  
  63.122  	efhw_nic_dtor(nic);
  63.123  
  63.124 -	efrm_nic_buffer_table_free(nic);
  63.125 -
  63.126  	/* Unmap the bar. */
  63.127  	EFRM_ASSERT(bar_ioaddr);
  63.128  	iounmap(bar_ioaddr);
  63.129 @@ -369,7 +352,7 @@ static void efrm_tasklet(unsigned long p
  63.130  
  63.131  	EFRM_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT));
  63.132  
  63.133 -	efhw_keventq_poll(nic, &nic->evq[0]);
  63.134 +	efhw_keventq_poll(nic, &nic->interrupting_evq);
  63.135  	EFRM_TRACE("tasklet complete");
  63.136  }
  63.137  
  63.138 @@ -409,7 +392,7 @@ static int n_nics_probed;
  63.139  int
  63.140  efrm_nic_add(struct pci_dev *dev, unsigned flags, const uint8_t *mac_addr,
  63.141  	     struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock,
  63.142 -	     int bt_min, int bt_max,
  63.143 +	     int bt_min, int bt_lim, int non_irq_evq,
  63.144  	     const struct vi_resource_dimensions *res_dim)
  63.145  {
  63.146  	struct linux_efhw_nic *lnic = NULL;
  63.147 @@ -424,14 +407,14 @@ efrm_nic_add(struct pci_dev *dev, unsign
  63.148  		   pci_name(dev) ? pci_name(dev) : "?", dev->irq);
  63.149  
  63.150  	/* Ensure that we have room for the new adapter-structure. */
  63.151 -	if (efrm_nic_table.nic_count == EFHW_MAX_NR_DEVS) {
  63.152 +	if (efrm_nic_tablep->nic_count == EFHW_MAX_NR_DEVS) {
  63.153  		EFRM_WARN("%s: WARNING: too many devices", __FUNCTION__);
  63.154  		rc = -ENOMEM;
  63.155  		goto failed;
  63.156  	}
  63.157  
  63.158  	if (n_nics_probed == 0) {
  63.159 -		rc = efrm_resources_init(res_dim, bt_min, bt_max);
  63.160 +		rc = efrm_resources_init(res_dim, bt_min, bt_lim);
  63.161  		if (rc != 0)
  63.162  			goto failed;
  63.163  		resources_init = 1;
  63.164 @@ -467,7 +450,7 @@ efrm_nic_add(struct pci_dev *dev, unsign
  63.165  	rc = efrm_driver_register_nic(nic, nic_index++);
  63.166  	if (rc < 0) {
  63.167  		EFRM_ERR("%s: cannot register nic %d with nic error code %d",
  63.168 -			 __FUNCTION__, efrm_nic_table.nic_count, rc);
  63.169 +			 __FUNCTION__, efrm_nic_tablep->nic_count, rc);
  63.170  		goto failed;
  63.171  	}
  63.172  	registered_nic = 1;
  63.173 @@ -484,7 +467,8 @@ efrm_nic_add(struct pci_dev *dev, unsign
  63.174  	   we want to make sure that we maximise our chances, so we
  63.175  	   loop a few times until all is good. */
  63.176  	for (count = 0; count < max_hardware_init_repeats; count++) {
  63.177 -		rc = efhw_nic_init_hardware(nic, &ev_handler, mac_addr);
  63.178 +		rc = efhw_nic_init_hardware(nic, &ev_handler, mac_addr,
  63.179 +					    non_irq_evq);
  63.180  		if (rc >= 0)
  63.181  			break;
  63.182  
  63.183 @@ -509,19 +493,11 @@ efrm_nic_add(struct pci_dev *dev, unsign
  63.184  			EFRM_ERR("Interrupt initialisation failed (%d)", rc);
  63.185  			goto failed;
  63.186  		}
  63.187 -		efhw_nic_set_interrupt_moderation(nic, 0, irq_moderation);
  63.188 -		efhw_nic_interrupt_enable(nic, 0);
  63.189 +		efhw_nic_set_interrupt_moderation(nic, irq_moderation);
  63.190 +		efhw_nic_interrupt_enable(nic);
  63.191  	}
  63.192  	EFRM_TRACE("interrupts are %sregistered", do_irq ? "" : "not ");
  63.193  
  63.194 -#if EFRM_HAVE_IOMMU_LOCK
  63.195 -	/* Bug 4560: We need the lock if there is memory which cannot be
  63.196 -	 * accessed by the card and there is an IOMMU to access it.  In that
  63.197 -	 * case, the kernel will use the IOMMU to access the high memory. */
  63.198 -	if ((dev->dma_mask >> PAGE_SHIFT) < max_pfn && !EFRM_NO_IOMMU)
  63.199 -		efrm_need_iommu_lock = 1;
  63.200 -#endif
  63.201 -
  63.202  	*lnic_out = lnic;
  63.203  	EFRM_ASSERT(rc == 0);
  63.204  	++n_nics_probed;
  63.205 @@ -552,6 +528,8 @@ void efrm_nic_del(struct linux_efhw_nic 
  63.206  	EFRM_TRACE("%s:", __FUNCTION__);
  63.207  	EFRM_ASSERT(nic);
  63.208  
  63.209 +	efrm_nic_buffer_table_free(nic);
  63.210 +
  63.211  	efrm_driver_unregister_nic(nic);
  63.212  
  63.213  	/*
    64.1 --- a/drivers/net/sfc/sfc_resource/resources.c	Tue Mar 31 11:49:12 2009 +0100
    64.2 +++ b/drivers/net/sfc/sfc_resource/resources.c	Tue Mar 31 11:59:10 2009 +0100
    64.3 @@ -40,11 +40,11 @@
    64.4  
    64.5  int
    64.6  efrm_resources_init(const struct vi_resource_dimensions *vi_res_dim,
    64.7 -		    int buffer_table_min, int buffer_table_max)
    64.8 +		    int buffer_table_min, int buffer_table_lim)
    64.9  {
   64.10  	int i, rc;
   64.11  
   64.12 -	rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_max);
   64.13 +	rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_lim);
   64.14  	if (rc != 0)
   64.15  		return rc;
   64.16  
    65.1 --- a/drivers/net/sfc/sfc_resource/vi_resource_alloc.c	Tue Mar 31 11:49:12 2009 +0100
    65.2 +++ b/drivers/net/sfc/sfc_resource/vi_resource_alloc.c	Tue Mar 31 11:59:10 2009 +0100
    65.3 @@ -82,13 +82,13 @@ static inline int efrm_vi_rm_alloc_id(ui
    65.4  	int instance;
    65.5  	int rc;
    65.6  
    65.7 -	if (efrm_nic_table.a_nic == NULL)	/* ?? FIXME: surely not right */
    65.8 +	if (efrm_nic_tablep->a_nic == NULL)	/* ?? FIXME: surely not right */
    65.9  		return -ENODEV;
   65.10  
   65.11  	spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
   65.12  
   65.13  	/* Falcon A1 RX phys addr weirdness. */
   65.14 -	if (efrm_nic_table.a_nic->devtype.variant == 'A' &&
   65.15 +	if (efrm_nic_tablep->a_nic->devtype.variant == 'A' &&
   65.16  	    (vi_flags & EFHW_VI_RX_PHYS_ADDR_EN)) {
   65.17  		if (vi_flags & EFHW_VI_JUMBO_EN) {
   65.18  			/* Falcon-A cannot do phys + scatter. */
   65.19 @@ -141,10 +141,10 @@ static void efrm_vi_rm_free_id(int insta
   65.20  	irq_flags_t lock_flags;
   65.21  	struct kfifo *instances;
   65.22  
   65.23 -	if (efrm_nic_table.a_nic == NULL)	/* ?? FIXME: surely not right */
   65.24 +	if (efrm_nic_tablep->a_nic == NULL)	/* ?? FIXME: surely not right */
   65.25  		return;
   65.26  
   65.27 -	if (efrm_nic_table.a_nic->devtype.variant == 'A' &&
   65.28 +	if (efrm_nic_tablep->a_nic->devtype.variant == 'A' &&
   65.29  	    instance == FALCON_A1_ISCSI_DMAQ) {
   65.30  		EFRM_ASSERT(efrm_vi_manager->iscsi_dmaq_instance_is_free ==
   65.31  			    false);
   65.32 @@ -361,7 +361,7 @@ static inline int
   65.33  efrm_vi_rm_init_evq(struct vi_resource *virs, int nic_index)
   65.34  {
   65.35  	int rc;
   65.36 -	struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
   65.37 +	struct efhw_nic *nic = efrm_nic_tablep->nic[nic_index];
   65.38  	int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
   65.39  	struct eventq_resource_hardware *evq_hw =
   65.40  	    &virs->nic_info[nic_index].evq_pages;
   65.41 @@ -419,7 +419,7 @@ efrm_vi_rm_init_evq(struct vi_resource *
   65.42  static inline void
   65.43  efrm_vi_rm_fini_evq(struct vi_resource *virs, int nic_index)
   65.44  {
   65.45 -	struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
   65.46 +	struct efhw_nic *nic = efrm_nic_tablep->nic[nic_index];
   65.47  	int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
   65.48  	struct vi_resource_nic_info *nic_info = &virs->nic_info[nic_index];
   65.49  
   65.50 @@ -481,7 +481,7 @@ efrm_vi_rm_init_or_fini_dmaq(struct vi_r
   65.51  			     int queue_type, int init, int nic_index)
   65.52  {
   65.53  	int rc;
   65.54 -	struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
   65.55 +	struct efhw_nic *nic = efrm_nic_tablep->nic[nic_index];
   65.56  	int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
   65.57  	uint32_t buf_bytes;
   65.58  	struct vi_resource *evq_virs;
   65.59 @@ -490,7 +490,7 @@ efrm_vi_rm_init_or_fini_dmaq(struct vi_r
   65.60  	struct vi_resource_nic_info *nic_info = &virs->nic_info[nic_index];
   65.61  	int page_order;
   65.62  	uint32_t num_pages;
   65.63 -	efhw_iopages_t *iobuff;
   65.64 +	struct efhw_iopages *iobuff;
   65.65  #endif
   65.66  
   65.67  	if (!init)
   65.68 @@ -554,6 +554,10 @@ destroy:
   65.69  	if (virs->dmaq_capacity[queue_type] == 0)
   65.70  		return 0;
   65.71  
   65.72 +	/* Ensure TX pacing turned off -- queue flush doesn't reset this. */
   65.73 +	if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX)
   65.74 +		falcon_nic_pace(nic, instance, 0);
   65.75 +
   65.76  	/* No need to disable the queue here.  Nobody is using it anyway. */
   65.77  
   65.78  fail_evq:
    66.1 --- a/drivers/net/sfc/sfc_resource/vi_resource_event.c	Tue Mar 31 11:49:12 2009 +0100
    66.2 +++ b/drivers/net/sfc/sfc_resource/vi_resource_event.c	Tue Mar 31 11:59:10 2009 +0100
    66.3 @@ -48,7 +48,7 @@ efrm_eventq_request_wakeup(struct vi_res
    66.4  	struct efhw_nic *nic;
    66.5  	int next_i;
    66.6  	EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index));
    66.7 -	nic = efrm_nic_table.nic[nic_index];
    66.8 +	nic = efrm_nic_tablep->nic[nic_index];
    66.9  	EFRM_ASSERT(nic);
   66.10  	next_i = ((current_ptr / sizeof(efhw_event_t)) &
   66.11  		  (virs->evq_capacity - 1));
   66.12 @@ -61,7 +61,7 @@ EXPORT_SYMBOL(efrm_eventq_request_wakeup
   66.13  
   66.14  void efrm_eventq_reset(struct vi_resource *virs, int nic_index)
   66.15  {
   66.16 -	struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
   66.17 +	struct efhw_nic *nic = efrm_nic_tablep->nic[nic_index];
   66.18  	int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
   66.19  
   66.20  	EFRM_ASSERT(virs->evq_capacity != 0);
   66.21 @@ -217,16 +217,18 @@ efrm_eventq_do_callback(struct efhw_nic 
   66.22  	}
   66.23  }
   66.24  
   66.25 -void efrm_handle_wakeup_event(struct efhw_nic *nic, efhw_event_t *ev)
   66.26 +void efrm_handle_wakeup_event(struct efhw_nic *nic, unsigned instance)
   66.27  {
   66.28 -	efrm_eventq_do_callback(nic,
   66.29 -				(unsigned int)FALCON_EVENT_WAKE_EVQ_ID(ev),
   66.30 -				false);
   66.31 +	efrm_eventq_do_callback(nic, instance, false);
   66.32  }
   66.33  
   66.34 -void efrm_handle_timeout_event(struct efhw_nic *nic, efhw_event_t *ev)
   66.35 +void efrm_handle_timeout_event(struct efhw_nic *nic, unsigned instance)
   66.36  {
   66.37 -	efrm_eventq_do_callback(nic,
   66.38 -				(unsigned int)FALCON_EVENT_WAKE_EVQ_ID(ev),
   66.39 -				true);
   66.40 +	efrm_eventq_do_callback(nic, instance, true);
   66.41  }
   66.42 +
   66.43 +void efrm_handle_sram_event(struct efhw_nic *nic)
   66.44 +{
   66.45 +	if (nic->buf_commit_outstanding > 0)
   66.46 +		nic->buf_commit_outstanding--;
   66.47 +}
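efrm_handle_sram_event() is the consumer half of a simple outstanding-commit counter; a hypothetical producer side (not shown in this changeset) would increment the counter when a buffer-table commit is posted to the NIC:

	/* Sketch (hypothetical helper): pair each posted buffer-table
	 * commit with the decrement in efrm_handle_sram_event(). */
	static inline void example_note_bt_commit(struct efhw_nic *nic)
	{
		nic->buf_commit_outstanding++;
	}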
    67.1 --- a/drivers/net/sfc/sfc_resource/vi_resource_flush.c	Tue Mar 31 11:49:12 2009 +0100
    67.2 +++ b/drivers/net/sfc/sfc_resource/vi_resource_flush.c	Tue Mar 31 11:59:10 2009 +0100
    67.3 @@ -409,7 +409,7 @@ efrm_handle_tx_dmaq_flushed(struct efhw_
    67.4  }
    67.5  
    67.6  void
    67.7 -efrm_handle_dmaq_flushed(struct efhw_nic *flush_nic, int instance,
    67.8 +efrm_handle_dmaq_flushed(struct efhw_nic *flush_nic, unsigned instance,
    67.9  			 int rx_flush)
   67.10  {
   67.11  	irq_flags_t lock_flags;
    68.1 --- a/drivers/net/sfc/sfc_resource/vi_resource_manager.c	Tue Mar 31 11:49:12 2009 +0100
    68.2 +++ b/drivers/net/sfc/sfc_resource/vi_resource_manager.c	Tue Mar 31 11:59:10 2009 +0100
    68.3 @@ -76,7 +76,7 @@ efrm_create_or_destroy_vi_resource_manag
    68.4  	struct list_head flush_pending;
    68.5  	irq_flags_t lock_flags;
    68.6  	int rc, i, n_evqs;
    68.7 -	unsigned dmaq_min, dmaq_max;
    68.8 +	unsigned dmaq_min, dmaq_lim;
    68.9  
   68.10  	EFRM_ASSERT(rm_in_out);
   68.11  
   68.12 @@ -85,11 +85,11 @@ efrm_create_or_destroy_vi_resource_manag
   68.13  
   68.14  	EFRM_ASSERT(dims);
   68.15  	EFRM_NOTICE("vi_resource_manager: evq_int=%u-%u evq_timer=%u-%u",
   68.16 -		    dims->evq_int_min, dims->evq_int_max,
   68.17 -		    dims->evq_timer_min, dims->evq_timer_max);
   68.18 +		    dims->evq_int_min, dims->evq_int_lim,
   68.19 +		    dims->evq_timer_min, dims->evq_timer_lim);
   68.20  	EFRM_NOTICE("vi_resource_manager: rxq=%u-%u txq=%u-%u",
   68.21 -		    dims->rxq_min, dims->rxq_max,
   68.22 -		    dims->txq_min, dims->txq_max);
   68.23 +		    dims->rxq_min, dims->rxq_lim,
   68.24 +		    dims->txq_min, dims->txq_lim);
   68.25  
   68.26  	efrm_vi_manager = kmalloc(sizeof(*efrm_vi_manager), GFP_KERNEL);
   68.27  	if (efrm_vi_manager == NULL) {
   68.28 @@ -102,12 +102,12 @@ efrm_create_or_destroy_vi_resource_manag
   68.29  	efrm_vi_manager->iscsi_dmaq_instance_is_free = true;
   68.30  
   68.31  	dmaq_min = max(dims->rxq_min, dims->txq_min);
   68.32 -	dmaq_max = min(dims->rxq_max, dims->txq_max);
   68.33 +	dmaq_lim = min(dims->rxq_lim, dims->txq_lim);
   68.34  
   68.35  	efrm_vi_manager->with_timer_base =
   68.36  	    max(dmaq_min, dims->evq_timer_min);
   68.37  	efrm_vi_manager->with_timer_limit =
   68.38 -	    min(dmaq_max, dims->evq_timer_max);
   68.39 +	    min(dmaq_lim, dims->evq_timer_lim);
   68.40  	rc = efrm_kfifo_id_ctor(&efrm_vi_manager->instances_with_timer,
   68.41  				efrm_vi_manager->with_timer_base,
   68.42  				efrm_vi_manager->with_timer_limit,
   68.43 @@ -118,7 +118,7 @@ efrm_create_or_destroy_vi_resource_manag
   68.44  	efrm_vi_manager->with_interrupt_base =
   68.45  	    max(dmaq_min, dims->evq_int_min);
   68.46  	efrm_vi_manager->with_interrupt_limit =
   68.47 -	    min(dmaq_max, dims->evq_int_max);
   68.48 +	    min(dmaq_lim, dims->evq_int_lim);
   68.49  	efrm_vi_manager->with_interrupt_limit =
   68.50  		max(efrm_vi_manager->with_interrupt_limit,
   68.51  		    efrm_vi_manager->with_interrupt_base);
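A worked instance of the range intersections above (numbers are illustrative only):

	/* Illustrative only: suppose rxq = txq = [0, 4096),
	 * evq_timer = [64, 4096) and evq_int = [0, 64).  Then:
	 *   dmaq_min             = max(0, 0)       = 0
	 *   dmaq_lim             = min(4096, 4096) = 4096
	 *   with_timer_base      = max(0, 64)      = 64
	 *   with_timer_limit     = min(4096, 4096) = 4096
	 *   with_interrupt_base  = max(0, 0)       = 0
	 *   with_interrupt_limit = min(4096, 64)   = 64
	 */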
    69.1 --- a/drivers/net/sfc/sfe4001.c	Tue Mar 31 11:49:12 2009 +0100
    69.2 +++ b/drivers/net/sfc/sfe4001.c	Tue Mar 31 11:59:10 2009 +0100
    69.3 @@ -130,18 +130,18 @@ void sfe4001_poweroff(struct efx_nic *ef
    69.4  
    69.5  	/* Turn off all power rails */
    69.6  	out = 0xff;
    69.7 -	(void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
    69.8 +	(void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
    69.9  
   69.10  	/* Disable port 1 outputs on IO expander */
   69.11  	cfg = 0xff;
   69.12 -	(void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, EFX_BYTE);
   69.13 +	(void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
   69.14  
   69.15  	/* Disable port 0 outputs on IO expander */
   69.16  	cfg = 0xff;
   69.17 -	(void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, EFX_BYTE);
   69.18 +	(void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
   69.19  
   69.20  	/* Clear any over-temperature alert */
   69.21 -	(void) efx_i2c_read(i2c, MAX6647, RSL, &in, EFX_BYTE);
   69.22 +	(void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
   69.23  }
   69.24  
   69.25  static int sfe4001_check_hw(struct efx_nic *efx)
   69.26 @@ -163,7 +163,7 @@ static int sfe4001_check_hw(struct efx_n
   69.27  	if (falcon_xaui_link_ok(efx))
   69.28  		return 0;
   69.29  
   69.30 -	rc = efx_i2c_read(i2c, PCA9539, P1_IN, &status, EFX_BYTE);
   69.31 +	rc = efx_i2c_read(i2c, PCA9539, P1_IN, &status, 1);
   69.32  	status &= ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN));
   69.33  
   69.34  	/* We know we can read from the IO expander because we did
   69.35 @@ -223,12 +223,12 @@ int sfe4001_poweron(struct efx_nic *efx)
   69.36  	/* Set DSP over-temperature alert threshold */
   69.37  	EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
   69.38  	rc = efx_i2c_write(i2c, MAX6647, WLHO,
   69.39 -			   &xgphy_max_temperature, EFX_BYTE);
   69.40 +			   &xgphy_max_temperature, 1);
   69.41  	if (rc)
   69.42  		goto fail1;
   69.43  
   69.44  	/* Read it back and verify */
   69.45 -	rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, EFX_BYTE);
   69.46 +	rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1);
   69.47  	if (rc)
   69.48  		goto fail1;
   69.49  	if (in != xgphy_max_temperature) {
   69.50 @@ -237,17 +237,17 @@ int sfe4001_poweron(struct efx_nic *efx)
   69.51  	}
   69.52  
   69.53  	/* Clear any previous over-temperature alert */
   69.54 -	rc = efx_i2c_read(i2c, MAX6647, RSL, &in, EFX_BYTE);
   69.55 +	rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
   69.56  	if (rc)
   69.57  		goto fail1;
   69.58  
   69.59  	/* Enable port 0 and port 1 outputs on IO expander */
   69.60  	cfg = 0x00;
   69.61 -	rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, EFX_BYTE);
   69.62 +	rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
   69.63  	if (rc)
   69.64  		goto fail1;
   69.65  	cfg = 0xff & ~(1 << P1_SPARE_LBN);
   69.66 -	rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, EFX_BYTE);
   69.67 +	rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
   69.68  	if (rc)
   69.69  		goto fail2;
   69.70  
   69.71 @@ -255,7 +255,7 @@ int sfe4001_poweron(struct efx_nic *efx)
   69.72  	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
   69.73  		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
   69.74  		       (0 << P0_EN_1V0X_LBN));
   69.75 -	rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
   69.76 +	rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
   69.77  	if (rc)
   69.78  		goto fail3;
   69.79  
   69.80 @@ -267,14 +267,14 @@ int sfe4001_poweron(struct efx_nic *efx)
   69.81  			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
   69.82  			       (1 << P0_X_TRST_LBN));
   69.83  
   69.84 -		rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
   69.85 +		rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
   69.86  		if (rc)
   69.87  			goto fail3;
   69.88  		msleep(10);
   69.89  
   69.90  		/* Turn on 1V power rail */
   69.91  		out &= ~(1 << P0_EN_1V0X_LBN);
   69.92 -		rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
   69.93 +		rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
   69.94  		if (rc)
   69.95  			goto fail3;
   69.96  
   69.97 @@ -283,7 +283,7 @@ int sfe4001_poweron(struct efx_nic *efx)
   69.98  		schedule_timeout_uninterruptible(HZ);
   69.99  
  69.100  		/* Check DSP is powered */
  69.101 -		rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, EFX_BYTE);
  69.102 +		rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1);
  69.103  		if (rc)
  69.104  			goto fail3;
  69.105  		if (in & (1 << P1_AFE_PWD_LBN))
  69.106 @@ -302,14 +302,14 @@ done:
  69.107  fail3:
  69.108  	/* Turn off all power rails */
  69.109  	out = 0xff;
  69.110 -	(void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
  69.111 +	(void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
  69.112  	/* Disable port 1 outputs on IO expander */
  69.113  	out = 0xff;
  69.114 -	(void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, EFX_BYTE);
  69.115 +	(void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
  69.116  fail2:
  69.117  	/* Disable port 0 outputs on IO expander */
  69.118  	out = 0xff;
  69.119 -	(void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, EFX_BYTE);
  69.120 +	(void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
  69.121  fail1:
  69.122  	return rc;
  69.123  }
    70.1 --- a/drivers/net/sfc/tenxpress.c	Tue Mar 31 11:49:12 2009 +0100
    70.2 +++ b/drivers/net/sfc/tenxpress.c	Tue Mar 31 11:59:10 2009 +0100
    70.3 @@ -574,10 +574,8 @@ static int tenxpress_phy_check_hw(struct
    70.4  
    70.5  	link_ok = phy_up && tenxpress_link_ok(efx, 1);
    70.6  
    70.7 -	if (link_ok != efx->link_up) {
    70.8 -		efx->link_up = link_ok;
    70.9 +	if (link_ok != efx->link_up)
   70.10  		efx->mac_op->fake_phy_event(efx);
   70.11 -	}
   70.12  
   70.13  	/* Nothing to check if we've already shut down the PHY */
   70.14  	if (!phy_up)
   70.15 @@ -652,7 +650,7 @@ static void tenxpress_reset_xaui(struct 
   70.16  	soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
   70.17  				       PCS_SOFT_RST2_REG);
   70.18  
   70.19 -	/* Modify => put in reset */
   70.20 +	/* Put in reset */
   70.21  	test_select &= ~(1 << CLK312_EN_LBN);
   70.22  	mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
   70.23  			    PCS_TEST_SELECT_REG, test_select);
   70.24 @@ -666,7 +664,7 @@ static void tenxpress_reset_xaui(struct 
   70.25  			    PCS_CLOCK_CTRL_REG, clk_ctrl);
   70.26  	udelay(10);
   70.27  
   70.28 -	/* Modify => remove reset */
   70.29 +	/* Remove reset */
   70.30  	clk_ctrl |= (1 << PLL312_RST_N_LBN);
   70.31  	mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
   70.32  			    PCS_CLOCK_CTRL_REG, clk_ctrl);
    71.1 --- a/drivers/net/sfc/tx.c	Tue Mar 31 11:49:12 2009 +0100
    71.2 +++ b/drivers/net/sfc/tx.c	Tue Mar 31 11:59:10 2009 +0100
    71.3 @@ -61,8 +61,7 @@ void efx_stop_queue(struct efx_nic *efx)
    71.4  	EFX_TRACE(efx, "stop TX queue\n");
    71.5  
    71.6  	atomic_inc(&efx->netif_stop_count);
    71.7 -	if (likely(efx->net_dev_registered))
    71.8 -		netif_stop_queue(efx->net_dev);
    71.9 +	netif_stop_queue(efx->net_dev);
   71.10  
   71.11  	spin_unlock_bh(&efx->netif_stop_lock);
   71.12  }
   71.13 @@ -77,13 +76,36 @@ inline void efx_wake_queue(struct efx_ni
   71.14  	if (atomic_dec_and_lock(&efx->netif_stop_count,
   71.15  				&efx->netif_stop_lock)) {
   71.16  		EFX_TRACE(efx, "waking TX queue\n");
   71.17 -		if (likely(efx->net_dev_registered))
   71.18 -			netif_wake_queue(efx->net_dev);
   71.19 +		netif_wake_queue(efx->net_dev);
   71.20  		spin_unlock(&efx->netif_stop_lock);
   71.21  	}
   71.22  	local_bh_enable();
   71.23  }
   71.24  
   71.25 +static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
   71.26 +				      struct efx_tx_buffer *buffer)
   71.27 +{
   71.28 +	if (buffer->unmap_len) {
   71.29 +		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
   71.30 +		if (buffer->unmap_single)
   71.31 +			pci_unmap_single(pci_dev, buffer->unmap_addr,
   71.32 +					 buffer->unmap_len, PCI_DMA_TODEVICE);
   71.33 +		else
   71.34 +			pci_unmap_page(pci_dev, buffer->unmap_addr,
   71.35 +				       buffer->unmap_len, PCI_DMA_TODEVICE);
   71.36 +		buffer->unmap_len = 0;
   71.37 +		buffer->unmap_single = 0;
   71.38 +	}
   71.39 +
   71.40 +	if (buffer->skb) {
   71.41 +		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
   71.42 +		buffer->skb = NULL;
   71.43 +		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
   71.44 +			  "complete\n", tx_queue->queue, tx_queue->read_count);
   71.45 +	}
   71.46 +}
   71.47 +
   71.48 +
   71.49  /*
   71.50   * Add a socket buffer to a TX queue
   71.51   *
   71.52 @@ -239,17 +261,7 @@ static inline int efx_enqueue_skb(struct
   71.53  		--tx_queue->insert_count;
   71.54  		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
   71.55  		buffer = &tx_queue->buffer[insert_ptr];
   71.56 -		if (buffer->unmap_len) {
   71.57 -			if (buffer->unmap_single)
   71.58 -				pci_unmap_single(pci_dev, buffer->unmap_addr,
   71.59 -						 buffer->unmap_len,
   71.60 -						 PCI_DMA_TODEVICE);
   71.61 -			else
   71.62 -				pci_unmap_page(pci_dev, buffer->unmap_addr,
   71.63 -					       buffer->unmap_len,
   71.64 -					       PCI_DMA_TODEVICE);
   71.65 -		}
   71.66 -		buffer->unmap_len = 0;
   71.67 +		efx_dequeue_buffer(tx_queue, buffer);
   71.68  		buffer->len = 0;
   71.69  	}
   71.70  
   71.71 @@ -269,56 +281,30 @@ static inline int efx_enqueue_skb(struct
   71.72  static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
   71.73  				       unsigned int index)
   71.74  {
   71.75 -	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
   71.76 -	struct efx_tx_buffer *buffer;
   71.77 +	struct efx_nic *efx = tx_queue->efx;
   71.78  	unsigned int stop_index, read_ptr;
   71.79 +	unsigned int mask = efx->type->txd_ring_mask;
   71.80  
   71.81 -	/* Calculate the stopping point.  Doing the check this way
   71.82 -	 * avoids wrongly completing every buffer in the ring if we
   71.83 -	 * get called twice with the same index.  (Hardware should
   71.84 -	 * never do this, since it can't complete that many buffers in
   71.85 -	 * one go.)
   71.86 -	 */
   71.87 -	stop_index = (index + 1) & tx_queue->efx->type->txd_ring_mask;
   71.88 -	read_ptr = tx_queue->read_count & tx_queue->efx->type->txd_ring_mask;
   71.89 +	stop_index = (index + 1) & mask;
   71.90 +	read_ptr = tx_queue->read_count & mask;
   71.91  
   71.92  	while (read_ptr != stop_index) {
   71.93 -		buffer = &tx_queue->buffer[read_ptr];
   71.94 +		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
   71.95  		if (unlikely(buffer->len == 0)) {
   71.96  			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
   71.97  				"completion id %x\n", tx_queue->queue,
   71.98  				read_ptr);
   71.99 -			atomic_inc(&tx_queue->efx->errors.spurious_tx);
  71.100 -			/* Don't reset */
  71.101 -		} else {
  71.102 -			if (buffer->unmap_len) {
  71.103 -				if (buffer->unmap_single)
  71.104 -					pci_unmap_single(pci_dev,
  71.105 -							 buffer->unmap_addr,
  71.106 -							 buffer->unmap_len,
  71.107 -							 PCI_DMA_TODEVICE);
  71.108 -				else
  71.109 -					pci_unmap_page(pci_dev,
  71.110 -						       buffer->unmap_addr,
  71.111 -						       buffer->unmap_len,
  71.112 -						       PCI_DMA_TODEVICE);
  71.113 -				buffer->unmap_single = 0;
  71.114 -				buffer->unmap_len = 0;
  71.115 -			}
  71.116 -			if (buffer->skb) {
  71.117 -				dev_kfree_skb_any((struct sk_buff *)
  71.118 -						  buffer->skb);
  71.119 -				buffer->skb = NULL;
  71.120 -				EFX_TRACE(tx_queue->efx, "TX queue %d "
  71.121 -					  "transmission id %x complete\n",
  71.122 -					  tx_queue->queue, read_ptr);
  71.123 -			}
  71.124 -			buffer->continuation = 1;
  71.125 -			buffer->len = 0;
  71.126 +			atomic_inc(&efx->errors.spurious_tx);
  71.127 +			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
  71.128 +			return;
  71.129  		}
  71.130 +
  71.131 +		efx_dequeue_buffer(tx_queue, buffer);
  71.132 +		buffer->continuation = 1;
  71.133 +		buffer->len = 0;
  71.134 +
  71.135  		++tx_queue->read_count;
  71.136 -		read_ptr = (tx_queue->read_count &
  71.137 -			    tx_queue->efx->type->txd_ring_mask);
  71.138 +		read_ptr = tx_queue->read_count & mask;
  71.139  	}
  71.140  }
  71.141  
  71.142 @@ -385,13 +371,8 @@ out:
  71.143  	return rc;
  71.144  }
  71.145  
  71.146 -#if defined(EFX_USE_FASTCALL)
  71.147  void fastcall efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
  71.148 -#else
  71.149 -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
  71.150 -#endif
  71.151  {
  71.152 -	unsigned long flags __attribute__ ((unused));
  71.153  	unsigned fill_level;
  71.154  	struct efx_nic *efx = tx_queue->efx;
  71.155  
  71.156 @@ -407,11 +388,7 @@ void efx_xmit_done(struct efx_tx_queue *
  71.157  	if (unlikely(tx_queue->stopped)) {
  71.158  		fill_level = tx_queue->insert_count - tx_queue->read_count;
  71.159  		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
  71.160 -			/* If the port is stopped and the net_dev isn't
  71.161 -			 * registered, then the caller must be performing
  71.162 -			 * flow control manually */
  71.163 -			if (unlikely(!efx->net_dev_registered))
  71.164 -				return;
  71.165 +			EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
  71.166  
  71.167  			/* Do this under netif_tx_lock(), to avoid racing
  71.168  			 * with efx_xmit(). */
  71.169 @@ -464,8 +441,6 @@ int efx_init_tx_queue(struct efx_tx_queu
  71.170  {
  71.171  	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
  71.172  
  71.173 -	ASSERT_RTNL();
  71.174 -
  71.175  	/* Initialise fields */
  71.176  	tx_queue->insert_count = 0;
  71.177  	tx_queue->write_count = 0;
  71.178 @@ -479,14 +454,20 @@ int efx_init_tx_queue(struct efx_tx_queu
  71.179  
  71.180  void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
  71.181  {
  71.182 -	unsigned int last_index, mask;
  71.183 -	if (tx_queue->buffer) {
  71.184 -		/* Free any buffers left in the ring */
  71.185 -		mask = tx_queue->efx->type->txd_ring_mask;
  71.186 -		last_index = (tx_queue->insert_count - 1) & mask;
  71.187 -		EFX_LOG(tx_queue->efx, "Will dequeue up to 0x%x from 0x%x\n",
  71.188 -			last_index, tx_queue->read_count & mask);
  71.189 -		efx_dequeue_buffers(tx_queue, last_index);
  71.190 +	struct efx_tx_buffer *buffer;
  71.191 +
  71.192 +	if (!tx_queue->buffer)
  71.193 +		return;
  71.194 +
  71.195 +	/* Free any buffers left in the ring */
  71.196 +	while (tx_queue->read_count != tx_queue->write_count) {
  71.197 +		buffer = &tx_queue->buffer[tx_queue->read_count &
  71.198 +					   tx_queue->efx->type->txd_ring_mask];
  71.199 +		efx_dequeue_buffer(tx_queue, buffer);
  71.200 +		buffer->continuation = 1;
  71.201 +		buffer->len = 0;
  71.202 +
  71.203 +		++tx_queue->read_count;
  71.204  	}
  71.205  }
  71.206  
  71.207 @@ -494,8 +475,6 @@ void efx_fini_tx_queue(struct efx_tx_que
  71.208  {
  71.209  	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
  71.210  
  71.211 -	ASSERT_RTNL();
  71.212 -
  71.213  	/* Flush TX queue, remove descriptor ring */
  71.214  	falcon_fini_tx(tx_queue);
  71.215  
    72.1 --- a/drivers/net/sfc/txc43128_phy.c	Tue Mar 31 11:49:12 2009 +0100
    72.2 +++ b/drivers/net/sfc/txc43128_phy.c	Tue Mar 31 11:59:10 2009 +0100
    72.3 @@ -653,10 +653,9 @@ static int txc43128_phy_check_hw(struct 
    72.4  	int link_up = txc43128_phy_read_link(efx);
    72.5  
    72.6  	/* Simulate a PHY event if link state has changed */
    72.7 -	if (link_up != efx->link_up) {
    72.8 -		efx->link_up = link_up;
    72.9 +	if (link_up != efx->link_up)
   72.10  		efx->mac_op->fake_phy_event(efx);
   72.11 -	} else if (EFX_WORKAROUND_10934(efx)) {
   72.12 +	else if (EFX_WORKAROUND_10934(efx)) {
   72.13  		if (link_up || (efx->loopback_mode != LOOPBACK_NONE))
   72.14  			data->bug10934_timer = jiffies;
   72.15  		else {
    73.1 --- a/drivers/net/sfc/workarounds.h	Tue Mar 31 11:49:12 2009 +0100
    73.2 +++ b/drivers/net/sfc/workarounds.h	Tue Mar 31 11:59:10 2009 +0100
    73.3 @@ -34,7 +34,7 @@
    73.4  #define EFX_WORKAROUND_ALWAYS(efx) 1
    73.5  #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
    73.6  #define EFX_WORKAROUND_FALCON_B0FPGA(efx) \
    73.7 -	(FALCON_REV(efx) == FALCON_REV_B0 && !(efx)->is_asic)
    73.8 +	(FALCON_REV(efx) >= FALCON_REV_B0 && !(efx)->is_asic)
    73.9  
   73.10  /* XAUI resets if link not detected */
   73.11  #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
   73.12 @@ -67,6 +67,8 @@
   73.13  #define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
   73.14  /* Flush events can take a very long time to appear */
   73.15  #define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
   73.16 +/* 10Xpress is sensitive to unstable XAUI sync when going into loopback */
   73.17 +#define EFX_WORKAROUND_11667 EFX_WORKAROUND_ALWAYS
   73.18  
   73.19  /* Spurious parity errors in TSORT buffers */
   73.20  #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
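
Each workaround here is a predicate macro evaluated against a particular NIC, so call sites read as plain conditionals; the fix above widens the B0-FPGA predicate from exactly-B0 to B0-or-later. A hedged illustration of the convention (the struct and revision constants are stand-ins, not the driver's):

struct nic { int rev; int is_asic; };

#define REV_A1 1
#define REV_B0 2

#define WORKAROUND_ALWAYS(nic)  1
#define WORKAROUND_B0FPGA(nic)  ((nic)->rev >= REV_B0 && !(nic)->is_asic)

static void rx_fixup(struct nic *nic)
{
	if (WORKAROUND_B0FPGA(nic)) {
		/* erratum-specific handling: compiled in everywhere,
		 * evaluated only on affected parts */
	}
}
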
    74.1 --- a/drivers/net/sfc/xfp_phy.c	Tue Mar 31 11:49:12 2009 +0100
    74.2 +++ b/drivers/net/sfc/xfp_phy.c	Tue Mar 31 11:59:10 2009 +0100
    74.3 @@ -114,11 +114,12 @@ static int xfp_phy_init(struct efx_nic *
    74.4  	phy_data->tx_disabled = efx->tx_disabled;
    74.5  
    74.6  	rc = xfp_reset_phy(efx);
    74.7 +
    74.8 +	EFX_INFO(efx, "XFP: PHY init %s.\n",
    74.9 +		 rc ? "failed" : "successful");
   74.10  	if (rc < 0)
   74.11  		goto fail;
   74.12  
   74.13 -	EFX_INFO(efx, "XFP: PHY init %s.\n",
   74.14 -		 rc ? "failed" : "successful");
   74.15  	return 0;
   74.16  
   74.17   fail:
   74.18 @@ -142,10 +143,8 @@ static int xfp_phy_check_hw(struct efx_n
   74.19  	int rc = 0;
   74.20  	int link_up = xfp_link_ok(efx);
   74.21  	/* Simulate a PHY event if link state has changed */
   74.22 -	if (link_up != efx->link_up) {
   74.23 -		efx->link_up = link_up;
   74.24 +	if (link_up != efx->link_up)
   74.25  		efx->mac_op->fake_phy_event(efx);
   74.26 -	}
   74.27  
   74.28  	rc = efx->board_info.monitor(efx);
   74.29  	if (rc) {
   74.30 @@ -192,15 +191,13 @@ static void xfp_phy_fini(struct efx_nic 
   74.31  }
   74.32  
   74.33  struct efx_phy_operations falcon_xfp_phy_ops = {
   74.34 -	.init            = xfp_phy_init,
   74.35 -	.reconfigure     = xfp_phy_reconfigure,
   74.36 -	.check_hw        = xfp_phy_check_hw,
   74.37 -	.fini            = xfp_phy_fini,
   74.38 -	.clear_interrupt = xfp_phy_clear_interrupt,
   74.39 -	.reset_xaui      = efx_port_dummy_op_void,
   74.40 -	.mmds            = XFP_REQUIRED_DEVS,
   74.41 -	.loopbacks       = XFP_LOOPBACKS,
   74.42 -	/* No loopback appears to be reliable enough for self-test
   74.43 -	 * operation. So don't do it. */
   74.44 +	.init             = xfp_phy_init,
   74.45 +	.reconfigure      = xfp_phy_reconfigure,
   74.46 +	.check_hw         = xfp_phy_check_hw,
   74.47 +	.fini             = xfp_phy_fini,
   74.48 +	.clear_interrupt  = xfp_phy_clear_interrupt,
   74.49 +	.reset_xaui       = efx_port_dummy_op_void,
   74.50 +	.mmds             = XFP_REQUIRED_DEVS,
   74.51 +	.loopbacks        = XFP_LOOPBACKS,
   74.52  	.startup_loopback = LOOPBACK_PCS,
   74.53  };
    75.1 --- a/drivers/xen/sfc_netback/accel.c	Tue Mar 31 11:49:12 2009 +0100
    75.2 +++ b/drivers/xen/sfc_netback/accel.c	Tue Mar 31 11:59:10 2009 +0100
    75.3 @@ -38,7 +38,9 @@ static int netback_accel_netdev_event(st
    75.4  	struct net_device *net_dev = (struct net_device *)ptr;
    75.5  	struct netback_accel *bend;
    75.6  
    75.7 -	if ((event == NETDEV_UP) || (event == NETDEV_DOWN)) {
    75.8 +	if ((event == NETDEV_UP) || 
    75.9 +	    (event == NETDEV_DOWN) ||
   75.10 +	    (event == NETDEV_CHANGE)) {
   75.11  		mutex_lock(&bend_list_mutex);
   75.12  		bend = bend_list;
   75.13  		while (bend != NULL) {
   75.14 @@ -51,9 +53,16 @@ static int netback_accel_netdev_event(st
   75.15  			if (bend->shared_page == NULL)
   75.16  				goto next;
   75.17  
   75.18 -			if (bend->net_dev->ifindex == net_dev->ifindex)
   75.19 -				netback_accel_set_interface_state
   75.20 -					(bend, event == NETDEV_UP);
   75.21 +			if (bend->net_dev->ifindex == net_dev->ifindex) {
   75.22 +				int ok;
   75.23 +				if (event == NETDEV_CHANGE)
   75.24 +					ok = (netif_carrier_ok(net_dev) && 
   75.25 +					      (net_dev->flags & IFF_UP));
   75.26 +				else
   75.27 +					ok = (netif_carrier_ok(net_dev) && 
   75.28 +					      (event == NETDEV_UP));
   75.29 +				netback_accel_set_interface_state(bend, ok);
   75.30 +			}
   75.31  
   75.32  		next:
   75.33  			mutex_unlock(&bend->bend_mutex);
   75.34 @@ -86,22 +95,31 @@ static int __init netback_accel_init(voi
   75.35  #endif
   75.36  
   75.37  	rc = netback_accel_init_fwd();
   75.38 -
   75.39 -	if (rc == 0)
   75.40 -		netback_accel_debugfs_init();
   75.41 -
   75.42 -	if (rc == 0)
   75.43 -		rc = netback_accel_sf_init();
   75.44 +	if (rc != 0)
   75.45 +		goto fail0;
   75.46  
   75.47 -	if (rc == 0)
   75.48 -		rc = register_netdevice_notifier
   75.49 -			(&netback_accel_netdev_notifier);
   75.50 +	netback_accel_debugfs_init();
   75.51  
   75.52 -	/*
   75.53 -	 * What if no device was found, shouldn't we clean up stuff
   75.54 -	 * we've allocated for acceleration subsystem?
   75.55 -	 */
   75.56 +	rc = netback_accel_sf_init();
   75.57 +	if (rc != 0)
   75.58 +		goto fail1;
   75.59  
   75.60 +	rc = register_netdevice_notifier
   75.61 +		(&netback_accel_netdev_notifier);
   75.62 +	if (rc != 0)
   75.63 +		goto fail2;
   75.64 +
   75.65 +	return 0;
   75.66 +
   75.67 + fail2:
   75.68 +	netback_accel_sf_shutdown();
   75.69 + fail1:
   75.70 +	netback_accel_debugfs_fini();
   75.71 +	netback_accel_shutdown_fwd();
   75.72 + fail0:
   75.73 +#ifdef EFX_GCOV
   75.74 +	gcov_provider_fini(THIS_MODULE);
   75.75 +#endif
   75.76  	return rc;
   75.77  }
   75.78  
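
The rewritten netback_accel_init() above is the standard kernel goto-ladder: each failure label unwinds exactly the steps that succeeded before it, in reverse order, so nothing leaks on a partial init. A minimal self-contained sketch of the idiom (setup_fwd/setup_sf/register_notifier and their teardowns are hypothetical stand-ins):

static int  setup_fwd(void)         { return 0; }
static void teardown_fwd(void)      { }
static int  setup_sf(void)          { return 0; }
static void teardown_sf(void)       { }
static int  register_notifier(void) { return 0; }

static int demo_init(void)
{
	int rc;

	rc = setup_fwd();
	if (rc != 0)
		goto fail0;

	rc = setup_sf();
	if (rc != 0)
		goto fail1;

	rc = register_notifier();
	if (rc != 0)
		goto fail2;

	return 0;

 fail2:
	teardown_sf();
 fail1:
	teardown_fwd();
 fail0:
	return rc;
}

Reading the labels bottom-up gives the teardown order; reading the calls top-down gives the setup order, and the two must stay mirror images as steps are added.
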
    76.1 --- a/drivers/xen/sfc_netback/accel_solarflare.c	Tue Mar 31 11:49:12 2009 +0100
    76.2 +++ b/drivers/xen/sfc_netback/accel_solarflare.c	Tue Mar 31 11:59:10 2009 +0100
    76.3 @@ -170,25 +170,16 @@ static struct netback_accel_hooks accel_
    76.4   */
    76.5  static int efx_device_to_efab_nic_index(struct efx_dl_device *efx_dl_dev) 
    76.6  {
    76.7 -	int i;
    76.8 -
    76.9 -	for (i = 0; i < EFHW_MAX_NR_DEVS; i++) {
   76.10 -		struct efhw_nic *nic = efrm_nic_table.nic[i];
   76.11 +	int i, rc = -1;
   76.12 +	struct efhw_nic *nic;
   76.13  
   76.14 -		/*
   76.15 -		 * It's possible for the nic structure to have not
   76.16 -		 * been initialised if the resource driver failed its
   76.17 -		 * driverlink probe
   76.18 -		 */ 
   76.19 -		if (nic == NULL || nic->net_driver_dev == NULL)
   76.20 -			continue;
   76.21 -
   76.22 -		/* Work out if these are talking about the same NIC */
   76.23 -		if (nic->net_driver_dev->pci_dev == efx_dl_dev->pci_dev)
   76.24 -			return i;
   76.25 +	EFRM_FOR_EACH_NIC(i, nic) {
   76.26 +		if (nic != NULL && nic->net_driver_dev != NULL &&
   76.27 +		    nic->net_driver_dev->pci_dev == efx_dl_dev->pci_dev)
   76.28 +			rc = i;
   76.29  	}
   76.30  
   76.31 -	return -1;
   76.32 +	return rc;
   76.33  }
   76.34  
   76.35  
   76.36 @@ -600,9 +591,6 @@ static int ef_bend_hwinfo_falcon_common(
   76.37  		return rc;
   76.38  	}
   76.39  
   76.40 -	if (res_mdata.version != 0)
   76.41 -		return -EPROTO;
   76.42 -
   76.43  	hwinfo->nic_arch = res_mdata.nic_arch;
   76.44  	hwinfo->nic_variant = res_mdata.nic_variant;
   76.45  	hwinfo->nic_revision = res_mdata.nic_revision;
   76.46 @@ -648,38 +636,57 @@ static int ef_bend_hwinfo_falcon_common(
   76.47  	}
   76.48  
   76.49  	VPRINTK("Passing txdmaq page pfn %lx\n", txdmaq_pfn);
   76.50 -	accel_hw_priv->txdmaq_gnt = hwinfo->txdmaq_gnt = 
   76.51 -		net_accel_grant_page(bend->hdev_data, pfn_to_mfn(txdmaq_pfn), 
   76.52 -				     0);
   76.53 +	rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(txdmaq_pfn), 0);
   76.54 +	if (rc < 0)
   76.55 +		goto fail0;
   76.56 +	accel_hw_priv->txdmaq_gnt = hwinfo->txdmaq_gnt = rc;
   76.57  
   76.58  	VPRINTK("Passing rxdmaq page pfn %lx\n", rxdmaq_pfn);
   76.59 -	accel_hw_priv->rxdmaq_gnt = hwinfo->rxdmaq_gnt = 
   76.60 -		net_accel_grant_page(bend->hdev_data, pfn_to_mfn(rxdmaq_pfn), 
   76.61 -				     0);
   76.62 +	rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(rxdmaq_pfn), 0);
   76.63 +	if (rc < 0)
   76.64 +		goto fail1;
   76.65 +	accel_hw_priv->rxdmaq_gnt = hwinfo->rxdmaq_gnt = rc;
   76.66  
   76.67  	VPRINTK("Passing doorbell page mfn %x\n", hwinfo->doorbell_mfn);
   76.68  	/* Make the relevant H/W pages mappable by the far end */
   76.69 -	accel_hw_priv->doorbell_gnt = hwinfo->doorbell_gnt = 
   76.70 -		net_accel_grant_page(bend->hdev_data, hwinfo->doorbell_mfn, 1);
   76.71 +	rc = net_accel_grant_page(bend->hdev_data, hwinfo->doorbell_mfn, 1);
   76.72 +	if (rc < 0)
   76.73 +		goto fail2;
   76.74 +	accel_hw_priv->doorbell_gnt = hwinfo->doorbell_gnt = rc;
   76.75  	
   76.76  	/* Now do the same for the memory pages */
   76.77  	/* Convert the page + length we got back for the evq to grants. */
   76.78  	for (i = 0; i < accel_hw_priv->evq_npages; i++) {
   76.79 -		accel_hw_priv->evq_mem_gnts[i] = hwinfo->evq_mem_gnts[i] =
   76.80 -			net_accel_grant_page(bend->hdev_data, pfn_to_mfn(pfn), 0);
   76.81 +		rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(pfn), 0);
   76.82 +		if (rc < 0)
   76.83 +			goto fail3;
   76.84 +		accel_hw_priv->evq_mem_gnts[i] = hwinfo->evq_mem_gnts[i] = rc;
   76.85 +
   76.86  		VPRINTK("Got grant %u for evq pfn %x\n", hwinfo->evq_mem_gnts[i], 
   76.87  			pfn);
   76.88  		pfn++;
   76.89  	}
   76.90  
   76.91  	return 0;
   76.92 +
   76.93 + fail3:
   76.94 +	for (i = i - 1; i >= 0; i--) {
   76.95 +		ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i], bend->far_end);
   76.96 +	}
   76.97 +	ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
   76.98 + fail2:
   76.99 +	ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
  76.100 + fail1:
  76.101 +	ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);	
  76.102 + fail0:
  76.103 +	return rc;
  76.104  }
  76.105  
  76.106  
  76.107  static int ef_bend_hwinfo_falcon_a(struct netback_accel *bend, 
  76.108  				   struct net_accel_hw_falcon_a *hwinfo)
  76.109  {
  76.110 -	int rc;
  76.111 +	int rc, i;
  76.112  	struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
  76.113  
  76.114  	if ((rc = ef_bend_hwinfo_falcon_common(bend, &hwinfo->common)) != 0)
  76.115 @@ -695,8 +702,17 @@ static int ef_bend_hwinfo_falcon_a(struc
  76.116  		hwinfo->common.evq_rptr);
  76.117  	rc = net_accel_grant_page(bend->hdev_data, 
  76.118  				  hwinfo->common.evq_rptr >> PAGE_SHIFT, 0);
  76.119 -	if (rc < 0)
  76.120 +	if (rc < 0) {
  76.121 +		/* Undo ef_bend_hwinfo_falcon_common() */
  76.122 +		ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
  76.123 +		ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
  76.124 +		ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
  76.125 +		for (i = 0; i < accel_hw_priv->evq_npages; i++) {
  76.126 +			ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i],
  76.127 +					 bend->far_end);
  76.128 +		}
  76.129  		return rc;
  76.130 +	}
  76.131  
  76.132  	accel_hw_priv->evq_rptr_gnt = hwinfo->evq_rptr_gnt = rc;
  76.133  	VPRINTK("evq_rptr_gnt got %d\n", hwinfo->evq_rptr_gnt);
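
The same unwind discipline extends above to the partially granted page array: when grant i fails, grants 0..i-1 are released in reverse before falling through to the earlier labels. A self-contained sketch of that loop-unwind shape (grant_one()/ungrant_one() are stand-ins for net_accel_grant_page()/ungrant_or_crash()):

static int  grant_one(int i)    { return i;  /* pretend success */ }
static void ungrant_one(int g)  { (void)g; }

static int grant_all(int *gnts, int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = grant_one(i);
		if (rc < 0)
			goto fail;
		gnts[i] = rc;
	}
	return 0;

 fail:
	for (i = i - 1; i >= 0; i--)    /* back off over what succeeded */
		ungrant_one(gnts[i]);
	return rc;
}

int main(void)
{
	int gnts[4];
	return grant_all(gnts, 4);
}
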
    77.1 --- a/drivers/xen/sfc_netback/accel_xenbus.c	Tue Mar 31 11:49:12 2009 +0100
    77.2 +++ b/drivers/xen/sfc_netback/accel_xenbus.c	Tue Mar 31 11:59:10 2009 +0100
    77.3 @@ -240,7 +240,8 @@ static int setup_vnic(struct xenbus_devi
    77.4  
    77.5  	/* Initialise the shared page(s) used for comms */
    77.6  	net_accel_msg_init_page(bend->shared_page, PAGE_SIZE, 
    77.7 -				bend->net_dev->flags & IFF_UP);
    77.8 +				(bend->net_dev->flags & IFF_UP) && 
    77.9 +				(netif_carrier_ok(bend->net_dev)));
   77.10  
   77.11  	msgs_per_queue = (PAGE_SIZE/2) / sizeof(struct net_accel_msg);
   77.12  
    78.1 --- a/drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h	Tue Mar 31 11:49:12 2009 +0100
    78.2 +++ b/drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h	Tue Mar 31 11:59:10 2009 +0100
    78.3 @@ -190,8 +190,6 @@ efx_vi_filter_stop(struct efx_vi_state *
    78.4  /*! Constants for the type field in efx_vi_hw_resource */
    78.5  #define EFX_VI_HW_RESOURCE_TXDMAQ    0x0	/* PFN of TX DMA Q */
    78.6  #define EFX_VI_HW_RESOURCE_RXDMAQ    0x1	/* PFN of RX DMA Q */
    78.7 -#define EFX_VI_HW_RESOURCE_TXBELL    0x2	/* PFN of TX Doorbell (EF1) */
    78.8 -#define EFX_VI_HW_RESOURCE_RXBELL    0x3	/* PFN of RX Doorbell (EF1) */
    78.9  #define EFX_VI_HW_RESOURCE_EVQTIMER  0x4	/* Address of event q timer */
   78.10  
   78.11  /* Address of event q pointer (EF1) */
   78.12 @@ -229,7 +227,6 @@ struct efx_vi_hw_resource {
   78.13   * Metadata concerning the list of hardware resource mappings
   78.14   */
   78.15  struct efx_vi_hw_resource_metadata {
   78.16 -	int version;
   78.17  	int evq_order;
   78.18  	int evq_offs;
   78.19  	int evq_capacity;
    79.1 --- a/drivers/xen/sfc_netback/ci/efhw/common.h	Tue Mar 31 11:49:12 2009 +0100
    79.2 +++ b/drivers/xen/sfc_netback/ci/efhw/common.h	Tue Mar 31 11:59:10 2009 +0100
    79.3 @@ -56,10 +56,6 @@ typedef union {
    79.4  		uint32_t a;
    79.5  		uint32_t b;
    79.6  	} opaque;
    79.7 -	struct {
    79.8 -		uint32_t code;
    79.9 -		uint32_t status;
   79.10 -	} ev1002;
   79.11  } efhw_event_t;
   79.12  
   79.13  /* Flags for TX/RX queues */
    80.1 --- a/drivers/xen/sfc_netback/ci/efhw/common_sysdep.h	Tue Mar 31 11:49:12 2009 +0100
    80.2 +++ b/drivers/xen/sfc_netback/ci/efhw/common_sysdep.h	Tue Mar 31 11:59:10 2009 +0100
    80.3 @@ -52,8 +52,12 @@
    80.4  
    80.5  /* Linux kernel also does not provide PRIx32... Sigh. */
    80.6  #define PRIx32 "x"
    80.7 -#define PRIx64 "llx"
    80.8 -
    80.9 + 
   80.10 +#ifdef __ia64__
   80.11 +# define PRIx64 "lx"
   80.12 +#else
   80.13 +# define PRIx64 "llx"
   80.14 +#endif
   80.15  
   80.16  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
   80.17  enum {
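
The ia64 special case above exists because the kernel's u64 is 'unsigned long' on ia64 but 'unsigned long long' on the other supported targets, so the printf length modifier has to differ. A self-contained check of the convention (MY_PRIx64 and the demo typedef are local stand-ins for the header's PRIx64):

#include <stdio.h>

#ifdef __ia64__
typedef unsigned long demo_u64;
# define MY_PRIx64 "lx"
#else
typedef unsigned long long demo_u64;
# define MY_PRIx64 "llx"
#endif

int main(void)
{
	demo_u64 addr = 0xdeadbeefcafef00dULL;

	/* The macro pastes into the format string, so the specifier
	 * always matches the width of the type on this target. */
	printf("addr=0x%" MY_PRIx64 "\n", addr);
	return 0;
}
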
    81.1 --- a/drivers/xen/sfc_netback/ci/efhw/efhw_types.h	Tue Mar 31 11:49:12 2009 +0100
    81.2 +++ b/drivers/xen/sfc_netback/ci/efhw/efhw_types.h	Tue Mar 31 11:59:10 2009 +0100
    81.3 @@ -45,14 +45,6 @@
    81.4  
    81.5  /*--------------------------------------------------------------------
    81.6   *
    81.7 - * hardware limits used in the types
    81.8 - *
    81.9 - *--------------------------------------------------------------------*/
   81.10 -
   81.11 -#define EFHW_KEVENTQ_MAX    8
   81.12 -
   81.13 -/*--------------------------------------------------------------------
   81.14 - *
   81.15   * forward type declarations
   81.16   *
   81.17   *--------------------------------------------------------------------*/
   81.18 @@ -72,7 +64,7 @@ struct efhw_buffer_table_allocation{
   81.19  
   81.20  struct eventq_resource_hardware {
   81.21  	/*!iobuffer allocated for eventq - can be larger than eventq */
   81.22 -	efhw_iopages_t iobuff;
   81.23 +	struct efhw_iopages iobuff;
   81.24  	unsigned iobuff_off;
   81.25  	struct efhw_buffer_table_allocation buf_tbl_alloc;
   81.26  	int capacity;		/*!< capacity of event queue */
   81.27 @@ -85,7 +77,7 @@ struct eventq_resource_hardware {
   81.28   *--------------------------------------------------------------------*/
   81.29  
   81.30  struct efhw_keventq {
   81.31 -	volatile int lock;
   81.32 +	int lock;
   81.33  	caddr_t evq_base;
   81.34  	int32_t evq_ptr;
   81.35  	uint32_t evq_mask;
   81.36 @@ -115,7 +107,7 @@ struct efhw_func_ops {
   81.37  	/*! initialise all hardware functional units */
   81.38  	int (*init_hardware) (struct efhw_nic *nic,
   81.39  			      struct efhw_ev_handler *,
   81.40 -			      const uint8_t *mac_addr);
   81.41 +			      const uint8_t *mac_addr, int non_irq_evq);
   81.42  
   81.43    /*-------------- Interrupt support  ------------ */
   81.44  
   81.45 @@ -130,17 +122,17 @@ struct efhw_func_ops {
   81.46  	 */
   81.47  	int (*interrupt) (struct efhw_nic *nic);
   81.48  
   81.49 -	/*! Enable given interrupt mask for the given IRQ unit */
   81.50 -	void (*interrupt_enable) (struct efhw_nic *nic, uint idx);
   81.51 +	/*! Enable the interrupt */
   81.52 +	void (*interrupt_enable) (struct efhw_nic *nic);
   81.53  
   81.54 -	/*! Disable given interrupt mask for the given IRQ unit */
   81.55 -	void (*interrupt_disable) (struct efhw_nic *nic, uint idx);
   81.56 +	/*! Disable the interrupt */
   81.57 +	void (*interrupt_disable) (struct efhw_nic *nic);
   81.58  
   81.59  	/*! Set interrupt moderation strategy for the given IRQ unit
   81.60  	 ** val is in usec
   81.61  	 */
   81.62  	void (*set_interrupt_moderation)(struct efhw_nic *nic,
   81.63 -					 uint idx, uint val);
   81.64 +					 uint val);
   81.65  
   81.66    /*-------------- Event support  ------------ */
   81.67  
   81.68 @@ -255,8 +247,8 @@ struct efhw_device_type {
   81.69  
   81.70  /*! */
   81.71  struct efhw_nic {
   81.72 -	/*! zero base index in efrm_nic_table.nic array */
   81.73 -	volatile int index;
   81.74 +	/*! zero base index in efrm_nic_tablep->nic array */
   81.75 +	int index;
   81.76  	int ifindex;		/*!< OS level nic index */
   81.77  #ifdef HAS_NET_NAMESPACE
   81.78  	struct net *nd_net;
   81.79 @@ -283,7 +275,7 @@ struct efhw_nic {
   81.80  	/* hardware resources */
   81.81  
   81.82  	/*! I/O address of the start of the bar */
   81.83 -	efhw_ioaddr_t bar_ioaddr;
   81.84 +	volatile char __iomem *bar_ioaddr;
   81.85  
   81.86  	/*! Bar number of control aperture. */
   81.87  	unsigned ctr_ap_bar;
   81.88 @@ -312,14 +304,17 @@ struct efhw_nic {
   81.89  	void (*irq_handler) (struct efhw_nic *, int unit);
   81.90  
   81.91  	/*! event queues per driver */
   81.92 -	struct efhw_keventq evq[EFHW_KEVENTQ_MAX];
   81.93 +	struct efhw_keventq interrupting_evq;
   81.94  
   81.95  /* for marking when we are not using an IRQ unit
   81.96        - 0 is a valid offset to an IRQ unit on EF1! */
   81.97  #define EFHW_IRQ_UNIT_UNUSED  0xffff
   81.98 -	/*! interrupt unit in use  */
   81.99 -	unsigned int irq_unit[EFHW_KEVENTQ_MAX];
  81.100 -	efhw_iopage_t irq_iobuff;	/*!<  Falcon SYSERR interrupt */
  81.101 +	/*! interrupt unit in use for the interrupting event queue  */
  81.102 +	unsigned int irq_unit;
  81.103 +
  81.104 +	struct efhw_keventq non_interrupting_evq;
  81.105 +
  81.106 +	struct efhw_iopage irq_iobuff;	/*!<  Falcon SYSERR interrupt */
  81.107  
  81.108  	/* The new driverlink infrastructure. */
  81.109  	struct efx_dl_device *net_driver_dev;
    82.1 --- a/drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h	Tue Mar 31 11:49:12 2009 +0100
    82.2 +++ b/drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h	Tue Mar 31 11:59:10 2009 +0100
    82.3 @@ -50,6 +50,10 @@
    82.4  #error Unknown endianness
    82.5  #endif
    82.6  
    82.7 +#ifndef __iomem
    82.8 +#define __iomem
    82.9 +#endif
   82.10 +
   82.11  #ifndef mmiowb
   82.12  	#if defined(__i386__) || defined(__x86_64__)
   82.13  		#define mmiowb()
   82.14 @@ -63,10 +67,8 @@
   82.15  	#endif
   82.16  #endif
   82.17  
   82.18 -typedef char *efhw_ioaddr_t;
   82.19 -
   82.20  #ifndef readq
   82.21 -static inline uint64_t __readq(void __iomem *addr)
   82.22 +static inline uint64_t __readq(volatile void __iomem *addr)
   82.23  {
   82.24  	return *(volatile uint64_t *)addr;
   82.25  }
   82.26 @@ -74,7 +76,7 @@ static inline uint64_t __readq(void __io
   82.27  #endif
   82.28  
   82.29  #ifndef writeq
   82.30 -static inline void __writeq(uint64_t v, void __iomem *addr)
   82.31 +static inline void __writeq(uint64_t v, volatile void __iomem *addr)
   82.32  {
   82.33  	*(volatile uint64_t *)addr = v;
   82.34  }
    83.1 --- a/drivers/xen/sfc_netback/ci/efhw/iopage_types.h	Tue Mar 31 11:49:12 2009 +0100
    83.2 +++ b/drivers/xen/sfc_netback/ci/efhw/iopage_types.h	Tue Mar 31 11:59:10 2009 +0100
    83.3 @@ -3,7 +3,8 @@
    83.4   *          resource management for Xen backend, OpenOnload, etc
    83.5   *           (including support for SFE4001 10GBT NIC)
    83.6   *
    83.7 - * This file provides efhw_page_t and efhw_iopage_t for Linux kernel.
    83.8 + * This file provides struct efhw_page and struct efhw_iopage for Linux
    83.9 + * kernel.
   83.10   *
   83.11   * Copyright 2005-2007: Solarflare Communications Inc,
   83.12   *                      9501 Jeronimo Road, Suite 250,
   83.13 @@ -38,77 +39,83 @@
   83.14  #ifndef __CI_EFHW_IOPAGE_LINUX_H__
   83.15  #define __CI_EFHW_IOPAGE_LINUX_H__
   83.16  
   83.17 +#include <linux/version.h>
   83.18  #include <linux/gfp.h>
   83.19 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
   83.20  #include <linux/hardirq.h>
   83.21 +#else
   83.22 +#include <asm/hardirq.h>
   83.23 +#endif
   83.24 +#include <linux/errno.h>
   83.25  #include <ci/efhw/debug.h>
   83.26  
   83.27  /*--------------------------------------------------------------------
   83.28   *
   83.29 - * efhw_page_t: A single page of memory.  Directly mapped in the driver,
   83.30 - * and can be mapped to userlevel.
   83.31 + * struct efhw_page: A single page of memory.  Directly mapped in the
   83.32 + * driver, and can be mapped to userlevel.
   83.33   *
   83.34   *--------------------------------------------------------------------*/
   83.35  
   83.36 -typedef struct {
   83.37 +struct efhw_page {
   83.38  	unsigned long kva;
   83.39 -} efhw_page_t;
   83.40 +};
   83.41  
   83.42 -static inline int efhw_page_alloc(efhw_page_t *p)
   83.43 +static inline int efhw_page_alloc(struct efhw_page *p)
   83.44  {
   83.45  	p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
   83.46  	return p->kva ? 0 : -ENOMEM;
   83.47  }
   83.48  
   83.49 -static inline int efhw_page_alloc_zeroed(efhw_page_t *p)
   83.50 +static inline int efhw_page_alloc_zeroed(struct efhw_page *p)
   83.51  {
   83.52  	p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
   83.53  	return p->kva ? 0 : -ENOMEM;
   83.54  }
   83.55  
   83.56 -static inline void efhw_page_free(efhw_page_t *p)
   83.57 +static inline void efhw_page_free(struct efhw_page *p)
   83.58  {
   83.59  	free_page(p->kva);
   83.60  	EFHW_DO_DEBUG(memset(p, 0, sizeof(*p)));
   83.61  }
   83.62  
   83.63 -static inline char *efhw_page_ptr(efhw_page_t *p)
   83.64 +static inline char *efhw_page_ptr(struct efhw_page *p)
   83.65  {
   83.66  	return (char *)p->kva;
   83.67  }
   83.68  
   83.69 -static inline unsigned efhw_page_pfn(efhw_page_t *p)
   83.70 +static inline unsigned efhw_page_pfn(struct efhw_page *p)
   83.71  {
   83.72  	return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
   83.73  }
   83.74  
   83.75 -static inline void efhw_page_mark_invalid(efhw_page_t *p)
   83.76 +static inline void efhw_page_mark_invalid(struct efhw_page *p)
   83.77  {
   83.78  	p->kva = 0;
   83.79  }
   83.80  
   83.81 -static inline int efhw_page_is_valid(efhw_page_t *p)
   83.82 +static inline int efhw_page_is_valid(struct efhw_page *p)
   83.83  {
   83.84  	return p->kva != 0;
   83.85  }
   83.86  
   83.87 -static inline void efhw_page_init_from_va(efhw_page_t *p, void *va)
   83.88 +static inline void efhw_page_init_from_va(struct efhw_page *p, void *va)
   83.89  {
   83.90  	p->kva = (unsigned long)va;
   83.91  }
   83.92  
   83.93  /*--------------------------------------------------------------------
   83.94   *
   83.95 - * efhw_iopage_t: A single page of memory.  Directly mapped in the driver,
   83.96 + * struct efhw_iopage: A single page of memory.  Directly mapped in the driver,
   83.97   * and can be mapped to userlevel.  Can also be accessed by the NIC.
   83.98   *
   83.99   *--------------------------------------------------------------------*/
  83.100  
  83.101 -typedef struct {
  83.102 -	efhw_page_t p;
  83.103 +struct efhw_iopage {
  83.104 +	struct efhw_page p;
  83.105  	dma_addr_t dma_addr;
  83.106 -} efhw_iopage_t;
  83.107 +};
  83.108  
  83.109 -static inline dma_addr_t efhw_iopage_dma_addr(efhw_iopage_t *p)
  83.110 +static inline dma_addr_t efhw_iopage_dma_addr(struct efhw_iopage *p)
  83.111  {
  83.112  	return p->dma_addr;
  83.113  }
  83.114 @@ -120,9 +127,9 @@ static inline dma_addr_t efhw_iopage_dma
  83.115  
  83.116  /*--------------------------------------------------------------------
  83.117   *
  83.118 - * efhw_iopages_t: A set of pages that are contiguous in physical memory.
  83.119 - * Directly mapped in the driver, and can be mapped to userlevel.  Can also
  83.120 - * be accessed by the NIC.
  83.121 + * struct efhw_iopages: A set of pages that are contiguous in physical
  83.122 + * memory.  Directly mapped in the driver, and can be mapped to userlevel.
  83.123 + * Can also be accessed by the NIC.
  83.124   *
  83.125   * NB. The O/S may be unwilling to allocate many, or even any of these.  So
  83.126   * only use this type where the NIC really needs a physically contiguous
  83.127 @@ -130,44 +137,44 @@ static inline dma_addr_t efhw_iopage_dma
  83.128   *
  83.129   *--------------------------------------------------------------------*/
  83.130  
  83.131 -typedef struct {
  83.132 +struct efhw_iopages {
  83.133  	caddr_t kva;
  83.134  	unsigned order;
  83.135  	dma_addr_t dma_addr;
  83.136 -} efhw_iopages_t;
  83.137 +};
  83.138  
  83.139 -static inline caddr_t efhw_iopages_ptr(efhw_iopages_t *p)
  83.140 +static inline caddr_t efhw_iopages_ptr(struct efhw_iopages *p)
  83.141  {
  83.142  	return p->kva;
  83.143  }
  83.144  
  83.145 -static inline unsigned efhw_iopages_pfn(efhw_iopages_t *p)
  83.146 +static inline unsigned efhw_iopages_pfn(struct efhw_iopages *p)
  83.147  {
  83.148  	return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
  83.149  }
  83.150  
  83.151 -static inline dma_addr_t efhw_iopages_dma_addr(efhw_iopages_t *p)
  83.152 +static inline dma_addr_t efhw_iopages_dma_addr(struct efhw_iopages *p)
  83.153  {
  83.154  	return p->dma_addr;
  83.155  }
  83.156  
  83.157 -static inline unsigned efhw_iopages_size(efhw_iopages_t *p)
  83.158 +static inline unsigned efhw_iopages_size(struct efhw_iopages *p)
  83.159  {
  83.160  	return 1u << (p->order + PAGE_SHIFT);
  83.161  }
  83.162  
  83.163 -/* efhw_iopage_t <-> efhw_iopages_t conversions for handling physically
  83.164 - * contiguous allocations in iobufsets for iSCSI.  This allows the
  83.165 - * essential information about contiguous allocations from
  83.166 - * efhw_iopages_alloc() to be saved away in the efhw_iopage_t array in an
  83.167 - * iobufset.  (Changing the iobufset resource to use a union type would
  83.168 +/* struct efhw_iopage <-> struct efhw_iopages conversions for handling
  83.169 + * physically contiguous allocations in iobufsets for iSCSI.  This allows
  83.170 + * the essential information about contiguous allocations from
  83.171 + * efhw_iopages_alloc() to be saved away in the struct efhw_iopage array in
  83.172 + * an iobufset.  (Changing the iobufset resource to use a union type would
  83.173   * involve a lot of code changes, and make the iobufset's metadata larger
  83.174   * which could be bad as it's supposed to fit into a single page on some
  83.175   * platforms.)
  83.176   */
  83.177  static inline void
  83.178 -efhw_iopage_init_from_iopages(efhw_iopage_t *iopage,
  83.179 -			    efhw_iopages_t *iopages, unsigned pageno)
  83.180 +efhw_iopage_init_from_iopages(struct efhw_iopage *iopage,
  83.181 +			      struct efhw_iopages *iopages, unsigned pageno)
  83.182  {
  83.183  	iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages))
  83.184  	    + (pageno * PAGE_SIZE);
  83.185 @@ -176,8 +183,8 @@ efhw_iopage_init_from_iopages(efhw_iopag
  83.186  }
  83.187  
  83.188  static inline void
  83.189 -efhw_iopages_init_from_iopage(efhw_iopages_t *iopages,
  83.190 -			    efhw_iopage_t *iopage, unsigned order)
  83.191 +efhw_iopages_init_from_iopage(struct efhw_iopages *iopages,
  83.192 +			      struct efhw_iopage *iopage, unsigned order)
  83.193  {
  83.194  	iopages->kva = (caddr_t) efhw_iopage_ptr(iopage);
  83.195  	EFHW_ASSERT(iopages->kva);
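
One practical effect of the typedef-to-struct renames above: a struct tag can be forward-declared by headers that only traffic in pointers, whereas a typedef of an anonymous struct forces the full definition to be visible everywhere it is named. A two-line sketch of the payoff (demo_unmap is hypothetical):

struct efhw_iopage;                      /* forward declaration suffices */
void demo_unmap(struct efhw_iopage *p);  /* pointer-only interface, no need
                                          * to include the defining header */
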
    84.1 --- a/drivers/xen/sfc_netback/ci/efrm/nic_table.h	Tue Mar 31 11:49:12 2009 +0100
    84.2 +++ b/drivers/xen/sfc_netback/ci/efrm/nic_table.h	Tue Mar 31 11:59:10 2009 +0100
    84.3 @@ -62,21 +62,21 @@ struct efrm_nic_table {
    84.4  };
    84.5  
    84.6  /* Resource driver structures used by other drivers as well */
    84.7 -extern struct efrm_nic_table efrm_nic_table;
    84.8 +extern struct efrm_nic_table *efrm_nic_tablep;
    84.9  
   84.10  static inline void efrm_nic_table_hold(void)
   84.11  {
   84.12 -	atomic_inc(&efrm_nic_table.ref_count);
   84.13 +	atomic_inc(&efrm_nic_tablep->ref_count);
   84.14  }
   84.15  
   84.16  static inline void efrm_nic_table_rele(void)
   84.17  {
   84.18 -	atomic_dec(&efrm_nic_table.ref_count);
   84.19 +	atomic_dec(&efrm_nic_tablep->ref_count);
   84.20  }
   84.21  
   84.22  static inline int efrm_nic_table_held(void)
   84.23  {
   84.24 -	return (atomic_read(&efrm_nic_table.ref_count) != 0);
   84.25 +	return (atomic_read(&efrm_nic_tablep->ref_count) != 0);
   84.26  }
   84.27  
   84.28  /* Run code block _x multiple times with variable nic set to each
   84.29 @@ -86,13 +86,13 @@ static inline int efrm_nic_table_held(vo
   84.30  	for ((_nic_i) = (efrm_nic_table_hold(), 0);			\
   84.31  	     (_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0);	\
   84.32  	     (_nic_i)++)						\
   84.33 -		if (((_nic) = efrm_nic_table.nic[_nic_i]))
   84.34 +		if (((_nic) = efrm_nic_tablep->nic[_nic_i]))
   84.35  
   84.36  #define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic)			\
   84.37  	for ((_i) = (efrm_nic_table_hold(), 0);				\
   84.38  	     (_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0);	\
   84.39  	     ++(_i))							\
   84.40 -		if (((_nic) = efrm_nic_table.nic[_i]) &&		\
   84.41 +		if (((_nic) = efrm_nic_tablep->nic[_i]) &&		\
   84.42  		    efrm_nic_set_read((_set), (_i)))
   84.43  
   84.44  #endif /* __CI_EFRM_NIC_TABLE_H__ */
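
EFRM_FOR_EACH_NIC hides the table refcounting inside the for-clauses: the initialiser takes the hold, and the comma expression in the condition drops it exactly once, when the index first runs off the end. A self-contained re-creation of the shape (the table, hold/rele counters and sizes are stand-ins):

#include <stdio.h>

#define MAX_NR_DEVS 4

struct nic { int id; };

static struct nic *table[MAX_NR_DEVS];
static int hold_cnt;

static void table_hold(void) { hold_cnt++; }
static void table_rele(void) { hold_cnt--; }

/* Hold in the initialiser, rele via the comma expression once the
 * index passes the end, and skip NULL slots with the trailing if. */
#define FOR_EACH_NIC(_i, _nic)						\
	for ((_i) = (table_hold(), 0);					\
	     (_i) < MAX_NR_DEVS || (table_rele(), 0);			\
	     (_i)++)							\
		if (((_nic) = table[_i]))

int main(void)
{
	struct nic a = { 1 }, b = { 3 };
	struct nic *nic;
	int i;

	table[0] = &a;
	table[2] = &b;

	FOR_EACH_NIC(i, nic) {
		printf("slot %d -> nic %d\n", i, nic->id);
	}
	printf("hold count back to %d\n", hold_cnt);  /* 0 */
	return 0;
}
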
    85.1 --- a/drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h	Tue Mar 31 11:49:12 2009 +0100
    85.2 +++ b/drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h	Tue Mar 31 11:59:10 2009 +0100
    85.3 @@ -50,7 +50,11 @@
    85.4  #include <linux/workqueue.h>
    85.5  #include <linux/gfp.h>
    85.6  #include <linux/slab.h>
    85.7 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
    85.8  #include <linux/hardirq.h>
    85.9 +#else
   85.10 +#include <asm/hardirq.h>
   85.11 +#endif
   85.12  #include <linux/kernel.h>
   85.13  #include <linux/if_ether.h>
   85.14  #include <linux/completion.h>
   85.15 @@ -61,6 +65,21 @@
   85.16  #include <linux/log2.h>
   85.17  #endif
   85.18  
   85.19 +
   85.20 +/********************************************************************
   85.21 + *
   85.22 + * Utility functions
   85.23 + *
   85.24 + ********************************************************************/
   85.25 +
   85.26 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
   85.27 +static inline unsigned long __attribute_const__ roundup_pow_of_two(unsigned long x)
   85.28 +{
   85.29 +        return (1UL << fls(x - 1));
   85.30 +}
   85.31 +#endif
   85.32 +
   85.33 +
   85.34  /********************************************************************
   85.35   *
   85.36   * List API
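
The pre-2.6.9 roundup_pow_of_two() fallback added above is the classic fls trick: for x > 1, the highest set bit of x - 1 sits one position below the next power of two, so 1UL << fls(x - 1) rounds up (and x = 1 maps to 1 because fls(0) is 0). A self-contained check, emulating the kernel's fls() with GCC's __builtin_clzl:

#include <stdio.h>

/* Kernel-style fls(): 1-based index of the most significant set bit,
 * with fls(0) == 0. */
static int demo_fls(unsigned long x)
{
	return x ? 8 * (int)sizeof(long) - __builtin_clzl(x) : 0;
}

static unsigned long demo_roundup_pow_of_two(unsigned long x)
{
	return 1UL << demo_fls(x - 1);
}

int main(void)
{
	unsigned long v;

	for (v = 1; v <= 9; v++)
		printf("%lu -> %lu\n", v, demo_roundup_pow_of_two(v));
	/* prints 1->1, 2->2, 3->4, 4->4, 5->8, ..., 9->16 */
	return 0;
}
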
    86.1 --- a/drivers/xen/sfc_netback/ci/tools/log.h	Tue Mar 31 11:49:12 2009 +0100
    86.2 +++ b/drivers/xen/sfc_netback/ci/tools/log.h	Tue Mar 31 11:59:10 2009 +0100
    86.3 @@ -163,6 +163,13 @@ extern int ci_format_ip4_addr(char* buf,
    86.4    ** must be at least 16 bytes long.
    86.5    */
    86.6  
    86.7 +#if defined(__unix__) && ! defined(__KERNEL__)
    86.8 +extern int ci_format_select_set(char* s, int len_s, int nfds, const fd_set*);
    86.9 +extern int ci_format_select(char* s, int len_s,
   86.10 +			    int nfds, const fd_set* rds, const fd_set* wrs,
   86.11 +			    const fd_set* exs, struct timeval* timeout);
   86.12 +#endif
   86.13 +
   86.14  
   86.15  /**********************************************************************
   86.16   * Error checking.
    87.1 --- a/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h	Tue Mar 31 11:49:12 2009 +0100
    87.2 +++ b/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h	Tue Mar 31 11:59:10 2009 +0100
    87.3 @@ -116,6 +116,15 @@ ci_inline void ci_atomic32_and(volatile 
    87.4  ci_inline void ci_atomic32_add(volatile ci_uint32* p, ci_uint32 v)
    87.5  { __asm__ __volatile__("lock; addl %1, %0" : "+m" (*p) : "ir" (v)); }
    87.6  
    87.7 +ci_inline void ci_atomic32_inc(volatile ci_uint32* p)
    87.8 +{ __asm__ __volatile__("lock; incl %0" : "+m" (*p)); }
    87.9 +
   87.10 +ci_inline int ci_atomic32_dec_and_test(volatile ci_uint32* p) {
   87.11 +  char r;
   87.12 +  __asm__ __volatile__("lock; decl %0; sete %1" : "+m" (*p), "=qm" (r));
   87.13 +  return r;
   87.14 +}
   87.15 +
   87.16  #define ci_atomic_or(a, v)   ci_atomic32_or ((ci_uint32*) &(a)->n, (v))
   87.17  #define ci_atomic_and(a, v)  ci_atomic32_and((ci_uint32*) &(a)->n, (v))
   87.18  #define ci_atomic_add(a, v)  ci_atomic32_add((ci_uint32*) &(a)->n, (v))
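
ci_atomic32_dec_and_test() above is the refcount primitive behind release paths: sete captures the zero flag from the locked decrement, so the call returns true exactly on the 1 -> 0 transition and only one caller ever runs the destructor. A self-contained check (x86/x86-64 only, reusing the same inline asm):

#include <stdio.h>

typedef unsigned int ci_uint32;

static inline void atomic32_inc(volatile ci_uint32 *p)
{ __asm__ __volatile__("lock; incl %0" : "+m" (*p)); }

static inline int atomic32_dec_and_test(volatile ci_uint32 *p)
{
	char r;
	__asm__ __volatile__("lock; decl %0; sete %1" : "+m" (*p), "=qm" (r));
	return r;
}

int main(void)
{
	volatile ci_uint32 ref = 1;

	atomic32_inc(&ref);                                /* ref == 2 */
	printf("zero? %d\n", atomic32_dec_and_test(&ref)); /* zero? 0 */
	printf("zero? %d\n", atomic32_dec_and_test(&ref)); /* zero? 1 */
	return 0;
}
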
    88.1 --- a/drivers/xen/sfc_netfront/accel.h	Tue Mar 31 11:49:12 2009 +0100
    88.2 +++ b/drivers/xen/sfc_netfront/accel.h	Tue Mar 31 11:59:10 2009 +0100
    88.3 @@ -108,10 +108,22 @@ struct netfront_accel_stats {
    88.4  	/** Number of frame trunc events seen on fastpath */
    88.5  	u64 fastpath_frm_trunc;
    88.6  
    88.7 +	/** Number of rx discard (bad crc) events seen on fastpath */
    88.8 +	u64 fastpath_crc_bad;
    88.9 +
   88.10 +	/** Number of rx discard (bad csum) events seen on fastpath */
   88.11 +	u64 fastpath_csum_bad;
   88.12 +
   88.13 +	/** Number of rx discard (bad rights) events seen on fastpath */
   88.14 +	u64 fastpath_rights_bad;
   88.15 +
   88.16 +	/** Number of rx discard ("other") events seen on fastpath */
   88.17 +	u64 fastpath_discard_other;
   88.18 +
   88.19  	/** Number of no rx descriptor trunc events seen on fastpath */
   88.20  	u64 rx_no_desc_trunc;
   88.21  
   88.22 -	/** The number of misc bad events (e.g. RX_DISCARD) processed. */
   88.23 +	/** The number of misc bad events processed. */
   88.24  	u64 bad_event_count;
   88.25  
   88.26  	/** Number of events dealt with in poll loop */
   88.27 @@ -163,6 +175,10 @@ struct netfront_accel_dbfs {
   88.28  	struct dentry *fastpath_tx_completions;
   88.29  	struct dentry *fastpath_tx_pending_max;
   88.30  	struct dentry *fastpath_frm_trunc;
   88.31 +	struct dentry *fastpath_crc_bad;
   88.32 +	struct dentry *fastpath_csum_bad;
   88.33 +	struct dentry *fastpath_rights_bad;
   88.34 +	struct dentry *fastpath_discard_other;
   88.35  	struct dentry *rx_no_desc_trunc;
   88.36  	struct dentry *event_count;
   88.37  	struct dentry *bad_event_count;
    89.1 --- a/drivers/xen/sfc_netfront/accel_debugfs.c	Tue Mar 31 11:49:12 2009 +0100
    89.2 +++ b/drivers/xen/sfc_netfront/accel_debugfs.c	Tue Mar 31 11:59:10 2009 +0100
    89.3 @@ -146,6 +146,18 @@ int netfront_accel_debugfs_create(netfro
    89.4  	vnic->dbfs.fastpath_frm_trunc = debugfs_create_u64
    89.5  		("fastpath_frm_trunc", S_IRUSR | S_IRGRP | S_IROTH,
    89.6  		 vnic->dbfs_dir, &vnic->stats.fastpath_frm_trunc);
    89.7 +	vnic->dbfs.fastpath_crc_bad = debugfs_create_u64
    89.8 +		("fastpath_crc_bad", S_IRUSR | S_IRGRP | S_IROTH,
    89.9 +		 vnic->dbfs_dir, &vnic->stats.fastpath_crc_bad);
   89.10 +	vnic->dbfs.fastpath_csum_bad = debugfs_create_u64
   89.11 +		("fastpath_csum_bad", S_IRUSR | S_IRGRP | S_IROTH,
   89.12 +		 vnic->dbfs_dir, &vnic->stats.fastpath_csum_bad);
   89.13 +	vnic->dbfs.fastpath_rights_bad = debugfs_create_u64
   89.14 +		("fastpath_rights_bad", S_IRUSR | S_IRGRP | S_IROTH,
   89.15 +		 vnic->dbfs_dir, &vnic->stats.fastpath_rights_bad);
   89.16 +	vnic->dbfs.fastpath_discard_other = debugfs_create_u64
   89.17 +		("fastpath_discard_other", S_IRUSR | S_IRGRP | S_IROTH,
   89.18 +		 vnic->dbfs_dir, &vnic->stats.fastpath_discard_other);
   89.19  	vnic->dbfs.rx_no_desc_trunc = debugfs_create_u64
   89.20  		("rx_no_desc_trunc", S_IRUSR | S_IRGRP | S_IROTH,
   89.21  		 vnic->dbfs_dir, &vnic->stats.rx_no_desc_trunc);
   89.22 @@ -199,6 +211,10 @@ int netfront_accel_debugfs_remove(netfro
   89.23  		debugfs_remove(vnic->dbfs.event_count_since_irq);
   89.24  		debugfs_remove(vnic->dbfs.events_per_irq_max);
   89.25  		debugfs_remove(vnic->dbfs.fastpath_frm_trunc);
   89.26 +		debugfs_remove(vnic->dbfs.fastpath_crc_bad);
   89.27 +		debugfs_remove(vnic->dbfs.fastpath_csum_bad);
   89.28 +		debugfs_remove(vnic->dbfs.fastpath_rights_bad);
   89.29 +		debugfs_remove(vnic->dbfs.fastpath_discard_other);
   89.30  		debugfs_remove(vnic->dbfs.rx_no_desc_trunc);
   89.31  		debugfs_remove(vnic->dbfs.events_per_poll_max);
   89.32  		debugfs_remove(vnic->dbfs.events_per_poll_rx_max);
    90.1 --- a/drivers/xen/sfc_netfront/accel_msg.c	Tue Mar 31 11:49:12 2009 +0100
    90.2 +++ b/drivers/xen/sfc_netfront/accel_msg.c	Tue Mar 31 11:59:10 2009 +0100
    90.3 @@ -124,7 +124,6 @@ void vnic_stop_fastpath(netfront_accel_v
    90.4  
    90.5  static void netfront_accel_interface_up(netfront_accel_vnic *vnic)
    90.6  {
    90.7 -
    90.8  	if (!vnic->backend_netdev_up) {
    90.9  		vnic->backend_netdev_up = 1;
   90.10  		
   90.11 @@ -136,7 +135,6 @@ static void netfront_accel_interface_up(
   90.12  
   90.13  static void netfront_accel_interface_down(netfront_accel_vnic *vnic)
   90.14  {
   90.15 -
   90.16  	if (vnic->backend_netdev_up) {
   90.17  		vnic->backend_netdev_up = 0;
   90.18  		
    91.1 --- a/drivers/xen/sfc_netfront/accel_netfront.c	Tue Mar 31 11:49:12 2009 +0100
    91.2 +++ b/drivers/xen/sfc_netfront/accel_netfront.c	Tue Mar 31 11:59:10 2009 +0100
    91.3 @@ -274,7 +274,7 @@ static int __init netfront_accel_init(vo
    91.4  
    91.5  	if (rc < 0) {
    91.6  		EPRINTK("Xen netfront accelerator version mismatch\n");
    91.7 -		return -EINVAL;
    91.8 +		goto fail;
    91.9  	}
   91.10  
   91.11  	if (rc > 0) {
   91.12 @@ -283,10 +283,19 @@ static int __init netfront_accel_init(vo
   91.13  		 * and accept certain subsets of previous versions
   91.14  		 */
   91.15  		EPRINTK("Xen netfront accelerator version mismatch\n");
   91.16 -		return -EINVAL;
   91.17 +		goto fail;
   91.18  	}
   91.19  
   91.20  	return 0;
   91.21 +
   91.22 + fail:
   91.23 +	netfront_accel_debugfs_fini();
   91.24 +	flush_workqueue(netfront_accel_workqueue);
   91.25 +	destroy_workqueue(netfront_accel_workqueue);
   91.26 +#ifdef EFX_GCOV
   91.27 + 	gcov_provider_fini(THIS_MODULE);
   91.28 +#endif
   91.29 +	return -EINVAL;
   91.30  }
   91.31  module_init(netfront_accel_init);
   91.32  
    92.1 --- a/drivers/xen/sfc_netfront/accel_vi.c	Tue Mar 31 11:49:12 2009 +0100
    92.2 +++ b/drivers/xen/sfc_netfront/accel_vi.c	Tue Mar 31 11:59:10 2009 +0100
    92.3 @@ -938,18 +938,29 @@ static int netfront_accel_vi_poll_proces
    92.4  				" buffer %d RX_DISCARD_OTHER q_id %d\n",
    92.5  				__FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
    92.6  				EF_EVENT_RX_DISCARD_Q_ID(*ev) );
    92.7 -			/*
    92.8 -			 * Probably tail of packet for which error has
    92.9 -			 * already been logged, so don't count in
   92.10 -			 * stats
   92.11 -			 */
   92.12 +			NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_discard_other);
   92.13 +		} else if (EF_EVENT_RX_DISCARD_TYPE(*ev) ==
   92.14 +			   EF_EVENT_RX_DISCARD_CSUM_BAD) {
   92.15 +			DPRINTK("%s: " EF_EVENT_FMT 
   92.16 +				" buffer %d DISCARD CSUM_BAD q_id %d\n",
   92.17 +				__FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
   92.18 +				EF_EVENT_RX_DISCARD_Q_ID(*ev) );
   92.19 +			NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_csum_bad);
   92.20 +		} else if (EF_EVENT_RX_DISCARD_TYPE(*ev) ==
   92.21 +			   EF_EVENT_RX_DISCARD_CRC_BAD) {
   92.22 +			DPRINTK("%s: " EF_EVENT_FMT 
   92.23 +				" buffer %d DISCARD CRC_BAD q_id %d\n",
   92.24 +				__FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
   92.25 +				EF_EVENT_RX_DISCARD_Q_ID(*ev) );
   92.26 +			NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_crc_bad);
   92.27  		} else {
   92.28 -			EPRINTK("%s: " EF_EVENT_FMT 
   92.29 -				" buffer %d rx discard type %d q_id %d\n",
   92.30 +			BUG_ON(EF_EVENT_RX_DISCARD_TYPE(*ev) !=
   92.31 +			       EF_EVENT_RX_DISCARD_RIGHTS);
   92.32 +			DPRINTK("%s: " EF_EVENT_FMT 
   92.33 +				" buffer %d DISCARD RIGHTS q_id %d\n",
   92.34  				__FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
   92.35 -				EF_EVENT_RX_DISCARD_TYPE(*ev), 
   92.36  				EF_EVENT_RX_DISCARD_Q_ID(*ev) );
   92.37 -			NETFRONT_ACCEL_STATS_OP(++vnic->stats.bad_event_count);
   92.38 +			NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_rights_bad);
   92.39  		}
   92.40  	}
   92.41  
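
Each RX discard reason now feeds its own counter at DPRINTK verbosity instead of one catch-all bad_event_count at error verbosity, and the final BUG_ON records that RIGHTS is the only type left unhandled. The same classification reads naturally as a switch; a hedged sketch with stand-in event codes and stats struct:

#include <assert.h>

enum discard_type {
	DISCARD_OTHER, DISCARD_CSUM_BAD, DISCARD_CRC_BAD, DISCARD_RIGHTS,
};

struct demo_stats {
	unsigned long long other, csum_bad, crc_bad, rights_bad;
};

static void count_discard(struct demo_stats *s, enum discard_type t)
{
	switch (t) {
	case DISCARD_OTHER:    s->other++;      break;
	case DISCARD_CSUM_BAD: s->csum_bad++;   break;
	case DISCARD_CRC_BAD:  s->crc_bad++;    break;
	case DISCARD_RIGHTS:   s->rights_bad++; break;
	default:               assert(0);  /* plays the role of the BUG_ON */
	}
}
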
    93.1 --- a/drivers/xen/sfc_netfront/ef_vi_falcon.h	Tue Mar 31 11:49:12 2009 +0100
    93.2 +++ b/drivers/xen/sfc_netfront/ef_vi_falcon.h	Tue Mar 31 11:59:10 2009 +0100
    93.3 @@ -156,7 +156,7 @@
    93.4  #define EFVI_FALCON_EVQTIMER_DISABLE  (EFVI_FALCON_TIMER_MODE_DIS  << TIMER_MODE_LBN) 
    93.5  
    93.6  
    93.7 -/* ---- efhw_event_t helpers --- */
    93.8 +/* ---- ef_vi_event helpers --- */
    93.9  
   93.10  #define EFVI_FALCON_EVENT_CODE(evp) \
   93.11         ((evp)->u64 & EFVI_FALCON_EVENT_CODE_MASK)
    94.1 --- a/drivers/xen/sfc_netfront/etherfabric/ef_vi.h	Tue Mar 31 11:49:12 2009 +0100
    94.2 +++ b/drivers/xen/sfc_netfront/etherfabric/ef_vi.h	Tue Mar 31 11:59:10 2009 +0100
    94.3 @@ -225,24 +225,6 @@ typedef struct {
    94.4  /* Falcon constants */
    94.5  #define TX_EV_DESC_PTR_LBN 0
    94.6  
    94.7 -/**********************************************************************
    94.8 - * ef_iobufset ********************************************************
    94.9 - **********************************************************************/
   94.10 -
   94.11 -/*! \i_ef_bufs An [ef_iobufset] is a collection of buffers to be used
   94.12 -** with the NIC.
   94.13 -*/
   94.14 -typedef struct ef_iobufset {
   94.15 -	unsigned                      magic;
   94.16 -	unsigned                      bufs_mmap_bytes;
   94.17 -	unsigned                      bufs_handle;
   94.18 -	int                           bufs_ptr_off;
   94.19 -	ef_addr                       bufs_addr;
   94.20 -	unsigned                      bufs_size; /* size rounded to pow2 */
   94.21 -	int                           bufs_num;
   94.22 -	int                           faultonaccess;
   94.23 -} ef_iobufset;
   94.24 -
   94.25  
   94.26  /**********************************************************************
   94.27   * ef_vi **************************************************************
    95.1 --- a/drivers/xen/sfc_netfront/falcon_event.c	Tue Mar 31 11:49:12 2009 +0100
    95.2 +++ b/drivers/xen/sfc_netfront/falcon_event.c	Tue Mar 31 11:59:10 2009 +0100
    95.3 @@ -95,7 +95,7 @@ ef_vi_inline int falcon_rx_check_dup(ef_
    95.4  				     const ef_vi_qword* ev)
    95.5  {
    95.6  	unsigned q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
    95.7 -	unsigned desc_ptr = QWORD_GET_U(RX_EV_DESC_PTR, *ev);
    95.8 +	uint16_t desc_ptr = QWORD_GET_U(RX_EV_DESC_PTR, *ev);
    95.9  	ef_rx_dup_state_t* rx_dup_state = &evq->evq_state->rx_dup_state[q_id];
   95.10  
   95.11  	if(likely( desc_ptr != rx_dup_state->rx_last_desc_ptr )) {
    96.1 --- a/drivers/xen/sfc_netfront/falcon_vi.c	Tue Mar 31 11:49:12 2009 +0100
    96.2 +++ b/drivers/xen/sfc_netfront/falcon_vi.c	Tue Mar 31 11:59:10 2009 +0100
    96.3 @@ -431,6 +431,14 @@ int ef_vi_receive_init(ef_vi* vi, ef_add
    96.4  }
    96.5  
    96.6  
    96.7 +int ef_vi_receive_post(ef_vi* vi, ef_addr addr, ef_request_id dma_id)
    96.8 +{
    96.9 +  int rc = ef_vi_receive_init(vi, addr, dma_id, 0);
   96.10 +  if( rc == 0 )  ef_vi_receive_push(vi);
   96.11 +  return rc;
   96.12 +}
   96.13 +
   96.14 +
   96.15  void ef_vi_receive_push(ef_vi* vi)
   96.16  {
   96.17  	ef_vi_wiob();
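
The new ef_vi_receive_post() packages the common single-buffer case: initialise one RX descriptor, then ring the doorbell. Callers refilling in bulk would still use the two-step API so the write barrier and doorbell are paid once per batch; a hedged usage fragment (vi, addr, addrs, ids and n are caller state, not defined here):

/* One-off post: what ef_vi_receive_post() does internally. */
if (ef_vi_receive_init(vi, addr, dma_id, 0) == 0)
	ef_vi_receive_push(vi);

/* Bulk refill: many inits, one push (one wiob + doorbell write). */
for (i = 0; i < n; i++)
	ef_vi_receive_init(vi, addrs[i], ids[i], 0);
ef_vi_receive_push(vi);
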
    97.1 --- a/drivers/xen/sfc_netfront/sysdep.h	Tue Mar 31 11:49:12 2009 +0100
    97.2 +++ b/drivers/xen/sfc_netfront/sysdep.h	Tue Mar 31 11:59:10 2009 +0100
    97.3 @@ -33,6 +33,10 @@
    97.4  #ifndef __CI_CIUL_SYSDEP_LINUX_H__
    97.5  #define __CI_CIUL_SYSDEP_LINUX_H__
    97.6  
    97.7 +
    97.8 +#define ef_vi_wiob()  mmiowb()
    97.9 +
   97.10 +
   97.11  /**********************************************************************
   97.12   * Kernel version compatibility
   97.13   */
   97.14 @@ -72,12 +76,19 @@
   97.15  
   97.16  # if defined(__i386__) || defined(__x86_64__)  /* GCC x86/x64 */
   97.17     typedef unsigned long long ef_vi_dma_addr_t; 
   97.18 -#  if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
   97.19 -#   define ef_vi_wiob()  __asm__ __volatile__ ("sfence")
   97.20 -#  else
   97.21 -#   define ef_vi_wiob()  __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
   97.22 +# endif
   97.23 +#endif
   97.24 +
   97.25 +#ifndef mmiowb
   97.26 +# if defined(__i386__) || defined(__x86_64__)
   97.27 +#  define mmiowb()
   97.28 +# elif defined(__ia64__)
   97.29 +#  ifndef ia64_mfa
   97.30 +#   define ia64_mfa() asm volatile ("mf.a" ::: "memory")
   97.31  #  endif
   97.32 -
   97.33 +#  define mmiowb ia64_mfa
   97.34 +# else
   97.35 +#  error "Need definition for mmiowb"
   97.36  # endif
   97.37  #endif
   97.38  
   97.39 @@ -88,7 +99,6 @@
   97.40  #if !defined(__GNUC__)
   97.41  # if defined(__PPC__)  /* GCC, PPC */
   97.42     typedef unsigned long     ef_vi_dma_addr_t;
   97.43 -#  define ef_vi_wiob()  wmb()
   97.44  
   97.45  #  ifdef __powerpc64__
   97.46  #   ifdef CONFIG_SMP
   97.47 @@ -110,8 +120,6 @@
   97.48  
   97.49  # elif defined(__ia64__)  /* GCC, IA64 */
   97.50     typedef unsigned long     ef_vi_dma_addr_t;
   97.51 -#  define ef_vi_wiob()  __asm__ __volatile__("mf.a": : :"memory")
   97.52 -
   97.53  # else
   97.54  #  error Unknown processor - GNU C
   97.55  # endif
   97.56 @@ -127,13 +135,6 @@
   97.57  #   define EF_VI_LIKELY(t)    __builtin_expect((t), 1)
   97.58  #   define EF_VI_UNLIKELY(t)  __builtin_expect((t), 0)
   97.59  #  endif
   97.60 -
   97.61 -#  if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
   97.62 -#   define ef_vi_wiob()  __asm__ __volatile__ ("sfence")
   97.63 -#  else
   97.64 -#   define ef_vi_wiob()  __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
   97.65 -#  endif
   97.66 -
   97.67  # else
   97.68  #  error Old Intel compiler not supported.
   97.69  # endif