ia64/linux-2.6.18-xen.hg

changeset 424:fc90e9b2c12b

Solarflare: Standard network driver.
Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Feb 18 10:29:07 2008 +0000 (2008-02-18)
parents 0034d9389130
children e4dd072db259
files MAINTAINERS drivers/net/Kconfig drivers/net/Makefile drivers/net/sfc/Kconfig drivers/net/sfc/Makefile drivers/net/sfc/alaska.c drivers/net/sfc/bitfield.h drivers/net/sfc/boards.c drivers/net/sfc/boards.h drivers/net/sfc/config.h drivers/net/sfc/debugfs.c drivers/net/sfc/debugfs.h drivers/net/sfc/driverlink.c drivers/net/sfc/driverlink.h drivers/net/sfc/driverlink_api.h drivers/net/sfc/efx.c drivers/net/sfc/efx.h drivers/net/sfc/enum.h drivers/net/sfc/ethtool.c drivers/net/sfc/ethtool.h drivers/net/sfc/extraversion.h drivers/net/sfc/falcon.c drivers/net/sfc/falcon.h drivers/net/sfc/falcon_gmac.c drivers/net/sfc/falcon_hwdefs.h drivers/net/sfc/falcon_io.h drivers/net/sfc/falcon_xmac.c drivers/net/sfc/gmii.h drivers/net/sfc/i2c-direct.c drivers/net/sfc/i2c-direct.h drivers/net/sfc/kernel_compat.c drivers/net/sfc/kernel_compat.h drivers/net/sfc/lm87_support.c drivers/net/sfc/lm87_support.h drivers/net/sfc/mac.h drivers/net/sfc/mdio_10g.c drivers/net/sfc/mdio_10g.h drivers/net/sfc/mentormac.c drivers/net/sfc/mtd.c drivers/net/sfc/net_driver.h drivers/net/sfc/null_phy.c drivers/net/sfc/phy.c drivers/net/sfc/phy.h drivers/net/sfc/pm8358_phy.c drivers/net/sfc/rx.c drivers/net/sfc/rx.h drivers/net/sfc/selftest.c drivers/net/sfc/selftest.h drivers/net/sfc/sfe4001.c drivers/net/sfc/spi.h drivers/net/sfc/tenxpress.c drivers/net/sfc/tx.c drivers/net/sfc/tx.h drivers/net/sfc/txc43128_phy.c drivers/net/sfc/workarounds.h drivers/net/sfc/xenpack.h drivers/net/sfc/xfp_phy.c
line diff
     1.1 --- a/MAINTAINERS	Fri Feb 15 10:01:06 2008 +0000
     1.2 +++ b/MAINTAINERS	Mon Feb 18 10:29:07 2008 +0000
     1.3 @@ -2558,6 +2558,13 @@ M:	pfg@sgi.com
     1.4  L:	linux-ia64@vger.kernel.org
     1.5  S:	Supported
     1.6  
     1.7 +SFC NETWORK DRIVER
     1.8 +P:	Steve Hodgson
     1.9 +P:	Ben Hutchings
    1.10 +P:	Robert Stonehouse
    1.11 +M:	linux-net-drivers@solarflare.com
    1.12 +S:	Supported
    1.13 +
    1.14  SGI VISUAL WORKSTATION 320 AND 540
    1.15  P:	Andrey Panin
    1.16  M:	pazke@donpac.ru
     2.1 --- a/drivers/net/Kconfig	Fri Feb 15 10:01:06 2008 +0000
     2.2 +++ b/drivers/net/Kconfig	Mon Feb 18 10:29:07 2008 +0000
     2.3 @@ -2399,6 +2399,8 @@ config MYRI10GE
     2.4  	  <file:Documentation/networking/net-modules.txt>.  The module
     2.5  	  will be called myri10ge.
     2.6  
     2.7 +source "drivers/net/sfc/Kconfig"
     2.8 +
     2.9  endmenu
    2.10  
    2.11  source "drivers/net/tokenring/Kconfig"
     3.1 --- a/drivers/net/Makefile	Fri Feb 15 10:01:06 2008 +0000
     3.2 +++ b/drivers/net/Makefile	Mon Feb 18 10:29:07 2008 +0000
     3.3 @@ -219,3 +219,5 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o
     3.4  
     3.5  obj-$(CONFIG_FS_ENET) += fs_enet/
     3.6  
     3.7 +obj-$(CONFIG_SFC) += sfc/
     3.8 +
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/drivers/net/sfc/Kconfig	Mon Feb 18 10:29:07 2008 +0000
     4.3 @@ -0,0 +1,28 @@
     4.4 +config SFC
     4.5 +	tristate "Solarflare Solarstorm SFC4000 support"
     4.6 +	depends on PCI && INET
     4.7 +	select MII
     4.8 +	help
     4.9 +	  This driver supports 10-gigabit Ethernet cards based on
    4.10 +	  the Solarflare Communications Solarstorm SFC4000 controller.
    4.11 +
    4.12 +	  To compile this driver as a module, choose M here.  The module
    4.13 +	  will be called sfc.
    4.14 +
    4.15 +config SFC_DEBUGFS
    4.16 +	bool "Solarflare Solarstorm SFC4000 debugging support"
    4.17 +	depends on SFC && DEBUG_FS
     4.18 +	default n
    4.19 +	help
    4.20 +	  This option creates an "sfc" subdirectory of debugfs with
    4.21 +	  debugging information for the SFC4000 driver.
    4.22 +
    4.23 +	  If unsure, say N.
    4.24 +
    4.25 +config SFC_MTD
    4.26 +	depends on SFC && MTD && MTD_PARTITIONS
    4.27 +	tristate "Solarflare Solarstorm SFC4000 flash/EEPROM support"
    4.28 +	help
    4.29 +	  This module exposes the on-board flash and/or EEPROM memory as
    4.30 +	  MTD devices (e.g. /dev/mtd1).  This makes it possible to upload a
    4.31 +	  new boot ROM to the NIC.
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/drivers/net/sfc/Makefile	Mon Feb 18 10:29:07 2008 +0000
     5.3 @@ -0,0 +1,42 @@
     5.4 +
     5.5 +# Final objects
     5.6 +sfc_o = sfc.o
     5.7 +sfc_mtd_o = sfc_mtd.o
     5.8 +
     5.9 +# Constituent objects
    5.10 +sfc_elements_o :=
    5.11 +sfc_elements_o += efx.o
    5.12 +sfc_elements_o += falcon.o
    5.13 +sfc_elements_o += tx.o
    5.14 +sfc_elements_o += rx.o
    5.15 +sfc_elements_o += mentormac.o
    5.16 +sfc_elements_o += falcon_gmac.o
    5.17 +sfc_elements_o += falcon_xmac.o
    5.18 +sfc_elements_o += alaska.o
    5.19 +sfc_elements_o += i2c-direct.o
    5.20 +sfc_elements_o += selftest.o
    5.21 +sfc_elements_o += driverlink.o
    5.22 +ifeq ($(CONFIG_SFC_DEBUGFS),y)
    5.23 +sfc_elements_o += debugfs.o
    5.24 +endif
    5.25 +sfc_elements_o += ethtool.o
    5.26 +sfc_elements_o += xfp_phy.o
    5.27 +sfc_elements_o += mdio_10g.o
    5.28 +sfc_elements_o += txc43128_phy.o
    5.29 +sfc_elements_o += tenxpress.o
    5.30 +sfc_elements_o += lm87_support.o
    5.31 +sfc_elements_o += boards.o
    5.32 +sfc_elements_o += sfe4001.o
    5.33 +sfc_elements_o += pm8358_phy.o
    5.34 +sfc_elements_o += null_phy.o
    5.35 +sfc_elements_o += phy.o
    5.36 +sfc_elements_o += kernel_compat.o
    5.37 +
    5.38 +sfc_mtd_elements_o := mtd.o
    5.39 +
    5.40 +obj-$(CONFIG_SFC) += $(sfc_o)
    5.41 +obj-$(CONFIG_SFC_MTD) += $(sfc_mtd_o)
    5.42 +
    5.43 +sfc-objs = $(sfc_elements_o)
    5.44 +sfc_mtd-objs = $(sfc_mtd_elements_o)
    5.45 +
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/drivers/net/sfc/alaska.c	Mon Feb 18 10:29:07 2008 +0000
     6.3 @@ -0,0 +1,159 @@
     6.4 +/****************************************************************************
     6.5 + * Driver for Solarflare network controllers
     6.6 + *           (including support for SFE4001 10GBT NIC)
     6.7 + *
     6.8 + * Copyright 2005:      Fen Systems Ltd.
     6.9 + * Copyright 2006-2007: Solarflare Communications Inc,
    6.10 + *                      9501 Jeronimo Road, Suite 250,
    6.11 + *                      Irvine, CA 92618, USA
    6.12 + *
    6.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
    6.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
    6.15 + *
    6.16 + * This program is free software; you can redistribute it and/or modify it
    6.17 + * under the terms of the GNU General Public License version 2 as published
    6.18 + * by the Free Software Foundation, incorporated herein by reference.
    6.19 + *
    6.20 + * This program is distributed in the hope that it will be useful,
    6.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    6.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    6.23 + * GNU General Public License for more details.
    6.24 + *
    6.25 + * You should have received a copy of the GNU General Public License
    6.26 + * along with this program; if not, write to the Free Software
    6.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    6.28 + ****************************************************************************
    6.29 + */
    6.30 +
    6.31 +#include "net_driver.h"
    6.32 +#include <linux/ethtool.h>
    6.33 +#include "gmii.h"
    6.34 +#include "phy.h"
    6.35 +
    6.36 +/* Marvell 88E1111 "Alaska" PHY control */
    6.37 +#define ALASKA_PHY_SPECIFIC 16
    6.38 +#define ALASKA_ALLOW_SLEEP 0x0200
    6.39 +
    6.40 +#define ALASKA_EXTENDED_CONTROL 20
    6.41 +#define EXTENDED_LINE_LOOPBACK 0x8000
    6.42 +
    6.43 +#define ALASKA_LED_CONTROL 24
    6.44 +#define LED_BLINK_MASK 0x0700
    6.45 +#define LED_BLINK_FAST 0x0100
    6.46 +#define LED_BLINK_SLOW 0x0300
    6.47 +#define LED_TX_CTRL_MASK 0x0041
    6.48 +#define LED_TX_CTRL_LINK_AND_ACTIVITY 0x0001
    6.49 +
    6.50 +#define ALASKA_LED_OVERRIDE 25
    6.51 +#define LED_LINK1000_MASK 0x0030
    6.52 +#define LED_LINK1000_BLINK 0x0010
    6.53 +#define LED_TX_MASK 0x0003
    6.54 +#define LED_TX_BLINK 0x0001
    6.55 +
    6.56 +static void alaska_reconfigure(struct efx_nic *efx)
    6.57 +{
    6.58 +	struct mii_if_info *gmii = &efx->mii;
    6.59 +	u32 bmcr, phy_ext;
    6.60 +
    6.61 +	/* Configure line loopback if requested */
    6.62 +	phy_ext = gmii->mdio_read(gmii->dev, gmii->phy_id,
    6.63 +				  ALASKA_EXTENDED_CONTROL);
    6.64 +	if (efx->loopback_mode == LOOPBACK_NETWORK)
    6.65 +		phy_ext |= EXTENDED_LINE_LOOPBACK;
    6.66 +	else
    6.67 +		phy_ext &= ~EXTENDED_LINE_LOOPBACK;
    6.68 +	gmii->mdio_write(gmii->dev, gmii->phy_id, ALASKA_EXTENDED_CONTROL,
    6.69 +			 phy_ext);
    6.70 +
    6.71 +	/* Configure PHY loopback if requested */
    6.72 +	bmcr = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_BMCR);
    6.73 +	if (efx->loopback_mode == LOOPBACK_PHY)
    6.74 +		bmcr |= BMCR_LOOPBACK;
    6.75 +	else
    6.76 +		bmcr &= ~BMCR_LOOPBACK;
    6.77 +	gmii->mdio_write(gmii->dev, gmii->phy_id, MII_BMCR, bmcr);
    6.78 +
    6.79 +	/* Read link up status */
    6.80 +	if (efx->loopback_mode == LOOPBACK_NONE)
    6.81 +		efx->link_up = mii_link_ok(gmii);
    6.82 +	else
    6.83 +		efx->link_up = 1;
    6.84 +
    6.85 +	/* Determine link options from PHY */
    6.86 +	if (gmii->force_media) {
    6.87 +		efx->link_options = gmii_forced_result(bmcr);
    6.88 +	} else {
    6.89 +		int lpa = gmii_lpa(gmii);
    6.90 +		int adv = gmii_advertised(gmii);
    6.91 +		efx->link_options = gmii_nway_result(adv & lpa);
    6.92 +	}
    6.93 +}
    6.94 +
    6.95 +static void alaska_clear_interrupt(struct efx_nic *efx)
    6.96 +{
    6.97 +	struct mii_if_info *gmii = &efx->mii;
    6.98 +
    6.99 +	/* Read interrupt status register to clear */
   6.100 +	gmii->mdio_read(gmii->dev, gmii->phy_id, GMII_ISR);
   6.101 +}
   6.102 +
   6.103 +static int alaska_init(struct efx_nic *efx)
   6.104 +{
   6.105 +	struct mii_if_info *gmii = &efx->mii;
   6.106 +	u32 ier, leds, ctrl_1g, phy_spec;
   6.107 +
   6.108 +	/* Read ISR to clear any outstanding PHY interrupts */
   6.109 +	gmii->mdio_read(gmii->dev, gmii->phy_id, GMII_ISR);
   6.110 +
   6.111 +	/* Enable PHY interrupts */
   6.112 +	ier = gmii->mdio_read(gmii->dev, gmii->phy_id, GMII_IER);
   6.113 +	ier |= IER_LINK_CHG;
   6.114 +	gmii->mdio_write(gmii->dev, gmii->phy_id, GMII_IER, ier);
   6.115 +
   6.116 +	/* Remove 1G half-duplex as unsupported in Mentor MAC */
   6.117 +	ctrl_1g = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000);
   6.118 +	ctrl_1g &= ~(ADVERTISE_1000HALF);
   6.119 +	gmii->mdio_write(gmii->dev, gmii->phy_id, MII_CTRL1000, ctrl_1g);
   6.120 +
   6.121 +	/*
   6.122 +	 * The PHY can save power when there is no external connection
   6.123 +	 * (sleep mode).  However, this is incompatible with PHY
    6.124 +	 * loopback, and if you enable and disable it quickly the PHY can
   6.125 +	 * go to sleep even when sleep mode is disabled.  (SFC bug
   6.126 +	 * 9309.)  Therefore we disable it all the time.
   6.127 +	 */
   6.128 +	phy_spec = gmii->mdio_read(gmii->dev, gmii->phy_id,
   6.129 +				   ALASKA_PHY_SPECIFIC);
   6.130 +	phy_spec &= ~ALASKA_ALLOW_SLEEP;
   6.131 +	gmii->mdio_write(gmii->dev, gmii->phy_id, ALASKA_PHY_SPECIFIC,
   6.132 +			 phy_spec);
   6.133 +
   6.134 +	/* Configure LEDs */
   6.135 +	leds = gmii->mdio_read(gmii->dev, gmii->phy_id, ALASKA_LED_CONTROL);
   6.136 +	leds &= ~(LED_BLINK_MASK | LED_TX_CTRL_MASK);
   6.137 +	leds |= (LED_BLINK_FAST | LED_TX_CTRL_LINK_AND_ACTIVITY);
   6.138 +	gmii->mdio_write(gmii->dev, gmii->phy_id, ALASKA_LED_CONTROL, leds);
   6.139 +
   6.140 +	return 0;
   6.141 +}
   6.142 +
   6.143 +static void alaska_fini(struct efx_nic *efx)
   6.144 +{
   6.145 +	struct mii_if_info *gmii = &efx->mii;
   6.146 +	u32 ier;
   6.147 +
   6.148 +	/* Disable PHY interrupts */
   6.149 +	ier = gmii->mdio_read(gmii->dev, gmii->phy_id, GMII_IER);
   6.150 +	ier &= ~IER_LINK_CHG;
   6.151 +	gmii->mdio_write(gmii->dev, gmii->phy_id, GMII_IER, ier);
   6.152 +}
   6.153 +
   6.154 +
   6.155 +struct efx_phy_operations alaska_phy_operations = {
   6.156 +	.init            = alaska_init,
   6.157 +	.fini            = alaska_fini,
   6.158 +	.reconfigure     = alaska_reconfigure,
   6.159 +	.clear_interrupt = alaska_clear_interrupt,
   6.160 +	.loopbacks       = (1 << LOOPBACK_PHY) | (1 << LOOPBACK_NETWORK),
   6.161 +	.startup_loopback = LOOPBACK_PHY,
   6.162 +};
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/drivers/net/sfc/bitfield.h	Mon Feb 18 10:29:07 2008 +0000
     7.3 @@ -0,0 +1,544 @@
     7.4 +/****************************************************************************
     7.5 + * Driver for Solarflare network controllers
     7.6 + *           (including support for SFE4001 10GBT NIC)
     7.7 + *
     7.8 + * Copyright 2005-2006: Fen Systems Ltd.
     7.9 + * Copyright 2006-2008: Solarflare Communications Inc,
    7.10 + *                      9501 Jeronimo Road, Suite 250,
    7.11 + *                      Irvine, CA 92618, USA
    7.12 + *
    7.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
    7.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
    7.15 + *
    7.16 + * This program is free software; you can redistribute it and/or modify it
    7.17 + * under the terms of the GNU General Public License version 2 as published
    7.18 + * by the Free Software Foundation, incorporated herein by reference.
    7.19 + *
    7.20 + * This program is distributed in the hope that it will be useful,
    7.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    7.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    7.23 + * GNU General Public License for more details.
    7.24 + *
    7.25 + * You should have received a copy of the GNU General Public License
    7.26 + * along with this program; if not, write to the Free Software
    7.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    7.28 + ****************************************************************************
    7.29 + */
    7.30 +
    7.31 +#ifndef EFX_BITFIELD_H
    7.32 +#define EFX_BITFIELD_H
    7.33 +
    7.34 +/*
    7.35 + * Efx bitfield access
    7.36 + *
    7.37 + * Efx NICs make extensive use of bitfields up to 128 bits
    7.38 + * wide.  Since there is no native 128-bit datatype on most systems,
    7.39 + * and since 64-bit datatypes are inefficient on 32-bit systems and
    7.40 + * vice versa, we wrap accesses in a way that uses the most efficient
    7.41 + * datatype.
    7.42 + *
    7.43 + * The NICs are PCI devices and therefore little-endian.  Since most
    7.44 + * of the quantities that we deal with are DMAed to/from host memory,
    7.45 + * we define our datatypes (efx_oword_t, efx_qword_t and
    7.46 + * efx_dword_t) to be little-endian.
    7.47 + */
    7.48 +
    7.49 +/* Lowest bit numbers and widths */
    7.50 +#define EFX_DUMMY_FIELD_LBN 0
    7.51 +#define EFX_DUMMY_FIELD_WIDTH 0
    7.52 +#define EFX_DWORD_0_LBN 0
    7.53 +#define EFX_DWORD_0_WIDTH 32
    7.54 +#define EFX_DWORD_1_LBN 32
    7.55 +#define EFX_DWORD_1_WIDTH 32
    7.56 +#define EFX_DWORD_2_LBN 64
    7.57 +#define EFX_DWORD_2_WIDTH 32
    7.58 +#define EFX_DWORD_3_LBN 96
    7.59 +#define EFX_DWORD_3_WIDTH 32
    7.60 +
    7.61 +#define EFX_BYTE  1
    7.62 +#define EFX_WORD  2
    7.63 +#define EFX_DWORD 4
    7.64 +#define EFX_OWORD 8
    7.65 +
    7.66 +/* Specified attribute (e.g. LBN) of the specified field */
    7.67 +#define EFX_VAL(field, attribute) field ## _ ## attribute
    7.68 +/* Low bit number of the specified field */
    7.69 +#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
    7.70 +/* Bit width of the specified field */
    7.71 +#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
    7.72 +/* High bit number of the specified field */
    7.73 +#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
    7.74 +/* Mask equal in width to the specified field.
    7.75 + *
    7.76 + * For example, a field with width 5 would have a mask of 0x1f.
    7.77 + *
    7.78 + * The maximum width mask that can be generated is 64 bits.
    7.79 + */
    7.80 +#define EFX_MASK64(field)					\
    7.81 +	(EFX_WIDTH(field) == 64 ? ~((u64) 0) :		\
    7.82 +	 (((((u64) 1) << EFX_WIDTH(field))) - 1))
    7.83 +
    7.84 +/* Mask equal in width to the specified field.
    7.85 + *
    7.86 + * For example, a field with width 5 would have a mask of 0x1f.
    7.87 + *
    7.88 + * The maximum width mask that can be generated is 32 bits.  Use
    7.89 + * EFX_MASK64 for higher width fields.
    7.90 + */
    7.91 +#define EFX_MASK32(field)					\
    7.92 +	(EFX_WIDTH(field) == 32 ? ~((u32) 0) :		\
    7.93 +	 (((((u32) 1) << EFX_WIDTH(field))) - 1))
    7.94 +
    7.95 +/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
    7.96 +typedef union efx_dword {
    7.97 +	__le32 u32[1];
    7.98 +} efx_dword_t;
    7.99 +
   7.100 +/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
   7.101 +typedef union efx_qword {
   7.102 +	__le64 u64[1];
   7.103 +	__le32 u32[2];
   7.104 +	efx_dword_t dword[2];
   7.105 +} efx_qword_t;
   7.106 +
   7.107 +/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
   7.108 +typedef union efx_oword {
   7.109 +	__le64 u64[2];
   7.110 +	efx_qword_t qword[2];
   7.111 +	__le32 u32[4];
   7.112 +	efx_dword_t dword[4];
   7.113 +} efx_oword_t;
   7.114 +
   7.115 +/* Format string and value expanders for printk */
   7.116 +#define EFX_DWORD_FMT "%08x"
   7.117 +#define EFX_QWORD_FMT "%08x:%08x"
   7.118 +#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
   7.119 +#define EFX_DWORD_VAL(dword)				\
   7.120 +	((unsigned int) le32_to_cpu((dword).u32[0]))
   7.121 +#define EFX_QWORD_VAL(qword)				\
   7.122 +	((unsigned int) le32_to_cpu((qword).u32[1])),	\
   7.123 +	((unsigned int) le32_to_cpu((qword).u32[0]))
   7.124 +#define EFX_OWORD_VAL(oword)				\
   7.125 +	((unsigned int) le32_to_cpu((oword).u32[3])),	\
   7.126 +	((unsigned int) le32_to_cpu((oword).u32[2])),	\
   7.127 +	((unsigned int) le32_to_cpu((oword).u32[1])),	\
   7.128 +	((unsigned int) le32_to_cpu((oword).u32[0]))
   7.129 +
   7.130 +/*
   7.131 + * Extract bit field portion [low,high) from the native-endian element
   7.132 + * which contains bits [min,max).
   7.133 + *
   7.134 + * For example, suppose "element" represents the high 32 bits of a
   7.135 + * 64-bit value, and we wish to extract the bits belonging to the bit
   7.136 + * field occupying bits 28-45 of this 64-bit value.
   7.137 + *
   7.138 + * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
   7.139 + *
   7.140 + *   ( element ) << 4
   7.141 + *
    7.142 + * The result will contain the relevant bits filled in over the range
    7.143 + * [0,high-low], with garbage in bits [high-low+1,...).
   7.144 + */
   7.145 +#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high)		\
   7.146 +	(((low > max) || (high < min)) ? 0 :				\
   7.147 +	 ((low > min) ?							\
   7.148 +	  ((native_element) >> (low - min)) :				\
   7.149 +	  ((native_element) << (min - low))))
   7.150 +
   7.151 +/*
   7.152 + * Extract bit field portion [low,high) from the 64-bit little-endian
   7.153 + * element which contains bits [min,max)
   7.154 + */
   7.155 +#define EFX_EXTRACT64(element, min, max, low, high)			\
   7.156 +	EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
   7.157 +
   7.158 +/*
   7.159 + * Extract bit field portion [low,high) from the 32-bit little-endian
   7.160 + * element which contains bits [min,max)
   7.161 + */
   7.162 +#define EFX_EXTRACT32(element, min, max, low, high)			\
   7.163 +	EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
   7.164 +
   7.165 +#define EFX_EXTRACT_OWORD64(oword, low, high)				\
   7.166 +	(EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) |		\
   7.167 +	 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high))
   7.168 +
   7.169 +#define EFX_EXTRACT_QWORD64(qword, low, high)				\
   7.170 +	EFX_EXTRACT64((qword).u64[0], 0, 63, low, high)
   7.171 +
   7.172 +#define EFX_EXTRACT_OWORD32(oword, low, high)				\
   7.173 +	(EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) |		\
   7.174 +	 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) |		\
   7.175 +	 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) |		\
   7.176 +	 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high))
   7.177 +
   7.178 +#define EFX_EXTRACT_QWORD32(qword, low, high)				\
   7.179 +	(EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) |		\
   7.180 +	 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high))
   7.181 +
   7.182 +#define EFX_EXTRACT_DWORD(dword, low, high)				\
   7.183 +	EFX_EXTRACT32((dword).u32[0], 0, 31, low, high)
   7.184 +
   7.185 +#define EFX_OWORD_FIELD64(oword, field)					\
   7.186 +	(EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
   7.187 +	 & EFX_MASK64(field))
   7.188 +
   7.189 +#define EFX_QWORD_FIELD64(qword, field)					\
   7.190 +	(EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
   7.191 +	 & EFX_MASK64(field))
   7.192 +
   7.193 +#define EFX_OWORD_FIELD32(oword, field)					\
   7.194 +	(EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
   7.195 +	 & EFX_MASK32(field))
   7.196 +
   7.197 +#define EFX_QWORD_FIELD32(qword, field)					\
   7.198 +	(EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
   7.199 +	 & EFX_MASK32(field))
   7.200 +
   7.201 +#define EFX_DWORD_FIELD(dword, field)					   \
   7.202 +	(EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
   7.203 +	 & EFX_MASK32(field))
   7.204 +
   7.205 +#define EFX_OWORD_IS_ZERO64(oword)					\
   7.206 +	(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
   7.207 +
   7.208 +#define EFX_QWORD_IS_ZERO64(qword)					\
   7.209 +	(((qword).u64[0]) == (__force __le64) 0)
   7.210 +
   7.211 +#define EFX_OWORD_IS_ZERO32(oword)					     \
   7.212 +	(((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
   7.213 +	 == (__force __le32) 0)
   7.214 +
   7.215 +#define EFX_QWORD_IS_ZERO32(qword)					\
   7.216 +	(((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
   7.217 +
   7.218 +#define EFX_DWORD_IS_ZERO(dword)					\
   7.219 +	(((dword).u32[0]) == (__force __le32) 0)
   7.220 +
   7.221 +#define EFX_OWORD_IS_ALL_ONES64(oword)					\
   7.222 +	(((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
   7.223 +
   7.224 +#define EFX_QWORD_IS_ALL_ONES64(qword)					\
   7.225 +	((qword).u64[0] == ~((__force __le64) 0))
   7.226 +
   7.227 +#define EFX_OWORD_IS_ALL_ONES32(oword)					\
   7.228 +	(((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
   7.229 +	 == ~((__force __le32) 0))
   7.230 +
   7.231 +#define EFX_QWORD_IS_ALL_ONES32(qword)					\
   7.232 +	(((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
   7.233 +
   7.234 +#define EFX_DWORD_IS_ALL_ONES(dword)					\
   7.235 +	((dword).u32[0] == ~((__force __le32) 0))
   7.236 +
   7.237 +#if BITS_PER_LONG == 64
   7.238 +#define EFX_OWORD_FIELD		EFX_OWORD_FIELD64
   7.239 +#define EFX_QWORD_FIELD		EFX_QWORD_FIELD64
   7.240 +#define EFX_OWORD_IS_ZERO	EFX_OWORD_IS_ZERO64
   7.241 +#define EFX_QWORD_IS_ZERO	EFX_QWORD_IS_ZERO64
   7.242 +#define EFX_OWORD_IS_ALL_ONES	EFX_OWORD_IS_ALL_ONES64
   7.243 +#define EFX_QWORD_IS_ALL_ONES	EFX_QWORD_IS_ALL_ONES64
   7.244 +#else
   7.245 +#define EFX_OWORD_FIELD		EFX_OWORD_FIELD32
   7.246 +#define EFX_QWORD_FIELD		EFX_QWORD_FIELD32
   7.247 +#define EFX_OWORD_IS_ZERO	EFX_OWORD_IS_ZERO32
   7.248 +#define EFX_QWORD_IS_ZERO	EFX_QWORD_IS_ZERO32
   7.249 +#define EFX_OWORD_IS_ALL_ONES	EFX_OWORD_IS_ALL_ONES32
   7.250 +#define EFX_QWORD_IS_ALL_ONES	EFX_QWORD_IS_ALL_ONES32
   7.251 +#endif
   7.252 +
   7.253 +/*
   7.254 + * Construct bit field portion
   7.255 + *
   7.256 + * Creates the portion of the bit field [low,high) that lies within
   7.257 + * the range [min,max).
   7.258 + */
   7.259 +#define EFX_INSERT_NATIVE64(min, max, low, high, value)		\
   7.260 +	(((low > max) || (high < min)) ? 0 :			\
   7.261 +	 ((low > min) ?						\
   7.262 +	  (((u64) (value)) << (low - min)) :		\
   7.263 +	  (((u64) (value)) >> (min - low))))
   7.264 +
   7.265 +#define EFX_INSERT_NATIVE32(min, max, low, high, value)		\
   7.266 +	(((low > max) || (high < min)) ? 0 :			\
   7.267 +	 ((low > min) ?						\
   7.268 +	  (((u32) (value)) << (low - min)) :		\
   7.269 +	  (((u32) (value)) >> (min - low))))
   7.270 +
   7.271 +#define EFX_INSERT_NATIVE(min, max, low, high, value)		\
   7.272 +	((((max - min) >= 32) || ((high - low) >= 32)) ?	\
   7.273 +	 EFX_INSERT_NATIVE64(min, max, low, high, value) :	\
   7.274 +	 EFX_INSERT_NATIVE32(min, max, low, high, value))
   7.275 +
   7.276 +/*
   7.277 + * Construct bit field portion
   7.278 + *
   7.279 + * Creates the portion of the named bit field that lies within the
   7.280 + * range [min,max).
   7.281 + */
   7.282 +#define EFX_INSERT_FIELD_NATIVE(min, max, field, value)		\
   7.283 +	EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field),		\
   7.284 +			  EFX_HIGH_BIT(field), value)
   7.285 +
   7.286 +/*
   7.287 + * Construct bit field
   7.288 + *
   7.289 + * Creates the portion of the named bit fields that lie within the
   7.290 + * range [min,max).
   7.291 + */
   7.292 +#define EFX_INSERT_FIELDS_NATIVE(min, max,				\
   7.293 +				 field1, value1,			\
   7.294 +				 field2, value2,			\
   7.295 +				 field3, value3,			\
   7.296 +				 field4, value4,			\
   7.297 +				 field5, value5,			\
   7.298 +				 field6, value6,			\
   7.299 +				 field7, value7,			\
   7.300 +				 field8, value8,			\
   7.301 +				 field9, value9,			\
   7.302 +				 field10, value10)			\
   7.303 +	(EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) |	\
   7.304 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) |	\
   7.305 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) |	\
   7.306 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) |	\
   7.307 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) |	\
   7.308 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) |	\
   7.309 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) |	\
   7.310 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) |	\
   7.311 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) |	\
   7.312 +	 EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
   7.313 +
   7.314 +#define EFX_INSERT_FIELDS64(...)				\
   7.315 +	cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
   7.316 +
   7.317 +#define EFX_INSERT_FIELDS32(...)				\
   7.318 +	cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
   7.319 +
   7.320 +#define EFX_POPULATE_OWORD64(oword, ...) do {				\
   7.321 +	(oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__);	\
   7.322 +	(oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__);	\
   7.323 +	} while (0)
   7.324 +
   7.325 +#define EFX_POPULATE_QWORD64(qword, ...) do {				\
   7.326 +	(qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__);	\
   7.327 +	} while (0)
   7.328 +
   7.329 +#define EFX_POPULATE_OWORD32(oword, ...) do {				\
   7.330 +	(oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);	\
   7.331 +	(oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__);	\
   7.332 +	(oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__);	\
   7.333 +	(oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__);	\
   7.334 +	} while (0)
   7.335 +
   7.336 +#define EFX_POPULATE_QWORD32(qword, ...) do {				\
   7.337 +	(qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);	\
   7.338 +	(qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__);	\
   7.339 +	} while (0)
   7.340 +
   7.341 +#define EFX_POPULATE_DWORD(dword, ...) do {				\
   7.342 +	(dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);	\
   7.343 +	} while (0)
   7.344 +
   7.345 +#if BITS_PER_LONG == 64
   7.346 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
   7.347 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
   7.348 +#else
   7.349 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
   7.350 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
   7.351 +#endif
   7.352 +
   7.353 +/* Populate an octword field with various numbers of arguments */
   7.354 +#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
   7.355 +#define EFX_POPULATE_OWORD_9(oword, ...) \
   7.356 +	EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.357 +#define EFX_POPULATE_OWORD_8(oword, ...) \
   7.358 +	EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.359 +#define EFX_POPULATE_OWORD_7(oword, ...) \
   7.360 +	EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.361 +#define EFX_POPULATE_OWORD_6(oword, ...) \
   7.362 +	EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.363 +#define EFX_POPULATE_OWORD_5(oword, ...) \
   7.364 +	EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.365 +#define EFX_POPULATE_OWORD_4(oword, ...) \
   7.366 +	EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.367 +#define EFX_POPULATE_OWORD_3(oword, ...) \
   7.368 +	EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.369 +#define EFX_POPULATE_OWORD_2(oword, ...) \
   7.370 +	EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.371 +#define EFX_POPULATE_OWORD_1(oword, ...) \
   7.372 +	EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.373 +#define EFX_ZERO_OWORD(oword) \
   7.374 +	EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
   7.375 +#define EFX_SET_OWORD(oword) \
   7.376 +	EFX_POPULATE_OWORD_4(oword, \
   7.377 +			     EFX_DWORD_0, 0xffffffff, \
   7.378 +			     EFX_DWORD_1, 0xffffffff, \
   7.379 +			     EFX_DWORD_2, 0xffffffff, \
   7.380 +			     EFX_DWORD_3, 0xffffffff)
   7.381 +
   7.382 +/* Populate a quadword field with various numbers of arguments */
   7.383 +#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
   7.384 +#define EFX_POPULATE_QWORD_9(qword, ...) \
   7.385 +	EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.386 +#define EFX_POPULATE_QWORD_8(qword, ...) \
   7.387 +	EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.388 +#define EFX_POPULATE_QWORD_7(qword, ...) \
   7.389 +	EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.390 +#define EFX_POPULATE_QWORD_6(qword, ...) \
   7.391 +	EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.392 +#define EFX_POPULATE_QWORD_5(qword, ...) \
   7.393 +	EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.394 +#define EFX_POPULATE_QWORD_4(qword, ...) \
   7.395 +	EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.396 +#define EFX_POPULATE_QWORD_3(qword, ...) \
   7.397 +	EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.398 +#define EFX_POPULATE_QWORD_2(qword, ...) \
   7.399 +	EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.400 +#define EFX_POPULATE_QWORD_1(qword, ...) \
   7.401 +	EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.402 +#define EFX_ZERO_QWORD(qword) \
   7.403 +	EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
   7.404 +#define EFX_SET_QWORD(qword) \
   7.405 +	EFX_POPULATE_QWORD_2(qword, \
   7.406 +			     EFX_DWORD_0, 0xffffffff, \
   7.407 +			     EFX_DWORD_1, 0xffffffff)
   7.408 +
   7.409 +/* Populate a dword field with various numbers of arguments */
   7.410 +#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
   7.411 +#define EFX_POPULATE_DWORD_9(dword, ...) \
   7.412 +	EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.413 +#define EFX_POPULATE_DWORD_8(dword, ...) \
   7.414 +	EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.415 +#define EFX_POPULATE_DWORD_7(dword, ...) \
   7.416 +	EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.417 +#define EFX_POPULATE_DWORD_6(dword, ...) \
   7.418 +	EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.419 +#define EFX_POPULATE_DWORD_5(dword, ...) \
   7.420 +	EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.421 +#define EFX_POPULATE_DWORD_4(dword, ...) \
   7.422 +	EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.423 +#define EFX_POPULATE_DWORD_3(dword, ...) \
   7.424 +	EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.425 +#define EFX_POPULATE_DWORD_2(dword, ...) \
   7.426 +	EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.427 +#define EFX_POPULATE_DWORD_1(dword, ...) \
   7.428 +	EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
   7.429 +#define EFX_ZERO_DWORD(dword) \
   7.430 +	EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
   7.431 +#define EFX_SET_DWORD(dword) \
   7.432 +	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
   7.433 +
   7.434 +/*
   7.435 + * Modify a named field within an already-populated structure.  Used
   7.436 + * for read-modify-write operations.
   7.437 + *
   7.438 + */
   7.439 +
   7.440 +#define EFX_INVERT_OWORD(oword) do {		\
   7.441 +	(oword).u64[0] = ~((oword).u64[0]);	\
   7.442 +	(oword).u64[1] = ~((oword).u64[1]);	\
   7.443 +	} while (0)
   7.444 +
   7.445 +#define EFX_INSERT_FIELD64(...)					\
   7.446 +	cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
   7.447 +
   7.448 +#define EFX_INSERT_FIELD32(...)					\
   7.449 +	cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
   7.450 +
   7.451 +#define EFX_INPLACE_MASK64(min, max, field)			\
   7.452 +	EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field))
   7.453 +
   7.454 +#define EFX_INPLACE_MASK32(min, max, field)			\
   7.455 +	EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field))
   7.456 +
   7.457 +#define EFX_SET_OWORD_FIELD64(oword, field, value) do {			\
   7.458 +	(oword).u64[0] = (((oword).u64[0] 				\
   7.459 +			   & ~EFX_INPLACE_MASK64(0,  63, field))	\
   7.460 +			  | EFX_INSERT_FIELD64(0,  63, field, value));  \
   7.461 +	(oword).u64[1] = (((oword).u64[1] 				\
   7.462 +			   & ~EFX_INPLACE_MASK64(64, 127, field))	\
   7.463 +			  | EFX_INSERT_FIELD64(64, 127, field, value)); \
   7.464 +	} while (0)
   7.465 +
   7.466 +#define EFX_SET_QWORD_FIELD64(qword, field, value) do {			\
   7.467 +	(qword).u64[0] = (((qword).u64[0] 				\
   7.468 +			   & ~EFX_INPLACE_MASK64(0, 63, field))		\
   7.469 +			  | EFX_INSERT_FIELD64(0, 63, field, value));	\
   7.470 +	} while (0)
   7.471 +
   7.472 +#define EFX_SET_OWORD_FIELD32(oword, field, value) do {			\
   7.473 +	(oword).u32[0] = (((oword).u32[0] 				\
   7.474 +			   & ~EFX_INPLACE_MASK32(0, 31, field))		\
   7.475 +			  | EFX_INSERT_FIELD32(0, 31, field, value));	\
   7.476 +	(oword).u32[1] = (((oword).u32[1] 				\
   7.477 +			   & ~EFX_INPLACE_MASK32(32, 63, field))	\
   7.478 +			  | EFX_INSERT_FIELD32(32, 63, field, value));	\
   7.479 +	(oword).u32[2] = (((oword).u32[2] 				\
   7.480 +			   & ~EFX_INPLACE_MASK32(64, 95, field))	\
   7.481 +			  | EFX_INSERT_FIELD32(64, 95, field, value));	\
   7.482 +	(oword).u32[3] = (((oword).u32[3] 				\
   7.483 +			   & ~EFX_INPLACE_MASK32(96, 127, field))	\
   7.484 +			  | EFX_INSERT_FIELD32(96, 127, field, value));	\
   7.485 +	} while (0)
   7.486 +
   7.487 +#define EFX_SET_QWORD_FIELD32(qword, field, value) do {			\
   7.488 +	(qword).u32[0] = (((qword).u32[0] 				\
   7.489 +			   & ~EFX_INPLACE_MASK32(0, 31, field))		\
   7.490 +			  | EFX_INSERT_FIELD32(0, 31, field, value));	\
   7.491 +	(qword).u32[1] = (((qword).u32[1] 				\
   7.492 +			   & ~EFX_INPLACE_MASK32(32, 63, field))	\
   7.493 +			  | EFX_INSERT_FIELD32(32, 63, field, value));	\
   7.494 +	} while (0)
   7.495 +
   7.496 +#define EFX_SET_DWORD_FIELD(dword, field, value) do {			\
   7.497 +	(dword).u32[0] = (((dword).u32[0] 				\
   7.498 +			   & ~EFX_INPLACE_MASK32(0, 31, field))		\
   7.499 +			  | EFX_INSERT_FIELD32(0, 31, field, value));	\
   7.500 +	} while (0)
   7.501 +
   7.502 +#if BITS_PER_LONG == 64
   7.503 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
   7.504 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
   7.505 +#else
   7.506 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
   7.507 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
   7.508 +#endif
   7.509 +
   7.510 +#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
   7.511 +	if (FALCON_REV(efx) == FALCON_REV_B0) {			   \
   7.512 +		EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
   7.513 +	} else { \
   7.514 +		EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
   7.515 +	} \
   7.516 +} while (0)
   7.517 +
   7.518 +#define EFX_QWORD_FIELD_VER(efx, qword, field)	\
   7.519 +	(FALCON_REV(efx) == FALCON_REV_B0 ?	\
   7.520 +	 EFX_QWORD_FIELD((qword), field##_B0) :	\
   7.521 +	 EFX_QWORD_FIELD((qword), field##_A1))
   7.522 +
   7.523 +/* Used to avoid compiler warnings about shift range exceeding width
   7.524 + * of the data types when dma_addr_t is only 32 bits wide.
   7.525 + */
   7.526 +#define DMA_ADDR_T_WIDTH	(8 * sizeof(dma_addr_t))
   7.527 +#define EFX_DMA_TYPE_WIDTH(width) \
   7.528 +	(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
   7.529 +#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
   7.530 +			  ~((u64) 0) : ~((u32) 0))
   7.531 +#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
   7.532 +
   7.533 +/*
   7.534 + * Determine if a DMA address is over the 4GB threshold
   7.535 + *
   7.536 + * Defined in a slightly tortuous way to avoid compiler warnings.
   7.537 + */
   7.538 +static inline int efx_is_over_4gb(dma_addr_t address)
   7.539 +{
   7.540 +	if (DMA_ADDR_T_WIDTH > 32)
   7.541 +		return (((u64) address) >> 32) ? 1 : 0;
   7.542 +	else
   7.543 +		/* Can never be true */
   7.544 +		return 0;
   7.545 +}
   7.546 +
   7.547 +#endif /* EFX_BITFIELD_H */
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/drivers/net/sfc/boards.c	Mon Feb 18 10:29:07 2008 +0000
     8.3 @@ -0,0 +1,528 @@
     8.4 +/****************************************************************************
     8.5 + * Driver for Solarflare network controllers
     8.6 + *           (including support for SFE4001 10GBT NIC)
     8.7 + *
     8.8 + * Copyright 2007:      Solarflare Communications Inc,
     8.9 + *                      9501 Jeronimo Road, Suite 250,
    8.10 + *                      Irvine, CA 92618, USA
    8.11 + *
    8.12 + * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
    8.13 + *
    8.14 + * This program is free software; you can redistribute it and/or modify it
    8.15 + * under the terms of the GNU General Public License version 2 as published
    8.16 + * by the Free Software Foundation, incorporated herein by reference.
    8.17 + *
    8.18 + * This program is distributed in the hope that it will be useful,
    8.19 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    8.20 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    8.21 + * GNU General Public License for more details.
    8.22 + *
    8.23 + * You should have received a copy of the GNU General Public License
    8.24 + * along with this program; if not, write to the Free Software
    8.25 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    8.26 + ****************************************************************************
    8.27 + */
    8.28 +
    8.29 +#include "net_driver.h"
    8.30 +#include "phy.h"
    8.31 +#include "lm87_support.h"
    8.32 +#include "boards.h"
    8.33 +#include "efx.h"
    8.34 +
    8.35 +/* Macros for unpacking the board revision */
    8.36 +/* The revision info is in host byte order. */
    8.37 +#define BOARD_TYPE(_rev) (_rev >> 8)
    8.38 +#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
    8.39 +#define BOARD_MINOR(_rev) (_rev & 0xf)
    8.40 +
    8.41 +/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
    8.42 +#define BLINK_INTERVAL (HZ/2)
    8.43 +
    8.44 +static void blink_led_timer(unsigned long context)
    8.45 +{
    8.46 +	struct efx_nic *efx = (struct efx_nic *)context;
    8.47 +	struct efx_blinker *bl = &efx->board_info.blinker;
    8.48 +	efx->board_info.set_fault_led(efx, bl->state);
    8.49 +	bl->state = !bl->state;
    8.50 +	if (bl->resubmit) {
    8.51 +		bl->timer.expires = jiffies + BLINK_INTERVAL;
    8.52 +		add_timer(&bl->timer);
    8.53 +	}
    8.54 +}
    8.55 +
    8.56 +static void board_blink(struct efx_nic *efx, int blink)
    8.57 +{
    8.58 +	struct efx_blinker *blinker = &efx->board_info.blinker;
    8.59 +
    8.60 +	/* The rtnl mutex serialises all ethtool ioctls, so
    8.61 +	 * nothing special needs doing here. */
    8.62 +	if (blink) {
    8.63 +		blinker->resubmit = 1;
    8.64 +		blinker->state = 0;
    8.65 +		setup_timer(&blinker->timer, blink_led_timer,
    8.66 +			    (unsigned long)efx);
    8.67 +		blinker->timer.expires = jiffies + BLINK_INTERVAL;
    8.68 +		add_timer(&blinker->timer);
    8.69 +	} else {
    8.70 +		blinker->resubmit = 0;
    8.71 +		if (blinker->timer.function)
    8.72 +			del_timer_sync(&blinker->timer);
    8.73 +		efx->board_info.set_fault_led(efx, 0);
    8.74 +	}
    8.75 +}
    8.76 +
    8.77 +
    8.78 +struct sensor_conf {
    8.79 +	const char *name;
    8.80 +	const unsigned high;
    8.81 +	const unsigned low;
    8.82 +};
    8.83 +
    8.84 +#define NO_LIMIT	((unsigned)-1)
    8.85 +
    8.86 +#define LM87_SENSOR_BYTES	(18)
    8.87 +
    8.88 +static int sensor_limits_to_bytes(const struct sensor_conf *limits,
    8.89 +				  int nlimits, u8 *bytes, int maxbytes)
    8.90 +{
    8.91 +	int i, nbytes;
    8.92 +	nbytes = 0;
    8.93 +	for (i = 0; i < nlimits; i++) {
    8.94 +		bytes[nbytes++] = limits[i].high;
    8.95 +		if (limits[i].low != NO_LIMIT)
    8.96 +			bytes[nbytes++] = limits[i].low;
    8.97 +		/* We may have overrun by one at this point, but this test
    8.98 +		 * should only trigger in development drivers as the sizes
    8.99 +		 * are not dynamic. */
   8.100 +		if (nbytes > maxbytes) {
   8.101 +			printk(KERN_ERR "%s: out of space!\n", __func__);
   8.102 +			break;
   8.103 +		}
   8.104 +	}
   8.105 +	return nbytes;
   8.106 +}
   8.107 +
   8.108 +/*****************************************************************************
   8.109 + * Support for the SFE4002
   8.110 + *
   8.111 + */
   8.112 +/* LM87 configuration data for the sensor on the SFE4002 board */
   8.113 +static const struct sensor_conf sfe4002_lm87_limits[] = {
   8.114 +	{"1.8V line", 0x91, 0x83},	/* 2.5V sensor, scaled for 1.8V */
   8.115 +	{"1.2V line", 0x5a, 0x51},	/* Vccp1 */
   8.116 +	{"3.3V line", 0xca, 0xb6},
   8.117 +	{"5V line", 0xc9, 0xb6},
   8.118 +	{"12V line", 0xe0, 0xb0},
   8.119 +	{"1V line", 0x4b, 0x44},	/* vccp2 */
   8.120 +	{"Ext. temp.", 0x46, 0x0a},	/* ASIC temp. */
   8.121 +	{"Int. temp.", 0x3c, 0x0a},	/* Board temp. */
   8.122 +	{"1.66V line", 0xb2, NO_LIMIT},	/* AIN1 only takes 1 value */
   8.123 +	{"1.5V line", 0xa1, NO_LIMIT}	/* AIN2 only takes 1 value */
   8.124 +};
   8.125 +
   8.126 +static const int sfe4002_lm87_nlimits = ARRAY_SIZE(sfe4002_lm87_limits);
   8.127 +
   8.128 +static u16 sfe4002_lm87_irq_mask = EFX_LM87_NO_INTS;
   8.129 +
   8.130 +/* I2C ID of the onboard LM87 chip. This is board-specific as the bottom two
   8.131 + * bits are set by strap pins */
   8.132 +#define SFE4002_LM87_I2C_ID (0x2e)
   8.133 +
   8.134 +/****************************************************************************/
   8.135 +/* LED allocations. Note that on rev A0 boards the schematic and the reality
   8.136 + * differ: red and green are swapped. Below is the fixed (A1) layout (there
   8.137 + * are only 3 A0 boards in existence, so no real reason to make this
   8.138 + * conditional).
   8.139 + */
   8.140 +#define SFE4002_FAULT_LED (2)	/* Red */
   8.141 +#define SFE4002_RX_LED    (0)	/* Green */
   8.142 +#define SFE4002_TX_LED    (1)	/* Amber */
   8.143 +
   8.144 +static int sfe4002_init_leds(struct efx_nic *efx)
   8.145 +{
   8.146 +	/* Set the TX and RX LEDs to reflect status and activity, and the
   8.147 +	 * fault LED off */
   8.148 +	xfp_set_led(efx, SFE4002_TX_LED,
   8.149 +		    QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
   8.150 +	xfp_set_led(efx, SFE4002_RX_LED,
   8.151 +		    QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
   8.152 +	xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
   8.153 +	efx->board_info.blinker.led_num = SFE4002_FAULT_LED;
   8.154 +	return 0;
   8.155 +}
   8.156 +
   8.157 +static void sfe4002_fault_led(struct efx_nic *efx, int state)
   8.158 +{
   8.159 +	xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
   8.160 +			QUAKE_LED_OFF);
   8.161 +}
   8.162 +
   8.163 +static int sfe4002_sensor_meaning(struct efx_nic *efx, int limit_num,
   8.164 +				  unsigned val)
   8.165 +{
   8.166 +	const struct sensor_conf *lim = &sfe4002_lm87_limits[limit_num];
   8.167 +	if (lim->low == NO_LIMIT)
   8.168 +		EFX_ERR(efx, "%10s  0x%02x (nominal value 0x%02x)\n", lim->name,
   8.169 +			val, lim->high);
   8.170 +	else
   8.171 +		EFX_ERR(efx, "%10s  0x%02x (nominal range 0x%02x - 0x%02x)\n",
   8.172 +			lim->name, val, lim->high, lim->low);
   8.173 +	return 1;
   8.174 +}
   8.175 +
   8.176 +static int sfe4002_check_hw(struct efx_nic *efx)
   8.177 +{
   8.178 +	int rc;
   8.179 +
   8.180 +	/* A0 board rev. 4002s  report a temperature fault the whole time
   8.181 +	 * (bad sensor) so we mask it out. */
   8.182 +	unsigned alarm_mask = (efx->board_info.minor > 0) ?
   8.183 +		0 : ~EFX_LM87_ETMP_INT;
   8.184 +
   8.185 +	/* Check the sensor (NOP if not present). */
   8.186 +	rc = efx_check_lm87(efx, alarm_mask);
   8.187 +
   8.188 +	/* We treat both lm87 interrupts and failure to talk to the lm87
   8.189 +	 * as problems (since failure will only be reported if we did
   8.190 +	 * find the sensor at probe time. */
   8.191 +	if (rc)
   8.192 +		EFX_ERR(efx, "sensor alert!\n");
   8.193 +	return rc;
   8.194 +}
   8.195 +
   8.196 +static int sfe4002_init(struct efx_nic *efx)
   8.197 +{
   8.198 +	u8 lm87_bytes[LM87_SENSOR_BYTES];
   8.199 +	int nbytes;
   8.200 +	int rc;
   8.201 +
   8.202 +	efx->board_info.monitor = sfe4002_check_hw;
   8.203 +	efx->board_info.interpret_sensor = sfe4002_sensor_meaning;
   8.204 +	efx->board_info.init_leds = sfe4002_init_leds;
   8.205 +	efx->board_info.set_fault_led = sfe4002_fault_led;
   8.206 +	efx->board_info.blink = board_blink;
   8.207 +	/* To clean up shut down the lm87 (NOP if not present) */
   8.208 +	efx->board_info.fini = efx_remove_lm87;
   8.209 +
   8.210 +	nbytes = sensor_limits_to_bytes(sfe4002_lm87_limits,
   8.211 +					sfe4002_lm87_nlimits, lm87_bytes,
   8.212 +					LM87_SENSOR_BYTES);
   8.213 +
   8.214 +	/* Activate the lm87 sensor if present (succeeds if nothing there) */
   8.215 +	rc = efx_probe_lm87(efx, SFE4002_LM87_I2C_ID,
   8.216 +			    lm87_bytes, nbytes, sfe4002_lm87_irq_mask);
   8.217 +
   8.218 +	return rc;
   8.219 +}
   8.220 +
   8.221 +/*****************************************************************************
   8.222 + * Support for the SFE4003
   8.223 + *
   8.224 + */
   8.225 +/* LM87 configuration data for the sensor on the SFE4003 board */
   8.226 +static const struct sensor_conf sfe4003_lm87_limits[] = {
   8.227 +	{"1.5V line", 0x78, 0x6d},	/* 2.5V input, values scaled for 1.5V */
   8.228 +	{"1.2V line", 0x5a, 0x51},	/* Vccp1 */
   8.229 +	{"3.3V line", 0xca, 0xb6},
   8.230 +	{"5V line", 0xc0, 0x00},	/* Sensor not connected. */
   8.231 +	{"12V line", 0xe0, 0xb0},
   8.232 +	{"1V line", 0x4b, 0x44},	/* Vccp2 */
   8.233 +	{"Ext. temp.", 0x46, 0x0a},	/* ASIC temp. */
   8.234 +	{"Int. temp.", 0x3c, 0x0a},	/* Board temp. */
   8.235 +	{"", 0xff, NO_LIMIT},		/* FAN1/AIN1 unused */
   8.236 +	{"", 0xff, NO_LIMIT}		/* FAN2/AIN2 unused */
   8.237 +};
   8.238 +
   8.239 +static const int sfe4003_lm87_nlimits = ARRAY_SIZE(sfe4003_lm87_limits);
   8.240 +
   8.241 +static u16 sfe4003_lm87_irq_mask = EFX_LM87_NO_INTS;
   8.242 +
   8.243 +
   8.244 +static int sfe4003_sensor_meaning(struct efx_nic *efx, int limit_num,
   8.245 +				  unsigned val)
   8.246 +{
   8.247 +	const struct sensor_conf *lim = &sfe4003_lm87_limits[limit_num];
   8.248 +	if (lim->low == NO_LIMIT)
   8.249 +		return 0; /* Neither AIN1 nor AIN2 mean anything to us */
   8.250 +	else
   8.251 +		EFX_ERR(efx, "%10s  0x%02x (nominal range 0x%02x - 0x%02x)\n",
   8.252 +			lim->name, val, lim->high, lim->low);
   8.253 +	return 1;
   8.254 +}
   8.255 +
   8.256 +/* I2C ID of the onboard LM87 chip. This is board-specific as the bottom two
   8.257 + * bits are set by strap pins */
   8.258 +#define SFE4003_LM87_I2C_ID (0x2e)
   8.259 +
   8.260 +/* Board-specific LED info. */
   8.261 +#define SFE4003_RED_LED_GPIO	(11)
   8.262 +#define SFE4003_LED_ON		(1)
   8.263 +#define SFE4003_LED_OFF		(0)
   8.264 +
   8.265 +static void sfe4003_fault_led(struct efx_nic *efx, int state)
   8.266 +{
   8.267 +	/* The LEDs were not wired to GPIOs before A3 */
   8.268 +	if (efx->board_info.minor < 3 && efx->board_info.major == 0)
   8.269 +		return;
   8.270 +
   8.271 +	txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO,
   8.272 +			 state ? SFE4003_LED_ON : SFE4003_LED_OFF);
   8.273 +}
   8.274 +
   8.275 +static int sfe4003_init_leds(struct efx_nic *efx)
   8.276 +{
   8.277 +	/* The LEDs were not wired to GPIOs before A3 */
   8.278 +	if (efx->board_info.minor < 3 && efx->board_info.major == 0)
   8.279 +		return 0;
   8.280 +
   8.281 +	txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
   8.282 +	txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
   8.283 +	return 0;
   8.284 +}
   8.285 +
   8.286 +static int sfe4003_check_hw(struct efx_nic *efx)
   8.287 +{
   8.288 +	int rc;
   8.289 +	/* A0/A1/A2 board rev. 4003s  report a temperature fault the whole time
   8.290 +	 * (bad sensor) so we mask it out. */
   8.291 +	unsigned alarm_mask =
   8.292 +		~(EFX_LM87_ETMP_INT | EFX_LM87_FAN1_INT | EFX_LM87_FAN2_INT);
   8.293 +
   8.294 +	/* Check the sensor (NOP if not present). */
   8.295 +
   8.296 +	rc = efx_check_lm87(efx, alarm_mask);
   8.297 +	/* We treat both lm87 interrupts and failure to talk to the lm87
   8.298 +	 * as problems (since failure will only be reported if we did
   8.299 +	 * find the sensor at probe time. */
   8.300 +	if (rc)
   8.301 +		EFX_ERR(efx, "sensor alert!\n");
   8.302 +
   8.303 +	return rc;
   8.304 +}
   8.305 +
   8.306 +static int sfe4003_init(struct efx_nic *efx)
   8.307 +{
   8.308 +	u8 lm87_bytes[LM87_SENSOR_BYTES];
   8.309 +	int nbytes;
   8.310 +	int rc;
   8.311 +	efx->board_info.monitor = sfe4003_check_hw;
   8.312 +	efx->board_info.interpret_sensor = sfe4003_sensor_meaning;
   8.313 +	efx->board_info.init_leds = sfe4003_init_leds;
   8.314 +	efx->board_info.set_fault_led = sfe4003_fault_led;
   8.315 +	efx->board_info.blink = board_blink;
   8.316 +	/* To clean up shut down the lm87 (NOP if not present) */
   8.317 +	efx->board_info.fini = efx_remove_lm87;
   8.318 +
   8.319 +	nbytes = sensor_limits_to_bytes(sfe4003_lm87_limits,
   8.320 +					sfe4003_lm87_nlimits, lm87_bytes,
   8.321 +					LM87_SENSOR_BYTES);
   8.322 +
   8.323 +	/* Activate the lm87 sensor if present (succeeds if nothing there) */
   8.324 +	rc = efx_probe_lm87(efx, SFE4003_LM87_I2C_ID,
   8.325 +			    lm87_bytes, nbytes, sfe4003_lm87_irq_mask);
   8.326 +
   8.327 +	if (rc < 0)
   8.328 +		EFX_ERR(efx, "Temperature sensor probe failure: "
   8.329 +			"please check the jumper position\n");
   8.330 +	return rc;
   8.331 +}
   8.332 +
   8.333 +/*****************************************************************************
   8.334 + * Support for the SFE4005
   8.335 + *
   8.336 + */
   8.337 +/* LM87 configuration data for the sensor on the SFE4005 board */
   8.338 +static const u8 sfe4005_lm87_limits[] = {
   8.339 +	0x51, /* 2.5V high lim. (actually monitor 1.0V line, so 1050mV)  */
   8.340 +	0x49, /* 2.5V low lim. (950mV) */
   8.341 +	0xf6, /* Vccp1 high lim. (3.3V rail, 3465 mV) */
   8.342 +	0xde, /* Vcpp1 low lim. (3.3V rail, 3135 mV) */
   8.343 +	0xca, /* 3.3V AUX high lim. (3465 mV)  */
   8.344 +	0xb6, /* 3.3V AUX low lim. (3135mV) */
   8.345 +	0xc0, /* 5V high lim. not connected) */
   8.346 +	0x00, /* 5V low lim. (not connected) */
   8.347 +	0xd0, /* 12V high lim. (13000mV) */
   8.348 +	0xb0, /* 12V low lim. (11000mV) */
   8.349 +	0xc0, /* Vccp2 high lim. (unused) */
   8.350 +	0x00, /* Vccp2 low lim. (unused) */
   8.351 +	0x46, /* Ext temp 1 (ASIC) high lim. */
   8.352 +	0x0a, /* Ext temp 1 low lim. */
   8.353 +	0x3c, /* Int temp (board) high lim. */
   8.354 +	0x0a, /* Int temp 1 low lim. */
   8.355 +	0xff, /* Fan 1 high (unused) */
   8.356 +	0xff, /* Fan 2 high (unused) */
   8.357 +};
   8.358 +
   8.359 +#define SFE4005_LM87_I2C_ID (0x2e)
   8.360 +
   8.361 +/* Until the LM87 monitoring is interrupt driven. */
   8.362 +#define SFE4005_LM87_IRQMASK	EFX_LM87_NO_INTS
   8.363 +
   8.364 +#define SFE4005_PCF8575_I2C_ID	(0x20)
   8.365 +/* Definitions for the I/O expander that controls the CX4 chip:
   8.366 + * which PCF8575 pin maps to which function */
   8.367 +#define SFE4005_PORT0_EXTLOOP	(1 << 0)
   8.368 +#define SFE4005_PORT1_EXTLOOP	(1 << 1)
   8.369 +#define SFE4005_HOSTPROT_LOOP	(1 << 2)
   8.370 +#define SFE4005_BCAST		(1 << 3) /* TX on both ports */
   8.371 +#define SFE4005_PORT0_EQ	(1 << 4)
   8.372 +#define SFE4005_PORT1_EQ	(1 << 5)
   8.373 +#define SFE4005_HOSTPORT_EQ	(1 << 6)
   8.374 +#define	SFE4005_PORTSEL		(1 << 7) /* Which port (for RX in BCAST mode) */
   8.375 +#define SFE4005_PORT0_PRE_LBN	(8)      /* Preemphasis on port 0 (2 bits)*/
   8.376 +#define SFE4005_PORT1_PRE_LBN	(10)     /* Preemphasis on port 1 (2 bits)*/
   8.377 +#define SFE4005_HOSTPORT_PRE_LBN (12)    /* Preemphasis on host port (2 bits) */
   8.378 +#define SFE4005_UNUSED		(1 << 14)
   8.379 +#define SFE4005_CX4uC_nRESET	(1 << 15) /* Reset the controller on CX4 chip */
   8.380 +
   8.381 +
   8.382 +/* By default only turn on host port EQ. Can also OR in SFE4005_PORT0_EQ,
   8.383 + * SFE4005_PORT1_EQ but this hasn't been seen to make a difference. */
   8.384 +#define SFE4005_CX4_DEFAULTS (SFE4005_CX4uC_nRESET | SFE4005_HOSTPORT_EQ)
   8.385 +
   8.386 +static int sfe4005_write_ioexpander(struct efx_nic *efx)
   8.387 +{
   8.388 +	unsigned long iobits = (unsigned long)efx->phy_data;
   8.389 +	struct efx_i2c_interface *i2c = &efx->i2c;
   8.390 +	u8 send[2], check[2];
   8.391 +	int rc;
   8.392 +	/* Do not, EVER, deassert nRESET as that will reset Falcon too,
   8.393 +	 * and the driver won't know to repush the configuration, so
   8.394 +	 * nothing will work until the next power cycle. */
   8.395 +	BUG_ON(!(iobits & SFE4005_CX4uC_nRESET));
   8.396 +	send[0] = (iobits & 0xff);
   8.397 +	send[1] = ((iobits >> 8) & 0xff);
   8.398 +	rc = efx_i2c_send_bytes(i2c, SFE4005_PCF8575_I2C_ID, send, 2);
   8.399 +	if (rc) {
   8.400 +		EFX_ERR(efx, "failed to write to I/O expander: %d\n", rc);
   8.401 +		return rc;
   8.402 +	}
   8.403 +	/* Paranoia: just check what the I/O expander reads back */
   8.404 +	rc = efx_i2c_recv_bytes(i2c, SFE4005_PCF8575_I2C_ID, check, 2);
   8.405 +	if (rc)
   8.406 +		EFX_ERR(efx, "failed to read back from I/O expander: %d\n", rc);
   8.407 +	else if (check[0] != send[0] || check[1] != send[1])
   8.408 +		EFX_ERR(efx, "read back wrong value from I/O expander: "
   8.409 +			"wanted %.2x%.2x, got %.2x%.2x\n",
   8.410 +			send[1], send[0], check[1], check[0]);
   8.411 +	return rc;
   8.412 +}
   8.413 +
   8.414 +static int sfe4005_init(struct efx_nic *efx)
   8.415 +{
   8.416 +	unsigned long iobits = SFE4005_CX4_DEFAULTS;
   8.417 +	int rc;
   8.418 +
   8.419 +	/* There is no PHY as such on the SFE4005 so phy_data is ours. */
   8.420 +	efx->phy_data = (void *)iobits;
   8.421 +
   8.422 +	/* Push the values */
   8.423 +	rc = sfe4005_write_ioexpander(efx);
   8.424 +	if (rc)
   8.425 +		return rc;
   8.426 +
   8.427 +	/* Activate the lm87 sensor if present (succeeds if nothing there) */
   8.428 +	rc = efx_probe_lm87(efx, SFE4005_LM87_I2C_ID,
   8.429 +			    sfe4005_lm87_limits,
   8.430 +			    sizeof(sfe4005_lm87_limits), SFE4005_LM87_IRQMASK);
   8.431 +
   8.432 +	/* To clean up shut down the lm87 (NOP if not present) */
   8.433 +	efx->board_info.fini = efx_remove_lm87;
   8.434 +
   8.435 +	return rc;
   8.436 +}
   8.437 +
   8.438 +/* This will get expanded as board-specific details get moved out of the
   8.439 + * PHY drivers. */
   8.440 +struct efx_board_data {
   8.441 +	const char *ref_model;
   8.442 +	const char *gen_type;
   8.443 +	int (*init) (struct efx_nic *nic);
   8.444 +	unsigned mwatts;
   8.445 +};
   8.446 +
   8.447 +static void dummy_fini(struct efx_nic *nic)
   8.448 +{
   8.449 +}
   8.450 +
   8.451 +static int dummy_init(struct efx_nic *nic)
   8.452 +{
   8.453 +	nic->board_info.fini = dummy_fini;
   8.454 +	return 0;
   8.455 +}
   8.456 +
   8.457 +/* Maximum board power (mW)
   8.458 + * Falcon controller ASIC accounts for 2.2W
   8.459 + * 10Xpress PHY accounts for 12W
   8.460 + *
   8.461 + */
   8.462 +#define SFE4001_POWER 18000
   8.463 +#define SFE4002_POWER 7500
   8.464 +#define SFE4003_POWER 4500
   8.465 +#define SFE4005_POWER 4500
   8.466 +
   8.467 +static struct efx_board_data board_data[] = {
   8.468 +	[EFX_BOARD_INVALID] =
   8.469 +	{NULL,	    NULL,                  dummy_init,      0},
   8.470 +	[EFX_BOARD_SFE4001] =
   8.471 +	{"SFE4001", "10GBASE-T adapter",   sfe4001_poweron, SFE4001_POWER },
   8.472 +	[EFX_BOARD_SFE4002] =
   8.473 +	{"SFE4002", "XFP adapter",         sfe4002_init,    SFE4002_POWER },
   8.474 +	[EFX_BOARD_SFE4003] =
   8.475 +	{"SFE4003", "10GBASE-CX4 adapter", sfe4003_init,    SFE4003_POWER },
   8.476 +	[EFX_BOARD_SFE4005] =
   8.477 +	{"SFE4005", "10G blade adapter",   sfe4005_init,    SFE4005_POWER },
   8.478 +};
   8.479 +
   8.480 +int efx_set_board_info(struct efx_nic *efx, u16 revision_info)
   8.481 +{
   8.482 +	int rc = 0;
   8.483 +	struct efx_board_data *data;
   8.484 +
   8.485 +	if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) {
   8.486 +		EFX_ERR(efx, "squashing unknown board type %d\n",
   8.487 +			BOARD_TYPE(revision_info));
   8.488 +		revision_info = 0;
   8.489 +	}
   8.490 +
   8.491 +	if (BOARD_TYPE(revision_info) == 0) {
   8.492 +		efx->board_info.major = 0;
   8.493 +		efx->board_info.minor = 0;
   8.494 +		/* For early boards that don't have revision info. there is
   8.495 +		 * only 1 board for each PHY type, so we can work it out, with
   8.496 +		 * the exception of the PHY-less boards. */
   8.497 +		switch (efx->phy_type) {
   8.498 +		case PHY_TYPE_10XPRESS:
   8.499 +			efx->board_info.type = EFX_BOARD_SFE4001;
   8.500 +			break;
   8.501 +		case PHY_TYPE_XFP:
   8.502 +			efx->board_info.type = EFX_BOARD_SFE4002;
   8.503 +			break;
   8.504 +		case PHY_TYPE_CX4_RTMR:
   8.505 +			efx->board_info.type = EFX_BOARD_SFE4003;
   8.506 +			break;
   8.507 +		default:
   8.508 +			efx->board_info.type = 0;
   8.509 +			break;
   8.510 +		}
   8.511 +	} else {
   8.512 +		efx->board_info.type = BOARD_TYPE(revision_info);
   8.513 +		efx->board_info.major = BOARD_MAJOR(revision_info);
   8.514 +		efx->board_info.minor = BOARD_MINOR(revision_info);
   8.515 +	}
   8.516 +
   8.517 +	data = &board_data[efx->board_info.type];
   8.518 +
   8.519 +	/* Report the board model number or generic type for recognisable
   8.520 +	 * boards. */
   8.521 +	if (efx->board_info.type != 0)
   8.522 +		EFX_INFO(efx, "board is %s rev %c%d\n",
   8.523 +			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
   8.524 +			 ? data->ref_model : data->gen_type,
   8.525 +			 'A' + efx->board_info.major, efx->board_info.minor);
   8.526 +
   8.527 +	efx->board_info.init = data->init;
   8.528 +	efx->board_info.mwatts = data->mwatts;
   8.529 +
   8.530 +	return rc;
   8.531 +}
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/drivers/net/sfc/boards.h	Mon Feb 18 10:29:07 2008 +0000
     9.3 @@ -0,0 +1,51 @@
     9.4 +/****************************************************************************
     9.5 + * Driver for Solarflare network controllers
     9.6 + *           (including support for SFE4001 10GBT NIC)
     9.7 + *
     9.8 + * Copyright 2007:      Solarflare Communications Inc,
     9.9 + *                      9501 Jeronimo Road, Suite 250,
    9.10 + *                      Irvine, CA 92618, USA
    9.11 + *
    9.12 + * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
    9.13 + *
    9.14 + * This program is free software; you can redistribute it and/or modify it
    9.15 + * under the terms of the GNU General Public License version 2 as published
    9.16 + * by the Free Software Foundation, incorporated herein by reference.
    9.17 + *
    9.18 + * This program is distributed in the hope that it will be useful,
    9.19 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    9.20 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    9.21 + * GNU General Public License for more details.
    9.22 + *
    9.23 + * You should have received a copy of the GNU General Public License
    9.24 + * along with this program; if not, write to the Free Software
    9.25 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    9.26 + ****************************************************************************
    9.27 + */
    9.28 +
    9.29 +#ifndef EFX_BOARDS_H
    9.30 +#define EFX_BOARDS_H
    9.31 +
    9.32 +/* Board IDs (must fit in 8 bits). Note that 0 must never be assigned because
    9.33 + * on early boards it means there is no revision info. Board types pre 400x
    9.34 + * are not covered here, but this is not a problem because:
    9.35 + * - the early Falcon boards (FPGA, 401, 403) don't have any extra H/W we
    9.36 + * need care about and aren't being updated.
    9.37 + */
    9.38 +enum efx_board_type {
    9.39 +	EFX_BOARD_INVALID = 0, /* Early boards do not have board rev. info. */
    9.40 +	EFX_BOARD_SFE4001 = 1,
    9.41 +	EFX_BOARD_SFE4002 = 2,
    9.42 +	EFX_BOARD_SFE4003 = 3,
    9.43 +	EFX_BOARD_SFE4005 = 4,
    9.44 +	/* Insert new types before here */
    9.45 +	EFX_BOARD_MAX
    9.46 +};
    9.47 +
    9.48 +extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
    9.49 +
    9.50 +/* SFE4001 (10GBASE-T) */
    9.51 +extern int sfe4001_poweron(struct efx_nic *efx);
    9.52 +extern void sfe4001_poweroff(struct efx_nic *efx);
    9.53 +
    9.54 +#endif
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/drivers/net/sfc/config.h	Mon Feb 18 10:29:07 2008 +0000
    10.3 @@ -0,0 +1,1 @@
    10.4 +/* SFC config options can go here */
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/drivers/net/sfc/debugfs.c	Mon Feb 18 10:29:07 2008 +0000
    11.3 @@ -0,0 +1,924 @@
    11.4 +/****************************************************************************
    11.5 + * Driver for Solarflare network controllers
    11.6 + *           (including support for SFE4001 10GBT NIC)
    11.7 + *
    11.8 + * Copyright 2005-2006: Fen Systems Ltd.
    11.9 + * Copyright 2006-2008: Solarflare Communications Inc,
   11.10 + *                      9501 Jeronimo Road, Suite 250,
   11.11 + *                      Irvine, CA 92618, USA
   11.12 + *
   11.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   11.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   11.15 + *
   11.16 + * This program is free software; you can redistribute it and/or modify it
   11.17 + * under the terms of the GNU General Public License version 2 as published
   11.18 + * by the Free Software Foundation, incorporated herein by reference.
   11.19 + *
   11.20 + * This program is distributed in the hope that it will be useful,
   11.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   11.23 + * GNU General Public License for more details.
   11.24 + *
   11.25 + * You should have received a copy of the GNU General Public License
   11.26 + * along with this program; if not, write to the Free Software
   11.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   11.28 + ****************************************************************************
   11.29 + */
   11.30 +
   11.31 +#include <linux/module.h>
   11.32 +#include <linux/pci.h>
   11.33 +/* For out-of-tree builds we always need procfs, if only for a compatibility
   11.34 + * symlink.
   11.35 + */
   11.36 +#include <linux/proc_fs.h>
   11.37 +#include <linux/dcache.h>
   11.38 +#include <linux/seq_file.h>
   11.39 +#include "net_driver.h"
   11.40 +#include "efx.h"
   11.41 +#include "debugfs.h"
   11.42 +#include "falcon.h"
   11.43 +
   11.44 +/* EFX_USE_DEBUGFS is defined by kernel_compat.h so we can't decide whether to
   11.45 + * include this earlier.
   11.46 + */
   11.47 +#ifdef EFX_USE_DEBUGFS
   11.48 +#include <linux/debugfs.h>
   11.49 +#endif
   11.50 +
   11.51 +#ifndef PRIu64
   11.52 +#	if (BITS_PER_LONG == 64)
   11.53 +#		define PRIu64 "lu"
   11.54 +#	else
   11.55 +#		define PRIu64 "llu"
   11.56 +#	endif
   11.57 +#endif
   11.58 +
   11.59 +#ifndef EFX_USE_DEBUGFS
   11.60 +
   11.61 +static void efx_debugfs_remove(struct proc_dir_entry *entry)
   11.62 +{
   11.63 +	if (entry)
   11.64 +		remove_proc_entry(entry->name, entry->parent);
   11.65 +}
   11.66 +#define debugfs_remove efx_debugfs_remove
   11.67 +
   11.68 +#define debugfs_create_dir proc_mkdir
   11.69 +#define debugfs_create_symlink proc_symlink
   11.70 +
   11.71 +#endif /* !EFX_USE_DEBUGFS */
   11.72 +
   11.73 +/* Parameter definition bound to a structure - each file has one of these */
   11.74 +struct efx_debugfs_bound_param {
   11.75 +	const struct efx_debugfs_parameter *param;
   11.76 +	void *structure;
   11.77 +};
   11.78 +
   11.79 +
   11.80 +/* Maximum length for a name component or symlink target */
   11.81 +#define EFX_DEBUGFS_NAME_LEN 32
   11.82 +
   11.83 +
   11.84 +/* Top-level debug directory ([/sys/kernel]/debug/sfc) */
   11.85 +static struct dentry *efx_debug_root;
   11.86 +
   11.87 +/* "cards" directory ([/sys/kernel]/debug/sfc/cards) */
   11.88 +static struct dentry *efx_debug_cards;
   11.89 +
   11.90 +
   11.91 +/* Sequential file interface to bound parameters */
   11.92 +
   11.93 +#if defined(EFX_USE_DEBUGFS)
   11.94 +
   11.95 +static int efx_debugfs_seq_show(struct seq_file *file, void *v)
   11.96 +{
   11.97 +	struct efx_debugfs_bound_param *binding =
   11.98 +		(struct efx_debugfs_bound_param *)file->private;
   11.99 +
  11.100 +	return binding->param->reader(file,
  11.101 +				      binding->structure +
  11.102 +				      binding->param->offset);
  11.103 +}
  11.104 +
  11.105 +static int efx_debugfs_open(struct inode *inode, struct file *file)
  11.106 +{
  11.107 +	return single_open(file, efx_debugfs_seq_show, inode->i_private);
  11.108 +}
  11.109 +
  11.110 +#else /* EFX_NOT_UPSTREAM && !EFX_USE_DEBUGFS */
  11.111 +
  11.112 +static int efx_debugfs_seq_show(struct seq_file *file, void *v)
  11.113 +{
  11.114 +	struct proc_dir_entry *entry = (struct proc_dir_entry *)file->private;
  11.115 +	struct efx_debugfs_parameter *param =
  11.116 +		(struct efx_debugfs_parameter *)entry->data;
  11.117 +	void *structure = (void *)entry->read_proc;
  11.118 +
  11.119 +	if (!structure)
  11.120 +		return -EIO;
  11.121 +
  11.122 +	return param->reader(file, structure + param->offset);
  11.123 +}
  11.124 +
  11.125 +static int efx_debugfs_open(struct inode *inode, struct file *file)
  11.126 +{
  11.127 +	return single_open(file, efx_debugfs_seq_show, PROC_I(inode)->pde);
  11.128 +}
  11.129 +
  11.130 +#endif /* !EFX_NOT_UPSTREAM || EFX_USE_DEBUGFS */
  11.131 +
  11.132 +
  11.133 +static struct file_operations efx_debugfs_file_ops = {
  11.134 +	.owner   = THIS_MODULE,
  11.135 +	.open    = efx_debugfs_open,
  11.136 +	.read    = seq_read,
  11.137 +	.llseek  = seq_lseek,
  11.138 +	.release = seq_release
  11.139 +};
  11.140 +
  11.141 +
  11.142 +#if defined(EFX_USE_DEBUGFS)
  11.143 +
  11.144 +/**
  11.145 + * efx_fini_debugfs_child - remove a named child of a debugfs directory
  11.146 + * @dir:		Directory
  11.147 + * @name:		Name of child
  11.148 + *
  11.149 + * This removes the named child from the directory, if it exists.
  11.150 + */
  11.151 +void efx_fini_debugfs_child(struct dentry *dir, const char *name)
  11.152 +{
  11.153 +	struct qstr child_name;
  11.154 +	struct dentry *child;
  11.155 +
  11.156 +	child_name.len = strlen(name);
  11.157 +	child_name.name = name;
  11.158 +	child_name.hash = full_name_hash(child_name.name, child_name.len);
  11.159 +	child = d_lookup(dir, &child_name);
  11.160 +	if (child) {
  11.161 +		/* If it's a "regular" file, free its parameter binding */
  11.162 +		if (S_ISREG(child->d_inode->i_mode))
  11.163 +			kfree(child->d_inode->i_private);
  11.164 +		debugfs_remove(child);
  11.165 +		dput(child);
  11.166 +	}
  11.167 +}
  11.168 +
  11.169 +#else /* EFX_NOT_UPSTREAM && !EFX_USE_DEBUGFS */
  11.170 +
  11.171 +void efx_fini_debugfs_child(struct proc_dir_entry *dir, const char *name)
  11.172 +{
  11.173 +	remove_proc_entry(name, dir);
  11.174 +}
  11.175 +
  11.176 +#endif /* !EFX_NOT_UPSTREAM || EFX_USE_DEBUGFS */
  11.177 +
  11.178 +/*
  11.179 + * Remove a debugfs directory.
  11.180 + *
  11.181 + * This removes the named parameter-files and sym-links from the
  11.182 + * directory, and the directory itself.  It does not do any recursion
  11.183 + * to subdirectories.
  11.184 + */
  11.185 +static void efx_fini_debugfs_dir(struct dentry *dir,
  11.186 +				 struct efx_debugfs_parameter *params,
  11.187 +				 const char *const *symlink_names)
  11.188 +{
  11.189 +	if (!dir)
  11.190 +		return;
  11.191 +
  11.192 +	while (params->name) {
  11.193 +		efx_fini_debugfs_child(dir, params->name);
  11.194 +		params++;
  11.195 +	}
  11.196 +	while (symlink_names && *symlink_names) {
  11.197 +		efx_fini_debugfs_child(dir, *symlink_names);
  11.198 +		symlink_names++;
  11.199 +	}
  11.200 +	debugfs_remove(dir);
  11.201 +}
  11.202 +
  11.203 +/* Functions for printing various types of parameter. */
  11.204 +
  11.205 +int efx_debugfs_read_uint(struct seq_file *file, void *data)
  11.206 +{
  11.207 +	return seq_printf(file, "%#x\n", *(unsigned int *)data);
  11.208 +}
  11.209 +
  11.210 +int efx_debugfs_read_int(struct seq_file *file, void *data)
  11.211 +{
  11.212 +	return seq_printf(file, "%d\n", *(int *)data);
  11.213 +}
  11.214 +
  11.215 +int efx_debugfs_read_atomic(struct seq_file *file, void *data)
  11.216 +{
  11.217 +	unsigned int value = atomic_read((atomic_t *) data);
  11.218 +
  11.219 +	return seq_printf(file, "%#x\n", value);
  11.220 +}
  11.221 +
  11.222 +int efx_debugfs_read_dword(struct seq_file *file, void *data)
  11.223 +{
  11.224 +	unsigned int value = EFX_DWORD_FIELD(*(efx_dword_t *) data,
  11.225 +					     EFX_DWORD_0);
  11.226 +
  11.227 +	return seq_printf(file, "%#x\n", value);
  11.228 +}
  11.229 +
  11.230 +static int efx_debugfs_read_int_mode(struct seq_file *file, void *data)
  11.231 +{
  11.232 +	unsigned int value = *(enum efx_int_mode *) data;
  11.233 +
  11.234 +	return seq_printf(file, "%d => %s\n", value,
  11.235 +			  STRING_TABLE_LOOKUP(value, efx_interrupt_mode));
  11.236 +}
  11.237 +
  11.238 +#define EFX_INT_MODE_PARAMETER(container_type, parameter)		\
  11.239 +	EFX_PARAMETER(container_type, parameter,			\
  11.240 +		      enum efx_int_mode, efx_debugfs_read_int_mode)
  11.241 +
  11.242 +static int efx_debugfs_read_loop_mode(struct seq_file *file, void *data)
  11.243 +{
  11.244 +	unsigned int value = *(enum efx_loopback_mode *)data;
  11.245 +
  11.246 +	return seq_printf(file, "%d => %s\n", value,
  11.247 +			  STRING_TABLE_LOOKUP(value, efx_loopback_mode));
  11.248 +}
  11.249 +
  11.250 +#define EFX_LOOPBACK_MODE_PARAMETER(container_type, parameter)		\
  11.251 +	EFX_PARAMETER(container_type, parameter,			\
  11.252 +		      enum efx_loopback_mode, efx_debugfs_read_loop_mode)
  11.253 +
  11.254 +static int efx_debugfs_read_phy_type(struct seq_file *file, void *data)
  11.255 +{
  11.256 +	unsigned int value = *(enum phy_type *) data;
  11.257 +
  11.258 +	return seq_printf(file, "%d => %s\n", value,
  11.259 +			  STRING_TABLE_LOOKUP(value, efx_phy_type));
  11.260 +}
  11.261 +
  11.262 +#define EFX_PHY_TYPE_PARAMETER(container_type, parameter)		\
  11.263 +	EFX_PARAMETER(container_type, parameter,			\
  11.264 +		      enum phy_type, efx_debugfs_read_phy_type)
  11.265 +
  11.266 +int efx_debugfs_read_string(struct seq_file *file, void *data)
  11.267 +{
  11.268 +	return seq_puts(file, (const char *)data);
  11.269 +}
  11.270 +
  11.271 +
  11.272 +/**
  11.273 + * efx_init_debugfs_files - create parameter-files in a debugfs directory
  11.274 + * @parent:		Containing directory
  11.275 + * @params:		Pointer to zero-terminated parameter definition array
  11.276 + * @structure:		Structure containing parameters
  11.277 + *
  11.278 + * Add parameter-files to the given debugfs directory.  Return a
  11.279 + * negative error code or 0 on success.
  11.280 + */
  11.281 +static int efx_init_debugfs_files(struct dentry *parent,
  11.282 +				  struct efx_debugfs_parameter *params,
  11.283 +				  void *structure)
  11.284 +{
  11.285 +	struct efx_debugfs_parameter *param = params;
  11.286 +
  11.287 +	while (param->name) {
  11.288 +		struct dentry *entry;
  11.289 +#if defined(EFX_USE_DEBUGFS)
  11.290 +		struct efx_debugfs_bound_param *binding;
  11.291 +
  11.292 +		binding = kmalloc(sizeof(*binding), GFP_KERNEL);
  11.293 +		if (!binding)
  11.294 +			goto err;
  11.295 +		binding->param = param;
  11.296 +		binding->structure = structure;
  11.297 +
  11.298 +		entry = debugfs_create_file(param->name, S_IRUGO, parent,
  11.299 +					    binding, &efx_debugfs_file_ops);
  11.300 +		if (!entry) {
  11.301 +			kfree(binding);
  11.302 +			goto err;
  11.303 +		}
  11.304 +#else
  11.305 +		entry = create_proc_entry(param->name, S_IRUGO, parent);
  11.306 +		if (!entry)
  11.307 +			goto err;
  11.308 +		/*
  11.309 +		 * We have no good way to free a binding created here.
  11.310 +		 * However, once we install our file_operations the
  11.311 +		 * read_proc pointer becomes redundant and we can
  11.312 +		 * abuse it as a structure pointer.
  11.313 +		 */
  11.314 +		entry->data = param;
  11.315 +		entry->read_proc = NULL;
  11.316 +		smp_wmb();
  11.317 +		entry->proc_fops = &efx_debugfs_file_ops;
  11.318 +		smp_wmb();
  11.319 +		entry->read_proc = (read_proc_t *) structure;
  11.320 +#endif
  11.321 +
  11.322 +		param++;
  11.323 +	}
  11.324 +
  11.325 +	return 0;
  11.326 +
  11.327 + err:
  11.328 +	while (param != params) {
  11.329 +		param--;
  11.330 +		efx_fini_debugfs_child(parent, param->name);
  11.331 +	}
  11.332 +	return -ENOMEM;
  11.333 +}
  11.334 +
  11.335 +/**
  11.336 + * efx_init_debugfs_netdev - create debugfs sym-links for net device
  11.337 + * @net_dev:		Net device
  11.338 + *
  11.339 + * Create sym-links named after @net_dev to the debugfs directories for
   11.340 + * the corresponding NIC and port.  Return a negative error code or 0 on
  11.341 + * success.  The sym-links must be cleaned up using
  11.342 + * efx_fini_debugfs_netdev().
  11.343 + */
  11.344 +int efx_init_debugfs_netdev(struct net_device *net_dev)
  11.345 +{
  11.346 +	struct efx_nic *efx = net_dev->priv;
  11.347 +	char name[EFX_DEBUGFS_NAME_LEN];
  11.348 +	char target[EFX_DEBUGFS_NAME_LEN];
  11.349 +	size_t len;
  11.350 +
  11.351 +	if (snprintf(name, sizeof(name), "nic_%s", net_dev->name) >=
  11.352 +	    sizeof(name))
  11.353 +		return -ENAMETOOLONG;
  11.354 +	if (snprintf(target, sizeof(target), "cards/%s", pci_name(efx->pci_dev))
  11.355 +	    >= sizeof(target))
  11.356 +		return -ENAMETOOLONG;
  11.357 +	efx->debug_symlink = debugfs_create_symlink(name,
  11.358 +						    efx_debug_root, target);
  11.359 +	if (!efx->debug_symlink)
  11.360 +		return -ENOMEM;
  11.361 +
  11.362 +	if (snprintf(name, sizeof(name), "if_%s", net_dev->name) >=
  11.363 +	    sizeof(name))
  11.364 +		return -ENAMETOOLONG;
  11.365 +	len = snprintf(target, sizeof(target),
  11.366 +		       "cards/%s/port0", pci_name(efx->pci_dev));
  11.367 +	if (len >= sizeof(target))
  11.368 +		return -ENAMETOOLONG;
  11.369 +	efx->debug_port_symlink = debugfs_create_symlink(name,
  11.370 +							 efx_debug_root,
  11.371 +							 target);
  11.372 +	if (!efx->debug_port_symlink)
  11.373 +		return -ENOMEM;
  11.374 +
  11.375 +	return 0;
  11.376 +}
  11.377 +
  11.378 +/**
  11.379 + * efx_fini_debugfs_netdev - remove debugfs sym-links for net device
  11.380 + * @net_dev:		Net device
  11.381 + *
  11.382 + * Remove sym-links created for @net_dev by efx_init_debugfs_netdev().
  11.383 + */
  11.384 +void efx_fini_debugfs_netdev(struct net_device *net_dev)
  11.385 +{
  11.386 +	struct efx_nic *efx = net_dev->priv;
  11.387 +
  11.388 +	debugfs_remove(efx->debug_port_symlink);
  11.389 +	efx->debug_port_symlink = NULL;
  11.390 +	debugfs_remove(efx->debug_symlink);
  11.391 +	efx->debug_symlink = NULL;
  11.392 +}
  11.393 +
  11.394 +/* Per-port parameters */
  11.395 +static struct efx_debugfs_parameter efx_debugfs_port_parameters[] = {
  11.396 +	EFX_NAMED_PARAMETER(enabled, struct efx_nic, port_enabled,
  11.397 +			    int, efx_debugfs_read_int),
  11.398 +	EFX_INT_PARAMETER(struct efx_nic, net_dev_registered),
  11.399 +	EFX_INT_PARAMETER(struct efx_nic, rx_checksum_enabled),
  11.400 +	EFX_ATOMIC_PARAMETER(struct efx_nic, netif_stop_count),
  11.401 +	EFX_INT_PARAMETER(struct efx_nic, link_up),
  11.402 +	EFX_UINT_PARAMETER(struct efx_nic, link_options),
  11.403 +	EFX_INT_PARAMETER(struct efx_nic, promiscuous),
  11.404 +	EFX_UINT_PARAMETER(struct efx_nic, loopback_modes),
  11.405 +	EFX_LOOPBACK_MODE_PARAMETER(struct efx_nic, loopback_mode),
  11.406 +	EFX_PHY_TYPE_PARAMETER(struct efx_nic, phy_type),
  11.407 +	EFX_NAMED_PARAMETER(phy_id, struct efx_nic, mii.phy_id,
  11.408 +			    int, efx_debugfs_read_int),
  11.409 +	EFX_UINT_PARAMETER(struct efx_nic, n_link_state_changes),
  11.410 +	{NULL},
  11.411 +};
  11.412 +
  11.413 +/**
  11.414 + * efx_init_debugfs_port - create debugfs directory for port
  11.415 + * @efx:		Efx NIC
  11.416 + *
  11.417 + * Create a debugfs directory containing parameter-files for @efx.
  11.418 + * Return a negative error code or 0 on success.  The directory must be
  11.419 + * cleaned up using efx_fini_debugfs_port().
  11.420 + */
  11.421 +int efx_init_debugfs_port(struct efx_nic *efx)
  11.422 +{
  11.423 +	int rc;
  11.424 +
  11.425 +	/* Create directory */
  11.426 +	efx->debug_port_dir = debugfs_create_dir("port0", efx->debug_dir);
  11.427 +	if (!efx->debug_port_dir)
  11.428 +		return -ENOMEM;
  11.429 +
  11.430 +	/* Create files */
  11.431 +	rc = efx_init_debugfs_files(efx->debug_port_dir,
  11.432 +				    efx_debugfs_port_parameters,
  11.433 +				    (void *)efx);
  11.434 +	if (rc)
  11.435 +		efx_fini_debugfs_port(efx);
  11.436 +
  11.437 +	return rc;
  11.438 +}
  11.439 +
  11.440 +/**
  11.441 + * efx_fini_debugfs_port - remove debugfs directory for port
  11.442 + * @efx:		Efx NIC
  11.443 + *
  11.444 + * Remove directory created for @efx by efx_init_debugfs_port().
  11.445 + */
  11.446 +void efx_fini_debugfs_port(struct efx_nic *efx)
  11.447 +{
  11.448 +	efx_fini_debugfs_dir(efx->debug_port_dir,
  11.449 +			     efx_debugfs_port_parameters, NULL);
  11.450 +	efx->debug_port_dir = NULL;
  11.451 +}
  11.452 +
  11.453 +/**
  11.454 + * efx_extend_debugfs_port - add parameter-files to directory for port
  11.455 + * @efx:		Efx NIC
  11.456 + * @structure:		Structure containing parameters
  11.457 + * @params:		Pointer to zero-terminated parameter definition array
  11.458 + *
  11.459 + * Add parameter-files to the debugfs directory for @efx.  Return
  11.460 + * a negative error code or 0 on success.  This is intended for
  11.461 + * PHY-specific parameters.  The files must be cleaned up using
  11.462 + * efx_trim_debugfs_port().
  11.463 + */
  11.464 +int efx_extend_debugfs_port(struct efx_nic *efx,
  11.465 +			    void *structure,
  11.466 +			    struct efx_debugfs_parameter *params)
  11.467 +{
  11.468 +	return efx_init_debugfs_files(efx->debug_port_dir, params, structure);
  11.469 +}
  11.470 +
  11.471 +/**
  11.472 + * efx_trim_debugfs_port - remove parameter-files from directory for port
  11.473 + * @efx:		Efx NIC
  11.474 + * @params:		Pointer to zero-terminated parameter definition array
  11.475 + *
  11.476 + * Remove parameter-files previously added to the debugfs directory
  11.477 + * for @efx using efx_extend_debugfs_port().
  11.478 + */
  11.479 +void efx_trim_debugfs_port(struct efx_nic *efx,
  11.480 +			   struct efx_debugfs_parameter *params)
  11.481 +{
  11.482 +	struct dentry *dir = efx->debug_port_dir;
  11.483 +
  11.484 +	if (dir) {
  11.485 +		struct efx_debugfs_parameter *field;
  11.486 +		for (field = params; field->name; field++)
  11.487 +			efx_fini_debugfs_child(dir, field->name);
  11.488 +	}
  11.489 +}
  11.490 +
  11.491 +/* Per-TX-queue parameters */
  11.492 +static struct efx_debugfs_parameter efx_debugfs_tx_queue_parameters[] = {
  11.493 +	EFX_UINT_PARAMETER(struct efx_tx_queue, insert_count),
  11.494 +	EFX_UINT_PARAMETER(struct efx_tx_queue, write_count),
  11.495 +	EFX_UINT_PARAMETER(struct efx_tx_queue, read_count),
  11.496 +	EFX_INT_PARAMETER(struct efx_tx_queue, stopped),
  11.497 +	{NULL},
  11.498 +};
  11.499 +
  11.500 +static void efx_fini_debugfs_tx_queue(struct efx_tx_queue *tx_queue);
  11.501 +
  11.502 +/**
  11.503 + * efx_init_debugfs_tx_queue - create debugfs directory for TX queue
  11.504 + * @tx_queue:		Efx TX queue
  11.505 + *
  11.506 + * Create a debugfs directory containing parameter-files for @tx_queue.
  11.507 + * Return a negative error code or 0 on success.  The directory must be
  11.508 + * cleaned up using efx_fini_debugfs_tx_queue().
  11.509 + */
  11.510 +static int efx_init_debugfs_tx_queue(struct efx_tx_queue *tx_queue)
  11.511 +{
  11.512 +	char name[EFX_DEBUGFS_NAME_LEN];
  11.513 +	char target[EFX_DEBUGFS_NAME_LEN];
  11.514 +	int rc;
  11.515 +
  11.516 +	/* Create directory */
  11.517 +	if (snprintf(name, sizeof(name), EFX_TX_QUEUE_NAME(tx_queue))
  11.518 +	    >= sizeof(name))
  11.519 +		goto err_len;
  11.520 +	tx_queue->debug_dir = debugfs_create_dir(name,
  11.521 +						 tx_queue->efx->debug_dir);
  11.522 +	if (!tx_queue->debug_dir)
  11.523 +		goto err_mem;
  11.524 +
  11.525 +	/* Create files */
  11.526 +	rc = efx_init_debugfs_files(tx_queue->debug_dir,
  11.527 +				    efx_debugfs_tx_queue_parameters,
  11.528 +				    (void *)tx_queue);
  11.529 +	if (rc)
  11.530 +		goto err;
  11.531 +
  11.532 +	/* Create symlink to channel */
  11.533 +	if (snprintf(target, sizeof(target),
  11.534 +		     "../" EFX_CHANNEL_NAME(tx_queue->channel)) >=
  11.535 +	    sizeof(target))
  11.536 +		goto err_len;
  11.537 +	if (!debugfs_create_symlink("channel", tx_queue->debug_dir, target))
  11.538 +		goto err_mem;
  11.539 +
  11.540 +	/* Create symlink to port */
  11.541 +	if (!debugfs_create_symlink("port", tx_queue->debug_dir, "../port0"))
  11.542 +		goto err_mem;
  11.543 +
  11.544 +	return 0;
  11.545 +
  11.546 + err_len:
  11.547 +	rc = -ENAMETOOLONG;
  11.548 +	goto err;
  11.549 + err_mem:
  11.550 +	rc = -ENOMEM;
  11.551 + err:
  11.552 +	efx_fini_debugfs_tx_queue(tx_queue);
  11.553 +	return rc;
  11.554 +}
  11.555 +
  11.556 +/**
  11.557 + * efx_fini_debugfs_tx_queue - remove debugfs directory for TX queue
  11.558 + * @tx_queue:		Efx TX queue
  11.559 + *
  11.560 + * Remove directory created for @tx_queue by efx_init_debugfs_tx_queue().
  11.561 + */
  11.562 +static void efx_fini_debugfs_tx_queue(struct efx_tx_queue *tx_queue)
  11.563 +{
  11.564 +	static const char *const symlink_names[] = {
  11.565 +		"channel", "port", NULL
  11.566 +	};
  11.567 +
  11.568 +	efx_fini_debugfs_dir(tx_queue->debug_dir,
  11.569 +			     efx_debugfs_tx_queue_parameters, symlink_names);
  11.570 +	tx_queue->debug_dir = NULL;
  11.571 +}
  11.572 +
  11.573 +/* Per-RX-queue parameters */
  11.574 +static struct efx_debugfs_parameter efx_debugfs_rx_queue_parameters[] = {
  11.575 +	EFX_INT_PARAMETER(struct efx_rx_queue, added_count),
  11.576 +	EFX_INT_PARAMETER(struct efx_rx_queue, removed_count),
  11.577 +	EFX_UINT_PARAMETER(struct efx_rx_queue, max_fill),
  11.578 +	EFX_UINT_PARAMETER(struct efx_rx_queue, fast_fill_trigger),
  11.579 +	EFX_UINT_PARAMETER(struct efx_rx_queue, fast_fill_limit),
  11.580 +	EFX_UINT_PARAMETER(struct efx_rx_queue, min_fill),
  11.581 +	EFX_UINT_PARAMETER(struct efx_rx_queue, min_overfill),
  11.582 +	EFX_UINT_PARAMETER(struct efx_rx_queue, alloc_page_count),
  11.583 +	EFX_UINT_PARAMETER(struct efx_rx_queue, alloc_skb_count),
  11.584 +	EFX_UINT_PARAMETER(struct efx_rx_queue, slow_fill_count),
  11.585 +	{NULL},
  11.586 +};
  11.587 +
  11.588 +static void efx_fini_debugfs_rx_queue(struct efx_rx_queue *rx_queue);
  11.589 +
  11.590 +/**
  11.591 + * efx_init_debugfs_rx_queue - create debugfs directory for RX queue
  11.592 + * @rx_queue:		Efx RX queue
  11.593 + *
  11.594 + * Create a debugfs directory containing parameter-files for @rx_queue.
  11.595 + * Return a negative error code or 0 on success.  The directory must be
  11.596 + * cleaned up using efx_fini_debugfs_rx_queue().
  11.597 + */
  11.598 +static int efx_init_debugfs_rx_queue(struct efx_rx_queue *rx_queue)
  11.599 +{
  11.600 +	char name[EFX_DEBUGFS_NAME_LEN];
  11.601 +	char target[EFX_DEBUGFS_NAME_LEN];
  11.602 +	int rc;
  11.603 +
  11.604 +	/* Create directory */
  11.605 +	if (snprintf(name, sizeof(name), EFX_RX_QUEUE_NAME(rx_queue))
  11.606 +	    >= sizeof(name))
  11.607 +		goto err_len;
  11.608 +	rx_queue->debug_dir = debugfs_create_dir(name,
  11.609 +						 rx_queue->efx->debug_dir);
  11.610 +	if (!rx_queue->debug_dir)
  11.611 +		goto err_mem;
  11.612 +
  11.613 +	/* Create files */
  11.614 +	rc = efx_init_debugfs_files(rx_queue->debug_dir,
  11.615 +				    efx_debugfs_rx_queue_parameters,
  11.616 +				    (void *)rx_queue);
  11.617 +	if (rc)
  11.618 +		goto err;
  11.619 +
  11.620 +	/* Create symlink to channel */
  11.621 +	if (snprintf(target, sizeof(target),
  11.622 +		     "../" EFX_CHANNEL_NAME(rx_queue->channel)) >=
  11.623 +	    sizeof(target))
  11.624 +		goto err_len;
  11.625 +	if (!debugfs_create_symlink("channel", rx_queue->debug_dir, target))
  11.626 +		goto err_mem;
  11.627 +
  11.628 +	return 0;
  11.629 +
  11.630 + err_len:
  11.631 +	rc = -ENAMETOOLONG;
  11.632 +	goto err;
  11.633 + err_mem:
  11.634 +	rc = -ENOMEM;
  11.635 + err:
  11.636 +	efx_fini_debugfs_rx_queue(rx_queue);
  11.637 +	return rc;
  11.638 +}
  11.639 +
  11.640 +/**
  11.641 + * efx_fini_debugfs_rx_queue - remove debugfs directory for RX queue
  11.642 + * @rx_queue:		Efx RX queue
  11.643 + *
  11.644 + * Remove directory created for @rx_queue by efx_init_debugfs_rx_queue().
  11.645 + */
  11.646 +static void efx_fini_debugfs_rx_queue(struct efx_rx_queue *rx_queue)
  11.647 +{
  11.648 +	const char *const symlink_names[] = {
  11.649 +		"channel", NULL
  11.650 +	};
  11.651 +
  11.652 +	efx_fini_debugfs_dir(rx_queue->debug_dir,
  11.653 +			     efx_debugfs_rx_queue_parameters, symlink_names);
  11.654 +	rx_queue->debug_dir = NULL;
  11.655 +}
  11.656 +
  11.657 +/* Per-channel parameters */
  11.658 +static struct efx_debugfs_parameter efx_debugfs_channel_parameters[] = {
  11.659 +	EFX_INT_PARAMETER(struct efx_channel, enabled),
  11.660 +	EFX_INT_PARAMETER(struct efx_channel, irq),
  11.661 +	EFX_UINT_PARAMETER(struct efx_channel, has_interrupt),
  11.662 +	EFX_UINT_PARAMETER(struct efx_channel, irq_moderation),
  11.663 +	EFX_UINT_PARAMETER(struct efx_channel, eventq_read_ptr),
  11.664 +	EFX_UINT_PARAMETER(struct efx_channel, n_rx_tobe_disc),
  11.665 +	EFX_UINT_PARAMETER(struct efx_channel, n_rx_ip_frag_err),
  11.666 +	EFX_UINT_PARAMETER(struct efx_channel, n_rx_ip_hdr_chksum_err),
  11.667 +	EFX_UINT_PARAMETER(struct efx_channel, n_rx_tcp_udp_chksum_err),
  11.668 +	EFX_UINT_PARAMETER(struct efx_channel, n_rx_frm_trunc),
  11.669 +	EFX_UINT_PARAMETER(struct efx_channel, n_rx_overlength),
  11.670 +	EFX_UINT_PARAMETER(struct efx_channel, n_skbuff_leaks),
  11.671 +	EFX_INT_PARAMETER(struct efx_channel, rx_alloc_level),
  11.672 +	EFX_INT_PARAMETER(struct efx_channel, rx_alloc_push_pages),
  11.673 +	EFX_INT_PARAMETER(struct efx_channel, rx_alloc_pop_pages),
  11.674 +	{NULL},
  11.675 +};
  11.676 +
  11.677 +static void efx_fini_debugfs_channel(struct efx_channel *channel);
  11.678 +
  11.679 +/**
  11.680 + * efx_init_debugfs_channel - create debugfs directory for channel
  11.681 + * @channel:		Efx channel
  11.682 + *
  11.683 + * Create a debugfs directory containing parameter-files for @channel.
  11.684 + * Return a negative error code or 0 on success.  The directory must be
  11.685 + * cleaned up using efx_fini_debugfs_channel().
  11.686 + */
  11.687 +static int efx_init_debugfs_channel(struct efx_channel *channel)
  11.688 +{
  11.689 +	char name[EFX_DEBUGFS_NAME_LEN];
  11.690 +	int rc;
  11.691 +
  11.692 +	/* Create directory */
  11.693 +	if (snprintf(name, sizeof(name), EFX_CHANNEL_NAME(channel))
  11.694 +	    >= sizeof(name))
  11.695 +		goto err_len;
  11.696 +	channel->debug_dir = debugfs_create_dir(name, channel->efx->debug_dir);
  11.697 +	if (!channel->debug_dir)
  11.698 +		goto err_mem;
  11.699 +
  11.700 +	/* Create files */
  11.701 +	rc = efx_init_debugfs_files(channel->debug_dir,
  11.702 +				    efx_debugfs_channel_parameters,
  11.703 +				    (void *)channel);
  11.704 +	if (rc)
  11.705 +		goto err;
  11.706 +
  11.707 +	return 0;
  11.708 +
  11.709 + err_len:
  11.710 +	rc = -ENAMETOOLONG;
  11.711 +	goto err;
  11.712 + err_mem:
  11.713 +	rc = -ENOMEM;
  11.714 + err:
  11.715 +	efx_fini_debugfs_channel(channel);
  11.716 +	return rc;
  11.717 +}
  11.718 +
  11.719 +/**
  11.720 + * efx_fini_debugfs_channel - remove debugfs directory for channel
  11.721 + * @channel:		Efx channel
  11.722 + *
  11.723 + * Remove directory created for @channel by efx_init_debugfs_channel().
  11.724 + */
  11.725 +static void efx_fini_debugfs_channel(struct efx_channel *channel)
  11.726 +{
  11.727 +	efx_fini_debugfs_dir(channel->debug_dir,
  11.728 +			     efx_debugfs_channel_parameters, NULL);
  11.729 +	channel->debug_dir = NULL;
  11.730 +}
  11.731 +
  11.732 +/* Per-NIC parameters */
  11.733 +static struct efx_debugfs_parameter efx_debugfs_nic_parameters[] = {
  11.734 +	EFX_INT_PARAMETER(struct efx_nic, legacy_irq),
  11.735 +	EFX_INT_PARAMETER(struct efx_nic, rss_queues),
  11.736 +	EFX_UINT_PARAMETER(struct efx_nic, rx_buffer_len),
  11.737 +	EFX_INT_MODE_PARAMETER(struct efx_nic, interrupt_mode),
  11.738 +	{.name = "hardware_desc",
  11.739 +	 .offset = 0,
  11.740 +	 .reader = falcon_debugfs_read_hardware_desc},
  11.741 +	{NULL},
  11.742 +};
  11.743 +
  11.744 +/* Per-NIC error counts */
  11.745 +static struct efx_debugfs_parameter efx_debugfs_nic_error_parameters[] = {
  11.746 +	EFX_ATOMIC_PARAMETER(struct efx_nic_errors, missing_event),
  11.747 +	EFX_ATOMIC_PARAMETER(struct efx_nic_errors, rx_reset),
  11.748 +	EFX_ATOMIC_PARAMETER(struct efx_nic_errors, rx_desc_fetch),
  11.749 +	EFX_ATOMIC_PARAMETER(struct efx_nic_errors, tx_desc_fetch),
  11.750 +	EFX_ATOMIC_PARAMETER(struct efx_nic_errors, spurious_tx),
  11.751 +	{NULL},
  11.752 +};
  11.753 +
  11.754 +/**
  11.755 + * efx_init_debugfs_channels - create debugfs directories for NIC channels
  11.756 + * @efx:		Efx NIC
  11.757 + *
  11.758 + * Create subdirectories of @efx's debugfs directory for all the
  11.759 + * channels, RX queues and TX queues used by this driver.  Return a
  11.760 + * negative error code or 0 on success.  The subdirectories must be
  11.761 + * cleaned up using efx_fini_debugfs_channels().
  11.762 + */
  11.763 +int efx_init_debugfs_channels(struct efx_nic *efx)
  11.764 +{
  11.765 +	struct efx_channel *channel;
  11.766 +	struct efx_rx_queue *rx_queue;
  11.767 +	struct efx_tx_queue *tx_queue;
  11.768 +	int rc;
  11.769 +
  11.770 +	efx_for_each_channel(channel, efx) {
  11.771 +		rc = efx_init_debugfs_channel(channel);
  11.772 +		if (rc)
  11.773 +			goto err;
  11.774 +	}
  11.775 +
  11.776 +	efx_for_each_rx_queue(rx_queue, efx) {
  11.777 +		rc = efx_init_debugfs_rx_queue(rx_queue);
  11.778 +		if (rc)
  11.779 +			goto err;
  11.780 +	}
  11.781 +
  11.782 +	efx_for_each_tx_queue(tx_queue, efx) {
  11.783 +		rc = efx_init_debugfs_tx_queue(tx_queue);
  11.784 +		if (rc)
  11.785 +			goto err;
  11.786 +	}
  11.787 +
  11.788 +	return 0;
  11.789 +
  11.790 + err:
  11.791 +	efx_fini_debugfs_channels(efx);
  11.792 +	return rc;
  11.793 +}
  11.794 +
  11.795 +/**
  11.796 + * efx_fini_debugfs_channels - remove debugfs directories for NIC queues
  11.797 + * @efx:		Efx NIC
  11.798 + *
  11.799 + * Remove subdirectories of @efx's debugfs directory created by
  11.800 + * efx_init_debugfs_channels().
  11.801 + */
  11.802 +void efx_fini_debugfs_channels(struct efx_nic *efx)
  11.803 +{
  11.804 +	struct efx_channel *channel;
  11.805 +	struct efx_rx_queue *rx_queue;
  11.806 +	struct efx_tx_queue *tx_queue;
  11.807 +
  11.808 +	efx_for_each_tx_queue(tx_queue, efx)
  11.809 +		efx_fini_debugfs_tx_queue(tx_queue);
  11.810 +
  11.811 +	efx_for_each_rx_queue(rx_queue, efx)
  11.812 +		efx_fini_debugfs_rx_queue(rx_queue);
  11.813 +
  11.814 +	efx_for_each_channel(channel, efx)
  11.815 +		efx_fini_debugfs_channel(channel);
  11.816 +}
  11.817 +
  11.818 +/**
  11.819 + * efx_init_debugfs_nic - create debugfs directory for NIC
  11.820 + * @efx:		Efx NIC
  11.821 + *
  11.822 + * Create debugfs directory containing parameter-files for @efx,
  11.823 + * and a subdirectory "errors" containing per-NIC error counts.
  11.824 + * Return a negative error code or 0 on success.  The directories
  11.825 + * must be cleaned up using efx_fini_debugfs_nic().
  11.826 + */
  11.827 +int efx_init_debugfs_nic(struct efx_nic *efx)
  11.828 +{
  11.829 +	int rc;
  11.830 +
  11.831 +	/* Create directory */
  11.832 +	efx->debug_dir = debugfs_create_dir(pci_name(efx->pci_dev),
  11.833 +					    efx_debug_cards);
  11.834 +	if (!efx->debug_dir)
  11.835 +		goto err_mem;
  11.836 +
  11.837 +	/* Create errors directory */
  11.838 +	efx->errors.debug_dir = debugfs_create_dir("errors", efx->debug_dir);
  11.839 +	if (!efx->errors.debug_dir)
  11.840 +		goto err_mem;
  11.841 +
  11.842 +	/* Create files */
  11.843 +	rc = efx_init_debugfs_files(efx->debug_dir,
  11.844 +				    efx_debugfs_nic_parameters, (void *)efx);
  11.845 +	if (rc)
  11.846 +		goto err;
  11.847 +	rc = efx_init_debugfs_files(efx->errors.debug_dir,
  11.848 +				    efx_debugfs_nic_error_parameters,
  11.849 +				    (void *)&efx->errors);
  11.850 +	if (rc)
  11.851 +		goto err;
  11.852 +
  11.853 +	return 0;
  11.854 +
  11.855 + err_mem:
  11.856 +	rc = -ENOMEM;
  11.857 + err:
  11.858 +	efx_fini_debugfs_nic(efx);
  11.859 +	return rc;
  11.860 +}
  11.861 +
/**
 * efx_fini_debugfs_nic - remove debugfs directories for NIC
 * @efx:		Efx NIC
 *
 * Remove debugfs directories created for @efx by efx_init_debugfs_nic().
 * The "errors" subdirectory is removed before its parent directory.
 * Both directory pointers are reset to NULL so a repeat call is a
 * harmless no-op.
 */
void efx_fini_debugfs_nic(struct efx_nic *efx)
{
	/* Remove the "errors" subdirectory first (child before parent) */
	efx_fini_debugfs_dir(efx->errors.debug_dir,
			     efx_debugfs_nic_error_parameters, NULL);
	efx->errors.debug_dir = NULL;
	efx_fini_debugfs_dir(efx->debug_dir, efx_debugfs_nic_parameters, NULL);
	efx->debug_dir = NULL;
}
  11.876 +
/**
 * efx_init_debugfs - create debugfs directories for sfc driver
 *
 * Create debugfs directories "sfc" and "sfc/cards".  This must be
 * called before any of the other functions that create debugfs
 * directories.  Return a negative error code or 0 on success.  The
 * directories must be cleaned up using efx_fini_debugfs().
 */
int efx_init_debugfs(void)
{
	/* Create top-level directory: in debugfs proper when available,
	 * otherwise fall back to an entry under /proc/driver */
#if defined(EFX_USE_DEBUGFS)
	efx_debug_root = debugfs_create_dir("sfc", NULL);
#else
	efx_debug_root = proc_mkdir("sfc", proc_root_driver);
#endif
	if (!efx_debug_root)
		goto err;

	/* Create "cards" directory to hold the per-NIC subdirectories */
	efx_debug_cards = debugfs_create_dir("cards", efx_debug_root);
	if (!efx_debug_cards)
		goto err;

#if defined(EFX_USE_DEBUGFS)
	/* Create compatibility sym-link /proc/driver/sfc pointing at the
	 * debugfs tree, for tools that expect the procfs location */
	if (!proc_symlink("sfc", proc_root_driver, "/sys/kernel/debug/sfc"))
		goto err;
#endif
	return 0;

 err:
	/* Tear down whatever was created; all failures map to -ENOMEM */
	efx_fini_debugfs();
	return -ENOMEM;
}
  11.912 +
/**
 * efx_fini_debugfs - remove debugfs directories for sfc driver
 *
 * Remove directories created by efx_init_debugfs().  Also used as the
 * error-path cleanup for efx_init_debugfs(), so it must tolerate a
 * partially-constructed state (debugfs_remove() ignores NULL).
 */
void efx_fini_debugfs(void)
{
#if defined(EFX_USE_DEBUGFS)
	/* Remove the /proc/driver/sfc compatibility sym-link */
	remove_proc_entry("sfc", proc_root_driver);
#endif
	/* Remove "cards" before its parent directory */
	debugfs_remove(efx_debug_cards);
	efx_debug_cards = NULL;
	debugfs_remove(efx_debug_root);
	efx_debug_root = NULL;
}
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/drivers/net/sfc/debugfs.h	Mon Feb 18 10:29:07 2008 +0000
    12.3 @@ -0,0 +1,172 @@
    12.4 +/****************************************************************************
    12.5 + * Driver for Solarflare network controllers
    12.6 + *           (including support for SFE4001 10GBT NIC)
    12.7 + *
    12.8 + * Copyright 2005-2006: Fen Systems Ltd.
    12.9 + * Copyright 2006-2008: Solarflare Communications Inc,
   12.10 + *                      9501 Jeronimo Road, Suite 250,
   12.11 + *                      Irvine, CA 92618, USA
   12.12 + *
   12.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   12.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   12.15 + *
   12.16 + * This program is free software; you can redistribute it and/or modify it
   12.17 + * under the terms of the GNU General Public License version 2 as published
   12.18 + * by the Free Software Foundation, incorporated herein by reference.
   12.19 + *
   12.20 + * This program is distributed in the hope that it will be useful,
   12.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   12.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12.23 + * GNU General Public License for more details.
   12.24 + *
   12.25 + * You should have received a copy of the GNU General Public License
   12.26 + * along with this program; if not, write to the Free Software
   12.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   12.28 + ****************************************************************************
   12.29 + */
   12.30 +
#ifndef EFX_DEBUGFS_H
#define EFX_DEBUGFS_H

#ifdef CONFIG_SFC_DEBUGFS

struct seq_file;

/* Describes one debugfs (or procfs fallback) parameter file:
 * the file name, the byte offset of the backing field within its
 * container structure, and the function that formats the field. */
struct efx_debugfs_parameter {
	const char *name;			/* file name */
	size_t offset;				/* field offset in container */
	int (*reader)(struct seq_file *, void *); /* field formatter */
};

/* Directory create/destroy entry points; each init function must be
 * paired with the corresponding fini function. */
extern void efx_fini_debugfs_child(struct dentry *dir, const char *name);
extern int efx_init_debugfs_netdev(struct net_device *net_dev);
extern void efx_fini_debugfs_netdev(struct net_device *net_dev);
extern int efx_init_debugfs_port(struct efx_nic *efx);
extern void efx_fini_debugfs_port(struct efx_nic *efx);
extern int efx_init_debugfs_nic(struct efx_nic *efx);
extern void efx_fini_debugfs_nic(struct efx_nic *efx);
extern int efx_init_debugfs_channels(struct efx_nic *efx);
extern void efx_fini_debugfs_channels(struct efx_nic *efx);
extern int efx_init_debugfs(void);
extern void efx_fini_debugfs(void);
extern int efx_extend_debugfs_port(struct efx_nic *efx,
				   void *context,
				   struct efx_debugfs_parameter *params);
extern void efx_trim_debugfs_port(struct efx_nic *efx,
				  struct efx_debugfs_parameter *params);

/* Helpers for handling debugfs entry reads */
extern int efx_debugfs_read_uint(struct seq_file *, void *);
extern int efx_debugfs_read_string(struct seq_file *, void *);
extern int efx_debugfs_read_int(struct seq_file *, void *);
extern int efx_debugfs_read_atomic(struct seq_file *, void *);
extern int efx_debugfs_read_dword(struct seq_file *, void *);

/* Handy macros for filling out parameters */

/* Initialiser for a struct efx_debugfs_parameter with type-checking.
 * The conditional whose two arms are identical exists purely as a
 * compile-time type check: comparing a field_type pointer against the
 * address of the field draws a compiler diagnostic if the field is not
 * actually of field_type. */
#define EFX_PARAMETER(container_type, parameter, field_type,		\
			reader_function) {				\
	.name = #parameter,						\
	.offset = ((((field_type *) 0) ==				\
		    &((container_type *) 0)->parameter) ?		\
		   offsetof(container_type, parameter) :		\
		   offsetof(container_type, parameter)),		\
	.reader = reader_function,					\
}

/* Likewise, but the file name is not taken from the field name */
#define EFX_NAMED_PARAMETER(_name, container_type, parameter, field_type, \
				reader_function) {			\
	.name = #_name,							\
	.offset = ((((field_type *) 0) ==				\
		    &((container_type *) 0)->parameter) ?		\
		   offsetof(container_type, parameter) :		\
		   offsetof(container_type, parameter)),		\
	.reader = reader_function,					\
}

/* Likewise, but with one file for each of 4 lanes.  Expands to FOUR
 * initialisers, so it must be used inside an array initialiser.  The
 * type check compares without '&' because an array field decays to a
 * pointer to its element type. */
#define EFX_PER_LANE_PARAMETER(prefix, suffix, container_type, parameter, \
				field_type, reader_function) {		\
	.name = prefix "0" suffix,					\
	.offset = ((((field_type *) 0) ==				\
		      ((container_type *) 0)->parameter) ?		\
		    offsetof(container_type, parameter[0]) :		\
		    offsetof(container_type, parameter[0])),		\
	.reader = reader_function,					\
},  {									\
	.name = prefix "1" suffix,					\
	.offset = offsetof(container_type, parameter[1]),		\
	.reader = reader_function,					\
}, {									\
	.name = prefix "2" suffix,					\
	.offset = offsetof(container_type, parameter[2]),		\
	.reader = reader_function,					\
}, {									\
	.name = prefix "3" suffix,					\
	.offset = offsetof(container_type, parameter[3]),		\
	.reader = reader_function,					\
}

/* A string parameter (char array embedded in the structure, not a
 * pointer — the type check relies on array-to-pointer decay) */
#define EFX_STRING_PARAMETER(container_type, parameter) {	\
	.name = #parameter,					\
	.offset = ((((char *) 0) ==				\
		    ((container_type *) 0)->parameter) ?	\
		   offsetof(container_type, parameter) :	\
		   offsetof(container_type, parameter)),	\
	.reader = efx_debugfs_read_string,			\
}

/* An unsigned integer parameter */
#define EFX_UINT_PARAMETER(container_type, parameter)		\
	EFX_PARAMETER(container_type, parameter,		\
		      unsigned int, efx_debugfs_read_uint)

/* A dword parameter */
#define EFX_DWORD_PARAMETER(container_type, parameter)		\
	EFX_PARAMETER(container_type, parameter,		\
		      efx_dword_t, efx_debugfs_read_dword)

/* An atomic_t parameter */
#define EFX_ATOMIC_PARAMETER(container_type, parameter)		\
	EFX_PARAMETER(container_type, parameter,		\
		      atomic_t, efx_debugfs_read_atomic)

/* An integer parameter */
#define EFX_INT_PARAMETER(container_type, parameter)		\
	EFX_PARAMETER(container_type, parameter,		\
		      int, efx_debugfs_read_int)

#else /* !CONFIG_SFC_DEBUGFS */

/* Without CONFIG_SFC_DEBUGFS, all entry points collapse to no-ops
 * that report success, so callers need no conditional compilation. */
static inline int efx_init_debugfs_netdev(struct net_device *net_dev)
{
	return 0;
}
static inline void efx_fini_debugfs_netdev(struct net_device *net_dev) {}
static inline int efx_init_debugfs_port(struct efx_nic *efx)
{
	return 0;
}
static inline void efx_fini_debugfs_port(struct efx_nic *efx) {}
static inline int efx_init_debugfs_nic(struct efx_nic *efx)
{
	return 0;
}
static inline void efx_fini_debugfs_nic(struct efx_nic *efx) {}
static inline int efx_init_debugfs_channels(struct efx_nic *efx)
{
	return 0;
}
static inline void efx_fini_debugfs_channels(struct efx_nic *efx) {}
static inline int efx_init_debugfs(void)
{
	return 0;
}
static inline void efx_fini_debugfs(void) {}

#endif /* CONFIG_SFC_DEBUGFS */

#endif /* EFX_DEBUGFS_H */
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/drivers/net/sfc/driverlink.c	Mon Feb 18 10:29:07 2008 +0000
    13.3 @@ -0,0 +1,544 @@
    13.4 +/****************************************************************************
    13.5 + * Driver for Solarflare network controllers
    13.6 + *           (including support for SFE4001 10GBT NIC)
    13.7 + *
    13.8 + * Copyright 2005:      Fen Systems Ltd.
    13.9 + * Copyright 2005-2008: Solarflare Communications Inc,
   13.10 + *                      9501 Jeronimo Road, Suite 250,
   13.11 + *                      Irvine, CA 92618, USA
   13.12 + *
   13.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   13.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   13.15 + *
   13.16 + * This program is free software; you can redistribute it and/or modify it
   13.17 + * under the terms of the GNU General Public License version 2 as published
   13.18 + * by the Free Software Foundation, incorporated herein by reference.
   13.19 + *
   13.20 + * This program is distributed in the hope that it will be useful,
   13.21 + * but WITHOUT ANY WARRANTY without even the implied warranty of
   13.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   13.23 + * GNU General Public License for more details.
   13.24 + *
   13.25 + * You should have received a copy of the GNU General Public License
   13.26 + * along with this program; if not, write to the Free Software
   13.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   13.28 + ****************************************************************************
   13.29 + */
   13.30 +
   13.31 +#include <linux/module.h>
   13.32 +#include <linux/list.h>
   13.33 +#include <linux/skbuff.h>
   13.34 +#include <linux/rtnetlink.h>
   13.35 +#include "net_driver.h"
   13.36 +#include "efx.h"
   13.37 +#include "driverlink.h"
   13.38 +
/* Driverlink mutex
 * This mutex must be held for any operation that modifies any of
 * the driverlink lists.
 */
static DEFINE_MUTEX(efx_driverlink_lock);

/* List of all registered drivers */
static LIST_HEAD(efx_driver_list);

/* List of all registered Efx ports */
static LIST_HEAD(efx_port_list);

/* Driver link handle used internally to track devices.
 * One handle exists per (driver, NIC) pairing; clients only ever see
 * the embedded efx_dl_device. */
struct efx_dl_handle {
	/* The efx_dl_device consumers see (must stay recoverable via
	 * container_of, see efx_dl_handle()) */
	struct efx_dl_device efx_dev;
	/* The efx_nic providers provide */
	struct efx_nic *efx;
	/* Linkage in the owning NIC's dl_device_list */
	struct list_head port_node;
	/* Linkage in the owning driver's device_list */
	struct list_head driver_node;
};
   13.62 +
/* Map a consumer-visible efx_dl_device back to its containing
 * efx_dl_handle. */
static struct efx_dl_handle *efx_dl_handle(struct efx_dl_device *efx_dev)
{
	return container_of(efx_dev, struct efx_dl_handle, efx_dev);
}
   13.68 +
   13.69 +/* Remove an Efx device
   13.70 + * You must hold the efx_driverlink_lock before calling this
   13.71 + * function.
   13.72 + */
   13.73 +static void efx_dl_del_device(struct efx_dl_device *efx_dev)
   13.74 +{
   13.75 +	struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev);
   13.76 +
   13.77 +	EFX_INFO(efx_handle->efx, "%s driverlink client unregistering\n",
   13.78 +		 efx_dev->driver->name);
   13.79 +
   13.80 +	/* Call driver's remove() routine */
   13.81 +	if (efx_dev->driver->remove)
   13.82 +		efx_dev->driver->remove(efx_dev);
   13.83 +
   13.84 +	/* Remove handle from per-driver and per-NIC lists */
   13.85 +	list_del(&efx_handle->driver_node);
   13.86 +	list_del(&efx_handle->port_node);
   13.87 +
   13.88 +	/* Free efx_handle structure */
   13.89 +	kfree(efx_handle);
   13.90 +}
   13.91 +
   13.92 +/* Try to add an Efx device
   13.93 + * Attempt to probe the given device with the driver, creating a
   13.94 + * new efx_dl_device. If the probe routine fails, because the driver
   13.95 + * doesn't support this port, then the efx_dl_device is destroyed,
   13.96 + */
   13.97 +static void efx_dl_try_add_device(struct efx_nic *efx,
   13.98 +				  struct efx_dl_driver *driver)
   13.99 +{
  13.100 +	struct efx_dl_handle *efx_handle;
  13.101 +	struct efx_dl_device *efx_dev;
  13.102 +	int rc;
  13.103 +
  13.104 +	/* Allocate and initialise new efx_dl_device structure */
  13.105 +	efx_handle = kzalloc(sizeof(*efx_handle), GFP_KERNEL);
  13.106 +	efx_dev = &efx_handle->efx_dev;
  13.107 +	efx_handle->efx = efx;
  13.108 +	efx_dev->driver = driver;
  13.109 +	efx_dev->pci_dev = efx->pci_dev;
  13.110 +	INIT_LIST_HEAD(&efx_handle->port_node);
  13.111 +	INIT_LIST_HEAD(&efx_handle->driver_node);
  13.112 +
  13.113 +	/* Attempt driver probe */
  13.114 +	rc = driver->probe(efx_dev, efx->net_dev,
  13.115 +			   efx->dl_info, efx->silicon_rev);
  13.116 +	if (rc)
  13.117 +		goto fail;
  13.118 +
  13.119 +	/* Add device to per-driver and per-NIC lists */
  13.120 +	list_add_tail(&efx_handle->driver_node, &driver->device_list);
  13.121 +	list_add_tail(&efx_handle->port_node, &efx->dl_device_list);
  13.122 +
  13.123 +	EFX_INFO(efx, "%s driverlink client registered\n", driver->name);
  13.124 +	return;
  13.125 +
  13.126 + fail:
  13.127 +	EFX_INFO(efx, "%s driverlink client skipped\n", driver->name);
  13.128 +
  13.129 +	kfree(efx_dev);
  13.130 +}
  13.131 +
  13.132 +/**
  13.133 + * efx_dl_unregister_driver - unregister an Efx device driver
  13.134 + * @driver:		Efx driverlink driver
  13.135 + *
  13.136 + * Unregisters an Efx driver.  The driver's remove() method will be
  13.137 + * called for all Efx devices currently claimed by the driver.
  13.138 + */
  13.139 +void efx_dl_unregister_driver(struct efx_dl_driver *driver)
  13.140 +{
  13.141 +	struct efx_dl_handle *efx_handle, *efx_handle_n;
  13.142 +
  13.143 +	printk(KERN_INFO "Efx driverlink unregistering %s driver\n",
  13.144 +		 driver->name);
  13.145 +
  13.146 +	/* Acquire lock.  We can't return failure, so have to use
  13.147 +	 * down() instead of down_interruptible()
  13.148 +	 */
  13.149 +	mutex_lock(&efx_driverlink_lock);
  13.150 +
  13.151 +	/* Remove all devices claimed by the driver */
  13.152 +	list_for_each_entry_safe(efx_handle, efx_handle_n,
  13.153 +				 &driver->device_list, driver_node)
  13.154 +		efx_dl_del_device(&efx_handle->efx_dev);
  13.155 +
  13.156 +	/* Remove driver from driver list */
  13.157 +	list_del(&driver->node);
  13.158 +
  13.159 +	/* Release lock */
  13.160 +	mutex_unlock(&efx_driverlink_lock);
  13.161 +}
  13.162 +EXPORT_SYMBOL(efx_dl_unregister_driver);
  13.163 +
  13.164 +/**
  13.165 + * efx_dl_register_driver - register an Efx device driver
  13.166 + * @driver:		Efx driverlink driver
  13.167 + *
  13.168 + * Registers a new Efx driver.  The driver's probe() method will be
  13.169 + * called for all Efx NICs currently registered.
  13.170 + *
  13.171 + * Return a negative error code or 0 on success.
  13.172 + */
  13.173 +int efx_dl_register_driver(struct efx_dl_driver *driver)
  13.174 +{
  13.175 +	struct efx_nic *efx;
  13.176 +	int rc;
  13.177 +
  13.178 +	printk(KERN_INFO "Efx driverlink registering %s driver\n",
  13.179 +		 driver->name);
  13.180 +
  13.181 +	/* Initialise driver list structures */
  13.182 +	INIT_LIST_HEAD(&driver->node);
  13.183 +	INIT_LIST_HEAD(&driver->device_list);
  13.184 +
  13.185 +	/* Acquire lock */
  13.186 +	rc = mutex_lock_interruptible(&efx_driverlink_lock);
  13.187 +	if (rc)
  13.188 +		return rc;
  13.189 +
  13.190 +	/* Add driver to driver list */
  13.191 +	list_add_tail(&driver->node, &efx_driver_list);
  13.192 +
  13.193 +	/* Feed all existing devices to driver */
  13.194 +	list_for_each_entry(efx, &efx_port_list, dl_node)
  13.195 +		efx_dl_try_add_device(efx, driver);
  13.196 +
  13.197 +	/* Release locks */
  13.198 +	mutex_unlock(&efx_driverlink_lock);
  13.199 +
  13.200 +	return 0;
  13.201 +}
  13.202 +EXPORT_SYMBOL(efx_dl_register_driver);
  13.203 +
  13.204 +void efx_dl_unregister_nic(struct efx_nic *efx)
  13.205 +{
  13.206 +	struct efx_dl_handle *efx_handle, *efx_handle_n;
  13.207 +
  13.208 +	if (!efx)
  13.209 +		return;
  13.210 +
  13.211 +	/* Acquire lock.  We can't return failure, so have to use
  13.212 +	 * down() instead of down_interruptible()
  13.213 +	 */
  13.214 +	mutex_lock(&efx_driverlink_lock);
  13.215 +
  13.216 +	/* Remove all devices related to this NIC */
  13.217 +	list_for_each_entry_safe_reverse(efx_handle, efx_handle_n,
  13.218 +					 &efx->dl_device_list,
  13.219 +					 port_node)
  13.220 +		efx_dl_del_device(&efx_handle->efx_dev);
  13.221 +
  13.222 +	/* Remove port from port list */
  13.223 +	list_del(&efx->dl_node);
  13.224 +
  13.225 +	/* Release lock */
  13.226 +	mutex_unlock(&efx_driverlink_lock);
  13.227 +}
  13.228 +
  13.229 +int efx_dl_register_nic(struct efx_nic *efx)
  13.230 +{
  13.231 +	struct efx_dl_driver *driver;
  13.232 +	int rc;
  13.233 +
  13.234 +	/* Acquire lock */
  13.235 +	rc = mutex_lock_interruptible(&efx_driverlink_lock);
  13.236 +	if (rc)
  13.237 +		return rc;
  13.238 +
  13.239 +	/* Add port to port list */
  13.240 +	list_add_tail(&efx->dl_node, &efx_port_list);
  13.241 +
  13.242 +	/* Feed port to all existing drivers */
  13.243 +	list_for_each_entry(driver, &efx_driver_list, node)
  13.244 +		efx_dl_try_add_device(efx, driver);
  13.245 +
  13.246 +	/* Release lock */
  13.247 +	mutex_unlock(&efx_driverlink_lock);
  13.248 +
  13.249 +	return 0;
  13.250 +}
  13.251 +
/*
 * Dummy callback implementations.
 *
 * To avoid a branch point on the fast-path, the callbacks are always
 * implemented - they are never NULL.  These defaults stand in for any
 * hook a client leaves unset.
 */
#if defined(EFX_USE_FASTCALL)
static enum efx_veto fastcall
#else
static enum efx_veto
#endif
efx_dummy_tx_packet_callback(struct efx_dl_device *efx_dev, struct sk_buff *skb)
{
	/* Never veto the packet */
	return EFX_ALLOW_PACKET;
}

#if defined(EFX_USE_FASTCALL)
static enum efx_veto fastcall
#else
static enum efx_veto
#endif
efx_dummy_rx_packet_callback(struct efx_dl_device *efx_dev,
			     const char *pkt_buf, int len)
{
	/* Never veto the packet */
	return EFX_ALLOW_PACKET;
}

/* Ignore link state changes */
static void
efx_dummy_link_change_callback(struct efx_dl_device *efx_dev, int link_up)
{
}

/* Always permit MTU change requests */
static int
efx_dummy_request_mtu_callback(struct efx_dl_device *efx_dev, int new_mtu)
{
	/* Always allow */
	return 0;
}

/* Ignore MTU change notifications */
static void
efx_dummy_mtu_changed_callback(struct efx_dl_device *efx_dev, int mtu)
{
	return;
}

/* Ignore hardware events */
static void efx_dummy_event_callback(struct efx_dl_device *efx_dev, void *event)
{
	return;
}

/* Default (no-op) implementation for every hook; used to fill any slot
 * a client does not register, and restored on unregistration. */
struct efx_dl_callbacks efx_default_callbacks = {
	.tx_packet	= efx_dummy_tx_packet_callback,
	.rx_packet	= efx_dummy_rx_packet_callback,
	.link_change	= efx_dummy_link_change_callback,
	.request_mtu	= efx_dummy_request_mtu_callback,
	.mtu_changed	= efx_dummy_mtu_changed_callback,
	.event		= efx_dummy_event_callback,
};
  13.312 +
  13.313 +#define EFX_DL_UNREGISTER_CALLBACK(_port, _dev, _member)		\
  13.314 +	do {								\
  13.315 +		BUG_ON((_port)->dl_cb_dev._member != (_dev));		\
  13.316 +		(_port)->dl_cb._member =				\
  13.317 +			efx_default_callbacks._member;			\
  13.318 +		(_port)->dl_cb_dev._member = NULL;			\
  13.319 +	} while (0)
  13.320 +
  13.321 +
  13.322 +#define EFX_DL_REGISTER_CALLBACK(_port, _dev, _from, _member)		\
  13.323 +	if ((_from)->_member) {						\
  13.324 +		BUG_ON((_port)->dl_cb_dev._member != NULL);		\
  13.325 +		(_port)->dl_cb._member = (_from)->_member;		\
  13.326 +		(_port)->dl_cb_dev._member = _dev;			\
  13.327 +	}
  13.328 +
  13.329 +/**
  13.330 + * efx_dl_unregister_callbacks - unregister callbacks for an Efx NIC
  13.331 + * @efx_dev:		Efx driverlink device
  13.332 + * @callbacks:		Callback list
  13.333 + *
  13.334 + * This removes a set of callbacks registered with
  13.335 + * efx_dl_register_callbacks().  It should be called as part of the
  13.336 + * client's remove() method.
  13.337 + *
  13.338 + * The net driver will ensure that all callback functions have
  13.339 + * returned to the net driver before efx_dl_unregister_callbacks()
  13.340 + * returns.  Note that the device itself may still be running when the
  13.341 + * client's remove() method is called.  The client must therefore
  13.342 + * unhook its callbacks using efx_dl_unregister_callbacks() and only
  13.343 + * then ensure that any delayed tasks triggered by callback methods
  13.344 + * (e.g. scheduled tasklets) have completed.
  13.345 + */
  13.346 +void efx_dl_unregister_callbacks(struct efx_dl_device *efx_dev,
  13.347 +				 struct efx_dl_callbacks *callbacks)
  13.348 +{
  13.349 +	struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev);
  13.350 +	struct efx_nic *efx = efx_handle->efx;
  13.351 +
  13.352 +	/* Suspend net driver operations */
  13.353 +	efx_suspend(efx);
  13.354 +
  13.355 +	EFX_INFO(efx, "removing callback hooks into %s driver\n",
  13.356 +		 efx_dev->driver->name);
  13.357 +
  13.358 +	if (callbacks->tx_packet)
  13.359 +		EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, tx_packet);
  13.360 +
  13.361 +	if (callbacks->rx_packet)
  13.362 +		EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, rx_packet);
  13.363 +
  13.364 +	if (callbacks->link_change)
  13.365 +		EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, link_change);
  13.366 +
  13.367 +	if (callbacks->request_mtu)
  13.368 +		EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, request_mtu);
  13.369 +
  13.370 +	if (callbacks->mtu_changed)
  13.371 +		EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, mtu_changed);
  13.372 +
  13.373 +	if (callbacks->event)
  13.374 +		EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, event);
  13.375 +
  13.376 +	/* Resume net driver operations */
  13.377 +	efx_resume(efx);
  13.378 +}
  13.379 +EXPORT_SYMBOL(efx_dl_unregister_callbacks);
  13.380 +
  13.381 +/**
  13.382 + * efx_dl_register_callbacks - register callbacks for an Efx NIC
  13.383 + * @efx_dev:		Efx driverlink device
  13.384 + * @callbacks:		Callback list
  13.385 + *
  13.386 + * This registers a set of callback functions with the net driver.
  13.387 + * These functions will be called at various key points to allow
  13.388 + * external code to monitor and/or modify the behaviour of the network
  13.389 + * driver.  Any of the callback function pointers may be %NULL if a
  13.390 + * callback is not required.  The intended user of this mechanism is
  13.391 + * the SFC char driver.
  13.392 + *
  13.393 + * This client should call efx_dl_register_callbacks() during its
  13.394 + * probe() method.  The client must ensure that it also calls
  13.395 + * efx_dl_unregister_callbacks() as part of its remove() method.
  13.396 + *
  13.397 + * Only one function may be registered for each callback per NIC.
  13.398 + * If a requested callback is already registered for this NIC, this
  13.399 + * function will return -%EBUSY.
  13.400 + *
  13.401 + * The device may already be running, so the client must be prepared
  13.402 + * for callbacks to be triggered immediately after calling
  13.403 + * efx_dl_register_callbacks().
  13.404 + *
  13.405 + * Return a negative error code or 0 on success.
  13.406 + */
  13.407 +int efx_dl_register_callbacks(struct efx_dl_device *efx_dev,
  13.408 +			      struct efx_dl_callbacks *callbacks)
  13.409 +{
  13.410 +	struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev);
  13.411 +	struct efx_nic *efx = efx_handle->efx;
  13.412 +	int rc = 0;
  13.413 +
  13.414 +	/* Suspend net driver operations */
  13.415 +	efx_suspend(efx);
  13.416 +
  13.417 +	/* Check that the requested callbacks are not already hooked. */
  13.418 +	if ((callbacks->tx_packet && efx->dl_cb_dev.tx_packet) ||
  13.419 +	    (callbacks->rx_packet && efx->dl_cb_dev.rx_packet) ||
  13.420 +	    (callbacks->link_change && efx->dl_cb_dev.link_change) ||
  13.421 +	    (callbacks->request_mtu && efx->dl_cb_dev.request_mtu) ||
  13.422 +	    (callbacks->mtu_changed && efx->dl_cb_dev.mtu_changed) ||
  13.423 +	    (callbacks->event && efx->dl_cb_dev.event)) {
  13.424 +		rc = -EBUSY;
  13.425 +		goto out;
  13.426 +	}
  13.427 +
  13.428 +	EFX_INFO(efx, "adding callback hooks to %s driver\n",
  13.429 +		 efx_dev->driver->name);
  13.430 +
  13.431 +	/* Hook in callbacks.  For maximum speed, we never check to
  13.432 +	 * see whether these are NULL before calling; therefore we
  13.433 +	 * must ensure that they are never NULL.  If the set we're
  13.434 +	 * being asked to hook in is sparse, we leave the default
  13.435 +	 * values in place for the empty hooks.
  13.436 +	 */
  13.437 +	EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, tx_packet);
  13.438 +	EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, rx_packet);
  13.439 +	EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, link_change);
  13.440 +	EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, request_mtu);
  13.441 +	EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, mtu_changed);
  13.442 +	EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, event);
  13.443 +
  13.444 + out:
  13.445 +	/* Resume net driver operations */
  13.446 +	efx_resume(efx);
  13.447 +
  13.448 +	return rc;
  13.449 +}
  13.450 +EXPORT_SYMBOL(efx_dl_register_callbacks);
  13.451 +
  13.452 +/**
  13.453 + * efx_dl_schedule_reset - schedule an Efx NIC reset
  13.454 + * @efx_dev:		Efx driverlink device
  13.455 + *
  13.456 + * This schedules a hardware reset for a short time in the future.  It
  13.457 + * can be called from any context, and so can be used when
  13.458 + * efx_dl_reset() cannot be called.
  13.459 + */
  13.460 +void efx_dl_schedule_reset(struct efx_dl_device *efx_dev)
  13.461 +{
  13.462 +	struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev);
  13.463 +	struct efx_nic *efx = efx_handle->efx;
  13.464 +
  13.465 +	efx_schedule_reset(efx, RESET_TYPE_ALL);
  13.466 +}
  13.467 +EXPORT_SYMBOL(efx_dl_schedule_reset);
  13.468 +
/*
 * Lock the driverlink layer before a reset
 * To avoid deadlock, efx_driverlink_lock needs to be acquired before
 * efx->suspend_lock.  Must be paired with efx_dl_reset_unlock.
 */
void efx_dl_reset_lock(void)
{
	mutex_lock(&efx_driverlink_lock);
}
  13.479 +
/*
 * Unlock the driverlink layer after a reset
 * This call must be matched against efx_dl_reset_lock.
 */
void efx_dl_reset_unlock(void)
{
	/* Release lock (the previous comment wrongly said "Acquire") */
	mutex_unlock(&efx_driverlink_lock);
}
  13.489 +
  13.490 +/*
  13.491 + * Suspend ready for reset
  13.492 + * This calls the reset_suspend method of all drivers registered to
  13.493 + * the specified NIC.  It must only be called between
  13.494 + * efx_dl_reset_lock and efx_dl_reset_unlock.
  13.495 + */
  13.496 +void efx_dl_reset_suspend(struct efx_nic *efx)
  13.497 +{
  13.498 +	struct efx_dl_handle *efx_handle;
  13.499 +	struct efx_dl_device *efx_dev;
  13.500 +
  13.501 +	BUG_ON(!mutex_is_locked(&efx_driverlink_lock));
  13.502 +
  13.503 +	/* Call suspend method of each driver in turn */
  13.504 +	list_for_each_entry_reverse(efx_handle,
  13.505 +				    &efx->dl_device_list,
  13.506 +				    port_node) {
  13.507 +		efx_dev = &efx_handle->efx_dev;
  13.508 +		if (efx_dev->driver->reset_suspend)
  13.509 +			efx_dev->driver->reset_suspend(efx_dev);
  13.510 +	}
  13.511 +}
  13.512 +
  13.513 +/*
  13.514 + * Resume after a reset
  13.515 + * This calls the reset_resume method of all drivers registered to the
  13.516 + * specified NIC.  It must only be called between efx_dl_reset_lock
  13.517 + * and efx_dl_reset_unlock.
  13.518 + */
  13.519 +void efx_dl_reset_resume(struct efx_nic *efx, int ok)
  13.520 +{
  13.521 +	struct efx_dl_handle *efx_handle;
  13.522 +	struct efx_dl_device *efx_dev;
  13.523 +
  13.524 +	BUG_ON(!mutex_is_locked(&efx_driverlink_lock));
  13.525 +
  13.526 +	/* Call resume method of each driver in turn */
  13.527 +	list_for_each_entry(efx_handle, &efx->dl_device_list,
  13.528 +			    port_node) {
  13.529 +		efx_dev = &efx_handle->efx_dev;
  13.530 +		if (efx_dev->driver->reset_resume)
  13.531 +			efx_dev->driver->reset_resume(efx_dev, ok);
  13.532 +	}
  13.533 +}
  13.534 +
  13.535 +/**
  13.536 + * efx_dl_get_nic - obtain the Efx NIC for the given driverlink device
  13.537 + * @efx_dev:		Efx driverlink device
  13.538 + *
  13.539 + * Get a pointer to the &struct efx_nic corresponding to
  13.540 + * @efx_dev.  This can be used by driverlink clients built along with
  13.541 + * the sfc driver, which may have intimate knowledge of its internals.
  13.542 + */
  13.543 +struct efx_nic *efx_dl_get_nic(struct efx_dl_device *efx_dev)
  13.544 +{
  13.545 +	return efx_dl_handle(efx_dev)->efx;
  13.546 +}
  13.547 +EXPORT_SYMBOL(efx_dl_get_nic);
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/drivers/net/sfc/driverlink.h	Mon Feb 18 10:29:07 2008 +0000
    14.3 @@ -0,0 +1,93 @@
    14.4 +/****************************************************************************
    14.5 + * Driver for Solarflare network controllers
    14.6 + *           (including support for SFE4001 10GBT NIC)
    14.7 + *
    14.8 + * Copyright 2005:      Fen Systems Ltd.
    14.9 + * Copyright 2006:      Solarflare Communications Inc,
   14.10 + *                      9501 Jeronimo Road, Suite 250,
   14.11 + *                      Irvine, CA 92618, USA
   14.12 + *
   14.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   14.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   14.15 + *
   14.16 + * This program is free software; you can redistribute it and/or modify it
   14.17 + * under the terms of the GNU General Public License version 2 as published
   14.18 + * by the Free Software Foundation, incorporated herein by reference.
   14.19 + *
   14.20 + * This program is distributed in the hope that it will be useful,
   14.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   14.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   14.23 + * GNU General Public License for more details.
   14.24 + *
   14.25 + * You should have received a copy of the GNU General Public License
   14.26 + * along with this program; if not, write to the Free Software
   14.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   14.28 + ****************************************************************************
   14.29 + */
   14.30 +
   14.31 +#ifndef EFX_DRIVERLINK_H
   14.32 +#define EFX_DRIVERLINK_H
   14.33 +
   14.34 +/* Forward declarations */
   14.35 +struct efx_dl_device;
   14.36 +struct efx_nic;
   14.37 +
   14.38 +/*
   14.39 + * Efx driverlink
   14.40 + *
   14.41 + * This header file defines the portions of the Efx driverlink
   14.42 + * interface that are used only within the sfc module.  It also
   14.43 + * declares efx_dl_get_nic(), which may be used by sfc_mtd
   14.44 + * and any other module built along with sfc.
   14.45 + */
   14.46 +
   14.47 +
   14.48 +/* Efx callback devices
   14.49 + *
   14.50 + * A list of the devices that own each callback. The partner to
   14.51 + * struct efx_dl_callbacks
   14.52 + */
   14.53 +struct efx_dl_cb_devices {
   14.54 +	/* Device owning the tx_packet callback */
   14.55 +	struct efx_dl_device *tx_packet;
   14.56 +	/* Device owning the rx_packet callback */
   14.57 +	struct efx_dl_device *rx_packet;
   14.58 +	/* Device owning the link_change callback. */
   14.59 +	struct efx_dl_device *link_change;
   14.60 +	/* Device owning the request_mtu callback. */
   14.61 +	struct efx_dl_device *request_mtu;
   14.62 +	/* Device owning the mtu_changed callback. */
   14.63 +	struct efx_dl_device *mtu_changed;
   14.64 +	/* Device owning the event callback. */
   14.65 +	struct efx_dl_device *event;
   14.66 +};
   14.67 +
   14.68 +/* No-op callbacks used for initialisation */
   14.69 +extern struct efx_dl_callbacks efx_default_callbacks;
   14.70 +
   14.71 +/* Macro used to invoke callbacks */
   14.72 +#define EFX_DL_CALLBACK(_port, _name, ...)				\
   14.73 +	(_port)->dl_cb._name((_port)->dl_cb_dev._name, __VA_ARGS__)
   14.74 +
   14.75 +/* Register an Efx NIC */
   14.76 +extern int efx_dl_register_nic(struct efx_nic *efx);
   14.77 +
   14.78 +/* Unregister an Efx NIC */
   14.79 +extern void efx_dl_unregister_nic(struct efx_nic *efx);
   14.80 +
   14.81 +/* Lock the driverlink layer prior to a reset */
   14.82 +extern void efx_dl_reset_lock(void);
   14.83 +
   14.84 +/* Unlock the driverlink layer following a reset */
   14.85 +extern void efx_dl_reset_unlock(void);
   14.86 +
   14.87 +/* Suspend all drivers prior to a hardware reset */
   14.88 +extern void efx_dl_reset_suspend(struct efx_nic *efx);
   14.89 +
   14.90 +/* Resume all drivers after a hardware reset */
   14.91 +extern void efx_dl_reset_resume(struct efx_nic *efx, int ok);
   14.92 +
   14.93 +/* Obtain the Efx NIC for the given driverlink device. */
   14.94 +extern struct efx_nic *efx_dl_get_nic(struct efx_dl_device *efx_dev);
   14.95 +
   14.96 +#endif /* EFX_DRIVERLINK_H */
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/drivers/net/sfc/driverlink_api.h	Mon Feb 18 10:29:07 2008 +0000
    15.3 @@ -0,0 +1,612 @@
    15.4 +/****************************************************************************
    15.5 + * Driver for Solarflare network controllers
    15.6 + *           (including support for SFE4001 10GBT NIC)
    15.7 + *
    15.8 + * Copyright 2005-2006: Fen Systems Ltd.
    15.9 + * Copyright 2005-2008: Solarflare Communications Inc,
   15.10 + *                      9501 Jeronimo Road, Suite 250,
   15.11 + *                      Irvine, CA 92618, USA
   15.12 + *
   15.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   15.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   15.15 + *
   15.16 + * This program is free software; you can redistribute it and/or modify it
   15.17 + * under the terms of the GNU General Public License version 2 as published
   15.18 + * by the Free Software Foundation, incorporated herein by reference.
   15.19 + *
   15.20 + * This program is distributed in the hope that it will be useful,
   15.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   15.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   15.23 + * GNU General Public License for more details.
   15.24 + *
   15.25 + * You should have received a copy of the GNU General Public License
   15.26 + * along with this program; if not, write to the Free Software
   15.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   15.28 + ****************************************************************************
   15.29 + */
   15.30 +
   15.31 +#ifndef EFX_DRIVERLINK_API_H
   15.32 +#define EFX_DRIVERLINK_API_H
   15.33 +
   15.34 +#include <linux/list.h> /* for struct list_head */
   15.35 +#if !defined(EFX_USE_FASTCALL)
   15.36 +	#include <linux/version.h>
   15.37 +	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
   15.38 +		#define EFX_USE_FASTCALL yes
   15.39 +		#include <linux/linkage.h>
   15.40 +	#endif
   15.41 +#endif
   15.42 +
   15.43 +/**
   15.44 + * DOC: Efx driverlink API
   15.45 + *
   15.46 + * This file must be included by any driver that wishes to attach to
   15.47 + * devices claimed by the Solarflare NIC driver (sfc). It allows separate
   15.48 + * kernel modules to expose other functionality offered by the NIC, with
   15.49 + * the sfc driver remaining in overall control.
   15.50 + *
   15.51 + * Overview:
   15.52 + *
   15.53 + * Driverlink clients define a &struct efx_dl_driver, and register
   15.54 + * this structure with the driverlink layer using
   15.55 + * efx_dl_register_driver(), which is exported by the sfc driver.
   15.56 + *
   15.57 + * The probe() routine of each driverlink client driver is called by
   15.58 + * the driverlink layer for each physical port in the system, after
   15.59 + * the sfc driver has performed start-of-day hardware initialisation
   15.60 + * and self-test. If ports are added or removed via pci hotplug then
   15.61 + * the &struct efx_dl_driver probe() or remove() routines are called
   15.62 + * as appropriate.
   15.63 + *
   15.64 + * If the port doesn't provide the necessary hardware resources for a
   15.65 + * client, then that client can return failure from its probe()
   15.67 + * routine. Information provided to the client driver at probe time
   15.68 + * includes the net device, device information list and silicon revision.
   15.68 + *
   15.69 + * Each probe() routine is given a unique &struct efx_dl_device per
   15.70 + * port, which means it can safely use the @priv member to store any
   15.71 + * useful state it needs. The probe routine also has the opportunity
   15.72 + * to provide a &struct efx_dl_callbacks via
   15.73 + * efx_dl_register_callbacks(), which allows the client to intercept
   15.74 + * the sfc driver's operations at strategic points.
   15.75 + *
   15.76 + * Occasionally, the underlying Efx device may need to be reset to
   15.77 + * recover from an error condition.  The client's reset_suspend() and
   15.78 + * reset_resume() methods [if provided] will be called to enable the
   15.79 + * client to suspend operations and preserve any state before the
   15.80 + * reset.  The client can itself request a reset using efx_dl_reset()
   15.81 + * or efx_dl_schedule_reset(), should it detect an error condition
   15.82 + * necessitating a reset.
   15.83 + *
   15.84 + * Example:
   15.85 + *
   15.86 + * The MTD driver (mtd.c) uses the driverlink layer.
   15.87 + */
   15.88 +
   15.89 +/* Forward declarations */
   15.90 +struct pci_dev;
   15.91 +struct net_device;
   15.92 +struct sk_buff;
   15.93 +struct efx_dl_device;
   15.94 +struct efx_dl_device_info;
   15.95 +
   15.96 +/*
   15.97 + * This is used to guard against the registration of driverlink
   15.98 + * clients using an incorrect version of the API.
   15.99 + */
  15.100 +#define EFX_DRIVERLINK_API_VERSION 1
  15.101 +
  15.102 +
  15.103 +/**
  15.104 + * struct efx_dl_driver - An Efx driverlink device driver
  15.105 + *
  15.106 + * This is the analogue of a struct pci_driver for a normal PCI
  15.107 + * driver.  Driverlink clients should register themselves using
  15.108 + * efx_dl_register_driver() at module initialisation, and deregister
  15.109 + * themselves using efx_dl_unregister_driver() at module exit.
  15.110 + *
  15.111 + * All calls to members of efx_dl_driver are serialised by a single
  15.112 + * semaphore, so you are allowed to sleep in these functions. Take care
  15.113 + * to not call driverlink methods from within these callbacks, otherwise
  15.114 + * a deadlock is possible.
  15.115 + *
  15.116 + * @name: Name of the driver
  15.117 + * @probe: Called when device added
  15.118 + * @remove: Called when device removed
  15.119 + * @reset_suspend: Called before device is reset
  15.120 + * @reset_resume: Called after device is reset
  15.121 + */
  15.122 +struct efx_dl_driver {
  15.123 +	const char *name;
  15.124 +
  15.125 +	/*
  15.126 +	 * probe - Handle device addition.
  15.127 +	 * @efx_dev:		Efx driverlink device
  15.128 +	 * @net_dev:		The net_dev relevant to this port
  15.129 +	 * @dev_info:		A linked list of device information.
  15.130 +	 * @silicon_rev:	Silicon revision name.
  15.131 +	 *
  15.132 +	 * This will be called after driverlink client registration for
  15.133 +	 * every port on the system, and for every port that appears
  15.134 +	 * thereafter via hotplug.
  15.135 +	 *
  15.136 +	 * The client may use either @efx_dev->pci_dev, the dev_info linked
  15.137 +	 * list of available driver information, or the silicon revision
  15.138 +	 * name to determine if they can support this port. If they can,
  15.139 +	 * they should return 0 to indicate the probe was successful. Any
  15.140 +	 * other return code indicates that the probe failed, and the
  15.141 +	 * @efx_dl_dev will be invalidated.
  15.142 +	 *
  15.143 +	 * The client should perform whatever initialisation it
  15.144 +	 * requires, and store a pointer to its private data in
  15.145 +	 * @efx_dl_dev->priv (which is not shared between clients).
  15.146 +	 * It may also wish to hook in a callbacks table using
  15.147 +	 * efx_dl_register_callbacks().
  15.148 +	 *
  15.149 +	 * Return a negative error code or 0 on success.
  15.150 +	 */
  15.151 +	int (*probe) (struct efx_dl_device *efx_dl_dev,
  15.152 +		      const struct net_device *net_dev,
  15.153 +		      const struct efx_dl_device_info *dev_info,
  15.154 +		      const char *silicon_rev);
  15.155 +
  15.156 +	/*
  15.157 +	 * remove - Handle device removal.
  15.158 +	 * @efx_dev:		Efx driverlink device
  15.159 +	 *
  15.160 +	 * This will be called at driver exit (or hotplug removal) for
  15.161 +	 * each registered driverlink client.
  15.162 +	 *
  15.163 +	 * The client must ensure that it has finished all operations
  15.164 +	 * using this device before returning from this method.  If it
  15.165 +	 * has hooked in a callbacks table using
  15.166 +	 * efx_dl_register_callbacks(), it must unhook it using
  15.167 +	 * efx_dl_unregister_callbacks(), and then ensure that all
  15.168 +	 * callback-triggered operations (e.g. scheduled tasklets)
  15.169 +	 * have completed before returning.  (It does not need to
  15.170 +	 * explicitly wait for callback methods to finish executing,
  15.171 +	 * since efx_dl_unregister_callbacks() will sleep until all
  15.172 +	 * callbacks have returned anyway.)
  15.173 +	 *
  15.174 +	 * Note that the device itself may not have been removed; it
  15.175 +	 * may be simply that the client is being unloaded
  15.176 +	 * via efx_dl_unregister_driver(). In this case other clients
  15.177 +	 * (and the sfc driver itself) will still be using the device,
  15.178 +	 * so the client cannot assume that the device itself is quiescent.
  15.179 +	 * In particular, callbacks may continue to be triggered at any
  15.180 +	 * point until efx_dl_unregister_callbacks() is called.
  15.181 +	 */
  15.182 +	void (*remove) (struct efx_dl_device *efx_dev);
  15.183 +
  15.184 +	/*
  15.185 +	 * reset_suspend - Suspend ready for reset.
  15.186 +	 * @efx_dev:		Efx driverlink device
  15.187 +	 *
  15.188 +	 * This method will be called immediately before a hardware
  15.189 +	 * reset (which may or may not have been initiated by the
  15.190 +	 * driverlink client).  This client must save any state that it
  15.191 +	 * will need to restore after the reset, and suspend all
  15.192 +	 * operations that might access the hardware.  It must not
  15.193 +	 * return until the client can guarantee to have stopped
  15.194 +	 * touching the hardware.
  15.195 +	 *
  15.196 +	 * It is guaranteed that callbacks will be inactive by the
  15.197 +	 * time this method is called; the driverlink layer will
  15.198 +	 * already have prevented new callbacks being made and waited
  15.199 +	 * for all callbacks functions to return before calling
  15.200 +	 * reset_suspend().  However, any delayed work scheduled by
  15.201 +	 * the callback functions (e.g. tasklets) may not yet have
  15.202 +	 * completed.
  15.203 +	 *
  15.204 +	 * This method is allowed to sleep, so waiting on tasklets,
  15.205 +	 * work queues etc. is permitted.  There will always be a
  15.206 +	 * corresponding call to the reset_resume() method, so it is
  15.207 +	 * safe to e.g. down a semaphore within reset_suspend() and up
  15.208 +	 * it within reset_resume().  (However, you obviously cannot
  15.209 +	 * do the same with a spinlock).
  15.210 +	 *
  15.211 +	 * Note that the reset operation may be being carried out in
  15.212 +	 * the context of scheduled work, so you cannot use
  15.213 +	 * flush_scheduled_work() to ensure that any work you may have
  15.214 +	 * scheduled has completed.
  15.215 +	 *
  15.216 +	 * During hardware reset, there is a chance of receiving
  15.217 +	 * spurious interrupts, so the client's ISR (if any) should be
  15.218 +	 * unhooked or otherwise disabled.
  15.219 +	 */
  15.220 +	void (*reset_suspend) (struct efx_dl_device *efx_dev);
  15.221 +
  15.222 +	/*
  15.223 +	 * reset_resume - Restore after a reset.
  15.224 +	 * @efx_dev:		Efx driverlink device
  15.225 +	 * @ok:			Reset success indicator
  15.226 +	 *
  15.227 +	 * This method will be called after a hardware reset.  There
  15.228 +	 * will always have been a corresponding call to the
  15.229 +	 * reset_suspend() method beforehand.
  15.230 +	 *
  15.231 +	 * If @ok is non-zero, the client should restore the state
  15.232 +	 * that it saved during the call to reset_suspend() and resume
  15.233 +	 * normal operations.
  15.234 +	 *
  15.235 +	 * If @ok is zero, the reset operation has failed and the
  15.236 +	 * hardware is currently in an unusable state.  In this case,
  15.237 +	 * the client should release any locks taken out by
  15.238 +	 * reset_suspend(), but should not take any other action; in
  15.239 +	 * particular, it must not access the hardware, nor resume
  15.240 +	 * normal operations.  The hardware is effectively dead at
  15.241 +	 * this point, and our sole aim is to avoid deadlocking or
  15.242 +	 * crashing the host.
  15.243 +	 *
  15.244 +	 * The driverlink layer will still be locked when
  15.245 +	 * reset_resume() is called, so the client may not call
  15.246 +	 * driverlink functions.  In particular, if the reset failed,
  15.247 +	 * the client must not call efx_dl_unregister_callbacks() at
  15.248 +	 * this point; it should wait until remove() is called.
  15.249 +	 */
  15.250 +	void (*reset_resume) (struct efx_dl_device *efx_dev, int ok);
  15.251 +
  15.252 +/* private: */
  15.253 +	struct list_head node;
  15.254 +	struct list_head device_list;
  15.255 +};
  15.256 +
  15.257 +/**
  15.258 + * DOC: Efx driverlink device information
  15.259 + *
  15.260 + * Each &struct efx_dl_device makes certain hardware resources visible
  15.261 + * to driverlink clients, and they describe which resources are
  15.262 + * available by passing a linked list of &struct efx_dl_device_info
  15.263 + * into the probe() routine.
  15.264 + *
  15.265 + * The driverlink client's probe function can iterate through the linked list,
  15.266 + * and provided that it understands the resources that are exported, it can
  15.267 + * choose to make use of them through an external interface.
  15.268 + */
  15.269 +
  15.270 +/**
  15.271 + * enum efx_dl_device_info_type - Device information identifier.
  15.272 + *
  15.273 + * Each distinct hardware resource API will have a member in this
  15.274 + * enumeration.
  15.275 + *
  15.276 + * @EFX_DL_FALCON_RESOURCES: Information type is &struct efx_dl_falcon_resources
  15.277 + */
  15.278 +enum efx_dl_device_info_type {
  15.279 +	/** Falcon resources available for export */
  15.280 +	EFX_DL_FALCON_RESOURCES = 0,
  15.281 +};
  15.282 +
  15.283 +/**
  15.284 + * struct efx_dl_device_info - device information structure
  15.285 + * @next: Link to next structure, if any
  15.286 + * @type: Type code for this structure
  15.287 + *
  15.288 + * This structure is embedded in other structures provided by the
  15.289 + * driverlink device provider, and implements a linked list of
  15.290 + * resources pertinent to a driverlink client.
  15.291 + *
  15.292 + * Example: &struct efx_dl_falcon_resources
  15.293 + */
  15.294 +struct efx_dl_device_info {
  15.295 +	struct efx_dl_device_info *next;
  15.296 +	enum efx_dl_device_info_type type;
  15.297 +};
  15.298 +
  15.299 +/**
  15.300 + * enum efx_dl_falcon_resource_flags - Falcon resource information flags.
  15.301 + *
  15.302 + * Flags that describe hardware variations for the described Falcon based port.
  15.303 + *
  15.304 + * @EFX_DL_FALCON_DUAL_FUNC: Port is dual-function.
  15.305 + *	Certain silicon revisions have two pci functions, and require
  15.306 + *	certain hardware resources to be accessed via the secondary
  15.307 + *	function. See the discussion of @pci_dev in &struct efx_dl_device
  15.308 + *	below.
  15.309 + * @EFX_DL_FALCON_USE_MSI: Port is initialised to use MSI/MSI-X interrupts.
  15.310 + *	Falcon supports traditional legacy interrupts and MSI/MSI-X
  15.311 + *	interrupts. Since the sfc driver supports either, as a run
  15.312 + *	time configuration, driverlink drivers need to be aware of which
  15.313 + *	one to use for their interrupting resources.
  15.314 + */
  15.315 +enum efx_dl_falcon_resource_flags {
  15.316 +	EFX_DL_FALCON_DUAL_FUNC = 0x1,
  15.317 +	EFX_DL_FALCON_USE_MSI = 0x2,
  15.318 +};
  15.319 +
  15.320 +/**
  15.321 + * struct efx_dl_falcon_resources - Falcon resource information.
  15.322 + *
  15.323 + * This structure describes Falcon hardware resources available for
  15.324 + * use by a driverlink driver.
  15.325 + *
  15.326 + * @hdr: Resource linked list header
  15.327 + * @biu_lock: Register access lock.
  15.328 + *	Some Falcon revisions require register access for configuration
  15.329 + *	registers to be serialised between ports and PCI functions.
  15.330 + *	The sfc driver will provide the appropriate lock semantics for
  15.331 + *	the underlying hardware.
  15.332 + * @buffer_table_min: First available buffer table entry
  15.333 + * @buffer_table_max: Last available buffer table entry + 1
  15.334 + * @evq_timer_min: First available event queue with timer
  15.335 + * @evq_timer_max: Last available event queue with timer + 1
  15.336 + * @evq_int_min: First available event queue with interrupt
  15.337 + * @evq_int_max: Last available event queue with interrupt + 1
  15.338 + * @rxq_min: First available RX queue
  15.339 + * @rxq_max: Last available RX queue + 1
  15.340 + * @txq_min: First available TX queue
  15.341 + * @txq_max: Last available TX queue + 1
  15.342 + * @flags: Hardware variation flags
  15.343 + */
  15.344 +struct efx_dl_falcon_resources {
  15.345 +	struct efx_dl_device_info hdr;
  15.346 +	spinlock_t *biu_lock;
  15.347 +	unsigned buffer_table_min, buffer_table_max;
  15.348 +	unsigned evq_timer_min, evq_timer_max;
  15.349 +	unsigned evq_int_min, evq_int_max;
  15.350 +	unsigned rxq_min, rxq_max;
  15.351 +	unsigned txq_min, txq_max;
  15.352 +	enum efx_dl_falcon_resource_flags flags;
  15.353 +};
  15.354 +
  15.355 +/**
  15.356 + * struct efx_dl_device - An Efx driverlink device.
  15.357 + *
  15.358 + * @pci_dev: Underlying PCI device.
  15.359 + *	This is the PCI device used by the sfc driver.  It will
  15.360 + *	already have been enabled for bus-mastering DMA etc.
  15.361 + * @priv: Driver private data
  15.362 + *	Driverlink clients can use this to store a pointer to their
  15.363 + *	internal per-device data structure. Each (driver, device)
  15.364 + *	tuple has a separate &struct efx_dl_device, so clients can use
  15.365 + *	this @priv field independently.
  15.366 + * @driver: Efx driverlink driver for this device
  15.367 + */
  15.368 +struct efx_dl_device {
  15.369 +	struct pci_dev *pci_dev;
  15.370 +	void *priv;
  15.371 +	struct efx_dl_driver *driver;
  15.372 +};
  15.373 +
  15.374 +/**
  15.375 + * enum efx_veto - Packet veto request flag.
  15.376 + *
  15.377 + * This is the return type for the rx_packet() and tx_packet() methods
  15.378 + * in &struct efx_dl_callbacks.
  15.379 + *
  15.380 + * @EFX_ALLOW_PACKET: Packet may be transmitted/received
  15.381 + * @EFX_VETO_PACKET: Packet must not be transmitted/received
  15.382 + */
  15.383 +enum efx_veto {
  15.384 +	EFX_ALLOW_PACKET = 0,
  15.385 +	EFX_VETO_PACKET = 1,
  15.386 +};
  15.387 +
  15.388 +/**
  15.389 + * struct efx_dl_callbacks - Efx callbacks
  15.390 + *
  15.391 + * These methods can be hooked in to the sfc driver via
  15.392 + * efx_dl_register_callbacks().  They allow clients to intercept and/or
  15.393 + * modify the behaviour of the sfc driver at predetermined points.
  15.394 + *
  15.395 + * For efficiency, only one client can hook each callback.
  15.396 + *
  15.397 + * Since these callbacks are called on packet transmit and reception
  15.398 + * paths, clients should avoid acquiring locks or allocating memory.
  15.399 + *
  15.400 + * @tx_packet: Called when packet is about to be transmitted
  15.401 + * @rx_packet: Called when packet is received
  15.402 + * @link_change: Called when link status has changed
  15.403 + * @request_mtu: Called to request MTU change
  15.404 + * @mtu_changed: Called when MTU has been changed
  15.405 + * @event: Called when NIC event is not handled by the sfc driver
  15.406 + */
  15.407 +struct efx_dl_callbacks {
  15.408 +	/*
  15.409 +	 * tx_packet - Packet about to be transmitted.
  15.410 +	 * @efx_dev:		Efx driverlink device
  15.411 +	 * @skb:		Socket buffer containing the packet to be sent
  15.412 +	 *
  15.413 +	 * This method is called for every packet about to be
  15.414 +	 * transmitted.  It allows the client to snoop on traffic sent
  15.415 +	 * via the kernel queues.
  15.416 +	 *
  15.417 +	 * The method may return %EFX_VETO_PACKET in order to prevent
  15.418 +	 * the sfc driver from transmitting the packet.  The net
  15.419 +	 * driver will then discard the packet.  If the client wishes
  15.420 +	 * to retain a reference to the packet data after returning
  15.421 +	 * %EFX_VETO_PACKET, it must obtain its own copy of the
  15.422 +	 * packet (e.g. by calling skb_get(), or by copying out the
  15.423 +	 * packet data to an external buffer).
  15.424 +	 *
  15.425 +	 * This method must return quickly, since it will have a
  15.426 +	 * direct performance impact upon the sfc driver.  It will be
  15.427 +	 * called with interrupts disabled (and may be called in
  15.428 +	 * interrupt context), so may not sleep. Since the sfc driver
  15.429 +	 * may have multiple TX queues, running in parallel, please avoid
   15.430 +	 * the need for locking if at all possible.
  15.431 +	 */
  15.432 +#if defined(EFX_USE_FASTCALL)
  15.433 +	enum efx_veto fastcall (*tx_packet) (struct efx_dl_device *efx_dev,
  15.434 +					     struct sk_buff *skb);
  15.435 +#else
  15.436 +	enum efx_veto (*tx_packet) (struct efx_dl_device *efx_dev,
  15.437 +				    struct sk_buff *skb);
  15.438 +#endif
  15.439 +
  15.440 +	/*
  15.441 +	 * rx_packet - Packet received.
  15.442 +	 * @efx_dev:		Efx driverlink device
  15.443 +	 * @pkt_hdr:		Pointer to received packet
  15.444 +	 * @pkt_len:		Length of received packet
  15.445 +	 *
  15.446 +	 * This method is called for every received packet.  It allows
  15.447 +	 * the client to snoop on traffic received by the kernel
  15.448 +	 * queues.
  15.449 +	 *
  15.450 +	 * The method may return %EFX_VETO_PACKET in order to prevent
  15.451 +	 * the sfc driver from passing the packet to the kernel.  The net
  15.452 +	 * driver will then discard the packet.
  15.453 +	 *
  15.454 +	 * This method must return quickly, since it will have a
  15.455 +	 * direct performance impact upon the sfc driver.  It is
  15.456 +	 * called in tasklet context, so may not sleep.  Note that
  15.457 +	 * there are per-channel tasklets in the sfc driver, so
  15.458 +	 * rx_packet() may be called simultaneously on different CPUs
  15.459 +	 * and must lock appropriately.  The design of the sfc driver
  15.460 +	 * allows for lockless operation between receive channels, so
  15.461 +	 * please avoid the need for locking if at all possible.
  15.462 +	 */
  15.463 +#if defined(EFX_USE_FASTCALL)
  15.464 +	enum efx_veto fastcall (*rx_packet) (struct efx_dl_device *efx_dev,
  15.465 +					     const char *pkt_hdr, int pkt_len);
  15.466 +#else
  15.467 +	enum efx_veto (*rx_packet) (struct efx_dl_device *efx_dev,
  15.468 +				    const char *pkt_hdr, int pkt_len);
  15.469 +#endif
  15.470 +
  15.471 +	/*
  15.472 +	 * link_change - Link status change.
  15.473 +	 * @efx_dev:		Efx driverlink device
  15.474 +	 * @link_up:		Link up indicator
  15.475 +	 *
  15.476 +	 * This method is called to inform the driverlink client
  15.477 +	 * whenever the PHY link status changes.  By the time this
  15.478 +	 * function is called, the MAC has already been reconfigured
  15.479 +	 * with the new autonegotiation settings from the PHY.
  15.480 +	 *
  15.481 +	 * This method is called from tasklet context and may not
  15.482 +	 * sleep.
  15.483 +	 */
  15.484 +	void (*link_change) (struct efx_dl_device *efx_dev, int link_up);
  15.485 +
  15.486 +	/*
   15.487 +	 * request_mtu - Request MTU change.
  15.488 +	 * @efx_dev:		Efx driverlink device
  15.489 +	 * @new_mtu:		Requested new MTU
  15.490 +	 *
  15.491 +	 * This method is called whenever the user requests an MTU
  15.492 +	 * change on an interface.  The client may return an error, in
  15.493 +	 * which case the MTU change request will be denied.  If the
  15.494 +	 * client returns success, the MAC will be reconfigured with a
   15.495 +	 * new maximum frame length equal to
  15.496 +	 * EFX_MAX_FRAME_LEN(new_mtu).  The client will be notified
  15.497 +	 * via the mtu_changed() method once the MAC has been
  15.498 +	 * reconfigured.
  15.499 +	 *
  15.500 +	 * The current MTU for the port can be obtained via
  15.501 +	 * efx_dl_get_netdev(efx_dl_device)->mtu.
  15.502 +	 *
  15.503 +	 * The sfc driver guarantees that no other callback functions
  15.504 +	 * are in progress when this method is called.  This function
  15.505 +	 * is called in process context and may sleep.
  15.506 +	 *
  15.507 +	 * Return a negative error code or 0 on success.
  15.508 +	 */
  15.509 +	int (*request_mtu) (struct efx_dl_device *efx_dev, int new_mtu);
  15.510 +
  15.511 +	/*
  15.512 +	 * mtu_changed - MTU has been changed.
  15.513 +	 * @efx_dev:		Efx driverlink device
  15.514 +	 * @mtu:		The new MTU
  15.515 +	 *
  15.516 +	 * This method is called once the MAC has been reconfigured
  15.517 +	 * with a new MTU.  There will have been a preceding call to
  15.518 +	 * request_mtu().
  15.519 +	 *
  15.520 +	 * The sfc driver guarantees that no other callback functions
  15.521 +	 * are in progress when this method is called.  This function
  15.522 +	 * is called in process context and may sleep.
  15.523 +	 */
  15.524 +	void (*mtu_changed) (struct efx_dl_device *efx_dev, int mtu);
  15.525 +
  15.526 +	/*
  15.527 +	 * event - Event callback.
  15.528 +	 * @efx_dev:		Efx driverlink device
  15.529 +	 * @p_event:		Pointer to event
  15.530 +	 *
  15.531 +	 * This method is called for each event that is not handled by the
  15.532 +	 * sfc driver.
  15.533 +	 */
  15.534 +	void (*event) (struct efx_dl_device *efx_dev, void *p_event);
  15.535 +};
  15.536 +
/* Include API version number in symbol used for efx_dl_register_driver */
#define efx_dl_stringify_1(x, y) x ## y
#define efx_dl_stringify_2(x, y) efx_dl_stringify_1(x, y)
/* Pasting the API version into the registration symbol makes a client
 * module built against a mismatched driverlink API fail at link/load
 * time rather than misbehave silently at run time. */
#define efx_dl_register_driver					\
	efx_dl_stringify_2(efx_dl_register_driver_api_ver_,	\
			   EFX_DRIVERLINK_API_VERSION)

/* Register/unregister a driverlink client driver */
extern int efx_dl_register_driver(struct efx_dl_driver *driver);

extern void efx_dl_unregister_driver(struct efx_dl_driver *driver);

/* Install/remove per-device callback hooks for a driverlink client */
extern int efx_dl_register_callbacks(struct efx_dl_device *efx_dev,
				     struct efx_dl_callbacks *callbacks);

extern void efx_dl_unregister_callbacks(struct efx_dl_device *efx_dev,
					struct efx_dl_callbacks *callbacks);

/* Ask the base driver to schedule a reset of the underlying NIC */
extern void efx_dl_schedule_reset(struct efx_dl_device *efx_dev);

/**
 * efx_dl_for_each_device_info_matching - iterate an efx_dl_device_info list
 * @_dev_info: Pointer to first &struct efx_dl_device_info
 * @_type: Type code to look for
 * @_info_type: Structure type corresponding to type code
 * @_field: Name of &struct efx_dl_device_info field in the type
 * @_p: Iterator variable
 *
 * Example:
 *
 * static int driver_dl_probe(... const struct efx_dl_device_info *dev_info ...)
 * {
 *        struct efx_dl_falcon_resources *res;
 *
 *        efx_dl_for_each_device_info_matching(dev_info,EFX_DL_FALCON_RESOURCES,
 *                                             struct efx_dl_falcon_resources,
 *                                             hdr, res) {
 *                if (res->flags & EFX_DL_FALCON_DUAL_FUNC) {
 *                          .....
 *                }
 *        }
 * }
 */
/* The trailing if/else arm filters out non-matching entries while
 * leaving the macro usable as a plain statement prefix for the body.
 * NOTE(review): loop termination relies on container_of() of a NULL
 * ->next pointer yielding NULL, i.e. on @_field being the FIRST member
 * of @_info_type — confirm the API mandates this layout. */
#define efx_dl_for_each_device_info_matching(_dev_info, _type,		\
					     _info_type, _field, _p)	\
	for ((_p) = container_of((_dev_info), _info_type, _field);	\
	     (_p) != NULL;						\
	     (_p) = container_of((_p)->_field.next, _info_type, _field))\
		if ((_p)->_field.type != _type)				\
			continue;					\
		else

/**
 * efx_dl_search_device_info - search an efx_dl_device_info list
 * @_dev_info: Pointer to first &struct efx_dl_device_info
 * @_type: Type code to look for
 * @_info_type: Structure type corresponding to type code
 * @_field: Name of &struct efx_dl_device_info member in this type
 * @_p: Result variable
 *
 * Example:
 *
 * static int driver_dl_probe(... const struct efx_dl_device_info *dev_info ...)
 * {
 *        struct efx_dl_falcon_resources *res;
 *
 *        efx_dl_search_device_info(dev_info, EFX_DL_FALCON_RESOURCES,
 *                                  struct efx_dl_falcon_resources, hdr, res);
 *        if (res != NULL) {
 *                 ....
 *        }
 * }
 */
/* Stops at the first match; @_p is left pointing at it (or NULL) */
#define efx_dl_search_device_info(_dev_info, _type, _info_type,		\
				  _field, _p)				\
	efx_dl_for_each_device_info_matching((_dev_info), (_type),	\
					     _info_type, _field, (_p))	\
		break;

#endif /* EFX_DRIVERLINK_API_H */
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/drivers/net/sfc/efx.c	Mon Feb 18 10:29:07 2008 +0000
    16.3 @@ -0,0 +1,2783 @@
    16.4 +/****************************************************************************
    16.5 + * Driver for Solarflare network controllers
    16.6 + *           (including support for SFE4001 10GBT NIC)
    16.7 + *
    16.8 + * Copyright 2005-2006: Fen Systems Ltd.
    16.9 + * Copyright 2005-2008: Solarflare Communications Inc,
   16.10 + *                      9501 Jeronimo Road, Suite 250,
   16.11 + *                      Irvine, CA 92618, USA
   16.12 + *
   16.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   16.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   16.15 + *
   16.16 + * This program is free software; you can redistribute it and/or modify it
   16.17 + * under the terms of the GNU General Public License version 2 as published
   16.18 + * by the Free Software Foundation, incorporated herein by reference.
   16.19 + *
   16.20 + * This program is distributed in the hope that it will be useful,
   16.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   16.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   16.23 + * GNU General Public License for more details.
   16.24 + *
   16.25 + * You should have received a copy of the GNU General Public License
   16.26 + * along with this program; if not, write to the Free Software
   16.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   16.28 + ****************************************************************************
   16.29 + */
   16.30 +
   16.31 +#include <linux/module.h>
   16.32 +#include <linux/pci.h>
   16.33 +#include <linux/netdevice.h>
   16.34 +#include <linux/etherdevice.h>
   16.35 +#include <linux/delay.h>
   16.36 +#include <linux/notifier.h>
   16.37 +#include <linux/ip.h>
   16.38 +#include <linux/tcp.h>
   16.39 +#include <linux/in.h>
   16.40 +#include <linux/crc32.h>
   16.41 +#include <linux/ethtool.h>
   16.42 +#include <asm/uaccess.h>
   16.43 +#include "net_driver.h"
   16.44 +#include "gmii.h"
   16.45 +#include "driverlink.h"
   16.46 +#include "selftest.h"
   16.47 +#include "debugfs.h"
   16.48 +#include "ethtool.h"
   16.49 +#include "tx.h"
   16.50 +#include "rx.h"
   16.51 +#include "efx.h"
   16.52 +#include "mdio_10g.h"
   16.53 +#include "falcon.h"
   16.54 +#include "workarounds.h"
   16.55 +
   16.56 +/**************************************************************************
   16.57 + *
   16.58 + * Type name strings
   16.59 + *
   16.60 + **************************************************************************
   16.61 + */
   16.62 +
/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]	   = "NONE",
	[LOOPBACK_MAC]	   = "MAC",
	[LOOPBACK_XGMII]   = "XGMII",
	[LOOPBACK_XGXS]	   = "XGXS",
	[LOOPBACK_XAUI]    = "XAUI",
	[LOOPBACK_PHY]	   = "PHY",
	[LOOPBACK_PHYXS]   = "PHY(XS)",
	[LOOPBACK_PCS]     = "PHY(PCS)",
	[LOOPBACK_PMAPMD]  = "PHY(PMAPMD)",
	[LOOPBACK_NETWORK] = "NETWORK",
};

/* Interrupt mode names (see INT_MODE())) */
const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
const char *efx_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};

/* PHY type names (see PHY_TYPE())) */
const unsigned int efx_phy_type_max = PHY_TYPE_MAX;
const char *efx_phy_type_names[] = {
	[PHY_TYPE_NONE]        = "none",
	[PHY_TYPE_CX4_RTMR]    = "Mysticom CX4",
	[PHY_TYPE_1G_ALASKA]   = "1G Alaska",
	[PHY_TYPE_10XPRESS]    = "SFC 10Xpress",
	[PHY_TYPE_XFP]         = "Quake XFP",
	[PHY_TYPE_PM8358]      = "PM8358 XAUI",
};

/* Reset type names, indexed by enum reset_type */
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]    = "INVISIBLE",
	[RESET_TYPE_ALL]          = "ALL",
	[RESET_TYPE_WORLD]        = "WORLD",
	[RESET_TYPE_DISABLE]      = "DISABLE",
	[RESET_TYPE_MONITOR]      = "MONITOR",
	[RESET_TYPE_INT_ERROR]    = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]  = "RX_RECOVERY",
};

/* NIC state names, indexed by enum nic_state */
const unsigned int efx_nic_state_max = STATE_MAX;
const char *efx_nic_state_names[] = {
	[STATE_INIT]          = "INIT",
	[STATE_RUNNING]       = "RUNNING",
	[STATE_FINI]          = "FINI",
	[STATE_RESETTING]     = "RESETTING",
	[STATE_DISABLED]      = "DISABLED",
};

/* Largest MTU the driver will accept (9KB jumbo frames) */
#define EFX_MAX_MTU (9 * 1024)
  16.118 +
  16.119 +
  16.120 +/**************************************************************************
  16.121 + *
  16.122 + * Configurable values
  16.123 + *
  16.124 + *************************************************************************/
  16.125 +
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us to
 * apply a higher level of interrupt moderation to TX events.
 *
 * This is forced to 0 for MSI interrupt mode as the interrupt vector
 * is not written
 */
static unsigned int separate_tx_and_rx_channels = 1;

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the hardware monitor will trigger a
 * reset when it detects an error condition.
 */
static unsigned int monitor_reset = 1;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash.  If true,
 * such devices will be initialised with a random locally-generated
 * MAC address.  This allows for loading the efx_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 *   512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* Ignore online self-test failures at load
 *
 * If set to 1, then the driver will not fail to load
 * if the online self-test fails. Useful only during testing
 */
static unsigned int allow_load_on_failure;

/* Set to 1 to enable the use of Message-Signalled Interrupts (MSI).
 * MSI will not work on some motherboards due to limitations of the
 * chipset, so the default is off.
 *
 * This is the highest capability interrupt mode to use
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* If set to 1, then the driver will perform an offline self test
 * when each interface first comes up. This will appear like the
 * interface bounces up and down
 */
static unsigned int onload_offline_selftest = 1;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU
 *
 * Default (0) means to use all CPUs in the system.  This parameter
 * can be set using "rss_cpus=xxx" when loading the module.
 *
 * Note: rss_cpus is the only tunable in this section exposed as a
 * module parameter; the rest are compile-time defaults.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
/* Forward declarations for teardown helpers referenced before their
 * definitions by the error paths below. */
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);
  16.227 +
  16.228 +/**************************************************************************
  16.229 + *
  16.230 + * Event queue processing
  16.231 + *
  16.232 + *************************************************************************/
  16.233 +
/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 *
 * Returns the portion of @rx_quota that was NOT consumed, so the
 * caller can compute the number of RX packets handled as
 * (quota - return value).
 */
static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	int rxdmaqs;
	struct efx_rx_queue *rx_queue;

	/* While a reset is pending, or the channel is disabled, do no
	 * work; returning the full quota reports zero packets processed. */
	if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return rx_quota;

	/* rxdmaqs is a bitmask of RX DMA queues that saw completions;
	 * falcon_process_eventq() decrements rx_quota in place. */
	rxdmaqs = falcon_process_eventq(channel, &rx_quota);

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	/* Refill descriptor rings as necessary */
	rx_queue = &channel->efx->rx_queue[0];
	while (rxdmaqs) {
		if (rxdmaqs & 0x01)
			efx_fast_push_rx_descriptors(rx_queue);
		rx_queue++;
		rxdmaqs >>= 1;
	}

	return rx_quota;
}
  16.272 +
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* Write to EVQ_RPTR_REG.  If a new event arrived in a race
	 * with finishing processing, a new interrupt will be raised.
	 */
	channel->work_pending = 0;
	smp_wmb(); /* Ensure channel updated before any new interrupt. */
	falcon_eventq_read_ack(channel);
}
  16.288 +
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
#if !defined(EFX_HAVE_OLD_NAPI)
/* Modern NAPI: budget is a parameter, the return value is the number
 * of RX packets processed. */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct net_device *napi_dev = channel->napi_dev;
#else
/* Old-style NAPI: budget comes from *budget_ret and the per-device
 * quota; the return value is non-zero while work remains. */
static int efx_poll(struct net_device *napi, int *budget_ret)
{
	struct net_device *napi_dev = napi;
	struct efx_channel *channel = napi_dev->priv;
	int budget = min(napi_dev->quota, *budget_ret);
#endif
	int unused;
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	unused = efx_process_channel(channel, budget);
	rx_packets = (budget - unused);
#if defined(EFX_HAVE_OLD_NAPI)
	/* Old NAPI requires the caller-visible counters to be updated here */
	napi_dev->quota -= rx_packets;
	*budget_ret -= rx_packets;
#endif

	if (rx_packets < budget) {
		/* There is no race here; although napi_disable() will
		 * only wait for netif_rx_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		netif_rx_complete(napi_dev, napi);
		efx_channel_processed(channel);
	}

#if !defined(EFX_HAVE_OLD_NAPI)
	return rx_packets;
#else
	return (rx_packets >= budget);
#endif
}
  16.336 +
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack ) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->has_interrupt && channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel: the full event queue size as quota ensures
	 * every outstanding event is consumed in one pass. */
	(void) efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	/* Reenable NAPI polling */
	napi_enable(&channel->napi_str);

	/* Reenable interrupts */
	falcon_enable_interrupts(efx);
}
  16.375 +
/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	ASSERT_RTNL();

	/* Initialise fields */
	channel->eventq_read_ptr = 0;

	return falcon_init_eventq(channel);
}

/* Shut down the event queue hardware; the buffer is kept for reuse */
static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	ASSERT_RTNL();

	falcon_fini_eventq(channel);
}

/* Release the event queue resources allocated by efx_probe_eventq() */
static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}
  16.416 +
  16.417 +/**************************************************************************
  16.418 + *
  16.419 + * Channel handling
  16.420 + *
  16.421 + *************************************************************************/
  16.422 +
  16.423 +/* Setup per-NIC RX buffer parameters.
  16.424 + * Calculate the rx buffer allocation parameters required to support
  16.425 + * the current MTU, including padding for header alignment and overruns.
  16.426 + */
  16.427 +static void efx_calc_rx_buffer_params(struct efx_nic *efx)
  16.428 +{
  16.429 +	unsigned int order, len;
  16.430 +
  16.431 +	len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
  16.432 +	       EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
  16.433 +	       efx->type->rx_buffer_padding);
  16.434 +
  16.435 +	/* Page-based allocation page-order */
  16.436 +	for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
  16.437 +		;
  16.438 +
  16.439 +	efx->rx_buffer_len = len;
  16.440 +	efx->rx_buffer_order = order;
  16.441 +}
  16.442 +
/* Allocate the event queue and all TX/RX queue resources for one
 * channel, unwinding everything already allocated on failure.
 * Returns 0 on success or a negative error code. */
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

	/* Unwind in reverse order of allocation.  The remove helpers
	 * iterate all queues, including those never probed. */
 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}
  16.480 +
  16.481 +
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static int efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Recalculate the rx buffer parameters */
	efx_calc_rx_buffer_params(efx);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		rc = efx_init_eventq(channel);
		if (rc)
			goto err;

		efx_for_each_channel_tx_queue(tx_queue, channel) {
			rc = efx_init_tx_queue(tx_queue);
			if (rc)
				goto err;
		}

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rc = efx_init_rx_queue(rx_queue);
			if (rc)
				goto err;
		}

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}

	return 0;

	/* Unwind any partially-initialised channels with a full fini */
 err:
	EFX_ERR(efx, "failed to initialise channel %d\n",
		channel ? channel->channel : -1);
	efx_fini_channels(efx);
	return rc;
}
  16.531 +
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	/* Only (re)register the NAPI instance while the interface is
	 * down — when it is up the instance is already registered. */
	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* Mark channel as enabled */
	channel->work_pending = 0;
	channel->enabled = 1;
	smp_wmb(); /* ensure channel updated before first interrupt */

	/* Enable NAPI poll handler */
	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}
  16.559 +
/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	/* Idempotent: a channel that was never started is a no-op */
	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	/* Mark channel as disabled */
	channel->enabled = 0;

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be
	 * no-ops.  Taking and dropping add_lock acts as a barrier
	 * against refill work already in flight.
	 */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}
  16.587 +
/* Shut down all DMA queues on every channel, then all event queues.
 * Counterpart of efx_init_channels(); must be called under RTNL. */
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	ASSERT_RTNL();

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}

	/* Do the event queues last so that we can handle flush events
	 * for all DMA queues. */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);

		efx_fini_eventq(channel);
	}
}
  16.613 +
/* Free all queue and event queue resources for a channel and mark it
 * unused.  Counterpart of efx_probe_channel(). */
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	/* Mark the channel as no longer in use */
	channel->used_flags = 0;
}
  16.629 +
  16.630 +/**************************************************************************
  16.631 + *
  16.632 + * Port handling
  16.633 + *
  16.634 + **************************************************************************/
  16.635 +
  16.636 +/* This ensures that the kernel is kept informed (via
  16.637 + * netif_carrier_on/off) of the link status, and also maintains the
  16.638 + * link status's stop on the port's TX queue.
  16.639 + */
  16.640 +static void efx_link_status_changed(struct efx_nic *efx)
  16.641 +{
  16.642 +	unsigned long flags __attribute__ ((unused));
  16.643 +	int carrier_ok;
  16.644 +
  16.645 +	/* Ensure no link status notifications get sent to the OS after the net
  16.646 +	 * device has been unregistered. */
  16.647 +	if (!efx->net_dev_registered)
  16.648 +		return;
  16.649 +
  16.650 +	carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
  16.651 +	if (efx->link_up != carrier_ok) {
  16.652 +		efx->n_link_state_changes++;
  16.653 +
  16.654 +		if (efx->link_up)
  16.655 +			netif_carrier_on(efx->net_dev);
  16.656 +		else
  16.657 +			netif_carrier_off(efx->net_dev);
  16.658 +	}
  16.659 +
  16.660 +	/* Inform driverlink client */
  16.661 +	EFX_DL_CALLBACK(efx, link_change, efx->link_up);
  16.662 +
  16.663 +	/* Status message for kernel log */
  16.664 +	if (efx->link_up) {
  16.665 +		struct mii_if_info *gmii = &efx->mii;
  16.666 +		unsigned adv, lpa;
  16.667 +		/* NONE here means direct XAUI from the controller, with no
  16.668 +		 * MDIO-attached device we can query. */
  16.669 +		if (efx->phy_type != PHY_TYPE_NONE) {
  16.670 +			adv = gmii_advertised(gmii);
  16.671 +			lpa = gmii_lpa(gmii);
  16.672 +		} else {
  16.673 +			lpa = GM_LPA_10000 | LPA_DUPLEX;
  16.674 +			adv = lpa;
  16.675 +		}
  16.676 +		EFX_INFO(efx, "link up at %dMbps %s-duplex "
  16.677 +			 "(adv %04x lpa %04x) (MTU %d)%s%s%s%s\n",
  16.678 +			 (efx->link_options & GM_LPA_10000 ? 10000 :
  16.679 +			  (efx->link_options & GM_LPA_1000 ? 1000 :
  16.680 +			   (efx->link_options & GM_LPA_100 ? 100 :
  16.681 +			    10))),
  16.682 +			 (efx->link_options & GM_LPA_DUPLEX ?
  16.683 +			  "full" : "half"),
  16.684 +			 adv, lpa,
  16.685 +			 efx->net_dev->mtu,
  16.686 +			 (efx->loopback_mode ? " [" : ""),
  16.687 +			 (efx->loopback_mode ? LOOPBACK_MODE(efx) : ""),
  16.688 +			 (efx->loopback_mode ? " LOOPBACK]" : ""),
  16.689 +			 (efx->promiscuous ? " [PROMISC]" : ""));
  16.690 +	} else {
  16.691 +		EFX_INFO(efx, "link down\n");
  16.692 +	}
  16.693 +
  16.694 +}
  16.695 +
/* This call reinitialises the MAC to pick up new PHY settings
 * To call from a context that cannot sleep use reconfigure_work work item
 * For on_disabled=1 the caller must be serialised against efx_reset,
 * ideally by holding the rtnl lock.
 */
void efx_reconfigure_port(struct efx_nic *efx, int on_disabled)
{
	mutex_lock(&efx->mac_lock);

	EFX_LOG(efx, "reconfiguring MAC from PHY settings\n");

	/* With on_disabled=0, silently skip the reconfigure when the
	 * port is not enabled; carrier state is still refreshed below. */
	if (on_disabled)
		ASSERT_RTNL();
	else if (!efx->port_enabled)
		goto out;

	efx->mac_op->reconfigure(efx);

out:
	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);

	mutex_unlock(&efx->mac_lock);
}
  16.720 +
/* Work item wrapper so efx_reconfigure_port() can be requested from
 * contexts that cannot sleep (queued on efx->workqueue). */
static void efx_reconfigure_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   reconfigure_work);

	EFX_LOG(efx, "MAC reconfigure executing on CPU %d\n",
		raw_smp_processor_id());

	/* Reinitialise MAC to activate new PHY parameters */
	efx_reconfigure_port(efx, 0);
}
  16.732 +
/* One-time port setup: hook up MAC/PHY operations, validate (or
 * replace) the MAC address, and create debugfs entries.
 * Returns 0 on success or a negative error code. */
static int efx_probe_port(struct efx_nic *efx)
{
	unsigned char *dev_addr;
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	/* Sanity check MAC address */
	dev_addr = efx->mac_address;
	if (!is_valid_ether_addr(dev_addr)) {
		DECLARE_MAC_BUF(mac);

		EFX_ERR(efx, "invalid MAC address %s\n",
			print_mac(mac, dev_addr));
		/* With allow_bad_hwaddr set, fall back to a random
		 * locally-administered address instead of failing —
		 * lets efx_mtd reflash an erased EEPROM. */
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %s\n",
			 print_mac(mac, dev_addr));
	}

	/* Register debugfs entries */
	rc = efx_init_debugfs_port(efx);
	if (rc)
		goto err;

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}
  16.772 +
/* Bring up the MAC/PHY and program the MAC registers.
 * Returns 0 on success or the MAC init error code. */
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	/* The default power state is ON */
	efx->phy_powered = 1;

	/* Initialise the MAC and PHY */
	rc = efx->mac_op->init(efx);
	if (rc)
		return rc;

	efx->port_initialized = 1;

	/* Reconfigure port to program MAC registers */
	efx->mac_op->reconfigure(efx);

	return 0;
}
  16.794 +
/* Allow efx_reconfigure_port() to run, and propagate delayed changes
 * to the promiscuous flag to the MAC if needed */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	ASSERT_RTNL();

	BUG_ON(efx->port_enabled);

	/* mac_lock guards port_enabled against efx_reconfigure_port() */
	mutex_lock(&efx->mac_lock);
	efx->port_enabled = 1;
	mutex_unlock(&efx->mac_lock);

	if (efx->net_dev_registered) {
		int promiscuous;

		/* Sample the promiscuous flag under the TX lock and
		 * schedule a reconfigure if it changed while the port
		 * was stopped. */
		netif_tx_lock_bh(efx->net_dev);
		promiscuous = (efx->net_dev->flags & IFF_PROMISC) ? 1 : 0;
		if (efx->promiscuous != promiscuous) {
			efx->promiscuous = promiscuous;
			queue_work(efx->workqueue, &efx->reconfigure_work);
		}
		netif_tx_unlock_bh(efx->net_dev);
	}
}
  16.820 +
/* Prevents efx_reconfigure_port() from executing, and prevents
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");
	ASSERT_RTNL();

	/* Clear the flag under mac_lock so reconfigure paths that
	 * hold the lock observe the stop */
	mutex_lock(&efx->mac_lock);
	efx->port_enabled = 0;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx->net_dev_registered) {
		/* Empty lock/unlock pair: waits for any in-flight
		 * holder of the TX lock to drain */
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
  16.840 +
  16.841 +static void efx_fini_port(struct efx_nic *efx)
  16.842 +{
  16.843 +	EFX_LOG(efx, "shut down port\n");
  16.844 +
  16.845 +	if (!efx->port_initialized)
  16.846 +		return;
  16.847 +
  16.848 +	efx->mac_op->fini(efx);
  16.849 +	efx->port_initialized = 0;
  16.850 +
  16.851 +	/* Mark the link down */
  16.852 +	efx->link_up = 0;
  16.853 +	efx_link_status_changed(efx);
  16.854 +}
  16.855 +
/* Tear down port software state created by efx_probe_port().  Also
 * invoked from efx_probe_port()'s error path, so it is expected to
 * cope with a partially-probed port. */
static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	efx_fini_debugfs_port(efx);
	falcon_remove_port(efx);
}
  16.863 +
  16.864 +/**************************************************************************
  16.865 + *
  16.866 + * NIC handling
  16.867 + *
  16.868 + **************************************************************************/
  16.869 +
  16.870 +/* This configures the PCI device to enable I/O and DMA. */
  16.871 +static int efx_init_io(struct efx_nic *efx)
  16.872 +{
  16.873 +	struct pci_dev *pci_dev = efx->pci_dev;
  16.874 +	int rc;
  16.875 +
  16.876 +	EFX_LOG(efx, "initialising I/O\n");
  16.877 +
  16.878 +	/* Generic device-enabling code */
  16.879 +	rc = pci_enable_device(pci_dev);
  16.880 +	if (rc) {
  16.881 +		EFX_ERR(efx, "failed to enable PCI device\n");
  16.882 +		goto fail1;
  16.883 +	}
  16.884 +
  16.885 +	pci_set_master(pci_dev);
  16.886 +
  16.887 +	/* Set the PCI DMA mask.  Try all possibilities from our
  16.888 +	 * genuine mask down to 32 bits, because some architectures
  16.889 +	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
  16.890 +	 * masks event though they reject 46 bit masks.
  16.891 +	 */
  16.892 +	efx->dma_mask = efx->type->max_dma_mask;
  16.893 +	while (efx->dma_mask > 0x7fffffffUL) {
  16.894 +		if (pci_dma_supported(pci_dev, efx->dma_mask) &&
  16.895 +		    ((rc = pci_set_dma_mask(pci_dev, efx->dma_mask)) == 0))
  16.896 +			break;
  16.897 +		efx->dma_mask >>= 1;
  16.898 +	}
  16.899 +	if (rc) {
  16.900 +		EFX_ERR(efx, "could not find a suitable DMA mask\n");
  16.901 +		goto fail2;
  16.902 +	}
  16.903 +	EFX_LOG(efx, "using DMA mask %llx\n",
  16.904 +		(unsigned long long)efx->dma_mask);
  16.905 +	rc = pci_set_consistent_dma_mask(pci_dev, efx->dma_mask);
  16.906 +	if (rc) {
  16.907 +		/* pci_set_consistent_dma_mask() is not *allowed* to
  16.908 +		 * fail with a mask that pci_set_dma_mask() accepted,
  16.909 +		 * but just in case...
  16.910 +		 */
  16.911 +		EFX_ERR(efx, "failed to set consistent DMA mask\n");
  16.912 +		goto fail2;
  16.913 +	}
  16.914 +
  16.915 +	/* Get memory base address */
  16.916 +	efx->membase_phys = pci_resource_start(efx->pci_dev,
  16.917 +					       efx->type->mem_bar);
  16.918 +#if !defined(EFX_HAVE_MSIX_TABLE_RESERVED)
  16.919 +	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
  16.920 +#else
  16.921 +	if (!request_mem_region(efx->membase_phys, efx->type->mem_map_size,
  16.922 +				"sfc"))
  16.923 +		rc = -EIO;
  16.924 +#endif
  16.925 +	if (rc) {
  16.926 +		EFX_ERR(efx, "request for memory BAR failed\n");
  16.927 +		rc = -EIO;
  16.928 +		goto fail3;
  16.929 +	}
  16.930 +	efx->membase = ioremap_nocache(efx->membase_phys,
  16.931 +				       efx->type->mem_map_size);
  16.932 +	if (!efx->membase) {
  16.933 +		EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
  16.934 +			efx->type->mem_bar, efx->membase_phys,
  16.935 +			efx->type->mem_map_size);
  16.936 +		rc = -ENOMEM;
  16.937 +		goto fail4;
  16.938 +	}
  16.939 +	EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
  16.940 +		efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
  16.941 +		efx->membase);
  16.942 +
  16.943 +	return 0;
  16.944 +
  16.945 + fail4:
  16.946 +	release_mem_region(efx->membase_phys, efx->type->mem_map_size);
  16.947 + fail3:
  16.948 +	efx->membase_phys = 0UL;
  16.949 +	/* fall-thru */
  16.950 + fail2:
  16.951 +	pci_disable_device(efx->pci_dev);
  16.952 + fail1:
  16.953 +	return rc;
  16.954 +}
  16.955 +
/* Undo efx_init_io(): unmap the BAR, release the memory region and
 * disable the PCI device.  Each step is guarded so this is safe if
 * efx_init_io() only partially completed. */
static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		/* Release variant must match the request variant used
		 * in efx_init_io() */
#if !defined(EFX_HAVE_MSIX_TABLE_RESERVED)
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
#else
		release_mem_region(efx->membase_phys, efx->type->mem_map_size);
#endif
		efx->membase_phys = 0UL;
	}

	pci_disable_device(efx->pci_dev);
}
  16.976 +
/* Probe the number and type of interrupts we are able to obtain.
 * Tries MSI-X with one vector per RSS queue, falling back to
 * single-vector MSI, then to a legacy interrupt.  Always returns 0;
 * efx->interrupt_mode records the mode actually obtained. */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	struct msix_entry xentries[EFX_MAX_CHANNELS];
	int rc, i;

	/* Select number of used RSS queues */
	/* TODO: Can we react to CPU hotplug? */
	if (rss_cpus == 0)
		rss_cpus = num_online_cpus();

	efx->rss_queues = 1;
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		/* NOTE(review): assumed phys_addr_channels bounds the
		 * usable channel range here — confirm against the NIC
		 * type definition */
		unsigned int max_channel = efx->type->phys_addr_channels - 1;

		BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
		efx->rss_queues = min(max_channel + 1, rss_cpus);
		efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
	}

	/* Determine how many RSS queues we can use, and mark channels
	 * with the appropriate interrupt state */
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		/* Build MSI-X request structure */
		for (i = 0; i < efx->rss_queues; i++)
			xentries[i].entry = i;

		/* Request maximum number of MSI-X interrupts.  A
		 * positive return is the number of vectors actually
		 * available, so retry with that reduced count. */
		rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
		if (rc > 0) {
			EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
			efx->rss_queues = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     efx->rss_queues);
		}
		if (rc == 0) {
			for (i = 0; i < efx->rss_queues; i++) {
				efx->channel[i].has_interrupt = 1;
				efx->channel[i].irq = xentries[i].vector;
			}
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->rss_queues = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
			efx->channel[0].has_interrupt = 1;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		/* Every channel is interruptible */
		for (i = 0; i < EFX_MAX_CHANNELS; i++)
			efx->channel[i].has_interrupt = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	return 0;
}
 16.1047 +
/* Release whichever interrupt resources efx_probe_interrupts()
 * obtained.  pci_disable_msi()/pci_disable_msix() are called
 * unconditionally; the unused one is a no-op. */
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel_with_interrupt(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
 16.1061 +
 16.1062 +/* Select number of used resources
 16.1063 + * Should be called after probe_interrupts()
 16.1064 + */
 16.1065 +static int efx_select_used(struct efx_nic *efx)
 16.1066 +{
 16.1067 +	struct efx_tx_queue *tx_queue;
 16.1068 +	struct efx_rx_queue *rx_queue;
 16.1069 +	int i;
 16.1070 +
 16.1071 +	/* TX queues.  One per port per channel with TX capability
 16.1072 +	 * (more than one per port won't work on Linux, due to out
 16.1073 +	 *  of order issues... but will be fine on Solaris)
 16.1074 +	 */
 16.1075 +	tx_queue = &efx->tx_queue[0];
 16.1076 +
 16.1077 +	/* Perform this for each channel with TX capabilities.
 16.1078 +	 * At the moment, we only support a single TX queue
 16.1079 +	 */
 16.1080 +	tx_queue->used = 1;
 16.1081 +	if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
 16.1082 +		tx_queue->channel = &efx->channel[1];
 16.1083 +	else
 16.1084 +		tx_queue->channel = &efx->channel[0];
 16.1085 +	tx_queue->channel->used_flags |= EFX_USED_BY_TX;
 16.1086 +	tx_queue++;
 16.1087 +
 16.1088 +	/* RX queues.  Each has a dedicated channel. */
 16.1089 +	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
 16.1090 +		rx_queue = &efx->rx_queue[i];
 16.1091 +
 16.1092 +		if (i < efx->rss_queues) {
 16.1093 +			rx_queue->used = 1;
 16.1094 +			/* If we allow multiple RX queues per channel
 16.1095 +			 * we need to decide that here
 16.1096 +			 */
 16.1097 +			rx_queue->channel = &efx->channel[rx_queue->queue];
 16.1098 +			rx_queue->channel->used_flags |= EFX_USED_BY_RX;
 16.1099 +			rx_queue++;
 16.1100 +		}
 16.1101 +	}
 16.1102 +	return 0;
 16.1103 +}
 16.1104 +
/* Create NIC-level software state: hardware-specific probe,
 * interrupt discovery, resource selection, debugfs and default
 * interrupt moderation.  Returns 0 or a negative errno; on failure
 * the fail labels unwind in reverse order of acquisition. */
static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		goto fail1;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail2;

	/* Determine number of RX queues and TX queues */
	rc = efx_select_used(efx);
	if (rc)
		goto fail3;

	/* Register debugfs entries */
	rc = efx_init_debugfs_nic(efx);
	if (rc)
		goto fail4;
	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

	return 0;

 fail4:
	/* fall-thru */
 fail3:
	efx_remove_interrupts(efx);
 fail2:
	falcon_remove_nic(efx);
 fail1:
	return rc;
}
 16.1145 +
/* Tear down NIC-level software state created by efx_probe_nic().
 * NOTE(review): debugfs is removed after the NIC itself, which is
 * not the exact reverse of probe order — confirm this is
 * intentional. */
static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);

	efx_fini_debugfs_nic(efx);
}
 16.1155 +
 16.1156 +/**************************************************************************
 16.1157 + *
 16.1158 + * NIC startup/shutdown
 16.1159 + *
 16.1160 + *************************************************************************/
 16.1161 +
/* Create all software state for the device: NIC, then port, then
 * every channel.  Returns 0 or a negative errno.  On failure the
 * completed stages are unwound in reverse.
 * NOTE(review): fail3 iterates over all channels including those not
 * yet probed — assumes efx_remove_channel() is safe on an unprobed
 * channel; confirm. */
static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
 fail2:
	efx_remove_port(efx);
 fail1:
	return rc;
}
 16.1201 +
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	ASSERT_RTNL();

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx->net_dev_registered && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	efx_wake_queue(efx);

	/* Enable NAPI processing on every channel before enabling
	 * hardware interrupts, so events have somewhere to go */
	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}
 16.1236 +
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point.
 *
 * Three compile-time variants depending on which cancel primitives
 * the running kernel provides. */
static void efx_flush_all(struct efx_nic *efx)
{
#if defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx) {
		cancel_delayed_work_sync(&rx_queue->work);
	}
#endif

#if defined(EFX_USE_CANCEL_WORK_SYNC)
	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->reconfigure_work);
#endif

#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
	/* Ensure that the hardware monitor and asynchronous port
	 * reconfigurations are complete, which are the only two consumers
	 * of efx->workqueue. Since the hardware monitor runs on a long period,
	 * we put in some effort to cancel the delayed work safely rather
	 * than just flushing the queue twice (which is guaranteed to flush
	 * all the work since both efx_monitor and efx_reconfigure_work disarm
	 * if !efx->port_enabled. */
	if (timer_pending(&efx->monitor_work.timer))
		cancel_delayed_work(&efx->monitor_work);
	flush_workqueue(efx->workqueue);
	/* Repeat: the first flush may have allowed the monitor to
	 * re-arm itself before seeing !port_enabled */
	if (timer_pending(&efx->monitor_work.timer))
		cancel_delayed_work(&efx->monitor_work);
	flush_workqueue(efx->workqueue);

	/* efx_rx_work will disarm if !channel->enabled, so we can just
	 * flush the refill workqueue twice as well. */
	flush_workqueue(efx->refill_workqueue);
	flush_workqueue(efx->refill_workqueue);
#endif
}
 16.1280 +
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	ASSERT_RTNL();

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel_with_interrupt(channel, efx)
		if (channel->irq)
			synchronize_irq(channel->irq);

	/* Stop all synchronous port reconfigurations. */
	efx_stop_port(efx);

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Flush reconfigure_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	efx_stop_queue(efx);
	if (efx->net_dev_registered) {
		/* Empty lock/unlock pair drains any TX-path holder */
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
 16.1322 +
/* Tear down all software state: channels, then port, then NIC —
 * the reverse of efx_probe_all(). */
static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}
 16.1332 +
 16.1333 +static int efx_run_selftests(struct efx_nic *efx)
 16.1334 +{
 16.1335 +	struct efx_self_tests tests;
 16.1336 +	unsigned modes = efx->startup_loopbacks & efx->loopback_modes;
 16.1337 +	int rc;
 16.1338 +
 16.1339 +	rc = efx_online_test(efx, &tests);
 16.1340 +	if (rc) {
 16.1341 +		EFX_ERR(efx, "failed self-tests with interrupt_mode of %s\n",
 16.1342 +			INT_MODE(efx));
 16.1343 +		goto fail;
 16.1344 +	}
 16.1345 +
 16.1346 +	if (onload_offline_selftest && modes) {
 16.1347 +		/* Run offline self test */
 16.1348 +		EFX_LOG(efx, "performing on-load offline self-tests\n");
 16.1349 +		rc = efx_offline_test(efx, &tests, modes);
 16.1350 +		EFX_LOG(efx, "%s on-load offline self-tests\n",
 16.1351 +			rc ? "FAILED" : "PASSED");
 16.1352 +		if (rc)
 16.1353 +			goto fail;
 16.1354 +	}
 16.1355 +
 16.1356 +	return 0;
 16.1357 +
 16.1358 + fail:
 16.1359 +	EFX_ERR(efx, "self-tests failed. Given up!\n");
 16.1360 +	if (allow_load_on_failure)
 16.1361 +		rc = 0;
 16.1362 +
 16.1363 +	return rc;
 16.1364 +}
 16.1365 +
/* Flush all TX/RX queues by stopping the device, reinitialising
 * every channel (event queues may hold completions for any queue),
 * and restarting.  Returns 0 or schedules a disabling reset and
 * returns the channel-init error. */
int efx_flush_queues(struct efx_nic *efx)
{
	int rc;

	ASSERT_RTNL();

	efx_stop_all(efx);

	/* We can't just flush the tx queues because the event queues
	 * may contain tx completions from that queue. Just flush everything */
	efx_fini_channels(efx);
	rc = efx_init_channels(efx);
	if (rc) {
		/* Channels cannot be brought back up; disable the NIC */
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		return rc;
	}

	efx_start_all(efx);

	return 0;
}
 16.1387 +
 16.1388 +/**************************************************************************
 16.1389 + *
 16.1390 + * Interrupt moderation
 16.1391 + *
 16.1392 + **************************************************************************/
 16.1393 +
/* Set interrupt moderation parameters (in microseconds) on every
 * channel.  The RX loop runs second, so a channel used by both a TX
 * and an RX queue ends up with rx_usecs. */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	ASSERT_RTNL();

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}
 16.1408 +
 16.1409 +/**************************************************************************
 16.1410 + *
 16.1411 + * Hardware monitor
 16.1412 + *
 16.1413 + **************************************************************************/
 16.1414 +
/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock.  Checks MAC/PHY health and
 * either schedules a reset (monitor_reset set) or just logs, then
 * re-arms itself. */
static void efx_monitor(struct work_struct *data)
{
#if !defined(EFX_NEED_WORK_API_WRAPPERS)
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
#else
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work);
#endif
	int rc = 0;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());


#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
	/* Without cancel_delayed_work_sync(), we have to make sure that
	 * we don't rearm when port_enabled == 0 */
	mutex_lock(&efx->mac_lock);
	if (!efx->port_enabled) {
		mutex_unlock(&efx->mac_lock);
		return;
	}

	rc = efx->mac_op->check_hw(efx);
#else
	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock)) {
		/* Re-arm and try again next interval */
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
		return;
	}

	if (efx->port_enabled)
		rc = efx->mac_op->check_hw(efx);
#endif
	mutex_unlock(&efx->mac_lock);

	if (rc) {
		if (monitor_reset) {
			EFX_ERR(efx, "hardware monitor detected a fault: "
				"triggering reset\n");
			efx_schedule_reset(efx, RESET_TYPE_MONITOR);
		} else {
			EFX_ERR(efx, "hardware monitor detected a fault, "
				"skipping reset\n");
		}
	}

	/* Re-arm for the next monitoring interval */
	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
 16.1471 +
 16.1472 +/**************************************************************************
 16.1473 + *
 16.1474 + * ioctls
 16.1475 + *
 16.1476 + *************************************************************************/
 16.1477 +
/* Net device ioctl
 * Context: process, rtnl_lock() held.
 * Handles MII register access ioctls; everything else returns
 * -EOPNOTSUPP.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = net_dev->priv;
	int rc;

	ASSERT_RTNL();

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
		rc = generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
		break;
	case SIOCSMIIREG:
		/* NOTE(review): the port is reconfigured even when
		 * generic_mii_ioctl() returned an error — confirm
		 * this is intentional */
		rc = generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
		efx_reconfigure_port(efx, 0);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}
 16.1503 +
 16.1504 +/**************************************************************************
 16.1505 + *
 16.1506 + * NAPI interface
 16.1507 + *
 16.1508 + **************************************************************************/
 16.1509 +
/* Allocate the NAPI dev's.
 * Called after we know how many channels there are.
 * Allocates the port net_device, sets its feature flags and MAC
 * address, and (on old-NAPI kernels only) a dummy netdev per
 * channel.  Returns 0 or -ENOMEM; partial allocations are undone
 * via efx_fini_napi().
 */
static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	ASSERT_RTNL();

	/* Allocate the NAPI dev for the port */
	efx->net_dev = alloc_etherdev(0);
	if (!efx->net_dev) {
		rc = -ENOMEM;
		goto err;
	}
	efx->net_dev->priv = efx;
	efx->mii.dev = efx->net_dev;

	/* Set features based on module parameters and DMA mask.
	 * Enable DMA to ZONE_HIGHMEM if the NIC can access all memory
	 * directly.  This only has an effect on 32-bit systems and
	 * PAE on x86 limits memory to 64GB so 40 bits is plenty to
	 * address everything.  If the device can't address 40 bits
	 * then it's safest to turn NETIF_F_HIGHDMA off because this
	 * might be a PAE system with more than 4G of RAM and a 32-bit
	 * NIC.  The use of EFX_DMA_MASK is to eliminate compiler
	 * warnings on platforms where dma_addr_t is 32-bit.  We
	 * assume that in those cases we can access all memory
	 * directly if our DMA mask is all ones. */
	efx->net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (efx->dma_mask >= EFX_DMA_MASK(DMA_40BIT_MASK))
		efx->net_dev->features |= NETIF_F_HIGHDMA;

	/* Copy MAC address */
	memcpy(&efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);

	/* Allocate the per channel devs */
	efx_for_each_channel(channel, efx) {
#if !defined(EFX_HAVE_OLD_NAPI)
		/* Modern NAPI: channels share the port net_device */
		channel->napi_dev = efx->net_dev;
#else
		/* Old NAPI: each channel needs its own dummy netdev */
		channel->napi_dev = alloc_etherdev(0);
		if (!channel->napi_dev) {
			rc = -ENOMEM;
			goto err;
		}
		channel->napi_dev->priv = channel;
		atomic_set(&channel->napi_dev->refcnt, 1);
#endif
	}

	return 0;
 err:
	efx_fini_napi(efx);
	return rc;
}
 16.1567 +
/* Free the NAPI state for the port and channels.  Each step is
 * guarded, so this is safe to call from efx_init_napi()'s error
 * path with partially-allocated state. */
static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	ASSERT_RTNL();

	efx_for_each_channel(channel, efx) {
		/* Finish per channel NAPI */
#if defined(EFX_HAVE_OLD_NAPI)
		/* Only old-NAPI kernels allocated per-channel devs */
		if (channel->napi_dev) {
			channel->napi_dev->priv = NULL;
			free_netdev(channel->napi_dev);
		}
#endif
		channel->napi_dev = NULL;
	}

	/* Finish port NAPI */
	if (efx->net_dev) {
		efx->net_dev->priv = NULL;
		free_netdev(efx->net_dev);
		efx->net_dev = NULL;
	}
}
 16.1593 +
 16.1594 +/**************************************************************************
 16.1595 + *
 16.1596 + * Kernel netpoll interface
 16.1597 + *
 16.1598 + *************************************************************************/
 16.1599 +
 16.1600 +#ifdef CONFIG_NET_POLL_CONTROLLER
 16.1601 +
/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	struct efx_channel *channel;

	/* Kick NAPI on every interrupting channel to drain events */
	efx_for_each_channel_with_interrupt(channel, efx)
		efx_schedule_channel(channel);
}
 16.1614 +
 16.1615 +#endif
 16.1616 +
 16.1617 +/**************************************************************************
 16.1618 + *
 16.1619 + * Kernel net device interface
 16.1620 + *
 16.1621 + *************************************************************************/
 16.1622 +
/* Context: process, rtnl_lock() held.
 * ndo open handler: starts the port, channels and interrupts.
 * Always succeeds. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	ASSERT_RTNL();

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());
	efx_start_all(efx);
	return 0;
}
 16.1634 +
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	int rc;

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	/* Stop device and flush all the channels */
	efx_stop_all(efx);
	efx_fini_channels(efx);
	/* Re-create channels ready for the next open; if that fails
	 * the device is disabled via a scheduled reset */
	rc = efx_init_channels(efx);
	if (rc)
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);

	return 0;
}
 16.1656 +
/* Context: process, dev_base_lock held, non-blocking.
 * Statistics are taken directly from the MAC.
 * If the stats lock is contended, the previously-collected values
 * are returned unchanged rather than blocking.
 */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &efx->stats;

	/* Non-blocking: return stale stats on contention */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->state == STATE_RUNNING)
		efx->mac_op->update_stats(efx);
	spin_unlock(&efx->stats_lock);

	/* Translate MAC statistics into the net_device_stats layout */
	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->tx_errors = mac_stats->tx_bad;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = mac_stats->rx_gtjumbo;
	stats->rx_over_errors = mac_stats->rx_overflow;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = mac_stats->rx_missed;
	/* rx_errors is the sum of the individual categories above
	 * plus symbol errors, which have no dedicated field */
	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = 0;
	stats->tx_fifo_errors = 0;
	stats->tx_heartbeat_errors = 0;
	stats->tx_window_errors = 0;

	return stats;
}
 16.1700 +
 16.1701 +/* Context: netif_tx_lock held, BHs disabled. */
 16.1702 +static void efx_watchdog(struct net_device *net_dev)
 16.1703 +{
 16.1704 +	struct efx_nic *efx = net_dev->priv;
 16.1705 +
 16.1706 +	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
 16.1707 +		atomic_read(&efx->netif_stop_count), efx->port_enabled,
 16.1708 +		monitor_reset ? "resetting channels" : "skipping reset");
 16.1709 +
 16.1710 +	if (monitor_reset)
 16.1711 +		efx_schedule_reset(efx, RESET_TYPE_MONITOR);
 16.1712 +}
 16.1713 +
 16.1714 +
 16.1715 +/* Context: process, rtnl_lock() held. */
 16.1716 +static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 16.1717 +{
 16.1718 +	struct efx_nic *efx = net_dev->priv;
 16.1719 +	int rc = 0;
 16.1720 +
 16.1721 +	ASSERT_RTNL();
 16.1722 +
 16.1723 +	if (new_mtu > EFX_MAX_MTU)
 16.1724 +		return -EINVAL;
 16.1725 +
 16.1726 +	efx_stop_all(efx);
 16.1727 +
 16.1728 +	/* Ask driverlink client if we can change MTU */
 16.1729 +	rc = EFX_DL_CALLBACK(efx, request_mtu, new_mtu);
 16.1730 +	if (rc) {
 16.1731 +		EFX_ERR(efx, "MTU change vetoed by driverlink %s driver\n",
 16.1732 +			efx->dl_cb_dev.request_mtu->driver->name);
 16.1733 +		goto out;
 16.1734 +	}
 16.1735 +
 16.1736 +	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
 16.1737 +
 16.1738 +	efx_fini_channels(efx);
 16.1739 +	net_dev->mtu = new_mtu;
 16.1740 +	rc = efx_init_channels(efx);
 16.1741 +	if (rc)
 16.1742 +		goto fail;
 16.1743 +
 16.1744 +	/* Reconfigure the MAC */
 16.1745 +	efx_reconfigure_port(efx, 1);
 16.1746 +
 16.1747 +	/* Notify driverlink client of new MTU */
 16.1748 +	EFX_DL_CALLBACK(efx, mtu_changed, new_mtu);
 16.1749 +
 16.1750 +	efx_start_all(efx);
 16.1751 +
 16.1752 + out:
 16.1753 +	return rc;
 16.1754 +
 16.1755 + fail:
 16.1756 +	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
 16.1757 +	return rc;
 16.1758 +}
 16.1759 +
 16.1760 +static int efx_set_mac_address(struct net_device *net_dev, void *data)
 16.1761 +{
 16.1762 +	struct efx_nic *efx = net_dev->priv;
 16.1763 +	struct sockaddr *addr = data;
 16.1764 +	char *new_addr = addr->sa_data;
 16.1765 +
 16.1766 +	ASSERT_RTNL();
 16.1767 +
 16.1768 +	if (!is_valid_ether_addr(new_addr)) {
 16.1769 +		DECLARE_MAC_BUF(mac);
 16.1770 +		EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
 16.1771 +			print_mac(mac, new_addr));
 16.1772 +		return -EINVAL;
 16.1773 +	}
 16.1774 +
 16.1775 +	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
 16.1776 +
 16.1777 +	/* Reconfigure the MAC */
 16.1778 +	efx_reconfigure_port(efx, 1);
 16.1779 +
 16.1780 +	return 0;
 16.1781 +}
 16.1782 +
 16.1783 +/* Context: netif_tx_lock held, BHs disabled. */
 16.1784 +static void efx_set_multicast_list(struct net_device *net_dev)
 16.1785 +{
 16.1786 +	struct efx_nic *efx = net_dev->priv;
 16.1787 +	struct dev_mc_list *mc_list = net_dev->mc_list;
 16.1788 +	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
 16.1789 +	unsigned long flags __attribute__ ((unused));
 16.1790 +	int promiscuous;
 16.1791 +	u32 crc;
 16.1792 +	int bit;
 16.1793 +	int i;
 16.1794 +
 16.1795 +	/* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
 16.1796 +	promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
 16.1797 +	if (efx->promiscuous != promiscuous) {
 16.1798 +		if (efx->port_enabled) {
 16.1799 +			efx->promiscuous = promiscuous;
 16.1800 +			queue_work(efx->workqueue, &efx->reconfigure_work);
 16.1801 +		}
 16.1802 +	}
 16.1803 +
 16.1804 +	/* Build multicast hash table */
 16.1805 +	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
 16.1806 +		memset(mc_hash, 0xff, sizeof(*mc_hash));
 16.1807 +	} else {
 16.1808 +		memset(mc_hash, 0x00, sizeof(*mc_hash));
 16.1809 +		for (i = 0; i < net_dev->mc_count; i++) {
 16.1810 +			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
 16.1811 +			bit = (crc & ((1 << EFX_MCAST_HASH_BITS) - 1));
 16.1812 +			set_bit_le(bit, (void *)mc_hash);
 16.1813 +			mc_list = mc_list->next;
 16.1814 +		}
 16.1815 +	}
 16.1816 +
 16.1817 +	/* Create and activate new global multicast hash table */
 16.1818 +	falcon_set_multicast_hash(efx);
 16.1819 +}
 16.1820 +
 16.1821 +/* Handle net device notifier events */
 16.1822 +static int efx_netdev_event(struct notifier_block *this,
 16.1823 +			    unsigned long event, void *ptr)
 16.1824 +{
 16.1825 +	struct net_device *net_dev = (struct net_device *)ptr;
 16.1826 +
 16.1827 +	if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
 16.1828 +		struct efx_nic *efx = net_dev->priv;
 16.1829 +
 16.1830 +		strcpy(efx->name, net_dev->name);
 16.1831 +		efx_fini_debugfs_netdev(net_dev);
 16.1832 +		efx_init_debugfs_netdev(net_dev);
 16.1833 +	}
 16.1834 +
 16.1835 +	return NOTIFY_DONE;
 16.1836 +}
 16.1837 +
/* Notifier registered with the netdev layer to catch device renames. */
static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};
 16.1841 +
/* Fill in the net_device method pointers, register the device with
 * the kernel and create the associated debugfs entries.
 * Returns 0 on success or a negative error code; on failure the
 * net device is left unregistered.
 */
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->open = efx_net_open;
	net_dev->stop = efx_net_stop;
	net_dev->get_stats = efx_net_stats;
	net_dev->tx_timeout = &efx_watchdog;
	net_dev->hard_start_xmit = efx_hard_start_xmit;
	net_dev->do_ioctl = efx_ioctl;
	net_dev->change_mtu = efx_change_mtu;
	net_dev->set_mac_address = efx_set_mac_address;
	net_dev->set_multicast_list = efx_set_multicast_list;
#ifdef CONFIG_NET_POLL_CONTROLLER
	net_dev->poll_controller = efx_netpoll;
#endif
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	BUG_ON(efx->net_dev_registered);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}
	/* Adopt the kernel-assigned interface name for our logging. */
	strcpy(efx->name, net_dev->name);

	/* Create debugfs symlinks */
	rc = efx_init_debugfs_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "failed to init net dev debugfs\n");
		unregister_netdev(efx->net_dev);
		return rc;
	}

	/* Allow link change notifications to be sent to the operating
	 * system.  This must happen after register_netdev so that
	 * there are no outstanding link changes if that call fails.
	 * It must happen before efx_reconfigure_port so that the
	 * initial state of the link is reported. */
	mutex_lock(&efx->mac_lock);
	efx->net_dev_registered = 1;
	mutex_unlock(&efx->mac_lock);

	/* Safety net: in case we don't get a PHY event */
	rtnl_lock();
	efx_reconfigure_port(efx, 1);
	rtnl_unlock();

	EFX_LOG(efx, "registered\n");

	return 0;
}
 16.1906 +
/* Undo efx_register_netdev(): stop link notifications, release any
 * queued TX skbs and unregister the device from the kernel.
 * Safe to call even if the net device was never registered.
 */
static void efx_unregister_netdev(struct efx_nic *efx)
{
	int was_registered = efx->net_dev_registered;
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(efx->net_dev->priv != efx);

	/* SFC Bug 5356: Ensure that no more link status notifications get
	 * sent to the stack.  Bad things happen if there's an
	 * outstanding notification after the net device is freed, but
	 * they only get flushed out by unregister_netdev, not by
	 * free_netdev. */
	mutex_lock(&efx->mac_lock);
	efx->net_dev_registered = 0;
	mutex_unlock(&efx->mac_lock);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (was_registered) {
		/* Revert to the PCI name for any further log messages. */
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		efx_fini_debugfs_netdev(efx->net_dev);
		unregister_netdev(efx->net_dev);
	}
}
 16.1938 +
 16.1939 +/**************************************************************************
 16.1940 + *
 16.1941 + * Device reset and suspend
 16.1942 + *
 16.1943 + **************************************************************************/
 16.1944 +
/* This suspends the device (and acquires the suspend lock) without
 * flushing the descriptor queues.  It is included for the convenience
 * of the driverlink layer.
 *
 * The suspend_lock and the rtnl lock are intentionally left held;
 * efx_resume() releases both in reverse order.
 */
void efx_suspend(struct efx_nic *efx)
{
	EFX_LOG(efx, "suspending operations\n");

	down(&efx->suspend_lock);

	rtnl_lock();
	efx_stop_all(efx);
}
 16.1958 +
/* Counterpart of efx_suspend(): restart the device and release the
 * rtnl lock and suspend_lock acquired there, in reverse order. */
void efx_resume(struct efx_nic *efx)
{
	EFX_LOG(efx, "resuming operations\n");

	efx_start_all(efx);
	rtnl_unlock();

	up(&efx->suspend_lock);
}
 16.1968 +
 16.1969 +/* The final hardware and software finalisation before reset.
 16.1970 + * This function does not handle serialisation with the kernel, it
 16.1971 + * assumes the caller has done this */
 16.1972 +static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 16.1973 +{
 16.1974 +	int rc;
 16.1975 +
 16.1976 +	ASSERT_RTNL();
 16.1977 +
 16.1978 +	rc = efx->mac_op->get_settings(efx, ecmd);
 16.1979 +	if (rc) {
 16.1980 +		EFX_ERR(efx, "could not back up PHY settings\n");
 16.1981 +		goto fail;
 16.1982 +	}
 16.1983 +
 16.1984 +	efx_fini_channels(efx);
 16.1985 +	return 0;
 16.1986 +
 16.1987 + fail:
 16.1988 +	return rc;
 16.1989 +}
 16.1990 +
 16.1991 +/* The first part of software initialisation after a hardware reset
 16.1992 + * This function does not handle serialisation with the kernel, it
 16.1993 + * assumes the caller has done this */
 16.1994 +static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 16.1995 +{
 16.1996 +	int rc;
 16.1997 +
 16.1998 +	rc = efx_init_channels(efx);
 16.1999 +	if (rc)
 16.2000 +		goto fail1;
 16.2001 +
 16.2002 +	/* In an INVISIBLE_RESET there might not be a link state transition,
 16.2003 +	 * so we push the multicast list here. */
 16.2004 +	falcon_set_multicast_hash(efx);
 16.2005 +
 16.2006 +	/* Restore MAC and PHY settings. */
 16.2007 +	rc = efx->mac_op->set_settings(efx, ecmd);
 16.2008 +	if (rc) {
 16.2009 +		EFX_ERR(efx, "could not restore PHY settings\n");
 16.2010 +		goto fail2;
 16.2011 +	}
 16.2012 +
 16.2013 +	return 0;
 16.2014 +
 16.2015 + fail2:
 16.2016 +	efx_fini_channels(efx);
 16.2017 + fail1:
 16.2018 +	return rc;
 16.2019 +}
 16.2020 +
/* Reset the NIC as transparently as possible. Do not reset the PHY
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep.  You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Lock order: efx_dl_reset_lock -> suspend_lock -> rtnl -> mac_lock.
 * On any failure after the device is stopped, the NIC is marked
 * STATE_DISABLED and the net device is torn down.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	unsigned long flags __attribute__ ((unused));
	enum reset_type method = efx->reset_pending;
	int rc;

	efx_dl_reset_lock();

	rc = down_interruptible(&efx->suspend_lock);
	if (rc) {
		EFX_ERR(efx, "reset aborted by signal\n");
		goto unlock_dl_lock;
	}

	/* We've got suspend_lock, which means we can only be in
	 * STATE_RUNNING or STATE_FINI. Don't clear
	 * efx->reset_pending, since this flag indicates that we
	 * should retry device initialisation.
	 */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto unlock_suspend_lock;
	}

	/* Notify driverlink clients of imminent reset. */
	efx_dl_reset_suspend(efx);
	rtnl_lock();

	efx->state = STATE_RESETTING;
	EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	spin_unlock(&efx->stats_lock);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);

	/* Save PHY settings and tear down the channels. */
	rc = efx_reset_down(efx, &ecmd);
	if (rc)
		goto fail1;
	falcon_fini_nic(efx);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto fail2;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
	 * case so the driver can talk to external SRAM */
	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail3;
	}

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		/* Reinitialise the device anyway so the driver unload sequence
		 * can talk to the external SRAM */
		(void) falcon_init_nic(efx);
		rc = -EIO;
		goto fail4;
	}

	/* Rebuild channels and restore the saved PHY settings. */
	rc = efx_reset_up(efx, &ecmd);
	if (rc)
		goto fail5;

	mutex_unlock(&efx->mac_lock);
	efx_reconfigure_port(efx, 1);
	EFX_LOG(efx, "reset complete\n");

	efx->state = STATE_RUNNING;
	efx_start_all(efx);

	rtnl_unlock();

	goto notify;

	/* All failure paths share the same disable-and-teardown code,
	 * hence the collapsed label ladder. */
 fail5:
 fail4:
 fail3:
 fail2:
 fail1:
	EFX_ERR(efx, "has been disabled\n");
	efx->state = STATE_DISABLED;

	/* Remove the net_dev */
	mutex_unlock(&efx->mac_lock);
	rtnl_unlock();
	efx_unregister_netdev(efx);
	efx_fini_port(efx);

 notify:
	/* Notify driverlink clients of completed reset */
	efx_dl_reset_resume(efx, (rc == 0));

 unlock_suspend_lock:
	up(&efx->suspend_lock);

 unlock_dl_lock:
	efx_dl_reset_unlock();

	return rc;
}
 16.2146 +
 16.2147 +/* The worker thread exists so that code that cannot sleep can
 16.2148 + * schedule a reset for later.
 16.2149 + */
 16.2150 +static void efx_reset_work(struct work_struct *data)
 16.2151 +{
 16.2152 +	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
 16.2153 +
 16.2154 +	efx_reset(nic);
 16.2155 +}
 16.2156 +
/* Schedule an asynchronous NIC reset of (at least) the given severity.
 * The requested type is mapped onto the closest supported reset method
 * and queued for efx_reset_work(); a second request while one is
 * already pending is quenched.
 */
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	/* NOTE(review): this check-then-set of reset_pending is not done
	 * under a lock, so two concurrent callers could presumably both
	 * pass it — verify callers serialise, or that a double queue_work
	 * is harmless. */
	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	/* These types are performed exactly as requested. */
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	/* Recoverable datapath errors only need an invisible reset. */
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
		method = RESET_TYPE_INVISIBLE;
		break;
	/* Anything else gets a full reset. */
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling %s reset for %s\n",
			RESET_TYPE(method), RESET_TYPE(type));
	else
		EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));

	efx->reset_pending = method;

#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
	queue_work(efx->reset_workqueue, &efx->reset_work);
#else
	queue_work(efx->workqueue, &efx->reset_work);
#endif
}
 16.2197 +
 16.2198 +/**************************************************************************
 16.2199 + *
 16.2200 + * List of NICs we support
 16.2201 + *
 16.2202 + **************************************************************************/
 16.2203 +
/* Index into efx_nic_types[] for each supported controller revision;
 * also stored as driver_data in the PCI ID table below. */
enum efx_type_index {
	EFX_TYPE_FALCON_A = 0,
	EFX_TYPE_FALCON_B = 1,
};
 16.2208 +
/* Per-revision NIC descriptors, indexed by enum efx_type_index. */
static struct efx_nic_type *efx_nic_types[] = {
	[EFX_TYPE_FALCON_A] = &falcon_a_nic_type,
	[EFX_TYPE_FALCON_B] = &falcon_b_nic_type,
};
 16.2213 +
 16.2214 +
/* PCI device ID table.  driver_data carries the efx_type_index used to
 * select the matching entry in efx_nic_types[]. */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{EFX_VENDID_SFC, FALCON_A_P_DEVID, PCI_ANY_ID, PCI_ANY_ID,
	 0, 0, EFX_TYPE_FALCON_A},
	{EFX_VENDID_SFC, FALCON_B_P_DEVID, PCI_ANY_ID, PCI_ANY_ID,
	 0, 0, EFX_TYPE_FALCON_B},
	{0}			/* end of list */
};
 16.2223 +
 16.2224 +/**************************************************************************
 16.2225 + *
 16.2226 + * Dummy PHY/MAC/Board operations
 16.2227 + *
 16.2228 + * Can be used where the MAC does not implement this operation
 16.2229 + * Needed so all function pointers are valid and do not have to be tested
 16.2230 + * before use
 16.2231 + *
 16.2232 + **************************************************************************/
/* Dummy port operation returning int; always reports success. */
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
/* Dummy port operations with no return value; do nothing. */
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
 16.2239 +
/* Placeholder MAC operations installed by efx_init_struct() until the
 * real MAC type is probed; every operation is a harmless no-op. */
static struct efx_mac_operations efx_dummy_mac_operations = {
	.init		= efx_port_dummy_op_int,
	.reconfigure	= efx_port_dummy_op_void,
	.fini		= efx_port_dummy_op_void,
};
 16.2245 +
/* Placeholder PHY operations installed by efx_init_struct() until the
 * real PHY type is probed; every operation is a harmless no-op. */
static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
	.check_hw        = efx_port_dummy_op_int,
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
	.reset_xaui      = efx_port_dummy_op_void,
};
 16.2254 +
/* Dummy board operations */

/* Dummy board operation returning int; always reports success. */
static int efx_nic_dummy_op_int(struct efx_nic *nic)
{
	return 0;
}

/* Dummy board operation with no return value; does nothing. */
static void efx_nic_dummy_op_void(struct efx_nic *nic) {}
 16.2262 +
/* Placeholder board info installed by efx_init_struct() until the real
 * board type is identified; every callback is a harmless no-op. */
static struct efx_board efx_dummy_board_info = {
	.init    = efx_nic_dummy_op_int,
	.init_leds = efx_port_dummy_op_int,
	.set_fault_led = efx_port_dummy_op_blink,
	.monitor = efx_nic_dummy_op_int,
	.blink = efx_port_dummy_op_blink,
	.fini    = efx_nic_dummy_op_void,
};
 16.2271 +
 16.2272 +/**************************************************************************
 16.2273 + *
 16.2274 + * Data housekeeping
 16.2275 + *
 16.2276 + **************************************************************************/
 16.2277 +
/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 *
 * Only the workqueue creation at the end can fail; on error the
 * already-created workqueues are destroyed and -ENOMEM is returned.
 */
static int efx_init_struct(struct efx_nic *efx, enum efx_type_index type,
			   struct pci_dev *pci_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i, rc;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	sema_init(&efx->suspend_lock, 1);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	/* Use the PCI name until a net device name is assigned. */
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	/* Dummy operations are replaced once the real hardware is probed. */
	efx->board_info = efx_dummy_board_info;

	efx->rx_checksum_enabled = 1;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->mac_op = &efx_dummy_mac_operations;
	efx->phy_op = &efx_dummy_phy_operations;
	INIT_LIST_HEAD(&efx->dl_node);
	INIT_LIST_HEAD(&efx->dl_device_list);
	efx->dl_cb = efx_default_callbacks;
	INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
	atomic_set(&efx->netif_stop_count, 1);

	/* Set up back-pointers and indices for every channel and queue. */
	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->evqnum = i;
		channel->work_pending = 0;
	}
	for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = efx_nic_types[type];

	/* Sanity-check NIC type: ring masks must be 2^n-1 and the event
	 * queue size must be a power of two. */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);
#if defined(EFX_NEED_DUMMY_MSIX)
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX)
		efx->interrupt_mode = EFX_INT_MODE_MSI;
#endif

	/* Tasks that can fail are last */
	efx->refill_workqueue = create_workqueue("sfc_refill");
	if (!efx->refill_workqueue) {
		rc = -ENOMEM;
		goto fail1;
	}

	efx->workqueue = create_singlethread_workqueue("sfc_work");
	if (!efx->workqueue) {
		rc = -ENOMEM;
		goto fail2;
	}

#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
	efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!efx->reset_workqueue) {
		rc = -ENOMEM;
		goto fail3;
	}
#endif

	return 0;

	/* Error unwind: destroy workqueues in reverse creation order. */
#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
 fail3:
	destroy_workqueue(efx->workqueue);
	efx->workqueue = NULL;
#endif

 fail2:
	destroy_workqueue(efx->refill_workqueue);
	efx->refill_workqueue = NULL;
 fail1:
	return rc;
}
 16.2398 +
/* Tear down the workqueues created by efx_init_struct().  Each pointer
 * is checked and cleared, so this is safe to call on a partially
 * initialised structure or more than once. */
static void efx_fini_struct(struct efx_nic *efx)
{
#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
	if (efx->reset_workqueue) {
		destroy_workqueue(efx->reset_workqueue);
		efx->reset_workqueue = NULL;
	}
#endif
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
	if (efx->refill_workqueue) {
		destroy_workqueue(efx->refill_workqueue);
		efx->refill_workqueue = NULL;
	}
}
 16.2416 +
 16.2417 +/**************************************************************************
 16.2418 + *
 16.2419 + * PCI interface
 16.2420 + *
 16.2421 + **************************************************************************/
 16.2422 +
 16.2423 +/* Main body of final NIC shutdown code
 16.2424 + * This is called only at module unload (or hotplug removal).
 16.2425 + */
 16.2426 +static void efx_pci_remove_main(struct efx_nic *efx)
 16.2427 +{
 16.2428 +	ASSERT_RTNL();
 16.2429 +
 16.2430 +	/* Skip everything if we never obtained a valid membase */
 16.2431 +	if (!efx->membase)
 16.2432 +		return;
 16.2433 +
 16.2434 +	efx_fini_channels(efx);
 16.2435 +	efx_fini_port(efx);
 16.2436 +
 16.2437 +	/* Shutdown the board, then the NIC and board state */
 16.2438 +	efx->board_info.fini(efx);
 16.2439 +	falcon_fini_nic(efx);
 16.2440 +	falcon_fini_interrupt(efx);
 16.2441 +	efx->board_info.fini(efx);
 16.2442 +
 16.2443 +	/* Tear down NAPI and LRO */
 16.2444 +	efx_fini_napi(efx);
 16.2445 +	efx_remove_all(efx);
 16.2446 +}
 16.2447 +
 16.2448 +/* Final NIC shutdown
 16.2449 + * This is called only at module unload (or hotplug removal).
 16.2450 + */
 16.2451 +static void efx_pci_remove(struct pci_dev *pci_dev)
 16.2452 +{
 16.2453 +	struct efx_nic *efx;
 16.2454 +
 16.2455 +	efx = pci_get_drvdata(pci_dev);
 16.2456 +	if (!efx)
 16.2457 +		return;
 16.2458 +
 16.2459 +	/* Unregister driver from driverlink layer */
 16.2460 +	efx_dl_unregister_nic(efx);
 16.2461 +
 16.2462 +	/* Mark the NIC as fini under both suspend_lock and
 16.2463 +	 * rtnl_lock */
 16.2464 +	down(&efx->suspend_lock);
 16.2465 +	rtnl_lock();
 16.2466 +	efx->state = STATE_FINI;
 16.2467 +	up(&efx->suspend_lock);
 16.2468 +
 16.2469 +	if (efx->membase) {
 16.2470 +		/* Stop the NIC. Since we're in STATE_FINI, this
 16.2471 +		 * won't be reversed. */
 16.2472 +		if (efx->net_dev_registered)
 16.2473 +			dev_close(efx->net_dev);
 16.2474 +
 16.2475 +		/* Release the rtnl lock. Any queued efx_resets()
 16.2476 +		 * can now return early [we're in STATE_FINI]. */
 16.2477 +		rtnl_unlock();
 16.2478 +
 16.2479 +		efx_unregister_netdev(efx);
 16.2480 +		efx_fini_debugfs_channels(efx);
 16.2481 +
 16.2482 +		/* Wait for any scheduled resets to complete. No more will be
 16.2483 +		 * scheduled from this point because efx_stop_all() has been
 16.2484 +		 * called, we are no longer registered with driverlink, and
 16.2485 +		 * the net_device's have been removed. */
 16.2486 +#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
 16.2487 +		flush_workqueue(efx->reset_workqueue);
 16.2488 +#else
 16.2489 +		flush_workqueue(efx->workqueue);
 16.2490 +#endif
 16.2491 +
 16.2492 +		/* Fini and remove all the software state */
 16.2493 +		rtnl_lock();
 16.2494 +		efx_pci_remove_main(efx);
 16.2495 +	}
 16.2496 +
 16.2497 +	rtnl_unlock();
 16.2498 +
 16.2499 +	efx_fini_io(efx);
 16.2500 +	EFX_LOG(efx, "shutdown successful\n");
 16.2501 +
 16.2502 +	pci_set_drvdata(pci_dev, NULL);
 16.2503 +	efx_fini_struct(efx);
 16.2504 +	kfree(efx);
 16.2505 +};
 16.2506 +
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 *
 * Brings the NIC up through probe, NAPI, board, hardware, port and
 * channel initialisation, runs the self-tests and leaves the NIC
 * stopped.  On failure, everything initialised so far is torn down in
 * reverse order via the fail-label ladder and the error is returned.
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	/* Initialise port/channel net_dev's  */
	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	/* Initialise device */
	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail4;
	}

	/* Initialise port */
	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail5;
	}

	/* Initialise channels */
	rc = efx_init_channels(efx);
	if (rc)
		goto fail6;

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail7;

	/* Start up device - interrupts can occur from this point */
	efx_start_all(efx);

	/* Check basic functionality and set interrupt mode */
	rc = efx_run_selftests(efx);
	if (rc)
		goto fail8;

	/* Stop the NIC */
	efx_stop_all(efx);

	return 0;

	/* Error unwind: each label undoes the step that succeeded just
	 * before the corresponding failure, then falls through. */
 fail8:
	efx_stop_all(efx);
	falcon_fini_interrupt(efx);
 fail7:
	efx_fini_channels(efx);
 fail6:
	efx_fini_port(efx);
 fail5:
	falcon_fini_nic(efx);
 fail4:
	efx->board_info.fini(efx);
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}
 16.2585 +
 16.2586 +/* NIC initialisation
 16.2587 + *
 16.2588 + * This is called at module load (or hotplug insertion,
 16.2589 + * theoretically).  It sets up PCI mappings, tests and resets the NIC,
 16.2590 + * sets up and registers the network devices with the kernel and hooks
 16.2591 + * the interrupt service routine.  It does not prepare the device for
 16.2592 + * transmission; this is left to the first time one of the network
 16.2593 + * interfaces is brought up (i.e. efx_net_open).
 16.2594 + */
 16.2595 +static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 16.2596 +				   const struct pci_device_id *entry)
 16.2597 +{
 16.2598 +	struct efx_nic *efx;
 16.2599 +	enum efx_type_index type = entry->driver_data;
 16.2600 +	int i, rc;
 16.2601 +
 16.2602 +	/* Allocate and initialise a struct efx_nic */
 16.2603 +	efx = kmalloc(sizeof(*efx), GFP_KERNEL);
 16.2604 +	if (!efx) {
 16.2605 +		rc = -ENOMEM;
 16.2606 +		goto fail1;
 16.2607 +	}
 16.2608 +	pci_set_drvdata(pci_dev, efx);
 16.2609 +	rc = efx_init_struct(efx, type, pci_dev);
 16.2610 +	if (rc)
 16.2611 +		goto fail2;
 16.2612 +
 16.2613 +	EFX_INFO(efx, "Solarflare Communications NIC detected\n");
 16.2614 +
 16.2615 +	/* Set up basic I/O (BAR mappings etc) */
 16.2616 +	rc = efx_init_io(efx);
 16.2617 +	if (rc)
 16.2618 +		goto fail3;
 16.2619 +
 16.2620 +	/* From this point on we begin to expose the driver to the OS
 16.2621 +	 * to varying degrees, so lets grab the suspend_lock and
 16.2622 +	 * rtnl_lock to serialise against efx_reset() and
 16.2623 +	 * friends. efx->state is not STATE_RUNNING yet, but we don't
 16.2624 +	 * want these tasks to fail, just to block until we drop the
 16.2625 +	 * lock
 16.2626 +	 */
 16.2627 +	rc = down_interruptible(&efx->suspend_lock);
 16.2628 +	if (rc) {
 16.2629 +		EFX_ERR(efx, "suspend interrupted - aborting\n");
 16.2630 +		goto fail4;
 16.2631 +	}
 16.2632 +
 16.2633 +	rtnl_lock();
 16.2634 +
 16.2635 +	/* Probe, initialise and start everything. Run self-test */
 16.2636 +	for (i = 0; i < 5; i++) {
 16.2637 +		rc = efx_pci_probe_main(efx);
 16.2638 +		if (rc == 0)
 16.2639 +			break;
 16.2640 +
 16.2641 +		/* Retry if a recoverably reset event has been scheduled */
 16.2642 +		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
 16.2643 +		    (efx->reset_pending != RESET_TYPE_ALL))
 16.2644 +			goto fail5;
 16.2645 +
 16.2646 +		/* Serialise against efx_reset(). No more resets will be
 16.2647 +		 * scheduled since efx_stop_all() has been called, and we
 16.2648 +		 * have not and never have been registered with either
 16.2649 +		 * the rtnetlink or driverlink layers. */
 16.2650 +		rtnl_unlock();
 16.2651 +		up(&efx->suspend_lock);
 16.2652 +
 16.2653 +#if defined(EFX_USE_CANCEL_WORK_SYNC)
 16.2654 +		cancel_work_sync(&efx->reset_work);
 16.2655 +#else
 16.2656 +		flush_workqueue(efx->reset_workqueue);
 16.2657 +#endif
 16.2658 +
 16.2659 +		down(&efx->suspend_lock);
 16.2660 +		rtnl_lock();
 16.2661 +
 16.2662 +		efx->reset_pending = RESET_TYPE_NONE;
 16.2663 +	};
 16.2664 +	if (rc) {
 16.2665 +		EFX_ERR(efx, "Could not reset NIC\n");
 16.2666 +		goto fail5;
 16.2667 +	}
 16.2668 +
 16.2669 +	/* Self-tests have all passed */
 16.2670 +	rc = efx_init_debugfs_channels(efx);
 16.2671 +	if (rc)
 16.2672 +		goto fail6;
 16.2673 +
 16.2674 +	/* Switch to the running state before we expose the device to
 16.2675 +	 * the OS.  This is to ensure that the initial gathering of
 16.2676 +	 * MAC stats succeeds. */
 16.2677 +	efx->state = STATE_RUNNING;
 16.2678 +
 16.2679 +	rtnl_unlock();
 16.2680 +
 16.2681 +	rc = efx_register_netdev(efx);
 16.2682 +	if (rc)
 16.2683 +		goto fail7;
 16.2684 +
 16.2685 +	up(&efx->suspend_lock);
 16.2686 +
 16.2687 +	EFX_LOG(efx, "initialisation successful\n");
 16.2688 +
 16.2689 +	/* Register with driverlink layer */
 16.2690 +	rc = efx_dl_register_nic(efx);
 16.2691 +	if (rc)
 16.2692 +		goto fail8;
 16.2693 +
 16.2694 +	return 0;
 16.2695 +
 16.2696 + fail8:
 16.2697 +	down(&efx->suspend_lock);
 16.2698 +	efx_unregister_netdev(efx);
 16.2699 + fail7:
 16.2700 +	/* Re-acquire the rtnl lock around pci_remove_main() */
 16.2701 +	rtnl_lock();
 16.2702 +	efx_fini_debugfs_channels(efx);
 16.2703 + fail6:
 16.2704 +	efx_pci_remove_main(efx);
 16.2705 + fail5:
 16.2706 +	/* Drop the locks before fini */
 16.2707 +	rtnl_unlock();
 16.2708 +	up(&efx->suspend_lock);
 16.2709 + fail4:
 16.2710 +	efx_fini_io(efx);
 16.2711 + fail3:
 16.2712 +	efx_fini_struct(efx);
 16.2713 + fail2:
 16.2714 +	kfree(efx);
 16.2715 + fail1:
 16.2716 +	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
 16.2717 +	return rc;
 16.2718 +}
 16.2719 +
 16.2720 +/* PCI driver definition */
 16.2721 +static struct pci_driver efx_pci_driver = {
 16.2722 +	.name		= EFX_DRIVER_NAME,
 16.2723 +	.id_table	= efx_pci_table,
 16.2724 +	.probe		= efx_pci_probe,
 16.2725 +	.remove		= efx_pci_remove,
 16.2726 +};
 16.2727 +
 16.2728 +/**************************************************************************
 16.2729 + *
 16.2730 + * Kernel module interface
 16.2731 + *
 16.2732 + *************************************************************************/
 16.2733 +
 16.2734 +module_param(interrupt_mode, uint, 0444);
 16.2735 +MODULE_PARM_DESC(interrupt_mode,
 16.2736 +		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
 16.2737 +
 16.2738 +module_param(onload_offline_selftest, uint, 0444);
 16.2739 +MODULE_PARM_DESC(onload_offline_selftest, "Perform offline selftest on load");
 16.2740 +
 16.2741 +static int __init efx_init_module(void)
 16.2742 +{
 16.2743 +	int rc;
 16.2744 +
 16.2745 +	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
 16.2746 +
 16.2747 +	rc = efx_init_debugfs();
 16.2748 +	if (rc)
 16.2749 +		goto err_debugfs;
 16.2750 +
 16.2751 +	rc = register_netdevice_notifier(&efx_netdev_notifier);
 16.2752 +	if (rc)
 16.2753 +		goto err_notifier;
 16.2754 +
 16.2755 +	rc = pci_register_driver(&efx_pci_driver);
 16.2756 +	if (rc < 0)
 16.2757 +		goto err_pci;
 16.2758 +
 16.2759 +	return 0;
 16.2760 +
 16.2761 + err_pci:
 16.2762 +	unregister_netdevice_notifier(&efx_netdev_notifier);
 16.2763 + err_notifier:
 16.2764 +	efx_fini_debugfs();
 16.2765 + err_debugfs:
 16.2766 +	return rc;
 16.2767 +}
 16.2768 +
 16.2769 +static void __exit efx_exit_module(void)
 16.2770 +{
 16.2771 +	printk(KERN_INFO "Solarflare NET driver unloading\n");
 16.2772 +
 16.2773 +	pci_unregister_driver(&efx_pci_driver);
 16.2774 +	unregister_netdevice_notifier(&efx_netdev_notifier);
 16.2775 +	efx_fini_debugfs();
 16.2776 +
 16.2777 +}
 16.2778 +
 16.2779 +module_init(efx_init_module);
 16.2780 +module_exit(efx_exit_module);
 16.2781 +
 16.2782 +MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
 16.2783 +	      "Solarflare Communications");
 16.2784 +MODULE_DESCRIPTION("Solarflare Communications network driver");
 16.2785 +MODULE_LICENSE("GPL");
 16.2786 +MODULE_DEVICE_TABLE(pci, efx_pci_table);
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/drivers/net/sfc/efx.h	Mon Feb 18 10:29:07 2008 +0000
    17.3 @@ -0,0 +1,103 @@
    17.4 +/****************************************************************************
    17.5 + * Driver for Solarflare network controllers
    17.6 + *           (including support for SFE4001 10GBT NIC)
    17.7 + *
    17.8 + * Copyright 2005-2006: Fen Systems Ltd.
    17.9 + * Copyright 2006-2008: Solarflare Communications Inc,
   17.10 + *                      9501 Jeronimo Road, Suite 250,
   17.11 + *                      Irvine, CA 92618, USA
   17.12 + *
   17.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   17.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   17.15 + *
   17.16 + * This program is free software; you can redistribute it and/or modify it
   17.17 + * under the terms of the GNU General Public License version 2 as published
   17.18 + * by the Free Software Foundation, incorporated herein by reference.
   17.19 + *
   17.20 + * This program is distributed in the hope that it will be useful,
   17.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   17.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   17.23 + * GNU General Public License for more details.
   17.24 + *
   17.25 + * You should have received a copy of the GNU General Public License
   17.26 + * along with this program; if not, write to the Free Software
   17.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   17.28 + ****************************************************************************
   17.29 + */
   17.30 +
   17.31 +#ifndef EFX_EFX_H
   17.32 +#define EFX_EFX_H
   17.33 +
   17.34 +#include "net_driver.h"
   17.35 +
   17.36 +/* PCI IDs */
   17.37 +#define EFX_VENDID_SFC	        0x1924
   17.38 +#define FALCON_A_P_DEVID	0x0703
   17.39 +#define FALCON_A_S_DEVID        0x6703
   17.40 +#define FALCON_B_P_DEVID        0x0710
   17.41 +
   17.42 +/* TX */
   17.43 +extern int efx_xmit(struct efx_nic *efx,
   17.44 +		    struct efx_tx_queue *tx_queue, struct sk_buff *skb);
   17.45 +extern void efx_stop_queue(struct efx_nic *efx);
   17.46 +extern void efx_wake_queue(struct efx_nic *efx);
   17.47 +
   17.48 +/* RX */
   17.49 +#if defined(EFX_USE_FASTCALL)
   17.50 +extern void fastcall efx_xmit_done(struct efx_tx_queue *tx_queue,
   17.51 +				   unsigned int index);
   17.52 +#else
   17.53 +extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
   17.54 +#endif
   17.55 +#if defined(EFX_USE_FASTCALL)
   17.56 +extern void fastcall efx_rx_packet(struct efx_rx_queue *rx_queue,
   17.57 +				   unsigned int index, unsigned int len,
   17.58 +				   int checksummed, int discard);
   17.59 +#else
   17.60 +extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
   17.61 +			  unsigned int len, int checksummed, int discard);
   17.62 +#endif
   17.63 +extern void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
   17.64 +			       struct efx_rx_buffer *rx_buf);
   17.65 +
   17.66 +/* Channels */
   17.67 +extern void efx_process_channel_now(struct efx_channel *channel);
   17.68 +extern int efx_flush_queues(struct efx_nic *efx);
   17.69 +
   17.70 +/* Ports */
   17.71 +extern void efx_reconfigure_port(struct efx_nic *efx,
   17.72 +				 int on_disabled);
   17.73 +
   17.74 +/* Global */
   17.75 +extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
   17.76 +extern void efx_suspend(struct efx_nic *efx);
   17.77 +extern void efx_resume(struct efx_nic *efx);
   17.78 +extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
   17.79 +				    int rx_usecs);
   17.80 +extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
   17.81 +extern void efx_hex_dump(const u8 *, unsigned int, const char *);
   17.82 +
   17.83 +/* Dummy PHY ops for PHY drivers */
   17.84 +extern int efx_port_dummy_op_int(struct efx_nic *efx);
   17.85 +extern void efx_port_dummy_op_void(struct efx_nic *efx);
   17.86 +extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink);
   17.87 +
   17.88 +
   17.89 +extern unsigned int efx_monitor_interval;
   17.90 +
   17.91 +static inline void efx_schedule_channel(struct efx_channel *channel)
   17.92 +{
   17.93 +	EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
   17.94 +		  channel->channel, raw_smp_processor_id());
   17.95 +	channel->work_pending = 1;
   17.96 +
   17.97 +#if defined(EFX_HAVE_OLD_NAPI)
   17.98 +	if (!test_and_set_bit(__LINK_STATE_RX_SCHED, &channel->napi_dev->state))
   17.99 +		__netif_rx_schedule(channel->napi_dev);
  17.100 +#else
  17.101 +	netif_rx_schedule(channel->napi_dev, &channel->napi_str);
  17.102 +#endif
  17.103 +}
  17.104 +
  17.105 +
  17.106 +#endif /* EFX_EFX_H */
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/drivers/net/sfc/enum.h	Mon Feb 18 10:29:07 2008 +0000
    18.3 @@ -0,0 +1,117 @@
    18.4 +/****************************************************************************
    18.5 + * Driver for Solarflare network controllers
    18.6 + *           (including support for SFE4001 10GBT NIC)
    18.7 + *
    18.8 + * Copyright 2007:      Solarflare Communications Inc,
    18.9 + *                      9501 Jeronimo Road, Suite 250,
   18.10 + *                      Irvine, CA 92618, USA
   18.11 + *
   18.12 + * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
   18.13 + *
   18.14 + * This program is free software; you can redistribute it and/or modify it
   18.15 + * under the terms of the GNU General Public License version 2 as published
   18.16 + * by the Free Software Foundation, incorporated herein by reference.
   18.17 + *
   18.18 + * This program is distributed in the hope that it will be useful,
   18.19 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   18.20 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   18.21 + * GNU General Public License for more details.
   18.22 + *
   18.23 + * You should have received a copy of the GNU General Public License
   18.24 + * along with this program; if not, write to the Free Software
   18.25 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   18.26 + ****************************************************************************
   18.27 + */
   18.28 +
   18.29 +#ifndef EFX_ENUM_H
   18.30 +#define EFX_ENUM_H
   18.31 +
   18.32 +/**
   18.33 + * enum efx_loopback_mode - loopback modes
   18.34 + * @LOOPBACK_NONE: no loopback
   18.35 + * @LOOPBACK_NEAR: loopback nearest to bus
   18.36 + * @LOOPBACK_MAC: loopback within MAC unspecified level
   18.37 + * @LOOPBACK_XGMII: loopback within MAC at XGMII level
   18.38 + * @LOOPBACK_XGXS: loopback within MAC at XGXS level
   18.39 + * @LOOPBACK_XAUI: loopback within MAC at XAUI level
   18.40 + * @LOOPBACK_PHY: loopback within PHY unspecified level
   18.41 + * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level
   18.42 + * @LOOPBACK_PCS: loopback within PHY at PCS level
   18.43 + * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level
   18.44 + * @LOOPBACK_FAR: loopback furthest from bus
   18.45 + * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
   18.46 + */
   18.47 +/* Please keep in order and up-to-date w.r.t the following two #defines */
   18.48 +enum efx_loopback_mode {
   18.49 +	LOOPBACK_NONE = 0,
   18.50 +	LOOPBACK_NEAR = 1,
   18.51 +	LOOPBACK_MAC = 2,
   18.52 +	LOOPBACK_XGMII = 3,
   18.53 +	LOOPBACK_XGXS = 4,
   18.54 +	LOOPBACK_XAUI = 5,
   18.55 +	LOOPBACK_PHY = 6,
   18.56 +	LOOPBACK_PHYXS = 7,
   18.57 +	LOOPBACK_PCS = 8,
   18.58 +	LOOPBACK_PMAPMD = 9,
   18.59 +	LOOPBACK_FAR = 10,
   18.60 +	LOOPBACK_NETWORK = 11,
   18.61 +	LOOPBACK_MAX
   18.62 +};
   18.63 +#define LOOPBACK_TEST_MAX LOOPBACK_FAR
   18.64 +
   18.65 +/* These loopbacks occur within the controller */
   18.66 +#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \
   18.67 +				(1 << LOOPBACK_XGXS) | \
   18.68 +				(1 << LOOPBACK_XAUI))
   18.69 +
   18.70 +#define LOOPBACKS_1G_INTERNAL (1 << LOOPBACK_MAC)
   18.71 +
   18.72 +#define LOOPBACK_MASK(_efx)			\
   18.73 +	(1 << (_efx)->loopback_mode)
   18.74 +
   18.75 +#define LOOPBACK_INTERNAL(_efx)					\
   18.76 +	(((LOOPBACKS_10G_INTERNAL | LOOPBACKS_1G_INTERNAL) &	\
   18.77 +	  LOOPBACK_MASK(_efx)) ? 1 : 0)
   18.78 +
   18.79 +#define LOOPBACK_CHANGED(_from, _to, _mask)		\
   18.80 +	((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) &	\
   18.81 +	 (_mask) ? 1 : 0)
   18.82 +
   18.83 +#define LOOPBACK_OUT_OF(_from, _to, _mask)		\
   18.84 +	(((LOOPBACK_MASK(_from) & (_mask)) &&		\
   18.85 +	  ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
   18.86 +
   18.87 +/*****************************************************************************/
   18.88 +
   18.89 +/**
   18.90 + * enum reset_type - reset types
   18.91 + *
   18.92 + * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
   18.93 + * %RESET_TYPE_DISABLE specify the method/scope of the reset.  The
   18.94 + * other values specify reasons, which efx_schedule_reset() will choose
   18.95 + * a method for.
   18.96 + *
   18.97 + * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
   18.98 + * @RESET_TYPE_ALL: reset everything but PCI core blocks
   18.99 + * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
  18.100 + * @RESET_TYPE_DISABLE: disable NIC
  18.101 + * @RESET_TYPE_MONITOR: reset due to hardware monitor
  18.102 + * @RESET_TYPE_INT_ERROR: reset due to internal error
  18.103 + * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
  18.104 + */
  18.105 +enum reset_type {
  18.106 +	RESET_TYPE_NONE = -1,
  18.107 +	RESET_TYPE_INVISIBLE = 0,
  18.108 +	RESET_TYPE_ALL = 1,
  18.109 +	RESET_TYPE_WORLD = 2,
  18.110 +	RESET_TYPE_DISABLE = 3,
  18.111 +	RESET_TYPE_MAX_METHOD,
  18.112 +	RESET_TYPE_MONITOR,
  18.113 +	RESET_TYPE_INT_ERROR,
  18.114 +	RESET_TYPE_RX_RECOVERY,
  18.115 +	RESET_TYPE_RX_DESC_FETCH,
  18.116 +	RESET_TYPE_TX_DESC_FETCH,
  18.117 +	RESET_TYPE_MAX,
  18.118 +};
  18.119 +
  18.120 +#endif /* EFX_ENUM_H */
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/drivers/net/sfc/ethtool.c	Mon Feb 18 10:29:07 2008 +0000
    19.3 @@ -0,0 +1,734 @@
    19.4 +/****************************************************************************
    19.5 + * Driver for Solarflare network controllers
    19.6 + *           (including support for SFE4001 10GBT NIC)
    19.7 + *
    19.8 + * Copyright 2005-2006: Fen Systems Ltd.
    19.9 + * Copyright 2006-2008: Solarflare Communications Inc,
   19.10 + *                      9501 Jeronimo Road, Suite 250,
   19.11 + *                      Irvine, CA 92618, USA
   19.12 + *
   19.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   19.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   19.15 + *
   19.16 + * This program is free software; you can redistribute it and/or modify it
   19.17 + * under the terms of the GNU General Public License version 2 as published
   19.18 + * by the Free Software Foundation, incorporated herein by reference.
   19.19 + *
   19.20 + * This program is distributed in the hope that it will be useful,
   19.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   19.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   19.23 + * GNU General Public License for more details.
   19.24 + *
   19.25 + * You should have received a copy of the GNU General Public License
   19.26 + * along with this program; if not, write to the Free Software
   19.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   19.28 + ****************************************************************************
   19.29 + */
   19.30 +
   19.31 +#include <linux/netdevice.h>
   19.32 +#include <linux/ethtool.h>
   19.33 +#include <linux/rtnetlink.h>
   19.34 +#include <asm/uaccess.h>
   19.35 +#include "net_driver.h"
   19.36 +#include "selftest.h"
   19.37 +#include "efx.h"
   19.38 +#include "ethtool.h"
   19.39 +#include "falcon.h"
   19.40 +#include "gmii.h"
   19.41 +
   19.42 +static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
   19.43 +
   19.44 +struct ethtool_string {
   19.45 +	char name[ETH_GSTRING_LEN];
   19.46 +};
   19.47 +
   19.48 +struct efx_ethtool_stat {
   19.49 +	const char *name;
   19.50 +	enum {
   19.51 +		EFX_ETHTOOL_STAT_SOURCE_mac_stats,
   19.52 +		EFX_ETHTOOL_STAT_SOURCE_nic,
   19.53 +		EFX_ETHTOOL_STAT_SOURCE_channel
   19.54 +	} source;
   19.55 +	unsigned offset;
   19.56 +	u64(*get_stat) (void *field); /* Reader function */
   19.57 +};
   19.58 +
   19.59 +/* Initialiser for a struct #efx_ethtool_stat with type-checking */
   19.60 +#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
   19.61 +				get_stat_function) {			\
   19.62 +	.name = #stat_name,						\
   19.63 +	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,		\
   19.64 +	.offset = ((((field_type *) 0) ==				\
   19.65 +		      &((struct efx_##source_name *)0)->field) ?	\
   19.66 +		    offsetof(struct efx_##source_name, field) :		\
   19.67 +		    offsetof(struct efx_##source_name, field)),		\
   19.68 +	.get_stat = get_stat_function,					\
   19.69 +}
   19.70 +
   19.71 +static u64 efx_get_uint_stat(void *field)
   19.72 +{
   19.73 +	return *(unsigned int *)field;
   19.74 +}
   19.75 +
   19.76 +static u64 efx_get_ulong_stat(void *field)
   19.77 +{
   19.78 +	return *(unsigned long *)field;
   19.79 +}
   19.80 +
   19.81 +static u64 efx_get_u64_stat(void *field)
   19.82 +{
   19.83 +	return *(u64 *) field;
   19.84 +}
   19.85 +
   19.86 +static u64 efx_get_atomic_stat(void *field)
   19.87 +{
   19.88 +	return atomic_read((atomic_t *) field);
   19.89 +}
   19.90 +
   19.91 +#define EFX_ETHTOOL_ULONG_MAC_STAT(field)			\
   19.92 +	EFX_ETHTOOL_STAT(field, mac_stats, field, 		\
   19.93 +			  unsigned long, efx_get_ulong_stat)
   19.94 +
   19.95 +#define EFX_ETHTOOL_U64_MAC_STAT(field)				\
   19.96 +	EFX_ETHTOOL_STAT(field, mac_stats, field, 		\
   19.97 +			  u64, efx_get_u64_stat)
   19.98 +
   19.99 +#define EFX_ETHTOOL_UINT_NIC_STAT(name)				\
  19.100 +	EFX_ETHTOOL_STAT(name, nic, n_##name,			\
  19.101 +			 unsigned int, efx_get_uint_stat)
  19.102 +
  19.103 +#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
  19.104 +	EFX_ETHTOOL_STAT(field, nic, errors.field,		\
  19.105 +			 atomic_t, efx_get_atomic_stat)
  19.106 +
  19.107 +#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
  19.108 +	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
  19.109 +			 unsigned int, efx_get_uint_stat)
  19.110 +
  19.111 +static struct efx_ethtool_stat efx_ethtool_stats[] = {
  19.112 +	EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
  19.113 +	EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
  19.114 +	EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
  19.115 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
  19.116 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
  19.117 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
  19.118 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
  19.119 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
  19.120 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
  19.121 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
  19.122 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
  19.123 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
  19.124 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
  19.125 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
  19.126 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
  19.127 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
  19.128 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
  19.129 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
  19.130 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
  19.131 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
  19.132 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
  19.133 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
  19.134 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
  19.135 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
  19.136 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
  19.137 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
  19.138 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
  19.139 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
  19.140 +	EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
  19.141 +	EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
  19.142 +	EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
  19.143 +	EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
  19.144 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
  19.145 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
  19.146 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
  19.147 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
  19.148 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
  19.149 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
  19.150 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
  19.151 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
  19.152 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
  19.153 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
  19.154 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
  19.155 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
  19.156 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
  19.157 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
  19.158 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
  19.159 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
  19.160 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
  19.161 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
  19.162 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
  19.163 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
  19.164 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
  19.165 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
  19.166 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
  19.167 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
  19.168 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
  19.169 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
  19.170 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
  19.171 +	EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
  19.172 +	EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
  19.173 +	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
  19.174 +	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
  19.175 +	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
  19.176 +	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
  19.177 +	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
  19.178 +};
  19.179 +
  19.180 +/* Number of ethtool statistics */
  19.181 +#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
  19.182 +
  19.183 +/**************************************************************************
  19.184 + *
  19.185 + * Ethtool operations
  19.186 + *
  19.187 + **************************************************************************
  19.188 + */
  19.189 +
  19.190 +/* Identify device by flashing LEDs */
  19.191 +static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
  19.192 +{
  19.193 +	struct efx_nic *efx = net_dev->priv;
  19.194 +
  19.195 +	efx->board_info.blink(efx, 1);
  19.196 +	schedule_timeout_interruptible(seconds * HZ);
  19.197 +	efx->board_info.blink(efx, 0);
  19.198 +	return 0;
  19.199 +}
  19.200 +
  19.201 +/* This must be called with rtnl_lock held. */
  19.202 +int efx_ethtool_get_settings(struct net_device *net_dev,
  19.203 +			     struct ethtool_cmd *ecmd)
  19.204 +{
  19.205 +	struct efx_nic *efx = net_dev->priv;
  19.206 +
  19.207 +	return efx->mac_op->get_settings(efx, ecmd);
  19.208 +}
  19.209 +
  19.210 +/* This must be called with rtnl_lock held. */
  19.211 +int efx_ethtool_set_settings(struct net_device *net_dev,
  19.212 +			     struct ethtool_cmd *ecmd)
  19.213 +{
  19.214 +	struct efx_nic *efx = net_dev->priv;
  19.215 +	int rc;
  19.216 +
  19.217 +	rc = efx->mac_op->set_settings(efx, ecmd);
  19.218 +	if (rc)
  19.219 +		return rc;
  19.220 +
  19.221 +	/* Push the settings to the MAC */
  19.222 +	efx_reconfigure_port(efx, 0);
  19.223 +
  19.224 +	return 0;
  19.225 +}
  19.226 +
  19.227 +static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
  19.228 +				    struct ethtool_drvinfo *info)
  19.229 +{
  19.230 +	struct efx_nic *efx = net_dev->priv;
  19.231 +
  19.232 +	strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
  19.233 +	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
  19.234 +	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
  19.235 +}
  19.236 +
  19.237 +/**
  19.238 + * efx_fill_test - fill in an individual self-test entry
  19.239 + * @test_index:		Index of the test
  19.240 + * @strings:		Ethtool strings, or %NULL
  19.241 + * @data:		Ethtool test results, or %NULL
  19.242 + * @test:		Pointer to test result (used only if data != %NULL)
  19.243 + * @unit_format:	Unit name format (e.g. "channel\%d")
  19.244 + * @unit_id:		Unit id (e.g. 0 for "channel0")
  19.245 + * @test_format:	Test name format (e.g. "loopback.\%s.tx.sent")
  19.246 + * @test_id:		Test id (e.g. "PHY" for "loopback.PHY.tx_sent")
  19.247 + *
  19.248 + * Fill in an individual self-test entry.
  19.249 + */
  19.250 +static void efx_fill_test(unsigned int test_index,
  19.251 +			  struct ethtool_string *strings, u64 *data,
  19.252 +			  int *test, const char *unit_format, int unit_id,
  19.253 +			  const char *test_format, const char *test_id)
  19.254 +{
  19.255 +	struct ethtool_string unit_str, test_str;
  19.256 +
  19.257 +	/* Fill data value, if applicable */
  19.258 +	if (data)
  19.259 +		data[test_index] = *test;
  19.260 +
  19.261 +	/* Fill string, if applicable */
  19.262 +	if (strings) {
  19.263 +		snprintf(unit_str.name, sizeof(unit_str.name),
  19.264 +			 unit_format, unit_id);
  19.265 +		snprintf(test_str.name, sizeof(test_str.name),
  19.266 +			 test_format, test_id);
  19.267 +		snprintf(strings[test_index].name,
  19.268 +			 sizeof(strings[test_index].name),
  19.269 +			 "%-9s%-17s", unit_str.name, test_str.name);
  19.270 +	}
  19.271 +}
  19.272 +
  19.273 +#define EFX_PORT_NAME "port%d", 0
  19.274 +
  19.275 +/**
  19.276 + * efx_fill_loopback_test - fill in a block of loopback self-test entries
  19.277 + * @efx:		Efx NIC
  19.278 + * @lb_tests:		Efx loopback self-test results structure
  19.279 + * @mode:		Loopback test mode
  19.280 + * @test_index:		Starting index of the test
  19.281 + * @strings:		Ethtool strings, or %NULL
  19.282 + * @data:		Ethtool test results, or %NULL
  19.283 + *
  19.284 + * Fill in a block of loopback self-test entries.  Return new test
  19.285 + * index.
  19.286 + */
  19.287 +static int efx_fill_loopback_test(struct efx_nic *efx,
  19.288 +				  struct efx_loopback_self_tests *lb_tests,
  19.289 +				  enum efx_loopback_mode mode,
  19.290 +				  unsigned int test_index,
  19.291 +				  struct ethtool_string *strings, u64 *data)
  19.292 +{
  19.293 +	struct efx_tx_queue *tx_queue;
  19.294 +
  19.295 +	efx_for_each_tx_queue(tx_queue, efx) {
  19.296 +		efx_fill_test(test_index++, strings, data,
  19.297 +			      &lb_tests->tx_sent[tx_queue->queue],
  19.298 +			      EFX_TX_QUEUE_NAME(tx_queue),
  19.299 +			      "loopback.%s.tx_sent",
  19.300 +			      efx_loopback_mode_names[mode]);
  19.301 +		efx_fill_test(test_index++, strings, data,
  19.302 +			      &lb_tests->tx_done[tx_queue->queue],
  19.303 +			      EFX_TX_QUEUE_NAME(tx_queue),
  19.304 +			      "loopback.%s.tx_done",
  19.305 +			      efx_loopback_mode_names[mode]);
  19.306 +	}
  19.307 +	efx_fill_test(test_index++, strings, data,
  19.308 +		      &lb_tests->rx_good,
  19.309 +		      EFX_PORT_NAME,
  19.310 +		      "loopback.%s.rx_good",
  19.311 +		      efx_loopback_mode_names[mode]);
  19.312 +	efx_fill_test(test_index++, strings, data,
  19.313 +		      &lb_tests->rx_bad,
  19.314 +		      EFX_PORT_NAME,
  19.315 +		      "loopback.%s.rx_bad",
  19.316 +		      efx_loopback_mode_names[mode]);
  19.317 +
  19.318 +	return test_index;
  19.319 +}
  19.320 +
  19.321 +/**
  19.322 + * efx_ethtool_fill_self_tests - get self-test details
  19.323 + * @efx:		Efx NIC
  19.324 + * @tests:		Efx self-test results structure, or %NULL
  19.325 + * @strings:		Ethtool strings, or %NULL
  19.326 + * @data:		Ethtool test results, or %NULL
  19.327 + *
  19.328 + * Get self-test number of strings, strings, and/or test results.
  19.329 + * Return number of strings (== number of test results).
  19.330 + *
  19.331 + * The reason for merging these three functions is to make sure that
  19.332 + * they can never be inconsistent.
  19.333 + */
  19.334 +static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
  19.335 +				       struct efx_self_tests *tests,
  19.336 +				       struct ethtool_string *strings,
  19.337 +				       u64 *data)
  19.338 +{
  19.339 +	struct efx_channel *channel;
  19.340 +	unsigned int n = 0;
  19.341 +	enum efx_loopback_mode mode;
  19.342 +
  19.343 +	/* Interrupt */
  19.344 +	efx_fill_test(n++, strings, data, &tests->interrupt,
  19.345 +		      "core", 0, "interrupt", NULL);
  19.346 +
  19.347 +	/* Event queues */
  19.348 +	efx_for_each_channel(channel, efx) {
  19.349 +		efx_fill_test(n++, strings, data,
  19.350 +			      &tests->eventq_dma[channel->channel],
  19.351 +			      EFX_CHANNEL_NAME(channel),
  19.352 +			      "eventq.dma", NULL);
  19.353 +		efx_fill_test(n++, strings, data,
  19.354 +			      &tests->eventq_int[channel->channel],
  19.355 +			      EFX_CHANNEL_NAME(channel),
  19.356 +			      "eventq.int", NULL);
  19.357 +		efx_fill_test(n++, strings, data,
  19.358 +			      &tests->eventq_poll[channel->channel],
  19.359 +			      EFX_CHANNEL_NAME(channel),
  19.360 +			      "eventq.poll", NULL);
  19.361 +	}
  19.362 +
  19.363 +	/* PHY presence */
  19.364 +	efx_fill_test(n++, strings, data, &tests->phy_ok,
  19.365 +		      EFX_PORT_NAME, "phy_ok", NULL);
  19.366 +
  19.367 +	/* Loopback tests */
  19.368 +	efx_fill_test(n++, strings, data, &tests->loopback_speed,
  19.369 +		      EFX_PORT_NAME, "loopback.speed", NULL);
  19.370 +	efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
  19.371 +		      EFX_PORT_NAME, "loopback.full_duplex", NULL);
  19.372 +	for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
  19.373 +		if (!(efx->loopback_modes & (1 << mode)))
  19.374 +			continue;
  19.375 +		n = efx_fill_loopback_test(efx,
  19.376 +					   &tests->loopback[mode], mode, n,
  19.377 +					   strings, data);
  19.378 +	}
  19.379 +
  19.380 +	return n;
  19.381 +}
  19.382 +
  19.383 +static int efx_ethtool_get_stats_count(struct net_device *net_dev)
  19.384 +{
  19.385 +	return EFX_ETHTOOL_NUM_STATS;
  19.386 +}
  19.387 +
  19.388 +static int efx_ethtool_self_test_count(struct net_device *net_dev)
  19.389 +{
  19.390 +	struct efx_nic *efx = net_dev->priv;
  19.391 +
  19.392 +	return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
  19.393 +}
  19.394 +
  19.395 +static void efx_ethtool_get_strings(struct net_device *net_dev,
  19.396 +				    u32 string_set, u8 *strings)
  19.397 +{
  19.398 +	struct efx_nic *efx = net_dev->priv;
  19.399 +	struct ethtool_string *ethtool_strings =
  19.400 +		(struct ethtool_string *)strings;
  19.401 +	int i;
  19.402 +
  19.403 +	switch (string_set) {
  19.404 +	case ETH_SS_STATS:
  19.405 +		for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
  19.406 +			strncpy(ethtool_strings[i].name,
  19.407 +				efx_ethtool_stats[i].name,
  19.408 +				sizeof(ethtool_strings[i].name));
  19.409 +		break;
  19.410 +	case ETH_SS_TEST:
  19.411 +		efx_ethtool_fill_self_tests(efx, NULL,
  19.412 +					    ethtool_strings, NULL);
  19.413 +		break;
  19.414 +	default:
  19.415 +		/* No other string sets */
  19.416 +		break;
  19.417 +	}
  19.418 +}
  19.419 +
  19.420 +static void efx_ethtool_get_stats(struct net_device *net_dev,
  19.421 +				  struct ethtool_stats *stats
  19.422 +				  __attribute__ ((unused)), u64 *data)
  19.423 +{
  19.424 +	unsigned long flags __attribute__ ((unused));
  19.425 +	struct efx_nic *efx = net_dev->priv;
  19.426 +	struct efx_mac_stats *mac_stats = &efx->mac_stats;
  19.427 +	struct efx_ethtool_stat *stat;
  19.428 +	struct efx_channel *channel;
  19.429 +	int i;
  19.430 +
  19.431 +	EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
  19.432 +
  19.433 +	/* Update MAC and NIC statistics */
  19.434 +	net_dev->get_stats(net_dev);
  19.435 +	falcon_update_nic_stats(efx);
  19.436 +
  19.437 +	/* Fill detailed statistics buffer */
  19.438 +	for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
  19.439 +		stat = &efx_ethtool_stats[i];
  19.440 +		switch (stat->source) {
  19.441 +		case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
  19.442 +			data[i] = stat->get_stat((void *)mac_stats +
  19.443 +						 stat->offset);
  19.444 +			break;
  19.445 +		case EFX_ETHTOOL_STAT_SOURCE_nic:
  19.446 +			data[i] = stat->get_stat((void *)efx + stat->offset);
  19.447 +			break;
  19.448 +		case EFX_ETHTOOL_STAT_SOURCE_channel:
  19.449 +			data[i] = 0;
  19.450 +			efx_for_each_channel(channel, efx)
  19.451 +				data[i] += stat->get_stat((void *)channel +
  19.452 +							  stat->offset);
  19.453 +			break;
  19.454 +		}
  19.455 +	}
  19.456 +}
  19.457 +
  19.458 +static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
  19.459 +{
  19.460 +	struct efx_nic *efx = net_dev->priv;
  19.461 +	int rc;
  19.462 +
  19.463 +	rc = ethtool_op_set_tx_csum(net_dev, enable);
  19.464 +	if (rc)
  19.465 +		return rc;
  19.466 +
  19.467 +
  19.468 +	efx_flush_queues(efx);
  19.469 +
  19.470 +	return 0;
  19.471 +}
  19.472 +
  19.473 +static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
  19.474 +{
  19.475 +	struct efx_nic *efx = net_dev->priv;
  19.476 +
  19.477 +	/* No way to stop the hardware doing the checks; we just
  19.478 +	 * ignore the result.
  19.479 +	 */
  19.480 +	efx->rx_checksum_enabled = (enable ? 1 : 0);
  19.481 +
  19.482 +	return 0;
  19.483 +}
  19.484 +
  19.485 +static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
  19.486 +{
  19.487 +	struct efx_nic *efx = net_dev->priv;
  19.488 +
  19.489 +	return efx->rx_checksum_enabled;
  19.490 +}
  19.491 +
  19.492 +static void efx_ethtool_self_test(struct net_device *net_dev,
  19.493 +				  struct ethtool_test *test, u64 *data)
  19.494 +{
  19.495 +	struct efx_nic *efx = net_dev->priv;
  19.496 +	struct efx_self_tests efx_tests;
  19.497 +	int offline, already_up;
  19.498 +	int rc;
  19.499 +
  19.500 +	/* Make sure we've got rtnl lock since we're playing with
  19.501 +	 * interrupts, and calling efx_process_channel_now and others
  19.502 +	 */
  19.503 +	ASSERT_RTNL();
  19.504 +
  19.505 +	/* If the NIC isn't in the RUNNING state then exit */
  19.506 +	if (efx->state != STATE_RUNNING) {
  19.507 +		rc = -EIO;
  19.508 +		goto fail1;
  19.509 +	}
  19.510 +
  19.511 +	/* Make sure the interface is up. We need interrupts, NAPI
  19.512 +	 * and some RX buffers so this is helpful.  NB. The caller has
  19.513 +	 * rtnl_lock so nobody else can call dev_open. */
  19.514 +	already_up = (efx->net_dev->flags & IFF_UP);
  19.515 +	if (!already_up) {
  19.516 +		rc = dev_open(efx->net_dev);
  19.517 +		if (rc) {
  19.518 +			EFX_ERR(efx, "failed opening device.\n");
  19.519 +			goto fail2;
  19.520 +		}
  19.521 +	}
  19.522 +
  19.523 +	memset(&efx_tests, 0, sizeof(efx_tests));
  19.524 +	offline = (test->flags & ETH_TEST_FL_OFFLINE);
  19.525 +
  19.526 +	/* Perform online self tests first */
  19.527 +	rc = efx_online_test(efx, &efx_tests);
  19.528 +	if (rc)
  19.529 +		goto out;
  19.530 +
  19.531 +	/* Perform offline tests only if online tests passed */
  19.532 +	if (offline) {
  19.533 +		/* Stop the kernel from sending packets during the test. The
  19.534 +		 * selftest will be consistently bringing the port up and down
  19.535 +		 * as it moves between loopback modes, so the watchdog timer
  19.536 +		 * probably won't run anyway */
  19.537 +		efx_stop_queue(efx);
  19.538 +
  19.539 +		rc = efx_flush_queues(efx);
  19.540 +		if (rc != 0)
  19.541 +			goto out_offline;
  19.542 +
  19.543 +		rc = efx_offline_test(efx, &efx_tests,
  19.544 +				      efx->loopback_modes);
  19.545 + out_offline:
  19.546 +		efx_wake_queue(efx);
  19.547 +	}
  19.548 +
  19.549 +	/* fall-thru */
  19.550 + out:
  19.551 +	if (!already_up)
  19.552 +		dev_close(efx->net_dev);
  19.553 +
  19.554 +	EFX_LOG(efx, "%s all %sline self-tests\n",
  19.555 +		rc == 0 ? "passed" : "failed", offline ? "off" : "on");
  19.556 +
  19.557 + fail2:
  19.558 + fail1:
  19.559 +	/* Fill ethtool results structures */
  19.560 +	efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
  19.561 +	if (rc)
  19.562 +		test->flags |= ETH_TEST_FL_FAILED;
  19.563 +}
  19.564 +
  19.565 +/* Restart autonegotiation */
  19.566 +static int efx_ethtool_nway_reset(struct net_device *net_dev)
  19.567 +{
  19.568 +	struct efx_nic *efx = net_dev->priv;
  19.569 +
  19.570 +	return mii_nway_restart(&efx->mii);
  19.571 +}
  19.572 +
  19.573 +static u32 efx_ethtool_get_link(struct net_device *net_dev)
  19.574 +{
  19.575 +	struct efx_nic *efx = net_dev->priv;
  19.576 +
  19.577 +	return efx->link_up;
  19.578 +}
  19.579 +
  19.580 +static int efx_ethtool_get_coalesce(struct net_device *net_dev,
  19.581 +				    struct ethtool_coalesce *coalesce)
  19.582 +{
  19.583 +	struct efx_nic *efx = net_dev->priv;
  19.584 +	struct efx_tx_queue *tx_queue;
  19.585 +	struct efx_rx_queue *rx_queue;
  19.586 +	struct efx_channel *channel;
  19.587 +
  19.588 +	memset(coalesce, 0, sizeof(*coalesce));
  19.589 +
  19.590 +	/* Find lowest IRQ moderation across all used TX queues */
  19.591 +	coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
  19.592 +	efx_for_each_tx_queue(tx_queue, efx) {
  19.593 +		channel = tx_queue->channel;
  19.594 +		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
  19.595 +			if (channel->used_flags != EFX_USED_BY_RX_TX)
  19.596 +				coalesce->tx_coalesce_usecs_irq =
  19.597 +					channel->irq_moderation;
  19.598 +			else
  19.599 +				coalesce->tx_coalesce_usecs_irq = 0;
  19.600 +		}
  19.601 +	}
  19.602 +
  19.603 +	/* Find lowest IRQ moderation across all used RX queues */
  19.604 +	coalesce->rx_coalesce_usecs_irq = ~((u32) 0);
  19.605 +	efx_for_each_rx_queue(rx_queue, efx) {
  19.606 +		channel = rx_queue->channel;
  19.607 +		if (channel->irq_moderation < coalesce->rx_coalesce_usecs_irq)
  19.608 +			coalesce->rx_coalesce_usecs_irq =
  19.609 +				channel->irq_moderation;
  19.610 +	}
  19.611 +
  19.612 +	return 0;
  19.613 +}
  19.614 +
  19.615 +/* Set coalescing parameters
  19.616 + * The difficulties occur for shared channels
  19.617 + */
  19.618 +static int efx_ethtool_set_coalesce(struct net_device *net_dev,
  19.619 +				    struct ethtool_coalesce *coalesce)
  19.620 +{
  19.621 +	struct efx_nic *efx = net_dev->priv;
  19.622 +	struct efx_channel *channel;
  19.623 +	struct efx_tx_queue *tx_queue;
  19.624 +	unsigned tx_usecs, rx_usecs;
  19.625 +
  19.626 +	if (coalesce->use_adaptive_rx_coalesce ||
  19.627 +	    coalesce->use_adaptive_tx_coalesce)
  19.628 +		return -EOPNOTSUPP;
  19.629 +
  19.630 +	if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
  19.631 +		EFX_ERR(efx, "invalid coalescing setting. "
  19.632 +			"Only rx/tx_coalesce_usecs_irq are supported\n");
  19.633 +		return -EOPNOTSUPP;
  19.634 +	}
  19.635 +
  19.636 +	rx_usecs = coalesce->rx_coalesce_usecs_irq;
  19.637 +	tx_usecs = coalesce->tx_coalesce_usecs_irq;
  19.638 +
  19.639 +	/* If the channel is shared only allow RX parameters to be set */
  19.640 +	efx_for_each_tx_queue(tx_queue, efx) {
  19.641 +		if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
  19.642 +		    tx_usecs) {
  19.643 +			EFX_ERR(efx, "Channel is shared. "
  19.644 +				"Only RX coalescing may be set\n");
  19.645 +			return -EOPNOTSUPP;
  19.646 +		}
  19.647 +	}
  19.648 +
  19.649 +	efx_init_irq_moderation(efx, tx_usecs, rx_usecs);
  19.650 +
  19.651 +	/* Reset channel to pick up new moderation value.  Note that
  19.652 +	 * this may change the value of the irq_moderation field
  19.653 +	 * (e.g. to allow for hardware timer granularity).
  19.654 +	 */
  19.655 +	efx_for_each_channel(channel, efx)
  19.656 +		falcon_set_int_moderation(channel);
  19.657 +
  19.658 +	return 0;
  19.659 +}
  19.660 +
  19.661 +static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
  19.662 +				      struct ethtool_pauseparam *pause)
  19.663 +{
  19.664 +	struct efx_nic *efx = net_dev->priv;
  19.665 +	enum efx_fc_type flow_control = efx->flow_control;
  19.666 +	int rc;
  19.667 +
  19.668 +	flow_control &= ~(EFX_FC_RX | EFX_FC_TX | EFX_FC_AUTO);
  19.669 +	flow_control |= pause->rx_pause ? EFX_FC_RX : 0;
  19.670 +	flow_control |= pause->tx_pause ? EFX_FC_TX : 0;
  19.671 +	flow_control |= pause->autoneg ? EFX_FC_AUTO : 0;
  19.672 +
  19.673 +	/* Try to push the pause parameters */
  19.674 +	rc = efx->mac_op->set_pause(efx, flow_control);
  19.675 +	if (rc)
  19.676 +		return rc;
  19.677 +
  19.678 +	/* Push the settings to the MAC */
  19.679 +	efx_reconfigure_port(efx, 0);
  19.680 +
  19.681 +	return 0;
  19.682 +}
  19.683 +
  19.684 +static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
  19.685 +				       struct ethtool_pauseparam *pause)
  19.686 +{
  19.687 +	struct efx_nic *efx = net_dev->priv;
  19.688 +
  19.689 +	pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
  19.690 +	pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
  19.691 +	pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0;
  19.692 +}
  19.693 +
  19.694 +
  19.695 +#if defined(EFX_USE_ETHTOOL_GET_PERM_ADDR)
  19.696 +static int efx_ethtool_op_get_perm_addr(struct net_device *net_dev,
  19.697 +					struct ethtool_perm_addr *addr,
  19.698 +					u8 *data)
  19.699 +{
  19.700 +	struct efx_nic *efx = net_dev->priv;
  19.701 +
  19.702 +	memcpy(data, efx->mac_address, ETH_ALEN);
  19.703 +
  19.704 +	return 0;
  19.705 +}
  19.706 +#endif
  19.707 +
  19.708 +struct ethtool_ops efx_ethtool_ops = {
  19.709 +	.get_settings		= efx_ethtool_get_settings,
  19.710 +	.set_settings		= efx_ethtool_set_settings,
  19.711 +	.get_drvinfo		= efx_ethtool_get_drvinfo,
  19.712 +	.nway_reset		= efx_ethtool_nway_reset,
  19.713 +	.get_link		= efx_ethtool_get_link,
  19.714 +	.get_coalesce		= efx_ethtool_get_coalesce,
  19.715 +	.set_coalesce		= efx_ethtool_set_coalesce,
  19.716 +	.get_pauseparam         = efx_ethtool_get_pauseparam,
  19.717 +	.set_pauseparam         = efx_ethtool_set_pauseparam,
  19.718 +	.get_rx_csum		= efx_ethtool_get_rx_csum,
  19.719 +	.set_rx_csum		= efx_ethtool_set_rx_csum,
  19.720 +	.get_tx_csum		= ethtool_op_get_tx_csum,
  19.721 +	.set_tx_csum		= efx_ethtool_set_tx_csum,
  19.722 +	.get_sg			= ethtool_op_get_sg,
  19.723 +	.set_sg			= ethtool_op_set_sg,
  19.724 +#if defined(EFX_USE_ETHTOOL_FLAGS)
  19.725 +	.get_flags		= ethtool_op_get_flags,
  19.726 +	.set_flags		= ethtool_op_set_flags,
  19.727 +#endif
  19.728 +	.self_test_count	= efx_ethtool_self_test_count,
  19.729 +	.self_test		= efx_ethtool_self_test,
  19.730 +	.get_strings		= efx_ethtool_get_strings,
  19.731 +	.phys_id		= efx_ethtool_phys_id,
  19.732 +	.get_stats_count	= efx_ethtool_get_stats_count,
  19.733 +	.get_ethtool_stats	= efx_ethtool_get_stats,
  19.734 +#if defined(EFX_USE_ETHTOOL_GET_PERM_ADDR)
  19.735 +	.get_perm_addr          = efx_ethtool_op_get_perm_addr,
  19.736 +#endif
  19.737 +};
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/drivers/net/sfc/ethtool.h	Mon Feb 18 10:29:07 2008 +0000
    20.3 @@ -0,0 +1,44 @@
    20.4 +/****************************************************************************
    20.5 + * Driver for Solarflare network controllers
    20.6 + *           (including support for SFE4001 10GBT NIC)
    20.7 + *
    20.8 + * Copyright 2005:      Fen Systems Ltd.
    20.9 + * Copyright 2006:      Solarflare Communications Inc,
   20.10 + *                      9501 Jeronimo Road, Suite 250,
   20.11 + *                      Irvine, CA 92618, USA
   20.12 + *
   20.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   20.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   20.15 + *
   20.16 + * This program is free software; you can redistribute it and/or modify it
   20.17 + * under the terms of the GNU General Public License version 2 as published
   20.18 + * by the Free Software Foundation, incorporated herein by reference.
   20.19 + *
   20.20 + * This program is distributed in the hope that it will be useful,
   20.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   20.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   20.23 + * GNU General Public License for more details.
   20.24 + *
   20.25 + * You should have received a copy of the GNU General Public License
   20.26 + * along with this program; if not, write to the Free Software
   20.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   20.28 + ****************************************************************************
   20.29 + */
   20.30 +
   20.31 +#ifndef EFX_ETHTOOL_H
   20.32 +#define EFX_ETHTOOL_H
   20.33 +
   20.34 +#include "net_driver.h"
   20.35 +
   20.36 +/*
   20.37 + * Ethtool support
   20.38 + */
   20.39 +
   20.40 +extern int efx_ethtool_get_settings(struct net_device *net_dev,
   20.41 +				    struct ethtool_cmd *ecmd);
   20.42 +extern int efx_ethtool_set_settings(struct net_device *net_dev,
   20.43 +				    struct ethtool_cmd *ecmd);
   20.44 +
   20.45 +extern struct ethtool_ops efx_ethtool_ops;
   20.46 +
   20.47 +#endif /* EFX_ETHTOOL_H */
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/drivers/net/sfc/extraversion.h	Mon Feb 18 10:29:07 2008 +0000
    21.3 @@ -0,0 +1,4 @@
    21.4 +/*
    21.5 + * If compiling on kernels with backported features you may need to
    21.6 + * define EFX_DIST_KVER_ symbols here
    21.7 + */
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/drivers/net/sfc/falcon.c	Mon Feb 18 10:29:07 2008 +0000
    22.3 @@ -0,0 +1,3708 @@
    22.4 +/****************************************************************************
    22.5 + * Driver for Solarflare network controllers
    22.6 + *           (including support for SFE4001 10GBT NIC)
    22.7 + *
    22.8 + * Copyright 2005-2006: Fen Systems Ltd.
    22.9 + * Copyright 2006-2008: Solarflare Communications Inc,
   22.10 + *                      9501 Jeronimo Road, Suite 250,
   22.11 + *                      Irvine, CA 92618, USA
   22.12 + *
   22.13 + * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
   22.14 + * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
   22.15 + *
   22.16 + * This program is free software; you can redistribute it and/or modify it
   22.17 + * under the terms of the GNU General Public License version 2 as published
   22.18 + * by the Free Software Foundation, incorporated herein by reference.
   22.19 + *
   22.20 + * This program is distributed in the hope that it will be useful,
   22.21 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   22.22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   22.23 + * GNU General Public License for more details.
   22.24 + *
   22.25 + * You should have received a copy of the GNU General Public License
   22.26 + * along with this program; if not, write to the Free Software
   22.27 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   22.28 + ****************************************************************************
   22.29 + */
   22.30 +
   22.31 +#include <asm/io.h>
   22.32 +#include <asm/bitops.h>
   22.33 +#include <linux/delay.h>
   22.34 +#include <linux/pci.h>
   22.35 +#include <linux/module.h>
   22.36 +#include <linux/seq_file.h>
   22.37 +#include "net_driver.h"
   22.38 +#include "bitfield.h"
   22.39 +#include "efx.h"
   22.40 +#include "mac.h"
   22.41 +#include "gmii.h"
   22.42 +#include "spi.h"
   22.43 +#include "falcon.h"
   22.44 +#include "falcon_hwdefs.h"
   22.45 +#include "falcon_io.h"
   22.46 +#include "mdio_10g.h"
   22.47 +#include "phy.h"
   22.48 +#include "boards.h"
   22.49 +#include "driverlink.h"
   22.50 +#include "workarounds.h"
   22.51 +
   22.52 +/* Falcon hardware control.
   22.53 + * Falcon is the internal codename for the SFC4000 controller that is
   22.54 + * present in SFE400X evaluation boards
   22.55 + */
   22.56 +
   22.57 +struct falcon_nic_data {
   22.58 +	/* Number of entries in each TX queue descriptor cache. */
   22.59 +	unsigned tx_dc_entries;
   22.60 +	/* Number of entries in each RX queue descriptor cache. */
   22.61 +	unsigned rx_dc_entries;
   22.62 +	/* Base address in SRAM of TX queue descriptor caches. */
   22.63 +	unsigned tx_dc_base;
   22.64 +	/* Base address in SRAM of RX queue descriptor caches. */
   22.65 +	unsigned rx_dc_base;
   22.66 +
   22.67 +	/* Previous loopback mode used in deconfigure_mac_wrapper */
   22.68 +	enum efx_loopback_mode old_loopback_mode;
   22.69 +
   22.70 +	/* Driverlink parameters */
   22.71 +	struct efx_dl_falcon_resources resources;
   22.72 +};
   22.73 +
   22.74 +/**************************************************************************
   22.75 + *
   22.76 + * Configurable values
   22.77 + *
   22.78 + **************************************************************************
   22.79 + */
   22.80 +
   22.81 +static int disable_dma_stats;
   22.82 +
   22.83 +/* Specify the size of the RX descriptor cache */
   22.84 +static int descriptor_cache_size = 64;
   22.85 +
   22.86 +/*
   22.87 + * Override EEPROM/flash type from non-volatile configuration or GPIO;
   22.88 + * may need to be specified if bootstrapping from blank flash.
   22.89 + */
   22.90 +static unsigned int eeprom_type = -1;
   22.91 +static unsigned int flash_type = -1;
   22.92 +
   22.93 +/* RX FIFO XOFF watermark
   22.94 + *
  22.95 + * When the amount of the RX FIFO used increases past this
  22.96 + * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A)
   22.97 + * This also has an effect on RX/TX arbitration
   22.98 + */
   22.99 +static int rx_xoff_thresh_bytes = -1;
  22.100 +module_param(rx_xoff_thresh_bytes, int, 0644);
  22.101 +MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
  22.102 +
  22.103 +/* RX FIFO XON watermark
  22.104 + *
  22.105 + * When the amount of the RX FIFO used decreases below this
  22.106 + * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
  22.107 + * This also has an effect on RX/TX arbitration
  22.108 + */
  22.109 +static int rx_xon_thresh_bytes = -1;
  22.110 +module_param(rx_xon_thresh_bytes, int, 0644);
  22.111 +MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
  22.112 +
  22.113 +/* TX descriptor ring size - min 512 max 4k */
  22.114 +#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
  22.115 +#define FALCON_TXD_RING_SIZE 1024
  22.116 +#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
  22.117 +
  22.118 +/* RX descriptor ring size - min 512 max 4k */
  22.119 +#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
  22.120 +#define FALCON_RXD_RING_SIZE 1024
  22.121 +#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
  22.122 +
  22.123 +/* Event queue size - max 32k */
  22.124 +#define FALCON_EVQ_ORDER EVQ_SIZE_4K
  22.125 +#define FALCON_EVQ_SIZE 4096
  22.126 +#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
  22.127 +
  22.128 +/* Max number of internal errors. After this resets will not be performed */
  22.129 +#define FALCON_MAX_INT_ERRORS 4
  22.130 +
  22.131 +/* Maximum period that we wait for flush events. If the flush event
  22.132 + * doesn't arrive in this period of time then we check if the queue
  22.133 + * was disabled anyway. */
  22.134 +#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
  22.135 +
  22.136 +/**************************************************************************
  22.137 + *
  22.138 + * Falcon constants
  22.139 + *
  22.140 + **************************************************************************
  22.141 + */
  22.142 +
  22.143 +/* DMA address mask (up to 46-bit, avoiding compiler warnings)
  22.144 + *
  22.145 + * Note that it is possible to have a platform with 64-bit longs and
  22.146 + * 32-bit DMA addresses, or vice versa.  EFX_DMA_MASK takes care of the
  22.147 + * platform DMA mask.
  22.148 + */
  22.149 +#if BITS_PER_LONG == 64
  22.150 +#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
  22.151 +#else
  22.152 +#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
  22.153 +#endif
  22.154 +
  22.155 +/* TX DMA length mask (13-bit) */
  22.156 +#define FALCON_TX_DMA_MASK (8192 - 1)
  22.157 +
  22.158 +/* Alignment of special buffers (4KB) */
  22.159 +#define FALCON_BUF_ALIGN 4096
  22.160 +
  22.161 +/* Dummy SRAM size code */
  22.162 +#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
  22.163 +
  22.164 +/* Be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */
  22.165 +#define PCI_EXP_DEVCAP_PWR_VAL_LBN	(18)
  22.166 +/* This field takes up bits 26 and 27. */
  22.167 +#define PCI_EXP_DEVCAP_PWR_SCL_LBN	(26)
  22.168 +#define PCI_EXP_LNKSTA_LNK_WID		(0x3f0)
  22.169 +#define PCI_EXP_LNKSTA_LNK_WID_LBN	(4)
  22.170 +
  22.171 +
  22.172 +/**************************************************************************
  22.173 + *
  22.174 + * Falcon hardware access
  22.175 + *
  22.176 + **************************************************************************/
  22.177 +
  22.178 +/* Read the current event from the event queue */
  22.179 +static inline efx_qword_t *falcon_event(struct efx_channel *channel,
  22.180 +					unsigned int index)
  22.181 +{
  22.182 +	return (((efx_qword_t *) (channel->eventq.addr)) + index);
  22.183 +}
  22.184 +
  22.185 +/* See if an event is present
  22.186 + *
  22.187 + * We check both the high and low dword of the event for all ones.  We
  22.188 + * wrote all ones when we cleared the event, and no valid event can
  22.189 + * have all ones in either its high or low dwords.  This approach is
  22.190 + * robust against reordering.
  22.191 + *
  22.192 + * Note that using a single 64-bit comparison is incorrect; even
  22.193 + * though the CPU read will be atomic, the DMA write may not be.
  22.194 + */
  22.195 +static inline int falcon_event_present(efx_qword_t *event)
  22.196 +{
  22.197 +	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
  22.198 +		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
  22.199 +}
  22.200 +
  22.201 +/* Read dword from a Falcon PCIE core register */
  22.202 +static void falcon_pcie_core_read_reg(struct efx_nic *efx, int address,
  22.203 +				      efx_dword_t *result)
  22.204 +{
  22.205 +	efx_oword_t temp;
  22.206 +
  22.207 +	BUG_ON(FALCON_REV(efx) < FALCON_REV_B0);
  22.208 +	BUG_ON(address & 3 || address < 0);
  22.209 +
  22.210 +	EFX_POPULATE_OWORD_1(temp, PCIE_CORE_ADDR, address);
  22.211 +
  22.212 +	falcon_write(efx, &temp, PCIE_CORE_INDIRECT_REG);
  22.213 +	falcon_read(efx, &temp, PCIE_CORE_INDIRECT_REG);
  22.214 +	/* Extract PCIE_CORE_VALUE without byte-swapping */
  22.215 +	BUILD_BUG_ON(PCIE_CORE_VALUE_LBN != 32 ||
  22.216 +		     PCIE_CORE_VALUE_WIDTH != 32);
  22.217 +	result->u32[0] = temp.u32[1];
  22.218 +}
  22.219 +
  22.220 +/* Write dword to a Falcon PCIE core register */
  22.221 +static void falcon_pcie_core_write_reg(struct efx_nic *efx, int address,
  22.222 +				       efx_dword_t value)
  22.223 +{
  22.224 +	efx_oword_t temp;
  22.225 +
  22.226 +	BUG_ON(FALCON_REV(efx) < FALCON_REV_B0);
  22.227 +	BUG_ON(address & 0x3 || address < 0);
  22.228 +
  22.229 +	EFX_POPULATE_OWORD_2(temp,
  22.230 +			     PCIE_CORE_ADDR, address,
  22.231 +			     PCIE_CORE_RW, 1);
  22.232 +	/* Fill PCIE_CORE_VALUE without byte-swapping */
  22.233 +	BUILD_BUG_ON(PCIE_CORE_VALUE_LBN != 32 ||
  22.234 +		     PCIE_CORE_VALUE_WIDTH != 32);
  22.235 +	temp.u32[1] = value.u32[0];
  22.236 +	falcon_write(efx, &temp, PCIE_CORE_INDIRECT_REG);
  22.237 +}
  22.238 +
  22.239 +/**************************************************************************
  22.240 + *
  22.241 + * I2C bus - this is a bit-bashing interface using GPIO pins
  22.242 + * Note that it uses the output enables to tristate the outputs
  22.243 + * SDA is the data pin and SCL is the clock
  22.244 + *
  22.245 + **************************************************************************
  22.246 + */
  22.247 +static void falcon_setsdascl(struct efx_i2c_interface *i2c)
  22.248 +{
  22.249 +	efx_oword_t reg;
  22.250 +
  22.251 +	falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
  22.252 +	EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1));
  22.253 +	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1));
  22.254 +	falcon_write(i2c->efx, &reg, GPIO_CTL_REG_KER);
  22.255 +}
  22.256 +
  22.257 +static int falcon_getsda(struct efx_i2c_interface *i2c)
  22.258 +{
  22.259 +	efx_oword_t reg;
  22.260 +
  22.261 +	falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
  22.262 +	return EFX_OWORD_FIELD(reg, GPIO3_IN);
  22.263 +}
  22.264 +
  22.265 +static int falcon_getscl(struct efx_i2c_interface *i2c)
  22.266 +{
  22.267 +	efx_oword_t reg;
  22.268 +
  22.269 +	falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
  22.270 +	return EFX_DWORD_FIELD(reg, GPIO0_IN);
  22.271 +}
  22.272 +
  22.273 +static struct efx_i2c_bit_operations falcon_i2c_bit_operations = {
  22.274 +	.setsda		= falcon_setsdascl,
  22.275 +	.setscl		= falcon_setsdascl,
  22.276 +	.getsda		= falcon_getsda,
  22.277 +	.getscl		= falcon_getscl,
  22.278 +	.udelay		= 100,
  22.279 +	.mdelay		= 10,
  22.280 +};
  22.281 +
  22.282 +/**************************************************************************
  22.283 + *
  22.284 + * Falcon special buffer handling
  22.285 + * Special buffers are used for event queues and the TX and RX
  22.286 + * descriptor rings.
  22.287 + *
  22.288 + *************************************************************************/
  22.289 +
  22.290 +/* Adds the relevant entries to the full-mode buffer table. */
  22.291 +static int
  22.292 +falcon_pin_special_buffer_full(struct efx_nic *efx,
  22.293 +			       struct efx_special_buffer *buffer)
  22.294 +{
  22.295 +	efx_qword_t buf_desc;
  22.296 +	int index;
  22.297 +	dma_addr_t dma_addr;
  22.298 +	int i;
  22.299 +
  22.300 +	/* Write buffer descriptors to NIC */
  22.301 +	for (i = 0; i < buffer->entries; i++) {
  22.302 +		index = buffer->index + i;
  22.303 +		dma_addr = buffer->dma_addr + (i * 4096);
  22.304 +		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
  22.305 +			index, (unsigned long long)dma_addr);
  22.306 +		EFX_POPULATE_QWORD_4(buf_desc,
  22.307 +				     IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
  22.308 +				     BUF_ADR_REGION, 0,
  22.309 +				     BUF_ADR_FBUF, (dma_addr >> 12),
  22.310 +				     BUF_OWNER_ID_FBUF, 0);
  22.311 +		falcon_write_sram(efx, &buf_desc, index);
  22.312 +	}
  22.313 +
  22.314 +	return 0;
  22.315 +}
  22.316 +
  22.317 +/* Clears the relevant entries from the buffer table */
  22.318 +static void
  22.319 +falcon_clear_special_buffer_full(struct efx_nic *efx,
  22.320 +				 struct efx_special_buffer *buffer)
  22.321 +{
  22.322 +	efx_oword_t buf_tbl_upd;
  22.323 +	unsigned int start = buffer->index;
  22.324 +	unsigned int end = (buffer->index + buffer->entries - 1);
  22.325 +
  22.326 +	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
  22.327 +		buffer->index, buffer->index + buffer->entries - 1);
  22.328 +
  22.329 +	EFX_POPULATE_OWORD_4(buf_tbl_upd,
  22.330 +			     BUF_UPD_CMD, 0,
  22.331 +			     BUF_CLR_CMD, 1,
  22.332 +			     BUF_CLR_END_ID, end,
  22.333 +			     BUF_CLR_START_ID, start);
  22.334 +	falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
  22.335 +}
  22.336 +
/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into Falcon's buffer table.
 *
 * This call will allocate 4kB buffers, since Falcon can't use 8kB
 * buffers for event queues and descriptor rings.  It will always
 * allocate an even number of 4kB buffers, since when we're in
 * half-entry mode for the buffer table we can only deal with pairs of
 * buffers.
 *
 * Returns 0 on success or -ENOMEM if the DMA allocation fails.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* Round size up to an 8kB boundary (i.e. pairs of 4kB buffers) */
	len = (len + 8192 - 1) & ~(8192 - 1);

	/* Allocate buffer as consistent PCI DMA space */
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	/* One buffer table entry per 4kB hardware buffer */
	buffer->entries = len / 4096;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_ALIGN - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID range, handed out sequentially from the
	 * buffer_table_min counter in the NIC resource record */
	buffer->index = nic_data->resources.buffer_table_min;
	nic_data->resources.buffer_table_min += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %lx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(unsigned long long)buffer->dma_addr, len,
		buffer->addr, virt_to_phys(buffer->addr));

	return 0;
}
  22.382 +
/*
 * Initialise a Falcon special buffer
 *
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 *
 * Returns the result of falcon_pin_special_buffer_full()
 * (0 on success).
 */
static int falcon_init_special_buffer(struct efx_nic *efx,
				      struct efx_special_buffer *buffer)
{
	/* The buffer must have been allocated first */
	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	return falcon_pin_special_buffer_full(efx, buffer);
}
  22.398 +
  22.399 +/* Unmaps a buffer from Falcon and clears the buffer table
  22.400 + * entries */
  22.401 +static void falcon_fini_special_buffer(struct efx_nic *efx,
  22.402 +				       struct efx_special_buffer *buffer)
  22.403 +{
  22.404 +
  22.405 +	if (!buffer->entries)
  22.406 +		return;
  22.407 +
  22.408 +	falcon_clear_special_buffer_full(efx, buffer);
  22.409 +}
  22.410 +
/* Release the buffer memory.
 *
 * Frees the consistent DMA memory backing @buffer and marks it unused.
 * Safe to call on a buffer that was never allocated or has already
 * been freed.  The caller is expected to have unpinned the buffer from
 * Falcon's buffer table first (falcon_fini_special_buffer()).
 */
static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %lx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(unsigned long long)buffer->dma_addr, buffer->len,
		buffer->addr, virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
  22.429 +
  22.430 +/**************************************************************************
  22.431 + *
  22.432 + * Falcon generic buffer handling
  22.433 + * These buffers are used for interrupt status and MAC stats
  22.434 + *
  22.435 + **************************************************************************/
  22.436 +
  22.437 +static int falcon_alloc_buffer(struct efx_nic *efx,
  22.438 +			       struct efx_buffer *buffer, unsigned int len)
  22.439 +{
  22.440 +	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
  22.441 +					    &buffer->dma_addr);
  22.442 +	if (!buffer->addr)
  22.443 +		return -ENOMEM;
  22.444 +	buffer->len = len;
  22.445 +	memset(buffer->addr, 0, len);
  22.446 +	return 0;
  22.447 +}
  22.448 +
  22.449 +static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
  22.450 +{
  22.451 +	if (buffer->addr) {
  22.452 +		pci_free_consistent(efx->pci_dev, buffer->len,
  22.453 +				    buffer->addr, buffer->dma_addr);
  22.454 +		buffer->addr = NULL;
  22.455 +	}
  22.456 +}
  22.457 +
  22.458 +/**************************************************************************
  22.459 + *
  22.460 + * Falcon TX path
  22.461 + *
  22.462 + **************************************************************************/
  22.463 +
  22.464 +/* Returns a pointer to the specified transmit descriptor in the TX
  22.465 + * descriptor queue belonging to the specified channel.
  22.466 + */
  22.467 +static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
  22.468 +					       unsigned int index)
  22.469 +{
  22.470 +	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
  22.471 +}
  22.472 +
/* Update TX descriptor write pointer
 * This writes to the TX_DESC_WPTR register for the specified
 * channel's transmit descriptor ring.
 */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	/* Hardware write pointer is write_count modulo the ring size */
	write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
	EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
	falcon_writel_page(tx_queue->efx, &reg,
			   TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
}
  22.487 +
  22.488 +
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 *
 * Must only be called when there is at least one new entry
 * (write_count != insert_count); the BUG_ON below enforces this.
 */
#if defined(EFX_USE_FASTCALL)
void fastcall falcon_push_buffers(struct efx_tx_queue *tx_queue)
#else
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
#endif
{

	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_5(*txd,
				     TX_KER_PORT, 0,
				     TX_KER_CONT, buffer->continuation,
				     TX_KER_BYTE_CNT, buffer->len,
				     TX_KER_BUF_REGION, 0,
				     TX_KER_BUF_ADR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	falcon_notify_tx_desc(tx_queue);
}
  22.524 +
  22.525 +/* Allocate hardware resources for a TX queue */
  22.526 +int falcon_probe_tx(struct efx_tx_queue *tx_queue)
  22.527 +{
  22.528 +	struct efx_nic *efx = tx_queue->efx;
  22.529 +	struct falcon_nic_data *nic_data = efx->nic_data;
  22.530 +	int rc;
  22.531 +
  22.532 +	rc = falcon_alloc_special_buffer(efx, &tx_queue->txd,
  22.533 +					 FALCON_TXD_RING_SIZE *
  22.534 +					 sizeof(efx_qword_t));
  22.535 +	if (rc)
  22.536 +		return rc;
  22.537 +
  22.538 +	nic_data->resources.txq_min = max(nic_data->resources.txq_min,
  22.539 +					  (unsigned)tx_queue->queue + 1);
  22.540 +
  22.541 +	return 0;
  22.542 +}
  22.543 +
/* Prepare channel's TX datapath.
 *
 * Pins the TX descriptor ring into the buffer table and pushes the
 * descriptor-queue pointer (plus checksum-offload configuration) to
 * the NIC.  Returns 0 on success or the error from pinning the ring.
 */
int falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;
	int rc;

	/* Pin TX descriptor ring */
	rc = falcon_init_special_buffer(efx, &tx_queue->txd);
	if (rc)
		return rc;

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      TX_DESCQ_EN, 1,
			      TX_ISCSI_DDIG_EN, 0,
			      TX_ISCSI_HDIG_EN, 0,
			      TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum,
			      TX_DESCQ_OWNER_ID, 0,
			      TX_DESCQ_LABEL, tx_queue->queue,
			      TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
			      TX_DESCQ_TYPE, 0,	/* kernel queue */
			      TX_NON_IP_DROP_DIS_B0, 1);

	if (FALCON_REV(efx) >= FALCON_REV_B0) {
		/* csum is the checksum-DISABLE flag: set when the stack
		 * has not requested IP checksum offload */
		int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
	}

	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			   tx_queue->queue);

	if (FALCON_REV(efx) < FALCON_REV_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUG_ON(tx_queue->queue >= 128);

		/* On A1 silicon, checksum offload is a per-queue bitmap
		 * in TX_CHKSM_CFG: a set bit disables checksumming */
		falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
		if (efx->net_dev->features & NETIF_F_IP_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
	}

	return 0;
}
  22.594 +
  22.595 +static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
  22.596 +{
  22.597 +	struct efx_nic *efx = tx_queue->efx;
  22.598 +	struct efx_channel *channel = &efx->channel[0];
  22.599 +	efx_oword_t tx_flush_descq;
  22.600 +	unsigned int read_ptr, i;
  22.601 +
  22.602 +	/* Post a flush command */
  22.603 +	EFX_POPULATE_OWORD_2(tx_flush_descq,
  22.604 +			     TX_FLUSH_DESCQ_CMD, 1,
  22.605 +			     TX_FLUSH_DESCQ, tx_queue->queue);
  22.606 +	falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
  22.607 +	msleep(FALCON_FLUSH_TIMEOUT);
  22.608 +
  22.609 +	/* If the NIC is resetting then don't bother checking */
  22.610 +	if (EFX_WORKAROUND_7803(efx) || (efx->state == STATE_RESETTING))
  22.611 +		return 0;
  22.612 +
  22.613 +	/* Look for a flush completed event */
  22.614 +	read_ptr = channel->eventq_read_ptr;
  22.615 +	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
  22.616 +		efx_qword_t *event = falcon_event(channel, read_ptr);
  22.617 +		int ev_code, ev_sub_code, ev_queue;
  22.618 +		if (!falcon_event_present(event))
  22.619 +			break;
  22.620 +
  22.621 +		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
  22.622 +		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
  22.623 +		ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
  22.624 +		if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
  22.625 +		    (ev_queue == tx_queue->queue)) {
  22.626 +			EFX_LOG(efx, "tx queue %d flush command succesful\n",
  22.627 +				tx_queue->queue);
  22.628 +			return 0;
  22.629 +		}
  22.630 +
  22.631 +		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
  22.632 +	}
  22.633 +
  22.634 +	if (EFX_WORKAROUND_11557(efx)) {
  22.635 +		efx_oword_t reg;
  22.636 +		int enabled;
  22.637 +
  22.638 +		falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
  22.639 +				  tx_queue->queue);
  22.640 +		enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
  22.641 +		if (!enabled) {
  22.642 +			EFX_LOG(efx, "tx queue %d disabled without a "
  22.643 +				"flush event seen\n", tx_queue->queue);
  22.644 +			return 0;
  22.645 +		}
  22.646 +	}
  22.647 +
  22.648 +	EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
  22.649 +	return -ETIMEDOUT;
  22.650 +}
  22.651 +
/* Shut down a TX queue on the hardware.
 *
 * Flushes outstanding descriptors, removes the descriptor-queue
 * pointer from the card and unpins the ring from the buffer table.
 * The ring memory itself is released later by falcon_remove_tx().
 */
void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Stop the hardware using the queue */
	if (falcon_flush_tx_queue(tx_queue))
		EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			   tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}
  22.669 +
/* Free buffers backing TX queue (the descriptor ring's DMA memory).
 * Must follow falcon_fini_tx(), which unpins the ring. */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
  22.675 +
  22.676 +/**************************************************************************
  22.677 + *
  22.678 + * Falcon RX path
  22.679 + *
  22.680 + **************************************************************************/
  22.681 +
  22.682 +/* Returns a pointer to the specified transmit descriptor in the RX
  22.683 + * descriptor queue.
  22.684 + */
  22.685 +static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
  22.686 +					       unsigned int index)
  22.687 +{
  22.688 +	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
  22.689 +}
  22.690 +
/* This creates an entry in the RX descriptor queue corresponding to
 * the receive buffer.
 *
 * The buffer size written to the descriptor is the buffer length
 * minus the NIC type's rx_buffer_padding.
 */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = falcon_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     RX_KER_BUF_REGION, 0,
			     RX_KER_BUF_ADR, rx_buf->dma_addr);
}
  22.709 +
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 *
 * Any buffers added since the last notify are first converted into
 * hardware descriptors, then the doorbell is rung with the new write
 * pointer.
 */
#if defined(EFX_USE_FASTCALL)
void fastcall falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
#else
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
#endif
{
	efx_dword_t reg;
	unsigned write_ptr;

	/* Build descriptors for buffers added but not yet notified */
	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     FALCON_RXD_RING_MASK);
		++rx_queue->notified_count;
	}

	/* Descriptors must be visible before the doorbell write */
	wmb();
	write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
	EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
	falcon_writel_page(rx_queue->efx, &reg,
			   RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
}
  22.735 +
  22.736 +int falcon_probe_rx(struct efx_rx_queue *rx_queue)
  22.737 +{
  22.738 +	struct efx_nic *efx = rx_queue->efx;
  22.739 +	struct falcon_nic_data *nic_data = efx->nic_data;
  22.740 +	int rc;
  22.741 +
  22.742 +	rc = falcon_alloc_special_buffer(efx, &rx_queue->rxd,
  22.743 +					 FALCON_RXD_RING_SIZE *
  22.744 +					 sizeof(efx_qword_t));
  22.745 +	if (rc)
  22.746 +		return rc;
  22.747 +
  22.748 +	/* Increment the rxq_min counter */
  22.749 +	nic_data->resources.rxq_min = max(nic_data->resources.rxq_min,
  22.750 +					  (unsigned)rx_queue->queue + 1);
  22.751 +
  22.752 +	return 0;
  22.753 +}
  22.754 +
/* Prepare channel's RX datapath.
 *
 * Pins the RX descriptor ring into the buffer table and pushes the
 * descriptor-queue pointer to the NIC.  Returns 0 on success or the
 * error from pinning the ring.
 */
int falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	int rc;
	int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
	/* iSCSI digest offload is only enabled on B0 and later */
	int iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	/* Pin RX descriptor ring */
	rc = falcon_init_special_buffer(efx, &rx_queue->rxd);
	if (rc)
		return rc;

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum,
			      RX_DESCQ_OWNER_ID, 0,
			      RX_DESCQ_LABEL, rx_queue->queue,
			      RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
			      RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      RX_DESCQ_JUMBO, !is_b0,
			      RX_DESCQ_EN, 1);
	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			   rx_queue->queue);
	return 0;
}
  22.789 +
  22.790 +static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
  22.791 +{
  22.792 +	struct efx_nic *efx = rx_queue->efx;
  22.793 +	struct efx_channel *channel = &efx->channel[0];
  22.794 +	unsigned int read_ptr, i;
  22.795 +	efx_oword_t rx_flush_descq;
  22.796 +
  22.797 +	/* Post a flush command */
  22.798 +	EFX_POPULATE_OWORD_2(rx_flush_descq,
  22.799 +			     RX_FLUSH_DESCQ_CMD, 1,
  22.800 +			     RX_FLUSH_DESCQ, rx_queue->queue);
  22.801 +
  22.802 +	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
  22.803 +	msleep(FALCON_FLUSH_TIMEOUT);
  22.804 +
  22.805 +	/* If the NIC is resetting then don't bother checking */
  22.806 +	if (EFX_WORKAROUND_7803(efx) || (efx->state == STATE_RESETTING))
  22.807 +		return 0;
  22.808 +
  22.809 +	/* Look for a flush completed event */
  22.810 +	read_ptr = channel->eventq_read_ptr;
  22.811 +	for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
  22.812 +		efx_qword_t *event = falcon_event(channel, read_ptr);
  22.813 +		int ev_code, ev_sub_code, ev_queue, ev_failed;
  22.814 +		if (!falcon_event_present(event))
  22.815 +			break;
  22.816 +
  22.817 +		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
  22.818 +		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
  22.819 +		ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
  22.820 +		ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
  22.821 +
  22.822 +		if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
  22.823 +		    (ev_queue == rx_queue->queue)) {
  22.824 +			if (ev_failed) {
  22.825 +				EFX_INFO(efx, "rx queue %d flush command "
  22.826 +					 "failed\n", rx_queue->queue);
  22.827 +				return -EAGAIN;
  22.828 +			} else {
  22.829 +				EFX_LOG(efx, "rx queue %d flush command "
  22.830 +					"succesful\n", rx_queue->queue);
  22.831 +				return 0;
  22.832 +			}
  22.833 +		}
  22.834 +
  22.835 +		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
  22.836 +	}
  22.837 +
  22.838 +	if (EFX_WORKAROUND_11557(efx)) {
  22.839 +		efx_oword_t reg;
  22.840 +		int enabled;
  22.841 +
  22.842 +		falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
  22.843 +				  rx_queue->queue);
  22.844 +		enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
  22.845 +		if (!enabled) {
  22.846 +			EFX_LOG(efx, "rx queue %d disabled without a "
  22.847 +				"flush event seen\n", rx_queue->queue);
  22.848 +			return 0;
  22.849 +		}
  22.850 +	}
  22.851 +
  22.852 +	EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
  22.853 +	return -ETIMEDOUT;
  22.854 +}
  22.855 +
  22.856 +void falcon_fini_rx(struct efx_rx_queue *rx_queue)
  22.857 +{
  22.858 +	efx_oword_t rx_desc_ptr;
  22.859 +	struct efx_nic *efx = rx_queue->efx;
  22.860 +	int i, rc;
  22.861 +
  22.862 +	/* Try and flush the rx queue. This may need to be repeated */
  22.863 +	for (i = 0; i < 5; i++) {
  22.864 +		rc = falcon_flush_rx_queue(rx_queue);
  22.865 +		if (rc == -EAGAIN)
  22.866 +			continue;
  22.867 +		break;
  22.868 +	}
  22.869 +	if (rc)
  22.870 +		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
  22.871 +
  22.872 +	/* Remove RX descriptor ring from card */
  22.873 +	EFX_ZERO_OWORD(rx_desc_ptr);
  22.874 +	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
  22.875 +			   rx_queue->queue);
  22.876 +
  22.877 +	/* Unpin RX descriptor ring */
  22.878 +	falcon_fini_special_buffer(efx, &rx_queue->rxd);
  22.879 +}
  22.880 +
/* Free buffers backing RX queue (the descriptor ring's DMA memory).
 * Must follow falcon_fini_rx(), which unpins the ring. */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
  22.886 +
  22.887 +/**************************************************************************
  22.888 + *
  22.889 + * Falcon event queue processing
  22.890 + * Event queues are processed by per-channel tasklets.
  22.891 + *
  22.892 + **************************************************************************/
  22.893 +
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
#if defined(EFX_USE_FASTCALL)
void fastcall falcon_eventq_read_ack(struct efx_channel *channel)
#else
void falcon_eventq_read_ack(struct efx_channel *channel)
#endif
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
	falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			    channel->evqnum);
}
  22.916 +
/* Use HW to insert a SW defined event
 *
 * The whole event payload is copied into the driver-event data field
 * and targeted at this channel's event queue via DRV_EV_QID.
 */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	EFX_POPULATE_OWORD_2(drv_ev_reg,
			     DRV_EV_QID, channel->evqnum,
			     DRV_EV_DATA,
			     EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
	falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
}
  22.928 +
/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static inline void falcon_handle_tx_event(struct efx_channel *channel,
					  efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer; take the TX lock so we
		 * don't race the transmit path's own doorbell writes */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx->net_dev_registered)
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx->net_dev_registered)
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		/* Descriptor fetch error: recover by scheduling a reset */
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
  22.967 +
  22.968 +/* Check received packet's destination MAC address. */
  22.969 +static int check_dest_mac(struct efx_rx_queue *rx_queue,
  22.970 +			  const efx_qword_t *event)
  22.971 +{
  22.972 +	struct efx_rx_buffer *rx_buf;
  22.973 +	struct efx_nic *efx = rx_queue->efx;
  22.974 +	int rx_ev_desc_ptr;
  22.975 +	struct ethhdr *eh;
  22.976 +
  22.977 +	if (efx->promiscuous)
  22.978 +		return 1;
  22.979 +
  22.980 +	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
  22.981 +	rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
  22.982 +	eh = (struct ethhdr *)rx_buf->data;
  22.983 +	if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
  22.984 +		return 0;
  22.985 +	return 1;
  22.986 +}
  22.987 +
/* Detect errors included in the rx_evt_pkt_ok bit.
 *
 * Decodes the per-packet error flags from the RX event, updates the
 * channel's error counters, and reports via the out-parameters:
 * *rx_ev_pkt_ok may be upgraded to 1 for the bug 5475/8970 LLC case,
 * and *discard is set when the frame must be dropped.
 * (byte_count is accepted but not used in this function.)
 */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    unsigned *rx_ev_pkt_ok,
				    int *discard, int byte_count)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm;
	unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
	int snap, non_ip;

	/* Extract all error flags from the event */
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
	/* DRIB_NIB does not exist on B0 and later */
	rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
		(rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
	non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);

	/* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
	 * length field of an LLC frame, which sets TOBE_DISC. We could set
	 * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
	 * protect the RX block).
	 *
	 * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
	 * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
	 *                       LLC can't encapsulate IP, so by definition
	 *                       these packets are NON_IP.
	 *
	 * Unicast mismatch will also cause TOBE_DISC, so the driver needs
	 * to check this.
	 */
	if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
		/* If all the other flags are zero then we can state the
		 * entire packet is ok, which will flag to the kernel not
		 * to recalculate checksums.
		 */
		if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
			*rx_ev_pkt_ok = 1;

		rx_ev_tobe_disc = 0;

		/* TOBE_DISC is set for unicast mismatch.  But given that
		 * we can't trust TOBE_DISC here, we must validate the dest
		 * MAC address ourselves.
		 */
		if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
			rx_ev_tobe_disc = 1;
	}

	/* Count errors that are not in MAC stats. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (rx_ev_ip_hdr_chksum_err)
		++rx_queue->channel->n_rx_ip_hdr_chksum_err;
	else if (rx_ev_tcp_udp_chksum_err)
		++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	if (rx_ev_ip_frag_err)
		++rx_queue->channel->n_rx_ip_frag_err;

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "",
			    snap ? " [SNAP/LLC]" : "");
	}
#endif

	/* Workaround 10750: CRC errors on a 10Xpress PHY may indicate a
	 * link problem that needs PHY-level attention */
	if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
		     efx->phy_type == PHY_TYPE_10XPRESS))
		tenxpress_crc_err(efx);
}
 22.1102 +
 22.1103 +
/* Handle receive events that are not in-order.
 *
 * A mismatch between the descriptor index in the event and the next
 * expected index means completion events were lost; log how many and
 * schedule a reset to resynchronise with the hardware.
 */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
	/* Modular distance from the expected index around the RX ring */
	dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
		   FALCON_RXD_RING_MASK);
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		dropped, index, expected);

	atomic_inc(&efx->errors.missing_event);
	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
 22.1121 +
 22.1122 +
/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 *
 * Returns the RX queue label the event was delivered on.
 */
static inline int falcon_handle_rx_event(struct efx_channel *channel,
					 const efx_qword_t *event)
{
	unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	int discard = 0, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	/* Scatter/jumbo continuation is not expected here (see the
	 * RX_DESCQ_JUMBO setting in falcon_init_rx) */
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);

	rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
	rx_queue = &efx->rx_queue[rx_ev_q_label];

	/* An out-of-order descriptor index means completion events were
	 * lost; recover via falcon_handle_rx_bad_index */
	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
		return rx_ev_q_label;
	}

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard, rx_ev_byte_cnt);
		checksummed = 0;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match))
			discard = 1;
	}

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);

	return rx_ev_q_label;
}
 22.1184 +
 22.1185 +/* Global events are basically PHY events */
 22.1186 +static void falcon_handle_global_event(struct efx_channel *channel,
 22.1187 +				       efx_qword_t *event)
 22.1188 +{
 22.1189 +	struct efx_nic *efx = channel->efx;
 22.1190 +	int is_phy_event = 0, handled = 0;
 22.1191 +
 22.1192 +	/* Check for interrupt on either port.  Some boards have a
 22.1193 +	 * single PHY wired to the interrupt line for port 1. */
 22.1194 +	if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
 22.1195 +	    EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
 22.1196 +	    EFX_QWORD_FIELD(*event, XG_PHY_INTR))
 22.1197 +		is_phy_event = 1;
 22.1198 +
 22.1199 +	if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
 22.1200 +	    EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
 22.1201 +		is_phy_event = 1;
 22.1202 +
 22.1203 +	if (is_phy_event) {
 22.1204 +		efx->phy_op->clear_interrupt(efx);
 22.1205 +		queue_work(efx->workqueue, &efx->reconfigure_work);
 22.1206 +		handled = 1;
 22.1207 +	}
 22.1208 +
 22.1209 +	if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
 22.1210 +		EFX_ERR(efx, "channel %d seen global RX_RESET "
 22.1211 +			"event. Resetting.\n", channel->channel);
 22.1212 +
 22.1213 +		atomic_inc(&efx->errors.rx_reset);
 22.1214 +		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
 22.1215 +				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
 22.1216 +		handled = 1;
 22.1217 +	}
 22.1218 +
 22.1219 +	if (!handled)
 22.1220 +		EFX_ERR(efx, "channel %d unknown global event "
 22.1221 +			EFX_QWORD_FMT "\n", channel->channel,
 22.1222 +			EFX_QWORD_VAL(*event));
 22.1223 +}
 22.1224 +
/* Handle a driver-generated (non-packet) event.
 *
 * Decodes DRIVER_EV_SUB_CODE and either logs the event, forwards it to
 * driverlink clients via EFX_DL_CALLBACK, or schedules a reset for the
 * error sub-codes.
 */
static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);

	switch (ev_sub_code) {
	case TX_DESCQ_FLS_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		EFX_DL_CALLBACK(efx, event, event);
		break;
	case RX_DESCQ_FLS_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		EFX_DL_CALLBACK(efx, event, event);
		break;
	case EVQ_INIT_DONE_EV_DECODE:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case SRM_UPD_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		EFX_DL_CALLBACK(efx, event, event);
		break;
	case WAKE_UP_EV_DECODE:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		EFX_DL_CALLBACK(efx, event, event);
		break;
	case TIMER_EV_DECODE:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		EFX_DL_CALLBACK(efx, event, event);
		break;
	case RX_RECOVERY_EV_DECODE:
		/* RX reset: recoverable if the workaround applies */
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);

		atomic_inc(&efx->errors.rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case RX_DSC_ERROR_EV_DECODE:
		/* Hardware has disabled the queue; recover via reset */
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		atomic_inc(&efx->errors.rx_desc_fetch);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case TX_DSC_ERROR_EV_DECODE:
		/* Hardware has disabled the queue; recover via reset */
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		atomic_inc(&efx->errors.tx_desc_fetch);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		EFX_DL_CALLBACK(efx, event, event);
		break;
	}
}
 22.1295 +
/* Process this channel's event queue.
 *
 * Consumes events until the queue is empty or *rx_quota RX packet events
 * have been handled (*rx_quota is decremented once per RX event).
 * Returns a bitmask of the RX DMA queue labels for which packet events
 * were seen, so the caller knows which queues to refill.
 */
#if defined(EFX_USE_FASTCALL)
int fastcall falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
#else
int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
#endif
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rxq;
	int rxdmaqs = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		/* Snapshot the event before clearing it in the ring */
		p_event = falcon_event(channel, read_ptr);
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, EV_CODE);

		switch (ev_code) {
		case RX_IP_EV_DECODE:
			rxq = falcon_handle_rx_event(channel, &event);
			rxdmaqs |= (1 << rxq);
			(*rx_quota)--;
			break;
		case TX_IP_EV_DECODE:
			falcon_handle_tx_event(channel, &event);
			break;
		case DRV_GEN_EV_DECODE:
			/* Self-test event: record the magic cookie */
			channel->eventq_magic
				= EFX_QWORD_FIELD(event, EVQ_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case GLOBAL_EV_DECODE:
			falcon_handle_global_event(channel, &event);
			break;
		case DRIVER_EV_DECODE:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;

	} while (*rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rxdmaqs;
}
 22.1362 +
 22.1363 +void falcon_set_int_moderation(struct efx_channel *channel)
 22.1364 +{
 22.1365 +	efx_dword_t timer_cmd;
 22.1366 +	struct efx_nic *efx = channel->efx;
 22.1367 +
 22.1368 +	/* Set timer register */
 22.1369 +	if (channel->irq_moderation) {
 22.1370 +		/* Round to resolution supported by hardware.  The value we
 22.1371 +		 * program is based at 0.  So actual interrupt moderation
 22.1372 +		 * achieved is ((x + 1) * res).
 22.1373 +		 */
 22.1374 +		unsigned int res = 5;
 22.1375 +		channel->irq_moderation -= (channel->irq_moderation % res);
 22.1376 +		if (channel->irq_moderation < res)
 22.1377 +			channel->irq_moderation = res;
 22.1378 +		EFX_POPULATE_DWORD_2(timer_cmd,
 22.1379 +				     TIMER_MODE, TIMER_MODE_INT_HLDOFF,
 22.1380 +				     TIMER_VAL,
 22.1381 +				     (channel->irq_moderation / res) - 1);
 22.1382 +	} else {
 22.1383 +		EFX_POPULATE_DWORD_2(timer_cmd,
 22.1384 +				     TIMER_MODE, TIMER_MODE_DIS,
 22.1385 +				     TIMER_VAL, 0);
 22.1386 +	}
 22.1387 +	falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
 22.1388 +				  channel->evqnum);
 22.1389 +
 22.1390 +}
 22.1391 +
/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	unsigned int evq_size;
	int rc;

	/* One efx_qword_t event per ring entry */
	evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
	rc = falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
	if (rc)
		return rc;

	/* Bump the driverlink resource floor past this queue number.
	 * NOTE(review): presumably evq_int_min is the first event queue
	 * available to driverlink clients — confirm against driverlink.h */
	nic_data->resources.evq_int_min = max(nic_data->resources.evq_int_min,
					      (unsigned)channel->evqnum + 1);

	return 0;
}
 22.1410 +
/* Pin the event queue buffer, reset it to the empty state and point the
 * hardware at it.  Returns 0 on success or a negative error code. */
int falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	rc = falcon_init_special_buffer(efx, &channel->eventq);
	if (rc)
		return rc;

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     EVQ_EN, 1,
			     EVQ_SIZE, FALCON_EVQ_ORDER,
			     EVQ_BUF_BASE_ID, channel->eventq.index);
	falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			   channel->evqnum);

	/* Program the moderation timer now the queue exists */
	falcon_set_int_moderation(channel);

	return 0;
}
 22.1441 +
 22.1442 +void falcon_fini_eventq(struct efx_channel *channel)
 22.1443 +{
 22.1444 +	efx_oword_t eventq_ptr;
 22.1445 +	struct efx_nic *efx = channel->efx;
 22.1446 +
 22.1447 +	/* Remove event queue from card */
 22.1448 +	EFX_ZERO_OWORD(eventq_ptr);
 22.1449 +	falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
 22.1450 +			   channel->evqnum);
 22.1451 +
 22.1452 +	/* Unpin event queue */
 22.1453 +	falcon_fini_special_buffer(efx, &channel->eventq);
 22.1454 +}
 22.1455 +
/* Free buffers backing event queue (buffer table entries and the DMA
 * memory allocated by falcon_probe_eventq()). */
void falcon_remove_eventq(struct efx_channel *channel)
{
	falcon_free_special_buffer(channel->efx, &channel->eventq);
}
 22.1461 +
 22.1462 +
 22.1463 +/* Generates a test event on the event queue.  A subsequent call to
 22.1464 + * process_eventq() should pick up the event and place the value of
 22.1465 + * "magic" into channel->eventq_magic;
 22.1466 + */
 22.1467 +void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
 22.1468 +{
 22.1469 +	efx_qword_t test_event;
 22.1470 +
 22.1471 +	EFX_POPULATE_QWORD_2(test_event,
 22.1472 +			     EV_CODE, DRV_GEN_EV_DECODE,
 22.1473 +			     EVQ_MAGIC, magic);
 22.1474 +	falcon_generate_event(channel, &test_event);
 22.1475 +}
 22.1476 +
 22.1477 +
 22.1478 +/**************************************************************************
 22.1479 + *
 22.1480 + * Falcon hardware interrupts
 22.1481 + * The hardware interrupt handler does very little work; all the event
 22.1482 + * queue processing is carried out by per-channel tasklets.
 22.1483 + *
 22.1484 + **************************************************************************/
 22.1485 +
 22.1486 +/* Enable/disable/generate Falcon interrupts */
 22.1487 +static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
 22.1488 +				     int force)
 22.1489 +{
 22.1490 +	efx_oword_t int_en_reg_ker;
 22.1491 +
 22.1492 +	EFX_POPULATE_OWORD_2(int_en_reg_ker,
 22.1493 +			     KER_INT_KER, force,
 22.1494 +			     DRV_INT_EN_KER, enabled);
 22.1495 +	falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
 22.1496 +}
 22.1497 +
/* Enable hardware interrupt delivery: clear the interrupt status DMA
 * vector, program its address, enable interrupts, then kick every
 * channel once to bring the event queue read pointers up to date. */
void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	/* Zero INT_KER */
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program INT_ADR_KER_REG */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
			     INT_ADR_KER, efx->irq_status.dma_addr);
	falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel_with_interrupt(channel, efx)
		efx_schedule_channel(channel);
}
 22.1521 +
/* Disable hardware interrupt delivery (no force, no enable). */
void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}
 22.1527 +
/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	/* enabled=1 keeps delivery on, force=1 raises the interrupt */
	falcon_interrupts(efx, 1, 1);
}
 22.1536 +
/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	/* The data written is a dummy value; the write itself is the ack */
	EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
	falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
	/* Read back (from a side-effect-free register) to flush the BIU */
	falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
}
 22.1555 +
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;
	/* NOTE(review): this counter is shared across all NICs driven by
	 * this module and is not atomic — confirm whether per-device,
	 * atomic accounting is wanted here */
	static int n_int_errors;

	falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
	error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);

	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		falcon_read(efx, &reg, MEM_STAT_REG_KER);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable DMA bus mastering on both devices */
	pci_disable_device(efx->pci_dev);
	if (efx->type->is_dual_func)
		pci_disable_device(efx->pci_dev2);

	/* Give up and leave the NIC disabled after repeated failures */
	if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}
 22.1601 +
/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedule event queue processing.
 *
 * This routine must guarantee not to touch the hardware when
 * interrupts are disabled, to allow for correct semantics of
 * efx_suspend() and efx_resume().
 */
#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
#else
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id,
					      struct pt_regs *regs
					      __attribute__ ((unused)))
#endif
{
	struct efx_nic *efx = (struct efx_nic *)dev_id;
	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	falcon_readl(efx, &reg, INT_ISR0_B0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Shared line: the interrupt was not for us */
	if (queues == 0)
		return IRQ_NONE;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	/* Schedule processing of any interrupting queues; bit N of
	 * "queues" corresponds to channel N */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}
 22.1651 +
 22.1652 +
/* Handle a legacy interrupt on A1 silicon: determine interrupting
 * queues from the DMAed interrupt status block, clear it and issue the
 * write-based ack required by the A1 BIU workaround. */
#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
#else
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id,
					      struct pt_regs *regs
					      __attribute__ ((unused)))
#endif
{
	struct efx_nic *efx = (struct efx_nic *)dev_id;
	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues; bit N of
	 * "queues" corresponds to channel N */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}
 22.1704 +
/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 *
 * This routine must guarantee not to touch the hardware when
 * interrupts are disabled, to allow for correct semantics of
 * efx_suspend() and efx_resume().
 */
#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
#else
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id,
					struct pt_regs *regs
					__attribute__ ((unused)))
#endif
{
	/* dev_id is the channel for MSI, unlike the legacy handlers
	 * where it is the NIC */
	struct efx_channel *channel = (struct efx_channel *)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
 22.1743 +
 22.1744 +
 22.1745 +/* Setup RSS indirection table.
 22.1746 + * This maps from the hash value of the packet to RXQ
 22.1747 + */
 22.1748 +static void falcon_setup_rss_indir_table(struct efx_nic *efx)
 22.1749 +{
 22.1750 +	int i = 0;
 22.1751 +	unsigned long offset;
 22.1752 +	unsigned long flags __attribute__ ((unused));
 22.1753 +	efx_dword_t dword;
 22.1754 +
 22.1755 +	if (FALCON_REV(efx) < FALCON_REV_B0)
 22.1756 +		return;
 22.1757 +
 22.1758 +	for (offset = RX_RSS_INDIR_TBL_B0;
 22.1759 +	     offset < RX_RSS_INDIR_TBL_B0 + 0x800;
 22.1760 +	     offset += 0x10) {
 22.1761 +		EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
 22.1762 +				     i % efx->rss_queues);
 22.1763 +		falcon_writel(efx, &dword, offset);
 22.1764 +		i++;
 22.1765 +	}
 22.1766 +}
 22.1767 +
 22.1768 +/* Hook interrupt handler(s)
 22.1769 + * Try MSI and then legacy interrupts.
 22.1770 + */
 22.1771 +int falcon_init_interrupt(struct efx_nic *efx)
 22.1772 +{
 22.1773 +	struct efx_channel *channel;
 22.1774 +	int rc;
 22.1775 +
 22.1776 +	if (!EFX_INT_MODE_USE_MSI(efx)) {
 22.1777 +		irq_handler_t handler;
 22.1778 +		if (FALCON_REV(efx) >= FALCON_REV_B0)
 22.1779 +			handler = falcon_legacy_interrupt_b0;
 22.1780 +		else
 22.1781 +			handler = falcon_legacy_interrupt_a1;
 22.1782 +
 22.1783 +		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
 22.1784 +				 efx->name, efx);
 22.1785 +		if (rc) {
 22.1786 +			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
 22.1787 +				efx->pci_dev->irq);
 22.1788 +			goto fail1;
 22.1789 +		}
 22.1790 +		return 0;
 22.1791 +	}
 22.1792 +
 22.1793 +	/* Hook MSI or MSI-X interrupt */
 22.1794 +	efx_for_each_channel_with_interrupt(channel, efx) {
 22.1795 +		rc = request_irq(channel->irq, falcon_msi_interrupt,
 22.1796 +				 IRQF_PROBE_SHARED, /* Not shared */
 22.1797 +				 efx->name, channel);
 22.1798 +		if (rc) {
 22.1799 +			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
 22.1800 +			goto fail2;
 22.1801 +		}
 22.1802 +	}
 22.1803 +
 22.1804 +	return 0;
 22.1805 +
 22.1806 + fail2:
 22.1807 +	efx_for_each_channel_with_interrupt(channel, efx)
 22.1808 +		free_irq(channel->irq, channel);
 22.1809 + fail1:
 22.1810 +	return rc;
 22.1811 +}
 22.1812 +
/* Unhook all interrupt handlers: free MSI/MSI-X IRQs, acknowledge any
 * pending legacy interrupt, then free the legacy IRQ if in use. */
void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel_with_interrupt(channel, efx)
		if (channel->irq)
			free_irq(channel->irq, channel);

	/* ACK legacy interrupt */
	if (FALCON_REV(efx) >= FALCON_REV_B0)
		falcon_read(efx, &reg, INT_ISR0_B0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
 22.1833 +
 22.1834 +/**************************************************************************
 22.1835 + *
 22.1836 + * EEPROM/flash
 22.1837 + *
 22.1838 + **************************************************************************
 22.1839 + */
 22.1840 +
 22.1841 +/* Wait for SPI command completion */
 22.1842 +static int falcon_spi_wait(struct efx_nic *efx)
 22.1843 +{
 22.1844 +	efx_oword_t reg;
 22.1845 +	int cmd_en, timer_active;
 22.1846 +	int count;
 22.1847 +
 22.1848 +	count = 0;
 22.1849 +	do {
 22.1850 +		falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
 22.1851 +		cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
 22.1852 +		timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
 22.1853 +		if (!cmd_en && !timer_active)
 22.1854 +			return 0;
 22.1855 +		udelay(10);
 22.1856 +	} while (++count < 10000); /* wait upto 100msec */
 22.1857 +	EFX_ERR(efx, "timed out waiting for SPI\n");
 22.1858 +	return -ETIMEDOUT;
 22.1859 +}
 22.1860 +
/* Issue a SPI read command and copy the result into "data".
 * A negative "address" means an unaddressed command.  "len" must not
 * exceed FALCON_SPI_MAX_LEN.  Returns 0 or a negative error code.
 * Serialised against other SPI users by efx->spi_lock. */
static int
falcon_spi_read(const struct efx_spi_device *spi, struct efx_nic *efx,
		unsigned int command, int address, void *data, unsigned int len)
{
	int addressed = (address >= 0);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Acquire SPI lock */
	mutex_lock(&efx->spi_lock);

	/* Check SPI not currently being accessed */
	rc = falcon_spi_wait(efx);
	if (rc)
		goto out;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
		falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
	}

	/* Issue read command */
	EFX_POPULATE_OWORD_7(reg,
			     EE_SPI_HCMD_CMD_EN, 1,
			     EE_SPI_HCMD_SF_SEL, spi->device_id,
			     EE_SPI_HCMD_DABCNT, len,
			     EE_SPI_HCMD_READ, EE_SPI_READ,
			     EE_SPI_HCMD_DUBCNT, 0,
			     EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     EE_SPI_HCMD_ENC, command);
	falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);

	/* Wait for read to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		goto out;

	/* Read data */
	falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
	memcpy(data, &reg, len);

 out:
	/* Release SPI lock */
	mutex_unlock(&efx->spi_lock);

	return rc;
}
 22.1914 +
 22.1915 +static int
 22.1916 +falcon_spi_write(const struct efx_spi_device *spi, struct efx_nic *efx,
 22.1917 +		 unsigned int command, int address, const void *data,
 22.1918 +		 unsigned int len)
 22.1919 +{
 22.1920 +	int addressed = (address >= 0);
 22.1921 +	efx_oword_t reg;
 22.1922 +	int rc;
 22.1923 +
 22.1924 +	/* Input validation */
 22.1925 +	if (len > (addressed ? efx_spi_write_limit(spi, address)
 22.1926 +		   : FALCON_SPI_MAX_LEN))
 22.1927 +		return -EINVAL;
 22.1928 +
 22.1929 +	/* Acquire SPI lock */
 22.1930 +	mutex_lock(&efx->spi_lock);
 22.1931 +
 22.1932 +	/* Check SPI not currently being accessed */
 22.1933 +	rc = falcon_spi_wait(efx);
 22.1934 +	if (rc)
 22.1935 +		goto out;
 22.1936 +
 22.1937 +	/* Program address register, if we have an address */
 22.1938 +	if (addressed) {
 22.1939 +		EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
 22.1940 +		falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
 22.1941 +	}
 22.1942 +
 22.1943 +	/* Program data register, if we have data */
 22.1944 +	if (data) {
 22.1945 +		memcpy(&reg, data, len);
 22.1946 +		falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
 22.1947 +	}
 22.1948 +
 22.1949 +	/* Issue write command */
 22.1950 +	EFX_POPULATE_OWORD_7(reg,
 22.1951 +			     EE_SPI_HCMD_CMD_EN, 1,
 22.1952 +			     EE_SPI_HCMD_SF_SEL, spi->device_id,
 22.1953 +			     EE_SPI_HCMD_DABCNT, len,
 22.1954 +			     EE_SPI_HCMD_READ, EE_SPI_WRITE,
 22.1955 +			     EE_SPI_HCMD_DUBCNT, 0,
 22.1956 +			     EE_SPI_HCMD_ADBCNT,
 22.1957 +			     (addressed ? spi->addr_len : 0),
 22.1958 +			     EE_SPI_HCMD_ENC, command);
 22.1959 +	falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
 22.1960 +
 22.1961 +	/* Wait for write to complete */
 22.1962 +	rc = falcon_spi_wait(efx);
 22.1963 +	if (rc)
 22.1964 +		goto out;
 22.1965 +
 22.1966 + out:
 22.1967 +	/* Release SPI lock */
 22.1968 +	mutex_unlock(&efx->spi_lock);
 22.1969 +
 22.1970 +	return rc;
 22.1971 +}
 22.1972 +
 22.1973 +/**************************************************************************
 22.1974 + *
 22.1975 + * MAC wrapper
 22.1976 + *
 22.1977 + **************************************************************************
 22.1978 + */
/* Drain the TX FIFO on B0 and later silicon by enabling drain mode and
 * resetting the MAC/EM blocks, preserving the multicast hash registers
 * across the reset.  No-op on pre-B0 silicon or if a drain is already
 * in progress. */
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t temp;
	efx_oword_t mcast_reg0;
	efx_oword_t mcast_reg1;
	int count;

	if (FALCON_REV(efx) < FALCON_REV_B0)
		return;

	falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
		return;

	/* MAC stats will fail whilst the TX fifo is draining. Serialise
	 * the drain sequence with the statistics fetch */
	spin_lock(&efx->stats_lock);

	EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
	falcon_write(efx, &temp, MAC0_CTRL_REG_KER);

	/* Save the multicast hash registers; the block reset clears them */
	falcon_read(efx, &mcast_reg0, MAC_MCAST_HASH_REG0_KER);
	falcon_read(efx, &mcast_reg1, MAC_MCAST_HASH_REG1_KER);

	/* Reset the MAC and EM block. */
	falcon_read(efx, &temp, GLB_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(temp, RST_EM, 1);
	falcon_write(efx, &temp, GLB_CTL_REG_KER);

	/* Poll until the hardware clears all three reset bits */
	count = 0;
	while (1) {
		falcon_read(efx, &temp, GLB_CTL_REG_KER);
		if (!EFX_OWORD_FIELD(temp, RST_XGTX) &&
		    !EFX_OWORD_FIELD(temp, RST_XGRX) &&
		    !EFX_OWORD_FIELD(temp, RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	spin_unlock(&efx->stats_lock);

	/* Restore the multicast hash registers. */
	falcon_write(efx, &mcast_reg0, MAC_MCAST_HASH_REG0_KER);
	falcon_write(efx, &mcast_reg1, MAC_MCAST_HASH_REG1_KER);

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);
}
 22.2040 +
 22.2041 +void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 22.2042 +{
 22.2043 +	struct falcon_nic_data *nic_data = efx->nic_data;
 22.2044 +	efx_oword_t temp;
 22.2045 +	int changing_loopback;
 22.2046 +
 22.2047 +	if (FALCON_REV(efx) < FALCON_REV_B0)
 22.2048 +		return;
 22.2049 +
 22.2050 +	/* Isolate the MAC -> RX */
 22.2051 +	falcon_read(efx, &temp, RX_CFG_REG_KER);
 22.2052 +	EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0);
 22.2053 +	falcon_write(efx, &temp, RX_CFG_REG_KER);
 22.2054 +
 22.2055 +	/* Synchronise the EM block against any loopback mode changes by
 22.2056 +	 * draining the TX fifo and resetting. */
 22.2057 +	changing_loopback = (efx->loopback_mode != nic_data->old_loopback_mode);
 22.2058 +	nic_data->old_loopback_mode = efx->loopback_mode;
 22.2059 +	if (changing_loopback || !efx->link_up)
 22.2060 +		falcon_drain_tx_fifo(efx);
 22.2061 +}
 22.2062 +
 22.2063 +void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 22.2064 +{
 22.2065 +	efx_oword_t reg;
 22.2066 +	int link_speed;
 22.2067 +	unsigned int tx_fc;
 22.2068 +
 22.2069 +	if (efx->link_options & GM_LPA_10000)
 22.2070 +		link_speed = 0x3;
 22.2071 +	else if (efx->link_options & GM_LPA_1000)
 22.2072 +		link_speed = 0x2;
 22.2073 +	else if (efx->link_options & GM_LPA_100)
 22.2074 +		link_speed = 0x1;
 22.2075 +	else
 22.2076 +		link_speed = 0x0;
 22.2077 +	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
 22.2078 +	 * as advertised.  Disable to ensure packets are not
 22.2079 +	 * indefinitely held and TX queue can be flushed at any point
 22.2080 +	 * while the link is down.
 22.2081 +	 */
 22.2082 +	EFX_POPULATE_OWORD_5(reg,
 22.2083 +			     MAC_XOFF_VAL, 0xffff /* max pause time */,
 22.2084 +			     MAC_BCAD_ACPT, 1,
 22.2085 +			     MAC_UC_PROM, efx->promiscuous,
 22.2086 +			     MAC_LINK_STATUS, 1, /* always set */
 22.2087 +			     MAC_SPEED, link_speed);
 22.2088 +	/* On B0, MAC backpressure can be disabled and packets get
 22.2089 +	 * discarded. */
 22.2090 +	if (FALCON_REV(efx) >= FALCON_REV_B0) {
 22.2091 +		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
 22.2092 +				    !efx->link_up);
 22.2093 +	}
 22.2094 +
 22.2095 +	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
 22.2096 +
 22.2097 +	/*
 22.2098 +	 * Transmission of pause frames when RX crosses the threshold is
 22.2099 +	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
 22.2100 +	 *
 22.2101 +	 * Action on receipt of pause frames is controller by XM_DIS_FCNTL
 22.2102 +	 */
 22.2103 +	tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
 22.2104 +	falcon_read(efx, &reg, RX_CFG_REG_KER);
 22.2105 +	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 22.2106 +
 22.2107 +	/* Unisolate the MAC -> RX */
 22.2108 +	if (FALCON_REV(efx) >= FALCON_REV_B0)
 22.2109 +		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
 22.2110 +	falcon_write(efx, &reg, RX_CFG_REG_KER);
 22.2111 +}
 22.2112 +
 22.2113 +int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
 22.2114 +{
 22.2115 +	efx_oword_t reg;
 22.2116 +	u32 *dma_done;
 22.2117 +	int i;
 22.2118 +
 22.2119 +	if (disable_dma_stats)
 22.2120 +		return 0;
 22.2121 +
 22.2122 +	/* Statistics fetch will fail if the MAC is in TX drain */
 22.2123 +	if (FALCON_REV(efx) >= FALCON_REV_B0) {
 22.2124 +		efx_oword_t temp;
 22.2125 +		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
 22.2126 +		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
 22.2127 +			return 0;
 22.2128 +	}
 22.2129 +
 22.2130 +	/* Clear completion pointer */
 22.2131 +	dma_done = (efx->stats_buffer.addr + done_offset);
 22.2132 +	*dma_done = FALCON_STATS_NOT_DONE;
 22.2133 +	wmb(); /* ensure done flag is clear */
 22.2134 +
 22.2135 +	/* Initiate DMA transfer of stats */
 22.2136 +	EFX_POPULATE_OWORD_2(reg,
 22.2137 +			     MAC_STAT_DMA_CMD, 1,
 22.2138 +			     MAC_STAT_DMA_ADR,