tristate "Solarflare Solarstorm SFC4000 support"
depends on PCI && INET
select MII
+ select CRC32
help
This driver supports 10-gigabit Ethernet cards based on
the Solarflare Communications Solarstorm SFC4000 controller.
new boot ROM to the NIC.
config SFC_RESOURCE
- depends on SFC && X86
- tristate "Solarflare Solarstorm SFC4000 resource driver"
- help
- This module provides the SFC resource manager driver.
-
+ depends on SFC && X86
+ tristate "Solarflare Solarstorm SFC4000 resource driver"
+ help
+ This module provides the SFC resource manager driver.
+sfc-y += efx.o falcon.o tx.o rx.o mentormac.o falcon_gmac.o \
+ falcon_xmac.o alaska.o i2c-direct.o selftest.o \
+ driverlink.o ethtool.o xfp_phy.o mdio_10g.o \
+ txc43128_phy.o tenxpress.o lm87_support.o boards.o \
+ sfe4001.o pm8358_phy.o null_phy.o kernel_compat.o
+sfc-$(CONFIG_SFC_DEBUGFS) += debugfs.o
+obj-$(CONFIG_SFC) += sfc.o
-# Final objects
-sfc_o = sfc.o
-sfc_mtd_o = sfc_mtd.o
-
-# Constituent objects
-sfc_elements_o :=
-sfc_elements_o += efx.o
-sfc_elements_o += falcon.o
-sfc_elements_o += tx.o
-sfc_elements_o += rx.o
-sfc_elements_o += mentormac.o
-sfc_elements_o += falcon_gmac.o
-sfc_elements_o += falcon_xmac.o
-sfc_elements_o += alaska.o
-sfc_elements_o += i2c-direct.o
-sfc_elements_o += selftest.o
-sfc_elements_o += driverlink.o
-ifeq ($(CONFIG_SFC_DEBUGFS),y)
-sfc_elements_o += debugfs.o
-endif
-sfc_elements_o += ethtool.o
-sfc_elements_o += xfp_phy.o
-sfc_elements_o += mdio_10g.o
-sfc_elements_o += txc43128_phy.o
-sfc_elements_o += tenxpress.o
-sfc_elements_o += lm87_support.o
-sfc_elements_o += boards.o
-sfc_elements_o += sfe4001.o
-sfc_elements_o += pm8358_phy.o
-sfc_elements_o += null_phy.o
-sfc_elements_o += phy.o
-sfc_elements_o += kernel_compat.o
-
-sfc_mtd_elements_o := mtd.o
-
-obj-$(CONFIG_SFC) += $(sfc_o)
-obj-$(CONFIG_SFC_MTD) += $(sfc_mtd_o)
-
-sfc-objs = $(sfc_elements_o)
-sfc_mtd-objs = $(sfc_mtd_elements_o)
+sfc_mtd-y = mtd.o
+obj-$(CONFIG_SFC_MTD) += sfc_mtd.o
obj-$(CONFIG_SFC_RESOURCE) += sfc_resource/
+
--- /dev/null
+0x2e5e77fa efx_dl_unregister_driver drivers/net/sfc/sfc EXPORT_SYMBOL
+0x4ac7afe9 efx_dl_schedule_reset drivers/net/sfc/sfc EXPORT_SYMBOL
+0xbb52ca8e efx_dl_register_driver_api_ver_1 drivers/net/sfc/sfc EXPORT_SYMBOL
+0x278552f6 efx_dl_register_callbacks drivers/net/sfc/sfc EXPORT_SYMBOL
+0xc4414515 efx_dl_get_nic drivers/net/sfc/sfc EXPORT_SYMBOL
+0x42cae6c4 efx_dl_unregister_callbacks drivers/net/sfc/sfc EXPORT_SYMBOL
#define EFX_DWORD_3_LBN 96
#define EFX_DWORD_3_WIDTH 32
-#define EFX_BYTE 1
-#define EFX_WORD 2
-#define EFX_DWORD 4
-#define EFX_OWORD 8
-
/* Specified attribute (e.g. LBN) of the specified field */
#define EFX_VAL(field, attribute) field ## _ ## attribute
/* Low bit number of the specified field */
#endif
#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
- if (FALCON_REV(efx) == FALCON_REV_B0) { \
+ if (FALCON_REV(efx) >= FALCON_REV_B0) { \
EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
} else { \
EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
} while (0)
#define EFX_QWORD_FIELD_VER(efx, qword, field) \
- (FALCON_REV(efx) == FALCON_REV_B0 ? \
+ (FALCON_REV(efx) >= FALCON_REV_B0 ? \
EFX_QWORD_FIELD((qword), field##_B0) : \
EFX_QWORD_FIELD((qword), field##_A1))
~((u64) 0) : ~((u32) 0))
#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
-/*
- * Determine if a DMA address is over the 4GB threshold
- *
- * Defined in a slightly tortuous way to avoid compiler warnings.
- */
-static inline int efx_is_over_4gb(dma_addr_t address)
-{
- if (DMA_ADDR_T_WIDTH > 32)
- return (((u64) address) >> 32) ? 1 : 0;
- else
- /* Can never be true */
- return 0;
-}
-
#endif /* EFX_BITFIELD_H */
#include <linux/module.h>
#include <linux/pci.h>
-/* For out-of-tree builds we always need procfs, if only for a compatibility
- * symlink.
- */
#include <linux/proc_fs.h>
#include <linux/dcache.h>
#include <linux/seq_file.h>
#include "debugfs.h"
#include "falcon.h"
-/* EFX_USE_DEBUGFS is defined by kernel_compat.h so we can't decide whether to
- * include this earlier.
- */
-#ifdef EFX_USE_DEBUGFS
-#include <linux/debugfs.h>
-#endif
-
#ifndef PRIu64
# if (BITS_PER_LONG == 64)
# define PRIu64 "lu"
# endif
#endif
-#ifndef EFX_USE_DEBUGFS
-
static void efx_debugfs_remove(struct proc_dir_entry *entry)
{
if (entry)
#define debugfs_create_dir proc_mkdir
#define debugfs_create_symlink proc_symlink
-#endif /* !EFX_USE_DEBUGFS */
/* Parameter definition bound to a structure - each file has one of these */
struct efx_debugfs_bound_param {
/* Sequential file interface to bound parameters */
-#if defined(EFX_USE_DEBUGFS)
-
-static int efx_debugfs_seq_show(struct seq_file *file, void *v)
-{
- struct efx_debugfs_bound_param *binding =
- (struct efx_debugfs_bound_param *)file->private;
-
- return binding->param->reader(file,
- binding->structure +
- binding->param->offset);
-}
-
-static int efx_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, efx_debugfs_seq_show, inode->i_private);
-}
-
-#else /* EFX_NOT_UPSTREAM && !EFX_USE_DEBUGFS */
-
static int efx_debugfs_seq_show(struct seq_file *file, void *v)
{
struct proc_dir_entry *entry = (struct proc_dir_entry *)file->private;
return single_open(file, efx_debugfs_seq_show, PROC_I(inode)->pde);
}
-#endif /* !EFX_NOT_UPSTREAM || EFX_USE_DEBUGFS */
-
static struct file_operations efx_debugfs_file_ops = {
.owner = THIS_MODULE,
};
-#if defined(EFX_USE_DEBUGFS)
-
-/**
- * efx_fini_debugfs_child - remove a named child of a debugfs directory
- * @dir: Directory
- * @name: Name of child
- *
- * This removes the named child from the directory, if it exists.
- */
-void efx_fini_debugfs_child(struct dentry *dir, const char *name)
-{
- struct qstr child_name;
- struct dentry *child;
-
- child_name.len = strlen(name);
- child_name.name = name;
- child_name.hash = full_name_hash(child_name.name, child_name.len);
- child = d_lookup(dir, &child_name);
- if (child) {
- /* If it's a "regular" file, free its parameter binding */
- if (S_ISREG(child->d_inode->i_mode))
- kfree(child->d_inode->i_private);
- debugfs_remove(child);
- dput(child);
- }
-}
-
-#else /* EFX_NOT_UPSTREAM && !EFX_USE_DEBUGFS */
-
void efx_fini_debugfs_child(struct proc_dir_entry *dir, const char *name)
{
remove_proc_entry(name, dir);
}
-#endif /* !EFX_NOT_UPSTREAM || EFX_USE_DEBUGFS */
-
/*
* Remove a debugfs directory.
*
while (param->name) {
struct dentry *entry;
-#if defined(EFX_USE_DEBUGFS)
- struct efx_debugfs_bound_param *binding;
-
- binding = kmalloc(sizeof(*binding), GFP_KERNEL);
- if (!binding)
- goto err;
- binding->param = param;
- binding->structure = structure;
-
- entry = debugfs_create_file(param->name, S_IRUGO, parent,
- binding, &efx_debugfs_file_ops);
- if (!entry) {
- kfree(binding);
- goto err;
- }
-#else
entry = create_proc_entry(param->name, S_IRUGO, parent);
if (!entry)
goto err;
entry->proc_fops = &efx_debugfs_file_ops;
smp_wmb();
entry->read_proc = (read_proc_t *) structure;
-#endif
param++;
}
static struct efx_debugfs_parameter efx_debugfs_port_parameters[] = {
EFX_NAMED_PARAMETER(enabled, struct efx_nic, port_enabled,
int, efx_debugfs_read_int),
- EFX_INT_PARAMETER(struct efx_nic, net_dev_registered),
EFX_INT_PARAMETER(struct efx_nic, rx_checksum_enabled),
EFX_ATOMIC_PARAMETER(struct efx_nic, netif_stop_count),
EFX_INT_PARAMETER(struct efx_nic, link_up),
EFX_INT_PARAMETER(struct efx_channel, rx_alloc_level),
EFX_INT_PARAMETER(struct efx_channel, rx_alloc_push_pages),
EFX_INT_PARAMETER(struct efx_channel, rx_alloc_pop_pages),
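+	/* Per-channel LRO/SSR (soft segment reassembly) counters */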
+ EFX_UINT_PARAMETER(struct efx_channel, ssr.n_merges),
+ EFX_UINT_PARAMETER(struct efx_channel, ssr.n_bursts),
+ EFX_UINT_PARAMETER(struct efx_channel, ssr.n_slow_start),
+ EFX_UINT_PARAMETER(struct efx_channel, ssr.n_misorder),
+ EFX_UINT_PARAMETER(struct efx_channel, ssr.n_too_many),
+ EFX_UINT_PARAMETER(struct efx_channel, ssr.n_new_stream),
+ EFX_UINT_PARAMETER(struct efx_channel, ssr.n_drop_idle),
+ EFX_UINT_PARAMETER(struct efx_channel, ssr.n_drop_closed),
{NULL},
};
int efx_init_debugfs(void)
{
/* Create top-level directory */
-#if defined(EFX_USE_DEBUGFS)
- efx_debug_root = debugfs_create_dir("sfc", NULL);
-#else
efx_debug_root = proc_mkdir("sfc", proc_root_driver);
-#endif
if (!efx_debug_root)
goto err;
if (!efx_debug_cards)
goto err;
-#if defined(EFX_USE_DEBUGFS)
- /* Create compatibility sym-link */
- if (!proc_symlink("sfc", proc_root_driver, "/sys/kernel/debug/sfc"))
- goto err;
-#endif
return 0;
err:
*/
void efx_fini_debugfs(void)
{
-#if defined(EFX_USE_DEBUGFS)
remove_proc_entry("sfc", proc_root_driver);
-#endif
debugfs_remove(efx_debug_cards);
efx_debug_cards = NULL;
debugfs_remove(efx_debug_root);
printk(KERN_INFO "Efx driverlink unregistering %s driver\n",
driver->name);
- /* Acquire lock. We can't return failure, so have to use
- * down() instead of down_interruptible()
- */
+ /* Acquire lock. We can't return failure */
mutex_lock(&efx_driverlink_lock);
- /* Remove all devices claimed by the driver */
list_for_each_entry_safe(efx_handle, efx_handle_n,
&driver->device_list, driver_node)
efx_dl_del_device(&efx_handle->efx_dev);
- /* Remove driver from driver list */
list_del(&driver->node);
- /* Release lock */
mutex_unlock(&efx_driverlink_lock);
}
EXPORT_SYMBOL(efx_dl_unregister_driver);
* To avoid a branch point on the fast-path, the callbacks are always
* implemented - they are never NULL.
*/
-#if defined(EFX_USE_FASTCALL)
static enum efx_veto fastcall
-#else
-static enum efx_veto
-#endif
efx_dummy_tx_packet_callback(struct efx_dl_device *efx_dev, struct sk_buff *skb)
{
/* Never veto the packet */
return EFX_ALLOW_PACKET;
}
-#if defined(EFX_USE_FASTCALL)
static enum efx_veto fastcall
-#else
-static enum efx_veto
-#endif
efx_dummy_rx_packet_callback(struct efx_dl_device *efx_dev,
const char *pkt_buf, int len)
{
#define EFX_DRIVERLINK_API_H
#include <linux/list.h> /* for struct list_head */
-#if !defined(EFX_USE_FASTCALL)
- #include <linux/version.h>
- #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- #define EFX_USE_FASTCALL yes
- #include <linux/linkage.h>
- #endif
-#endif
+#include <linux/linkage.h>
+#define EFX_USE_FASTCALL yes
/**
* DOC: Efx driverlink API
* The sfc driver will provide the appropriate lock semantics for
* the underlying hardware.
* @buffer_table_min: First available buffer table entry
- * @buffer_table_max: Last available buffer table entry + 1
+ * @buffer_table_lim: Last available buffer table entry + 1
* @evq_timer_min: First available event queue with timer
- * @evq_timer_max: Last available event queue with timer + 1
+ * @evq_timer_lim: Last available event queue with timer + 1
* @evq_int_min: First available event queue with interrupt
- * @evq_int_max: Last available event queue with interrupt + 1
+ * @evq_int_lim: Last available event queue with interrupt + 1
* @rxq_min: First available RX queue
- * @rxq_max: Last available RX queue + 1
+ * @rxq_lim: Last available RX queue + 1
* @txq_min: First available TX queue
- * @txq_max: Last available TX queue + 1
+ * @txq_lim: Last available TX queue + 1
* @flags: Hardware variation flags
*/
struct efx_dl_falcon_resources {
struct efx_dl_device_info hdr;
spinlock_t *biu_lock;
- unsigned buffer_table_min, buffer_table_max;
- unsigned evq_timer_min, evq_timer_max;
- unsigned evq_int_min, evq_int_max;
- unsigned rxq_min, rxq_max;
- unsigned txq_min, txq_max;
+ unsigned buffer_table_min, buffer_table_lim;
+ unsigned evq_timer_min, evq_timer_lim;
+ unsigned evq_int_min, evq_int_lim;
+ unsigned rxq_min, rxq_lim;
+ unsigned txq_min, txq_lim;
enum efx_dl_falcon_resource_flags flags;
};
* may have multiple TX queues, running in parallel, please avoid
 * the need for locking if at all possible.
*/
-#if defined(EFX_USE_FASTCALL)
enum efx_veto fastcall (*tx_packet) (struct efx_dl_device *efx_dev,
struct sk_buff *skb);
-#else
- enum efx_veto (*tx_packet) (struct efx_dl_device *efx_dev,
- struct sk_buff *skb);
-#endif
/*
* rx_packet - Packet received.
* allows for lockless operation between receive channels, so
* please avoid the need for locking if at all possible.
*/
-#if defined(EFX_USE_FASTCALL)
enum efx_veto fastcall (*rx_packet) (struct efx_dl_device *efx_dev,
const char *pkt_hdr, int pkt_len);
-#else
- enum efx_veto (*rx_packet) (struct efx_dl_device *efx_dev,
- const char *pkt_hdr, int pkt_len);
-#endif
/*
* link_change - Link status change.
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
-#include <asm/uaccess.h>
#include "net_driver.h"
#include "gmii.h"
#include "driverlink.h"
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_MONITOR] = "MONITOR",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_MONITOR] = "MONITOR",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
+ [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
};
const unsigned int efx_nic_state_max = STATE_MAX;
#define EFX_MAX_MTU (9 * 1024)
+/* RX slow fill workqueue. If memory allocation fails in the fast path,
+ * a work item is pushed onto this work queue to retry the allocation later,
+ * to avoid the NIC being starved of RX buffers. Since this is a per cpu
+ * workqueue, there is nothing to be gained in making it per NIC.
+ */
+static struct workqueue_struct *refill_workqueue;
/**************************************************************************
*
*
*************************************************************************/
+/*
+ * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
+ *
+ * This sets the default for new devices. It can be controlled later
+ * using ethtool.
+ */
+static int lro = 1;
+module_param(lro, int, 0644);
+MODULE_PARM_DESC(lro, "Large receive offload acceleration");
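+/* e.g. "modprobe sfc lro=0" changes the default for devices probed afterwards;
+ * the 0644 mode should also expose it at /sys/module/sfc/parameters/lro. */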
+
/*
* Use separate channels for TX and RX events
*
/* This controls whether or not the driver will initialise devices
* with invalid MAC addresses stored in the EEPROM or flash. If true,
* such devices will be initialised with a random locally-generated
- * MAC address. This allows for loading the efx_mtd driver to
+ * MAC address. This allows for loading the sfc_mtd driver to
* reprogram the flash, even if the flash contents (including the MAC
* address) have previously been erased.
*/
*/
static unsigned int allow_load_on_failure;
-/* Set to 1 to enable the use of Message-Signalled Interrupts (MSI).
- * MSI will not work on some motherboards due to limitations of the
- * chipset, so the default is off.
- *
- * This is the highest capability interrupt mode to use
+/* This is the first interrupt mode to try out of:
* 0 => MSI-X
* 1 => MSI
* 2 => legacy
* i.e. the number of CPUs among which we may distribute simultaneous
* interrupt handling.
*
- * Cards without MSI-X will only target one CPU
- *
- * Default (0) means to use all CPUs in the system. This parameter
- * can be set using "rss_cpus=xxx" when loading the module.
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each package (level II cache).
*/
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);
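+/* Kernel entry points and the reset path both hold the RTNL lock once the NIC
+ * is RUNNING (or RESETTING), so asserting it here catches unserialised
+ * callers; during STATE_INIT no such serialisation is needed. */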
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_RUNNING) || \
+ (efx->state == STATE_RESETTING)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
/**************************************************************************
*
* Event queue processing
channel->rx_pkt = NULL;
}
+ efx_flush_lro(channel);
efx_rx_strategy(channel);
/* Refill descriptor rings as necessary */
* NAPI guarantees serialisation of polls of the same device, which
* provides the guarantee required by efx_process_channel().
*/
-#if !defined(EFX_HAVE_OLD_NAPI)
-static int efx_poll(struct napi_struct *napi, int budget)
-{
- struct efx_channel *channel =
- container_of(napi, struct efx_channel, napi_str);
- struct net_device *napi_dev = channel->napi_dev;
-#else
static int efx_poll(struct net_device *napi, int *budget_ret)
{
struct net_device *napi_dev = napi;
struct efx_channel *channel = napi_dev->priv;
int budget = min(napi_dev->quota, *budget_ret);
-#endif
int unused;
int rx_packets;
unused = efx_process_channel(channel, budget);
rx_packets = (budget - unused);
-#if defined(EFX_HAVE_OLD_NAPI)
napi_dev->quota -= rx_packets;
*budget_ret -= rx_packets;
-#endif
if (rx_packets < budget) {
/* There is no race here; although napi_disable() will
efx_channel_processed(channel);
}
-#if !defined(EFX_HAVE_OLD_NAPI)
- return rx_packets;
-#else
return (rx_packets >= budget);
-#endif
}
/* Process the eventq of the specified channel immediately on this CPU
{
EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
- ASSERT_RTNL();
-
/* Initialise fields */
channel->eventq_read_ptr = 0;
{
EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
- ASSERT_RTNL();
-
falcon_fini_eventq(channel);
}
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
- /* Page-based allocation page-order */
+ /* Calculate page-order */
for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
;
/* Wait for any NAPI processing to complete */
napi_disable(&channel->napi_str);
- /* Ensure that any worker threads have exited or will be
- * no-ops.
- */
+ /* Ensure that any worker threads have exited or will be no-ops */
efx_for_each_channel_rx_queue(rx_queue, channel) {
spin_lock_bh(&rx_queue->add_lock);
spin_unlock_bh(&rx_queue->add_lock);
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
efx_for_each_channel(channel, efx) {
EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
channel->used_flags = 0;
}
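+/* Defer a descriptor-ring refill onto the shared refill workqueue; used when
+ * RX buffer allocation fails in the fast path (see the refill_workqueue
+ * comment above). */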
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+{
+ queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+}
+
/**************************************************************************
*
* Port handling
*/
static void efx_link_status_changed(struct efx_nic *efx)
{
- unsigned long flags __attribute__ ((unused));
int carrier_ok;
- /* Ensure no link status notifications get sent to the OS after the net
- * device has been unregistered. */
- if (!efx->net_dev_registered)
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN */
+ if (!netif_running(efx->net_dev))
return;
carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
(efx->loopback_mode ? " LOOPBACK]" : ""),
(efx->promiscuous ? " [PROMISC]" : ""));
} else {
- EFX_INFO(efx, "link down\n");
+ EFX_INFO(efx, "link down%s\n",
+ efx->phy_powered ? "" : " [OFF]");
}
}
-/* This call reinitialises the MAC to pick up new PHY settings
- * To call from a context that cannot sleep use reconfigure_work work item
- * For on_disabled=1 the caller must be serialised against efx_reset,
- * ideally by holding the rtnl lock.
- */
-void efx_reconfigure_port(struct efx_nic *efx, int on_disabled)
+/* This call reinitialises the MAC to pick up new PHY settings. The
+ * caller must hold the mac_lock */
+static void __efx_reconfigure_port(struct efx_nic *efx)
{
- mutex_lock(&efx->mac_lock);
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
- EFX_LOG(efx, "reconfiguring MAC from PHY settings\n");
-
- if (on_disabled)
- ASSERT_RTNL();
- else if (!efx->port_enabled)
- goto out;
+ EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
+ raw_smp_processor_id());
efx->mac_op->reconfigure(efx);
-out:
/* Inform kernel of loss/gain of carrier */
efx_link_status_changed(efx);
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled. */
+void efx_reconfigure_port(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ mutex_lock(&efx->mac_lock);
+ __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
}
+/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
+ * we skip the port reconfigure if the port is disabled. Care is taken
+ * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
reconfigure_work);
- EFX_LOG(efx, "MAC reconfigure executing on CPU %d\n",
- raw_smp_processor_id());
-
- /* Reinitialise MAC to activate new PHY parameters */
- efx_reconfigure_port(efx, 0);
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
return 0;
}
-/* Allow efx_reconfigure_port() to run, and propagate delayed changes
- * to the promiscuous flag to the MAC if needed */
+/* Allow efx_reconfigure_port() to be scheduled, and close the window
+ * between efx_stop_port and efx_flush_all whereby a previously scheduled
+ * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
EFX_LOG(efx, "start port\n");
- ASSERT_RTNL();
-
BUG_ON(efx->port_enabled);
mutex_lock(&efx->mac_lock);
efx->port_enabled = 1;
+ __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
-
- if (efx->net_dev_registered) {
- int promiscuous;
-
- netif_tx_lock_bh(efx->net_dev);
- promiscuous = (efx->net_dev->flags & IFF_PROMISC) ? 1 : 0;
- if (efx->promiscuous != promiscuous) {
- efx->promiscuous = promiscuous;
- queue_work(efx->workqueue, &efx->reconfigure_work);
- }
- netif_tx_unlock_bh(efx->net_dev);
- }
}
-/* Prevents efx_reconfigure_port() from executing, and prevents
+/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
* efx_set_multicast_list() from scheduling efx_reconfigure_work.
* efx_reconfigure_work can still be scheduled via NAPI processing
* until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
EFX_LOG(efx, "stop port\n");
- ASSERT_RTNL();
mutex_lock(&efx->mac_lock);
efx->port_enabled = 0;
mutex_unlock(&efx->mac_lock);
/* Serialise against efx_set_multicast_list() */
- if (efx->net_dev_registered) {
+ if (NET_DEV_REGISTERED(efx)) {
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
+ dma_addr_t dma_mask = efx->type->max_dma_mask;
int rc;
EFX_LOG(efx, "initialising I/O\n");
* (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
 * masks even though they reject 46 bit masks.
*/
- efx->dma_mask = efx->type->max_dma_mask;
- while (efx->dma_mask > 0x7fffffffUL) {
- if (pci_dma_supported(pci_dev, efx->dma_mask) &&
- ((rc = pci_set_dma_mask(pci_dev, efx->dma_mask)) == 0))
+ while (dma_mask > 0x7fffffffUL) {
+ if (pci_dma_supported(pci_dev, dma_mask) &&
+ ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
break;
- efx->dma_mask >>= 1;
+ dma_mask >>= 1;
}
if (rc) {
EFX_ERR(efx, "could not find a suitable DMA mask\n");
goto fail2;
}
- EFX_LOG(efx, "using DMA mask %llx\n",
- (unsigned long long)efx->dma_mask);
- rc = pci_set_consistent_dma_mask(pci_dev, efx->dma_mask);
+ EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
+ rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
if (rc) {
/* pci_set_consistent_dma_mask() is not *allowed* to
* fail with a mask that pci_set_dma_mask() accepted,
/* Get memory base address */
efx->membase_phys = pci_resource_start(efx->pci_dev,
efx->type->mem_bar);
-#if !defined(EFX_HAVE_MSIX_TABLE_RESERVED)
rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
-#else
- if (!request_mem_region(efx->membase_phys, efx->type->mem_map_size,
- "sfc"))
- rc = -EIO;
-#endif
if (rc) {
EFX_ERR(efx, "request for memory BAR failed\n");
rc = -EIO;
}
if (efx->membase_phys) {
-#if !defined(EFX_HAVE_MSIX_TABLE_RESERVED)
pci_release_region(efx->pci_dev, efx->type->mem_bar);
-#else
- release_mem_region(efx->membase_phys, efx->type->mem_map_size);
-#endif
efx->membase_phys = 0UL;
}
}
/* Probe the number and type of interrupts we are able to obtain. */
-static int efx_probe_interrupts(struct efx_nic *efx)
+static void efx_probe_interrupts(struct efx_nic *efx)
{
+ int max_channel = efx->type->phys_addr_channels - 1;
struct msix_entry xentries[EFX_MAX_CHANNELS];
int rc, i;
- /* Select number of used RSS queues */
- /* TODO: Can we react to CPU hotplug? */
- if (rss_cpus == 0)
- rss_cpus = num_online_cpus();
-
- efx->rss_queues = 1;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
- unsigned int max_channel = efx->type->phys_addr_channels - 1;
-
BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
- efx->rss_queues = min(max_channel + 1, rss_cpus);
+
+ if (rss_cpus == 0) {
+#ifdef topology_core_siblings
+ cpumask_t core_mask;
+ int cpu;
+
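+			/* Default: one RSS queue per physical package, counted
+			 * by accumulating each online CPU's core-sibling mask */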
+ cpus_clear(core_mask);
+ efx->rss_queues = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpu_isset(cpu, core_mask)) {
+ ++efx->rss_queues;
+ cpus_or(core_mask, core_mask,
+ topology_core_siblings(cpu));
+ }
+ }
+#else
+ efx->rss_queues = num_online_cpus();
+#endif
+ } else {
+ efx->rss_queues = rss_cpus;
+ }
+
+ /* Limit the number of rss queues appropriately */
+ efx->rss_queues = min(efx->rss_queues, max_channel + 1);
efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
- }
- /* Determine how many RSS queues we can use, and mark channels
- * with the appropriate interrupt state */
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
- /* Build MSI request structure */
+ /* Request maximum number of MSI interrupts, and fill out
+		 * the channel interrupt information from the allowed allocation */
for (i = 0; i < efx->rss_queues; i++)
xentries[i].entry = i;
-
- /* Request maximum number of MSI interrupts */
rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
if (rc > 0) {
EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
rc = pci_enable_msix(efx->pci_dev, xentries,
efx->rss_queues);
}
+
if (rc == 0) {
for (i = 0; i < efx->rss_queues; i++) {
efx->channel[i].has_interrupt = 1;
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->rss_queues = 1;
/* Every channel is interruptible */
for (i = 0; i < EFX_MAX_CHANNELS; i++)
efx->channel[i].has_interrupt = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
-
- return 0;
}
static void efx_remove_interrupts(struct efx_nic *efx)
/* Select number of used resources
* Should be called after probe_interrupts()
*/
-static int efx_select_used(struct efx_nic *efx)
+static void efx_select_used(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
rx_queue++;
}
}
- return 0;
}
static int efx_probe_nic(struct efx_nic *efx)
/* Determine the number of channels and RX queues by trying to hook
* in MSI-X interrupts. */
- rc = efx_probe_interrupts(efx);
- if (rc)
- goto fail2;
+ efx_probe_interrupts(efx);
/* Determine number of RX queues and TX queues */
- rc = efx_select_used(efx);
- if (rc)
- goto fail3;
+ efx_select_used(efx);
/* Register debugfs entries */
rc = efx_init_debugfs_nic(efx);
if (rc)
- goto fail4;
+ goto fail2;
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
return 0;
- fail4:
- /* fall-thru */
- fail3:
- efx_remove_interrupts(efx);
fail2:
+ efx_remove_interrupts(efx);
falcon_remove_nic(efx);
fail1:
return rc;
fail3:
efx_for_each_channel(channel, efx)
efx_remove_channel(channel);
- fail2:
efx_remove_port(efx);
+ fail2:
+ efx_remove_nic(efx);
fail1:
return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
- * port, kernel transmit queue, NAPI processing and hardware interrupts.
+ * port, kernel transmit queue, NAPI processing and hardware interrupts,
+ * and ensures that the port is scheduled to be reconfigured.
* This function is safe to call multiple times when the NIC is in any
* state. */
static void efx_start_all(struct efx_nic *efx)
{
struct efx_channel *channel;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
/* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */
return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return;
- if (efx->net_dev_registered && !netif_running(efx->net_dev))
+ if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
return;
/* Mark the port as enabled so port reconfigurations can start, then
/* Start hardware monitor if we're in RUNNING */
if (efx->state == STATE_RUNNING)
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
+ queue_work(efx->workqueue, &efx->monitor_work);
}
/* Flush all delayed work. Should only be called when no more delayed work
* since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
-#if defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
- struct efx_rx_queue *rx_queue;
-
- /* Make sure the hardware monitor is stopped */
- cancel_delayed_work_sync(&efx->monitor_work);
-
- /* Ensure that all RX slow refills are complete. */
- efx_for_each_rx_queue(rx_queue, efx) {
- cancel_delayed_work_sync(&rx_queue->work);
- }
-#endif
-
-#if defined(EFX_USE_CANCEL_WORK_SYNC)
- /* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->reconfigure_work);
-#endif
-
-#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
/* Ensure that the hardware monitor and asynchronous port
* reconfigurations are complete, which are the only two consumers
* of efx->workqueue. Since the hardware monitor runs on a long period,
/* efx_rx_work will disarm if !channel->enabled, so we can just
* flush the refill workqueue twice as well. */
- flush_workqueue(efx->refill_workqueue);
- flush_workqueue(efx->refill_workqueue);
-#endif
+ flush_workqueue(refill_workqueue);
+ flush_workqueue(refill_workqueue);
}
/* Quiesce hardware and software without bringing the link down.
{
struct efx_channel *channel;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
/* port_enabled can be read safely under the rtnl lock */
if (!efx->port_enabled)
if (channel->irq)
synchronize_irq(channel->irq);
- /* Stop all synchronous port reconfigurations. */
- efx_stop_port(efx);
-
/* Stop all NAPI processing and synchronous rx refills */
efx_for_each_channel(channel, efx)
efx_stop_channel(channel);
+ /* Stop all asynchronous port reconfigurations. Since all
+ * event processing has already been stopped, there is no
+	 * window to lose phy events */
+ efx_stop_port(efx);
+
/* Flush reconfigure_work, refill_workqueue, monitor_work */
efx_flush_all(efx);
+ /* Isolate the MAC from the TX and RX engines, so that queue
+ * flushes will complete in a timely fashion. */
+ falcon_deconfigure_mac_wrapper(efx);
+ falcon_drain_tx_fifo(efx);
+
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
efx_stop_queue(efx);
- if (efx->net_dev_registered) {
+ if (NET_DEV_REGISTERED(efx)) {
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
return rc;
}
+/* A convenience function to safely flush all the queues */
int efx_flush_queues(struct efx_nic *efx)
{
int rc;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
- /* We can't just flush the tx queues because the event queues
- * may contain tx completions from that queue. Just flush everything */
efx_fini_channels(efx);
rc = efx_init_channels(efx);
if (rc) {
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
efx_for_each_tx_queue(tx_queue, efx)
tx_queue->channel->irq_moderation = tx_usecs;
* efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
-#if !defined(EFX_NEED_WORK_API_WRAPPERS)
- struct efx_nic *efx = container_of(data, struct efx_nic,
- monitor_work.work);
-#else
struct efx_nic *efx = container_of(data, struct efx_nic,
monitor_work);
-#endif
int rc = 0;
EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
raw_smp_processor_id());
-
-#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
/* Without cancel_delayed_work_sync(), we have to make sure that
* we don't rearm when port_enabled == 0 */
mutex_lock(&efx->mac_lock);
}
rc = efx->mac_op->check_hw(efx);
-#else
- /* If the mac_lock is already held then it is likely a port
- * reconfiguration is already in place, which will likely do
- * most of the work of check_hw() anyway. */
- if (!mutex_trylock(&efx->mac_lock)) {
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
- return;
- }
-
- if (efx->port_enabled)
- rc = efx->mac_op->check_hw(efx);
-#endif
mutex_unlock(&efx->mac_lock);
if (rc) {
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct efx_nic *efx = net_dev->priv;
- int rc;
- ASSERT_RTNL();
+ if (!in_interrupt())
+ EFX_ASSERT_RESET_SERIALISED(efx);
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- rc = generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
- break;
- case SIOCSMIIREG:
- rc = generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
- efx_reconfigure_port(efx, 0);
- break;
- default:
- rc = -EOPNOTSUPP;
- }
-
- return rc;
+ return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}
/**************************************************************************
struct efx_channel *channel;
int rc;
- ASSERT_RTNL();
-
/* Allocate the NAPI dev for the port */
efx->net_dev = alloc_etherdev(0);
if (!efx->net_dev) {
efx->net_dev->priv = efx;
efx->mii.dev = efx->net_dev;
- /* Set features based on module parameters and DMA mask.
- * Enable DMA to ZONE_HIGHMEM if the NIC can access all memory
- * directly. This only has an effect on 32-bit systems and
- * PAE on x86 limits memory to 64GB so 40 bits is plenty to
- * address everything. If the device can't address 40 bits
- * then it's safest to turn NETIF_F_HIGHDMA off because this
- * might be a PAE system with more than 4G of RAM and a 32-bit
- * NIC. The use of EFX_DMA_MASK is to eliminate compiler
- * warnings on platforms where dma_addr_t is 32-bit. We
- * assume that in those cases we can access all memory
- * directly if our DMA mask is all ones. */
- efx->net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
- if (efx->dma_mask >= EFX_DMA_MASK(DMA_40BIT_MASK))
- efx->net_dev->features |= NETIF_F_HIGHDMA;
+ efx->net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA);
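+	/* Per-device LRO/SSR default, taken from the "lro" module parameter above */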
+ efx->lro_enabled = lro;
/* Copy MAC address */
memcpy(&efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
/* Allocate the per channel devs */
efx_for_each_channel(channel, efx) {
-#if !defined(EFX_HAVE_OLD_NAPI)
- channel->napi_dev = efx->net_dev;
-#else
channel->napi_dev = alloc_etherdev(0);
if (!channel->napi_dev) {
rc = -ENOMEM;
}
channel->napi_dev->priv = channel;
atomic_set(&channel->napi_dev->refcnt, 1);
-#endif
+
+ /* Initialise LRO/SSR */
+ rc = efx_ssr_init(&channel->ssr, efx);
+ if (rc)
+ goto err;
}
return 0;
{
struct efx_channel *channel;
- ASSERT_RTNL();
-
efx_for_each_channel(channel, efx) {
+ /* Fini LRO/SSR */
+ efx_ssr_fini(&channel->ssr);
+
/* Finish per channel NAPI */
-#if defined(EFX_HAVE_OLD_NAPI)
if (channel->napi_dev) {
channel->napi_dev->priv = NULL;
free_netdev(channel->napi_dev);
}
-#endif
channel->napi_dev = NULL;
}
static int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = net_dev->priv;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
raw_smp_processor_id());
+
efx_start_all(efx);
return 0;
}
EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
raw_smp_processor_id());
- /* Stop device and flush all the channels */
+ /* Stop the device and flush all the channels */
efx_stop_all(efx);
efx_fini_channels(efx);
rc = efx_init_channels(efx);
return 0;
}
-/* Context: process, dev_base_lock held, non-blocking.
- * Statistics are taken directly from the MAC.
- */
+/* Context: process, dev_base_lock held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
struct efx_nic *efx = net_dev->priv;
if (!spin_trylock(&efx->stats_lock))
return stats;
- if (efx->state == STATE_RUNNING)
+ if (efx->state == STATE_RUNNING) {
efx->mac_op->update_stats(efx);
+ falcon_update_nic_stats(efx);
+ }
spin_unlock(&efx->stats_lock);
stats->rx_packets = mac_stats->rx_packets;
stats->tx_packets = mac_stats->tx_packets;
stats->rx_bytes = mac_stats->rx_bytes;
stats->tx_bytes = mac_stats->tx_bytes;
- stats->tx_errors = mac_stats->tx_bad;
stats->multicast = mac_stats->rx_multicast;
stats->collisions = mac_stats->tx_collision;
- stats->rx_length_errors = mac_stats->rx_gtjumbo;
- stats->rx_over_errors = mac_stats->rx_overflow;
+ stats->rx_length_errors = (mac_stats->rx_gtjumbo +
+ mac_stats->rx_length_error);
+ stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
stats->rx_crc_errors = mac_stats->rx_bad;
stats->rx_frame_errors = mac_stats->rx_align_error;
- stats->rx_fifo_errors = 0;
+ stats->rx_fifo_errors = mac_stats->rx_overflow;
stats->rx_missed_errors = mac_stats->rx_missed;
+ stats->tx_window_errors = mac_stats->tx_late_collision;
+
stats->rx_errors = (stats->rx_length_errors +
stats->rx_over_errors +
stats->rx_crc_errors +
stats->rx_fifo_errors +
stats->rx_missed_errors +
mac_stats->rx_symbol_error);
- stats->tx_aborted_errors = 0;
- stats->tx_carrier_errors = 0;
- stats->tx_fifo_errors = 0;
- stats->tx_heartbeat_errors = 0;
- stats->tx_window_errors = 0;
+ stats->tx_errors = (stats->tx_window_errors +
+ mac_stats->tx_bad);
return stats;
}
struct efx_nic *efx = net_dev->priv;
int rc = 0;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
if (rc)
goto fail;
- /* Reconfigure the MAC */
- efx_reconfigure_port(efx, 1);
-
/* Notify driverlink client of new MTU */
EFX_DL_CALLBACK(efx, mtu_changed, new_mtu);
- efx_start_all(efx);
-
out:
+ efx_start_all(efx);
return rc;
fail:
struct sockaddr *addr = data;
char *new_addr = addr->sa_data;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) {
DECLARE_MAC_BUF(mac);
memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
/* Reconfigure the MAC */
- efx_reconfigure_port(efx, 1);
+ efx_reconfigure_port(efx);
return 0;
}
struct efx_nic *efx = net_dev->priv;
struct dev_mc_list *mc_list = net_dev->mc_list;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
- unsigned long flags __attribute__ ((unused));
int promiscuous;
u32 crc;
int bit;
/* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
if (efx->promiscuous != promiscuous) {
- if (efx->port_enabled) {
- efx->promiscuous = promiscuous;
+ efx->promiscuous = promiscuous;
+ /* Close the window between efx_stop_port() and efx_flush_all()
+ * by only queuing work when the port is enabled. */
+ if (efx->port_enabled)
queue_work(efx->workqueue, &efx->reconfigure_work);
- }
}
/* Build multicast hash table */
memset(mc_hash, 0x00, sizeof(*mc_hash));
for (i = 0; i < net_dev->mc_count; i++) {
crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
- bit = (crc & ((1 << EFX_MCAST_HASH_BITS) - 1));
- set_bit_le(bit, (void *)mc_hash);
+ bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ set_bit_le(bit, mc_hash->byte);
mc_list = mc_list->next;
}
}
.notifier_call = efx_netdev_event,
};
+/* Prior to Linux 2.6.24, the bonding driver may call change_mtu()
+ * without holding the RTNL, unlike all other callers. We try to
+ * mitigate the risk of a race with other reconfiguration using
+ * rtnl_trylock(), but we cannot eliminate it completely.
+ */
+static int efx_locked_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ int must_unlock = rtnl_trylock();
+ int rc = efx_change_mtu(net_dev, new_mtu);
+ if (must_unlock)
+ rtnl_unlock();
+ return rc;
+}
+#define efx_change_mtu efx_locked_change_mtu
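+/* References to efx_change_mtu below this point (e.g. when the net_device ops
+ * are filled in) resolve to the locked wrapper; the wrapper body above still
+ * calls the real function because the macro is defined after it. */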
+
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(efx->net_dev);
- BUG_ON(efx->net_dev_registered);
-
/* Clear MAC statistics */
efx->mac_op->update_stats(efx);
memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
return rc;
}
- /* Allow link change notifications to be sent to the operating
- * system. The must happen after register_netdev so that
- * there are no outstanding link changes if that call fails.
- * It must happen before efx_reconfigure_port so that the
- * initial state of the link is reported. */
- mutex_lock(&efx->mac_lock);
- efx->net_dev_registered = 1;
- mutex_unlock(&efx->mac_lock);
-
- /* Safety net: in case we don't get a PHY event */
- rtnl_lock();
- efx_reconfigure_port(efx, 1);
- rtnl_unlock();
-
- EFX_LOG(efx, "registered\n");
-
return 0;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
- int was_registered = efx->net_dev_registered;
struct efx_tx_queue *tx_queue;
if (!efx->net_dev)
BUG_ON(efx->net_dev->priv != efx);
- /* SFC Bug 5356: Ensure that no more link status notifications get
- * sent to the stack. Bad things happen if there's an
- * outstanding notification after the net device is freed, but
- * they only get flushed out by unregister_netdev, not by
- * free_netdev. */
- mutex_lock(&efx->mac_lock);
- efx->net_dev_registered = 0;
- mutex_unlock(&efx->mac_lock);
-
/* Free up any skbs still remaining. This has to happen before
* we try to unregister the netdev as running their destructors
* may be needed to get the device ref. count to 0. */
efx_for_each_tx_queue(tx_queue, efx)
efx_release_tx_buffers(tx_queue);
- if (was_registered) {
+ if (NET_DEV_REGISTERED(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
efx_fini_debugfs_netdev(efx->net_dev);
unregister_netdev(efx->net_dev);
*
**************************************************************************/
-/* This suspends the device (and acquires the suspend lock) without
- * flushing the descriptor queues. It is included for the convenience
- * of the driverlink layer.
- */
+/* Serialise access to the driverlink callbacks, by quiescing event processing
+ * (without flushing the descriptor queues), and acquiring the rtnl_lock */
void efx_suspend(struct efx_nic *efx)
{
EFX_LOG(efx, "suspending operations\n");
- down(&efx->suspend_lock);
-
rtnl_lock();
efx_stop_all(efx);
}
efx_start_all(efx);
rtnl_unlock();
-
- up(&efx->suspend_lock);
}
/* The final hardware and software finalisation before reset.
{
int rc;
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
rc = efx->mac_op->get_settings(efx, ecmd);
if (rc) {
if (rc)
goto fail1;
- /* In an INVISIBLE_RESET there might not be a link state transition,
- * so we push the multicast list here. */
- falcon_set_multicast_hash(efx);
-
/* Restore MAC and PHY settings. */
rc = efx->mac_op->set_settings(efx, ecmd);
if (rc) {
*
* This function will sleep. You cannot reset from within an atomic
* state; use efx_schedule_reset() instead.
+ *
+ * Grabs the dl_reset_lock and, to serialise with kernel interfaces, the
+ * rtnl_lock.
*/
static int efx_reset(struct efx_nic *efx)
{
struct ethtool_cmd ecmd;
- unsigned long flags __attribute__ ((unused));
enum reset_type method = efx->reset_pending;
int rc;
+ /* Notify driverlink clients of imminent reset. */
efx_dl_reset_lock();
+ efx_dl_reset_suspend(efx);
- rc = down_interruptible(&efx->suspend_lock);
- if (rc) {
- EFX_ERR(efx, "reset aborted by signal\n");
- goto unlock_dl_lock;
- }
+ /* Serialise with kernel interfaces */
+ rtnl_lock();
- /* We've got suspend_lock, which means we can only be in
- * STATE_RUNNING or STATE_FINI. Don't clear
- * efx->reset_pending, since this flag indicates that we
- * should retry device initialisation.
- */
+ /* If we're not RUNNING then don't reset. Leave the reset_pending
+ * flag set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
- goto unlock_suspend_lock;
+ goto unlock_rtnl;
}
- /* Notify driverlink clients of imminent reset. */
- efx_dl_reset_suspend(efx);
- rtnl_lock();
-
efx->state = STATE_RESETTING;
EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
goto fail5;
mutex_unlock(&efx->mac_lock);
- efx_reconfigure_port(efx, 1);
EFX_LOG(efx, "reset complete\n");
efx->state = STATE_RUNNING;
efx_start_all(efx);
+ unlock_rtnl:
rtnl_unlock();
-
- goto notify;
+ efx_dl_reset_resume(efx, 1);
+ efx_dl_reset_unlock();
+ return 0;
fail5:
fail4:
EFX_ERR(efx, "has been disabled\n");
efx->state = STATE_DISABLED;
- /* Remove the net_dev */
mutex_unlock(&efx->mac_lock);
rtnl_unlock();
+ /* Remove the net_dev */
efx_unregister_netdev(efx);
efx_fini_port(efx);
-
- notify:
- /* Notify driverlink clients of completed reset */
- efx_dl_reset_resume(efx, (rc == 0));
-
- unlock_suspend_lock:
- up(&efx->suspend_lock);
-
- unlock_dl_lock:
+ efx_dl_reset_resume(efx, 0);
efx_dl_reset_unlock();
-
return rc;
}
case RESET_TYPE_RX_RECOVERY:
case RESET_TYPE_RX_DESC_FETCH:
case RESET_TYPE_TX_DESC_FETCH:
+ case RESET_TYPE_TX_SKIP:
method = RESET_TYPE_INVISIBLE;
break;
default:
efx->reset_pending = method;
-#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
queue_work(efx->reset_workqueue, &efx->reset_work);
-#else
- queue_work(efx->workqueue, &efx->reset_work);
-#endif
}
/**************************************************************************
*
**************************************************************************/
-enum efx_type_index {
- EFX_TYPE_FALCON_A = 0,
- EFX_TYPE_FALCON_B = 1,
-};
-
-static struct efx_nic_type *efx_nic_types[] = {
- [EFX_TYPE_FALCON_A] = &falcon_a_nic_type,
- [EFX_TYPE_FALCON_B] = &falcon_b_nic_type,
-};
-
-
/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
- {EFX_VENDID_SFC, FALCON_A_P_DEVID, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, EFX_TYPE_FALCON_A},
- {EFX_VENDID_SFC, FALCON_B_P_DEVID, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, EFX_TYPE_FALCON_B},
+ {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
+ .driver_data = (unsigned long) &falcon_a_nic_type},
+ {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
+ .driver_data = (unsigned long) &falcon_b_nic_type},
{0} /* end of list */
};
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-static int efx_init_struct(struct efx_nic *efx, enum efx_type_index type,
+static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
struct pci_dev *pci_dev)
{
struct efx_channel *channel;
spin_lock_init(&efx->biu_lock);
spin_lock_init(&efx->phy_lock);
mutex_init(&efx->spi_lock);
- sema_init(&efx->suspend_lock, 1);
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
efx->pci_dev = pci_dev;
INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
}
- efx->type = efx_nic_types[type];
+ efx->type = type;
/* Sanity-check NIC type */
EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
-#if defined(EFX_NEED_DUMMY_MSIX)
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX)
- efx->interrupt_mode = EFX_INT_MODE_MSI;
-#endif
-
- /* Tasks that can fail are last */
- efx->refill_workqueue = create_workqueue("sfc_refill");
- if (!efx->refill_workqueue) {
- rc = -ENOMEM;
- goto fail1;
- }
efx->workqueue = create_singlethread_workqueue("sfc_work");
if (!efx->workqueue) {
rc = -ENOMEM;
- goto fail2;
+ goto fail1;
}
-#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!efx->reset_workqueue) {
rc = -ENOMEM;
- goto fail3;
+ goto fail2;
}
-#endif
return 0;
-#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
- fail3:
+ fail2:
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
-#endif
- fail2:
- destroy_workqueue(efx->refill_workqueue);
- efx->refill_workqueue = NULL;
fail1:
return rc;
}
static void efx_fini_struct(struct efx_nic *efx)
{
-#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
if (efx->reset_workqueue) {
destroy_workqueue(efx->reset_workqueue);
efx->reset_workqueue = NULL;
}
-#endif
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
}
- if (efx->refill_workqueue) {
- destroy_workqueue(efx->refill_workqueue);
- efx->refill_workqueue = NULL;
- }
}
/**************************************************************************
*/
static void efx_pci_remove_main(struct efx_nic *efx)
{
- ASSERT_RTNL();
+ EFX_ASSERT_RESET_SERIALISED(efx);
/* Skip everything if we never obtained a valid membase */
if (!efx->membase)
/* Unregister driver from driverlink layer */
efx_dl_unregister_nic(efx);
- /* Mark the NIC as fini under both suspend_lock and
- * rtnl_lock */
- down(&efx->suspend_lock);
+ /* Mark the NIC as fini, then stop the interface */
rtnl_lock();
efx->state = STATE_FINI;
- up(&efx->suspend_lock);
-
- if (efx->membase) {
- /* Stop the NIC. Since we're in STATE_FINI, this
- * won't be reversed. */
- if (efx->net_dev_registered)
- dev_close(efx->net_dev);
+ dev_close(efx->net_dev);
- /* Release the rtnl lock. Any queued efx_resets()
- * can now return early [we're in STATE_FINI]. */
- rtnl_unlock();
+ /* Allow any queued efx_resets() to complete */
+ rtnl_unlock();
- efx_unregister_netdev(efx);
- efx_fini_debugfs_channels(efx);
+ if (efx->membase == NULL)
+ goto out;
- /* Wait for any scheduled resets to complete. No more will be
- * scheduled from this point because efx_stop_all() has been
- * called, we are no longer registered with driverlink, and
- * the net_device's have been removed. */
-#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
- flush_workqueue(efx->reset_workqueue);
-#else
- flush_workqueue(efx->workqueue);
-#endif
+ efx_unregister_netdev(efx);
+ efx_fini_debugfs_channels(efx);
- /* Fini and remove all the software state */
- rtnl_lock();
- efx_pci_remove_main(efx);
- }
+ /* Wait for any scheduled resets to complete. No more will be
+ * scheduled from this point because efx_stop_all() has been
+ * called, we are no longer registered with driverlink, and
+	 * the net_devices have been removed. */
+ flush_workqueue(efx->reset_workqueue);
- rtnl_unlock();
+ efx_pci_remove_main(efx);
+out:
efx_fini_io(efx);
EFX_LOG(efx, "shutdown successful\n");
const struct pci_device_id *entry)
{
struct efx_nic *efx;
- enum efx_type_index type = entry->driver_data;
+ struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
int i, rc;
/* Allocate and initialise a struct efx_nic */
if (rc)
goto fail3;
- /* From this point on we begin to expose the driver to the OS
- * to varying degrees, so lets grab the suspend_lock and
- * rtnl_lock to serialise against efx_reset() and
- * friends. efx->state is not STATE_RUNNING yet, but we don't
- * want these tasks to fail, just to block until we drop the
- * lock
- */
- rc = down_interruptible(&efx->suspend_lock);
- if (rc) {
- EFX_ERR(efx, "suspend interrupted - aborting\n");
- goto fail4;
- }
-
- rtnl_lock();
-
- /* Probe, initialise and start everything. Run self-test */
+ /* No serialisation is required with the reset path because
+ * we're in STATE_INIT. */
for (i = 0; i < 5; i++) {
rc = efx_pci_probe_main(efx);
if (rc == 0)
break;
- /* Retry if a recoverably reset event has been scheduled */
- if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
- (efx->reset_pending != RESET_TYPE_ALL))
- goto fail5;
-
/* Serialise against efx_reset(). No more resets will be
* scheduled since efx_stop_all() has been called, and we
* have not and never have been registered with either
* the rtnetlink or driverlink layers. */
- rtnl_unlock();
- up(&efx->suspend_lock);
-
-#if defined(EFX_USE_CANCEL_WORK_SYNC)
- cancel_work_sync(&efx->reset_work);
-#else
flush_workqueue(efx->reset_workqueue);
-#endif
- down(&efx->suspend_lock);
- rtnl_lock();
+ /* Retry if a recoverably reset event has been scheduled */
+ if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
+ (efx->reset_pending != RESET_TYPE_ALL))
+ goto fail4;
efx->reset_pending = RESET_TYPE_NONE;
- };
+ }
+
if (rc) {
EFX_ERR(efx, "Could not reset NIC\n");
goto fail5;
/* Switch to the running state before we expose the device to
* the OS. This is to ensure that the initial gathering of
* MAC stats succeeds. */
+ rtnl_lock();
efx->state = STATE_RUNNING;
-
rtnl_unlock();
rc = efx_register_netdev(efx);
if (rc)
goto fail7;
- up(&efx->suspend_lock);
-
EFX_LOG(efx, "initialisation successful\n");
/* Register with driverlink layer */
return 0;
fail8:
- down(&efx->suspend_lock);
efx_unregister_netdev(efx);
fail7:
- /* Re-acquire the rtnl lock around pci_remove_main() */
- rtnl_lock();
efx_fini_debugfs_channels(efx);
fail6:
efx_pci_remove_main(efx);
fail5:
- /* Drop the locks before fini */
- rtnl_unlock();
- up(&efx->suspend_lock);
fail4:
efx_fini_io(efx);
fail3:
if (rc)
goto err_notifier;
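+	/* Single RX refill workqueue shared by all NICs; see the comment at its
+	 * declaration above. */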
+ refill_workqueue = create_workqueue("sfc_refill");
+ if (!refill_workqueue) {
+ rc = -ENOMEM;
+ goto err_refill;
+ }
+
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
goto err_pci;
return 0;
err_pci:
+ destroy_workqueue(refill_workqueue);
+ err_refill:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
efx_fini_debugfs();
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
+ destroy_workqueue(refill_workqueue);
unregister_netdevice_notifier(&efx_netdev_notifier);
efx_fini_debugfs();
extern void efx_wake_queue(struct efx_nic *efx);
/* RX */
-#if defined(EFX_USE_FASTCALL)
extern void fastcall efx_xmit_done(struct efx_tx_queue *tx_queue,
unsigned int index);
-#else
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-#endif
-#if defined(EFX_USE_FASTCALL)
extern void fastcall efx_rx_packet(struct efx_rx_queue *rx_queue,
unsigned int index, unsigned int len,
int checksummed, int discard);
-#else
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, int checksummed, int discard);
-#endif
-extern void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
/* Channels */
extern void efx_process_channel_now(struct efx_channel *channel);
extern int efx_flush_queues(struct efx_nic *efx);
/* Ports */
-extern void efx_reconfigure_port(struct efx_nic *efx,
- int on_disabled);
+extern void efx_reconfigure_port(struct efx_nic *efx);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
channel->channel, raw_smp_processor_id());
channel->work_pending = 1;
-#if defined(EFX_HAVE_OLD_NAPI)
if (!test_and_set_bit(__LINK_STATE_RX_SCHED, &channel->napi_dev->state))
__netif_rx_schedule(channel->napi_dev);
-#else
- netif_rx_schedule(channel->napi_dev, &channel->napi_str);
-#endif
}
-
#endif /* EFX_EFX_H */
* @RESET_TYPE_MONITOR: reset due to hardware monitor
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
+ * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
+ * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
+ * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
*/
enum reset_type {
RESET_TYPE_NONE = -1,
RESET_TYPE_RX_RECOVERY,
RESET_TYPE_RX_DESC_FETCH,
RESET_TYPE_TX_DESC_FETCH,
+ RESET_TYPE_TX_SKIP,
RESET_TYPE_MAX,
};
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
-#include <asm/uaccess.h>
#include "net_driver.h"
#include "selftest.h"
#include "efx.h"
struct ethtool_cmd *ecmd)
{
struct efx_nic *efx = net_dev->priv;
+ int rc;
+
+ if (!in_interrupt())
+ mutex_lock(&efx->mac_lock);
+ rc = efx->mac_op->get_settings(efx, ecmd);
+ if (!in_interrupt())
+ mutex_unlock(&efx->mac_lock);
- return efx->mac_op->get_settings(efx, ecmd);
+ return rc;
}
/* This must be called with rtnl_lock held. */
struct efx_nic *efx = net_dev->priv;
int rc;
+ mutex_lock(&efx->mac_lock);
rc = efx->mac_op->set_settings(efx, ecmd);
- if (rc)
- return rc;
-
- /* Push the settings to the MAC */
- efx_reconfigure_port(efx, 0);
+ mutex_unlock(&efx->mac_lock);
+ if (!rc)
+ efx_reconfigure_port(efx);
- return 0;
+ return rc;
}
static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_stats *stats
__attribute__ ((unused)), u64 *data)
{
- unsigned long flags __attribute__ ((unused));
struct efx_nic *efx = net_dev->priv;
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct efx_ethtool_stat *stat;
/* Update MAC and NIC statistics */
net_dev->get_stats(net_dev);
- falcon_update_nic_stats(efx);
/* Fill detailed statistics buffer */
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
if (rc)
return rc;
-
efx_flush_queues(efx);
return 0;
flow_control |= pause->autoneg ? EFX_FC_AUTO : 0;
/* Try to push the pause parameters */
+ mutex_lock(&efx->mac_lock);
rc = efx->mac_op->set_pause(efx, flow_control);
- if (rc)
- return rc;
+ mutex_unlock(&efx->mac_lock);
- /* Push the settings to the MAC */
- efx_reconfigure_port(efx, 0);
+ if (!rc)
+ efx_reconfigure_port(efx);
- return 0;
+ return rc;
}
static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
}
-#if defined(EFX_USE_ETHTOOL_GET_PERM_ADDR)
static int efx_ethtool_op_get_perm_addr(struct net_device *net_dev,
struct ethtool_perm_addr *addr,
u8 *data)
return 0;
}
-#endif
struct ethtool_ops efx_ethtool_ops = {
.get_settings = efx_ethtool_get_settings,
.set_tx_csum = efx_ethtool_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
-#if defined(EFX_USE_ETHTOOL_FLAGS)
- .get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
-#endif
.self_test_count = efx_ethtool_self_test_count,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.phys_id = efx_ethtool_phys_id,
.get_stats_count = efx_ethtool_get_stats_count,
.get_ethtool_stats = efx_ethtool_get_stats,
-#if defined(EFX_USE_ETHTOOL_GET_PERM_ADDR)
.get_perm_addr = efx_ethtool_op_get_perm_addr,
-#endif
};
****************************************************************************
*/
-#include <asm/io.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
* present in SFE400X evaluation boards
*/
+/**
+ * struct falcon_nic_data - Falcon NIC state
+ * @tx_dc_entries: Number of entries in each TX queue descriptor cache
+ * @rx_dc_entries: Number of entries in each RX queue descriptor cache
+ * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
+ * @old_loopback_mode: Previous loopback mode used in deconfigure_mac_wrapper
+ * @external_sram_cfg: Size and number of banks of external SRAM
+ * @pci_dev2: The secondary PCI device if present
+ * @resources: Driverlink parameters
+ */
struct falcon_nic_data {
- /* Number of entries in each TX queue descriptor cache. */
unsigned tx_dc_entries;
- /* Number of entries in each RX queue descriptor cache. */
unsigned rx_dc_entries;
- /* Base address in SRAM of TX queue descriptor caches. */
unsigned tx_dc_base;
- /* Base address in SRAM of RX queue descriptor caches. */
unsigned rx_dc_base;
- /* Previous loopback mode used in deconfigure_mac_wrapper */
enum efx_loopback_mode old_loopback_mode;
- /* Driverlink parameters */
+ struct pci_dev *pci_dev2;
+
+ int external_sram_cfg;
+
struct efx_dl_falcon_resources resources;
};
#endif
/* TX DMA length mask (13-bit) */
-#define FALCON_TX_DMA_MASK (8192 - 1)
+#define FALCON_TX_DMA_MASK (4096 - 1)
-/* Alignment of special buffers (4KB) */
-#define FALCON_BUF_ALIGN 4096
+/* Size and alignment of special buffers (4KB) */
+#define FALCON_BUF_SIZE 4096
/* Dummy SRAM size code */
#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
/* Be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */
-#define PCI_EXP_DEVCAP_PWR_VAL_LBN (18)
-/* This field takes up bits 26 and 27. */
-#define PCI_EXP_DEVCAP_PWR_SCL_LBN (26)
-#define PCI_EXP_LNKSTA_LNK_WID (0x3f0)
-#define PCI_EXP_LNKSTA_LNK_WID_LBN (4)
+#define PCI_EXP_DEVCAP_PWR_VAL_LBN 18
+#define PCI_EXP_DEVCAP_PWR_SCL_LBN 26
+#define PCI_EXP_DEVCTL_PAYLOAD_LBN 5
+#define PCI_EXP_LNKSTA_LNK_WID 0x3f0
+#define PCI_EXP_LNKSTA_LNK_WID_LBN 4
+#define FALCON_IS_DUAL_FUNC(efx) \
+ (FALCON_REV(efx) < FALCON_REV_B0)
/**************************************************************************
*
*
*************************************************************************/
-/* Adds the relevant entries to the full-mode buffer table. */
+/*
+ * Initialise a Falcon special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
static int
-falcon_pin_special_buffer_full(struct efx_nic *efx,
- struct efx_special_buffer *buffer)
+falcon_init_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer)
{
efx_qword_t buf_desc;
int index;
dma_addr_t dma_addr;
int i;
+ EFX_BUG_ON_PARANOID(!buffer->addr);
+
/* Write buffer descriptors to NIC */
for (i = 0; i < buffer->entries; i++) {
index = buffer->index + i;
return 0;
}
-/* Clears the relevant entries from the buffer table */
+/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
-falcon_clear_special_buffer_full(struct efx_nic *efx,
- struct efx_special_buffer *buffer)
+falcon_fini_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer)
{
efx_oword_t buf_tbl_upd;
unsigned int start = buffer->index;
unsigned int end = (buffer->index + buffer->entries - 1);
+ if (!buffer->entries)
+ return;
+
EFX_LOG(efx, "unmapping special buffers %d-%d\n",
buffer->index, buffer->index + buffer->entries - 1);
* This allocates memory for a new buffer, clears it and allocates a
* new buffer ID range. It does not write into Falcon's buffer table.
*
- * This call will allocate 4kB buffers, since Falcon can't use 8kB
- * buffers for event queues and descriptor rings. It will always
- * allocate an even number of 4kB buffers, since when we're in
- * half-entry mode for the buffer table we can only deal with pairs of
- * buffers.
+ * This call will allocate 4KB buffers, since Falcon can't use 8KB
+ * buffers for event queues and descriptor rings.
*/
static int falcon_alloc_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer,
{
struct falcon_nic_data *nic_data = efx->nic_data;
- /* Round size up to an 8kB boundary (i.e. pairs of 4kB buffers) */
- len = (len + 8192 - 1) & ~(8192 - 1);
+ len = ALIGN(len, FALCON_BUF_SIZE);
/* Allocate buffer as consistent PCI DMA space */
buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
- buffer->entries = len / 4096;
- BUG_ON(buffer->dma_addr & (FALCON_BUF_ALIGN - 1));
+ buffer->entries = len / FALCON_BUF_SIZE;
+ BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
/* All zeros is a potentially valid event so memset to 0xff */
memset(buffer->addr, 0xff, len);
return 0;
}
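For reference, ALIGN() rounds the requested length up to the next FALCON_BUF_SIZE multiple, so buffer->entries is always a whole number of 4KB pages. A minimal worked sketch of that rounding (the request size and helper name are hypothetical, for illustration only):

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/types.h>

/* Illustrative only: a 10000-byte request rounds up to 12288 bytes,
 * i.e. three 4KB buffer-table entries. */
static unsigned int example_special_buffer_entries(size_t len)
{
	return ALIGN(len, 4096) / 4096;	/* e.g. 10000 -> 3 */
}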
-/*
- * Initialise a Falcon special buffer
- *
- * This will define a buffer (previously allocated via
- * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
- * it to be used for event queues, descriptor rings etc.
- */
-static int falcon_init_special_buffer(struct efx_nic *efx,
- struct efx_special_buffer *buffer)
-{
- EFX_BUG_ON_PARANOID(!buffer->addr);
-
- /* Write buffer descriptors to NIC */
- return falcon_pin_special_buffer_full(efx, buffer);
-}
-
-/* Unmaps a buffer from Falcon and clears the buffer table
- * entries */
-static void falcon_fini_special_buffer(struct efx_nic *efx,
- struct efx_special_buffer *buffer)
-{
-
- if (!buffer->entries)
- return;
-
- falcon_clear_special_buffer_full(efx, buffer);
-}
-
/* Release the buffer memory. */
static void falcon_free_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer)
* descriptor in the hardware TX descriptor ring (in host memory), and
* write a doorbell.
*/
-#if defined(EFX_USE_FASTCALL)
void fastcall falcon_push_buffers(struct efx_tx_queue *tx_queue)
-#else
-void falcon_push_buffers(struct efx_tx_queue *tx_queue)
-#endif
{
struct efx_tx_buffer *buffer;
falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
msleep(FALCON_FLUSH_TIMEOUT);
- /* If the NIC is resetting then don't bother checking */
- if (EFX_WORKAROUND_7803(efx) || (efx->state == STATE_RESETTING))
+ if (EFX_WORKAROUND_7803(efx))
return 0;
/* Look for a flush completed event */
/* This writes to the RX_DESC_WPTR register for the specified receive
* descriptor ring.
*/
-#if defined(EFX_USE_FASTCALL)
void fastcall falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
-#else
-void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
-#endif
{
efx_dword_t reg;
unsigned write_ptr;
falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
msleep(FALCON_FLUSH_TIMEOUT);
- /* If the NIC is resetting then don't bother checking */
- if (EFX_WORKAROUND_7803(efx) || (efx->state == STATE_RESETTING))
+ if (EFX_WORKAROUND_7803(efx))
return 0;
/* Look for a flush completed event */
continue;
break;
}
- if (rc)
+ if (rc) {
EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+ }
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
* whereas channel->eventq_read_ptr contains the index of the "next to
* read" event.
*/
-#if defined(EFX_USE_FASTCALL)
void fastcall falcon_eventq_read_ack(struct efx_channel *channel)
-#else
-void falcon_eventq_read_ack(struct efx_channel *channel)
-#endif
{
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
tx_queue = &efx->tx_queue[tx_ev_q_label];
- if (efx->net_dev_registered)
+ if (NET_DEV_REGISTERED(efx))
netif_tx_lock(efx->net_dev);
falcon_notify_tx_desc(tx_queue);
- if (efx->net_dev_registered)
+ if (NET_DEV_REGISTERED(efx))
netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
EFX_WORKAROUND_10727(efx)) {
}
}
-#if defined(EFX_USE_FASTCALL)
int fastcall falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
-#else
-int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
-#endif
{
unsigned int read_ptr;
efx_qword_t event, *p_event;
*/
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
efx_oword_t fatal_intr;
int error, mem_perr;
/* Disable DMA bus mastering on both devices */
pci_disable_device(efx->pci_dev);
- if (efx->type->is_dual_func)
- pci_disable_device(efx->pci_dev2);
+ if (FALCON_IS_DUAL_FUNC(efx))
+ pci_disable_device(nic_data->pci_dev2);
if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
* interrupts are disabled, to allow for correct semantics of
* efx_suspend() and efx_resume().
*/
-#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
-static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
-#else
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id,
struct pt_regs *regs
__attribute__ ((unused)))
-#endif
{
struct efx_nic *efx = (struct efx_nic *)dev_id;
efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
}
-#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
-static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
-#else
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id,
struct pt_regs *regs
__attribute__ ((unused)))
-#endif
{
struct efx_nic *efx = (struct efx_nic *)dev_id;
efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
* interrupts are disabled, to allow for correct semantics of
* efx_suspend() and efx_resume().
*/
-#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
-static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
-#else
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id,
struct pt_regs *regs
__attribute__ ((unused)))
-#endif
{
struct efx_channel *channel = (struct efx_channel *)dev_id;
struct efx_nic *efx = channel->efx;
{
int i = 0;
unsigned long offset;
- unsigned long flags __attribute__ ((unused));
efx_dword_t dword;
if (FALCON_REV(efx) < FALCON_REV_B0)
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
efx_oword_t temp;
- efx_oword_t mcast_reg0;
- efx_oword_t mcast_reg1;
int count;
if (FALCON_REV(efx) < FALCON_REV_B0)
EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
falcon_write(efx, &temp, MAC0_CTRL_REG_KER);
- falcon_read(efx, &mcast_reg0, MAC_MCAST_HASH_REG0_KER);
- falcon_read(efx, &mcast_reg1, MAC_MCAST_HASH_REG1_KER);
-
/* Reset the MAC and EM block. */
falcon_read(efx, &temp, GLB_CTL_REG_KER);
EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
spin_unlock(&efx->stats_lock);
- /* Restore the multicast hash registers. */
- falcon_write(efx, &mcast_reg0, MAC_MCAST_HASH_REG0_KER);
- falcon_write(efx, &mcast_reg1, MAC_MCAST_HASH_REG1_KER);
-
/* If we've reset the EM block and the link is up, then
* we'll have to kick the XAUI link so the PHY can recover */
if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
* draining the TX fifo and resetting. */
changing_loopback = (efx->loopback_mode != nic_data->old_loopback_mode);
nic_data->old_loopback_mode = efx->loopback_mode;
+
+ if (EFX_WORKAROUND_11667(efx) && (efx->phy_type == PHY_TYPE_10XPRESS)) {
+ if (changing_loopback)
+ return;
+ }
+
if (changing_loopback || !efx->link_up)
falcon_drain_tx_fifo(efx);
}
/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
* as advertised. Disable to ensure packets are not
* indefinitely held and TX queue can be flushed at any point
- * while the link is down.
- */
+ * while the link is down. */
EFX_POPULATE_OWORD_5(reg,
MAC_XOFF_VAL, 0xffff /* max pause time */,
MAC_BCAD_ACPT, 1,
falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
- /*
- * Transmission of pause frames when RX crosses the threshold is
+ /* Restore the multicast hash registers. */
+ falcon_set_multicast_hash(efx);
+
+ /* Transmission of pause frames when RX crosses the threshold is
* covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
- *
- * Action on receipt of pause frames is controller by XM_DIS_FCNTL
- */
+ * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
falcon_read(efx, &reg, RX_CFG_REG_KER);
EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
unsigned int phy_10g = phy_id & FALCON_PHY_ID_10G;
efx_oword_t reg;
int value = -1;
- unsigned long flags __attribute__ ((unused));
if (phy_addr == PHY_ADDR_INVALID)
return -1;
gmii->mdio_read = falcon_mdio_read;
gmii->mdio_write = falcon_mdio_write;
gmii->phy_id_mask = FALCON_PHY_ID_MASK;
- gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_DEV_ADR)) - 1);
+ gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
}
static int falcon_probe_gmac_port(struct efx_nic *efx)
void falcon_set_multicast_hash(struct efx_nic *efx)
{
- union efx_multicast_hash falcon_mc_hash;
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
/* Broadcast packets go through the multicast hash filter.
* ether_crc_le() of the broadcast address is 0xbe2612ff
- * so we always add bit 0xff to the mask we are given.
+ * so we always add bit 0xff to the mask.
*/
- memcpy(&falcon_mc_hash, &efx->multicast_hash, sizeof(falcon_mc_hash));
- set_bit_le(0xff, (void *)&falcon_mc_hash);
+ set_bit_le(0xff, mc_hash->byte);
- falcon_write(efx, &falcon_mc_hash.oword[0], MAC_MCAST_HASH_REG0_KER);
- falcon_write(efx, &falcon_mc_hash.oword[1], MAC_MCAST_HASH_REG1_KER);
+ falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
+ falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
}
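As the comment notes, the 256-bit filter is indexed by the low byte of ether_crc_le() over the destination address, which is 0xff for the broadcast address (CRC 0xbe2612ff). A minimal sketch of that indexing, assuming the low-byte convention; the helper name is illustrative only and is not part of the patch:

#include <linux/crc32.h>
#include <linux/etherdevice.h>

/* Illustrative only: which bit of the 256-bit multicast hash a given
 * address would select, assuming the filter is indexed by the low byte
 * of the little-endian Ethernet CRC. */
static unsigned int example_mcast_hash_bit(const u8 *addr)
{
	return ether_crc_le(ETH_ALEN, addr) & 0xff;
}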
/**************************************************************************
* context and is allowed to sleep. */
int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t glb_ctl_reg_ker;
int rc;
"function prior to hardware reset\n");
goto fail1;
}
- if (efx->type->is_dual_func) {
- rc = pci_save_state(efx->pci_dev2);
+ if (FALCON_IS_DUAL_FUNC(efx)) {
+ rc = pci_save_state(nic_data->pci_dev2);
if (rc) {
EFX_ERR(efx, "failed to backup PCI state of "
"secondary function prior to "
/* Restore PCI configuration if needed */
if (method == RESET_TYPE_WORLD) {
- if (efx->type->is_dual_func) {
- rc = pci_restore_state(efx->pci_dev2);
+ if (FALCON_IS_DUAL_FUNC(efx)) {
+ rc = pci_restore_state(nic_data->pci_dev2);
if (rc) {
EFX_ERR(efx, "failed to restore PCI config for "
"the secondary function\n");
*/
static int falcon_reset_sram(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
int count, onchip, sram_cfg_val;
/* Set the SRAM wake/sleep GPIO appropriately. */
- onchip = (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY);
+ onchip = (nic_data->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY);
falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, onchip ? 1 : 0);
falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
/* Initiate SRAM reset */
- sram_cfg_val = (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) ?
- 0 : efx->external_sram_cfg;
+ sram_cfg_val = nic_data->external_sram_cfg;
+ if (nic_data->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY)
+ sram_cfg_val = 0;
EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
SRAM_OOB_BT_INIT_EN, 1,
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
- int rc;
+ struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
struct efx_spi_device *spi;
size_t offset, len;
int magic_num, struct_ver, board_rev, onchip_sram;
+ int rc;
nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
* automatically but may have been reset since boot.
*/
if (onchip_sram) {
- efx->external_sram_cfg = SRM_NB_BSZ_ONCHIP_ONLY;
+ nic_data->external_sram_cfg = SRM_NB_BSZ_ONCHIP_ONLY;
} else {
- efx->external_sram_cfg =
- EFX_OWORD_FIELD(nvconfig->srm_cfg_reg,
- SRM_NUM_BANKS_AND_BANK_SIZE);
- WARN_ON(efx->external_sram_cfg == SRM_NB_BSZ_RESERVED);
+ nic_data->external_sram_cfg =
+ EFX_OWORD_FIELD(nvconfig->srm_cfg_reg,
+ SRM_NUM_BANKS_AND_BANK_SIZE);
+ WARN_ON(nic_data->external_sram_cfg == SRM_NB_BSZ_RESERVED);
/* Replace invalid setting with the smallest defaults */
- if (efx->external_sram_cfg == SRM_NB_BSZ_DEFAULT)
- efx->external_sram_cfg = SRM_NB_BSZ_1BANKS_2M;
+ if (nic_data->external_sram_cfg == SRM_NB_BSZ_DEFAULT)
+ nic_data->external_sram_cfg = SRM_NB_BSZ_1BANKS_2M;
}
EFX_LOG(efx, "external_sram_cfg=%d (>=0 is external)\n",
- efx->external_sram_cfg);
+ nic_data->external_sram_cfg);
out:
kfree(nvconfig);
*/
switch (FALCON_REV(efx)) {
case FALCON_REV_A1:
- res->rxq_min = res->txq_min = 16;
- res->evq_int_min = res->evq_int_max = 4;
+ res->rxq_min = 16;
+ res->txq_min = 16;
+ res->evq_int_min = 4;
+ res->evq_int_lim = 5;
res->evq_timer_min = 5;
- res->evq_timer_max = 4096;
+ res->evq_timer_lim = 4096;
internal_dcs_entries = 8192;
break;
case FALCON_REV_B0:
default:
- res->rxq_min = res->txq_min = res->evq_int_min = 0;
- res->evq_int_max = 64;
+ res->rxq_min = 0;
+ res->txq_min = 0;
+ res->evq_int_min = 0;
+ res->evq_int_lim = 64;
res->evq_timer_min = 64;
- res->evq_timer_max = 4096;
+ res->evq_timer_lim = 4096;
internal_dcs_entries = 4096;
break;
}
buffer_entry_bytes = 8;
- if (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) {
- res->rxq_max = internal_dcs_entries / nic_data->rx_dc_entries;
- res->txq_max = internal_dcs_entries / nic_data->tx_dc_entries;
+ if (nic_data->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) {
+ res->rxq_lim = internal_dcs_entries / nic_data->rx_dc_entries;
+ res->txq_lim = internal_dcs_entries / nic_data->tx_dc_entries;
/* Prog model says 8K entries for buffer table in internal
* mode. But does this not depend on full/half mode?
*/
- res->buffer_table_max = 8192;
+ res->buffer_table_lim = 8192;
nic_data->tx_dc_base = 0x130000;
nic_data->rx_dc_base = 0x100000;
} else {
/* Determine how much SRAM we have to play with. We have
* to fit buffer table and descriptor caches in.
*/
- switch (efx->external_sram_cfg) {
+ switch (nic_data->external_sram_cfg) {
case SRM_NB_BSZ_1BANKS_2M:
default:
sram_bytes = 2 * 1024 * 1024;
max_vnics = sram_bytes / vnic_bytes;
for (n_vnics = 1; n_vnics < res->evq_timer_min + max_vnics;)
n_vnics *= 2;
- res->rxq_max = n_vnics;
- res->txq_max = n_vnics;
+ res->rxq_lim = n_vnics;
+ res->txq_lim = n_vnics;
dcs = n_vnics * nic_data->tx_dc_entries * 8;
nic_data->tx_dc_base = sram_bytes - dcs;
dcs = n_vnics * nic_data->rx_dc_entries * 8;
nic_data->rx_dc_base = nic_data->tx_dc_base - dcs;
- res->buffer_table_max = nic_data->rx_dc_base / 8;
+ res->buffer_table_lim = nic_data->rx_dc_base / 8;
}
- if (efx->type->is_dual_func)
+ if (FALCON_IS_DUAL_FUNC(efx))
res->flags |= EFX_DL_FALCON_DUAL_FUNC;
if (EFX_INT_MODE_USE_MSI(efx))
falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
efx->is_asic = EFX_OWORD_FIELD(altera_build, VER_ALL) == 0;
-#if !defined(EFX_USE_PCI_DEV_REVISION)
{
int rc;
rc = pci_read_config_byte(efx->pci_dev, PCI_CLASS_REVISION,
if (rc)
return rc;
}
-#endif
+
switch (FALCON_REV(efx)) {
case FALCON_REV_A0:
case 0xff:
efx->i2c.sda = 1;
efx->i2c.scl = 1;
+ /* Allocate storage for hardware specific data */
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ efx->nic_data = (void *) nic_data;
+
/* Determine number of ports etc. */
rc = falcon_probe_nic_variant(efx);
if (rc)
goto fail1;
/* Probe secondary function if expected */
- if (efx->type->is_dual_func) {
+ if (FALCON_IS_DUAL_FUNC(efx)) {
struct pci_dev *dev = pci_dev_get(efx->pci_dev);
while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
dev))) {
if (dev->bus == efx->pci_dev->bus &&
dev->devfn == efx->pci_dev->devfn + 1) {
- efx->pci_dev2 = dev;
+ nic_data->pci_dev2 = dev;
break;
}
}
- if (!efx->pci_dev2) {
+ if (!nic_data->pci_dev2) {
EFX_ERR(efx, "failed to find secondary function\n");
rc = -ENODEV;
goto fail2;
efx->mii.phy_id = 2;
}
- /* Decide how many resources we can allocate, to ourselves
- * and to driverlink clients */
- nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
- efx->nic_data = (void *) nic_data;
-
rc = falcon_dimension_resources(efx);
if (rc)
goto fail6;
return 0;
fail6:
- kfree(nic_data);
- efx->nic_data = efx->dl_info = NULL;
+ efx->dl_info = NULL;
fail5:
falcon_remove_spi_devices(efx);
falcon_free_buffer(efx, &efx->irq_status);
fail4:
/* fall-thru */
fail3:
- if (efx->pci_dev2) {
- pci_dev_put(efx->pci_dev2);
- efx->pci_dev2 = NULL;
+ if (nic_data->pci_dev2) {
+ pci_dev_put(nic_data->pci_dev2);
+ nic_data->pci_dev2 = NULL;
}
fail2:
/* fall-thru */
fail1:
+ kfree(efx->nic_data);
return rc;
}
&pcie_ctrl_stat_reg);
pcie_devicectrl = (u16) EFX_EXTRACT_DWORD(pcie_ctrl_stat_reg, 0, 15);
tlp_size = ((PCI_EXP_DEVCTL_PAYLOAD & pcie_devicectrl) >>
- ffs(PCI_EXP_DEVCTL_PAYLOAD));
+ PCI_EXP_DEVCTL_PAYLOAD_LBN);
EFX_WARN_ON_PARANOID(tlp_size > 3); /* => 1024 bytes */
tlp_ack_factor = &tlp_ack_factor_lut[tlp_size & 0x3];
tlp_size_decoded = tlp_ack_factor->tlp;
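The replaced ffs()-based shift was off by one: ffs() is 1-based, so ffs(PCI_EXP_DEVCTL_PAYLOAD) (field mask 0x00e0, bits 7:5 of Device Control) evaluates to 6, while the field's low bit number is 5, which PCI_EXP_DEVCTL_PAYLOAD_LBN now supplies. A small sketch of the intended extraction and its 0..3 decode; the helper is illustrative only:

#include <linux/pci_regs.h>
#include <linux/types.h>

/* Illustrative only: extract Max_Payload_Size from a Device Control
 * value and decode it; 0 -> 128 bytes, 1 -> 256, 2 -> 512, 3 -> 1024. */
static unsigned int example_max_payload_bytes(u16 devctl)
{
	unsigned int code = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	return 128U << code;
}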
*/
int falcon_init_nic(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nic_data *data;
efx_oword_t temp;
unsigned thresh;
/* Use on-chip SRAM if needed.
*/
falcon_read(efx, &temp, NIC_STAT_REG);
- if (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY)
+ if (nic_data->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY)
EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
else
EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 0);
void falcon_remove_nic(struct efx_nic *efx)
{
- /* Tear down the private nic state, and the driverlink nic params */
- kfree(efx->nic_data);
- efx->nic_data = efx->dl_info = NULL;
+ struct falcon_nic_data *nic_data = efx->nic_data;
falcon_remove_spi_devices(efx);
falcon_free_buffer(efx, &efx->irq_status);
(void) falcon_reset_hw(efx, RESET_TYPE_ALL);
/* Release the second function after the reset */
- if (efx->pci_dev2) {
- pci_dev_put(efx->pci_dev2);
- efx->pci_dev2 = NULL;
+ if (nic_data->pci_dev2) {
+ pci_dev_put(nic_data->pci_dev2);
+ nic_data->pci_dev2 = NULL;
}
+
+ /* Tear down the private nic state, and the driverlink nic params */
+ kfree(efx->nic_data);
+ efx->nic_data = efx->dl_info = NULL;
}
void falcon_update_nic_stats(struct efx_nic *efx)
*/
struct efx_nic_type falcon_a_nic_type = {
- .is_dual_func = 1,
.mem_bar = 2,
.mem_map_size = 0x20000,
.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
};
struct efx_nic_type falcon_b_nic_type = {
- .is_dual_func = 0,
.mem_bar = 2,
/* Map everything up to and including the RSS indirection
* table. Don't map MSI-X table, MSI-X PBA since Linux
#ifndef EFX_FALCON_H
#define EFX_FALCON_H
-#include <asm/io.h>
-#include <linux/spinlock.h>
#include "net_driver.h"
/*
FALCON_REV_B0 = 2,
};
-#if defined(EFX_USE_PCI_DEV_REVISION)
-#define FALCON_REV(efx) ((efx)->pci_dev->revision)
-#else
#define FALCON_REV(efx) ((efx)->revision)
-#endif
extern struct efx_nic_type falcon_a_nic_type;
extern struct efx_nic_type falcon_b_nic_type;
extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
-#if defined(EFX_USE_FASTCALL)
extern void fastcall falcon_push_buffers(struct efx_tx_queue *tx_queue);
-#else
-extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
-#endif
/* RX data path */
extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
-#if defined(EFX_USE_FASTCALL)
extern void fastcall falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
-#else
-extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
-#endif
/* Event data path */
extern int falcon_probe_eventq(struct efx_channel *channel);
extern int falcon_init_eventq(struct efx_channel *channel);
extern void falcon_fini_eventq(struct efx_channel *channel);
extern void falcon_remove_eventq(struct efx_channel *channel);
-#if defined(EFX_USE_FASTCALL)
extern int fastcall falcon_process_eventq(struct efx_channel *channel,
int *rx_quota);
-#else
-extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
-#endif
-#if defined(EFX_USE_FASTCALL)
extern void fastcall falcon_eventq_read_ack(struct efx_channel *channel);
-#else
-extern void falcon_eventq_read_ack(struct efx_channel *channel);
-#endif
/* Ports */
extern int falcon_probe_port(struct efx_nic *efx);
extern void falcon_remove_port(struct efx_nic *efx);
/* MAC/PHY */
-extern void falcon_check_xaui_link_up(struct efx_nic *efx);
extern int falcon_xaui_link_ok(struct efx_nic *efx);
extern int falcon_dma_stats(struct efx_nic *efx,
unsigned int done_offset);
#define XM_DIS_FCNTL_WIDTH 1
/* XGMAC pause time count register */
+/* XGMAC management interrupt mask register */
+#define XM_MGT_INT_MSK_REG_MAC_B0 0x5
+#define XM_MSK_PRMBLE_ERR_LBN 2
+#define XM_MSK_PRMBLE_ERR_WIDTH 1
+#define XM_MSK_RMTFLT_LBN 1
+#define XM_MSK_RMTFLT_WIDTH 1
+#define XM_MSK_LCLFLT_LBN 0
+#define XM_MSK_LCLFLT_WIDTH 1
+
#define XM_PAUSE_TIME_REG_MAC 0x9
#define XM_TX_PAUSE_CNT_LBN 16
#define XM_TX_PAUSE_CNT_WIDTH 16
#define XX_PWRDNC_EN_LBN 14
#define XX_PWRDNC_EN_WIDTH 1
#define XX_PWRDNB_EN_LBN 13
+/* XGMAC management interrupt status register */
+#define XM_MGT_INT_REG_MAC_B0 0x0f
+#define XM_PRMBLE_ERR_LBN 2
+#define XM_PRMBLE_ERR_WIDTH 1
+#define XM_RMTFLT_LBN 1
+#define XM_RMTFLT_WIDTH 1
+#define XM_LCLFLT_LBN 0
+#define XM_LCLFLT_WIDTH 1
+
#define XX_PWRDNB_EN_WIDTH 1
#define XX_PWRDNA_EN_LBN 12
#define XX_PWRDNA_EN_WIDTH 1
#ifndef EFX_FALCON_IO_H
#define EFX_FALCON_IO_H
+#include <linux/io.h>
+#include <linux/spinlock.h>
#include "net_driver.h"
-#include "falcon.h"
/**************************************************************************
*
return rc;
}
+static int falcon_xgmii_status(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ if (FALCON_REV(efx) < FALCON_REV_B0)
+ return 1;
+
+ /* The ISR latches, so clear it and re-read */
+ efx->mac_op->mac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+ efx->mac_op->mac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+
+ if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
+ EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
+ EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
+ return 0;
+ }
+
+ return 1;
+}
+
+static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
+{
+ efx_dword_t reg;
+
+ if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
+ return;
+
+ /* Flush the ISR */
+ if (enable)
+ efx->mac_op->mac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+
+ EFX_POPULATE_DWORD_2(reg,
+ XM_MSK_RMTFLT, !enable,
+ XM_MSK_LCLFLT, !enable);
+ efx->mac_op->mac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0);
+}
+
static int falcon_init_xmac(struct efx_nic *efx)
{
int rc;
if (rc)
goto fail2;
+ falcon_mask_status_intr(efx, 1);
return 0;
fail2:
int falcon_xaui_link_ok(struct efx_nic *efx)
{
efx_dword_t reg;
- int align_done;
- int sync_status;
- int link_ok = 0;
+ int align_done, sync_status, link_ok = 0;
/* If we're in internal loopback, then the link is up.
* The A1 FPGA/4G has RX and TX XAUI wired together, so the link is up.
EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
-
efx->mac_op->mac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
+ /* If the link is up, then check the phy side of the xaui link
+ * (error conditions from the wire side propagate back through
+ * the phy to the xaui side). */
+ if (efx->link_up && link_ok) {
+ int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
+ if (has_phyxs)
+ link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
+ }
+
+ /* If the PHY and XAUI links are up, then check the mac's xgmii
+ * fault state */
+ if (efx->link_up && link_ok)
+ link_ok = falcon_xgmii_status(efx);
+
return link_ok;
}
-/* Do most of the heavy lifting of falcon_reconfigure_xmac */
static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
unsigned int max_frame_len;
efx->mac_op->mac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
/* Handle B0 FPGA loopback where RAMBUS XGXS block not present */
- if (FALCON_REV(efx) == FALCON_REV_B0 && !efx->is_asic) {
+ if (FALCON_REV(efx) >= FALCON_REV_B0 && !efx->is_asic) {
int xgmii_loopback =
(efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
}
}
-/* Do most of the heavy lifting of falcon_reconfigure_xmac */
static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
efx_dword_t reg;
int xgmii_loopback =
(efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
- if (FALCON_REV(efx) == FALCON_REV_B0 && !efx->is_asic)
+ if (FALCON_REV(efx) >= FALCON_REV_B0 && !efx->is_asic)
/* RAMBUS XGXS block is not present */
return;
}
-/* Sometimes the XAUI link between Falcon and XFP fails to come up. The state
- * of the link is checked during phy_reconfigure(). After XAIU is reset then
- * the MAC must be reconfigured.
- */
-#define MAX_XAUI_TRIES (5) /* It's never been seen to take more than 2 */
-
-void falcon_check_xaui_link_up(struct efx_nic *efx)
+/* Sometimes the Falcon side of the Falcon-PHY XAUI link fails to come
+ * back up. Bash it until it does. */
+static int falcon_check_xaui_link_up(struct efx_nic *efx)
{
int max_tries, tries;
- tries = EFX_WORKAROUND_5147(efx) ? MAX_XAUI_TRIES : 1;
+ tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
max_tries = tries;
if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
(efx->phy_type == PHY_TYPE_NONE) ||
!efx->phy_powered)
- return;
+ return 0;
while (tries) {
if (falcon_xaui_link_ok(efx))
- return;
+ return 1;
EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
__func__, tries);
(void) falcon_reset_xaui(efx);
- /* Cannot use full reconfigure. Need to avoid recursion */
-
- /* Give the poor thing time to sort itself out: if we retry
- * too fast it will never train. */
udelay(200);
-
- falcon_reconfigure_xgxs_core(efx);
-
tries--;
}
- EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
+ EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
max_tries);
+ return 0;
}
static void falcon_reconfigure_xmac(struct efx_nic *efx)
{
+ int xaui_link_ok;
+
+ falcon_mask_status_intr(efx, 0);
+
+ /* Deconfigure the mac wrapper, draining the tx fifo if necessary */
falcon_deconfigure_mac_wrapper(efx);
- /* In internal loopback modes disable transmit */
+ /* Reconfigure the PHY, disabling transmit in mac level loopback. */
efx->tx_disabled = LOOPBACK_INTERNAL(efx);
-
efx->phy_op->reconfigure(efx);
falcon_reconfigure_xgxs_core(efx);
/* Reconfigure MAC wrapper */
falcon_reconfigure_mac_wrapper(efx);
- /* Ensure XAUI link is up - might repeat reconfigure_xmac_core */
- falcon_check_xaui_link_up(efx);
+ /* Ensure XAUI link is up */
+ xaui_link_ok = falcon_check_xaui_link_up(efx);
+
+ if (xaui_link_ok && efx->link_up)
+ falcon_mask_status_intr(efx, 1);
}
static void falcon_fini_xmac(struct efx_nic *efx)
static int falcon_check_xmac(struct efx_nic *efx)
{
- unsigned link_ok, phyxs_ok = 1;
- unsigned has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
-
- /* Check the remote XAUI link status */
- link_ok = falcon_xaui_link_ok(efx);
+ unsigned xaui_link_ok;
+ int rc;
if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
+ (efx->phy_type == PHY_TYPE_NONE) ||
!efx->phy_powered)
return 0;
- if (link_ok && has_phyxs && !LOOPBACK_INTERNAL(efx)) {
- /* Does the PHYXS think we have lane sync? */
- phyxs_ok = mdio_clause45_phyxgxs_lane_sync(efx);
- }
+ falcon_mask_status_intr(efx, 0);
+ xaui_link_ok = falcon_xaui_link_ok(efx);
- if (EFX_WORKAROUND_5147(efx) && (!link_ok || !phyxs_ok)) {
+ if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
(void) falcon_reset_xaui(efx);
- falcon_reconfigure_xgxs_core(efx);
- }
/* Call the PHY check_hw routine */
- efx->phy_op->check_hw(efx);
- return 0;
+ rc = efx->phy_op->check_hw(efx);
+
+ /* Unmask interrupt if everything was (and still is) ok */
+ if (xaui_link_ok && efx->link_up)
+ falcon_mask_status_intr(efx, 1);
+
+ return rc;
}
/* Simulate a PHY event */
reset = ((flow_control & EFX_FC_TX) &&
!(efx->flow_control & EFX_FC_TX));
if (EFX_WORKAROUND_11482(efx) && reset) {
- if (FALCON_REV(efx) == FALCON_REV_B0) {
+ if (FALCON_REV(efx) >= FALCON_REV_B0) {
/* Recover by resetting the EM block */
- mutex_lock(&efx->mac_lock);
if (efx->link_up)
falcon_drain_tx_fifo(efx);
- mutex_unlock(&efx->mac_lock);
} else {
/* Schedule a reset to recover */
efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
****************************************************************************
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include "net_driver.h"
#include "i2c-direct.h"
-/* EEPROM access via I2C
- * data (SDA) and clock (SCL) line read/writes
+/*
+ * I2C data (SDA) and clock (SCL) line read/writes with appropriate
+ * delays.
*/
static inline void setsda(struct efx_i2c_interface *i2c, int state)
{
EFX_WARN_ON_PARANOID(!i2c->scl);
EFX_WARN_ON_PARANOID(!i2c->sda);
- /* Just in case */
+ /* Devices may time out if operations do not end */
setscl(i2c, 1);
setsda(i2c, 1);
EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
****************************************************************************
*/
-#define EFX_IN_KCOMPAT_C 1
-
#include "net_driver.h"
#include <linux/mii.h>
#include <linux/ethtool.h>
* This file provides functionality missing from earlier kernels.
*/
-/**************************************************************************
- *
- * GMII-friendly versions of mii_ethtool_[gs]set
- *
- **************************************************************************
- *
- * Kernels prior to 2.6.12 don't support GMII PHYs via
- * mii_ethtool_gset and mii_ethtool_sset. These are those functions
- * taken from a 2.6.12 kernel tree, with the tests for
- * mii->supports_gmii removed (since that field doesn't exist in older
- * kernels).
- *
- */
-
-#ifdef EFX_NEED_MII_ETHTOOL_FIX
-int efx_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
-{
- struct net_device *dev = mii->dev;
- u32 advert, bmcr, lpa, nego;
- u32 advert2 = 0, bmcr2 = 0, lpa2 = 0;
-
- ecmd->supported =
- (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
- ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
-
- /* only supports twisted-pair */
- ecmd->port = PORT_MII;
-
- /* only supports internal transceiver */
- ecmd->transceiver = XCVR_INTERNAL;
-
- /* this isn't fully supported at higher layers */
- ecmd->phy_address = mii->phy_id;
-
- ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
- advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
- advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
-
- if (advert & ADVERTISE_10HALF)
- ecmd->advertising |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- ecmd->advertising |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (advert2 & ADVERTISE_1000HALF)
- ecmd->advertising |= ADVERTISED_1000baseT_Half;
- if (advert2 & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
-
- bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
- lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
- bmcr2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
- lpa2 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
- if (bmcr & BMCR_ANENABLE) {
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
-
- nego = mii_nway_result(advert & lpa);
- if ((bmcr2 & (ADVERTISE_1000HALF | ADVERTISE_1000FULL)) &
- (lpa2 >> 2))
- ecmd->speed = SPEED_1000;
- else if (nego == LPA_100FULL || nego == LPA_100HALF)
- ecmd->speed = SPEED_100;
- else
- ecmd->speed = SPEED_10;
- if ((lpa2 & LPA_1000FULL) || nego == LPA_100FULL ||
- nego == LPA_10FULL) {
- ecmd->duplex = DUPLEX_FULL;
- mii->full_duplex = 1;
- } else {
- ecmd->duplex = DUPLEX_HALF;
- mii->full_duplex = 0;
- }
- } else {
- ecmd->autoneg = AUTONEG_DISABLE;
-
- ecmd->speed = ((bmcr & BMCR_SPEED1000 &&
- (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 :
- (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10);
- ecmd->duplex =
- (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
- }
-
- /* ignore maxtxpkt, maxrxpkt for now */
-
- return 0;
-}
-
-int efx_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
-{
- struct net_device *dev = mii->dev;
-
- if (ecmd->speed != SPEED_10 &&
- ecmd->speed != SPEED_100 &&
- ecmd->speed != SPEED_1000)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
- return -EINVAL;
- if (ecmd->port != PORT_MII)
- return -EINVAL;
- if (ecmd->transceiver != XCVR_INTERNAL)
- return -EINVAL;
- if (ecmd->phy_address != mii->phy_id)
- return -EINVAL;
- if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
- return -EINVAL;
-
- /* ignore supported, maxtxpkt, maxrxpkt */
-
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- u32 bmcr, advert, tmp;
- u32 advert2 = 0, tmp2 = 0;
-
- if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full)) == 0)
- return -EINVAL;
-
- /* advertise only what has been requested */
- advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
- tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
- advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
- tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
- tmp |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
- tmp |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
- tmp |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
- tmp |= ADVERTISE_100FULL;
- if (ecmd->advertising & ADVERTISED_1000baseT_Half)
- tmp2 |= ADVERTISE_1000HALF;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
- tmp2 |= ADVERTISE_1000FULL;
- if (advert != tmp) {
- mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
- mii->advertising = tmp;
- }
- if (advert2 != tmp2)
- mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2);
-
- /* turn on autonegotiation, and force a renegotiate */
- bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
-
- mii->force_media = 0;
- } else {
- u32 bmcr, tmp;
-
- /* turn off auto negotiation, set speed and duplexity */
- bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
- tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
- BMCR_SPEED1000 | BMCR_FULLDPLX);
- if (ecmd->speed == SPEED_1000)
- tmp |= BMCR_SPEED1000;
- else if (ecmd->speed == SPEED_100)
- tmp |= BMCR_SPEED100;
- if (ecmd->duplex == DUPLEX_FULL) {
- tmp |= BMCR_FULLDPLX;
- mii->full_duplex = 1;
- } else {
- mii->full_duplex = 0;
- }
- if (bmcr != tmp)
- mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
-
- mii->force_media = 1;
- }
- return 0;
-}
-#endif /* NEED_EFX_MII_ETHTOOL_GSET */
-
-/**************************************************************************
- *
- * unregister_netdevice_notifier : Has a race before 2.6.17
- *
- **************************************************************************
- *
- */
-
-#ifdef EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX
-/**
- * efx_unregister_netdevice_notifier - fixed unregister_netdevice_notifier
- * @nb: notifier to unregister
- *
- * unregister_netdevice_notifier() does not wait for the notifier
- * to be unused before 2.6.17. This wrapper fixes that.
- */
-int efx_unregister_netdevice_notifier(struct notifier_block *nb)
-{
- int res;
-
- res = unregister_netdevice_notifier(nb);
- /* Ensure any outstanding calls complete. */
- rtnl_lock();
- rtnl_unlock();
- return res;
-}
-#endif /* NEED_EFX_UNREGISTER_NETDEVICE_NOTIFIER */
-
-/**************************************************************************
- *
- * IOMMU-locking versions of pci_[un]map_single and
- * pci_{alloc,free}_consistent. See SFC bug 4560.
- *
- **************************************************************************
- *
- */
-#ifdef EFX_NEED_IOMMU_LOCK
-
-/*
- * efx_use_iommu_lock - IOMMU lock use control
- *
- * If set to 1, the driver will attempt to mitigate the race condition
- * bug around IOMMU accesses in some 2.6 kernels. If set to 2, the
- * driver will use the lock even if it thinks it doesn't need to.
- * Note that this is only a best-effort attempt; in particular, we
- * cannot do anything about other drivers touching the IOMMU.
- */
-static unsigned int efx_use_iommu_lock = 1;
-EXPORT_SYMBOL(efx_use_iommu_lock);
-
-/*
- * efx_iommu_lock - lock around IOMMU accesses
- *
- * This spinlock should be held while calling functions that access
- * the IOMMU if efx_use_iommu_lock is >= 2. The efx_pci_*()
- * functions do this where possible.
- */
-static spinlock_t efx_iommu_lock = SPIN_LOCK_UNLOCKED;
-EXPORT_SYMBOL(efx_iommu_lock);
-
-/* Don't use the IOMMU lock if the device can access the whole of memory */
-#define EFX_DMA_CONSISTENT(_efx) \
- (((_efx)->dma_mask >> PAGE_SHIFT) >= max_pfn)
-/**
- * efx_pci_map_single - map buffer for DMA, under IOMMU lock
- * @pci: PCI device
- * @ptr: Buffer
- * @size: Buffer length
- * @direction: DMA direction
- *
- * Wrapper for pci_map_single that uses efx_iommu_lock if necessary.
- */
-dma_addr_t efx_pci_map_single(struct pci_dev *pci, void *ptr, size_t size,
- int direction)
-{
- struct efx_nic *efx = pci_get_drvdata(pci);
- unsigned long flags __attribute__ ((unused));
- dma_addr_t dma_addr;
-
- if (unlikely((efx_use_iommu_lock &&
- (!EFX_NO_IOMMU) &&
- (!EFX_DMA_CONSISTENT(efx))) ||
- efx_use_iommu_lock >= 2)) {
- spin_lock_irqsave(&efx_iommu_lock, flags);
- dma_addr = pci_map_single(pci, ptr, size, direction);
- spin_unlock_irqrestore(&efx_iommu_lock, flags);
- } else {
- dma_addr = pci_map_single(pci, ptr, size, direction);
- }
- return dma_addr;
-}
-
-/**
- * efx_pci_unmap_single - unmap buffer for DMA, under IOMMU lock
- * @pci: PCI device
- * @dma_addr: DMA address
- * @size: Buffer length
- * @direction: DMA direction
- *
- * Wrapper for pci_unmap_single that uses efx_iommu_lock if necessary.
- */
-void efx_pci_unmap_single(struct pci_dev *pci, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- struct efx_nic *efx = pci_get_drvdata(pci);
- unsigned long flags __attribute__ ((unused));
-
- if (unlikely((efx_use_iommu_lock &&
- (!EFX_NO_IOMMU) &&
- (!EFX_DMA_CONSISTENT(efx))) ||
- efx_use_iommu_lock >= 2)) {
- spin_lock_irqsave(&efx_iommu_lock, flags);
- pci_unmap_single(pci, dma_addr, size, direction);
- spin_unlock_irqrestore(&efx_iommu_lock, flags);
- } else {
- pci_unmap_single(pci, dma_addr, size, direction);
- }
-}
-
-/**
- * efx_pci_alloc_consistent - allocate DMA-consistent buffer, under IOMMU lock
- * @pci: PCI device
- * @size: Buffer length
- * @dma_addr: DMA address
- *
- * Wrapper for pci_alloc_consistent that uses efx_iommu_lock if necessary.
- *
- * Bugs: Currently this can't use the spinlock because
- * pci_alloc_consistent may block.
- */
-void *efx_pci_alloc_consistent(struct pci_dev *pci, size_t size,
- dma_addr_t *dma_addr)
-{
- return pci_alloc_consistent(pci, size, dma_addr);
-}
-
-/**
- * efx_pci_free_consistent - free DMA-consistent buffer, under IOMMU lock
- * @pci: PCI device
- * @size: Buffer length
- * @ptr: Buffer
- * @dma_addr: DMA address
- *
- * Wrapper for pci_free_consistent that uses efx_iommu_lock if necessary.
- */
-void efx_pci_free_consistent(struct pci_dev *pci, size_t size, void *ptr,
- dma_addr_t dma_addr)
-{
- struct efx_nic *efx = pci_get_drvdata(pci);
- unsigned long flags __attribute__ ((unused));
-
- if (unlikely((efx_use_iommu_lock &&
- (!EFX_NO_IOMMU) &&
- (!EFX_DMA_CONSISTENT(efx))) ||
- efx_use_iommu_lock >= 2)) {
- spin_lock_irqsave(&efx_iommu_lock, flags);
- pci_free_consistent(pci, size, ptr, dma_addr);
- spin_unlock_irqrestore(&efx_iommu_lock, flags);
- } else {
- pci_free_consistent(pci, size, ptr, dma_addr);
- }
-}
-
-module_param(efx_use_iommu_lock, uint, 0644);
-MODULE_PARM_DESC(efx_use_iommu_lock, "Enable lock for bug in free_iommu");
-
-#endif
-
-#ifdef EFX_NEED_COMPOUND_PAGE_FIX
-
-void efx_compound_page_destructor(struct page *page)
-{
- /* Fake up page state to keep __free_pages happy */
- set_page_count(page, 1);
- page[1].mapping = NULL;
-
- __free_pages(page, (unsigned long)page[1].index);
-}
-
-#endif /* NEED_COMPOUND_PAGE_FIX */
-
-/**************************************************************************
- *
- * print_hex_dump, taken from lib/hexdump.c.
- *
- **************************************************************************
- *
- */
-#ifdef EFX_NEED_HEX_DUMP
-
-#define hex_asc(x) "0123456789abcdef"[x]
-#define isascii(c) (((unsigned char)(c))<=0x7f)
-
-static void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
- int groupsize, char *linebuf, size_t linebuflen,
- int ascii)
-{
- const u8 *ptr = buf;
- u8 ch;
- int j, lx = 0;
- int ascii_column;
-
- if (rowsize != 16 && rowsize != 32)
- rowsize = 16;
-
- if (!len)
- goto nil;
- if (len > rowsize) /* limit to one line at a time */
- len = rowsize;
- if ((len % groupsize) != 0) /* no mixed size output */
- groupsize = 1;
-
- switch (groupsize) {
- case 8: {
- const u64 *ptr8 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%16.16llx ", (unsigned long long)*(ptr8 + j));
- ascii_column = 17 * ngroups + 2;
- break;
- }
-
- case 4: {
- const u32 *ptr4 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%8.8x ", *(ptr4 + j));
- ascii_column = 9 * ngroups + 2;
- break;
- }
-
- case 2: {
- const u16 *ptr2 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%4.4x ", *(ptr2 + j));
- ascii_column = 5 * ngroups + 2;
- break;
- }
-
- default:
- for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
- j++) {
- ch = ptr[j];
- linebuf[lx++] = hex_asc(ch >> 4);
- linebuf[lx++] = hex_asc(ch & 0x0f);
- linebuf[lx++] = ' ';
- }
- ascii_column = 3 * rowsize + 2;
- break;
- }
- if (!ascii)
- goto nil;
-
- while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
- linebuf[lx++] = ' ';
- /* Removed is_print() check */
- for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
- linebuf[lx++] = isascii(ptr[j]) ? ptr[j] : '.';
-nil:
- linebuf[lx++] = '\0';
-}
-
-void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
- int rowsize, int groupsize,
- const void *buf, size_t len, int ascii)
-{
- const u8 *ptr = buf;
- int i, linelen, remaining = len;
- char linebuf[200];
-
- if (rowsize != 16 && rowsize != 32)
- rowsize = 16;
-
- for (i = 0; i < len; i += rowsize) {
- linelen = min(remaining, rowsize);
- remaining -= rowsize;
- hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
- linebuf, sizeof(linebuf), ascii);
-
- switch (prefix_type) {
- case DUMP_PREFIX_ADDRESS:
- printk("%s%s%*p: %s\n", level, prefix_str,
- (int)(2 * sizeof(void *)), ptr + i, linebuf);
- break;
- case DUMP_PREFIX_OFFSET:
- printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
- break;
- default:
- printk("%s%s%s\n", level, prefix_str, linebuf);
- break;
- }
- }
-}
-
-#endif /* EFX_NEED_HEX_DUMP */
-
/**************************************************************************
*
* print_mac, from net/ethernet/eth.c in v2.6.24
**************************************************************************
*
*/
-#ifdef EFX_NEED_PRINT_MAC
char *print_mac(char *buf, const u8 *addr)
{
sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
return buf;
}
-#endif /* EFX_NEED_PRINT_MAC */
#ifdef EFX_NEED_CSUM_TCPUDP_NOFOLD
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
-__wsum
-csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-#else
__wsum
csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto, __wsum sum)
-#endif
{
unsigned long result;
}
#endif /* EFX_NEED_CSUM_TCPUDP_NOFOLD */
-
-#ifdef EFX_NEED_RANDOM_ETHER_ADDR
-/* Generate random MAC address */
-void efx_random_ether_addr(uint8_t *addr) {
- get_random_bytes (addr, ETH_ALEN);
- addr [0] &= 0xfe; /* clear multicast bit */
- addr [0] |= 0x02; /* set local assignment bit (IEEE802) */
-}
-#endif /* EFX_NEED_RANDOM_ETHER_ADDR */
-
-#ifdef EFX_NEED_MSECS_TO_JIFFIES
-/*
- * When we convert to jiffies then we interpret incoming values
- * the following way:
- *
- * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
- *
- * - 'too large' values [that would result in larger than
- * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
- *
- * - all other values are converted to jiffies by either multiplying
- * the input value by a factor or dividing it with a factor
- *
- * We must also be careful about 32-bit overflows.
- */
-#ifndef MSEC_PER_SEC
-#define MSEC_PER_SEC 1000L
-#endif
-unsigned long msecs_to_jiffies(const unsigned int m)
-{
- /*
- * Negative value, means infinite timeout:
- */
- if ((int)m < 0)
- return MAX_JIFFY_OFFSET;
-
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- /*
- * HZ is equal to or smaller than 1000, and 1000 is a nice
- * round multiple of HZ, divide with the factor between them,
- * but round upwards:
- */
- return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
- /*
- * HZ is larger than 1000, and HZ is a nice round multiple of
- * 1000 - simply multiply with the factor between them.
- *
- * But first make sure the multiplication result cannot
- * overflow:
- */
- if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-
- return m * (HZ / MSEC_PER_SEC);
-#else
- /*
- * Generic case - multiply, round and divide. But first
- * check that if we are doing a net multiplication, that
- * we wouldnt overflow:
- */
- if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-
- return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
-#endif
-}
-#endif /* EFX_NEED_MSECS_TO_JIFFIES */
-
-#ifdef EFX_NEED_MSLEEP
-/**
- * msleep - sleep safely even with waitqueue interruptions
- * @msecs: Time in milliseconds to sleep for
- */
-void msleep(unsigned int msecs)
-{
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
-
- while (timeout)
- timeout = schedule_timeout_uninterruptible(timeout);
-}
-#endif
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-
-#include "extraversion.h"
+#include <linux/rtnetlink.h>
/*
* Kernel backwards compatibility
*
- * This file provides macros that enable the driver to be compiled on
- * any kernel from 2.6.9 onward (plus SLES 9 2.6.5), without requiring
- * explicit version tests scattered throughout the code.
- */
-
-/**************************************************************************
- *
- * Version/config/architecture tests to set feature flags
- *
- **************************************************************************
- *
- * NOTE: For simplicity, these initial version tests cover kernel.org
- * releases only. Backported features in "enterprise" kernels are
- * handled further down.
+ * This file provides macros to facilitate backporting the driver.
*/
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) && \
- !(defined(EFX_DIST_SUSE) && \
- LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5) && \
- EFX_DIST_KVER_LEVEL_1 == 7)
- #error "This kernel version is now unsupported"
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)
- #define EFX_NEED_RANDOM_ETHER_ADDR yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
- #define EFX_NEED_I2C_CLASS_HWMON yes
- #define EFX_NEED_IF_MII yes
- #define EFX_NEED_MSLEEP yes
- #define EFX_NEED_MSECS_TO_JIFFIES yes
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,8)
- #define EFX_USE_MTD_ERASE_FAIL_ADDR yes
-#else
- #define EFX_NEED_MTD_ERASE_CALLBACK yes
- #define EFX_NEED_DUMMY_PCI_DISABLE_MSI yes
- #define EFX_NEED_DUMMY_MSIX yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
- #define EFX_NEED_BYTEORDER_TYPES yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
- #define EFX_NEED_MMIOWB yes
- #define EFX_NEED_PCI_SAVE_RESTORE_WRAPPERS yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
- #define EFX_NEED_DUMMY_SUPPORTS_GMII yes
- #define EFX_NEED_MII_CONSTANTS yes
- #define EFX_NEED_MII_ETHTOOL_FIX yes
- #define EFX_HAVE_MSIX_TABLE_RESERVED yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
- #define EFX_NEED_SCHEDULE_TIMEOUT_INTERRUPTIBLE yes
- #define EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE yes
- #define EFX_NEED_GFP_T yes
- #define EFX_NEED_KZALLOC yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
- #define EFX_NEED_SETUP_TIMER yes
- #ifdef CONFIG_HUGETLB_PAGE
- #define EFX_USE_COMPOUND_PAGES yes
- #endif
-#else
- #define EFX_USE_COMPOUND_PAGES yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
- #define EFX_NEED_MUTEX yes
- #define EFX_NEED_SAFE_LISTS yes
- #ifdef EFX_USE_COMPOUND_PAGES
- #define EFX_NEED_COMPOUND_PAGE_FIX yes
- #endif
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
- #define EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX yes
- #define EFX_NEED_DEV_NOTICE yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
- #define EFX_NEED_IRQF_FLAGS yes
- #define EFX_NEED_NETDEV_ALLOC_SKB yes
- /* Fedora backported 2.6.18 netdevice.h changes */
- #ifndef NETIF_F_GSO
- #define EFX_NEED_NETIF_TX_LOCK yes
- #endif
-#else
- #define EFX_USE_MTD_WRITESIZE yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
- #define EFX_NEED_IRQ_HANDLER_T yes
- #define EFX_HAVE_IRQ_HANDLER_REGS yes
+#ifdef __ia64__
+ /* csum_tcpudp_nofold() is extern but not exported */
+ #define EFX_NEED_CSUM_TCPUDP_NOFOLD yes
#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- #define EFX_NEED_WORK_API_WRAPPERS yes
- #define EFX_USE_FASTCALL yes
- #define EFX_NEED_CSUM_UNFOLDED yes
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
- /*
- * debugfs was introduced earlier, but only supports sym-links
- * from 2.6.21
- */
- #ifdef CONFIG_DEBUG_FS
- #define EFX_USE_DEBUGFS yes
- #endif
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
- #define EFX_NEED_SKB_HEADER_MACROS yes
- #define EFX_NEED_HEX_DUMP yes
-#else
- #define EFX_USE_CANCEL_WORK_SYNC yes
-#endif
-
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,22)
- #define EFX_NEED_HEX_DUMP_CONST_FIX yes
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
- #define EFX_USE_ETHTOOL_GET_PERM_ADDR yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
- #ifdef __ia64__
- /* csum_tcpudp_nofold() is extern but not exported */
- #define EFX_NEED_CSUM_TCPUDP_NOFOLD yes
- #endif
-#else
- #define EFX_USE_PCI_DEV_REVISION yes
- #define EFX_USE_CANCEL_DELAYED_WORK_SYNC yes
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
- #define EFX_HAVE_OLD_NAPI yes
- #define EFX_NEED_GENERIC_LRO yes
- #define EFX_NEED_PRINT_MAC yes
-#else
- #define EFX_USE_ETHTOOL_FLAGS yes
-#endif
-
-/*
- * SFC Bug 4560: Some kernels leak IOMMU entries under heavy load. Use a
- * spinlock to serialise access where possible to alleviate the
- * problem.
- *
- * NB. The following definition is duplicated in
- * the char driver. Please keep in sync.
- */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && \
- defined(__x86_64__) && defined(CONFIG_SMP))
- #define EFX_NEED_IOMMU_LOCK yes
- #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
- #if defined(CONFIG_GART_IOMMU)
- #define EFX_NO_IOMMU no_iommu
- #else
- #define EFX_NO_IOMMU 1
- #endif
- #else
- #define EFX_NO_IOMMU 0
- #endif
-#endif
-
#ifdef CONFIG_PPC64
/* __raw_writel and friends are broken on ppc64 */
#define EFX_NEED_RAW_READ_AND_WRITE_FIX yes
#endif
-/**************************************************************************
- *
- * Exceptions for backported features
- *
- **************************************************************************
- */
-
-/* RHEL4 */
-#if defined(EFX_DIST_RHEL) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)
- #if EFX_DIST_KVER_LEVEL_1 >= 22
- /* linux-2.6.9-mmiowb.patch */
- #undef EFX_NEED_MMIOWB
- #endif
- #if EFX_DIST_KVER_LEVEL_1 >= 34
- /* linux-2.6.9-net-mii-update.patch */
- #undef EFX_NEED_DUMMY_SUPPORTS_GMII
- #undef EFX_NEED_MII_CONSTANTS
- #undef EFX_NEED_MII_ETHTOOL_FIX
- /* linux-2.6.9-gfp_t-typedef.patch */
- #undef EFX_NEED_GFP_T
- /* linux-2.6.9-slab-update.patch */
- #undef EFX_NEED_KZALLOC
- #endif
- #if EFX_DIST_KVER_LEVEL_1 >= 55
- /* linux-2.6.18-sata-update.patch (ported from 2.6.18->2.6.9) */
- #undef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
- #undef EFX_NEED_IRQ_HANDLER_T
- #endif
-#endif
-
-/* RHEL5 */
-#if defined(EFX_DIST_RHEL) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
- #if EFX_DIST_KVER_LEVEL_1 >= 53
- /* linux-2.6.18-sata-update.patch */
- #undef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
- #undef EFX_NEED_IRQ_HANDLER_T
- #endif
-#endif
-
-#if defined(EFX_DIST_RHEL)
- #if (LINUX_VERSION_CODE != KERNEL_VERSION(2,6,9)) && \
- (LINUX_VERSION_CODE != KERNEL_VERSION(2,6,18))
- #error "Unknown Red Hat Enterprise kernel version"
- #endif
-#endif
-
-/* SLES9 */
-#if defined(EFX_DIST_SUSE) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5) && \
- EFX_DIST_KVER_LEVEL_1 == 7
- #if EFX_DIST_KVER_LEVEL_2 >= 139
- #undef EFX_NEED_MMIOWB
- #endif
- #if EFX_DIST_KVER_LEVEL_2 >= 191
- #undef EFX_NEED_MSLEEP
- #undef EFX_NEED_MSECS_TO_JIFFIES
- #endif
- #if EFX_DIST_KVER_LEVEL_2 >= 244
- #undef EFX_NEED_BYTEORDER_TYPES
- #endif
- #if EFX_DIST_KVER_LEVEL_2 >= 252
- #undef EFX_NEED_KZALLOC
- #endif
-#endif
-
-/**************************************************************************
- *
- * Definitions of missing constants, types, functions and macros
- *
- **************************************************************************
- *
- */
-
-#ifndef DMA_40BIT_MASK
- #define DMA_40BIT_MASK 0x000000ffffffffffULL
-#endif
-
-#ifndef spin_trylock_irqsave
- #define spin_trylock_irqsave(lock, flags) \
- ({ \
- local_irq_save(flags); \
- spin_trylock(lock) ? \
- 1 : ({local_irq_restore(flags); 0;}); \
- })
-#endif
-
-#ifndef raw_smp_processor_id
- #define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif
-
-#ifndef NETIF_F_LRO
- #define NETIF_F_LRO 0
-#endif
-
-/* Cope with small changes in PCI constants between minor kernel revisions */
-#if PCI_X_STATUS != 4
- #undef PCI_X_STATUS
- #define PCI_X_STATUS 4
- #undef PCI_X_STATUS_MAX_SPLIT
- #define PCI_X_STATUS_MAX_SPLIT 0x03800000
-#endif
-
-#ifndef PCI_EXP_LNKSTA
- #define PCI_EXP_LNKSTA 18 /* Link Status */
-#endif
-
-/* Used for struct pt_regs */
-#ifndef regs_return_value
- #if defined(__x86_64__)
- #define regs_return_value(regs) ((regs)->rax)
- #elif defined(__i386__)
- #define regs_return_value(regs) ((regs)->eax)
- #elif defined(__ia64__)
- #define regs_return_value(regs) ((regs)->r8)
- #else
- #error "Need definition for regs_return_value()"
- #endif
-#endif
+typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *);
-#ifndef __GFP_COMP
- #define __GFP_COMP 0
-#endif
+#define skb_mac_header(skb) (skb)->mac.raw
+#define skb_network_header(skb) (skb)->nh.raw
+#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
+#define tcp_hdr(skb) (skb)->h.th
+#define ip_hdr(skb) (skb)->nh.iph
+#define skb_tail_pointer(skb) (skb)->tail
-#ifndef __iomem
- #define __iomem
-#endif
-
-#ifndef NET_IP_ALIGN
- #define NET_IP_ALIGN 2
-#endif
-
-#ifndef PCI_CAP_ID_EXP
-#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
-#endif
-
-#ifndef PCI_EXP_FLAGS
-#define PCI_EXP_FLAGS 2 /* Capabilities register */
-#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Capability version */
-#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
-#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
-#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
-#endif
-
-#ifndef PCI_EXP_DEVCAP
-#define PCI_EXP_DEVCAP 4 /* Device capabilities */
-#define PCI_EXP_DEVCAP_PAYLOAD 0x07 /* Max_Payload_Size */
-#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
-#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
-#endif
-
-#ifndef PCI_EXP_DEVCTL
-#define PCI_EXP_DEVCTL 8 /* Device Control */
-#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
-#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
-#endif
-
-#ifndef PCI_EXP_LNKSTA
-#define PCI_EXP_LNKSTA 18 /* Link Status */
-#endif
-
-#ifndef NETDEV_TX_OK
- #define NETDEV_TX_OK 0
-#endif
-
-#ifndef NETDEV_TX_BUSY
- #define NETDEV_TX_BUSY 1
-#endif
-
-#ifndef __force
- #define __force
-#endif
-
-#if ! defined(for_each_cpu_mask) && ! defined(CONFIG_SMP)
- #define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#endif
-
-/**************************************************************************/
-
-#ifdef EFX_NEED_IRQ_HANDLER_T
- typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *);
-#endif
-
-#ifdef EFX_NEED_I2C_CLASS_HWMON
- #define I2C_CLASS_HWMON (1<<0)
-#endif
-
-#ifdef EFX_NEED_MII_CONSTANTS
- #define MII_CTRL1000 0x09
- #define MII_STAT1000 0x0a
- #define BMCR_SPEED1000 0x0040
- #define ADVERTISE_PAUSE_ASYM 0x0800
- #define ADVERTISE_PAUSE_CAP 0x0400
- #define LPA_PAUSE_ASYM 0x0800
- #define LPA_PAUSE_CAP 0x0400
- #define ADVERTISE_1000FULL 0x0200
- #define ADVERTISE_1000HALF 0x0100
- #define LPA_1000FULL 0x0800
- #define LPA_1000HALF 0x0400
-#endif
-
-#ifdef EFX_NEED_DUMMY_SUPPORTS_GMII
- #include <linux/mii.h>
- /* Ugly; redirect nonexistent new field to an old unused field */
- #undef supports_gmii
- #define supports_gmii full_duplex
-#endif
-
-#ifdef EFX_NEED_SKB_HEADER_MACROS
- #define skb_mac_header(skb) (skb)->mac.raw
- #define skb_network_header(skb) (skb)->nh.raw
- #define tcp_hdr(skb) (skb)->h.th
- #define ip_hdr(skb) (skb)->nh.iph
- #define skb_tail_pointer(skb) (skb)->tail
-#endif
#ifdef EFX_NEED_RAW_READ_AND_WRITE_FIX
#include <asm/io.h>
#define __raw_readq efx_raw_readq
#endif
-#ifdef EFX_NEED_SCHEDULE_TIMEOUT_INTERRUPTIBLE
- static inline signed long
- schedule_timeout_interruptible(signed long timeout)
- {
- set_current_state(TASK_INTERRUPTIBLE);
- return schedule_timeout(timeout);
- }
-#endif
+typedef u32 __wsum;
+#define csum_unfold(x) ((__force __wsum) x)
-#ifdef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
- static inline signed long
- schedule_timeout_uninterruptible(signed long timeout)
- {
- set_current_state(TASK_UNINTERRUPTIBLE);
- return schedule_timeout(timeout);
- }
-#endif
-
-#ifdef EFX_NEED_MMIOWB
- #if defined(__i386__) || defined(__x86_64__)
- #define mmiowb()
- #elif defined(__ia64__)
- #ifndef ia64_mfa
- #define ia64_mfa() asm volatile ("mf.a" ::: "memory")
- #endif
- #define mmiowb ia64_mfa
- #else
- #error "Need definition for mmiowb()"
- #endif
-#endif
-
-#ifdef EFX_NEED_KZALLOC
- static inline void *kzalloc(size_t size, int flags)
- {
- void *buf = kmalloc(size, flags);
- if (buf)
- memset(buf, 0,size);
- return buf;
- }
-#endif
-
-#ifdef EFX_NEED_SETUP_TIMER
- static inline void setup_timer(struct timer_list * timer,
- void (*function)(unsigned long),
- unsigned long data)
- {
- timer->function = function;
- timer->data = data;
- init_timer(timer);
- }
-#endif
-
-#ifdef EFX_NEED_MUTEX
- #define EFX_DEFINE_MUTEX(x) DECLARE_MUTEX(x)
- #undef DEFINE_MUTEX
- #define DEFINE_MUTEX EFX_DEFINE_MUTEX
-
- #define efx_mutex semaphore
- #undef mutex
- #define mutex efx_mutex
-
- #define efx_mutex_init(x) init_MUTEX(x)
- #undef mutex_init
- #define mutex_init efx_mutex_init
-
- #define efx_mutex_destroy(x) do { } while(0)
- #undef mutex_destroy
- #define mutex_destroy efx_mutex_destroy
+#define DECLARE_MAC_BUF(var) char var[18] __attribute__((unused))
+extern char *print_mac(char *buf, const u8 *addr);
- #define efx_mutex_lock(x) down(x)
- #undef mutex_lock
- #define mutex_lock efx_mutex_lock
-
- #define efx_mutex_lock_interruptible(x) down_interruptible(x)
- #undef mutex_lock_interruptible
- #define mutex_lock_interruptible efx_mutex_lock_interruptible
-
- #define efx_mutex_unlock(x) up(x)
- #undef mutex_unlock
- #define mutex_unlock efx_mutex_unlock
-
- #define efx_mutex_trylock(x) (!down_trylock(x))
- #undef mutex_trylock
- #define mutex_trylock efx_mutex_trylock
-
- static inline int efx_mutex_is_locked(struct efx_mutex *m)
- {
- /* NB. This is quite inefficient, but it's the best we
- * can do with the semaphore API. */
- if ( down_trylock(m) )
- return 1;
- /* Undo the effect of down_trylock. */
- up(m);
- return 0;
- }
- #undef mutex_is_locked
- #define mutex_is_locked efx_mutex_is_locked
-#endif
-
-#ifndef NETIF_F_GSO
- #define efx_gso_size tso_size
- #undef gso_size
- #define gso_size efx_gso_size
- #define efx_gso_segs tso_segs
- #undef gso_segs
- #define gso_segs efx_gso_segs
-#endif
-
-#ifdef EFX_NEED_IRQF_FLAGS
- #ifdef SA_PROBEIRQ
- #define IRQF_PROBE_SHARED SA_PROBEIRQ
- #else
- #define IRQF_PROBE_SHARED 0
- #endif
- #define IRQF_SHARED SA_SHIRQ
-#endif
-
-#ifdef EFX_NEED_NETDEV_ALLOC_SKB
- #ifndef NET_SKB_PAD
- #define NET_SKB_PAD 16
- #endif
-
- static inline
- struct sk_buff *netdev_alloc_skb(struct net_device *dev,
- unsigned int length)
- {
- struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD,
- GFP_ATOMIC | __GFP_COLD);
- if (likely(skb)) {
- skb_reserve(skb, NET_SKB_PAD);
- skb->dev = dev;
- }
- return skb;
- }
-#endif
-
-#ifdef EFX_NEED_NETIF_TX_LOCK
- static inline void netif_tx_lock(struct net_device *dev)
- {
- spin_lock(&dev->xmit_lock);
- dev->xmit_lock_owner = smp_processor_id();
- }
- static inline void netif_tx_lock_bh(struct net_device *dev)
- {
- spin_lock_bh(&dev->xmit_lock);
- dev->xmit_lock_owner = smp_processor_id();
- }
- static inline void netif_tx_unlock_bh(struct net_device *dev)
- {
- dev->xmit_lock_owner = -1;
- spin_unlock_bh(&dev->xmit_lock);
- }
- static inline void netif_tx_unlock(struct net_device *dev)
- {
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->xmit_lock);
- }
-#endif
-
-#ifdef EFX_NEED_CSUM_UNFOLDED
- typedef u32 __wsum;
- #define csum_unfold(x) ((__force __wsum) x)
-#endif
-
-#ifdef EFX_NEED_HEX_DUMP
- enum {
- DUMP_PREFIX_NONE,
- DUMP_PREFIX_ADDRESS,
- DUMP_PREFIX_OFFSET
- };
-#endif
-
-#ifdef EFX_NEED_PRINT_MAC
- #define DECLARE_MAC_BUF(var) char var[18] __attribute__((unused))
-#endif
-
-#ifdef EFX_NEED_GFP_T
- typedef unsigned int gfp_t;
-#endif
-
-#ifdef EFX_NEED_SAFE_LISTS
- #define list_for_each_entry_safe_reverse(pos, n, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member), \
- n = list_entry(pos->member.prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, \
- n = list_entry(n->member.prev, typeof(*n), member))
-#endif
-
-#ifdef EFX_NEED_DEV_NOTICE
- #define dev_notice dev_warn
-#endif
-
-#ifdef EFX_NEED_IF_MII
- #include <linux/mii.h>
- static inline struct mii_ioctl_data *efx_if_mii ( struct ifreq *rq ) {
- return ( struct mii_ioctl_data * ) &rq->ifr_ifru;
- }
- #undef if_mii
- #define if_mii efx_if_mii
-#endif
-
-#ifdef EFX_NEED_MTD_ERASE_CALLBACK
- #include <linux/mtd/mtd.h>
- static inline void efx_mtd_erase_callback(struct erase_info *instr) {
- if ( instr->callback )
- instr->callback ( instr );
- }
- #undef mtd_erase_callback
- #define mtd_erase_callback efx_mtd_erase_callback
-#endif
-
-#ifdef EFX_NEED_DUMMY_PCI_DISABLE_MSI
- #include <linux/pci.h>
- static inline void dummy_pci_disable_msi ( struct pci_dev *dev ) {
- /* Do nothing */
- }
- #undef pci_disable_msi
- #define pci_disable_msi dummy_pci_disable_msi
-#endif
-
-#ifdef EFX_NEED_DUMMY_MSIX
- struct msix_entry {
- u16 vector; /* kernel uses to write allocated vector */
- u16 entry; /* driver uses to specify entry, OS writes */
- };
- static inline int pci_enable_msix(struct pci_dev* dev,
- struct msix_entry *entries, int nvec)
- {return -1;}
- static inline void pci_disable_msix(struct pci_dev *dev) { /* Do nothing */}
-#endif
-
-#ifdef EFX_NEED_BYTEORDER_TYPES
- typedef __u16 __be16;
- typedef __u32 __be32;
- typedef __u64 __be64;
- typedef __u16 __le16;
- typedef __u32 __le32;
- typedef __u64 __le64;
-#endif
-
-/**************************************************************************
- *
- * Missing functions provided by kernel_compat.c
- *
- **************************************************************************
- *
+/*
+ * queue_delayed_work() in kernels before 2.6.20 cannot re-arm a work item
+ * from inside its own work function, so instead we do a rather hacky sleep.
*/
-#ifdef EFX_NEED_RANDOM_ETHER_ADDR
- extern void efx_random_ether_addr(uint8_t *addr);
- #ifndef EFX_IN_KCOMPAT_C
- #undef random_ether_addr
- #define random_ether_addr efx_random_ether_addr
- #endif
-#endif
-
-#ifdef EFX_NEED_MII_ETHTOOL_FIX
- extern int efx_mii_ethtool_gset(struct mii_if_info *mii,
- struct ethtool_cmd *ecmd);
- extern int efx_mii_ethtool_sset(struct mii_if_info *mii,
- struct ethtool_cmd *ecmd);
- #ifndef EFX_IN_KCOMPAT_C
- #undef mii_ethtool_gset
- #define mii_ethtool_gset efx_mii_ethtool_gset
- #undef mii_ethtool_sset
- #define mii_ethtool_sset efx_mii_ethtool_sset
- #endif
-#endif
-
-#ifdef EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX
- extern int efx_unregister_netdevice_notifier(struct notifier_block *nb);
- #ifndef EFX_IN_KCOMPAT_C
- #undef unregister_netdevice_notifier
- #define unregister_netdevice_notifier \
- efx_unregister_netdevice_notifier
- #endif
-#endif
-
-#ifdef EFX_NEED_IOMMU_LOCK
- extern dma_addr_t efx_pci_map_single(struct pci_dev *pci, void *ptr,
- size_t size, int direction);
- extern void efx_pci_unmap_single(struct pci_dev *pci,
- dma_addr_t dma_addr, size_t size,
- int direction);
- extern void * efx_pci_alloc_consistent(struct pci_dev *pci,
- size_t size,
- dma_addr_t *dma_addr);
- extern void efx_pci_free_consistent(struct pci_dev *pci,
- size_t size, void *ptr,
- dma_addr_t dma_addr);
- #ifndef EFX_IN_KCOMPAT_C
- #undef pci_map_single
- #undef pci_unmap_single
- #undef pci_alloc_consistent
- #undef pci_free_consistent
- #define pci_map_single efx_pci_map_single
- #define pci_unmap_single efx_pci_unmap_single
- #define pci_alloc_consistent efx_pci_alloc_consistent
- #define pci_free_consistent efx_pci_free_consistent
- #endif
-#endif
-
-#ifdef EFX_NEED_PRINT_MAC
- extern char *print_mac(char *buf, const u8 *addr);
-#endif
-
-#ifdef EFX_NEED_COMPOUND_PAGE_FIX
- extern void efx_compound_page_destructor(struct page *page);
-#endif
-
-#ifdef EFX_NEED_HEX_DUMP
- extern void
- print_hex_dump(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- const void *buf, size_t len, int ascii);
-#endif
-
-#ifdef EFX_NEED_MSECS_TO_JIFFIES
- extern unsigned long msecs_to_jiffies(const unsigned int m);
-#endif
-
-#ifdef EFX_NEED_MSLEEP
- extern void msleep(unsigned int msecs);
-#endif
-
-/**************************************************************************
- *
- * Wrappers to fix bugs and parameter changes
- *
- **************************************************************************
- *
+#define delayed_work work_struct
+#define INIT_DELAYED_WORK INIT_WORK
+
+static inline int efx_queue_delayed_work(struct workqueue_struct *wq,
+ struct work_struct *work,
+ unsigned long delay)
+{
+ if (unlikely(delay > 0))
+ schedule_timeout_uninterruptible(delay);
+ return queue_work(wq, work);
+}
+#define queue_delayed_work efx_queue_delayed_work
+
+/*
+ * The old and new work-function prototypes differ only in the type of
+ * the pointer passed to the work function, so it is safe to cast
+ * between them.
*/
-
-#ifdef EFX_NEED_PCI_SAVE_RESTORE_WRAPPERS
- #define pci_save_state(_dev) \
- pci_save_state(_dev, (_dev)->saved_config_space)
-
- #define pci_restore_state(_dev) \
- pci_restore_state(_dev, (_dev)->saved_config_space)
-#endif
-
-#ifdef EFX_NEED_WORK_API_WRAPPERS
- /**
- * queue_delayed_work in pre 2.6.20 can't rearm from inside
- * the work member. So instead do a rather hacky sleep
- */
- #define delayed_work work_struct
- #define INIT_DELAYED_WORK INIT_WORK
-
- static int inline efx_queue_delayed_work(struct workqueue_struct *wq,
- struct work_struct *work,
- unsigned long delay)
- {
- if (unlikely(delay > 0))
- schedule_timeout_uninterruptible(delay);
- return queue_work(wq, work);
- }
- #define queue_delayed_work efx_queue_delayed_work
-
- /**
- * The old and new work-function prototypes just differ
- * in the type of the pointer returned, so it's safe
- * to cast between the prototypes.
- */
- typedef void (*efx_old_work_func_t)(void *p);
-
- #undef INIT_WORK
- #define INIT_WORK(_work, _func) \
- do { \
- INIT_LIST_HEAD(&(_work)->entry); \
- (_work)->pending = 0; \
- PREPARE_WORK((_work), \
- (efx_old_work_func_t) (_func), \
- (_work)); \
- } while (0)
-#endif
-
-#ifdef EFX_HAVE_OLD_NAPI
- #define napi_str napi_dev[0]
-
- static inline void netif_napi_add(struct net_device *dev,
- struct net_device *dummy,
- int (*poll) (struct net_device *,
- int *),
- int weight)
- {
- dev->weight = weight;
- dev->poll = poll;
- }
-
- #define napi_enable netif_poll_enable
- #define napi_disable netif_poll_disable
-
- #define netif_rx_complete(dev, dummy) netif_rx_complete(dev)
-#endif
-
-#ifdef EFX_NEED_COMPOUND_PAGE_FIX
- static inline
- struct page *efx_alloc_pages(gfp_t flags, unsigned int order)
- {
- struct page *p = alloc_pages(flags, order);
- if ((flags & __GFP_COMP) && (p != NULL) && (order > 0))
- p[1].mapping = (void *)efx_compound_page_destructor;
- return p;
- }
- #undef alloc_pages
- #define alloc_pages efx_alloc_pages
-
- static inline
- void efx_free_pages(struct page *p, unsigned int order)
- {
- if ((order > 0) && (page_count(p) == 1))
- p[1].mapping = NULL;
- __free_pages(p, order);
- }
- #define __free_pages efx_free_pages
-#endif
-
-#ifdef EFX_NEED_HEX_DUMP_CONST_FIX
- #define print_hex_dump(v,s,t,r,g,b,l,a) \
- print_hex_dump((v),(s),(t),(r),(g),(void*)(b),(l),(a))
-#endif
+typedef void (*efx_old_work_func_t)(void *p);
+
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+ do { \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->pending = 0; \
+ PREPARE_WORK((_work), \
+ (efx_old_work_func_t) (_func), \
+ (_work)); \
+ } while (0)
+
+#define napi_str napi_dev[0]
+
+static inline void netif_napi_add(struct net_device *dev,
+ struct net_device *dummy,
+ int (*poll) (struct net_device *, int *),
+ int weight)
+{
+ dev->weight = weight;
+ dev->poll = poll;
+}
+
+#define napi_enable netif_poll_enable
+#define napi_disable netif_poll_disable
+
+#define netif_rx_complete(dev, dummy) netif_rx_complete(dev)
#endif /* EFX_KERNEL_COMPAT_H */
}
/* This ought to be ridiculous overkill. We expect it to fail rarely */
-#define MDIO45_RESET_TIME HZ
-#define MDIO45_RESET_ITERS (100)
+#define MDIO45_RESET_TIME 1000 /* ms */
+#define MDIO45_RESET_ITERS 100
int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
unsigned int mmd_mask)
erase->state = MTD_ERASE_DONE;
} else {
erase->state = MTD_ERASE_FAILED;
-#if defined(EFX_USE_MTD_ERASE_FAIL_ADDR)
erase->fail_addr = 0xffffffff;
-#endif
}
mtd_erase_callback(erase);
return rc;
efx_mtd->mtd.size = spi->size;
efx_mtd->mtd.erasesize = spi->erase_size;
-#if defined(EFX_USE_MTD_WRITESIZE)
efx_mtd->mtd.writesize = 1;
-#endif
if (snprintf(efx_mtd->name, sizeof(efx_mtd->name),
"%s %s", efx->name, type_name) >=
sizeof(efx_mtd->name))
#include "driverlink.h"
#include "i2c-direct.h"
- #ifndef EFX_USE_DEBUGFS
- /* Sick, but we have no other use for dentry */
- #define dentry proc_dir_entry
- #endif
-
-#define EFX_MAX_LRO_DESCRIPTORS 8
-#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
+/* Sick, but we have no other use for dentry */
+#define dentry proc_dir_entry
/**************************************************************************
*
#ifndef EFX_DRIVER_NAME
#define EFX_DRIVER_NAME "sfc"
#endif
-#define EFX_DRIVER_VERSION "2.2.0101"
+#define EFX_DRIVER_VERSION "2.2.0204"
#ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif
+#define NET_DEV_REGISTERED(efx) \
+ ((efx)->net_dev && \
+ ((efx)->net_dev->reg_state == NETREG_REGISTERED))
+
/* Include net device name in log messages if it has been registered.
* Use efx->name not efx->net_dev->name so that races with (un)registration
* are harmless.
*/
-#define NET_DEV_NAME(efx) ((efx)->net_dev_registered ? (efx)->name : "")
+#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
/* Un-rate-limited logging */
#define EFX_ERR(efx, fmt, args...) \
};
+
+/**
+ * struct efx_ssr_conn - Connection state for Soft Segment Reassembly (SSR) aka LRO
+ * @link: Link for hash table and free list.
+ * @active_link: Link for active_conns list
+ * @saddr: Source IP address
+ * @daddr: Destination IP address
+ * @source: Source TCP port number
+ * @dest: Destination TCP port number
+ * @n_in_order_pkts: Number of in-order packets we've seen with payload.
+ * @next_seq: Next in-order sequence number.
+ * @last_pkt_jiffies: Time we last saw a packet on this connection.
+ * @skb: The SKB we are currently holding.
+ * If %NULL, then all following fields are undefined.
+ * @skb_tail: The tail of the frag_list of SKBs we're holding.
+ * Only valid after at least one merge.
+ * @eh: The ethernet header of the skb we are holding.
+ * @iph: The IP header of the skb we are holding.
+ * @th: The TCP header of the skb we are holding.
+ * @th_last: The TCP header of the last packet merged.
+ */
+struct efx_ssr_conn {
+ struct list_head link;
+ struct list_head active_link;
+ unsigned saddr, daddr;
+ unsigned short source, dest;
+ unsigned n_in_order_pkts;
+ unsigned next_seq;
+ unsigned long last_pkt_jiffies;
+ struct sk_buff *skb;
+ struct sk_buff *skb_tail;
+ struct ethhdr *eh;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ struct tcphdr *th_last;
+};
+
+/**
+ * struct efx_ssr_state - Port state for Soft Segment Reassembly (SSR) aka LRO
+ * @efx: The associated NIC.
+ * @conns_mask: Number of hash buckets - 1.
+ * @conns: Hash buckets for tracked connections.
+ * @conns_n: Length of linked list for each hash bucket.
+ * @active_conns: Connections that are holding a packet.
+ * Connections are self-linked when not in this list.
+ * @free_conns: Free efx_ssr_conn instances.
+ * @last_purge_jiffies: The value of jiffies last time we purged idle
+ * connections.
+ * @n_merges: Number of packets absorbed by SSR.
+ * @n_bursts: Number of bursts spotted by SSR.
+ * @n_slow_start: Number of packets not merged because connection may be in
+ * slow-start.
+ * @n_misorder: Number of out-of-order packets seen in tracked streams.
+ * @n_too_many: Incremented when we're trying to track too many streams.
+ * @n_new_stream: Number of distinct streams we've tracked.
+ * @n_drop_idle: Number of streams discarded because they went idle.
+ * @n_drop_closed: Number of streams that have seen a FIN or RST.
+ */
+struct efx_ssr_state {
+ struct efx_nic *efx;
+ unsigned conns_mask;
+ struct list_head *conns;
+ unsigned *conns_n;
+ struct list_head active_conns;
+ struct list_head free_conns;
+ unsigned long last_purge_jiffies;
+ unsigned n_merges;
+ unsigned n_bursts;
+ unsigned n_slow_start;
+ unsigned n_misorder;
+ unsigned n_too_many;
+ unsigned n_new_stream;
+ unsigned n_drop_idle;
+ unsigned n_drop_closed;
+};
+
+
/* Flags for channel->used_flags */
#define EFX_USED_BY_RX 1
#define EFX_USED_BY_TX 2
* @last_eventq_read_ptr: Last event queue read pointer value.
* @eventq_magic: Event queue magic value for driver-generated test events
* @debug_dir: debugfs directory
+ * @ssr: LRO/SSR state
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
* and diagnostic counters
* @rx_alloc_push_pages: RX allocation method currently in use for pushing
unsigned int has_interrupt;
unsigned int irq_moderation;
struct net_device *napi_dev;
-#if !defined(EFX_HAVE_OLD_NAPI)
- struct napi_struct napi_str;
-#endif
struct work_struct reset_work;
int work_pending;
struct efx_special_buffer eventq;
struct dentry *debug_dir;
#endif
+ struct efx_ssr_state ssr;
int rx_alloc_level;
int rx_alloc_push_pages;
int rx_alloc_pop_pages;
#define EFX_ISCLAUSE45(efx) ((efx)->phy_type != PHY_TYPE_1G_ALASKA)
enum nic_state {
- STATE_INIT = 0, /* suspend_lock always held */
+ STATE_INIT = 0,
STATE_RUNNING = 1,
STATE_FINI = 2,
- STATE_RESETTING = 3, /* suspend_lock always held */
+ STATE_RESETTING = 3, /* rtnl_lock always held */
STATE_DISABLED = 4,
STATE_MAX,
};
* This is the equivalent of NET_IP_ALIGN [which controls the alignment
* of the skb->head for hardware DMA].
*/
-#ifdef __ia64__
-#define EFX_PAGE_IP_ALIGN 2
-#else
+#if defined(__i386__) || defined(__x86_64__)
#define EFX_PAGE_IP_ALIGN 0
+#else
+#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
#endif
/*
* @mac_writel: Write dword to MAC register
* @mac_readl: Read dword from a MAC register
* @init: Initialise MAC and PHY
- * @reconfigure: Reconfigure MAC and PHY (e.g. for new link parameters)
+ * @reconfigure: Reconfigure MAC and PHY. Serialised by the mac_lock
* @update_stats: Update statistics
* @fini: Shut down MAC and PHY
- * @check_hw: Check hardware
+ * @check_hw: Check hardware. Serialised by the mac_lock
* @fake_phy_event: Simulate a PHY event on a port
- * @get_settings: Get ethtool settings
- * @set_settings: Set ethtool settings
- * @set_pause: Set pause parameters
+ * @get_settings: Get ethtool settings. Serialised by the mac_lock
+ * @set_settings: Set ethtool settings. Serialised by the mac_lock
+ * @set_pause: Set pause parameters. Serialised by the mac_lock
*/
struct efx_mac_operations {
void (*mac_writel) (struct efx_nic *efx,
/* An Efx multicast filter hash */
union efx_multicast_hash {
- u8 byte[EFX_MCAST_HASH_ENTRIES / sizeof(u8)];
- efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t)];
+ u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
+ efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
/* Efx Error condition statistics */
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
- * @pci_dev2: The secondary PCI device if present
* @type: Controller type attributes
- * @dma_mask: DMA mask
* @legacy_irq: IRQ number
* @workqueue: Workqueue for resets, port reconfigures and the HW monitor
- * @refill_workqueue: RX refill workqueue
* @reset_work: Scheduled reset workitem
* @monitor_work: Hardware monitor workitem
* @membase_phys: Memory BAR value as physical address
* @interrupt_mode: Interrupt mode
* @is_asic: Is ASIC (else FPGA)
* @is_10g: Is set to 10G (else 1G)
- * @external_sram_cfg: Size and number of banks of external SRAM
* @i2c: I2C interface
* @board_info: Board-level information
- * @state: Device state flag. Can only be manipulated when both
- * suspend_lock and rtnl_lock are held. Can be read when
- * either is held.
+ * @state: Device state flag. Serialised by the rtnl_lock.
* @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
- * @suspend_lock: Device suspend lock. This must not be acquired with
- * rtnl_lock held.
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
* @spi_lock: SPI bus lock
* @n_rx_nodesc_drop_cnt: RX no descriptor drop count
* @nic_data: Hardware dependant state
- * @mac_lock: MAC access lock. Protects efx->port_enabled/net_dev_registered
- * and efx_reconfigure_port()
+ * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and
+ * efx_reconfigure_port()
* @port_enabled: Port enabled indicator.
- * Serialises efx_stop_all and efx_start_all with kernel interfaces.
- * Safe to read under the rtnl_lock, mac_lock, or netif_tx_lock, but
- * all three must be held to modify it.
- * @net_dev_registered: Port is registered with operating system.
+ *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
+ *	efx_reconfigure_work() with kernel interfaces. Safe to read under any
+ * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
+ * be held to modify it.
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @rx_checksum_enabled: RX checksumming enabled
* @netif_stop_count: Port stop count
* @netif_stop_lock: Port stop lock
- * @mac_stats: MAC statistics
- * @stats: Net device statistics.
- * Hardware-specific code fills in @mac_stats, which provides a
- * detailed breakdown. Generic code aggregates these statistics
- * into a standard &struct net_device_stats.
+ * @mac_stats: MAC statistics. These include all statistics the MACs
+ * can provide. Generic code converts these into a standard
+ * &struct net_device_stats.
* @stats_buffer: DMA buffer for statistics
* @stats_lock: Statistics update lock
* @mac_op: MAC interface
struct efx_nic {
char name[IFNAMSIZ];
struct pci_dev *pci_dev;
- struct pci_dev *pci_dev2;
-#if !defined(EFX_USE_PCI_DEV_REVISION)
u8 revision;
-#endif
const struct efx_nic_type *type;
- dma_addr_t dma_mask;
int legacy_irq;
struct workqueue_struct *workqueue;
-#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
 /* Since we can't use cancel_delayed_work_sync, efx_reset() has to
* flush efx->workqueue to serialise against efx_reconfigure_port
* and efx_monitor. So it can't also run on workqueue */
struct workqueue_struct *reset_workqueue;
-#endif
- struct workqueue_struct *refill_workqueue;
struct work_struct reset_work;
struct delayed_work monitor_work;
unsigned long membase_phys;
enum efx_int_mode interrupt_mode;
unsigned int is_asic:1;
unsigned int is_10g:1;
- int external_sram_cfg;
struct efx_i2c_interface i2c;
struct efx_board board_info;
enum nic_state state;
enum reset_type reset_pending;
- struct semaphore suspend_lock;
-
struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
struct efx_channel channel[EFX_MAX_CHANNELS];
struct mutex mac_lock;
int port_enabled;
- int net_dev_registered;
int port_initialized;
struct net_device *net_dev;
int rx_checksum_enabled;
+ int lro_enabled;
atomic_t netif_stop_count;
spinlock_t netif_stop_lock;
/**
* struct efx_nic_type - Efx device type definition
- * @is_dual_func: Is dual-function (else single-function)
* @mem_bar: Memory BAR number
* @mem_map_size: Memory BAR mapped size
* @txd_ptr_tbl_base: TX descriptor ring base address
* descriptors
*/
struct efx_nic_type {
- unsigned int is_dual_func;
unsigned int mem_bar;
unsigned int mem_map_size;
unsigned int txd_ptr_tbl_base;
int link_ok = falcon_xaui_link_ok(efx);
/* Generate PHY event that a PHY would have generated */
- if (link_ok != efx->link_up) {
- efx->link_up = link_ok;
+ if (link_ok != efx->link_up)
efx->mac_op->fake_phy_event(efx);
- }
return 0;
}
/* CX4 is always 10000FD only */
efx->link_options = GM_LPA_10000FULL;
- falcon_null_phy_check_hw(efx);
+ efx->link_up = falcon_xaui_link_ok(efx);
}
struct efx_phy_operations falcon_null_phy_ops = {
int rc = 0;
int link_up = pm8358_link_ok(efx);
/* Simulate a PHY event if link state has changed */
- if (link_up != efx->link_up) {
- efx->link_up = link_up;
+ if (link_up != efx->link_up)
efx->mac_op->fake_phy_event(efx);
- }
return rc;
}
#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
-#define RX_ALLOC_FACTOR_SKB -2
+#define RX_ALLOC_FACTOR_SKB (-2)
/* This is the percentage fill level below which new RX descriptors
* will be added to the RX descriptor ring.
}
}
-inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
/* Unmap for DMA */
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
rx_queue->added_count - rx_queue->removed_count);
out:
- /* Send write pointer to card. */
+ /* Send write pointer to card. */
falcon_notify_rx_desc(rx_queue);
/* If the fast fill is running inside from the refill tasklet, then
* that work is immediately pending to free some memory
* (e.g. an RX event or TX completion)
*/
- queue_delayed_work(rx_queue->efx->refill_workqueue,
- &rx_queue->work, 0);
+ efx_schedule_slow_fill(rx_queue, 0);
}
}
struct efx_rx_queue *rx_queue;
int rc;
-#if !defined(EFX_NEED_WORK_API_WRAPPERS)
- rx_queue = container_of(data, struct efx_rx_queue, work.work);
-#else
rx_queue = container_of(data, struct efx_rx_queue, work);
-#endif
if (unlikely(!rx_queue->channel->enabled))
return;
/* Push new RX descriptors, allowing at least 1 jiffy for
* the kernel to free some more memory. */
rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
- if (rc) {
- queue_delayed_work(rx_queue->efx->refill_workqueue,
- &rx_queue->work, 1);
- }
+ if (rc)
+ efx_schedule_slow_fill(rx_queue, 1);
}
static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
return skb;
}
-#if defined(EFX_USE_FASTCALL)
void fastcall efx_rx_packet(struct efx_rx_queue *rx_queue,
unsigned int index, unsigned int len,
int checksummed, int discard)
-#else
-void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, int checksummed, int discard)
-#endif
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
struct efx_nic *efx = channel->efx;
enum efx_veto veto;
struct sk_buff *skb;
+ int lro = efx->lro_enabled;
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
* changed, then flush the LRO state.
*/
if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
+ efx_flush_lro(channel);
channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
}
+ if (likely(checksummed && lro)) {
+ if (efx_ssr(&channel->ssr, rx_buf)) {
+ channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+ goto done;
+ }
+ }
/* Allow callback to veto the packet */
veto = EFX_DL_CALLBACK(efx, rx_packet, rx_buf->data, rx_buf->len);
enum efx_rx_alloc_method method = rx_alloc_method;
/* Only makes sense to use page based allocation if LRO is enabled */
- if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
+ if (!(channel->efx->lro_enabled)) {
method = RX_ALLOC_METHOD_SKB;
} else if (method == RX_ALLOC_METHOD_AUTO) {
/* Constrain the rx_alloc_level */
EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
- ASSERT_RTNL();
-
/* Initialise ptr fields */
rx_queue->added_count = 0;
rx_queue->notified_count = 0;
EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
- ASSERT_RTNL();
-
/* Flush RX queue and remove descriptor ring */
falcon_fini_rx(rx_queue);
rx_queue->used = 0;
}
+/* Flush LRO/SSR state for the given channel */
+void efx_flush_lro(struct efx_channel *channel)
+{
+ efx_ssr_end_of_burst(&channel->ssr);
+}
+
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring fast/slow fill threshold (%)");
+
+
+/* Size of the LRO hash table. Must be a power of 2. A larger table
+ * means we can accelerate a larger number of streams.
+ */
+static unsigned lro_table_size = 128;
+module_param(lro_table_size, uint, 0644);
+MODULE_PARM_DESC(lro_table_size,
+ "Size of the LRO hash table. Must be a power of 2");
+
+/* Maximum length of a hash chain. If chains get too long then the lookup
+ * time increases and may exceed the benefit of LRO.
+ */
+static unsigned lro_chain_max = 20;
+module_param(lro_chain_max, uint, 0644);
+MODULE_PARM_DESC(lro_chain_max,
+ "Maximum length of chains in the LRO hash table");
+
+
+/* Maximum time (in jiffies) that a connection can be idle before its LRO
+ * state is discarded.
+ */
+static unsigned lro_idle_jiffies = HZ / 10 + 1; /* 100ms */
+module_param(lro_idle_jiffies, uint, 0644);
+MODULE_PARM_DESC(lro_idle_jiffies, "Time (in jiffies) after which an"
+ " idle connection's LRO state is discarded");
+
+
+/* Number of packets with payload that must arrive in-order before a
+ * connection is eligible for LRO. The idea is we should avoid coalescing
+ * segments when the sender is in slow-start because reducing the ACK rate
+ * can damage performance.
+ */
+static unsigned lro_slow_start_packets = 20;
+module_param(lro_slow_start_packets, uint, 0644);
+MODULE_PARM_DESC(lro_slow_start_packets, "Number of packets that must "
+ "pass in-order before starting LRO.");
+
+
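+/* Allocate and initialise the per-port SSR state: one hash bucket list per
+ * possible connection hash, plus the active and free connection lists. */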
+int efx_ssr_init(struct efx_ssr_state *st, struct efx_nic *efx)
+{
+ unsigned i;
+ st->conns_mask = lro_table_size - 1;
+ if ((st->conns_mask + 1) & st->conns_mask) {
+ EFX_ERR(efx, "lro_table_size(=%u) must be a power of 2\n",
+ lro_table_size);
+ return -EINVAL;
+ }
+ st->efx = efx;
+ st->conns = kmalloc((st->conns_mask + 1)
+ * sizeof(st->conns[0]), GFP_KERNEL);
+ if (st->conns == NULL)
+ return -ENOMEM;
+ st->conns_n = kmalloc((st->conns_mask + 1)
+ * sizeof(st->conns_n[0]), GFP_KERNEL);
+ if (st->conns_n == NULL) {
+ kfree(st->conns);
+ st->conns = NULL;
+ return -ENOMEM;
+ }
+ for (i = 0; i <= st->conns_mask; ++i) {
+ INIT_LIST_HEAD(&st->conns[i]);
+ st->conns_n[i] = 0;
+ }
+ INIT_LIST_HEAD(&st->active_conns);
+ INIT_LIST_HEAD(&st->free_conns);
+ return 0;
+}
+
+/* Drop the given connection, and add it to the free list */
+static inline void efx_ssr_drop(struct efx_ssr_state *st,
+ struct efx_ssr_conn *c, unsigned conn_hash)
+{
+ EFX_BUG_ON_PARANOID(c->skb);
+ EFX_BUG_ON_PARANOID(st->conns_n[conn_hash] <= 0);
+ --st->conns_n[conn_hash];
+ list_del(&c->link);
+ list_add(&c->link, &st->free_conns);
+}
+
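+/* Tear down the SSR state: drop any tracked connections, free the
+ * connection pool and release the hash table. */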
+void efx_ssr_fini(struct efx_ssr_state *st)
+{
+ struct efx_ssr_conn *c;
+ unsigned i;
+
+ /* Return cleanly if efx_ssr_init() has not been called. */
+ if (st->conns == NULL)
+ return;
+
+ EFX_BUG_ON_PARANOID(!list_empty(&st->active_conns));
+
+ for (i = 0; i <= st->conns_mask; ++i)
+ while (!list_empty(&st->conns[i])) {
+ c = list_entry(st->conns[i].prev,
+ struct efx_ssr_conn, link);
+ efx_ssr_drop(st, c, i);
+ }
+
+ while (!list_empty(&st->free_conns)) {
+ c = list_entry(st->free_conns.prev, struct efx_ssr_conn, link);
+ list_del(&c->link);
+ EFX_BUG_ON_PARANOID(c->skb);
+ kfree(c);
+ }
+
+ kfree(st->conns_n);
+ kfree(st->conns);
+ st->conns = NULL;
+}
+
+/* Calc IP checksum and deliver to the OS */
+static void efx_ssr_deliver(struct efx_ssr_state *st, struct efx_ssr_conn *c)
+{
+ struct efx_nic *efx = st->efx;
+ int veto, len;
+
+ EFX_BUG_ON_PARANOID(!c->skb);
+
+ ++st->n_bursts;
+
+ /* Finish off packet munging and recalculate IP header checksum. */
+ c->iph->tot_len = htons(c->iph->tot_len);
+ c->iph->check = 0;
+ c->iph->check = ip_fast_csum((u8 *) c->iph, c->iph->ihl);
+
+ len = c->skb->len + ((char *)c->iph - (char *)c->eh);
+ c->skb->truesize = len + sizeof(struct sk_buff);
+
+ c->th->window = c->th_last->window;
+ c->th->ack_seq = c->th_last->ack_seq;
+ if (c->th->doff == c->th_last->doff) {
+ /* Copy TCP options (take care to avoid going negative). */
+ len = ((c->th->doff - 5) & 0xf) << 2u;
+ memcpy(c->th + 1, c->th_last + 1, len);
+ }
+
+ /* Allow callback to veto the packet. */
+ veto = EFX_DL_CALLBACK(efx, rx_packet, (char *)c->eh, len);
+ if (unlikely(veto)) {
+ EFX_LOG(efx, "RX vetoed by driverlink %s driver\n",
+ efx->dl_cb_dev.rx_packet->driver->name);
+ dev_kfree_skb_any(c->skb);
+ } else {
+ netif_receive_skb(c->skb);
+ }
+
+ c->skb = NULL;
+ list_del_init(&c->active_link);
+}
+
+/* Stop tracking connections that have gone idle in order to keep hash
+ * chains short.
+ */
+static void efx_ssr_purge_idle(struct efx_ssr_state *st, unsigned now)
+{
+ struct efx_ssr_conn *c;
+ unsigned i;
+
+ EFX_BUG_ON_PARANOID(!list_empty(&st->active_conns));
+
+ st->last_purge_jiffies = now;
+ for (i = 0; i <= st->conns_mask; ++i) {
+ if (list_empty(&st->conns[i]))
+ continue;
+
+ c = list_entry(st->conns[i].prev, struct efx_ssr_conn, link);
+ if (now - c->last_pkt_jiffies > lro_idle_jiffies) {
+ ++st->n_drop_idle;
+ efx_ssr_drop(st, c, i);
+ }
+ }
+}
+
+/* Push held skbs down into network stack.
+ * Only called when active list is non-empty.
+ */
+void __efx_ssr_end_of_burst(struct efx_ssr_state *st)
+{
+ struct efx_ssr_conn *c;
+ unsigned j;
+
+ EFX_BUG_ON_PARANOID(list_empty(&st->active_conns));
+
+ do {
+ c = list_entry(st->active_conns.next, struct efx_ssr_conn,
+ active_link);
+ EFX_BUG_ON_PARANOID(!c->skb);
+ efx_ssr_deliver(st, c);
+ } while (!list_empty(&st->active_conns));
+
+ j = jiffies;
+ if (unlikely(j != st->last_purge_jiffies))
+ efx_ssr_purge_idle(st, j);
+}
+
+/* Update the connection's held skb after a new segment has been attached
+ * to it. Returns non-zero if the merged skb should be delivered to the
+ * stack now.
+ */
+static inline int
+efx_ssr_merge(struct efx_ssr_state *st, struct efx_ssr_conn *c,
+ struct tcphdr *th, int data_length)
+{
+ /* Increase lengths appropriately */
+ c->skb->len += data_length;
+ c->skb->data_len += data_length;
+
+ /*
+ * Keep track of max MSS seen and store in
+ * gso_size for kernel to use
+ */
+ if (data_length > skb_shinfo(c->skb)->gso_size)
+ skb_shinfo(c->skb)->gso_size = data_length;
+
+ /* Update the connection state flags */
+ c->iph->tot_len += data_length;
+ c->th->psh |= th->psh;
+ c->th_last = th;
+ ++st->n_merges;
+
+ /* Pass packet up now if another segment could overflow the IP
+ * length.
+ */
+ return (c->skb->len > 65536 - 9200);
+}
+
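+/* Begin holding a packet for this connection: seed gso_size from the first
+ * segment, switch tot_len to host byte order for accumulation, and move the
+ * connection to the head of the active list. */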
+static inline void
+efx_ssr_start(struct efx_ssr_state *st, struct efx_ssr_conn *c,
+ struct tcphdr *th, int data_length)
+{
+ /* Initialise gso_size appropriately */
+ skb_shinfo(c->skb)->gso_size = data_length;
+
+ /* Mangle header fields for later processing */
+ c->iph->tot_len = ntohs(c->iph->tot_len);
+
+ /* Move this connection the head of the active list */
+ list_del(&c->active_link);
+ list_add(&c->active_link, &st->active_conns);
+}
+
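+/* Merge a page-based RX buffer into the connection: append the payload as a
+ * page fragment of the held skb, or start a new held skb if there is none.
+ * Returns non-zero if the buffer was consumed. */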
+static inline int
+efx_ssr_conn_page(struct efx_ssr_state *st, struct efx_ssr_conn *c,
+ struct efx_rx_buffer *rx_buf, struct tcphdr *th,
+ int hdr_length, int data_length)
+{
+ if (likely(c->skb)) {
+ struct skb_frag_struct *frag;
+ frag = skb_shinfo(c->skb)->frags;
+ frag += skb_shinfo(c->skb)->nr_frags;
+ frag->page = rx_buf->page;
+ frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_length;
+ frag->size = data_length;
+ ++skb_shinfo(c->skb)->nr_frags;
+ rx_buf->page = NULL;
+
+ if (efx_ssr_merge(st, c, th, data_length) ||
+ (skb_shinfo(c->skb)->nr_frags == MAX_SKB_FRAGS))
+ efx_ssr_deliver(st, c);
+
+ return 1;
+ } else {
+ c->skb = efx_rx_mk_skb(rx_buf, st->efx, hdr_length);
+ if (unlikely(c->skb == NULL))
+ return 0;
+
+ c->eh = eth_hdr(c->skb);
+ c->iph = (struct iphdr *)c->skb->data;
+ c->th = (struct tcphdr *)((u8 *) c->iph + c->iph->ihl * 4);
+ c->th_last = c->th;
+
+ efx_ssr_start(st, c, th, data_length);
+
+ return 1;
+ }
+}
+
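+/* Merge an skb-based RX buffer into the connection: chain the new skb onto
+ * the held skb's frag_list, or adopt it as the held skb if there is none. */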
+static inline void
+efx_ssr_conn_skb(struct efx_ssr_state *st, struct efx_ssr_conn *c,
+ struct efx_rx_buffer *rx_buf, struct ethhdr *eh,
+ struct iphdr *iph, struct tcphdr *th, int data_length)
+{
+ /* Transfer ownership of the rx_buf->skb to the LRO chain */
+ struct sk_buff *skb = rx_buf->skb;
+ rx_buf->skb = NULL;
+
+ if (likely(c->skb)) {
+ /* Remove the headers */
+ skb_pull(skb, skb->len - data_length);
+
+ /* Tack the new skb onto the head skb's frag_list. */
+ EFX_BUG_ON_PARANOID(skb->next);
+ if (!skb_shinfo(c->skb)->frag_list)
+ skb_shinfo(c->skb)->frag_list = skb;
+ else
+ c->skb_tail->next = skb;
+ c->skb_tail = skb;
+
+ if (efx_ssr_merge(st, c, th, data_length))
+ efx_ssr_deliver(st, c);
+ } else {
+ c->skb = skb;
+ c->eh = eh;
+ c->iph = iph;
+ c->th = th;
+ c->th_last = th;
+
+ efx_ssr_start(st, c, th, data_length);
+ }
+}
+
+/* Examine a received buffer and decide whether to absorb it into a tracked
+ * LRO connection or leave it for the caller to deliver now. Returns
+ * non-zero if the buffer was consumed.
+ */
+int efx_ssr(struct efx_ssr_state *st, struct efx_rx_buffer *rx_buf)
+{
+ int eh_proto, data_length, hdr_length, dont_merge;
+ struct efx_ssr_conn *c;
+ struct ethhdr *eh;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ unsigned th_seq, conn_hash, pkt_length;
+
+	/* VLAN-tagged packets are not handled here */
+ /* Find the IP header. The ethernet header is always at rx_buf->data */
+ eh = (struct ethhdr *)rx_buf->data;
+ if (rx_buf->page) {
+ eh_proto = eh->h_proto;
+ iph = (struct iphdr *)(eh + 1);
+ } else {
+ /* The skb head is at the IP header */
+ eh_proto = rx_buf->skb->protocol;
+ iph = (struct iphdr *)rx_buf->skb->data;
+ }
+
+ /* We're not interested if it isn't TCP over IPv4, or if fragged. */
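+	/* Combining the checks with subtraction and OR yields a non-zero
+	 * value unless the frame is unfragmented TCP over IPv4, so all
+	 * three tests share a single branch. */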
+ if ((eh_proto - htons(ETH_P_IP)) |
+ (iph->protocol - IPPROTO_TCP) |
+ (iph->frag_off & htons(IP_MF | IP_OFFSET)))
+ return 0;
+
+	/* Locate the TCP header */
+ th = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
+ hdr_length = (u8 *) th + th->doff * 4 - (u8 *) eh;
+	/* Cope with Ethernet padding after the IP datagram */
+ pkt_length = ntohs(iph->tot_len) + (u8 *)iph - (u8 *)eh;
+ rx_buf->len = min(pkt_length, rx_buf->len);
+ data_length = rx_buf->len - hdr_length;
+ th_seq = ntohl(th->seq);
+ dont_merge = ((data_length <= 0)
+ | th->urg | th->syn | th->rst | th->fin);
+
+ /* Very cheap and crude hash. */
+ conn_hash = (th->source ^ th->dest) & st->conns_mask;
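+	/* The hash only mixes the TCP ports; the loop below still compares
+	 * the full address/port 4-tuple, so a collision only costs an extra
+	 * comparison. */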
+
+ list_for_each_entry(c, &st->conns[conn_hash], link) {
+ if ((c->saddr - iph->saddr) | (c->daddr - iph->daddr) |
+ (c->source - th->source) | (c->dest - th->dest))
+ continue;
+
+ /* Re-insert at head of list to reduce lookup time. */
+ list_del(&c->link);
+ list_add(&c->link, &st->conns[conn_hash]);
+
+ if (unlikely(th_seq - c->next_seq)) {
+ /* Out-of-order, so start counting again. */
+ if (c->skb)
+ efx_ssr_deliver(st, c);
+ c->n_in_order_pkts = 0;
+ c->next_seq = th_seq + data_length;
+ ++st->n_misorder;
+ return 0;
+ }
+ c->next_seq = th_seq + data_length;
+ c->last_pkt_jiffies = jiffies;
+
+ if (c->n_in_order_pkts < lro_slow_start_packets) {
+ /* May be in slow-start, so don't merge. */
+ ++st->n_slow_start;
+ ++c->n_in_order_pkts;
+ return 0;
+ }
+
+ if (unlikely(dont_merge)) {
+ if (c->skb)
+ efx_ssr_deliver(st, c);
+ if (th->fin || th->rst) {
+ ++st->n_drop_closed;
+ efx_ssr_drop(st, c, conn_hash);
+ }
+ return 0;
+ }
+
+ if (rx_buf->page) {
+ return efx_ssr_conn_page(st, c, rx_buf, th, hdr_length,
+ data_length);
+ } else {
+ efx_ssr_conn_skb(st, c, rx_buf, eh, iph, th,
+ data_length);
+ return 1;
+ }
+ }
+
+ /* We're not yet tracking this connection. */
+ if (dont_merge)
+ return 0;
+
+ if (st->conns_n[conn_hash] >= lro_chain_max) {
+ ++st->n_too_many;
+ return 0;
+ }
+
+ if (!list_empty(&st->free_conns)) {
+ c = list_entry(st->free_conns.next, struct efx_ssr_conn, link);
+ list_del(&c->link);
+ } else {
+ c = kmalloc(sizeof(*c), GFP_ATOMIC);
+ if (c == NULL)
+ return 0;
+ c->skb = NULL;
+ INIT_LIST_HEAD(&c->active_link);
+ }
+
+ /* Create the connection tracking data */
+ ++st->conns_n[conn_hash];
+ list_add(&c->link, &st->conns[conn_hash]);
+ c->saddr = iph->saddr;
+ c->daddr = iph->daddr;
+ c->source = th->source;
+ c->dest = th->dest;
+ c->next_seq = th_seq + data_length;
+ c->n_in_order_pkts = 0;
+ EFX_BUG_ON_PARANOID(c->skb);
+ ++st->n_new_stream;
+ return 0;
+}
+
+
#ifndef EFX_RX_H
#define EFX_RX_H
+#include <linux/skbuff.h>
#include "net_driver.h"
int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_flush_lro(struct efx_channel *channel);
void efx_rx_strategy(struct efx_channel *channel);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
void efx_rx_work(struct work_struct *data);
struct efx_rx_buffer *rx_buf, int checksummed);
+
+extern int efx_ssr_init(struct efx_ssr_state *st, struct efx_nic *efx);
+extern void efx_ssr_fini(struct efx_ssr_state *st);
+
+extern void __efx_ssr_end_of_burst(struct efx_ssr_state *st);
+extern int efx_ssr(struct efx_ssr_state *st, struct efx_rx_buffer *rx_buf);
+
+
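+/* Called at the end of an RX event burst; delivers any held packets and is
+ * a cheap no-op when no connections are currently holding one. */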
+static inline void efx_ssr_end_of_burst(struct efx_ssr_state *st)
+{
+ if (!list_empty(&st->active_conns))
+ __efx_ssr_end_of_burst(st);
+}
+
+
#endif /* EFX_RX_H */
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
-#include <asm/io.h>
#include "net_driver.h"
#include "ethtool.h"
#include "efx.h"
if (channel->eventq_magic == magic)
goto eventq_ok;
- }
- while (++count < 2);
+ } while (++count < 2);
EFX_ERR(channel->efx, "channel %d timed out in %ld jiffies waiting for"
" event queue\n", channel->channel, jiffies - j_start);
struct efx_tx_queue *tx_queue,
struct efx_loopback_self_tests *lb_tests)
{
-#if !defined(EFX_HAVE_OLD_NAPI)
- struct efx_channel *channel;
-#endif
struct efx_selftest_state *state = efx->loopback_selftest;
struct efx_loopback_payload *payload;
struct sk_buff *skb;
udelay(10);
}
-#if !defined(EFX_HAVE_OLD_NAPI)
- /* NAPI polling is not enabled, so process channels synchronously */
- schedule_timeout_uninterruptible(HZ / 50);
- efx_for_each_channel_with_interrupt(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- }
-#else
/* Allow time for processing */
schedule_timeout_uninterruptible(HZ / 10);
-#endif
if (state->flush)
goto out3;
struct ethtool_cmd ecmd, ecmd_loopback;
struct efx_tx_queue *tx_queue;
enum efx_loopback_mode old_mode, mode;
- int old_powered, count, rc = 0;
+ int old_powered, count, rc = 0, link_up;
int retry = EFX_WORKAROUND_8909(efx);
/* Get current PHY settings */
state->flush = 1;
efx->phy_powered = 1;
efx->loopback_mode = mode;
- efx_reconfigure_port(efx, 0);
+ efx_reconfigure_port(efx);
/* Wait for the PHY to signal the link is up */
count = 0;
/* Wait for PHY events to be processed */
flush_workqueue(efx->workqueue);
rmb();
- } while ((++count < 20) && !efx->link_up);
+
+	/* efx->link_up can be 1 even if the XAUI link is down
+	 * (bug 5762). Usually it's not worth bothering with the
+	 * difference, but for selftests we need the extra
+	 * guarantee that the link really is up.
+ */
+ link_up = efx->link_up;
+ if (EFX_IS10G(efx) && !falcon_xaui_link_ok(efx))
+ link_up = 0;
+
+ } while ((++count < 20) && !link_up);
/* The link should now be up. If it isn't, there is no point
* in attempting a loopback test */
- if (!efx->link_up) {
+ if (!link_up) {
EFX_ERR(efx, "loopback %s never came up\n",
LOOPBACK_MODE(efx));
rc = -EIO;
state->flush = 1;
efx->loopback_mode = first;
- efx_reconfigure_port(efx, 0);
+ efx_reconfigure_port(efx);
retry = rc = 0;
--mode;
struct efx_channel *channel;
int rc = 0;
- ASSERT_RTNL();
-
EFX_LOG(efx, "performing online self-tests\n");
rc |= efx_test_interrupts(efx, tests);
struct efx_selftest_state *state;
int rc = 0;
- ASSERT_RTNL();
-
EFX_LOG(efx, "performing offline self-tests\n");
/* Create a selftest_state structure to hold state for the test */
#define efhw_nic_close_hardware(nic) \
((nic)->efhw_func->close_hardware(nic))
-#define efhw_nic_init_hardware(nic, ev_handlers, mac_addr) \
- ((nic)->efhw_func->init_hardware((nic), (ev_handlers), (mac_addr)))
+#define efhw_nic_init_hardware(nic, ev_handlers, mac_addr, non_irq_evq) \
+ ((nic)->efhw_func->init_hardware((nic), (ev_handlers), (mac_addr), \
+ (non_irq_evq)))
/*-------------- Interrupt support ------------ */
/** Handle interrupt. Return 0 if not handled, 1 if handled. */
#define efhw_nic_interrupt(nic) \
((nic)->efhw_func->interrupt(nic))
-#define efhw_nic_interrupt_enable(nic, index) \
- ((nic)->efhw_func->interrupt_enable(nic, index))
+#define efhw_nic_interrupt_enable(nic) \
+ ((nic)->efhw_func->interrupt_enable(nic))
-#define efhw_nic_interrupt_disable(nic, index) \
- ((nic)->efhw_func->interrupt_disable(nic, index))
+#define efhw_nic_interrupt_disable(nic) \
+ ((nic)->efhw_func->interrupt_disable(nic))
-#define efhw_nic_set_interrupt_moderation(nic, index, val) \
- ((nic)->efhw_func->set_interrupt_moderation(nic, index, val))
+#define efhw_nic_set_interrupt_moderation(nic, val) \
+ ((nic)->efhw_func->set_interrupt_moderation(nic, val))
/*-------------- Event support ------------ */
};
static inline void
-falcon_write_ddd_d(efhw_ioaddr_t kva,
+falcon_write_ddd_d(volatile char __iomem *kva,
uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3)
{
writel(d0, kva + 0);
writel(d3, kva + 12);
}
-static inline void falcon_write_q(efhw_ioaddr_t kva, uint64_t q)
+static inline void falcon_write_q(volatile char __iomem *kva, uint64_t q)
{
union __u64to32 u;
u.u64 = q;
writel(u.s.b, kva + 4);
}
-static inline void falcon_read_q(efhw_ioaddr_t addr, uint64_t *q0)
+static inline void falcon_read_q(volatile char __iomem *addr, uint64_t *q0)
{
/* It is essential that we read dword0 first, so that
* the shadow register is updated with the latest value
}
static inline void
-falcon_write_qq(efhw_ioaddr_t kva, uint64_t q0, uint64_t q1)
+falcon_write_qq(volatile char __iomem *kva, uint64_t q0, uint64_t q1)
{
writeq(q0, kva + 0);
falcon_write_q(kva + 8, q1);
}
static inline void
-falcon_read_qq(efhw_ioaddr_t addr, uint64_t *q0, uint64_t *q1)
+falcon_read_qq(volatile char __iomem *addr, uint64_t *q0, uint64_t *q1)
{
falcon_read_q(addr, q0);
*q1 = readq(addr + 8);
/* Falcon nails down the event queue mappings */
#define FALCON_EVQ_KERNEL0 (0) /* hardwired for net driver */
#define FALCON_EVQ_CHAR (4) /* char driver's event queue */
-#define FALCON_EVQ_NONIRQ (5) /* char driver's non interrupting
- queue. Subsequent queues are
- available for user apps */
/* reserved by the drivers */
#define FALCON_EVQ_TBL_RESERVED (8)
*
*---------------------------------------------------------------------------*/
-static inline void falcon_deadbeef(efhw_ioaddr_t efhw_kva, unsigned what)
+static inline void
+falcon_deadbeef(volatile char __iomem *efhw_kva, unsigned what)
{
writel(what, efhw_kva + 0x300);
mmiowb();
/*! Constants for the type field in efx_vi_hw_resource */
#define EFX_VI_HW_RESOURCE_TXDMAQ 0x0 /* PFN of TX DMA Q */
#define EFX_VI_HW_RESOURCE_RXDMAQ 0x1 /* PFN of RX DMA Q */
-#define EFX_VI_HW_RESOURCE_TXBELL 0x2 /* PFN of TX Doorbell (EF1) */
-#define EFX_VI_HW_RESOURCE_RXBELL 0x3 /* PFN of RX Doorbell (EF1) */
#define EFX_VI_HW_RESOURCE_EVQTIMER 0x4 /* Address of event q timer */
/* Address of event q pointer (EF1) */
* Metadata concerning the list of hardware resource mappings
*/
struct efx_vi_hw_resource_metadata {
- int version;
int evq_order;
int evq_offs;
int evq_capacity;
#ifndef __CI_DRIVER_RESOURCE_LINUX_RESOURCE__
#define __CI_DRIVER_RESOURCE_LINUX_RESOURCE__
-#ifndef __linux__
-# error Silly
-#endif
-#ifndef __KERNEL__
-# error Silly
-#endif
#include <ci/efhw/efhw_types.h>
#include <linux/interrupt.h>
uint32_t a;
uint32_t b;
} opaque;
- struct {
- uint32_t code;
- uint32_t status;
- } ev1002;
} efhw_event_t;
/* Flags for TX/RX queues */
/* Linux kernel also does not provide PRIx32... Sigh. */
#define PRIx32 "x"
-
+
#ifdef __ia64__
# define PRIx64 "lx"
#else
#include <ci/efhw/iopage_types.h>
#include <ci/efhw/sysdep.h>
-/*--------------------------------------------------------------------
- *
- * hardware limits used in the types
- *
- *--------------------------------------------------------------------*/
-
-#define EFHW_KEVENTQ_MAX 8
-
/*--------------------------------------------------------------------
*
* forward type declarations
struct eventq_resource_hardware {
/*!iobuffer allocated for eventq - can be larger than eventq */
- efhw_iopages_t iobuff;
+ struct efhw_iopages iobuff;
unsigned iobuff_off;
struct efhw_buffer_table_allocation buf_tbl_alloc;
int capacity; /*!< capacity of event queue */
*--------------------------------------------------------------------*/
struct efhw_keventq {
- volatile int lock;
+ int lock;
caddr_t evq_base;
int32_t evq_ptr;
uint32_t evq_mask;
/*! initialise all hardware functional units */
int (*init_hardware) (struct efhw_nic *nic,
struct efhw_ev_handler *,
- const uint8_t *mac_addr);
+ const uint8_t *mac_addr, int non_irq_evq);
/*-------------- Interrupt support ------------ */
*/
int (*interrupt) (struct efhw_nic *nic);
- /*! Enable given interrupt mask for the given IRQ unit */
- void (*interrupt_enable) (struct efhw_nic *nic, uint idx);
+ /*! Enable the interrupt */
+ void (*interrupt_enable) (struct efhw_nic *nic);
- /*! Disable given interrupt mask for the given IRQ unit */
- void (*interrupt_disable) (struct efhw_nic *nic, uint idx);
+ /*! Disable the interrupt */
+ void (*interrupt_disable) (struct efhw_nic *nic);
/*! Set interrupt moderation strategy for the given IRQ unit
** val is in usec
*/
void (*set_interrupt_moderation)(struct efhw_nic *nic,
- uint idx, uint val);
+ uint val);
/*-------------- Event support ------------ */
/*! */
struct efhw_nic {
- /*! zero base index in efrm_nic_table.nic array */
- volatile int index;
+	/*! zero-based index in efrm_nic_tablep->nic array */
+ int index;
int ifindex; /*!< OS level nic index */
#ifdef HAS_NET_NAMESPACE
struct net *nd_net;
/* hardware resources */
/*! I/O address of the start of the bar */
- efhw_ioaddr_t bar_ioaddr;
+ volatile char __iomem *bar_ioaddr;
/*! Bar number of control aperture. */
unsigned ctr_ap_bar;
void (*irq_handler) (struct efhw_nic *, int unit);
/*! event queues per driver */
- struct efhw_keventq evq[EFHW_KEVENTQ_MAX];
+ struct efhw_keventq interrupting_evq;
/* for marking when we are not using an IRQ unit
- 0 is a valid offset to an IRQ unit on EF1! */
#define EFHW_IRQ_UNIT_UNUSED 0xffff
- /*! interrupt unit in use */
- unsigned int irq_unit[EFHW_KEVENTQ_MAX];
- efhw_iopage_t irq_iobuff; /*!< Falcon SYSERR interrupt */
+ /*! interrupt unit in use for the interrupting event queue */
+ unsigned int irq_unit;
+
+ struct efhw_keventq non_interrupting_evq;
+
+ struct efhw_iopage irq_iobuff; /*!< Falcon SYSERR interrupt */
/* The new driverlink infrastructure. */
struct efx_dl_device *net_driver_dev;
/*! Callbacks for handling events. */
struct efhw_ev_handler {
- void (*wakeup_fn)(struct efhw_nic *nic, efhw_event_t *ev);
- void (*timeout_fn)(struct efhw_nic *nic, efhw_event_t *ev);
- void (*sw_fn)(struct efhw_nic *nic, efhw_event_t *ev);
- void (*dmaq_flushed_fn) (struct efhw_nic *, int, int);
+ void (*wakeup_fn)(struct efhw_nic *nic, unsigned);
+ void (*timeout_fn)(struct efhw_nic *nic, unsigned);
+ void (*dmaq_flushed_fn) (struct efhw_nic *, unsigned, int);
};
extern int efhw_keventq_ctor(struct efhw_nic *, int instance,
falcon_handle_char_event(struct efhw_nic *nic,
struct efhw_ev_handler *h, efhw_event_t *evp);
-/*! map event queue instance space (0,1,2,..) onto event queue
- number. This function takes into account the allocation rules for
- the underlying driver model */
-extern int falcon_idx_to_evq(struct efhw_nic *nic, uint idx);
-
/*! Acknowledge to HW that processing is complete on a given event queue */
extern void falcon_nic_evq_ack(struct efhw_nic *nic, uint evq, /* evq id */
uint rptr, /* new read pointer update */
#error Unknown endianness
#endif
+#ifndef __iomem
+#define __iomem
+#endif
+
#ifndef mmiowb
#if defined(__i386__) || defined(__x86_64__)
#define mmiowb()
#endif
#endif
-typedef char *efhw_ioaddr_t;
-
#ifndef readq
-static inline uint64_t __readq(void __iomem *addr)
+static inline uint64_t __readq(volatile void __iomem *addr)
{
return *(volatile uint64_t *)addr;
}
#endif
#ifndef writeq
-static inline void __writeq(uint64_t v, void __iomem *addr)
+static inline void __writeq(uint64_t v, volatile void __iomem *addr)
{
*(volatile uint64_t *)addr = v;
}
*
*--------------------------------------------------------------------*/
-extern int efhw_iopage_alloc(struct efhw_nic *, efhw_iopage_t *p);
-extern void efhw_iopage_free(struct efhw_nic *, efhw_iopage_t *p);
+extern int efhw_iopage_alloc(struct efhw_nic *, struct efhw_iopage *p);
+extern void efhw_iopage_free(struct efhw_nic *, struct efhw_iopage *p);
-extern int efhw_iopages_alloc(struct efhw_nic *, efhw_iopages_t *p,
+extern int efhw_iopages_alloc(struct efhw_nic *, struct efhw_iopages *p,
unsigned order);
-extern void efhw_iopages_free(struct efhw_nic *, efhw_iopages_t *p);
+extern void efhw_iopages_free(struct efhw_nic *, struct efhw_iopages *p);
#endif /* __CI_DRIVER_RESOURCE_IOPAGE_H__ */
* resource management for Xen backend, OpenOnload, etc
* (including support for SFE4001 10GBT NIC)
*
- * This file provides efhw_page_t and efhw_iopage_t for Linux kernel.
+ * This file provides struct efhw_page and struct efhw_iopage for Linux
+ * kernel.
*
* Copyright 2005-2007: Solarflare Communications Inc,
* 9501 Jeronimo Road, Suite 250,
#ifndef __CI_EFHW_IOPAGE_LINUX_H__
#define __CI_EFHW_IOPAGE_LINUX_H__
+#include <linux/version.h>
#include <linux/gfp.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
#include <linux/hardirq.h>
+#else
+#include <asm/hardirq.h>
+#endif
+#include <linux/errno.h>
#include <ci/efhw/debug.h>
/*--------------------------------------------------------------------
*
- * efhw_page_t: A single page of memory. Directly mapped in the driver,
- * and can be mapped to userlevel.
+ * struct efhw_page: A single page of memory. Directly mapped in the
+ * driver, and can be mapped to userlevel.
*
*--------------------------------------------------------------------*/
-typedef struct {
+struct efhw_page {
unsigned long kva;
-} efhw_page_t;
+};
-static inline int efhw_page_alloc(efhw_page_t *p)
+static inline int efhw_page_alloc(struct efhw_page *p)
{
p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
return p->kva ? 0 : -ENOMEM;
}
-static inline int efhw_page_alloc_zeroed(efhw_page_t *p)
+static inline int efhw_page_alloc_zeroed(struct efhw_page *p)
{
p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
return p->kva ? 0 : -ENOMEM;
}
-static inline void efhw_page_free(efhw_page_t *p)
+static inline void efhw_page_free(struct efhw_page *p)
{
free_page(p->kva);
EFHW_DO_DEBUG(memset(p, 0, sizeof(*p)));
}
-static inline char *efhw_page_ptr(efhw_page_t *p)
+static inline char *efhw_page_ptr(struct efhw_page *p)
{
return (char *)p->kva;
}
-static inline unsigned efhw_page_pfn(efhw_page_t *p)
+static inline unsigned efhw_page_pfn(struct efhw_page *p)
{
return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
}
-static inline void efhw_page_mark_invalid(efhw_page_t *p)
+static inline void efhw_page_mark_invalid(struct efhw_page *p)
{
p->kva = 0;
}
-static inline int efhw_page_is_valid(efhw_page_t *p)
+static inline int efhw_page_is_valid(struct efhw_page *p)
{
return p->kva != 0;
}
-static inline void efhw_page_init_from_va(efhw_page_t *p, void *va)
+static inline void efhw_page_init_from_va(struct efhw_page *p, void *va)
{
p->kva = (unsigned long)va;
}
/*--------------------------------------------------------------------
*
- * efhw_iopage_t: A single page of memory. Directly mapped in the driver,
+ * struct efhw_iopage: A single page of memory. Directly mapped in the driver,
* and can be mapped to userlevel. Can also be accessed by the NIC.
*
*--------------------------------------------------------------------*/
-typedef struct {
- efhw_page_t p;
+struct efhw_iopage {
+ struct efhw_page p;
dma_addr_t dma_addr;
-} efhw_iopage_t;
+};
-static inline dma_addr_t efhw_iopage_dma_addr(efhw_iopage_t *p)
+static inline dma_addr_t efhw_iopage_dma_addr(struct efhw_iopage *p)
{
return p->dma_addr;
}
/*--------------------------------------------------------------------
*
- * efhw_iopages_t: A set of pages that are contiguous in physical memory.
- * Directly mapped in the driver, and can be mapped to userlevel. Can also
- * be accessed by the NIC.
+ * struct efhw_iopages: A set of pages that are contiguous in physical
+ * memory. Directly mapped in the driver, and can be mapped to userlevel.
+ * Can also be accessed by the NIC.
*
* NB. The O/S may be unwilling to allocate many, or even any of these. So
* only use this type where the NIC really needs a physically contiguous
*
*--------------------------------------------------------------------*/
-typedef struct {
+struct efhw_iopages {
caddr_t kva;
unsigned order;
dma_addr_t dma_addr;
-} efhw_iopages_t;
+};
-static inline caddr_t efhw_iopages_ptr(efhw_iopages_t *p)
+static inline caddr_t efhw_iopages_ptr(struct efhw_iopages *p)
{
return p->kva;
}
-static inline unsigned efhw_iopages_pfn(efhw_iopages_t *p)
+static inline unsigned efhw_iopages_pfn(struct efhw_iopages *p)
{
return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
}
-static inline dma_addr_t efhw_iopages_dma_addr(efhw_iopages_t *p)
+static inline dma_addr_t efhw_iopages_dma_addr(struct efhw_iopages *p)
{
return p->dma_addr;
}
-static inline unsigned efhw_iopages_size(efhw_iopages_t *p)
+static inline unsigned efhw_iopages_size(struct efhw_iopages *p)
{
return 1u << (p->order + PAGE_SHIFT);
}
-/* efhw_iopage_t <-> efhw_iopages_t conversions for handling physically
- * contiguous allocations in iobufsets for iSCSI. This allows the
- * essential information about contiguous allocations from
- * efhw_iopages_alloc() to be saved away in the efhw_iopage_t array in an
- * iobufset. (Changing the iobufset resource to use a union type would
+/* struct efhw_iopage <-> struct efhw_iopages conversions for handling
+ * physically contiguous allocations in iobufsets for iSCSI. This allows
+ * the essential information about contiguous allocations from
+ * efhw_iopages_alloc() to be saved away in the struct efhw_iopage array in
+ * an iobufset. (Changing the iobufset resource to use a union type would
* involve a lot of code changes, and make the iobufset's metadata larger
* which could be bad as it's supposed to fit into a single page on some
* platforms.)
*/
static inline void
-efhw_iopage_init_from_iopages(efhw_iopage_t *iopage,
- efhw_iopages_t *iopages, unsigned pageno)
+efhw_iopage_init_from_iopages(struct efhw_iopage *iopage,
+ struct efhw_iopages *iopages, unsigned pageno)
{
iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages))
+ (pageno * PAGE_SIZE);
}
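/* Example (mirroring the iobufset code later in this patch): a chunk of
 * 1 << order physically contiguous pages stored in bufs[i] onwards is
 * handed back to efhw_iopages_free() via
 *     efhw_iopages_init_from_iopage(&iopages, &bufs[i], order);
 * where i is always a multiple of pages_per_contiguous_chunk, so that
 * bufs[i].p.kva really is the start of the contiguous allocation. */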
static inline void
-efhw_iopages_init_from_iopage(efhw_iopages_t *iopages,
- efhw_iopage_t *iopage, unsigned order)
+efhw_iopages_init_from_iopage(struct efhw_iopages *iopages,
+ struct efhw_iopage *iopage, unsigned order)
{
iopages->kva = (caddr_t) efhw_iopage_ptr(iopage);
EFHW_ASSERT(iopages->kva);
*--------------------------------------------------------------------*/
struct vi_resource_dimensions {
- unsigned evq_int_min, evq_int_max;
- unsigned evq_timer_min, evq_timer_max;
- unsigned rxq_min, rxq_max;
- unsigned txq_min, txq_max;
+ unsigned evq_int_min, evq_int_lim;
+ unsigned evq_timer_min, evq_timer_lim;
+ unsigned rxq_min, rxq_lim;
+ unsigned txq_min, txq_lim;
};
/*! Initialise resources */
extern int
efrm_resources_init(const struct vi_resource_dimensions *,
- int buffer_table_min, int buffer_table_max);
+ int buffer_table_min, int buffer_table_lim);
/*! Tear down resources */
extern void efrm_resources_fini(void);
unsigned int n_bufs;
unsigned int pages_per_contiguous_chunk;
unsigned order;
- efhw_iopage_t bufs[1];
+ struct efhw_iopage bufs[1];
/*!< up to n_bufs can follow this, so this must be the last member */
};
};
/* Resource driver structures used by other drivers as well */
-extern struct efrm_nic_table efrm_nic_table;
+extern struct efrm_nic_table *efrm_nic_tablep;
static inline void efrm_nic_table_hold(void)
{
- atomic_inc(&efrm_nic_table.ref_count);
+ atomic_inc(&efrm_nic_tablep->ref_count);
}
static inline void efrm_nic_table_rele(void)
{
- atomic_dec(&efrm_nic_table.ref_count);
+ atomic_dec(&efrm_nic_tablep->ref_count);
}
static inline int efrm_nic_table_held(void)
{
- return (atomic_read(&efrm_nic_table.ref_count) != 0);
+ return (atomic_read(&efrm_nic_tablep->ref_count) != 0);
}
/* Run code block _x multiple times with variable nic set to each
for ((_nic_i) = (efrm_nic_table_hold(), 0); \
(_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
(_nic_i)++) \
- if (((_nic) = efrm_nic_table.nic[_nic_i]))
+ if (((_nic) = efrm_nic_tablep->nic[_nic_i]))
#define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic) \
for ((_i) = (efrm_nic_table_hold(), 0); \
(_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++(_i)) \
- if (((_nic) = efrm_nic_table.nic[_i]) && \
+ if (((_nic) = efrm_nic_tablep->nic[_i]) && \
efrm_nic_set_read((_set), (_i)))
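/* Note: the reference on the NIC table is taken by the comma expression in
 * the for-initialiser and dropped by the one in the loop condition once the
 * index reaches EFHW_MAX_NR_DEVS; breaking out of either loop early would
 * therefore leave the reference held. */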
#endif /* __CI_EFRM_NIC_TABLE_H__ */
unsigned int i;
struct kfifo *ids;
unsigned char *buffer;
+#ifndef TCP_CHIMNEY_SUPPORT
unsigned int size = roundup_pow_of_two((limit - base) * sizeof(int));
+#else
+ /* ### TODO - Linux kfifos really are a power of two; sysdep_ci2linux
+ * provides ci_fifo2s, which only actually hold 2^n - 1 entries.
+ * We need to double the buffer size, not add one, because a ci_fifo2's
+ * size can only be a power of two. */
+ unsigned int size = roundup_pow_of_two((limit - base) * 2 * sizeof(int));
+#endif
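+ /* Worked example (illustrative numbers only): for limit - base == 128
+ * IDs of 4 bytes each, the plain kfifo path allocates
+ * roundup_pow_of_two(512) == 512 bytes (128 slots, all usable), while
+ * the ci_fifo2 path allocates roundup_pow_of_two(1024) == 1024 bytes
+ * (256 slots, of which 2^n - 1 == 255 are usable). */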
EFRM_ASSERT(base <= limit);
buffer = vmalloc(size);
* level.
***********************************************************************/
-typedef struct efrm_resource_handle_s {
+typedef struct {
uint32_t handle;
} efrm_resource_handle_t;
/* Spinlocks are defined in efhw/sysdep.h */
#include <ci/efhw/sysdep.h>
-#if defined(__linux__) && defined(__KERNEL__)
# include <ci/efrm/sysdep_linux.h>
-#else
-
-# include <ci/efrm/sysdep_ci2linux.h>
-
-#endif
#endif /* __CI_EFRM_SYSDEP_H__ */
#include <linux/workqueue.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
#include <linux/hardirq.h>
+#else
+#include <asm/hardirq.h>
+#endif
#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/completion.h>
#include <linux/log2.h>
#endif
+
+/********************************************************************
+ *
+ * Utility functions
+ *
+ ********************************************************************/
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+static inline unsigned long __attribute_const__ roundup_pow_of_two(unsigned long x)
+{
+ return (1UL << fls(x - 1));
+}
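+/* e.g. roundup_pow_of_two(400) == 512 and roundup_pow_of_two(512) == 512,
+ * since fls(399) == 9 and fls(511) == 9 respectively. */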
+#endif
+
+
/********************************************************************
*
* List API
uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs
/*,struct efhw_nic *nic */ );
+
+/* Fill [out_vi_data] with information required to allow a VI to be init'd.
+ * [out_vi_data] must ref at least VI_MAPPINGS_SIZE bytes.
+ */
+extern void efrm_vi_resource_mappings(struct vi_resource*, int nic_i,
+ void* out_vi_data);
+
+
#endif /* __CI_EFRM_VI_RESOURCE_H__ */
struct vi_resource *evq_virs;
};
-#ifdef __ci_ul_driver__
-#define EFRM_VI_USE_WORKQUEUE 0
-#else
#define EFRM_VI_USE_WORKQUEUE 1
-#endif
/*! Global information for the VI resource manager. */
struct vi_resource_manager {
struct vi_resource_nic_info {
struct eventq_resource_hardware evq_pages;
#if defined(__CI_HARDWARE_CONFIG_FALCON__)
- efhw_iopages_t dmaq_pages[EFRM_VI_RM_DMA_QUEUE_COUNT];
+ struct efhw_iopages dmaq_pages[EFRM_VI_RM_DMA_QUEUE_COUNT];
#endif
};
hw->iobuff_off);
}
-/*! Wakeup handler, see efhw_ev_handler_t for prototype */
-extern void efrm_handle_wakeup_event(struct efhw_nic *nic, efhw_event_t *ev);
+/*! Wakeup handler */
+extern void efrm_handle_wakeup_event(struct efhw_nic *nic, unsigned id);
-/*! Timeout handler, see efhw_ev_handler_t for prototype */
-extern void efrm_handle_timeout_event(struct efhw_nic *nic, efhw_event_t *ev);
+/*! Timeout handler */
+extern void efrm_handle_timeout_event(struct efhw_nic *nic, unsigned id);
-/*! DMA flush handler, see efhw_ev_handler_t for prototype */
-extern void efrm_handle_dmaq_flushed(struct efhw_nic *nic, int instance,
+/*! DMA flush handler */
+extern void efrm_handle_dmaq_flushed(struct efhw_nic *nic, unsigned id,
int rx_flush);
+/*! SRAM update handler */
+extern void efrm_handle_sram_event(struct efhw_nic *nic);
+
#endif /* __CI_EFRM_VI_RESOURCE_PRIVATE_H__ */
*/
#define efrm_driver_lock(irqlock_state) \
- spin_lock_irqsave(&efrm_nic_table.lock, irqlock_state)
+ spin_lock_irqsave(&efrm_nic_tablep->lock, irqlock_state)
#define efrm_driver_unlock(irqlock_state) \
- spin_unlock_irqrestore(&efrm_nic_table.lock, \
+ spin_unlock_irqrestore(&efrm_nic_tablep->lock, \
irqlock_state);
/* These routines are all methods on the architecturally singleton
*/
/*! Exported driver state */
-struct efrm_nic_table efrm_nic_table;
-EXPORT_SYMBOL(efrm_nic_table);
+static struct efrm_nic_table efrm_nic_table;
+struct efrm_nic_table *efrm_nic_tablep;
+EXPORT_SYMBOL(efrm_nic_tablep);
/* Internal table with resource managers.
* We'd like to not export it, but we are still using efrm_rm_table
int efrm_driver_ctor(void)
{
- memset(&efrm_nic_table, 0, sizeof(efrm_nic_table));
- memset(&efrm_rm_table, 0, sizeof(efrm_rm_table));
-
- spin_lock_init(&efrm_nic_table.lock);
+ efrm_nic_tablep = &efrm_nic_table;
+ spin_lock_init(&efrm_nic_tablep->lock);
EFRM_TRACE("%s: driver created", __FUNCTION__);
return 0;
{
EFRM_ASSERT(!efrm_nic_table_held());
- spin_lock_destroy(&efrm_nic_table.lock);
+ spin_lock_destroy(&efrm_nic_tablep->lock);
+ memset(&efrm_nic_table, 0, sizeof(efrm_nic_table));
+ memset(&efrm_rm_table, 0, sizeof(efrm_rm_table));
+
EFRM_TRACE("%s: driver deleted", __FUNCTION__);
return 0;
}
goto done;
}
- if (efrm_nic_table.nic_count == EFHW_MAX_NR_DEVS) {
+ if (efrm_nic_tablep->nic_count == EFHW_MAX_NR_DEVS) {
EFRM_WARN("%s: filled up NIC table size %d", __FUNCTION__,
EFHW_MAX_NR_DEVS);
rc = -E2BIG;
goto done;
}
- EFRM_ASSERT(efrm_nic_table.nic[nic_index] == NULL);
- efrm_nic_table.nic[nic_index] = nic;
+ EFRM_ASSERT(efrm_nic_tablep->nic[nic_index] == NULL);
+ efrm_nic_tablep->nic[nic_index] = nic;
nic->index = nic_index;
- if (efrm_nic_table.a_nic == NULL)
- efrm_nic_table.a_nic = nic;
+ if (efrm_nic_tablep->a_nic == NULL)
+ efrm_nic_tablep->a_nic = nic;
- efrm_nic_table.nic_count++;
+ efrm_nic_tablep->nic_count++;
efrm_driver_unlock(lock_flags);
return rc;
goto done;
}
- EFRM_ASSERT(efrm_nic_table.nic[nic_index] == nic);
+ EFRM_ASSERT(efrm_nic_tablep->nic[nic_index] == nic);
nic->index = -1;
- efrm_nic_table.nic[nic_index] = NULL;
+ efrm_nic_tablep->nic[nic_index] = NULL;
- --efrm_nic_table.nic_count;
+ --efrm_nic_tablep->nic_count;
- if (efrm_nic_table.a_nic == nic) {
- if (efrm_nic_table.nic_count == 0) {
- efrm_nic_table.a_nic = NULL;
+ if (efrm_nic_tablep->a_nic == nic) {
+ if (efrm_nic_tablep->nic_count == 0) {
+ efrm_nic_tablep->a_nic = NULL;
} else {
for (nic_index = 0; nic_index < EFHW_MAX_NR_DEVS;
nic_index++) {
- if (efrm_nic_table.nic[nic_index] != NULL)
- efrm_nic_table.a_nic =
- efrm_nic_table.nic[nic_index];
+ if (efrm_nic_tablep->nic[nic_index] != NULL)
+ efrm_nic_tablep->a_nic =
+ efrm_nic_tablep->nic[nic_index];
}
- EFRM_ASSERT(efrm_nic_table.a_nic);
+ EFRM_ASSERT(efrm_nic_tablep->a_nic);
}
}
const struct efx_dl_falcon_resources *res)
{
rd->evq_timer_min = res->evq_timer_min;
- rd->evq_timer_max = res->evq_timer_max;
+ rd->evq_timer_lim = res->evq_timer_lim;
rd->evq_int_min = res->evq_int_min;
- rd->evq_int_max = res->evq_int_max;
+ rd->evq_int_lim = res->evq_int_lim;
rd->rxq_min = res->rxq_min;
- rd->rxq_max = res->rxq_max;
+ rd->rxq_lim = res->rxq_lim;
rd->txq_min = res->txq_min;
- rd->txq_max = res->txq_max;
+ rd->txq_lim = res->txq_lim;
EFRM_TRACE
("Using evq_int(%d-%d) evq_timer(%d-%d) RXQ(%d-%d) TXQ(%d-%d)",
- res->evq_int_min, res->evq_int_max, res->evq_timer_min,
- res->evq_timer_max, res->rxq_min, res->rxq_max, res->txq_min,
- res->txq_max);
+ res->evq_int_min, res->evq_int_lim, res->evq_timer_min,
+ res->evq_timer_lim, res->rxq_min, res->rxq_lim, res->txq_min,
+ res->txq_lim);
}
-#if defined(EFX_NOT_UPSTREAM)
/* We have a module parameter that can tell us to only load the char driver
* for 1 NIC (if there are multiple NICs in the system), and if so which one.
* This tells us the PCI bus and slot of the NIC to load for, or -1 to just
MODULE_PARM_DESC(only_NIC,
"Initialise sfc_resource driver for one NIC only, "
"with specified PCI bus and slot");
-#endif
static int
efrm_dl_probe(struct efx_dl_device *efrm_dev,
struct pci_dev *dev;
struct efhw_nic *nic;
unsigned probe_flags = 0;
+ int non_irq_evq;
int rc;
efrm_dev->priv = NULL;
- efx_dl_for_each_device_info_matching(dev_info, EFX_DL_FALCON_RESOURCES,
- struct efx_dl_falcon_resources,
- hdr, res) {
- /* break out, leaving res pointing at the falcon resources */
- break;
- }
+ efx_dl_search_device_info(dev_info, EFX_DL_FALCON_RESOURCES,
+ struct efx_dl_falcon_resources,
+ hdr, res);
if (res == NULL) {
EFRM_ERR("%s: Unable to find falcon driverlink resources",
if (res->flags & EFX_DL_FALCON_USE_MSI)
probe_flags |= NIC_FLAG_TRY_MSI;
+#if defined(EFX_NOT_UPSTREAM)
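+ /* only_NIC encodes the PCI bus number in bits [23:8] and the PCI slot
+ * number in bits [7:0]; e.g. a (hypothetical) value of 0x0208 selects
+ * bus 0x02, slot 0x08. A value of -1 disables this check, so the char
+ * driver is loaded for every NIC. */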
+ if (only_NIC != -1 &&
+ (efrm_dev->pci_dev->bus->number !=
+ ((only_NIC >> 8) & 0xFFFF)
+ || PCI_SLOT(efrm_dev->pci_dev->devfn) !=
+ (only_NIC & 0xFF))) {
+ EFRM_NOTICE("Hiding char device %x:%x",
+ efrm_dev->pci_dev->bus->number,
+ PCI_SLOT(efrm_dev->pci_dev->devfn));
+ return -ENODEV;
+ }
+#endif
+
dev = efrm_dev->pci_dev;
if (res->flags & EFX_DL_FALCON_DUAL_FUNC) {
unsigned vendor = dev->vendor;
EFRM_ASSERT(dev->bus != NULL);
dev = NULL;
-#if defined(EFX_NOT_UPSTREAM)
- if (only_NIC != -1 &&
- (efrm_dev->pci_dev->bus->number !=
- ((only_NIC >> 8) & 0xFFFF)
- || PCI_SLOT(efrm_dev->pci_dev->devfn) !=
- (only_NIC & 0xFF))) {
- EFRM_NOTICE("Hiding char device %x:%x",
- efrm_dev->pci_dev->bus->number,
- PCI_SLOT(efrm_dev->pci_dev->devfn));
- return -ENODEV;
- }
-#endif
-
while ((dev = pci_get_device(vendor, FALCON_S_DEVID, dev))
!= NULL) {
EFRM_ASSERT(dev->bus != NULL);
init_vi_resource_dimensions(&res_dim, res);
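+ /* Reserve the highest timer-driven event queue for the driver's own
+ * non-interrupting event queue; the timer evqs handed out to clients
+ * then run from evq_timer_min up to, but not including, the reduced
+ * evq_timer_lim. */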
+ EFRM_ASSERT(res_dim.evq_timer_lim > res_dim.evq_timer_min);
+ res_dim.evq_timer_lim--;
+ non_irq_evq = res_dim.evq_timer_lim;
+
rc = efrm_nic_add(dev, probe_flags, net_dev->dev_addr, &lnic,
res->biu_lock,
- res->buffer_table_min, res->buffer_table_max,
- &res_dim);
+ res->buffer_table_min, res->buffer_table_lim,
+ non_irq_evq, &res_dim);
if (rc != 0)
return rc;
/* TODO do we need to get_page() here ? */
dma_addr = pci_map_page
- (linux_efhw_nic(efrm_nic_table.nic[efx_state->nic_index])->
+ (linux_efhw_nic(efrm_nic_tablep->nic[efx_state->nic_index])->
pci_dev, pages[i], 0, PAGE_SIZE, PCI_DMA_TODEVICE);
efrm_buffer_table_set(&dm_state->bt_handle, i, dma_addr,
for (i = 0; i < dm_state->n_pages; ++i)
pci_unmap_page(linux_efhw_nic
- (efrm_nic_table.nic[efx_state->nic_index])->pci_dev,
+ (efrm_nic_tablep->nic[efx_state->nic_index])->pci_dev,
dm_state->dma_addrs[i], PAGE_SIZE, PCI_DMA_TODEVICE);
kfree(dm_state->dma_addrs);
{
struct efx_vi_state *efx_state = vih;
int i, ni = efx_state->nic_index;
- struct linux_efhw_nic *lnic = linux_efhw_nic(efrm_nic_table.nic[ni]);
+ struct linux_efhw_nic *lnic = linux_efhw_nic(efrm_nic_tablep->nic[ni]);
unsigned long phys = lnic->ctr_ap_pci_addr;
struct efrm_resource *ep_res = &efx_state->vi_res->rs;
unsigned ep_mmap_bytes;
if (*length < EFX_VI_HW_RESOURCE_MAXSIZE)
return -EINVAL;
- mdata->version = 0;
-
- mdata->nic_arch = efrm_nic_table.nic[ni]->devtype.arch;
- mdata->nic_variant = efrm_nic_table.nic[ni]->devtype.variant;
- mdata->nic_revision = efrm_nic_table.nic[ni]->devtype.revision;
+ mdata->nic_arch = efrm_nic_tablep->nic[ni]->devtype.arch;
+ mdata->nic_variant = efrm_nic_tablep->nic[ni]->devtype.variant;
+ mdata->nic_revision = efrm_nic_tablep->nic[ni]->devtype.revision;
mdata->evq_order =
efx_state->vi_res->nic_info[ni].evq_pages.iobuff.order;
(unsigned long)efx_state->vi_res->nic_info[ni].
dmaq_pages[EFRM_VI_RM_DMA_QUEUE_RX].kva;
- /* NB EFX_VI_HW_RESOURCE_TXBELL not used on Falcon */
- /* NB EFX_VI_HW_RESOURCE_RXBELL not used on Falcon */
-
i++;
hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQTIMER;
hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
/* NB EFX_VI_HW_RESOURCE_EVQPTR not used on Falcon */
i++;
- switch (efrm_nic_table.nic[ni]->devtype.variant) {
+ switch (efrm_nic_tablep->nic[ni]->devtype.variant) {
case 'A':
hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR;
hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
efhw_event_t *evp)
{
- int instance = (int)FALCON_EVENT_RX_FLUSH_Q_ID(evp);
+ unsigned instance = (unsigned)FALCON_EVENT_RX_FLUSH_Q_ID(evp);
EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance);
if (!h->dmaq_flushed_fn) {
efhw_handle_wakeup_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
efhw_event_t *evp)
{
+ unsigned instance = (unsigned)FALCON_EVENT_WAKE_EVQ_ID(evp);
+
if (!h->wakeup_fn) {
EFHW_WARN("%s: no handler registered", __FUNCTION__);
return;
}
- h->wakeup_fn(nic, evp);
+ h->wakeup_fn(nic, instance);
}
void
efhw_handle_timeout_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
efhw_event_t *evp)
{
+ unsigned instance = (unsigned)FALCON_EVENT_WAKE_EVQ_ID(evp);
+
if (!h->timeout_fn) {
EFHW_WARN("%s: no handler registered", __FUNCTION__);
return;
}
- h->timeout_fn(nic, evp);
+ h->timeout_fn(nic, instance);
}
/**********************************************************************
*
*---------------------------------------------------------------------------*/
-/* on for debug builds */
-#ifndef NDEBUG
-# define FALCON_FULL_FILTER_CACHE 1 /* complete SW shadow of filter tbl */
-# define FALCON_VERIFY_FILTERS 0
-#else /* Also adds duplicate filter check */
-# define FALCON_FULL_FILTER_CACHE 1 /* keep this on for some security */
-# define FALCON_VERIFY_FILTERS 0
-#endif
+/* Keep a software copy of the filter table and check for duplicates. */
+#define FALCON_FULL_FILTER_CACHE 1
+
+/* Read filters back from the hardware to detect corruption. */
+#define FALCON_VERIFY_FILTERS 0
/* options */
#define RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL 8 /* default search limit */
*
*---------------------------------------------------------------------------*/
-#ifndef __KERNEL__
-#define _DEBUG_SYM_ extern
-#else
#define _DEBUG_SYM_ static inline
-#endif
/*----------------------------------------------------------------------------
*
EFHW_ASSERT(!rss_b0);
break;
case 'B':
+ case 'C':
v4 |= scat_b0 << __DW4(SCATTER_EN_1_B0_LBN);
v4 |= rss_b0 << __DW4(RSS_EN_1_B0_LBN);
break;
uint index, desc_type;
uint64_t val1, val2, val3;
ulong offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
/* Q attributes */
int iscsi_hdig_en = ((flags & EFHW_VI_ISCSI_TX_HDIG_EN) != 0);
switch (nic->devtype.variant) {
case 'B':
+ case 'C':
__DW3CHCK(TX_NON_IP_DROP_DIS_B0_LBN,
TX_NON_IP_DROP_DIS_B0_WIDTH);
__DW3CHCK(TX_IP_CHKSM_DIS_B0_LBN, TX_IP_CHKSM_DIS_B0_WIDTH);
uint i, desc_type = 1;
uint64_t val1, val2, val3;
ulong offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
/* Q attributes */
#if BUG5762_WORKAROUND
FALCON_LOCK_DECL;
uint64_t val1, val2, val3;
ulong offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
/* initialise the TX descriptor queue pointer table */
FALCON_LOCK_DECL;
uint64_t val1, val2, val3;
ulong offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
/* initialise the TX descriptor queue pointer table */
offset = falcon_dma_rx_q_offset(nic, dmaq);
{
/* programming the half table needs to be done in pairs. */
uint64_t entry, val, shift;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
- efhw_ioaddr_t offset;
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *offset;
EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_LBN == BUF_ADR_HBUF_EVEN_LBN + 32);
EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_LBN ==
val &= ~(((uint64_t) 0xffffffff) << shift);
val |= (entry << shift);
- EFHW_TRACE("%s[%x]: " ci_dma_addr_fmt ":%x:%" PRIx64 "->%x = %"
- PRIx64, __FUNCTION__, buffer_id, dma_addr, own_id, entry,
- (unsigned)(offset - efhw_kva), val);
+ EFHW_TRACE("%s[%x]: %lx:%x:%" PRIx64 "->%x = %"
+ PRIx64, __FUNCTION__, buffer_id, (unsigned long) dma_addr,
+ own_id, entry, (unsigned)(offset - efhw_kva), val);
/* Falcon requires that access to this register is serialised */
falcon_write_q(offset, val);
dma_addr_t dma_addr, uint bufsz,
uint region, int own_id, int buffer_id)
{
- efhw_ioaddr_t offset;
+ volatile char __iomem *offset;
uint64_t entry;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
EFHW_ASSERT(region < FALCON_REGION_NUM);
entry = falcon_nic_buffer_table_entry64_mk(dma_addr, bufsz, region,
own_id);
- EFHW_TRACE("%s[%x]: " ci_dma_addr_fmt
- ":bufsz=%x:region=%x:ownid=%x",
- __FUNCTION__, buffer_id, dma_addr, bufsz, region, own_id);
+ EFHW_TRACE("%s[%x]: %lx:bufsz=%x:region=%x:ownid=%x",
+ __FUNCTION__, buffer_id, (unsigned long) dma_addr, bufsz,
+ region, own_id);
EFHW_TRACE("%s: BUF[%x]:NIC[%x]->%" PRIx64,
__FUNCTION__, buffer_id,
static inline void _falcon_nic_buffer_table_commit(struct efhw_nic *nic)
{
/* MUST be called holding the FALCON_LOCK */
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
uint64_t cmd;
EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST);
uint64_t cmd;
uint64_t start_id = buffer_id;
uint64_t end_id = buffer_id + num - 1;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
- efhw_ioaddr_t offset = (efhw_kva + BUF_TBL_UPD_REG_OFST);
+ volatile char __iomem *offset = (efhw_kva + BUF_TBL_UPD_REG_OFST);
EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST);
* updates */
FALCON_LOCK_DECL;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
EFHW_BUILD_ASSERT(SRM_UPD_EVQ_REG_OFST == SRM_UPD_EVQ_REG_KER_OFST);
- EFHW_ASSERT((evq == FALCON_EVQ_KERNEL0) || (evq == FALCON_EVQ_CHAR) ||
- (evq == FALCON_EVQ_NONIRQ));
-
__DWCHCK(SRM_UPD_EVQ_ID_LBN, SRM_UPD_EVQ_ID_WIDTH);
__RANGECHCK(evq, SRM_UPD_EVQ_ID_WIDTH);
FALCON_LOCK_DECL;
uint i, val;
ulong offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
/* size must be one of the various options, otherwise we assert */
for (i = 0; i < N_EVENTQ_SIZES; i++) {
{
uint val;
ulong offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
mmiowb();
}
-/*----------------------------------------------------------------------------
- *
- * Helper for evq mapping
- *
- * idx = 0 && char => hw eventq[4]
- * idx = 0 && net => hw eventq[0]
- * 0 < idx < 5 => hw eventq[idx] (5 is non-interrupting)
- *
- *
- *---------------------------------------------------------------------------*/
-
-int falcon_idx_to_evq(struct efhw_nic *nic, uint idx)
-{
- EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
- EFHW_ASSERT(idx <= FALCON_EVQ_NONIRQ);
- return (idx > 0) ? idx : FALCON_EVQ_CHAR;
-}
-
-static inline int falcon_evq_is_interrupting(struct efhw_nic *nic, uint idx)
-{
- EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
- EFHW_ASSERT(idx <= FALCON_EVQ_NONIRQ);
-
- /* only the first CHAR driver event queue is interrupting */
- return (idx == FALCON_EVQ_CHAR);
-}
+/*---------------------------------------------------------------------------*/
static inline void
falcon_drv_ev(struct efhw_nic *nic, uint64_t data, uint qid)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
/* send an event from one driver to the other */
EFHW_BUILD_ASSERT(DRV_EV_REG_KER_OFST == DRV_EV_REG_OFST);
FALCON_LOCK_DECL;
uint val;
ulong offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
EFHW_BUILD_ASSERT(TIMER_VAL_LBN == 0);
Pacing only available on the virtual interfaces
*/
FALCON_LOCK_DECL;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
ulong offset;
if (pace > 20)
offset += (dmaq - TX_PACE_TBL_FIRST_QUEUE_A1) * 16;
break;
case 'B':
+ case 'C':
/* Would be nice to assert this, but as dmaq is unsigned and
* TX_PACE_TBL_FIRST_QUEUE_B0 is 0, it makes no sense
* EFHW_ASSERT(dmaq >= TX_PACE_TBL_FIRST_QUEUE_B0);
static void falcon_nic_handle_fatal_int(struct efhw_nic *nic)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *offset;
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
uint64_t val;
offset = (efhw_kva + FATAL_INTR_REG_OFST);
{
FALCON_LOCK_DECL;
uint val;
- efhw_ioaddr_t offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *offset;
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
EFHW_BUILD_ASSERT(DRV_INT_EN_CHAR_WIDTH == 1);
static void falcon_nic_interrupt_hw_disable(struct efhw_nic *nic)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *offset;
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
EFHW_BUILD_ASSERT(SRAM_PERR_INT_KER_WIDTH == 1);
EFHW_BUILD_ASSERT(DRV_INT_EN_KER_LBN == 0);
FALCON_LOCK_UNLOCK(nic);
}
-#ifndef __ci_ul_driver__
static void falcon_nic_irq_addr_set(struct efhw_nic *nic, dma_addr_t dma_addr)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t offset;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *offset;
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
offset = (efhw_kva + INT_ADR_REG_CHAR_OFST);
FALCON_LOCK_UNLOCK(nic);
}
-#endif
/*--------------------------------------------------------------------
falcon_nic_set_rx_usr_buf_size(struct efhw_nic *nic, int usr_buf_bytes)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
uint64_t val, val2, usr_buf_size = usr_buf_bytes / 32;
int rubs_lbn, rubs_width, roec_lbn;
roec_lbn = RX_OWNERR_CTL_A1_LBN;
break;
case 'B':
+ case 'C':
rubs_lbn = RX_USR_BUF_SIZE_B0_LBN;
rubs_width = RX_USR_BUF_SIZE_B0_WIDTH;
roec_lbn = RX_OWNERR_CTL_B0_LBN;
uint32_t *tcp_wild,
uint32_t *udp_full, uint32_t *udp_wild)
{
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
FALCON_LOCK_DECL;
uint64_t val;
uint32_t udp_full, uint32_t udp_wild)
{
uint64_t val, val2;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
FALCON_LOCK_DECL;
EFHW_ASSERT(tcp_full < nic->filter_tbl_size);
_DEBUG_SYM_ void falcon_nic_tx_cfg(struct efhw_nic *nic, int unlocked)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
uint64_t val1, val2;
EFHW_BUILD_ASSERT(TX_CFG_REG_OFST == TX_CFG_REG_KER_OFST);
static void falcon_nic_pace_cfg(struct efhw_nic *nic)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
unsigned offset = 0;
uint64_t val;
switch (nic->devtype.variant) {
case 'A': offset = TX_PACE_REG_A1_OFST; break;
case 'B': offset = TX_PACE_REG_B0_OFST; break;
+ case 'C': offset = TX_PACE_REG_B0_OFST; break;
default: EFHW_ASSERT(0); break;
}
falcon_write_qq(efhw_kva + offset, val, 0);
*
*---------------------------------------------------------------------------*/
-#ifdef __ci_ul_driver__
-
-static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable)
-{
- EFHW_ERR("%s: not implemented for ul driver", __FUNCTION__);
- return -EOPNOTSUPP;
-}
-
-#else
static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable)
{
falcon_nic_irq_addr_set(nic, 0);
}
- EFHW_TRACE("%s: " ci_dma_addr_fmt " %sable", __FUNCTION__,
- efhw_iopage_dma_addr(&nic->irq_iobuff), enable ?
- "en" : "dis");
+ EFHW_TRACE("%s: %lx %sable", __FUNCTION__,
+ (unsigned long) efhw_iopage_dma_addr(&nic->irq_iobuff),
+ enable ? "en" : "dis");
return 0;
}
-#endif
static void falcon_nic_close_hardware(struct efhw_nic *nic)
{
EFHW_NOTICE("%s:", __FUNCTION__);
}
-#ifdef __ci_ul_driver__
-extern
-#else
static
-#endif
int falcon_nic_get_mac_config(struct efhw_nic *nic)
{
- efhw_ioaddr_t efhw_kva = nic->bar_ioaddr;
+ volatile char __iomem *efhw_kva = nic->bar_ioaddr;
int is_mac_type_1g;
uint32_t strap, altera;
uint64_t rx_cfg, r;
speed = readl(efhw_kva + MAC0_CTRL_REG_OFST) & 0x3;
is_mac_type_1g = (speed <= 2);
}
+#endif
+ break;
+ case 'C':
+ /* Treat like B0 for now, but without the RX FIFO size check
+ * (don't need it, and RX_CFG_REG will likely change soon
+ * anyway).
+ */
+ is_mac_type_1g = (0 != (strap & 2));
+#if FALCON_MAC_SET_TYPE_BY_SPEED
+ /* Check the selected strap pins against the MAC speed -
+ * and adjust if necessary.
+ */
+ {
+ int speed;
+ speed = readl(efhw_kva + MAC0_CTRL_REG_OFST) & 0x3;
+ is_mac_type_1g = (speed <= 2);
+ }
#endif
break;
default:
static int
falcon_nic_init_hardware(struct efhw_nic *nic,
struct efhw_ev_handler *ev_handlers,
- const uint8_t *mac_addr)
+ const uint8_t *mac_addr, int non_irq_evq)
{
int rc;
IFDEF FALCON's can be removed from
nic.c:efhw_nic_allocate_common_hardware_resources()
*/
- nic->irq_unit[0] = INT_EN_REG_CHAR_OFST;
+ nic->irq_unit = INT_EN_REG_CHAR_OFST;
/*****************************************************************
* The rest of this function deals with initialization of the NICs
/* char driver grabs SRM events onto the non interrupting
* event queue */
- falcon_nic_srm_upd_evq(nic, FALCON_EVQ_NONIRQ);
+ falcon_nic_srm_upd_evq(nic, non_irq_evq);
/* RXDP tweaks */
RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD);
if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) {
- rc = efhw_keventq_ctor(nic, FALCON_EVQ_CHAR, &nic->evq[0],
- ev_handlers);
+ rc = efhw_keventq_ctor(nic, FALCON_EVQ_CHAR,
+ &nic->interrupting_evq, ev_handlers);
if (rc < 0) {
EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d",
__FUNCTION__, rc, FALCON_EVQ_CHAR);
return rc;
}
}
- rc = efhw_keventq_ctor(nic, FALCON_EVQ_NONIRQ,
- &nic->evq[FALCON_EVQ_NONIRQ], NULL);
+ rc = efhw_keventq_ctor(nic, non_irq_evq,
+ &nic->non_interrupting_evq, NULL);
if (rc < 0) {
EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d",
- __FUNCTION__, rc, FALCON_EVQ_NONIRQ);
+ __FUNCTION__, rc, non_irq_evq);
return rc;
}
*--------------------------------------------------------------------*/
static void
-falcon_nic_interrupt_enable(struct efhw_nic *nic, unsigned idx)
+falcon_nic_interrupt_enable(struct efhw_nic *nic)
{
- int evq;
+ struct efhw_keventq *q;
+ unsigned rdptr;
- if (idx || (nic->flags & NIC_FLAG_NO_INTERRUPT))
+ if (nic->flags & NIC_FLAG_NO_INTERRUPT)
return;
/* Enable driver interrupts */
falcon_nic_interrupt_hw_enable(nic);
/* An interrupting eventq must start of day ack its read pointer */
- evq = falcon_idx_to_evq(nic, idx);
-
- if (falcon_evq_is_interrupting(nic, evq)) {
- struct efhw_keventq *q = &nic->evq[idx];
- unsigned rdptr =
- EFHW_EVENT_OFFSET(q, q, 1) / sizeof(efhw_event_t);
- falcon_nic_evq_ack(nic, evq, rdptr, false);
- EFHW_NOTICE("%s: ACK evq[%d]:%x", __FUNCTION__, evq, rdptr);
- }
+ q = &nic->interrupting_evq;
+ rdptr = EFHW_EVENT_OFFSET(q, q, 1) / sizeof(efhw_event_t);
+ falcon_nic_evq_ack(nic, FALCON_EVQ_CHAR, rdptr, false);
+ EFHW_NOTICE("%s: ACK evq[%d]:%x", __FUNCTION__,
+ FALCON_EVQ_CHAR, rdptr);
}
-static void falcon_nic_interrupt_disable(struct efhw_nic *nic, uint idx)
+static void falcon_nic_interrupt_disable(struct efhw_nic *nic)
{
/* NB. No need to check for NIC_FLAG_NO_INTERRUPT, as
** falcon_nic_interrupt_hw_disable() will do it. */
- if (idx)
- return;
falcon_nic_interrupt_hw_disable(nic);
}
static void
-falcon_nic_set_interrupt_moderation(struct efhw_nic *nic, uint idx,
+falcon_nic_set_interrupt_moderation(struct efhw_nic *nic,
uint32_t val)
{
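+ /* "val" is in microseconds (see the interface comment); the hold-off
+ * timer appears to tick in 5us units, hence the divide by 5 to convert
+ * to timer ticks. */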
- falcon_timer_cmd(nic, falcon_idx_to_evq(nic, idx),
- TIMER_MODE_INT_HLDOFF, val / 5);
+ falcon_timer_cmd(nic, FALCON_EVQ_CHAR, TIMER_MODE_INT_HLDOFF,
+ val / 5);
}
static inline void legacy_irq_ack(struct efhw_nic *nic)
static int falcon_nic_interrupt(struct efhw_nic *nic)
{
- volatile uint32_t *syserr_ptr =
+ uint32_t *syserr_ptr =
(uint32_t *) efhw_iopage_ptr(&nic->irq_iobuff);
int handled = 0;
int done_ack = 0;
void
falcon_nic_ipfilter_ctor(struct efhw_nic *nic)
{
- if (nic->devtype.variant == 'B' && nic->fpga_version)
+ if (nic->devtype.variant >= 'B' && nic->fpga_version)
nic->filter_tbl_size = 8 * 1024;
else
nic->filter_tbl_size = 16 * 1024;
static inline void falcon_nic_buffer_table_lazy_commit(struct efhw_nic *nic)
{
-#if defined(__ci_ul_driver__)
- if (!(nic->options & NIC_OPT_EFTEST))
- return;
-#endif
/* Do nothing if operating in synchronous mode. */
if (!nic->irq_handler)
FALCON_LOCK_DECL;
int count = 0, rc = 0;
-#if defined(__ci_ul_driver__)
- if (!(nic->options & NIC_OPT_EFTEST))
- return;
-#endif
/* We can be called here early days */
if (!nic->irq_handler)
* upcalls into the core driver */
struct efhw_ev_handler handler;
memset(&handler, 0, sizeof(handler));
- nic->evq[FALCON_EVQ_NONIRQ].ev_handlers = &handler;
- rc = efhw_keventq_poll(nic, &nic->evq[FALCON_EVQ_NONIRQ]);
- nic->evq[FALCON_EVQ_NONIRQ].ev_handlers = NULL;
+ nic->non_interrupting_evq.ev_handlers = &handler;
+ rc = efhw_keventq_poll(nic, &nic->non_interrupting_evq);
+ nic->non_interrupting_evq.ev_handlers = NULL;
if (rc < 0) {
EFHW_ERR("%s: poll ERROR (%d:%d) ***** ",
an event or DMA queue */
FALCON_LOCK_DECL;
-#if defined(__ci_ul_driver__)
- if (!(nic->options & NIC_OPT_EFTEST))
- return;
-#endif
/* Do nothing if operating in synchronous mode. */
if (!nic->irq_handler)
FALCON_LOCK_DECL;
uint64_t val_low64, val_high64;
uint64_t size, hwptr, swptr, val;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
/* Falcon requires 128 bit atomic access for this register */
{
FALCON_LOCK_DECL;
uint64_t val_low64, val_high64;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
EFHW_WARN("Recovering stuck TXQ[%d]", dmaq);
__falcon_really_flush_tx_dma_channel(struct efhw_nic *nic, uint dmaq)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
uint val;
EFHW_BUILD_ASSERT(TX_FLUSH_DESCQ_REG_KER_OFST ==
FALCON_LOCK_DECL;
uint64_t val_low64, val_high64;
uint64_t enable, flush_pending;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
/* Falcon requires 128 bit atomic access for this register */
__falcon_really_flush_rx_dma_channel(struct efhw_nic *nic, uint dmaq)
{
FALCON_LOCK_DECL;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
uint val;
EFHW_BUILD_ASSERT(RX_FLUSH_DESCQ_REG_KER_OFST ==
{
FALCON_LOCK_DECL;
uint64_t val;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
ulong offset = falcon_dma_rx_q_offset(nic, dmaq);
/* Falcon requires 128 bit atomic access for this register */
case TX_DESCQ_FLS_DONE_EV_DECODE:
EFHW_TRACE("TX[%d] flushed",
(int)FALCON_EVENT_TX_FLUSH_Q_ID(ev));
-#if !defined(__ci_ul_driver__)
efhw_handle_txdmaq_flushed(nic, h, ev);
-#endif
break;
case RX_DESCQ_FLS_DONE_EV_DECODE:
EFHW_TRACE("RX[%d] flushed",
(int)FALCON_EVENT_TX_FLUSH_Q_ID(ev));
-#if !defined(__ci_ul_driver__)
efhw_handle_rxdmaq_flushed(nic, h, ev);
-#endif
break;
case SRM_UPD_DONE_EV_DECODE:
break;
case EVQ_INIT_DONE_EV_DECODE:
- EFHW_TRACE("EVQ INIT");
+ EFHW_TRACE("%sEVQ INIT", "");
break;
case WAKE_UP_EV_DECODE:
- EFHW_TRACE("WAKE UP");
+ EFHW_TRACE("%sWAKE UP", "");
efhw_handle_wakeup_event(nic, h, ev);
break;
case TIMER_EV_DECODE:
- EFHW_TRACE("TIMER");
+ EFHW_TRACE("%sTIMER", "");
efhw_handle_timeout_event(nic, h, ev);
break;
/*! Get MAC current address - i.e not necessarily the one in the EEPROM */
static inline void mentormac_get_mac_addr(struct efhw_nic *nic)
{
- efhw_ioaddr_t mac_kva;
+ volatile char __iomem *mac_kva;
uint val1, val2;
MENTOR_MAC_ASSERT_VALID();
static inline void GDACT10mac_get_mac_addr(struct efhw_nic *nic)
{
uint val1, val2;
- efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
+ volatile char __iomem *efhw_kva = EFHW_KVA(nic);
FALCON_LOCK_DECL;
GDACT10_MAC_ASSERT_VALID();
EFRM_ASSERT(frs);
EFRM_ASSERT(frs->pt);
- if (efrm_nic_table.a_nic->devtype.variant >= 'B') {
+ if (efrm_nic_tablep->a_nic->devtype.variant >= 'B') {
/* Scatter setting must match the setting for
* the corresponding RX queue */
if (!(frs->pt->flags & EFHW_VI_JUMBO_EN))
static inline size_t iobsrs_size(int no_pages)
{
return offsetof(struct iobufset_resource, bufs) +
- no_pages * sizeof(efhw_iopage_t);
+ no_pages * sizeof(struct efhw_iopage);
}
void efrm_iobufset_resource_free(struct iobufset_resource *rs)
efrm_buffer_table_free(&rs->buf_tbl_alloc);
/* see comment on call to efhw_iopage_alloc in the alloc routine above
- for discussion on use of efrm_nic_table.a_nic here */
- EFRM_ASSERT(efrm_nic_table.a_nic);
+ for discussion on use of efrm_nic_tablep->a_nic here */
+ EFRM_ASSERT(efrm_nic_tablep->a_nic);
if (rs->order == 0) {
for (i = 0; i < rs->n_bufs; ++i)
- efhw_iopage_free(efrm_nic_table.a_nic, &rs->bufs[i]);
+ efhw_iopage_free(efrm_nic_tablep->a_nic, &rs->bufs[i]);
} else {
/* it is important that this is executed in increasing page
* order because some implementations of
* efhw_iopages_init_from_iopage() assume this */
for (i = 0; i < rs->n_bufs;
i += rs->pages_per_contiguous_chunk) {
- efhw_iopages_t iopages;
+ struct efhw_iopages iopages;
efhw_iopages_init_from_iopage(&iopages, &rs->bufs[i],
rs->order);
- efhw_iopages_free(efrm_nic_table.a_nic, &iopages);
+ efhw_iopages_free(efrm_nic_tablep->a_nic, &iopages);
}
}
EFRM_RESOURCE_ASSERT_VALID(&vi_evq->rs, 0);
EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_evq->rs.rs_handle) ==
EFRM_RESOURCE_VI);
- EFRM_ASSERT(efrm_nic_table.a_nic);
+ EFRM_ASSERT(efrm_nic_tablep->a_nic);
/* allocate the resource data structure. */
object_size = iobsrs_size(n_pages);
/* due to bug2426 we have to specify a NIC when
* allocating a DMAable page, which is a bit messy.
* For now we assume that if the page is suitable
- * (e.g. DMAable) by one nic (efrm_nic_table.a_nic),
+ * (e.g. DMAable) by one nic (efrm_nic_tablep->a_nic),
* it is suitable for all NICs.
* XXX I bet that breaks in Solaris.
*/
- rc = efhw_iopage_alloc(efrm_nic_table.a_nic,
+ rc = efhw_iopage_alloc(efrm_nic_tablep->a_nic,
&iobrs->bufs[i]);
if (rc < 0) {
EFRM_ERR("%s: failed (rc %d) to allocate "
}
}
} else {
- efhw_iopages_t iopages;
+ struct efhw_iopages iopages;
unsigned j;
/* make sure iobufs are in a known state in case we don't
for (i = 0; i < iobrs->n_bufs;
i += iobrs->pages_per_contiguous_chunk) {
- rc = efhw_iopages_alloc(efrm_nic_table.a_nic,
+ rc = efhw_iopages_alloc(efrm_nic_tablep->a_nic,
&iopages, iobrs->order);
if (rc < 0) {
EFRM_ERR("%s: failed (rc %d) to allocate "
i = iobrs->n_bufs;
fail4:
/* see comment on call to efhw_iopage_alloc above for a discussion
- * on use of efrm_nic_table.a_nic here */
+ * on use of efrm_nic_tablep->a_nic here */
if (iobrs->order == 0) {
while (i--) {
- efhw_iopage_t *page = &iobrs->bufs[i];
- efhw_iopage_free(efrm_nic_table.a_nic, page);
+ struct efhw_iopage *page = &iobrs->bufs[i];
+ efhw_iopage_free(efrm_nic_tablep->a_nic, page);
}
} else {
unsigned int j;
for (j = 0; j < i; j += iobrs->pages_per_contiguous_chunk) {
- efhw_iopages_t iopages;
+ struct efhw_iopages iopages;
EFRM_ASSERT(j % iobrs->pages_per_contiguous_chunk
== 0);
efhw_iopages_init_from_iopage(&iopages,
&iobrs->bufs[j],
iobrs->order);
- efhw_iopages_free(efrm_nic_table.a_nic, &iopages);
+ efhw_iopages_free(efrm_nic_tablep->a_nic, &iopages);
}
}
efrm_vi_resource_release(iobrs->evq);
#include "kernel_compat.h"
#include <ci/efhw/common_sysdep.h> /* for dma_addr_t */
-int efhw_iopage_alloc(struct efhw_nic *nic, efhw_iopage_t *p)
+int efhw_iopage_alloc(struct efhw_nic *nic, struct efhw_iopage *p)
{
struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
dma_addr_t handle;
return 0;
}
-void efhw_iopage_free(struct efhw_nic *nic, efhw_iopage_t *p)
+void efhw_iopage_free(struct efhw_nic *nic, struct efhw_iopage *p)
{
struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
EFHW_ASSERT(efhw_page_is_valid(&p->p));
efhw_iopage_ptr(p), p->dma_addr);
}
-int efhw_iopages_alloc(struct efhw_nic *nic, efhw_iopages_t *p, unsigned order)
+int
+efhw_iopages_alloc(struct efhw_nic *nic, struct efhw_iopages *p,
+ unsigned order)
{
unsigned bytes = 1u << (order + PAGE_SHIFT);
struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
return 0;
}
-void efhw_iopages_free(struct efhw_nic *nic, efhw_iopages_t *p)
+void efhw_iopages_free(struct efhw_nic *nic, struct efhw_iopages *p)
{
unsigned bytes = 1u << (p->order + PAGE_SHIFT);
struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
-/* I admit that it's a bit ugly going straight to the field, but it
- * seems easiest given that get_page followed by put_page on a page
- * with PG_reserved set will increment the ref count on 2.6.14 and
- * below, but not 2.6.15. Also, RedHat have hidden put_page_testzero
- * in a header file which produces warnings when compiled. This
- * doesn't agree with our use of -Werror.
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5)
-# define page_count_field(pg) ((pg)->count)
-#else
-# define page_count_field(pg) ((pg)->_count)
-#endif
-
-#define inc_page_count(page) atomic_inc(&page_count_field(page))
-#define dec_page_count(page) atomic_dec(&page_count_field(page))
-
/* Bug 5531: set_page_count doesn't work if the new page count is an
* expression. */
#define ci_set_page_count(page, n) set_page_count(page, (n))
* Hon - The PG_compound bit is honoured by munmap.
*
* OS A B C D
- * 2.4.18 NotDef NU resv NotHon
- * 2.4.29 NotDef NU resv NotHon
- * 2.4.20-31.9 rhl9 NotDef NU resv NotHon
- *
- * 2.4.21-4.EL rhel3 Comp NU resv Hon
- * 2.4.21-15.EL rhel3 Comp NU resv Hon
- * 2.4.21-32.EL rhel3 Comp NU resv Hon
- * 2.4.21-40.EL rhel3 Comp NU resv Hon
- *
* 2.6.0 Comp NU resv NotHon
*
* 2.6.5-7.97 sles9 OptInv NU resv NotHon
* to one on all the sub-pages. The SLES 9 range is affected, as
* are kernels built without CONFIG_MMU defined.
*
- * Possible strategies for multi-page allocations:
+ * On all kernel versions, we just allocate a compound page.
+ * Reference counting should then work on the whole allocation but
+ * is broken by bug/feature D (above) on old kernels.
*
- * EFRM_MMAP_USE_COMPOUND
- * 1. Allocate a compound page. Reference counting should then work
- * on the whole allocation. This is a good theory, but is broken
- * by bug/feature D (above).
- *
- * EFRM_MMAP_USE_SPLIT
- * 2. Convert the multi-page allocation to many single page
- * allocations. This involves incrementing the reference counts
- * and clearing PG_compound on all the pages (including the
- * first). The references should be released _after_ calling
- * pci_free_consistent so that that call doesn't release the
- * memory.
- *
- * EFRM_MMAP_USE_INCREMENT
- * 3. Increment the reference count on all the pages after
- * allocating and decrement them again before freeing. This gets
- * round the zero reference count problem. It doesn't handle the
- * case where someone else is holding a reference to one of our
- * pages when we free the pages, but we think VM_IO stops this
- * from happening.
+ * EFRM_MMAP_USE_SPLIT
+ *
+ * On old kernels, we convert the multi-page allocation to many
+ * single page allocations. This involves incrementing the
+ * reference counts and clearing PG_compound on all the pages
+ * (including the first). Given that the affected kernels are
+ * inconsistent about the initial reference counts on high order
+ * page allocations, we reinitialise the reference counts instead
+ * of incrementing them. The references are released _after_
+ * calling pci_free_consistent so that that call doesn't release
+ * the memory.
*/
-/* Should we use strategy 1? This can be forced on us by the OS. */
-#if defined(PG_compound)
-#define EFRM_MMAP_USE_COMPOUND 1
-#else
-#define EFRM_MMAP_USE_COMPOUND 0
-#endif
-
-/* Should we use strategy 2? This can be used even if strategy 1 is
- * used. */
+/* Should we split each multi-page allocation into single page
+ * allocations? */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
#define EFRM_MMAP_USE_SPLIT 1
#else
#define EFRM_MMAP_USE_SPLIT 0
#endif
-/* Should we use strategy 3? There's no point doing this if either
- * strategy 1 or strategy 2 is used. */
-#if !EFRM_MMAP_USE_COMPOUND && !EFRM_MMAP_USE_SPLIT
-#error "We shouldn't have to use this strategy."
-#define EFRM_MMAP_USE_INCREMENT 1
-#else
-#define EFRM_MMAP_USE_INCREMENT 0
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-#define EFRM_MMAP_RESET_REFCNT 1
-#else
-#define EFRM_MMAP_RESET_REFCNT 0
-#endif
-
/* NB. 2.6.17 has renamed SetPageCompound to __SetPageCompound and
* ClearPageCompound to __ClearPageCompound. */
-#if ((defined(PageCompound) != defined(PG_compound)) || \
- (defined(SetPageCompound) != defined(PG_compound) && \
- defined(__SetPageCompound) != defined(PG_compound)) || \
- (defined(ClearPageCompound) != defined(PG_compound) && \
- defined(__ClearPageCompound) != defined(PG_compound)) || \
- (defined(__GFP_COMP) && !defined(PG_compound)))
+#if ( ( !defined(PageCompound) ) || \
+ ( !defined(PG_compound) ) || \
+ ( !defined(SetPageCompound) && !defined(__SetPageCompound) ) || \
+ ( !defined(ClearPageCompound) && !defined(__ClearPageCompound) ) )
#error Mismatch of defined page-flags.
#endif
-extern int use_pci_alloc; /* Use pci_alloc_consistent to alloc iopages */
-
/****************************************************************************
*
* allocate a buffer suitable for DMA to/from the NIC
{
unsigned pfn = __pa(kva) >> PAGE_SHIFT;
struct page *start_pg = pfn_to_page(pfn);
-#if !defined(NDEBUG) || EFRM_MMAP_USE_SPLIT
+#if EFRM_MMAP_USE_SPLIT
struct page *end_pg = start_pg + (1 << order);
struct page *pg;
#endif
/* Check the page count and PG_compound bit. */
#ifndef NDEBUG
-# if defined(PG_compound)
- EFRM_ASSERT(PageCompound(start_pg) == EFRM_MMAP_USE_COMPOUND);
-# endif
+ EFRM_ASSERT(PageCompound(start_pg) == 1);
EFRM_ASSERT(page_count(start_pg) == 1);
-
- {
- /* Some kernels have the page count field hold (ref_count-1)
- * rather than (ref_count). This is so that decrementing the
- * reference count to "zero" causes the internal value to change
- * from 0 to -1 which sets the carry flag. Other kernels store
- * the real reference count value in the obvious way. We handle
- * this here by reading the reference count field of the first
- * page, which is always 1. */
- int pg_count_zero;
- pg_count_zero = atomic_read(&page_count_field(start_pg)) - 1;
- for (pg = start_pg + 1; pg < end_pg; pg++) {
- int pg_count;
-# if defined(PG_compound)
- EFRM_ASSERT(PageCompound(pg) == EFRM_MMAP_USE_COMPOUND);
-# endif
-
- /* Bug 5450: Some kernels initialise the page count
- * to one for pages other than the first and some
- * leave it at zero. We allow either behaviour
- * here, but disallow anything strange. Newer
- * kernels only define set_page_count in an
- * internal header file, so we have to make do with
- * incrementing and decrementing the reference
- * count. Fortunately, those kernels don't set the
- * reference count to one on all the pages. */
- pg_count = atomic_read(&page_count_field(pg));
-# if EFRM_MMAP_RESET_REFCNT
- if (pg_count != pg_count_zero)
- EFRM_ASSERT(pg_count == pg_count_zero + 1);
-# else
- EFRM_ASSERT(pg_count == pg_count_zero);
-# endif
- }
- }
#endif
/* Split the multi-page allocation if necessary. */
for (pg = start_pg; pg < end_pg; pg++) {
/* This is no longer a compound page. */
-# if EFRM_MMAP_USE_COMPOUND
ClearPageCompound(pg);
EFRM_ASSERT(PageCompound(pg) == 0);
-# endif
# ifndef NDEBUG
{
int pg_count = page_count(pg);
/* Bug 5450: The page count can be zero or one here. */
- if (pg == start_pg) {
+ if (pg == start_pg)
+ EFRM_ASSERT(pg_count == 1);
+ else if (pg_count != 0)
EFRM_ASSERT(pg_count == 1);
- } else {
-# if EFRM_MMAP_RESET_REFCNT
- if (pg_count != 0)
- EFRM_ASSERT(pg_count == 1);
-# else
- EFRM_ASSERT(pg_count == 0);
-# endif
- }
}
# endif
- /* Get a reference which will be released after the pages have
- * been passed back to pci_free_consistent. */
-# if EFRM_MMAP_RESET_REFCNT
- /* Bug 5450: Reset the reference count since the count might
- * already be 1. */
+ /* Get a reference which will be released after the
+ * pages have been passed back to pci_free_consistent.
+ * Reset the page count instead of incrementing it
+ * because old kernels are inconsistent about
+ * initialising the reference count. */
ci_set_page_count(pg, (pg == start_pg) ? 2 : 1);
-# else
- get_page(pg);
-# endif
}
#endif
-
- /* Fudge the reference count if necessary. */
-#if EFRM_MMAP_USE_INCREMENT
- for (pg = start_pg; pg < end_pg; pg++)
- inc_page_count(pg);
-#endif
}
static inline void pci_mmap_pages_hack_before_free(caddr_t kva, unsigned order)
{
-#if EFRM_MMAP_USE_INCREMENT || !defined(NDEBUG)
+#if !defined(NDEBUG)
/* Drop the references taken in pci_mmap_pages_hack_after_alloc */
unsigned pfn = __pa(kva) >> PAGE_SHIFT;
struct page *start_pg = pfn_to_page(pfn);
if (PageReserved(start_pg))
return;
-# if EFRM_MMAP_USE_INCREMENT
- for (pg = start_pg; pg < end_pg; pg++)
- dec_page_count(pg);
-# endif
-
-#if !defined(NDEBUG)
EFRM_ASSERT(page_count(start_pg) == 1+EFRM_MMAP_USE_SPLIT);
-# if EFRM_MMAP_USE_COMPOUND && !EFRM_MMAP_USE_SPLIT
+# if !EFRM_MMAP_USE_SPLIT
for (pg = start_pg; pg < end_pg; pg++)
EFRM_ASSERT(PageCompound(pg));
# else
EFRM_ASSERT(page_count(pg) == exp_pg_count);
}
# endif
-#endif
#endif
}
void *efrm_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, int flag)
{
- struct pci_dev *pci_dev;
void *ptr;
unsigned order;
- EFRM_IOMMU_DECL;
order = __ffs(size/PAGE_SIZE);
EFRM_ASSERT(size == (PAGE_SIZE<<order));
+ /* Can't take a spinlock here since the allocation can
+ * block. */
+ ptr = dma_alloc_coherent(dev, size, dma_addr, flag);
+ if (ptr == NULL)
+ return ptr;
+
/* NB. The caller may well set __GFP_COMP. However we can't
* rely on this working on older kernels. 2.6.9 only acts on
* __GFP_COMP if CONFIG_HUGETLB_PAGE is defined. If the flag
* did have an effect then PG_compound will be set on the
* pages. */
-
- if (use_pci_alloc) {
- /* Can't take a spinlock here since the allocation can
- * block. */
- ptr = dma_alloc_coherent(dev, size, dma_addr, flag);
- if (ptr == NULL)
- return ptr;
- } else {
-#ifdef CONFIG_SWIOTLB /* BUG1340 */
- if (swiotlb) {
- EFRM_ERR("%s: This kernel is using DMA bounce "
- "buffers. Please upgrade kernel to "
- "linux2.6 or reduce the amount of RAM "
- "with mem=XXX.", __FUNCTION__);
- return NULL;
- }
-#endif
- ptr = (void *)__get_free_pages(flag, order);
-
- if (ptr == NULL)
- return NULL;
-
- EFRM_IOMMU_LOCK();
- pci_dev = container_of(dev, struct pci_dev, dev);
- *dma_addr = pci_map_single(pci_dev, ptr, size,
- PCI_DMA_BIDIRECTIONAL);
- EFRM_IOMMU_UNLOCK();
- if (pci_dma_mapping_error(*dma_addr)) {
- free_pages((unsigned long)ptr, order);
- return NULL;
- }
- }
-
#ifndef CONFIG_IA64
pci_mmap_pages_hack_after_alloc(ptr, order);
#endif
void efrm_dma_free_coherent(struct device *dev, size_t size,
void *ptr, dma_addr_t dma_addr)
{
- struct pci_dev *pci_dev;
unsigned order;
- EFRM_IOMMU_DECL;
order = __ffs(size/PAGE_SIZE);
EFRM_ASSERT(size == (PAGE_SIZE<<order));
#ifndef CONFIG_IA64
pci_mmap_pages_hack_before_free(ptr, order);
#endif
- if (use_pci_alloc) {
- EFRM_IOMMU_LOCK();
- dma_free_coherent(dev, size, ptr, dma_addr);
- EFRM_IOMMU_UNLOCK();
- } else {
- pci_dev = container_of(dev, struct pci_dev, dev);
- EFRM_IOMMU_LOCK();
- efrm_pci_unmap_single(pci_dev, dma_addr, size,
- PCI_DMA_BIDIRECTIONAL);
- EFRM_IOMMU_UNLOCK();
-
- free_pages((unsigned long)ptr, order);
- }
+ dma_free_coherent(dev, size, ptr, dma_addr);
#ifndef CONFIG_IA64
pci_mmap_pages_hack_after_free(ptr, order);
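With the use_pci_alloc switch gone, callers see a plain dma_alloc_coherent()-style contract. A hedged usage sketch (pdev is a placeholder pci_dev; the size must stay a power-of-two multiple of PAGE_SIZE to satisfy the assertions above):

dma_addr_t dma;
void *buf = efrm_dma_alloc_coherent(&pdev->dev, 2 * PAGE_SIZE, &dma, GFP_KERNEL);

if (buf != NULL) {
	/* ... program 'dma' into the NIC, access 'buf' from the CPU ... */
	efrm_dma_free_coherent(&pdev->dev, 2 * PAGE_SIZE, buf, dma);
}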
#define DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H
#include <linux/version.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/pci.h>
/********* wait_for_completion_timeout() ********************/
-#include <linux/sched.h>
/* RHEL_RELEASE_CODE from linux/version.h is only defined for 2.6.9-55EL
* UTS_RELEASE is unfortunately unusable
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)) && \
!defined(RHEL_RELEASE_CODE)
-static inline unsigned long fastcall __sched
+static inline unsigned long fastcall
efrm_wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
might_sleep();
#endif
-/********* pci_map_*() ********************/
+/********* io mapping ********************/
-#include <linux/pci.h>
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,9)
-/* Bug 4560: Some kernels leak IOMMU entries under heavy load. Use a
- * spinlock to serialise access where possible to alleviate the
- * problem.
- *
- * NB. This is duplicated in the net driver. Please keep in sync. */
-#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)) && \
- defined(__x86_64__) && defined(CONFIG_SMP))
-
-#define EFRM_HAVE_IOMMU_LOCK 1
+ #ifndef __iomem
+ #define __iomem
+ #endif
-#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5)) && \
- defined(CONFIG_SUSE_KERNEL))
-#define EFRM_NEED_ALTERNATE_MAX_PFN 1
-#endif
+ static inline void efrm_iounmap(volatile void __iomem *addr)
+ {
+ iounmap((void __iomem *)addr);
+ }
+ #define iounmap(arg) efrm_iounmap(arg)
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
-#if defined(CONFIG_GART_IOMMU)
-#define EFRM_NO_IOMMU no_iommu
-#else
-#define EFRM_NO_IOMMU 1
-#endif
-#else
-#define EFRM_NO_IOMMU 0
#endif
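A hedged illustration of what the wrapper buys (bar_phys_addr and bar_len are placeholders, not from the patch): a mapping held as a volatile __iomem pointer can be unmapped without a cast at every call site, even on kernels whose iounmap() lacks those qualifiers.

volatile char __iomem *regs = ioremap_nocache(bar_phys_addr, bar_len);

if (regs != NULL) {
	/* ... register accesses ... */
	iounmap(regs);	/* resolves to efrm_iounmap() on <= 2.6.9 */
}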
-/* Set to 0 if we should never use the lock. Set to 1 if we should
- * automatically determine if we should use the lock. Set to 2 if we
- * should always use the lock. */
-extern unsigned int efx_use_iommu_lock;
-/* Defined in the net driver. */
-extern spinlock_t efx_iommu_lock;
-/* Non-zero if there is a card which needs the lock. */
-extern int efrm_need_iommu_lock;
-
-/* The IRQ state is needed if the lock is being used. The flag is
- * cached to ensure that every lock is followed by an unlock, even
- * if the global flag changes in the middle of the operation. */
-
-#define EFRM_IOMMU_DECL \
- unsigned long efx_iommu_irq_state = 0; \
- int efx_iommu_using_lock;
-#define EFRM_IOMMU_LOCK() \
- do { \
- efx_iommu_using_lock = (efx_use_iommu_lock && \
- (efrm_need_iommu_lock || \
- efx_use_iommu_lock >= 2)); \
- if (efx_iommu_using_lock) \
- spin_lock_irqsave(&efx_iommu_lock, efx_iommu_irq_state);\
- } while (0)
-#define EFRM_IOMMU_UNLOCK() \
- do { \
- if (efx_iommu_using_lock) \
- spin_unlock_irqrestore(&efx_iommu_lock, \
- efx_iommu_irq_state); \
- } while (0)
-
-#else /* defined(__x86_64__) && defined(CONFIG_SMP) */
-
-#define EFRM_HAVE_IOMMU_LOCK 0
-#define EFRM_IOMMU_DECL
-#define EFRM_IOMMU_LOCK() do {} while (0)
-#define EFRM_IOMMU_UNLOCK() do {} while (0)
-
-#endif
-
-static inline dma_addr_t efrm_pci_map_single(struct pci_dev *hwdev, void *ptr,
- size_t size, int direction)
-{
- dma_addr_t dma_addr;
- EFRM_IOMMU_DECL;
-
- EFRM_IOMMU_LOCK();
- dma_addr = pci_map_single(hwdev, ptr, size, direction);
- EFRM_IOMMU_UNLOCK();
-
- return dma_addr;
-}
-
-static inline void efrm_pci_unmap_single(struct pci_dev *hwdev,
- dma_addr_t dma_addr, size_t size,
- int direction)
-{
- EFRM_IOMMU_DECL;
-
- EFRM_IOMMU_LOCK();
- pci_unmap_single(hwdev, dma_addr, size, direction);
- EFRM_IOMMU_UNLOCK();
-}
-
-static inline dma_addr_t efrm_pci_map_page(struct pci_dev *hwdev,
- struct page *page,
- unsigned long offset, size_t size,
- int direction)
-{
- dma_addr_t dma_addr;
- EFRM_IOMMU_DECL;
- EFRM_IOMMU_LOCK();
- dma_addr = pci_map_page(hwdev, page, offset, size, direction);
- EFRM_IOMMU_UNLOCK();
-
- return dma_addr;
-}
-
-static inline void efrm_pci_unmap_page(struct pci_dev *hwdev,
- dma_addr_t dma_addr, size_t size,
- int direction)
-{
- EFRM_IOMMU_DECL;
-
- EFRM_IOMMU_LOCK();
- pci_unmap_page(hwdev, dma_addr, size, direction);
- EFRM_IOMMU_UNLOCK();
-}
+/********* Memory allocation *************/
#ifndef IN_KERNEL_COMPAT_C
# ifndef __GFP_COMP
# endif
#endif
+
+/********* pci_map_*() ********************/
+
extern void *efrm_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, int flag);
efrm_dma_free_coherent(&hwdev->dev, size, ptr, dma_addr);
}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8))
+static inline void efrm_pci_disable_msi(struct pci_dev *dev) {}
+#undef pci_disable_msi
+#define pci_disable_msi efrm_pci_disable_msi
+#endif
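The stub keeps MSI teardown paths unconditional; a one-line hedged illustration (pdev is a placeholder):

pci_disable_msi(pdev);	/* no-op stub on kernels before 2.6.8 */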
+
#endif /* DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H */
extern int
efrm_nic_add(struct pci_dev *dev, unsigned int opts, const uint8_t *mac_addr,
struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock,
- int bt_min, int bt_max, const struct vi_resource_dimensions *);
+ int bt_min, int bt_max, int non_irq_evq,
+ const struct vi_resource_dimensions *);
extern void efrm_nic_del(struct linux_efhw_nic *);
return 0;
}
break;
+ case 0x7777:
+ dt->arch = EFHW_ARCH_FALCON;
+ dt->variant = 'C';
+ switch (class_revision) {
+ case 0:
+ dt->revision = 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
default:
return 0;
}
void efhw_nic_init(struct efhw_nic *nic, unsigned flags, unsigned options,
struct efhw_device_type dev_type)
{
- int i;
-
nic->devtype = dev_type;
nic->flags = flags;
nic->options = options;
nic->reg_lock = &nic->the_reg_lock;
nic->mtu = 1500 + ETH_HLEN;
- for (i = 0; i < EFHW_KEVENTQ_MAX; i++)
- nic->irq_unit[i] = EFHW_IRQ_UNIT_UNUSED;
+ nic->irq_unit = EFHW_IRQ_UNIT_UNUSED;
switch (nic->devtype.arch) {
case EFHW_ARCH_FALCON:
nic->ctr_ap_bar = FALCON_S_CTR_AP_BAR;
break;
case 'B':
+ case 'C':
nic->flags |= NIC_FLAG_NO_INTERRUPT;
nic->ctr_ap_bar = FALCON_P_CTR_AP_BAR;
break;
void efhw_nic_close_interrupts(struct efhw_nic *nic)
{
- int i;
-
EFHW_ASSERT(nic);
if (!efhw_nic_have_hw(nic))
return;
EFHW_ASSERT(efhw_nic_have_hw(nic));
- for (i = 0; i < EFHW_KEVENTQ_MAX; i++) {
- if (nic->irq_unit[i] != EFHW_IRQ_UNIT_UNUSED)
- efhw_nic_interrupt_disable(nic, i);
- }
+ if (nic->irq_unit != EFHW_IRQ_UNIT_UNUSED)
+ efhw_nic_interrupt_disable(nic);
}
void efhw_nic_dtor(struct efhw_nic *nic)
/* Check that we have functional units because the software only
* driver doesn't initialise anything hardware related any more */
-#ifndef __ci_ul_driver__
/* close interrupts is called first because the act of deregistering
the driver could cause this driver to change from master to slave
and hence the implicit interrupt mappings would be wrong */
efhw_nic_close_hardware(nic);
}
EFHW_TRACE("%s: functional units ... done", __FUNCTION__);
-#endif
/* destroy event queues */
EFHW_TRACE("%s: event queues ... ", __FUNCTION__);
-#ifndef __ci_ul_driver__
- {
- int i;
- for (i = 0; i < EFHW_KEVENTQ_MAX; ++i)
- if (nic->evq[i].evq_mask)
- efhw_keventq_dtor(nic, &nic->evq[i]);
- }
-#endif
+ if (nic->interrupting_evq.evq_mask)
+ efhw_keventq_dtor(nic, &nic->interrupting_evq);
+ if (nic->non_interrupting_evq.evq_mask)
+ efhw_keventq_dtor(nic, &nic->non_interrupting_evq);
EFHW_TRACE("%s: event queues ... done", __FUNCTION__);
#include <ci/efrm/vi_resource_private.h>
#include <ci/efrm/driver_private.h>
-#if EFRM_HAVE_IOMMU_LOCK
-#ifdef EFRM_NEED_ALTERNATE_MAX_PFN
-extern unsigned long blk_max_pfn;
-#define max_pfn blk_max_pfn
-#else
-#include <linux/bootmem.h>
-#endif
-#endif
-
MODULE_AUTHOR("Solarflare Communications");
MODULE_LICENSE("GPL");
.dmaq_flushed_fn = efrm_handle_dmaq_flushed,
};
-#if EFRM_HAVE_IOMMU_LOCK
-int efrm_need_iommu_lock;
-EXPORT_SYMBOL(efrm_need_iommu_lock);
-#endif
-
const int max_hardware_init_repeats = 10;
/*--------------------------------------------------------------------
*
*--------------------------------------------------------------------*/
/* See docs/notes/pci_alloc_consistent */
-int use_pci_alloc = 1; /* Use pci_alloc_consistent to alloc iopages */
static int do_irq = 1; /* enable interrupts */
#if defined(CONFIG_X86_XEN)
MODULE_PARM_DESC(irq_moderation, "IRQ moderation in usec");
module_param(nic_options, int, S_IRUGO);
MODULE_PARM_DESC(nic_options, "Nic options -- see efhw_types.h");
-module_param(use_pci_alloc, int, S_IRUGO);
-MODULE_PARM_DESC(use_pci_alloc, "Use pci_alloc_consistent to alloc iopages "
- "(autodetected by kernel version)");
module_param(efx_vi_eventq_size, int, S_IRUGO);
MODULE_PARM_DESC(efx_vi_eventq_size,
"Size of event queue allocated by efx_vi library");
{
int capacity;
int page_order;
- int i;
int rc;
/* Choose queue size. */
} else if (capacity & nic->evq_sizes)
break;
}
- for (i = 0; i < EFHW_KEVENTQ_MAX; ++i) {
- nic->evq[i].hw.capacity = capacity;
- nic->evq[i].hw.buf_tbl_alloc.base = (unsigned)-1;
- }
+
+ nic->interrupting_evq.hw.capacity = capacity;
+ nic->interrupting_evq.hw.buf_tbl_alloc.base = (unsigned)-1;
+
+ nic->non_interrupting_evq.hw.capacity = capacity;
+ nic->non_interrupting_evq.hw.buf_tbl_alloc.base = (unsigned)-1;
/* allocate buffer table entries to map onto the iobuffer */
page_order = get_order(capacity * sizeof(efhw_event_t));
if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) {
rc = efrm_buffer_table_alloc(page_order,
- &nic->evq[0].hw.buf_tbl_alloc);
+ &nic->interrupting_evq
+ .hw.buf_tbl_alloc);
if (rc < 0) {
EFRM_WARN
("%s: failed (%d) to alloc %d buffer table entries",
}
}
rc = efrm_buffer_table_alloc(page_order,
- &nic->evq[FALCON_EVQ_NONIRQ].hw.
+ &nic->non_interrupting_evq.hw.
buf_tbl_alloc);
if (rc < 0) {
EFRM_WARN
*/
static void efrm_nic_buffer_table_free(struct efhw_nic *nic)
{
- int i;
- for (i = 0; i <= FALCON_EVQ_NONIRQ; i++)
- if (nic->evq[i].hw.buf_tbl_alloc.base != (unsigned)-1)
- efrm_buffer_table_free(&nic->evq[i].hw.buf_tbl_alloc);
-
+ if (nic->interrupting_evq.hw.buf_tbl_alloc.base != (unsigned)-1)
+ efrm_buffer_table_free(&nic->interrupting_evq.hw
+ .buf_tbl_alloc);
+ if (nic->non_interrupting_evq.hw.buf_tbl_alloc.base != (unsigned)-1)
+ efrm_buffer_table_free(&nic->non_interrupting_evq
+ .hw.buf_tbl_alloc);
}
static int iomap_bar(struct linux_efhw_nic *lnic, size_t len)
{
- efhw_ioaddr_t ioaddr;
+ volatile char __iomem *ioaddr;
ioaddr = ioremap_nocache(lnic->ctr_ap_pci_addr, len);
if (ioaddr == 0)
void linux_efrm_nic_dtor(struct linux_efhw_nic *lnic)
{
struct efhw_nic *nic = &lnic->nic;
- efhw_ioaddr_t bar_ioaddr = nic->bar_ioaddr;
+ volatile char __iomem *bar_ioaddr = nic->bar_ioaddr;
efhw_nic_dtor(nic);
- efrm_nic_buffer_table_free(nic);
-
/* Unmap the bar. */
EFRM_ASSERT(bar_ioaddr);
iounmap(bar_ioaddr);
EFRM_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT));
- efhw_keventq_poll(nic, &nic->evq[0]);
+ efhw_keventq_poll(nic, &nic->interrupting_evq);
EFRM_TRACE("tasklet complete");
}
int
efrm_nic_add(struct pci_dev *dev, unsigned flags, const uint8_t *mac_addr,
struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock,
- int bt_min, int bt_max,
+ int bt_min, int bt_lim, int non_irq_evq,
const struct vi_resource_dimensions *res_dim)
{
struct linux_efhw_nic *lnic = NULL;
pci_name(dev) ? pci_name(dev) : "?", dev->irq);
/* Ensure that we have room for the new adapter-structure. */
- if (efrm_nic_table.nic_count == EFHW_MAX_NR_DEVS) {
+ if (efrm_nic_tablep->nic_count == EFHW_MAX_NR_DEVS) {
EFRM_WARN("%s: WARNING: too many devices", __FUNCTION__);
rc = -ENOMEM;
goto failed;
}
if (n_nics_probed == 0) {
- rc = efrm_resources_init(res_dim, bt_min, bt_max);
+ rc = efrm_resources_init(res_dim, bt_min, bt_lim);
if (rc != 0)
goto failed;
resources_init = 1;
rc = efrm_driver_register_nic(nic, nic_index++);
if (rc < 0) {
EFRM_ERR("%s: cannot register nic %d with nic error code %d",
- __FUNCTION__, efrm_nic_table.nic_count, rc);
+ __FUNCTION__, efrm_nic_tablep->nic_count, rc);
goto failed;
}
registered_nic = 1;
we want to make sure that we maximise our chances, so we
loop a few times until all is good. */
for (count = 0; count < max_hardware_init_repeats; count++) {
- rc = efhw_nic_init_hardware(nic, &ev_handler, mac_addr);
+ rc = efhw_nic_init_hardware(nic, &ev_handler, mac_addr,
+ non_irq_evq);
if (rc >= 0)
break;
EFRM_ERR("Interrupt initialisation failed (%d)", rc);
goto failed;
}
- efhw_nic_set_interrupt_moderation(nic, 0, irq_moderation);
- efhw_nic_interrupt_enable(nic, 0);
+ efhw_nic_set_interrupt_moderation(nic, irq_moderation);
+ efhw_nic_interrupt_enable(nic);
}
EFRM_TRACE("interrupts are %sregistered", do_irq ? "" : "not ");
-#if EFRM_HAVE_IOMMU_LOCK
- /* Bug 4560: We need the lock if there is memory which cannot be
- * accessed by the card and there is an IOMMU to access it. In that
- * case, the kernel will use the IOMMU to access the high memory. */
- if ((dev->dma_mask >> PAGE_SHIFT) < max_pfn && !EFRM_NO_IOMMU)
- efrm_need_iommu_lock = 1;
-#endif
-
*lnic_out = lnic;
EFRM_ASSERT(rc == 0);
++n_nics_probed;
EFRM_TRACE("%s:", __FUNCTION__);
EFRM_ASSERT(nic);
+ efrm_nic_buffer_table_free(nic);
+
efrm_driver_unregister_nic(nic);
/*
int
efrm_resources_init(const struct vi_resource_dimensions *vi_res_dim,
- int buffer_table_min, int buffer_table_max)
+ int buffer_table_min, int buffer_table_lim)
{
int i, rc;
- rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_max);
+ rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_lim);
if (rc != 0)
return rc;
int instance;
int rc;
- if (efrm_nic_table.a_nic == NULL) /* ?? FIXME: surely not right */
+ if (efrm_nic_tablep->a_nic == NULL) /* ?? FIXME: surely not right */
return -ENODEV;
spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
 /* Falcon A1 RX phys addr weirdness. */
- if (efrm_nic_table.a_nic->devtype.variant == 'A' &&
+ if (efrm_nic_tablep->a_nic->devtype.variant == 'A' &&
(vi_flags & EFHW_VI_RX_PHYS_ADDR_EN)) {
if (vi_flags & EFHW_VI_JUMBO_EN) {
/* Falcon-A cannot do phys + scatter. */
irq_flags_t lock_flags;
struct kfifo *instances;
- if (efrm_nic_table.a_nic == NULL) /* ?? FIXME: surely not right */
+ if (efrm_nic_tablep->a_nic == NULL) /* ?? FIXME: surely not right */
return;
- if (efrm_nic_table.a_nic->devtype.variant == 'A' &&
+ if (efrm_nic_tablep->a_nic->devtype.variant == 'A' &&
instance == FALCON_A1_ISCSI_DMAQ) {
EFRM_ASSERT(efrm_vi_manager->iscsi_dmaq_instance_is_free ==
false);
efrm_vi_rm_init_evq(struct vi_resource *virs, int nic_index)
{
int rc;
- struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
+ struct efhw_nic *nic = efrm_nic_tablep->nic[nic_index];
int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
struct eventq_resource_hardware *evq_hw =
&virs->nic_info[nic_index].evq_pages;
static inline void
efrm_vi_rm_fini_evq(struct vi_resource *virs, int nic_index)
{
- struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
+ struct efhw_nic *nic = efrm_nic_tablep->nic[nic_index];
int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
struct vi_resource_nic_info *nic_info = &virs->nic_info[nic_index];
int queue_type, int init, int nic_index)
{
int rc;
- struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
+ struct efhw_nic *nic = efrm_nic_tablep->nic[nic_index];
int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
uint32_t buf_bytes;
struct vi_resource *evq_virs;
struct vi_resource_nic_info *nic_info = &virs->nic_info[nic_index];
int page_order;
uint32_t num_pages;
- efhw_iopages_t *iobuff;
+ struct efhw_iopages *iobuff;
#endif
if (!init)
if (virs->dmaq_capacity[queue_type] == 0)
return 0;
+ /* Ensure TX pacing turned off -- queue flush doesn't reset this. */
+ if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX)
+ falcon_nic_pace(nic, instance, 0);
+
/* No need to disable the queue here. Nobody is using it anyway. */
fail_evq:
struct efhw_nic *nic;
int next_i;
EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index));
- nic = efrm_nic_table.nic[nic_index];
+ nic = efrm_nic_tablep->nic[nic_index];
EFRM_ASSERT(nic);
next_i = ((current_ptr / sizeof(efhw_event_t)) &
(virs->evq_capacity - 1));
void efrm_eventq_reset(struct vi_resource *virs, int nic_index)
{
- struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
+ struct efhw_nic *nic = efrm_nic_tablep->nic[nic_index];
int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
EFRM_ASSERT(virs->evq_capacity != 0);
}
}
-void efrm_handle_wakeup_event(struct efhw_nic *nic, efhw_event_t *ev)
+void efrm_handle_wakeup_event(struct efhw_nic *nic, unsigned instance)
{
- efrm_eventq_do_callback(nic,
- (unsigned int)FALCON_EVENT_WAKE_EVQ_ID(ev),
- false);
+ efrm_eventq_do_callback(nic, instance, false);
}
-void efrm_handle_timeout_event(struct efhw_nic *nic, efhw_event_t *ev)
+void efrm_handle_timeout_event(struct efhw_nic *nic, unsigned instance)
{
- efrm_eventq_do_callback(nic,
- (unsigned int)FALCON_EVENT_WAKE_EVQ_ID(ev),
- true);
+ efrm_eventq_do_callback(nic, instance, true);
+}
+
+void efrm_handle_sram_event(struct efhw_nic *nic)
+{
+ if (nic->buf_commit_outstanding > 0)
+ nic->buf_commit_outstanding--;
}
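The wakeup and timeout handlers now take a pre-decoded queue instance instead of the raw event. A hedged sketch of a caller, reusing the FALCON_EVENT_WAKE_EVQ_ID() decode that appears in the removed lines above:

unsigned instance = (unsigned)FALCON_EVENT_WAKE_EVQ_ID(ev);
efrm_handle_wakeup_event(nic, instance);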
}
void
-efrm_handle_dmaq_flushed(struct efhw_nic *flush_nic, int instance,
+efrm_handle_dmaq_flushed(struct efhw_nic *flush_nic, unsigned instance,
int rx_flush)
{
irq_flags_t lock_flags;
struct list_head flush_pending;
irq_flags_t lock_flags;
int rc, i, n_evqs;
- unsigned dmaq_min, dmaq_max;
+ unsigned dmaq_min, dmaq_lim;
EFRM_ASSERT(rm_in_out);
EFRM_ASSERT(dims);
EFRM_NOTICE("vi_resource_manager: evq_int=%u-%u evq_timer=%u-%u",
- dims->evq_int_min, dims->evq_int_max,
- dims->evq_timer_min, dims->evq_timer_max);
+ dims->evq_int_min, dims->evq_int_lim,
+ dims->evq_timer_min, dims->evq_timer_lim);
EFRM_NOTICE("vi_resource_manager: rxq=%u-%u txq=%u-%u",
- dims->rxq_min, dims->rxq_max,
- dims->txq_min, dims->txq_max);
+ dims->rxq_min, dims->rxq_lim,
+ dims->txq_min, dims->txq_lim);
efrm_vi_manager = kmalloc(sizeof(*efrm_vi_manager), GFP_KERNEL);
if (efrm_vi_manager == NULL) {
efrm_vi_manager->iscsi_dmaq_instance_is_free = true;
dmaq_min = max(dims->rxq_min, dims->txq_min);
- dmaq_max = min(dims->rxq_max, dims->txq_max);
+ dmaq_lim = min(dims->rxq_lim, dims->txq_lim);
efrm_vi_manager->with_timer_base =
max(dmaq_min, dims->evq_timer_min);
efrm_vi_manager->with_timer_limit =
- min(dmaq_max, dims->evq_timer_max);
+ min(dmaq_lim, dims->evq_timer_lim);
rc = efrm_kfifo_id_ctor(&efrm_vi_manager->instances_with_timer,
efrm_vi_manager->with_timer_base,
efrm_vi_manager->with_timer_limit,
efrm_vi_manager->with_interrupt_base =
max(dmaq_min, dims->evq_int_min);
efrm_vi_manager->with_interrupt_limit =
- min(dmaq_max, dims->evq_int_max);
+ min(dmaq_lim, dims->evq_int_lim);
efrm_vi_manager->with_interrupt_limit =
max(efrm_vi_manager->with_interrupt_limit,
efrm_vi_manager->with_interrupt_base);
/* Turn off all power rails */
out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
+ (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
/* Disable port 1 outputs on IO expander */
cfg = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, EFX_BYTE);
+ (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
/* Disable port 0 outputs on IO expander */
cfg = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, EFX_BYTE);
+ (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
/* Clear any over-temperature alert */
- (void) efx_i2c_read(i2c, MAX6647, RSL, &in, EFX_BYTE);
+ (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
}
static int sfe4001_check_hw(struct efx_nic *efx)
if (falcon_xaui_link_ok(efx))
return 0;
- rc = efx_i2c_read(i2c, PCA9539, P1_IN, &status, EFX_BYTE);
+ rc = efx_i2c_read(i2c, PCA9539, P1_IN, &status, 1);
status &= ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN));
/* We know we can read from the IO expander because we did
/* Set DSP over-temperature alert threshold */
EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
rc = efx_i2c_write(i2c, MAX6647, WLHO,
- &xgphy_max_temperature, EFX_BYTE);
+ &xgphy_max_temperature, 1);
if (rc)
goto fail1;
/* Read it back and verify */
- rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, EFX_BYTE);
+ rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1);
if (rc)
goto fail1;
if (in != xgphy_max_temperature) {
}
/* Clear any previous over-temperature alert */
- rc = efx_i2c_read(i2c, MAX6647, RSL, &in, EFX_BYTE);
+ rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
if (rc)
goto fail1;
/* Enable port 0 and port 1 outputs on IO expander */
cfg = 0x00;
- rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, EFX_BYTE);
+ rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
if (rc)
goto fail1;
cfg = 0xff & ~(1 << P1_SPARE_LBN);
- rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, EFX_BYTE);
+ rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
if (rc)
goto fail2;
out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
(0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
(0 << P0_EN_1V0X_LBN));
- rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
+ rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
if (rc)
goto fail3;
(1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
(1 << P0_X_TRST_LBN));
- rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
+ rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
if (rc)
goto fail3;
msleep(10);
/* Turn on 1V power rail */
out &= ~(1 << P0_EN_1V0X_LBN);
- rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
+ rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
if (rc)
goto fail3;
schedule_timeout_uninterruptible(HZ);
/* Check DSP is powered */
- rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, EFX_BYTE);
+ rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1);
if (rc)
goto fail3;
if (in & (1 << P1_AFE_PWD_LBN))
fail3:
/* Turn off all power rails */
out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
+ (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
/* Disable port 1 outputs on IO expander */
out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, EFX_BYTE);
+ (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
fail2:
/* Disable port 0 outputs on IO expander */
out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, EFX_BYTE);
+ (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
fail1:
return rc;
}
link_ok = phy_up && tenxpress_link_ok(efx, 1);
- if (link_ok != efx->link_up) {
- efx->link_up = link_ok;
+ if (link_ok != efx->link_up)
efx->mac_op->fake_phy_event(efx);
- }
/* Nothing to check if we've already shut down the PHY */
if (!phy_up)
soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
PCS_SOFT_RST2_REG);
- /* Modify => put in reset */
+ /* Put in reset */
test_select &= ~(1 << CLK312_EN_LBN);
mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
PCS_TEST_SELECT_REG, test_select);
PCS_CLOCK_CTRL_REG, clk_ctrl);
udelay(10);
- /* Modify => remove reset */
+ /* Remove reset */
clk_ctrl |= (1 << PLL312_RST_N_LBN);
mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
PCS_CLOCK_CTRL_REG, clk_ctrl);
EFX_TRACE(efx, "stop TX queue\n");
atomic_inc(&efx->netif_stop_count);
- if (likely(efx->net_dev_registered))
- netif_stop_queue(efx->net_dev);
+ netif_stop_queue(efx->net_dev);
spin_unlock_bh(&efx->netif_stop_lock);
}
if (atomic_dec_and_lock(&efx->netif_stop_count,
&efx->netif_stop_lock)) {
EFX_TRACE(efx, "waking TX queue\n");
- if (likely(efx->net_dev_registered))
- netif_wake_queue(efx->net_dev);
+ netif_wake_queue(efx->net_dev);
spin_unlock(&efx->netif_stop_lock);
}
local_bh_enable();
}
+static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
+{
+ if (buffer->unmap_len) {
+ struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ if (buffer->unmap_single)
+ pci_unmap_single(pci_dev, buffer->unmap_addr,
+ buffer->unmap_len, PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pci_dev, buffer->unmap_addr,
+ buffer->unmap_len, PCI_DMA_TODEVICE);
+ buffer->unmap_len = 0;
+ buffer->unmap_single = 0;
+ }
+
+ if (buffer->skb) {
+ dev_kfree_skb_any((struct sk_buff *) buffer->skb);
+ buffer->skb = NULL;
+ EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
+ "complete\n", tx_queue->queue, tx_queue->read_count);
+ }
+}
+
+
/*
* Add a socket buffer to a TX queue
*
--tx_queue->insert_count;
insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
buffer = &tx_queue->buffer[insert_ptr];
- if (buffer->unmap_len) {
- if (buffer->unmap_single)
- pci_unmap_single(pci_dev, buffer->unmap_addr,
- buffer->unmap_len,
- PCI_DMA_TODEVICE);
- else
- pci_unmap_page(pci_dev, buffer->unmap_addr,
- buffer->unmap_len,
- PCI_DMA_TODEVICE);
- }
- buffer->unmap_len = 0;
+ efx_dequeue_buffer(tx_queue, buffer);
buffer->len = 0;
}
static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index)
{
- struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
- struct efx_tx_buffer *buffer;
+ struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
+ unsigned int mask = efx->type->txd_ring_mask;
- /* Calculate the stopping point. Doing the check this way
- * avoids wrongly completing every buffer in the ring if we
- * get called twice with the same index. (Hardware should
- * never do this, since it can't complete that many buffers in
- * one go.)
- */
- stop_index = (index + 1) & tx_queue->efx->type->txd_ring_mask;
- read_ptr = tx_queue->read_count & tx_queue->efx->type->txd_ring_mask;
+ stop_index = (index + 1) & mask;
+ read_ptr = tx_queue->read_count & mask;
while (read_ptr != stop_index) {
- buffer = &tx_queue->buffer[read_ptr];
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (unlikely(buffer->len == 0)) {
EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
"completion id %x\n", tx_queue->queue,
read_ptr);
- atomic_inc(&tx_queue->efx->errors.spurious_tx);
- /* Don't reset */
- } else {
- if (buffer->unmap_len) {
- if (buffer->unmap_single)
- pci_unmap_single(pci_dev,
- buffer->unmap_addr,
- buffer->unmap_len,
- PCI_DMA_TODEVICE);
- else
- pci_unmap_page(pci_dev,
- buffer->unmap_addr,
- buffer->unmap_len,
- PCI_DMA_TODEVICE);
- buffer->unmap_single = 0;
- buffer->unmap_len = 0;
- }
- if (buffer->skb) {
- dev_kfree_skb_any((struct sk_buff *)
- buffer->skb);
- buffer->skb = NULL;
- EFX_TRACE(tx_queue->efx, "TX queue %d "
- "transmission id %x complete\n",
- tx_queue->queue, read_ptr);
- }
- buffer->continuation = 1;
- buffer->len = 0;
+ atomic_inc(&efx->errors.spurious_tx);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
}
+
+ efx_dequeue_buffer(tx_queue, buffer);
+ buffer->continuation = 1;
+ buffer->len = 0;
+
++tx_queue->read_count;
- read_ptr = (tx_queue->read_count &
- tx_queue->efx->type->txd_ring_mask);
+ read_ptr = tx_queue->read_count & mask;
}
}
return rc;
}
-#if defined(EFX_USE_FASTCALL)
void fastcall efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
-#else
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
-#endif
{
- unsigned long flags __attribute__ ((unused));
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
if (unlikely(tx_queue->stopped)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
- /* If the port is stopped and the net_dev isn't
- * registered, then the caller must be performing
- * flow control manually */
- if (unlikely(!efx->net_dev_registered))
- return;
+ EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
/* Do this under netif_tx_lock(), to avoid racing
* with efx_xmit(). */
{
EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
- ASSERT_RTNL();
-
/* Initialise fields */
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
- unsigned int last_index, mask;
- if (tx_queue->buffer) {
- /* Free any buffers left in the ring */
- mask = tx_queue->efx->type->txd_ring_mask;
- last_index = (tx_queue->insert_count - 1) & mask;
- EFX_LOG(tx_queue->efx, "Will dequeue up to 0x%x from 0x%x\n",
- last_index, tx_queue->read_count & mask);
- efx_dequeue_buffers(tx_queue, last_index);
+ struct efx_tx_buffer *buffer;
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ buffer = &tx_queue->buffer[tx_queue->read_count &
+ tx_queue->efx->type->txd_ring_mask];
+ efx_dequeue_buffer(tx_queue, buffer);
+ buffer->continuation = 1;
+ buffer->len = 0;
+
+ ++tx_queue->read_count;
}
}
{
EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
- ASSERT_RTNL();
-
/* Flush TX queue, remove descriptor ring */
falcon_fini_tx(tx_queue);
int link_up = txc43128_phy_read_link(efx);
/* Simulate a PHY event if link state has changed */
- if (link_up != efx->link_up) {
- efx->link_up = link_up;
+ if (link_up != efx->link_up)
efx->mac_op->fake_phy_event(efx);
- } else if (EFX_WORKAROUND_10934(efx)) {
+ else if (EFX_WORKAROUND_10934(efx)) {
if (link_up || (efx->loopback_mode != LOOPBACK_NONE))
data->bug10934_timer = jiffies;
else {
#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
#define EFX_WORKAROUND_FALCON_B0FPGA(efx) \
- (FALCON_REV(efx) == FALCON_REV_B0 && !(efx)->is_asic)
+ (FALCON_REV(efx) >= FALCON_REV_B0 && !(efx)->is_asic)
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
/* Flush events can take a very long time to appear */
#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
+/* 10Xpress is sensitive to unstable XAUI sync when going into loopback */
+#define EFX_WORKAROUND_11667 EFX_WORKAROUND_ALWAYS
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
phy_data->tx_disabled = efx->tx_disabled;
rc = xfp_reset_phy(efx);
- if (rc < 0)
- goto fail;
EFX_INFO(efx, "XFP: PHY init %s.\n",
rc ? "failed" : "successful");
+ if (rc < 0)
+ goto fail;
+
return 0;
fail:
int rc = 0;
int link_up = xfp_link_ok(efx);
/* Simulate a PHY event if link state has changed */
- if (link_up != efx->link_up) {
- efx->link_up = link_up;
+ if (link_up != efx->link_up)
efx->mac_op->fake_phy_event(efx);
- }
rc = efx->board_info.monitor(efx);
if (rc) {
}
struct efx_phy_operations falcon_xfp_phy_ops = {
- .init = xfp_phy_init,
- .reconfigure = xfp_phy_reconfigure,
- .check_hw = xfp_phy_check_hw,
- .fini = xfp_phy_fini,
- .clear_interrupt = xfp_phy_clear_interrupt,
- .reset_xaui = efx_port_dummy_op_void,
- .mmds = XFP_REQUIRED_DEVS,
- .loopbacks = XFP_LOOPBACKS,
- /* No loopback appears to be reliable enough for self-test
- * operation. So don't do it. */
+ .init = xfp_phy_init,
+ .reconfigure = xfp_phy_reconfigure,
+ .check_hw = xfp_phy_check_hw,
+ .fini = xfp_phy_fini,
+ .clear_interrupt = xfp_phy_clear_interrupt,
+ .reset_xaui = efx_port_dummy_op_void,
+ .mmds = XFP_REQUIRED_DEVS,
+ .loopbacks = XFP_LOOPBACKS,
.startup_loopback = LOOPBACK_PCS,
};
struct net_device *net_dev = (struct net_device *)ptr;
struct netback_accel *bend;
- if ((event == NETDEV_UP) || (event == NETDEV_DOWN)) {
+ if ((event == NETDEV_UP) ||
+ (event == NETDEV_DOWN) ||
+ (event == NETDEV_CHANGE)) {
mutex_lock(&bend_list_mutex);
bend = bend_list;
while (bend != NULL) {
if (bend->shared_page == NULL)
goto next;
- if (bend->net_dev->ifindex == net_dev->ifindex)
- netback_accel_set_interface_state
- (bend, event == NETDEV_UP);
+ if (bend->net_dev->ifindex == net_dev->ifindex) {
+ int ok;
+ if (event == NETDEV_CHANGE)
+ ok = (netif_carrier_ok(net_dev) &&
+ (net_dev->flags & IFF_UP));
+ else
+ ok = (netif_carrier_ok(net_dev) &&
+ (event == NETDEV_UP));
+ netback_accel_set_interface_state(bend, ok);
+ }
next:
mutex_unlock(&bend->bend_mutex);
#endif
rc = netback_accel_init_fwd();
+ if (rc != 0)
+ goto fail0;
- if (rc == 0)
- netback_accel_debugfs_init();
+ netback_accel_debugfs_init();
- if (rc == 0)
- rc = netback_accel_sf_init();
+ rc = netback_accel_sf_init();
+ if (rc != 0)
+ goto fail1;
- if (rc == 0)
- rc = register_netdevice_notifier
- (&netback_accel_netdev_notifier);
+ rc = register_netdevice_notifier
+ (&netback_accel_netdev_notifier);
+ if (rc != 0)
+ goto fail2;
- /*
- * What if no device was found, shouldn't we clean up stuff
- * we've allocated for acceleration subsystem?
- */
+ return 0;
+ fail2:
+ netback_accel_sf_shutdown();
+ fail1:
+ netback_accel_debugfs_fini();
+ netback_accel_shutdown_fwd();
+ fail0:
+#ifdef EFX_GCOV
+ gcov_provider_fini(THIS_MODULE);
+#endif
return rc;
}
*/
static int efx_device_to_efab_nic_index(struct efx_dl_device *efx_dl_dev)
{
- int i;
-
- for (i = 0; i < EFHW_MAX_NR_DEVS; i++) {
- struct efhw_nic *nic = efrm_nic_table.nic[i];
+ int i, rc = -1;
+ struct efhw_nic *nic;
- /*
- * It's possible for the nic structure to have not
- * been initialised if the resource driver failed its
- * driverlink probe
- */
- if (nic == NULL || nic->net_driver_dev == NULL)
- continue;
-
- /* Work out if these are talking about the same NIC */
- if (nic->net_driver_dev->pci_dev == efx_dl_dev->pci_dev)
- return i;
+ EFRM_FOR_EACH_NIC(i, nic) {
+ if (nic != NULL && nic->net_driver_dev != NULL &&
+ nic->net_driver_dev->pci_dev == efx_dl_dev->pci_dev)
+ rc = i;
}
- return -1;
+ return rc;
}
return rc;
}
- if (res_mdata.version != 0)
- return -EPROTO;
-
hwinfo->nic_arch = res_mdata.nic_arch;
hwinfo->nic_variant = res_mdata.nic_variant;
hwinfo->nic_revision = res_mdata.nic_revision;
}
VPRINTK("Passing txdmaq page pfn %lx\n", txdmaq_pfn);
- accel_hw_priv->txdmaq_gnt = hwinfo->txdmaq_gnt =
- net_accel_grant_page(bend->hdev_data, pfn_to_mfn(txdmaq_pfn),
- 0);
+ rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(txdmaq_pfn), 0);
+ if (rc < 0)
+ goto fail0;
+ accel_hw_priv->txdmaq_gnt = hwinfo->txdmaq_gnt = rc;
VPRINTK("Passing rxdmaq page pfn %lx\n", rxdmaq_pfn);
- accel_hw_priv->rxdmaq_gnt = hwinfo->rxdmaq_gnt =
- net_accel_grant_page(bend->hdev_data, pfn_to_mfn(rxdmaq_pfn),
- 0);
+ rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(rxdmaq_pfn), 0);
+ if (rc < 0)
+ goto fail1;
+ accel_hw_priv->rxdmaq_gnt = hwinfo->rxdmaq_gnt = rc;
VPRINTK("Passing doorbell page mfn %x\n", hwinfo->doorbell_mfn);
/* Make the relevant H/W pages mappable by the far end */
- accel_hw_priv->doorbell_gnt = hwinfo->doorbell_gnt =
- net_accel_grant_page(bend->hdev_data, hwinfo->doorbell_mfn, 1);
+ rc = net_accel_grant_page(bend->hdev_data, hwinfo->doorbell_mfn, 1);
+ if (rc < 0)
+ goto fail2;
+ accel_hw_priv->doorbell_gnt = hwinfo->doorbell_gnt = rc;
/* Now do the same for the memory pages */
/* Convert the page + length we got back for the evq to grants. */
for (i = 0; i < accel_hw_priv->evq_npages; i++) {
- accel_hw_priv->evq_mem_gnts[i] = hwinfo->evq_mem_gnts[i] =
- net_accel_grant_page(bend->hdev_data, pfn_to_mfn(pfn), 0);
+ rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(pfn), 0);
+ if (rc < 0)
+ goto fail3;
+ accel_hw_priv->evq_mem_gnts[i] = hwinfo->evq_mem_gnts[i] = rc;
+
VPRINTK("Got grant %u for evq pfn %x\n", hwinfo->evq_mem_gnts[i],
pfn);
pfn++;
}
return 0;
+
+ fail3:
+ for (i = i - 1; i >= 0; i--) {
+ ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i], bend->far_end);
+ }
+ ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
+ fail2:
+ ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
+ fail1:
+ ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
+ fail0:
+ return rc;
}
static int ef_bend_hwinfo_falcon_a(struct netback_accel *bend,
struct net_accel_hw_falcon_a *hwinfo)
{
- int rc;
+ int rc, i;
struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
if ((rc = ef_bend_hwinfo_falcon_common(bend, &hwinfo->common)) != 0)
hwinfo->common.evq_rptr);
rc = net_accel_grant_page(bend->hdev_data,
hwinfo->common.evq_rptr >> PAGE_SHIFT, 0);
- if (rc < 0)
+ if (rc < 0) {
+ /* Undo ef_bend_hwinfo_falcon_common() */
+ ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
+ ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
+ ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
+ for (i = 0; i < accel_hw_priv->evq_npages; i++) {
+ ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i],
+ bend->far_end);
+ }
return rc;
+ }
accel_hw_priv->evq_rptr_gnt = hwinfo->evq_rptr_gnt = rc;
VPRINTK("evq_rptr_gnt got %d\n", hwinfo->evq_rptr_gnt);
/* Initialise the shared page(s) used for comms */
net_accel_msg_init_page(bend->shared_page, PAGE_SIZE,
- bend->net_dev->flags & IFF_UP);
+ (bend->net_dev->flags & IFF_UP) &&
+ (netif_carrier_ok(bend->net_dev)));
msgs_per_queue = (PAGE_SIZE/2) / sizeof(struct net_accel_msg);
/*! Constants for the type field in efx_vi_hw_resource */
#define EFX_VI_HW_RESOURCE_TXDMAQ 0x0 /* PFN of TX DMA Q */
#define EFX_VI_HW_RESOURCE_RXDMAQ 0x1 /* PFN of RX DMA Q */
-#define EFX_VI_HW_RESOURCE_TXBELL 0x2 /* PFN of TX Doorbell (EF1) */
-#define EFX_VI_HW_RESOURCE_RXBELL 0x3 /* PFN of RX Doorbell (EF1) */
#define EFX_VI_HW_RESOURCE_EVQTIMER 0x4 /* Address of event q timer */
/* Address of event q pointer (EF1) */
* Metadata concerning the list of hardware resource mappings
*/
struct efx_vi_hw_resource_metadata {
- int version;
int evq_order;
int evq_offs;
int evq_capacity;
uint32_t a;
uint32_t b;
} opaque;
- struct {
- uint32_t code;
- uint32_t status;
- } ev1002;
} efhw_event_t;
/* Flags for TX/RX queues */
/* Linux kernel also does not provide PRIx32... Sigh. */
#define PRIx32 "x"
-#define PRIx64 "llx"
-
+
+#ifdef __ia64__
+# define PRIx64 "lx"
+#else
+# define PRIx64 "llx"
+#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
enum {
#include <ci/efhw/iopage_types.h>
#include <ci/efhw/sysdep.h>
-/*--------------------------------------------------------------------
- *
- * hardware limits used in the types
- *
- *--------------------------------------------------------------------*/
-
-#define EFHW_KEVENTQ_MAX 8
-
/*--------------------------------------------------------------------
*
* forward type declarations
struct eventq_resource_hardware {
/*!iobuffer allocated for eventq - can be larger than eventq */
- efhw_iopages_t iobuff;
+ struct efhw_iopages iobuff;
unsigned iobuff_off;
struct efhw_buffer_table_allocation buf_tbl_alloc;
int capacity; /*!< capacity of event queue */
*--------------------------------------------------------------------*/
struct efhw_keventq {
- volatile int lock;
+ int lock;
caddr_t evq_base;
int32_t evq_ptr;
uint32_t evq_mask;
/*! initialise all hardware functional units */
int (*init_hardware) (struct efhw_nic *nic,
struct efhw_ev_handler *,
- const uint8_t *mac_addr);
+ const uint8_t *mac_addr, int non_irq_evq);
/*-------------- Interrupt support ------------ */
*/
int (*interrupt) (struct efhw_nic *nic);
- /*! Enable given interrupt mask for the given IRQ unit */
- void (*interrupt_enable) (struct efhw_nic *nic, uint idx);
+ /*! Enable the interrupt */
+ void (*interrupt_enable) (struct efhw_nic *nic);
- /*! Disable given interrupt mask for the given IRQ unit */
- void (*interrupt_disable) (struct efhw_nic *nic, uint idx);
+ /*! Disable the interrupt */
+ void (*interrupt_disable) (struct efhw_nic *nic);
/*! Set interrupt moderation strategy for the given IRQ unit
** val is in usec
*/
void (*set_interrupt_moderation)(struct efhw_nic *nic,
- uint idx, uint val);
+ uint val);
/*-------------- Event support ------------ */
/*! */
struct efhw_nic {
- /*! zero base index in efrm_nic_table.nic array */
- volatile int index;
+ /*! zero base index in efrm_nic_tablep->nic array */
+ int index;
int ifindex; /*!< OS level nic index */
#ifdef HAS_NET_NAMESPACE
struct net *nd_net;
/* hardware resources */
/*! I/O address of the start of the bar */
- efhw_ioaddr_t bar_ioaddr;
+ volatile char __iomem *bar_ioaddr;
/*! Bar number of control aperture. */
unsigned ctr_ap_bar;
void (*irq_handler) (struct efhw_nic *, int unit);
/*! event queues per driver */
- struct efhw_keventq evq[EFHW_KEVENTQ_MAX];
+ struct efhw_keventq interrupting_evq;
/* for marking when we are not using an IRQ unit
- 0 is a valid offset to an IRQ unit on EF1! */
#define EFHW_IRQ_UNIT_UNUSED 0xffff
- /*! interrupt unit in use */
- unsigned int irq_unit[EFHW_KEVENTQ_MAX];
- efhw_iopage_t irq_iobuff; /*!< Falcon SYSERR interrupt */
+ /*! interrupt unit in use for the interrupting event queue */
+ unsigned int irq_unit;
+
+ struct efhw_keventq non_interrupting_evq;
+
+ struct efhw_iopage irq_iobuff; /*!< Falcon SYSERR interrupt */
/* The new driverlink infrastructure. */
struct efx_dl_device *net_driver_dev;
#error Unknown endianness
#endif
+#ifndef __iomem
+#define __iomem
+#endif
+
#ifndef mmiowb
#if defined(__i386__) || defined(__x86_64__)
#define mmiowb()
#endif
#endif
-typedef char *efhw_ioaddr_t;
-
#ifndef readq
-static inline uint64_t __readq(void __iomem *addr)
+static inline uint64_t __readq(volatile void __iomem *addr)
{
return *(volatile uint64_t *)addr;
}
#endif
#ifndef writeq
-static inline void __writeq(uint64_t v, void __iomem *addr)
+static inline void __writeq(uint64_t v, volatile void __iomem *addr)
{
*(volatile uint64_t *)addr = v;
}
* resource management for Xen backend, OpenOnload, etc
* (including support for SFE4001 10GBT NIC)
*
- * This file provides efhw_page_t and efhw_iopage_t for Linux kernel.
+ * This file provides struct efhw_page and struct efhw_iopage for Linux
+ * kernel.
*
* Copyright 2005-2007: Solarflare Communications Inc,
* 9501 Jeronimo Road, Suite 250,
#ifndef __CI_EFHW_IOPAGE_LINUX_H__
#define __CI_EFHW_IOPAGE_LINUX_H__
+#include <linux/version.h>
#include <linux/gfp.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
#include <linux/hardirq.h>
+#else
+#include <asm/hardirq.h>
+#endif
+#include <linux/errno.h>
#include <ci/efhw/debug.h>
/*--------------------------------------------------------------------
*
- * efhw_page_t: A single page of memory. Directly mapped in the driver,
- * and can be mapped to userlevel.
+ * struct efhw_page: A single page of memory. Directly mapped in the
+ * driver, and can be mapped to userlevel.
*
*--------------------------------------------------------------------*/
-typedef struct {
+struct efhw_page {
unsigned long kva;
-} efhw_page_t;
+};
-static inline int efhw_page_alloc(efhw_page_t *p)
+static inline int efhw_page_alloc(struct efhw_page *p)
{
p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
return p->kva ? 0 : -ENOMEM;
}
-static inline int efhw_page_alloc_zeroed(efhw_page_t *p)
+static inline int efhw_page_alloc_zeroed(struct efhw_page *p)
{
p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
return p->kva ? 0 : -ENOMEM;
}
-static inline void efhw_page_free(efhw_page_t *p)
+static inline void efhw_page_free(struct efhw_page *p)
{
free_page(p->kva);
EFHW_DO_DEBUG(memset(p, 0, sizeof(*p)));
}
-static inline char *efhw_page_ptr(efhw_page_t *p)
+static inline char *efhw_page_ptr(struct efhw_page *p)
{
return (char *)p->kva;
}
-static inline unsigned efhw_page_pfn(efhw_page_t *p)
+static inline unsigned efhw_page_pfn(struct efhw_page *p)
{
return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
}
-static inline void efhw_page_mark_invalid(efhw_page_t *p)
+static inline void efhw_page_mark_invalid(struct efhw_page *p)
{
p->kva = 0;
}
-static inline int efhw_page_is_valid(efhw_page_t *p)
+static inline int efhw_page_is_valid(struct efhw_page *p)
{
return p->kva != 0;
}
-static inline void efhw_page_init_from_va(efhw_page_t *p, void *va)
+static inline void efhw_page_init_from_va(struct efhw_page *p, void *va)
{
p->kva = (unsigned long)va;
}
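A hedged usage sketch of the renamed type, not part of the patch, showing the intended allocate/query/free lifecycle:

struct efhw_page pg;

if (efhw_page_alloc_zeroed(&pg) == 0) {
	unsigned pfn = efhw_page_pfn(&pg);	/* e.g. hand to a mapping layer */
	(void)pfn;
	efhw_page_free(&pg);
}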
/*--------------------------------------------------------------------
*
- * efhw_iopage_t: A single page of memory. Directly mapped in the driver,
+ * struct efhw_iopage: A single page of memory. Directly mapped in the driver,
* and can be mapped to userlevel. Can also be accessed by the NIC.
*
*--------------------------------------------------------------------*/
-typedef struct {
- efhw_page_t p;
+struct efhw_iopage {
+ struct efhw_page p;
dma_addr_t dma_addr;
-} efhw_iopage_t;
+};
-static inline dma_addr_t efhw_iopage_dma_addr(efhw_iopage_t *p)
+static inline dma_addr_t efhw_iopage_dma_addr(struct efhw_iopage *p)
{
return p->dma_addr;
}
/*--------------------------------------------------------------------
*
- * efhw_iopages_t: A set of pages that are contiguous in physical memory.
- * Directly mapped in the driver, and can be mapped to userlevel. Can also
- * be accessed by the NIC.
+ * struct efhw_iopages: A set of pages that are contiguous in physical
+ * memory. Directly mapped in the driver, and can be mapped to userlevel.
+ * Can also be accessed by the NIC.
*
* NB. The O/S may be unwilling to allocate many, or even any of these. So
* only use this type where the NIC really needs a physically contiguous
*
*--------------------------------------------------------------------*/
-typedef struct {
+struct efhw_iopages {
caddr_t kva;
unsigned order;
dma_addr_t dma_addr;
-} efhw_iopages_t;
+};
-static inline caddr_t efhw_iopages_ptr(efhw_iopages_t *p)
+static inline caddr_t efhw_iopages_ptr(struct efhw_iopages *p)
{
return p->kva;
}
-static inline unsigned efhw_iopages_pfn(efhw_iopages_t *p)
+static inline unsigned efhw_iopages_pfn(struct efhw_iopages *p)
{
return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
}
-static inline dma_addr_t efhw_iopages_dma_addr(efhw_iopages_t *p)
+static inline dma_addr_t efhw_iopages_dma_addr(struct efhw_iopages *p)
{
return p->dma_addr;
}
-static inline unsigned efhw_iopages_size(efhw_iopages_t *p)
+static inline unsigned efhw_iopages_size(struct efhw_iopages *p)
{
return 1u << (p->order + PAGE_SHIFT);
}
-/* efhw_iopage_t <-> efhw_iopages_t conversions for handling physically
- * contiguous allocations in iobufsets for iSCSI. This allows the
- * essential information about contiguous allocations from
- * efhw_iopages_alloc() to be saved away in the efhw_iopage_t array in an
- * iobufset. (Changing the iobufset resource to use a union type would
+/* struct efhw_iopage <-> struct efhw_iopages conversions for handling
+ * physically contiguous allocations in iobufsets for iSCSI. This allows
+ * the essential information about contiguous allocations from
+ * efhw_iopages_alloc() to be saved away in the struct efhw_iopage array in
+ * an iobufset. (Changing the iobufset resource to use a union type would
* involve a lot of code changes, and make the iobufset's metadata larger
* which could be bad as it's supposed to fit into a single page on some
* platforms.)
*/
static inline void
-efhw_iopage_init_from_iopages(efhw_iopage_t *iopage,
- efhw_iopages_t *iopages, unsigned pageno)
+efhw_iopage_init_from_iopages(struct efhw_iopage *iopage,
+ struct efhw_iopages *iopages, unsigned pageno)
{
iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages))
+ (pageno * PAGE_SIZE);
}
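A hedged sketch of the forward conversion; 'pages' is assumed to have come from efhw_iopages_alloc(), which is not shown in this hunk:

struct efhw_iopages pages;	/* assumed already allocated */
struct efhw_iopage page0;

efhw_iopage_init_from_iopages(&page0, &pages, 0);
/* page0 now refers to the first page of the contiguous block and can
 * be stored in an iobufset's per-page array. */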
static inline void
-efhw_iopages_init_from_iopage(efhw_iopages_t *iopages,
- efhw_iopage_t *iopage, unsigned order)
+efhw_iopages_init_from_iopage(struct efhw_iopages *iopages,
+ struct efhw_iopage *iopage, unsigned order)
{
iopages->kva = (caddr_t) efhw_iopage_ptr(iopage);
EFHW_ASSERT(iopages->kva);
};
/* Resource driver structures used by other drivers as well */
-extern struct efrm_nic_table efrm_nic_table;
+extern struct efrm_nic_table *efrm_nic_tablep;
static inline void efrm_nic_table_hold(void)
{
- atomic_inc(&efrm_nic_table.ref_count);
+ atomic_inc(&efrm_nic_tablep->ref_count);
}
static inline void efrm_nic_table_rele(void)
{
- atomic_dec(&efrm_nic_table.ref_count);
+ atomic_dec(&efrm_nic_tablep->ref_count);
}
static inline int efrm_nic_table_held(void)
{
- return (atomic_read(&efrm_nic_table.ref_count) != 0);
+ return (atomic_read(&efrm_nic_tablep->ref_count) != 0);
}
/* Run code block _x multiple times with variable nic set to each
for ((_nic_i) = (efrm_nic_table_hold(), 0); \
(_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
(_nic_i)++) \
- if (((_nic) = efrm_nic_table.nic[_nic_i]))
+ if (((_nic) = efrm_nic_tablep->nic[_nic_i]))
#define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic) \
for ((_i) = (efrm_nic_table_hold(), 0); \
(_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++(_i)) \
- if (((_nic) = efrm_nic_table.nic[_i]) && \
+ if (((_nic) = efrm_nic_tablep->nic[_i]) && \
efrm_nic_set_read((_set), (_i)))
#endif /* __CI_EFRM_NIC_TABLE_H__ */
#include <linux/workqueue.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
#include <linux/hardirq.h>
+#else
+#include <asm/hardirq.h>
+#endif
#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/completion.h>
#include <linux/log2.h>
#endif
+
+/********************************************************************
+ *
+ * Utility functions
+ *
+ ********************************************************************/
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+static inline unsigned long __attribute_const__ roundup_pow_of_two(unsigned long x)
+{
+ return (1UL << fls(x - 1));
+}
+#endif
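(For example, roundup_pow_of_two(1500) evaluates to 1UL << fls(1499) = 1UL << 11 = 2048 here, matching the helper provided by 2.6.9 and later kernels.)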
+
+
/********************************************************************
*
* List API
** must be at least 16 bytes long.
*/
+#if defined(__unix__) && ! defined(__KERNEL__)
+extern int ci_format_select_set(char* s, int len_s, int nfds, const fd_set*);
+extern int ci_format_select(char* s, int len_s,
+ int nfds, const fd_set* rds, const fd_set* wrs,
+ const fd_set* exs, struct timeval* timeout);
+#endif
+
/**********************************************************************
* Error checking.
ci_inline void ci_atomic32_add(volatile ci_uint32* p, ci_uint32 v)
{ __asm__ __volatile__("lock; addl %1, %0" : "+m" (*p) : "ir" (v)); }
+ci_inline void ci_atomic32_inc(volatile ci_uint32* p)
+{ __asm__ __volatile__("lock; incl %0" : "+m" (*p)); }
+
+ci_inline int ci_atomic32_dec_and_test(volatile ci_uint32* p) {
+ char r;
+ __asm__ __volatile__("lock; decl %0; sete %1" : "+m" (*p), "=qm" (r));
+ return r;
+}
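A hedged usage sketch of the new primitives; the names below are illustrative only and not part of the patch:

static volatile ci_uint32 example_refs;	/* illustrative refcount */

static void example_get(void) { ci_atomic32_inc(&example_refs); }

static void example_put(void)
{
	if (ci_atomic32_dec_and_test(&example_refs)) {
		/* last reference dropped: safe to free the object here */
	}
}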
+
#define ci_atomic_or(a, v) ci_atomic32_or ((ci_uint32*) &(a)->n, (v))
#define ci_atomic_and(a, v) ci_atomic32_and((ci_uint32*) &(a)->n, (v))
#define ci_atomic_add(a, v) ci_atomic32_add((ci_uint32*) &(a)->n, (v))
/** Number of frame trunc events seen on fastpath */
u64 fastpath_frm_trunc;
+ /** Number of rx discard (bad crc) events seen on fastpath */
+ u64 fastpath_crc_bad;
+
+ /** Number of rx discard (bad csum) events seen on fastpath */
+ u64 fastpath_csum_bad;
+
+ /** Number of rx discard (bad rights) events seen on fastpath */
+ u64 fastpath_rights_bad;
+
+ /** Number of rx discard ("other") events seen on fastpath */
+ u64 fastpath_discard_other;
+
/** Number of no rx descriptor trunc events seen on fastpath */
u64 rx_no_desc_trunc;
- /** The number of misc bad events (e.g. RX_DISCARD) processed. */
+ /** The number of misc bad events processed. */
u64 bad_event_count;
/** Number of events dealt with in poll loop */
struct dentry *fastpath_tx_completions;
struct dentry *fastpath_tx_pending_max;
struct dentry *fastpath_frm_trunc;
+ struct dentry *fastpath_crc_bad;
+ struct dentry *fastpath_csum_bad;
+ struct dentry *fastpath_rights_bad;
+ struct dentry *fastpath_discard_other;
struct dentry *rx_no_desc_trunc;
struct dentry *event_count;
struct dentry *bad_event_count;
vnic->dbfs.fastpath_frm_trunc = debugfs_create_u64
("fastpath_frm_trunc", S_IRUSR | S_IRGRP | S_IROTH,
vnic->dbfs_dir, &vnic->stats.fastpath_frm_trunc);
+ vnic->dbfs.fastpath_crc_bad = debugfs_create_u64
+ ("fastpath_crc_bad", S_IRUSR | S_IRGRP | S_IROTH,
+ vnic->dbfs_dir, &vnic->stats.fastpath_crc_bad);
+ vnic->dbfs.fastpath_csum_bad = debugfs_create_u64
+ ("fastpath_csum_bad", S_IRUSR | S_IRGRP | S_IROTH,
+ vnic->dbfs_dir, &vnic->stats.fastpath_csum_bad);
+ vnic->dbfs.fastpath_rights_bad = debugfs_create_u64
+ ("fastpath_rights_bad", S_IRUSR | S_IRGRP | S_IROTH,
+ vnic->dbfs_dir, &vnic->stats.fastpath_rights_bad);
+ vnic->dbfs.fastpath_discard_other = debugfs_create_u64
+ ("fastpath_discard_other", S_IRUSR | S_IRGRP | S_IROTH,
+ vnic->dbfs_dir, &vnic->stats.fastpath_discard_other);
vnic->dbfs.rx_no_desc_trunc = debugfs_create_u64
("rx_no_desc_trunc", S_IRUSR | S_IRGRP | S_IROTH,
vnic->dbfs_dir, &vnic->stats.rx_no_desc_trunc);
debugfs_remove(vnic->dbfs.event_count_since_irq);
debugfs_remove(vnic->dbfs.events_per_irq_max);
debugfs_remove(vnic->dbfs.fastpath_frm_trunc);
+ debugfs_remove(vnic->dbfs.fastpath_crc_bad);
+ debugfs_remove(vnic->dbfs.fastpath_csum_bad);
+ debugfs_remove(vnic->dbfs.fastpath_rights_bad);
+ debugfs_remove(vnic->dbfs.fastpath_discard_other);
debugfs_remove(vnic->dbfs.rx_no_desc_trunc);
debugfs_remove(vnic->dbfs.events_per_poll_max);
debugfs_remove(vnic->dbfs.events_per_poll_rx_max);
static void netfront_accel_interface_up(netfront_accel_vnic *vnic)
{
-
if (!vnic->backend_netdev_up) {
vnic->backend_netdev_up = 1;
static void netfront_accel_interface_down(netfront_accel_vnic *vnic)
{
-
if (vnic->backend_netdev_up) {
vnic->backend_netdev_up = 0;
if (rc < 0) {
EPRINTK("Xen netfront accelerator version mismatch\n");
- return -EINVAL;
+ goto fail;
}
if (rc > 0) {
* and accept certain subsets of previous versions
*/
EPRINTK("Xen netfront accelerator version mismatch\n");
- return -EINVAL;
+ goto fail;
}
return 0;
+
+ fail:
+ netfront_accel_debugfs_fini();
+ flush_workqueue(netfront_accel_workqueue);
+ destroy_workqueue(netfront_accel_workqueue);
+#ifdef EFX_GCOV
+ gcov_provider_fini(THIS_MODULE);
+#endif
+ return -EINVAL;
}
module_init(netfront_accel_init);
" buffer %d RX_DISCARD_OTHER q_id %d\n",
__FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
EF_EVENT_RX_DISCARD_Q_ID(*ev) );
- /*
- * Probably tail of packet for which error has
- * already been logged, so don't count in
- * stats
- */
+ NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_discard_other);
+ } else if (EF_EVENT_RX_DISCARD_TYPE(*ev) ==
+ EF_EVENT_RX_DISCARD_CSUM_BAD) {
+ DPRINTK("%s: " EF_EVENT_FMT
+ " buffer %d DISCARD CSUM_BAD q_id %d\n",
+ __FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
+ EF_EVENT_RX_DISCARD_Q_ID(*ev) );
+ NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_csum_bad);
+ } else if (EF_EVENT_RX_DISCARD_TYPE(*ev) ==
+ EF_EVENT_RX_DISCARD_CRC_BAD) {
+ DPRINTK("%s: " EF_EVENT_FMT
+ " buffer %d DISCARD CRC_BAD q_id %d\n",
+ __FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
+ EF_EVENT_RX_DISCARD_Q_ID(*ev) );
+ NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_crc_bad);
} else {
- EPRINTK("%s: " EF_EVENT_FMT
- " buffer %d rx discard type %d q_id %d\n",
+ BUG_ON(EF_EVENT_RX_DISCARD_TYPE(*ev) !=
+ EF_EVENT_RX_DISCARD_RIGHTS);
+ DPRINTK("%s: " EF_EVENT_FMT
+ " buffer %d DISCARD RIGHTS q_id %d\n",
__FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
- EF_EVENT_RX_DISCARD_TYPE(*ev),
EF_EVENT_RX_DISCARD_Q_ID(*ev) );
- NETFRONT_ACCEL_STATS_OP(++vnic->stats.bad_event_count);
+ NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_rights_bad);
}
}
#define EFVI_FALCON_EVQTIMER_DISABLE (EFVI_FALCON_TIMER_MODE_DIS << TIMER_MODE_LBN)
-/* ---- efhw_event_t helpers --- */
+/* ---- ef_vi_event helpers --- */
#define EFVI_FALCON_EVENT_CODE(evp) \
((evp)->u64 & EFVI_FALCON_EVENT_CODE_MASK)
/* Falcon constants */
#define TX_EV_DESC_PTR_LBN 0
-/**********************************************************************
- * ef_iobufset ********************************************************
- **********************************************************************/
-
-/*! \i_ef_bufs An [ef_iobufset] is a collection of buffers to be used
-** with the NIC.
-*/
-typedef struct ef_iobufset {
- unsigned magic;
- unsigned bufs_mmap_bytes;
- unsigned bufs_handle;
- int bufs_ptr_off;
- ef_addr bufs_addr;
- unsigned bufs_size; /* size rounded to pow2 */
- int bufs_num;
- int faultonaccess;
-} ef_iobufset;
-
/**********************************************************************
* ef_vi **************************************************************
const ef_vi_qword* ev)
{
unsigned q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
- unsigned desc_ptr = QWORD_GET_U(RX_EV_DESC_PTR, *ev);
+ uint16_t desc_ptr = QWORD_GET_U(RX_EV_DESC_PTR, *ev);
ef_rx_dup_state_t* rx_dup_state = &evq->evq_state->rx_dup_state[q_id];
if(likely( desc_ptr != rx_dup_state->rx_last_desc_ptr )) {
}
+int ef_vi_receive_post(ef_vi* vi, ef_addr addr, ef_request_id dma_id)
+{
+ int rc = ef_vi_receive_init(vi, addr, dma_id, 0);
+ if( rc == 0 ) ef_vi_receive_push(vi);
+ return rc;
+}
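A hedged caller-side sketch; rx_buf_addr and rx_id are placeholders assumed to come from the application's buffer registration:

int rc = ef_vi_receive_post(vi, rx_buf_addr, rx_id);
if( rc != 0 ) {
	/* descriptor ring full: reap completions and retry later */
}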
+
+
void ef_vi_receive_push(ef_vi* vi)
{
ef_vi_wiob();
#ifndef __CI_CIUL_SYSDEP_LINUX_H__
#define __CI_CIUL_SYSDEP_LINUX_H__
+
+#define ef_vi_wiob() mmiowb()
+
+
/**********************************************************************
 * Kernel version compatibility
*/
# if defined(__i386__) || defined(__x86_64__) /* GCC x86/x64 */
typedef unsigned long long ef_vi_dma_addr_t;
-# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
-# define ef_vi_wiob() __asm__ __volatile__ ("sfence")
-# else
-# define ef_vi_wiob() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
-# endif
+# endif
+#endif
+#ifndef mmiowb
+# if defined(__i386__) || defined(__x86_64__)
+# define mmiowb()
+# elif defined(__ia64__)
+# ifndef ia64_mfa
+# define ia64_mfa() asm volatile ("mf.a" ::: "memory")
+# endif
+# define mmiowb ia64_mfa
+# else
+# error "Need definition for mmiowb"
# endif
#endif
#if !defined(__GNUC__)
# if defined(__PPC__) /* GCC, PPC */
typedef unsigned long ef_vi_dma_addr_t;
-# define ef_vi_wiob() wmb()
# ifdef __powerpc64__
# ifdef CONFIG_SMP
# elif defined(__ia64__) /* GCC, IA64 */
typedef unsigned long ef_vi_dma_addr_t;
-# define ef_vi_wiob() __asm__ __volatile__("mf.a": : :"memory")
-
# else
# error Unknown processor - GNU C
# endif
# define EF_VI_LIKELY(t) __builtin_expect((t), 1)
# define EF_VI_UNLIKELY(t) __builtin_expect((t), 0)
# endif
-
-# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
-# define ef_vi_wiob() __asm__ __volatile__ ("sfence")
-# else
-# define ef_vi_wiob() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
-# endif
-
# else
# error Old Intel compiler not supported.
# endif