obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
- xgmac.o sge.o l2t.o cxgb3_offload.o
+ xgmac.o sge.o l2t.o cxgb3_offload.o \
+ mv88e1xxx.o trace.o vsc7323.o
+
+CFLAGS += -DATOMIC_ADD_RETURN
+CFLAGS += -DSPIN_TRYLOCK_IRQSAVE
+CFLAGS += -DRTNL_TRYLOCK
+CFLAGS += -DGSO_SIZE
+CFLAGS += -DKZALLOC
+CFLAGS += -DHAS_EEH
+CFLAGS += -DIRQF
+CFLAGS += -DNEW_SKB_COPY
+CFLAGS += -DNEW_SKB_OFFSET
+CFLAGS += -DCONFIG_CHELSIO_T3_CORE
+CFLAGS += -DGSO_TYPE
+CFLAGS += -DNETEVENT
+CFLAGS += -DCONFIG_TCP_OFFLOAD_MODULE
+CFLAGS += -DT3_IP_HDR
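+#
+# Each -D flag above enables a compatibility code path in the C sources for
+# features the target kernel may or may not provide. A minimal illustration
+# (hypothetical use, not taken from this patch) of how such a flag is
+# typically consumed:
+#
+#	#ifdef KZALLOC
+#		buf = kzalloc(len, GFP_KERNEL);    /* kernel has kzalloc() */
+#	#else
+#		buf = kcalloc(1, len, GFP_KERNEL); /* fallback, also zeroes */
+#	#endif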
/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2003-2007 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
/* This file should not be included directly. Include common.h instead. */
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cache.h>
-#include <linux/mutex.h>
#include "t3cdev.h"
-#include <asm/semaphore.h>
#include <asm/bitops.h>
#include <asm/io.h>
-typedef irqreturn_t(*intr_handler_t) (int, void *, struct pt_regs *);
+#ifdef T3_TRACE
+# include "trace.h"
+#endif
struct vlan_group;
-struct adapter;
struct port_info {
struct adapter *adapter;
struct vlan_group *vlan_grp;
- const struct port_type_info *port_type;
u8 port_id;
+ u8 tx_chan;
+ u8 txpkt_intf;
u8 rx_csum_offload;
u8 nqsets;
u8 first_qset;
struct link_config link_config;
struct net_device_stats netstats;
int activity;
+ int max_ofld_bw;
};
-enum { /* adapter flags */
- FULL_INIT_DONE = (1 << 0),
- USING_MSI = (1 << 1),
- USING_MSIX = (1 << 2),
- QUEUES_BOUND = (1 << 3),
- TP_PARITY_INIT = (1 << 4),
+struct work_struct;
+struct dentry;
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ QUEUES_BOUND = (1 << 3),
+ TP_PARITY_INIT = (1 << 4),
};
struct fl_pg_chunk {
struct sge_fl { /* SGE per free-buffer list state */
unsigned int buf_size; /* size of each Rx buffer */
unsigned int credits; /* # of available Rx buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
unsigned int size; /* capacity of free list */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned long alloc_failed; /* # of times buffer allocation failed */
};
+/* max concurrent LRO sessions per queue set */
+#define MAX_LRO_SES 8
+
+struct lro_session {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ u32 seq;
+ u16 iplen;
+ u16 mss;
+ __be16 vlan;
+ u8 npkts;
+};
+
+struct lro_state {
+ unsigned short enabled;
+ unsigned short active_idx; /* index of most recently added session */
+ unsigned int nactive; /* # of active sessions */
+ struct lro_session sess[MAX_LRO_SES];
+};
+
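+/*
+ * A minimal sketch, not part of this patch, of how an arriving segment
+ * might be matched to one of the MAX_LRO_SES sessions above: the expected
+ * sequence number and VLAN tag must both continue an active session.
+ * Everything beyond the lro_state/lro_session fields is illustrative.
+ */
+static inline struct lro_session *lro_find_session(struct lro_state *l,
+						   u32 seq, __be16 vlan)
+{
+	int i;
+
+	for (i = 0; i < MAX_LRO_SES; i++) {
+		struct lro_session *s = &l->sess[i];
+
+		if (s->head && s->seq == seq && s->vlan == vlan)
+			return s;	/* segment continues this session */
+	}
+	return NULL;	/* no match: start a new session or deliver as-is */
+}
+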
/*
* Bundle size for grouping offload RX packets for delivery to the stack.
* Don't make this too big as we do prefetch on each packet in a bundle.
struct rsp_desc;
-struct sge_rspq { /* state for an SGE response queue */
- unsigned int credits; /* # of pending response credits */
- unsigned int size; /* capacity of response queue */
- unsigned int cidx; /* consumer index */
- unsigned int gen; /* current generation bit */
- unsigned int polling; /* is the queue serviced through NAPI? */
- unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
- unsigned int next_holdoff; /* holdoff time for next interrupt */
- struct rsp_desc *desc; /* address of HW response ring */
- dma_addr_t phys_addr; /* physical address of the ring */
- unsigned int cntxt_id; /* SGE context id for the response q */
- spinlock_t lock; /* guards response processing */
- struct sk_buff *rx_head; /* offload packet receive queue head */
- struct sk_buff *rx_tail; /* offload packet receive queue tail */
+struct sge_rspq { /* state for an SGE response queue */
+ unsigned int credits; /* # of pending response credits */
+ unsigned int size; /* capacity of response queue */
+ unsigned int cidx; /* consumer index */
+ unsigned int gen; /* current generation bit */
+ unsigned int polling; /* is the queue serviced through NAPI? */
+ unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
+ unsigned int next_holdoff; /* holdoff time for next interrupt */
+ struct rsp_desc *desc; /* address of HW response ring */
+ dma_addr_t phys_addr; /* physical address of the ring */
+ unsigned int cntxt_id; /* SGE context id for the response q */
+ spinlock_t lock; /* guards response processing */
+ struct sk_buff *rx_head; /* offload packet receive queue head */
+ struct sk_buff *rx_tail; /* offload packet receive queue tail */
unsigned long offload_pkts;
unsigned long offload_bundles;
- unsigned long eth_pkts; /* # of ethernet packets */
- unsigned long pure_rsps; /* # of pure (non-data) responses */
- unsigned long imm_data; /* responses with immediate data */
- unsigned long rx_drops; /* # of packets dropped due to no mem */
- unsigned long async_notif; /* # of asynchronous notification events */
- unsigned long empty; /* # of times queue ran out of credits */
- unsigned long nomem; /* # of responses deferred due to no mem */
- unsigned long unhandled_irqs; /* # of spurious intrs */
+ unsigned long eth_pkts; /* # of ethernet packets */
+ unsigned long pure_rsps; /* # of pure (non-data) responses */
+ unsigned long imm_data; /* responses with immediate data */
+ unsigned long rx_drops; /* # of packets dropped due to no mem */
+ unsigned long async_notif; /* # of asynchronous notification events */
+ unsigned long empty; /* # of times queue ran out of credits */
+ unsigned long nomem; /* # of responses deferred due to no mem */
+ unsigned long unhandled_irqs; /* # of spurious intrs */
unsigned long starved;
unsigned long restarted;
};
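+
+/*
+ * holdoff_tmr above is kept in 100ns ticks, so a user-visible interrupt
+ * coalescing value in microseconds scales by 10. A sketch of the
+ * conversion (the clamp to a non-zero value is this example's assumption):
+ */
+static inline void set_rspq_holdoff(struct sge_rspq *q, unsigned int usecs)
+{
+	q->holdoff_tmr = max(usecs * 10, 1U);	/* 100ns units, never 0 */
+}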
struct tx_desc;
struct tx_sw_desc;
-
-struct sge_txq { /* state for an SGE Tx queue */
- unsigned long flags; /* HW DMA fetch status */
- unsigned int in_use; /* # of in-use Tx descriptors */
- unsigned int size; /* # of descriptors */
- unsigned int processed; /* total # of descs HW has processed */
- unsigned int cleaned; /* total # of descs SW has reclaimed */
- unsigned int stop_thres; /* SW TX queue suspend threshold */
- unsigned int cidx; /* consumer index */
- unsigned int pidx; /* producer index */
- unsigned int gen; /* current value of generation bit */
- unsigned int unacked; /* Tx descriptors used since last COMPL */
- struct tx_desc *desc; /* address of HW Tx descriptor ring */
- struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
- spinlock_t lock; /* guards enqueueing of new packets */
- unsigned int token; /* WR token */
- dma_addr_t phys_addr; /* physical address of the ring */
- struct sk_buff_head sendq; /* List of backpressured offload packets */
- struct tasklet_struct qresume_tsk; /* restarts the queue */
- unsigned int cntxt_id; /* SGE context id for the Tx q */
- unsigned long stops; /* # of times q has been stopped */
- unsigned long restarts; /* # of queue restarts */
+struct eth_coalesce_sw_desc;
+
+struct sge_txq { /* state for an SGE Tx queue */
+ unsigned long flags; /* HW DMA fetch status */
+ unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int processed; /* total # of descs HW has processed */
+ unsigned int cleaned; /* total # of descs SW has reclaimed */
+ unsigned int stop_thres; /* SW TX queue suspend threshold */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int gen; /* current value of generation bit */
+ unsigned int unacked; /* Tx descriptors used since last COMPL */
+ struct tx_desc *desc; /* address of HW Tx descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
+ unsigned int eth_coalesce_idx; /* idx of the next coalesce pkt */
+ unsigned int eth_coalesce_bytes; /* total length of coalesced pkts */
+ struct eth_coalesce_sw_desc *eth_coalesce_sdesc;
+ spinlock_t lock; /* guards enqueueing of new packets */
+ unsigned int token; /* WR token */
+ dma_addr_t phys_addr; /* physical address of the ring */
+ struct sk_buff_head sendq; /* List of backpressured offload packets */
+ struct tasklet_struct qresume_tsk; /* restarts the queue */
+ unsigned int cntxt_id; /* SGE context id for the Tx q */
+ unsigned long stops; /* # of times q has been stopped */
+ unsigned long restarts; /* # of queue restarts */
};
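+
+/*
+ * The pidx/gen pair follows the usual SGE ring convention: when the
+ * producer index wraps, the generation bit flips so hardware can tell
+ * freshly written descriptors from stale ones. A sketch of the advance:
+ */
+static inline void txq_advance(struct sge_txq *q, unsigned int ndesc)
+{
+	q->in_use += ndesc;
+	q->pidx += ndesc;
+	if (q->pidx >= q->size) {
+		q->pidx -= q->size;
+		q->gen ^= 1;		/* wrapped: flip generation bit */
+	}
+}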
-enum { /* per port SGE statistics */
- SGE_PSTAT_TSO, /* # of TSO requests */
- SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
- SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
- SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
- SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
-
- SGE_PSTAT_MAX /* must be last */
+enum { /* per port SGE statistics */
+ SGE_PSTAT_TSO, /* # of TSO requests */
+ SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
+ SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
+ SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
+ SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
+ SGE_PSTAT_TX_COALESCE_WR, /* # of TX Coalesce Work Requests */
+ SGE_PSTAT_TX_COALESCE_PKT, /* # of TX Coalesced packets */
+ SGE_PSTAT_LRO, /* # of completed LRO packets */
+ SGE_PSTAT_LRO_SKB, /* # of sk_buffs added to LRO sessions */
+ SGE_PSTAT_LRO_PG, /* # of page chunks added to LRO sessions */
+ SGE_PSTAT_LRO_ACK, /* # of pure ACKs fully merged by LRO */
+ SGE_PSTAT_LRO_OVFLOW, /* # of LRO session overflows */
+ SGE_PSTAT_LRO_COLSN, /* # of LRO hash collisions */
+
+ SGE_PSTAT_MAX /* must be last */
};
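+
+/*
+ * Counters in this enum index the per-qset port_stats[] array, e.g. a TSO
+ * transmit path would bump its counter like this (illustrative):
+ *
+ *	qs->port_stats[SGE_PSTAT_TSO]++;
+ */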
-struct sge_qset { /* an SGE queue set */
+struct sge_qset { /* an SGE queue set */
struct sge_rspq rspq;
- struct sge_fl fl[SGE_RXQ_PER_SET];
- struct sge_txq txq[SGE_TXQ_PER_SET];
- struct net_device *netdev; /* associated net device */
- unsigned long txq_stopped; /* which Tx queues are stopped */
- struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+ struct sge_fl fl[SGE_RXQ_PER_SET];
+ struct lro_state lro;
+ struct sge_txq txq[SGE_TXQ_PER_SET];
+ struct net_device *netdev; /* associated net device */
+ unsigned long txq_stopped; /* which Tx queues are stopped */
+ struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
unsigned long port_stats[SGE_PSTAT_MAX];
} ____cacheline_aligned;
struct sge {
struct sge_qset qs[SGE_QSETS];
- spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
+ spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
};
struct adapter {
char desc[22];
} msix_info[SGE_QSETS + 1];
+#ifdef T3_TRACE
+ struct trace_buf *tb[SGE_QSETS];
+#endif
+
/* T3 modules */
struct sge sge;
struct mc7 pmrx;
struct mc5 mc5;
struct net_device *port[MAX_NPORTS];
+ u8 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
+ u8 rrss_map[SGE_QSETS]; /* reverse RSS map table */
+
unsigned int check_task_cnt;
- struct work_struct adap_check_task;
+ struct delayed_work adap_check_task;
struct work_struct ext_intr_handler_task;
/*
*/
struct net_device *dummy_netdev[SGE_QSETS - 1];
+ u32 t3_config_space[16]; /* For old kernels only */
+
struct dentry *debugfs_root;
- struct mutex mdio_lock;
+ spinlock_t mdio_lock;
+ spinlock_t elmer_lock;
spinlock_t stats_lock;
spinlock_t work_lock;
};
-static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
+#include "cxgb3_compat.h"
+
+#define MDIO_LOCK(adapter) spin_lock(&(adapter)->mdio_lock)
+#define MDIO_UNLOCK(adapter) spin_unlock(&(adapter)->mdio_lock)
+
+#define ELMR_LOCK(adapter) spin_lock(&(adapter)->elmer_lock)
+#define ELMR_UNLOCK(adapter) spin_unlock(&(adapter)->elmer_lock)
+
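+/*
+ * These wrappers just bracket multi-register sequences that must not
+ * interleave. A hedged sketch (mdio_indirect_op() is hypothetical):
+ */
+static inline int t3_mdio_op_locked(adapter_t *adapter, int op)
+{
+	int ret;
+
+	MDIO_LOCK(adapter);
+	ret = mdio_indirect_op(adapter, op);	/* hypothetical helper */
+	MDIO_UNLOCK(adapter);
+	return ret;
+}
+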
+/**
+ * t3_read_reg - read a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t3_read_reg(adapter_t *adapter, u32 reg_addr)
{
u32 val = readl(adapter->regs + reg_addr);
- CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
+ CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr,
+ val);
return val;
}
-static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+/**
+ * t3_write_reg - write a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t3_write_reg(adapter_t *adapter, u32 reg_addr, u32 val)
{
- CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
+ CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr,
+ val);
writel(val, adapter->regs + reg_addr);
}
+/**
+ * t3_os_pci_write_config_4 - 32-bit write to PCI config space
+ * @adapter: the adapter
+ * @reg: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given register in PCI config space.
+ */
+static inline void t3_os_pci_write_config_4(adapter_t *adapter, int reg,
+ u32 val)
+{
+ pci_write_config_dword(adapter->pdev, reg, val);
+}
+
+/**
+ * t3_os_pci_read_config_4 - read a 32-bit value from PCI config space
+ * @adapter: the adapter
+ * @reg: the register address
+ * @val: where to store the value read
+ *
+ * Read a 32-bit value from the given register in PCI config space.
+ */
+static inline void t3_os_pci_read_config_4(adapter_t *adapter, int reg,
+ u32 *val)
+{
+ pci_read_config_dword(adapter->pdev, reg, val);
+}
+
+/**
+ * t3_os_pci_write_config_2 - 16-bit write to PCI config space
+ * @adapter: the adapter
+ * @reg: the register address
+ * @val: the value to write
+ *
+ * Write a 16-bit value into the given register in PCI config space.
+ */
+static inline void t3_os_pci_write_config_2(adapter_t *adapter, int reg,
+ u16 val)
+{
+ pci_write_config_word(adapter->pdev, reg, val);
+}
+
+/**
+ * t3_os_pci_read_config_2 - read a 16-bit value from PCI config space
+ * @adapter: the adapter
+ * @reg: the register address
+ * @val: where to store the value read
+ *
+ * Read a 16-bit value from the given register in PCI config space.
+ */
+static inline void t3_os_pci_read_config_2(adapter_t *adapter, int reg,
+ u16 *val)
+{
+ pci_read_config_word(adapter->pdev, reg, val);
+}
+
+/**
+ * t3_os_find_pci_capability - look up a capability in the PCI capability list
+ * @adapter: the adapter
+ * @cap: the capability
+ *
+ * Return the address of the given capability within the PCI capability list.
+ */
+static inline int t3_os_find_pci_capability(adapter_t *adapter, int cap)
+{
+ return pci_find_capability(adapter->pdev, cap);
+}
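+
+/*
+ * One intended consumer of these wrappers is the save/restore of config
+ * space on old kernels (see t3_config_space[16] in struct adapter). A
+ * sketch, assuming a dword-at-a-time save of the first 64 bytes:
+ */
+static inline void t3_save_config_space(adapter_t *adapter)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)	/* 16 dwords = 64 bytes */
+		t3_os_pci_read_config_4(adapter, i * 4,
+					&adapter->t3_config_space[i]);
+}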
+
+/**
+ * port_name - return the string name of a port
+ * @adapter: the adapter
+ * @port_idx: the port index
+ *
+ * Return the string name of the selected port.
+ */
+static inline const char *port_name(adapter_t *adapter, unsigned int port_idx)
+{
+ return adapter->port[port_idx]->name;
+}
+
+/**
+ * t3_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t3_os_set_hw_addr(adapter_t *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ memcpy(adapter->port[port_idx]->dev_addr, hw_addr, ETH_ALEN);
+#ifdef ETHTOOL_GPERMADDR
+ memcpy(adapter->port[port_idx]->perm_addr, hw_addr, ETH_ALEN);
+#endif
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
return netdev_priv(adap->port[idx]);
#define tdev2adap(d) container_of(d, struct adapter, tdev)
-static inline int offload_running(struct adapter *adapter)
+static inline int offload_running(adapter_t *adapter)
{
return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
-void t3_os_ext_intr_handler(struct adapter *adapter);
-void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
+void t3_os_ext_intr_handler(adapter_t *adapter);
+void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
int speed, int duplex, int fc);
-void t3_sge_start(struct adapter *adap);
-void t3_sge_stop(struct adapter *adap);
-void t3_free_sge_resources(struct adapter *adap);
-void t3_sge_err_intr_handler(struct adapter *adapter);
-intr_handler_t t3_intr_handler(struct adapter *adap, int polling,
- struct pt_regs *ptregs);
+void t3_sge_start(adapter_t *adap);
+void t3_sge_stop(adapter_t *adap);
+void t3_free_sge_resources(adapter_t *adap);
+void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
-int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+int t3_mgmt_tx(adapter_t *adap, struct sk_buff *skb);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
-int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
- int irq_vec_idx, const struct qset_params *p,
+int t3_sge_alloc_qset(adapter_t *adapter, unsigned int id, int nports,
+ int irq_vec_idx, const struct qset_params *p,
int ntxq, struct net_device *netdev);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
-irqreturn_t t3_sge_intr_msix(int irq, void *cookie, struct pt_regs*);
-#endif /* __T3_ADAPTER_H__ */
+#endif /* __T3_ADAPTER_H__ */
/*
- * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2005-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#include "common.h"
#include "regs.h"
enum {
- AEL100X_TX_DISABLE = 9,
- AEL100X_TX_CONFIG1 = 0xc002,
+ AEL100X_TX_DISABLE = 9,
+ AEL100X_TX_CONFIG1 = 0xc002,
AEL1002_PWR_DOWN_HI = 0xc011,
AEL1002_PWR_DOWN_LO = 0xc012,
- AEL1002_XFI_EQL = 0xc015,
- AEL1002_LB_EN = 0xc017,
+ AEL1002_XFI_EQL = 0xc015,
+ AEL1002_LB_EN = 0xc017,
- LASI_CTRL = 0x9002,
- LASI_STAT = 0x9005
+ LASI_CTRL = 0x9002,
+ LASI_STAT = 0x9005
};
static void ael100x_txon(struct cphy *phy)
return 0;
}
+#ifdef C99_NOT_SUPPORTED
static struct cphy_ops ael1002_ops = {
- .reset = ael1002_reset,
- .intr_enable = ael1002_intr_noop,
- .intr_disable = ael1002_intr_noop,
- .intr_clear = ael1002_intr_noop,
- .intr_handler = ael1002_intr_noop,
+ ael1002_reset,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ael100x_get_link_status,
+ ael1002_power_down,
+};
+#else
+static struct cphy_ops ael1002_ops = {
+ .reset = ael1002_reset,
+ .intr_enable = ael1002_intr_noop,
+ .intr_disable = ael1002_intr_noop,
+ .intr_clear = ael1002_intr_noop,
+ .intr_handler = ael1002_intr_noop,
.get_link_status = ael100x_get_link_status,
- .power_down = ael1002_power_down,
+ .power_down = ael1002_power_down,
};
+#endif
-void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops)
+int t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
{
- cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
+ cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
+ "10GBASE-R");
ael100x_txon(phy);
+ return 0;
}
static int ael1006_reset(struct cphy *phy, int wait)
if (err)
return err;
- return (status & 1) ? cphy_cause_link_change : 0;
+ return (status & 1) ? cphy_cause_link_change : 0;
}
static int ael1006_power_down(struct cphy *phy, int enable)
BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
}
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops ael1006_ops = {
+ ael1006_reset,
+ ael1006_intr_enable,
+ ael1006_intr_disable,
+ ael1006_intr_clear,
+ ael1006_intr_handler,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ael100x_get_link_status,
+ ael1006_power_down,
+};
+#else
static struct cphy_ops ael1006_ops = {
- .reset = ael1006_reset,
- .intr_enable = ael1006_intr_enable,
- .intr_disable = ael1006_intr_disable,
- .intr_clear = ael1006_intr_clear,
- .intr_handler = ael1006_intr_handler,
+ .reset = ael1006_reset,
+ .intr_enable = ael1006_intr_enable,
+ .intr_disable = ael1006_intr_disable,
+ .intr_clear = ael1006_intr_clear,
+ .intr_handler = ael1006_intr_handler,
.get_link_status = ael100x_get_link_status,
- .power_down = ael1006_power_down,
+ .power_down = ael1006_power_down,
};
+#endif
-void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops)
+int t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
{
- cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
+ cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
+ "10GBASE-SR");
ael100x_txon(phy);
+ return 0;
}
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops qt2045_ops = {
+ ael1006_reset,
+ ael1006_intr_enable,
+ ael1006_intr_disable,
+ ael1006_intr_clear,
+ ael1006_intr_handler,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ael100x_get_link_status,
+ ael1006_power_down,
+};
+#else
static struct cphy_ops qt2045_ops = {
- .reset = ael1006_reset,
- .intr_enable = ael1006_intr_enable,
- .intr_disable = ael1006_intr_disable,
- .intr_clear = ael1006_intr_clear,
- .intr_handler = ael1006_intr_handler,
+ .reset = ael1006_reset,
+ .intr_enable = ael1006_intr_enable,
+ .intr_disable = ael1006_intr_disable,
+ .intr_clear = ael1006_intr_clear,
+ .intr_handler = ael1006_intr_handler,
.get_link_status = ael100x_get_link_status,
- .power_down = ael1006_power_down,
+ .power_down = ael1006_power_down,
};
+#endif
-void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops)
+int t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
{
unsigned int stat;
- cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
+ cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
+ "10GBASE-CX4");
/*
* Some cards where the PHY is supposed to be at address 0 actually
if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
stat == 0xffff)
phy->addr = 1;
+ return 0;
}
static int xaui_direct_reset(struct cphy *phy, int wait)
{
if (link_ok) {
unsigned int status;
-
+
status = t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT0, phy->addr)) |
- t3_read_reg(phy->adapter,
- XGM_REG(A_XGM_SERDES_STAT1, phy->addr)) |
- t3_read_reg(phy->adapter,
- XGM_REG(A_XGM_SERDES_STAT2, phy->addr)) |
- t3_read_reg(phy->adapter,
- XGM_REG(A_XGM_SERDES_STAT3, phy->addr));
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT1, phy->addr)) |
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT2, phy->addr)) |
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT3, phy->addr));
*link_ok = !(status & F_LOWSIG0);
}
if (speed)
return 0;
}
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops xaui_direct_ops = {
+ xaui_direct_reset,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ xaui_direct_get_link_status,
+ xaui_direct_power_down,
+};
+#else
static struct cphy_ops xaui_direct_ops = {
- .reset = xaui_direct_reset,
- .intr_enable = ael1002_intr_noop,
- .intr_disable = ael1002_intr_noop,
- .intr_clear = ael1002_intr_noop,
- .intr_handler = ael1002_intr_noop,
+ .reset = xaui_direct_reset,
+ .intr_enable = ael1002_intr_noop,
+ .intr_disable = ael1002_intr_noop,
+ .intr_clear = ael1002_intr_noop,
+ .intr_handler = ael1002_intr_noop,
.get_link_status = xaui_direct_get_link_status,
- .power_down = xaui_direct_power_down,
+ .power_down = xaui_direct_power_down,
};
+#endif
-void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops)
+int t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
{
- cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops);
+ cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
+ "10GBASE-CX4");
+ return 0;
}
/*
- * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2005-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include "version.h"
-
-#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_ALERT(adap, fmt, ...) \
- dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
-
-/*
- * More powerful macro that selectively prints messages based on msg_enable.
- * For info and debugging messages.
- */
-#define CH_MSG(adapter, level, category, fmt, ...) do { \
- if ((adapter)->msg_enable & NETIF_MSG_##category) \
- dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
- ## __VA_ARGS__); \
-} while (0)
-
-#ifdef DEBUG
-# define CH_DBG(adapter, category, fmt, ...) \
- CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
-#else
-# define CH_DBG(adapter, category, fmt, ...)
-#endif
-
-/* Additional NETIF_MSG_* categories */
-#define NETIF_MSG_MMIO 0x8000000
-
-struct t3_rx_mode {
- struct net_device *dev;
- struct dev_mc_list *mclist;
- unsigned int idx;
-};
-
-static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
- struct dev_mc_list *mclist)
-{
- p->dev = dev;
- p->mclist = mclist;
- p->idx = 0;
-}
-
-static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
-{
- u8 *addr = NULL;
-
- if (rm->mclist && rm->idx < rm->dev->mc_count) {
- addr = rm->mclist->dmi_addr;
- rm->mclist = rm->mclist->next;
- rm->idx++;
- }
- return addr;
-}
+#include "osdep.h"
enum {
- MAX_NPORTS = 2, /* max # of ports */
- MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
- EEPROMSIZE = 8192, /* Serial EEPROM size */
+ MAX_FRAME_SIZE = 10240, /* max MAC frame size, includes header + FCS */
+ EEPROMSIZE = 8192, /* Serial EEPROM size */
SERNUM_LEN = 16, /* Serial # length */
- RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
- TCB_SIZE = 128, /* TCB size */
- NMTUS = 16, /* size of MTU table */
- NCCTRL_WIN = 32, /* # of congestion control windows */
- PROTO_SRAM_LINES = 128, /* size of TP sram */
+ RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NTX_SCHED = 8, /* # of HW Tx scheduling queues */
+ PROTO_SRAM_LINES = 128, /* size of protocol sram */
};
#define MAX_RX_COALESCING_LEN 12288U
enum {
- PAUSE_RX = 1 << 0,
- PAUSE_TX = 1 << 1,
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
SUPPORTED_IRQ = 1 << 24
};
-enum { /* adapter interrupt-maintained statistics */
+enum { /* adapter interrupt-maintained statistics */
STAT_ULP_CH0_PBL_OOB,
STAT_ULP_CH1_PBL_OOB,
STAT_PCI_CORR_ECC,
- IRQ_NUM_STATS /* keep last */
+ IRQ_NUM_STATS /* keep last */
};
enum {
(((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
enum {
- SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
- SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
- SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
+ FW_VERSION_MAJOR = 6,
+ FW_VERSION_MINOR = 0,
+ FW_VERSION_MICRO = 0
};
-enum sge_context_type { /* SGE egress context types */
- SGE_CNTXT_RDMA = 0,
- SGE_CNTXT_ETH = 2,
- SGE_CNTXT_OFLD = 4,
- SGE_CNTXT_CTRL = 5
+enum {
+ SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
+ SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
+ SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
+};
+
+enum sge_context_type { /* SGE egress context types */
+ SGE_CNTXT_RDMA = 0,
+ SGE_CNTXT_ETH = 2,
+ SGE_CNTXT_OFLD = 4,
+ SGE_CNTXT_CTRL = 5
};
enum {
- AN_PKT_SIZE = 32, /* async notification packet size */
- IMMED_PKT_SIZE = 48 /* packet size for immediate data */
+ AN_PKT_SIZE = 32, /* async notification packet size */
+ IMMED_PKT_SIZE = 48 /* packet size for immediate data */
};
-struct sg_ent { /* SGE scatter/gather entry */
+struct sg_ent { /* SGE scatter/gather entry */
__be32 len[2];
__be64 addr[2];
};
#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
struct cphy;
-struct adapter;
struct mdio_ops {
- int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *val);
- int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val);
+ int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int *val);
+ int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int val);
};
struct adapter_info {
- unsigned char nports; /* # of ports */
- unsigned char phy_base_addr; /* MDIO PHY base address */
- unsigned char mdien;
- unsigned char mdiinv;
- unsigned int gpio_out; /* GPIO output settings */
- unsigned int gpio_intr; /* GPIO IRQ enable mask */
- unsigned long caps; /* adapter capabilities */
- const struct mdio_ops *mdio_ops; /* MDIO operations */
- const char *desc; /* product description */
+ unsigned char nports0; /* # of ports on channel 0 */
+ unsigned char nports1; /* # of ports on channel 1 */
+ unsigned char phy_base_addr; /* MDIO PHY base address */
+ unsigned char mdien:1;
+ unsigned char mdiinv:1;
+ unsigned int gpio_out; /* GPIO output settings */
+ unsigned int gpio_intr; /* GPIO IRQ enable mask */
+ unsigned long caps; /* adapter capabilities */
+ const struct mdio_ops *mdio_ops; /* MDIO operations */
+ const char *desc; /* product description */
};
struct port_type_info {
- void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *ops);
- unsigned int caps;
- const char *desc;
+ int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *ops);
};
struct mc5_stats {
};
struct mac_stats {
- u64 tx_octets; /* total # of octets in good frames */
- u64 tx_octets_bad; /* total # of octets in error frames */
- u64 tx_frames; /* all good frames */
- u64 tx_mcast_frames; /* good multicast frames */
- u64 tx_bcast_frames; /* good broadcast frames */
- u64 tx_pause; /* # of transmitted pause frames */
- u64 tx_deferred; /* frames with deferred transmissions */
- u64 tx_late_collisions; /* # of late collisions */
- u64 tx_total_collisions; /* # of total collisions */
- u64 tx_excess_collisions; /* frame errors from excessive collissions */
- u64 tx_underrun; /* # of Tx FIFO underruns */
- u64 tx_len_errs; /* # of Tx length errors */
- u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
- u64 tx_excess_deferral; /* # of frames with excessive deferral */
- u64 tx_fcs_errs; /* # of frames with bad FCS */
-
- u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_octets_bad; /* total # of octets in error frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_mcast_frames; /* good multicast frames */
+ u64 tx_bcast_frames; /* good broadcast frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_deferred; /* frames with deferred transmissions */
+ u64 tx_late_collisions; /* # of late collisions */
+ u64 tx_total_collisions; /* # of total collisions */
+ u64 tx_excess_collisions; /* frame errors from excessive collisions */
+ u64 tx_underrun; /* # of Tx FIFO underruns */
+ u64 tx_len_errs; /* # of Tx length errors */
+ u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
+ u64 tx_excess_deferral; /* # of frames with excessive deferral */
+ u64 tx_fcs_errs; /* # of frames with bad FCS */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
u64 tx_frames_65_127;
u64 tx_frames_128_255;
u64 tx_frames_256_511;
u64 tx_frames_1024_1518;
u64 tx_frames_1519_max;
- u64 rx_octets; /* total # of octets in good frames */
- u64 rx_octets_bad; /* total # of octets in error frames */
- u64 rx_frames; /* all good frames */
- u64 rx_mcast_frames; /* good multicast frames */
- u64 rx_bcast_frames; /* good broadcast frames */
- u64 rx_pause; /* # of received pause frames */
- u64 rx_fcs_errs; /* # of received frames with bad FCS */
- u64 rx_align_errs; /* alignment errors */
- u64 rx_symbol_errs; /* symbol errors */
- u64 rx_data_errs; /* data errors */
- u64 rx_sequence_errs; /* sequence errors */
- u64 rx_runt; /* # of runt frames */
- u64 rx_jabber; /* # of jabber frames */
- u64 rx_short; /* # of short frames */
- u64 rx_too_long; /* # of oversized frames */
- u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
-
- u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_octets_bad; /* total # of octets in error frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_mcast_frames; /* good multicast frames */
+ u64 rx_bcast_frames; /* good broadcast frames */
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_fcs_errs; /* # of received frames with bad FCS */
+ u64 rx_align_errs; /* alignment errors */
+ u64 rx_symbol_errs; /* symbol errors */
+ u64 rx_data_errs; /* data errors */
+ u64 rx_sequence_errs; /* sequence errors */
+ u64 rx_runt; /* # of runt frames */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_short; /* # of short frames */
+ u64 rx_too_long; /* # of oversized frames */
+ u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
u64 rx_frames_65_127;
u64 rx_frames_128_255;
u64 rx_frames_256_511;
u64 rx_frames_1024_1518;
u64 rx_frames_1519_max;
- u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
+ u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
unsigned long tx_fifo_parity_err;
unsigned long rx_fifo_parity_err;
unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
unsigned long num_resets; /* # times reset due to stuck TX */
-
};
struct tp_mib_stats {
};
struct tp_params {
- unsigned int nchan; /* # of channels */
- unsigned int pmrx_size; /* total PMRX capacity */
- unsigned int pmtx_size; /* total PMTX capacity */
- unsigned int cm_size; /* total CM capacity */
- unsigned int chan_rx_size; /* per channel Rx size */
- unsigned int chan_tx_size; /* per channel Tx size */
- unsigned int rx_pg_size; /* Rx page size */
- unsigned int tx_pg_size; /* Tx page size */
- unsigned int rx_num_pgs; /* # of Rx pages */
- unsigned int tx_num_pgs; /* # of Tx pages */
- unsigned int ntimer_qs; /* # of timer queues */
-};
-
-struct qset_params { /* SGE queue set parameters */
- unsigned int polling; /* polling/interrupt service for rspq */
- unsigned int coalesce_usecs; /* irq coalescing timer */
- unsigned int rspq_size; /* # of entries in response queue */
- unsigned int fl_size; /* # of entries in regular free list */
- unsigned int jumbo_size; /* # of entries in jumbo free list */
- unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
- unsigned int cong_thres; /* FL congestion threshold */
+ unsigned int nchan; /* # of channels */
+ unsigned int pmrx_size; /* total PMRX capacity */
+ unsigned int pmtx_size; /* total PMTX capacity */
+ unsigned int cm_size; /* total CM capacity */
+ unsigned int chan_rx_size; /* per channel Rx size */
+ unsigned int chan_tx_size; /* per channel Tx size */
+ unsigned int rx_pg_size; /* Rx page size */
+ unsigned int tx_pg_size; /* Tx page size */
+ unsigned int rx_num_pgs; /* # of Rx pages */
+ unsigned int tx_num_pgs; /* # of Tx pages */
+ unsigned int ntimer_qs; /* # of timer queues */
+ unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned int dack_re; /* DACK timer resolution */
+};
+
+struct qset_params { /* SGE queue set parameters */
+ unsigned int polling; /* polling/interrupt service for rspq */
+ unsigned int lro; /* large receive offload */
+ unsigned int coalesce_usecs; /* irq coalescing timer */
+ unsigned int rspq_size; /* # of entries in response queue */
+ unsigned int fl_size; /* # of entries in regular free list */
+ unsigned int jumbo_size; /* # of entries in jumbo free list */
+ unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
+ unsigned int cong_thres; /* FL congestion threshold */
+ unsigned int vector; /* Interrupt (line or vector) number */
};
struct sge_params {
- unsigned int max_pkt_size; /* max offload pkt size */
+ unsigned int max_pkt_size; /* max offload pkt size */
struct qset_params qset[SGE_QSETS];
};
struct mc5_params {
- unsigned int mode; /* selects MC5 width */
- unsigned int nservers; /* size of server region */
- unsigned int nfilters; /* size of filter region */
- unsigned int nroutes; /* size of routing region */
+ unsigned int mode; /* selects MC5 width */
+ unsigned int nservers; /* size of server region */
+ unsigned int nfilters; /* size of filter region */
+ unsigned int nroutes; /* size of routing region */
};
/* Default MC5 region sizes */
/* MC5 modes, these must be non-0 */
enum {
MC5_MODE_144_BIT = 1,
- MC5_MODE_72_BIT = 2
+ MC5_MODE_72_BIT = 2
};
/* MC5 min active region size */
};
struct pci_params {
- unsigned int vpd_cap_addr;
- unsigned int pcie_cap_addr;
+ unsigned int vpd_cap_addr;
+ unsigned int pcie_cap_addr;
unsigned short speed;
- unsigned char width;
- unsigned char variant;
+ unsigned char width;
+ unsigned char variant;
};
enum {
struct adapter_params {
struct sge_params sge;
struct mc5_params mc5;
- struct tp_params tp;
+ struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
const struct adapter_info *info;
+#ifdef CONFIG_CHELSIO_T3_CORE
unsigned short mtus[NMTUS];
unsigned short a_wnd[NCCTRL_WIN];
unsigned short b_wnd[NCCTRL_WIN];
-
- unsigned int nports; /* # of ethernet ports */
- unsigned int stats_update_period; /* MAC stats accumulation period */
- unsigned int linkpoll_period; /* link poll period in 0.1s */
- unsigned int rev; /* chip revision */
- unsigned int offload;
+#endif
+ unsigned int nports; /* # of ethernet ports */
+ unsigned int chan_map; /* bitmap of in-use Tx channels */
+ unsigned int stats_update_period; /* MAC stats accumulation period */
+ unsigned int linkpoll_period; /* link poll period in 0.1s */
+ unsigned int rev; /* chip revision */
+ unsigned int offload;
};
enum { /* chip revisions */
struct trace_params {
u32 sip;
- u32 sip_mask;
+ u32 sip_mask;
u32 dip;
- u32 dip_mask;
+ u32 dip_mask;
u16 sport;
u16 sport_mask;
u16 dport;
u32 vlan_mask:12;
u32 intf:4;
u32 intf_mask:4;
- u8 proto;
- u8 proto_mask;
+ u8 proto;
+ u8 proto_mask;
};
struct link_config {
- unsigned int supported; /* link capabilities */
- unsigned int advertising; /* advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
- unsigned char requested_duplex; /* duplex user has requested */
- unsigned char duplex; /* actual link duplex */
- unsigned char requested_fc; /* flow control user has requested */
- unsigned char fc; /* actual link flow control */
- unsigned char autoneg; /* autonegotiating? */
- unsigned int link_ok; /* link up? */
+ unsigned int supported; /* link capabilities */
+ unsigned int advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_duplex; /* duplex user has requested */
+ unsigned char duplex; /* actual link duplex */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned int link_ok; /* link up? */
};
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
struct mc5 {
- struct adapter *adapter;
+ adapter_t *adapter;
unsigned int tcam_size;
unsigned char part_type;
unsigned char parity_enabled;
}
struct mc7 {
- struct adapter *adapter; /* backpointer to adapter */
- unsigned int size; /* memory size in bytes */
- unsigned int width; /* MC7 interface width */
- unsigned int offset; /* register address offset for MC7 instance */
- const char *name; /* name of MC7 instance */
- struct mc7_stats stats; /* MC7 statistics */
+ adapter_t *adapter; /* backpointer to adapter */
+ unsigned int size; /* memory size in bytes */
+ unsigned int width; /* MC7 interface width */
+ unsigned int offset; /* register address offset for MC7 instance */
+ const char *name; /* name of MC7 instance */
+ struct mc7_stats stats; /* MC7 statistics */
};
static inline unsigned int t3_mc7_size(const struct mc7 *p)
}
struct cmac {
- struct adapter *adapter;
+ adapter_t *adapter;
unsigned int offset;
- unsigned int nucast; /* # of address filters for unicast MACs */
+ unsigned char nucast; /* # of address filters for unicast MACs */
+ unsigned char multiport; /* multiple ports connected to this MAC */
+ unsigned char ext_port; /* external MAC port */
+ unsigned char promisc_map; /* which external ports are promiscuous */
unsigned int tx_tcnt;
unsigned int tx_xcnt;
u64 tx_mcnt;
enum {
MAC_DIRECTION_RX = 1,
MAC_DIRECTION_TX = 2,
- MAC_RXFIFO_SIZE = 32768
+ MAC_RXFIFO_SIZE = 32768
};
/* IEEE 802.3ae specified MDIO devices */
enum {
MDIO_DEV_PMA_PMD = 1,
- MDIO_DEV_WIS = 2,
- MDIO_DEV_PCS = 3,
- MDIO_DEV_XGXS = 4
+ MDIO_DEV_WIS = 2,
+ MDIO_DEV_PCS = 3,
+ MDIO_DEV_XGXS = 4
};
/* PHY loopback direction */
/* PHY operations */
struct cphy_ops {
- void (*destroy)(struct cphy *phy);
int (*reset)(struct cphy *phy, int wait);
int (*intr_enable)(struct cphy *phy);
/* A PHY instance */
struct cphy {
- int addr; /* PHY address */
- struct adapter *adapter; /* associated adapter */
- unsigned long fifo_errors; /* FIFO over/under-flows */
- const struct cphy_ops *ops; /* PHY operations */
- int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
+ int addr; /* PHY address */
+ unsigned int caps; /* PHY capabilities */
+ adapter_t *adapter; /* associated adapter */
+ const char *desc; /* PHY description */
+ unsigned long fifo_errors; /* FIFO over/under-flows */
+ const struct cphy_ops *ops; /* PHY operations */
+ int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
- int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
+ int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
};
static inline int mdio_read(struct cphy *phy, int mmd, int reg,
unsigned int *valp)
{
- return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
+ return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
}
static inline int mdio_write(struct cphy *phy, int mmd, int reg,
unsigned int val)
{
- return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
+ return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
}
/* Convenience initializer */
-static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
+static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
int phy_addr, struct cphy_ops *phy_ops,
- const struct mdio_ops *mdio_ops)
+ const struct mdio_ops *mdio_ops, unsigned int caps,
+ const char *desc)
{
phy->adapter = adapter;
- phy->addr = phy_addr;
- phy->ops = phy_ops;
+ phy->addr = phy_addr;
+ phy->caps = caps;
+ phy->desc = desc;
+ phy->ops = phy_ops;
if (mdio_ops) {
- phy->mdio_read = mdio_ops->read;
+ phy->mdio_read = mdio_ops->read;
phy->mdio_write = mdio_ops->write;
}
}
/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
#define MAC_STATS_ACCUM_SECS 180
+/* The external MAC needs accumulation every 30 seconds */
+#define VSC_STATS_ACCUM_SECS 30
+
#define XGM_REG(reg_addr, idx) \
((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
#define adapter_info(adap) ((adap)->params.info)
-static inline int uses_xaui(const struct adapter *adap)
+static inline int uses_xaui(const adapter_t *adap)
{
return adapter_info(adap)->caps & SUPPORTED_AUI;
}
-static inline int is_10G(const struct adapter *adap)
+static inline int is_10G(const adapter_t *adap)
{
return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}
-static inline int is_offload(const struct adapter *adap)
+static inline int is_offload(const adapter_t *adap)
{
+#if defined(CONFIG_CHELSIO_T3_CORE)
return adap->params.offload;
+#else
+ return 0;
+#endif
}
-static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
{
return adap->params.vpd.cclk / 1000;
}
-static inline unsigned int is_pcie(const struct adapter *adap)
+static inline unsigned int dack_ticks_to_usec(const adapter_t *adap,
+ unsigned int ticks)
+{
+ return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
+}
+
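+/*
+ * Worked example with hypothetical numbers: if the core clock is 200 MHz
+ * (vpd.cclk = 200000 kHz) then core_ticks_per_usec() is 200; with
+ * dack_re = 4 a delayed-ACK tick is 16 core ticks, so
+ * dack_ticks_to_usec(adap, 100) = (100 << 4) / 200 = 8 microseconds.
+ */
+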
+static inline unsigned int is_pcie(const adapter_t *adap)
{
return adap->params.pci.variant == PCI_VARIANT_PCIE;
}
-void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
- u32 val);
-void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
- int n, unsigned int offset);
-int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
- int polarity, int attempts, int delay, u32 *valp);
-static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+void t3_set_reg_field(adapter_t *adap, unsigned int addr, u32 mask, u32 val);
+void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
+ unsigned int offset);
+int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
+ int attempts, int delay, u32 *valp);
+
+static inline int t3_wait_op_done(adapter_t *adapter, int reg, u32 mask,
int polarity, int attempts, int delay)
{
return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
delay, NULL);
}
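+
+/*
+ * Typical polling use, sketched with illustrative register/flag names:
+ * wait for a busy bit to clear, trying 100 times with 1ms between polls.
+ *
+ *	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ *			    0, 100, 1))
+ *		CH_ERR(adapter, "context op timed out\n");
+ */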
+
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
+int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
-void t3_intr_enable(struct adapter *adapter);
-void t3_intr_disable(struct adapter *adapter);
-void t3_intr_clear(struct adapter *adapter);
-void t3_port_intr_enable(struct adapter *adapter, int idx);
-void t3_port_intr_disable(struct adapter *adapter, int idx);
-void t3_port_intr_clear(struct adapter *adapter, int idx);
-int t3_slow_intr_handler(struct adapter *adapter);
-int t3_phy_intr_handler(struct adapter *adapter);
+void t3_intr_enable(adapter_t *adapter);
+void t3_intr_disable(adapter_t *adapter);
+void t3_intr_clear(adapter_t *adapter);
+void t3_port_intr_enable(adapter_t *adapter, int idx);
+void t3_port_intr_disable(adapter_t *adapter, int idx);
+void t3_port_intr_clear(adapter_t *adapter, int idx);
+int t3_slow_intr_handler(adapter_t *adapter);
+int t3_phy_intr_handler(adapter_t *adapter);
-void t3_link_changed(struct adapter *adapter, int port_id);
+void t3_link_changed(adapter_t *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
-int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
-int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
-int t3_seeprom_wp(struct adapter *adapter, int enable);
-int t3_get_tp_version(struct adapter *adapter, u32 *vers);
-int t3_check_tpsram_version(struct adapter *adapter, int *must_load);
-int t3_check_tpsram(struct adapter *adapter, u8 *tp_ram, unsigned int size);
-int t3_set_proto_sram(struct adapter *adap, u8 *data);
-int t3_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented);
-int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
-int t3_get_fw_version(struct adapter *adapter, u32 *vers);
-int t3_check_fw_version(struct adapter *adapter, int *must_load);
-int t3_init_hw(struct adapter *adapter, u32 fw_params);
-void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
-void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
-int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
- int reset);
-void t3_led_ready(struct adapter *adapter);
-void t3_fatal_err(struct adapter *adapter);
-void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
- const u8 * cpus, const u16 *rspq);
-int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
-int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
-int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
- unsigned int n, unsigned int *valp);
+int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
+int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data);
+int t3_seeprom_wp(adapter_t *adapter, int enable);
+int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
+ u32 *data, int byte_oriented);
+int t3_get_tp_version(adapter_t *adapter, u32 *vers);
+int t3_check_tpsram_version(adapter_t *adapter, int *must_load);
+int t3_check_tpsram(adapter_t *adapter, u8 *tp_ram, unsigned int size);
+int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size);
+int t3_get_fw_version(adapter_t *adapter, u32 *vers);
+int t3_check_fw_version(adapter_t *adapter, int *must_load);
+int t3_load_boot(adapter_t *adapter, const u8 *fw_data, unsigned int size);
+int t3_init_hw(adapter_t *adapter, u32 fw_params);
+void mac_prep(struct cmac *mac, adapter_t *adapter, int index);
+void early_hw_init(adapter_t *adapter, const struct adapter_info *ai);
+int t3_prep_adapter(adapter_t *adapter, const struct adapter_info *ai, int reset);
+void t3_led_ready(adapter_t *adapter);
+void t3_fatal_err(adapter_t *adapter);
+void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on);
+void t3_tp_set_offload_mode(adapter_t *adap, int enable);
+void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
+ const u16 *rspq);
+int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map);
+int t3_set_proto_sram(adapter_t *adap, u8 *data);
+int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask);
+void t3_port_failover(adapter_t *adapter, int port);
+void t3_failover_done(adapter_t *adapter, int port);
+void t3_failover_clear(adapter_t *adapter);
+int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp);
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
u64 *buf);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
-int t3_mac_set_num_ucast(struct cmac *mac, int n);
+int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
-int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+ int fc);
int t3b2_mac_watchdog_task(struct cmac *mac);
-void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
+void t3_mc5_prep(adapter_t *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes);
void t3_mc5_intr_handler(struct mc5 *mc5);
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
u32 *buf);
-int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
-void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
-void t3_tp_set_offload_mode(struct adapter *adap, int enable);
-void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
-void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
- unsigned short alpha[NCCTRL_WIN],
+#ifdef CONFIG_CHELSIO_T3_CORE
+int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh);
+void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size);
+void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps);
+void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
+ unsigned short alpha[NCCTRL_WIN],
unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
-void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
-void t3_get_cong_cntl_tab(struct adapter *adap,
+void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS]);
+void t3_get_cong_cntl_tab(adapter_t *adap,
unsigned short incr[NMTUS][NCCTRL_WIN]);
-void t3_config_trace_filter(struct adapter *adapter,
- const struct trace_params *tp, int filter_index,
- int invert, int enable);
-int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
-
-void t3_sge_prep(struct adapter *adap, struct sge_params *p);
-void t3_sge_init(struct adapter *adap, struct sge_params *p);
-int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
+ int filter_index, int invert, int enable);
+int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched);
+int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg);
+void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
+ unsigned int *ipg);
+void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED]);
+void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
+ unsigned int start, unsigned int n);
+#endif
+
+void t3_sge_prep(adapter_t *adap, struct sge_params *p);
+void t3_sge_init(adapter_t *adap, struct sge_params *p);
+int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
enum sge_context_type type, int respq, u64 base_addr,
unsigned int size, unsigned int token, int gen,
unsigned int cidx);
-int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
- int gts_enable, u64 base_addr, unsigned int size,
- unsigned int esize, unsigned int cong_thres, int gen,
- unsigned int cidx);
-int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
- int irq_vec_idx, u64 base_addr, unsigned int size,
+int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
+ u64 base_addr, unsigned int size, unsigned int esize,
+ unsigned int cong_thres, int gen, unsigned int cidx);
+int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
+ u64 base_addr, unsigned int size,
unsigned int fl_thres, int gen, unsigned int cidx);
-int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
- unsigned int size, int rspq, int ovfl_mode,
+int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
+ unsigned int size, int rspq, int ovfl_mode,
unsigned int credits, unsigned int credit_thres);
-int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
-int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
-int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
-int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
-int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
-int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
-int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
-int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
-int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable);
+int t3_sge_disable_fl(adapter_t *adapter, unsigned int id);
+int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id);
+int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id);
+int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4]);
+int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
unsigned int credits);
-void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops);
-void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops);
-void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops);
-void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+int t3_elmr_blk_write(adapter_t *adap, int start, const u32 *vals, int n);
+int t3_elmr_blk_read(adapter_t *adap, int start, u32 *vals, int n);
+int t3_vsc7323_init(adapter_t *adap, int nports);
+int t3_vsc7323_set_speed_fc(adapter_t *adap, int speed, int fc, int port);
+int t3_vsc7323_set_mtu(adapter_t *adap, unsigned int mtu, int port);
+int t3_vsc7323_set_addr(adapter_t *adap, u8 addr[6], int port);
+int t3_vsc7323_enable(adapter_t *adap, int port, int which);
+int t3_vsc7323_disable(adapter_t *adap, int port, int which);
+const struct mac_stats *t3_vsc7323_update_stats(struct cmac *mac);
+
+int t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+int t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+int t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+int t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
-void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops);
-#endif /* __CHELSIO_COMMON_H */
+int t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+int t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+#endif /* __CHELSIO_COMMON_H */
--- /dev/null
+/*
+ * This file is part of the Chelsio T3 Ethernet driver.
+ *
+ * Copyright (C) 2003-2007 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+#ifndef __CXGB3_COMPAT_H
+#define __CXGB3_COMPAT_H
+
+#include <linux/version.h>
+#include "common.h"
+#include <linux/pci.h>
+
+/* XXX Verify OS version */
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,13) && \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,5)
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5)
+
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+
+static inline void pci_disable_msi(struct pci_dev *dev)
+{}
+
+static inline int pci_enable_msix(struct pci_dev *dev,
+				  struct msix_entry *entries, int nvec)
+{
+ return -1;
+}
+
+static inline void pci_disable_msix(struct pci_dev *dev)
+{}
+
+static inline struct mii_ioctl_data *if_mii(struct ifreq *rq)
+{
+	return (struct mii_ioctl_data *)&rq->ifr_ifru;
+}
+
+#define _spin_trylock spin_trylock
+
+#endif /* KERNEL_VERSION(2,6,5) */
+
+#ifndef ATOMIC_ADD_RETURN
+#if defined(CONFIG_X86_64)
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+ int __i = i;
+ __asm__ __volatile__(
+ LOCK "xaddl %0, %1;"
+ :"=r"(i)
+ :"m"(v->counter), "0"(i));
+ return i + __i;
+}
+
+#elif defined(CONFIG_X86)
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+ int __i;
+#ifdef CONFIG_M386
+ if(unlikely(boot_cpu_data.x86==3))
+ goto no_xadd;
+#endif
+ /* Modern 486+ processor */
+ __i = i;
+ __asm__ __volatile__(
+ LOCK "xaddl %0, %1;"
+ :"=r"(i)
+ :"m"(v->counter), "0"(i));
+ return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+ local_irq_disable();
+ __i = atomic_read(v);
+ atomic_set(v, i + __i);
+ local_irq_enable();
+ return i + __i;
+#endif
+}
+
+#elif defined(CONFIG_IA64)
+#define atomic_add_return(i,v) \
+({ \
+ int __ia64_aar_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+ || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+ || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+ || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+ ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+ : ia64_atomic_add(__ia64_aar_i, v); \
+})
+
+#elif defined(CONFIG_PPC64)
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#elif defined(CONFIG_PPC)
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+#endif
+#endif /* ATOMIC_ADD_RETURN */
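/*
 * Hypothetical usage sketch (editorial, not part of the patch): the point
 * of atomic_add_return() over plain atomic_add() is that it hands back
 * the post-addition value, so transitions can be detected without a lock.
 */
static inline int take_first_ref(atomic_t *refcnt)
{
	return atomic_add_return(1, refcnt) == 1;	/* 1 => we were first */
}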
+
+#ifndef SPIN_TRYLOCK_IRQSAVE
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ _spin_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+#endif
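/*
 * Hypothetical usage sketch: the fallback above preserves the usual
 * spin_trylock_irqsave() contract, i.e. interrupts stay disabled only
 * if the lock was actually acquired.
 */
static inline int try_touch_hw(spinlock_t *lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(lock, flags))
		return 0;		/* lock busy; flags already restored */
	/* ... access hardware ... */
	spin_unlock_irqrestore(lock, flags);
	return 1;
}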
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+static inline int t3_os_pci_save_state(adapter_t *adapter)
+{
+	return pci_save_state(adapter->pdev, adapter->t3_config_space);
+}
+
+static inline int t3_os_pci_restore_state(adapter_t *adapter)
+{
+	return pci_restore_state(adapter->pdev, adapter->t3_config_space);
+}
+
+static inline void
+cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
+				  struct work_struct *work)
+{
+ while (!cancel_delayed_work(work))
+ flush_workqueue(wq);
+}
+
+#else
+static inline int t3_os_pci_save_state(adapter_t *adapter)
+{
+ return pci_save_state(adapter->pdev);
+}
+
+static inline int t3_os_pci_restore_state(adapter_t *adapter)
+{
+ return pci_restore_state(adapter->pdev);
+}
+#endif
+
+static inline int __netif_rx_schedule_prep(struct net_device *dev)
+{
+ return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+#ifndef CONFIG_DEBUG_FS
+#include <linux/err.h>
+/* Adapted from debugfs.h */
+static inline struct dentry *debugfs_create_dir(const char *name,
+ struct dentry *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void debugfs_remove(struct dentry *dentry)
+{}
+#else
+#include <linux/debugfs.h>
+#endif
+
+static inline void setup_timer(struct timer_list * timer,
+ void (*function)(unsigned long),
+ unsigned long data)
+{
+ timer->function = function;
+ timer->data = data;
+ init_timer(timer);
+}
+
+#define DEFINE_MUTEX DECLARE_MUTEX
+#define mutex_lock down
+#define mutex_unlock up
+
+#undef DEFINE_RWLOCK /* broken RH4u3 definition, rw_lock_t does not exist */
+#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+
+#define gfp_t unsigned
+
+/* 2.6.14 and above */
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
+#include <linux/debugfs.h>
+
+static inline int t3_os_pci_save_state(adapter_t *adapter)
+{
+ return pci_save_state(adapter->pdev);
+}
+
+static inline int t3_os_pci_restore_state(adapter_t *adapter)
+{
+ return pci_restore_state(adapter->pdev);
+}
+
+#endif /* LINUX_VERSION_CODE */
+
+#if !defined(NETEVENT)
+struct notifier_block;
+
+static inline void register_netevent_notifier(struct notifier_block *nb)
+{}
+
+static inline void unregister_netevent_notifier(struct notifier_block *nb)
+{}
+
+#if defined(CONFIG_TCP_OFFLOAD_MODULE) && !defined(CONFIG_IA64)
+#define OFLD_USE_KPROBES
+#endif
+
+#else
+extern int netdev_nit;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+
+typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
+#define DECLARE_INTR_HANDLER(handler, irq, cookie, regs) \
+ static irqreturn_t handler(int irq, void *cookie, struct pt_regs *regs)
+
+intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
+static inline void t3_poll_handler(struct adapter *adapter,
+ struct sge_qset *qs)
+{
+ t3_intr_handler(adapter, qs->rspq.polling) (0,
+ (adapter->flags & USING_MSIX) ? (void *)qs : (void *)adapter,
+ NULL);
+}
+
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+
+#ifndef I_PRIVATE
+#define i_private u.generic_ip
+#endif
+
+#else /* 2.6.19 */
+typedef irqreturn_t (*intr_handler_t)(int, void *);
+#define DECLARE_INTR_HANDLER(handler, irq, cookie, regs) \
+ static irqreturn_t handler(int irq, void *cookie)
+
+intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
+static inline void t3_poll_handler(struct adapter *adapter,
+ struct sge_qset *qs)
+{
+ t3_intr_handler(adapter, qs->rspq.polling) (0,
+ (adapter->flags & USING_MSIX) ? (void *)qs : (void *)adapter);
+}
+
+#endif /* 2.6.19 */
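/*
 * Hypothetical usage sketch: a handler written once with
 * DECLARE_INTR_HANDLER() compiles against both IRQ prototypes; the
 * cookie is whatever was passed to request_irq() (a qset under MSI-X,
 * the adapter otherwise). The name below is illustrative only.
 */
DECLARE_INTR_HANDLER(example_qset_intr, irq, cookie, regs)
{
	struct sge_qset *qs = cookie;

	/* ... process qs->rspq entries here ... */
	return IRQ_HANDLED;
}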
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+#define DECLARE_TASK_FUNC(task, task_param) \
+ static void task(void *task_param)
+
+#define WORK2ADAP(task_param, task) task_param
+#define DELWORK2ADAP(task_param, task) task_param
+#define WORK2T3CDATA(task_param, task) task_param
+
+#define delayed_work work_struct
+
+#define T3_INIT_WORK INIT_WORK
+#define T3_INIT_DELAYED_WORK INIT_WORK
+
+#else /* 2.6.20 */
+
+#define DECLARE_TASK_FUNC(task, task_param) \
+ static void task(struct work_struct *task_param)
+
+#define WORK2ADAP(task_param, task) \
+ container_of(task_param, struct adapter, task)
+
+#define DELWORK2ADAP(task_param, task) \
+ container_of(task_param, struct adapter, task.work)
+
+#define WORK2T3CDATA(task_param, task) \
+ container_of(task_param, struct t3c_data, task)
+
+#define T3_INIT_WORK(task_handler, task, adapter) \
+ INIT_WORK(task_handler, task)
+
+#define T3_INIT_DELAYED_WORK(task_handler, task, adapter) \
+ INIT_DELAYED_WORK(task_handler, task)
+
+#endif /* 2.6.20 */
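/*
 * Hypothetical usage sketch: one task body serves both work-queue APIs.
 * On 2.6.20+ WORK2ADAP() recovers the adapter with container_of(); on
 * older kernels the argument already is the adapter pointer. The field
 * name "example_task" is assumed for illustration.
 */
DECLARE_TASK_FUNC(example_task_handler, task_param)
{
	struct adapter *adap = WORK2ADAP(task_param, example_task);

	/* ... do the deferred work for adap ... */
}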
+
+#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
+#include <linux/firmware.h>
+#else
+struct firmware {
+ size_t size;
+ u8 *data;
+};
+
+struct device;
+
+static inline int request_firmware(const struct firmware **firmware_p,
+				   const char *name,
+				   struct device *device)
+{
+ printk(KERN_WARNING
+ "FW_LOADER not set in this kernel. FW upgrade aborted.\n");
+ return -1;
+}
+
+static inline void release_firmware(const struct firmware *fw)
+{}
+#endif /* FW_LOADER */
+
+#if !defined(RTNL_TRYLOCK)
+#include <linux/rtnetlink.h>
+static inline int rtnl_trylock(void)
+{
+ return !rtnl_shlock_nowait();
+}
+#endif /* RTNL_TRYLOCK */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#ifndef KZALLOC
+static inline void *kzalloc(size_t size, int flags)
+{
+ void *ret = kmalloc(size, flags);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
+}
+#endif /* KZALLOC */
+#endif
+
+#ifndef GSO_SIZE
+#define gso_size tso_size
+#endif /* GSO_SIZE */
+
+#ifndef NIPQUAD_FMT
+#define NIPQUAD_FMT "%u.%u.%u.%u"
+#endif
+
+/* sysfs compatibility */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
+
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+
+#define cxgb3_compat_device class_device
+
+#define CXGB3_SHOW_FUNC(func, d, attr, buf) \
+	static ssize_t func(struct cxgb3_compat_device *d, \
+			    char *buf)
+
+#define CXGB3_STORE_FUNC(func, d, attr, buf, len) \
+ static ssize_t func(struct cxgb3_compat_device *d, \
+ const char *buf, \
+ size_t len)
+
+#define CXGB3_DEVICE_ATTR(_name,_mode,_show,_store) \
+struct class_device_attribute dev_attr_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+#ifndef LINUX_2_4
+static inline struct kobject *net2kobj(struct net_device *dev)
+{
+ return &dev->class_dev.kobj;
+}
+#endif
+
+#else /* sysfs compatibility */
+
+#define cxgb3_compat_device device
+
+#define CXGB3_SHOW_FUNC(func, d, attr, buf) \
+	static ssize_t func(struct cxgb3_compat_device *d, \
+			    struct device_attribute *attr, \
+			    char *buf)
+
+#define CXGB3_STORE_FUNC(func, d, attr, buf, len) \
+ static ssize_t func(struct cxgb3_compat_device *d, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t len)
+
+#define CXGB3_DEVICE_ATTR DEVICE_ATTR
+
+static inline struct kobject *net2kobj(struct net_device *dev)
+{
+ return &dev->dev.kobj;
+}
+
+#endif /* sysfs compatibility */
+
+#if !defined(IRQF)
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF */
+
+#if !defined(VLANGRP)
+#include <linux/if_vlan.h>
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+ int vlan_id)
+{
+ return vg->vlan_devices[vlan_id];
+}
+#endif /* VLANGRP */
+
+#if !defined(for_each_netdev)
+#define for_each_netdev(d) \
+ for (d = dev_base; d; d = d->next)
+#endif
+
+#include <linux/ip.h>
+
+#if !defined(NEW_SKB_COPY)
+static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
+ void *to,
+ const unsigned int len)
+{
+ memcpy(to, skb->data, len);
+}
+
+static inline void skb_copy_to_linear_data(struct sk_buff *skb,
+ const void *from,
+ const unsigned int len)
+{
+ memcpy(skb->data, from, len);
+}
+#endif
+
+#if defined(NEW_SKB_OFFSET)
+static inline void cxgb3_set_skb_header(struct sk_buff *skb,
+ struct iphdr *ip_hdr,
+ int offset)
+{
+ skb_set_network_header(skb, offset);
+}
+
+#else /* NEW_SKB_OFFSET */
+static inline int skb_network_offset(struct sk_buff *skb)
+{
+ return skb->nh.raw - skb->data;
+}
+
+static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+{
+ return skb->h.raw;
+}
+
+#if !defined(CONFIG_XEN)
+/* XEN 4.1 and 4.2 have these in their RHEL 5.1 2.6.18-based kernels */
+static inline int skb_transport_offset(const struct sk_buff *skb)
+{
+ return skb->h.raw - skb->data;
+}
+
+#if !defined(T3_IP_HDR)
+static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
+{
+ return skb->nh.iph;
+}
+#endif
+
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+ return skb->h.th;
+}
+#endif
+
+static inline void skb_reset_mac_header(struct sk_buff *skb)
+{
+ skb->mac.raw = skb->data;
+}
+
+static inline void skb_reset_network_header(struct sk_buff *skb)
+{
+ skb->nh.raw = skb->data;
+}
+
+static inline void skb_reset_transport_header(struct sk_buff *skb)
+{
+ skb->h.raw = skb->data;
+}
+
+static inline void cxgb3_set_skb_header(struct sk_buff *skb,
+ struct iphdr *ip_hdr,
+ int offset)
+{
+ skb->nh.iph = ip_hdr;
+}
+
+#endif /* NEW_SKB_OFFSET */
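/*
 * Hypothetical usage sketch: with the shims above, code written against
 * the 2.6.22+ sk_buff header accessors builds unchanged on older
 * kernels as well.
 */
static inline unsigned int ip_payload_len(const struct sk_buff *skb)
{
	return ntohs(ip_hdr(skb)->tot_len) - ip_hdr(skb)->ihl * 4;
}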
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+#if defined(ETHTOOL_GPERMADDR)
+#define CXGB3_ETHTOOL_GPERMADDR ETHTOOL_GPERMADDR
+#endif
+#endif
+
+#if !defined(TRANSPORT_HEADER)
+#define transport_header h.raw
+#endif
+#endif /* __CXGB3_COMPAT_H */
/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
#define _CXGB3_OFFLOAD_CTL_DEFS_H
+#include <linux/compiler.h>
+
enum {
- GET_MAX_OUTSTANDING_WR = 0,
+ GET_MAX_OUTSTANDING_WR = 0,
GET_TX_MAX_CHUNK = 1,
GET_TID_RANGE = 2,
GET_STID_RANGE = 3,
RDMA_CTRL_QP_SETUP = 17,
RDMA_GET_MEM = 18,
+ FAILOVER = 30,
+ FAILOVER_DONE = 31,
+ FAILOVER_CLEAR = 32,
+ FAILOVER_ACTIVE_SLAVE = 33,
+ FAILOVER_PORT_DOWN = 34,
+ FAILOVER_PORT_UP = 35,
+ FAILOVER_PORT_RELEASE = 36,
+
+ GET_CPUIDX_OF_QSET = 40,
+ GET_PORT_SCHED = 41,
+ GET_PORT_ARRAY = 42,
+
GET_RX_PAGE_INFO = 50,
};
* Structure used to describe a TID range. Valid TIDs are [base, base+num).
*/
struct tid_range {
- unsigned int base; /* first TID */
- unsigned int num; /* number of TIDs in range */
+ unsigned int base; /* first TID */
+ unsigned int num; /* number of TIDs in range */
};
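/*
 * Hypothetical usage sketch (editorial): offload clients drive the
 * opcodes above through the t3cdev ctl() hook; error handling elided.
 */
static void example_query_tids(struct t3cdev *tdev)
{
	struct tid_range tr;

	if (tdev->ctl(tdev, GET_TID_RANGE, &tr) == 0)
		printk(KERN_DEBUG "TIDs [%u, %u)\n",
		       tr.base, tr.base + tr.num);
}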
/*
* Structure used to request the size and contents of the MTU table.
*/
struct mtutab {
- unsigned int size; /* # of entries in the MTU table */
- const unsigned short *mtus; /* the MTU table values */
+ unsigned int size; /* # of entries in the MTU table */
+ const unsigned short *mtus; /* the MTU table values */
};
struct net_device;
* Structure used to request the adapter net_device owning a given MAC address.
*/
struct iff_mac {
- struct net_device *dev; /* the net_device */
- const unsigned char *mac_addr; /* MAC address to lookup */
+ struct net_device *dev; /* the net_device */
+ const unsigned char *mac_addr; /* MAC address to lookup */
u16 vlan_tag;
};
+/* Structure used to request a port's offload scheduler */
+struct port_sched {
+ struct net_device *dev; /* the net_device */
+ int sched; /* associated scheduler */
+};
+
struct pci_dev;
/*
* Structure used to request the TCP DDP parameters.
*/
struct ddp_params {
- unsigned int llimit; /* TDDP region start address */
- unsigned int ulimit; /* TDDP region end address */
- unsigned int tag_mask; /* TDDP tag mask */
+ unsigned int llimit; /* TDDP region start address */
+ unsigned int ulimit; /* TDDP region end address */
+ unsigned int tag_mask; /* TDDP tag mask */
struct pci_dev *pdev;
};
struct adap_ports {
- unsigned int nports; /* number of ports on this adapter */
- struct net_device *lldevs[2];
+ unsigned int nports; /* number of ports on this adapter */
+ struct net_device *lldevs[4]; /* Max number of ports is 4 */
+};
+
+struct port_array {
+ unsigned int nports; /* number of ports on this adapter */
+ struct net_device **lldevs; /* points to array of net_devices */
+};
+
+struct bond_ports {
+ unsigned int port;
+ unsigned int nports; /* number of ports on this adapter */
+ unsigned int ports[4]; /* Max number of ports is 4 */
};
/*
* Structure used to return information to the iscsi layer.
*/
struct ulp_iscsi_info {
- unsigned int offset;
- unsigned int llimit;
- unsigned int ulimit;
- unsigned int tagmask;
- unsigned int pgsz3;
- unsigned int pgsz2;
- unsigned int pgsz1;
- unsigned int pgsz0;
- unsigned int max_rxsz;
- unsigned int max_txsz;
- struct pci_dev *pdev;
+ unsigned int offset;
+ unsigned int llimit;
+ unsigned int ulimit;
+ unsigned int tagmask;
+ u8 pgsz_factor[4];
+ unsigned int max_rxsz;
+ unsigned int max_txsz;
+ struct pci_dev *pdev;
+};
+
+/*
+ * Offload TX/RX page information.
+ */
+struct ofld_page_info {
+ unsigned int page_size; /* Page size, should be a power of 2 */
+ unsigned int num; /* Number of pages */
};
/*
* Structure used to return information to the RDMA layer.
*/
struct rdma_info {
- unsigned int tpt_base; /* TPT base address */
- unsigned int tpt_top; /* TPT last entry address */
- unsigned int pbl_base; /* PBL base address */
- unsigned int pbl_top; /* PBL last entry address */
- unsigned int rqt_base; /* RQT base address */
- unsigned int rqt_top; /* RQT last entry address */
- unsigned int udbell_len; /* user doorbell region length */
- unsigned long udbell_physbase; /* user doorbell physical start addr */
- void __iomem *kdb_addr; /* kernel doorbell register address */
- struct pci_dev *pdev; /* associated PCI device */
+ unsigned int tpt_base; /* TPT base address */
+ unsigned int tpt_top; /* TPT last entry address */
+ unsigned int pbl_base; /* PBL base address */
+ unsigned int pbl_top; /* PBL last entry address */
+ unsigned int rqt_base; /* RQT base address */
+ unsigned int rqt_top; /* RQT last entry address */
+ unsigned int udbell_len; /* user doorbell region length */
+ unsigned long udbell_physbase; /* user doorbell physical start addr */
+ void __iomem *kdb_addr; /* kernel doorbell register address */
+ struct pci_dev *pdev; /* associated PCI device */
};
/*
unsigned long long base_addr;
unsigned int size;
};
-
-/*
- * Offload TX/RX page information.
- */
-struct ofld_page_info {
- unsigned int page_size; /* Page size, should be a power of 2 */
- unsigned int num; /* Number of pages */
-};
-#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
+#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
/*
- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
#ifndef _CHELSIO_DEFS_H
#define _CHELSIO_DEFS_H
return &t->atid_tab[atid - t->atid_base];
}
+
static inline union listen_entry *stid2entry(const struct tid_info *t,
unsigned int stid)
{
unsigned int tid)
{
struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
- &(t->tid_tab[tid]) : NULL;
+ &(t->tid_tab[tid]) : NULL;
return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
}
/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#ifndef __CHIOCTL_H__
#define __CHIOCTL_H__
+#ifndef AUTOCONF_INCLUDED
+#include <linux/autoconf.h>
+#endif
+
/*
* Ioctl commands specific to this driver.
*/
enum {
+ CHELSIO_SETREG = 1024,
+ CHELSIO_GETREG = 1025,
+ CHELSIO_SETTPI = 1026,
+ CHELSIO_GETTPI = 1027,
+ CHELSIO_DEVUP = 1028,
CHELSIO_GETMTUTAB = 1029,
CHELSIO_SETMTUTAB = 1030,
+ CHELSIO_GETMTU = 1031,
CHELSIO_SET_PM = 1032,
CHELSIO_GET_PM = 1033,
+ CHELSIO_GET_TCAM = 1034,
+ CHELSIO_SET_TCAM = 1035,
+ CHELSIO_GET_TCB = 1036,
+ CHELSIO_READ_TCAM_WORD = 1037,
CHELSIO_GET_MEM = 1038,
+ CHELSIO_GET_SGE_CONTEXT = 1039,
+ CHELSIO_GET_SGE_DESC = 1040,
CHELSIO_LOAD_FW = 1041,
+ CHELSIO_GET_PROTO = 1042,
+ CHELSIO_SET_PROTO = 1043,
CHELSIO_SET_TRACE_FILTER = 1044,
CHELSIO_SET_QSET_PARAMS = 1045,
CHELSIO_GET_QSET_PARAMS = 1046,
CHELSIO_SET_QSET_NUM = 1047,
CHELSIO_GET_QSET_NUM = 1048,
+ CHELSIO_SET_PKTSCHED = 1049,
+ CHELSIO_SET_FILTER = 1050,
+ CHELSIO_SET_HW_SCHED = 1051,
+ CHELSIO_LOAD_BOOT = 1054,
+ CHELSIO_CLEAR_STATS = 1055,
};
+/* statistics categories */
+enum {
+ STATS_PORT = 1 << 1,
+ STATS_QUEUE = 1 << 2,
+};
+
struct ch_reg {
uint32_t cmd;
uint32_t addr;
uint32_t queue_num;
uint32_t idx;
uint32_t size;
- uint8_t data[128];
+ uint8_t data[128];
};
struct ch_mem_range {
uint32_t addr;
uint32_t len;
uint32_t version;
- uint8_t buf[0];
+ uint8_t buf[0];
};
+enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
+
struct ch_qset_params {
uint32_t cmd;
uint32_t qset_idx;
- int32_t txq_size[3];
- int32_t rspq_size;
- int32_t fl_size[2];
- int32_t intr_lat;
- int32_t polling;
- int32_t cong_thres;
+ int32_t txq_size[3];
+ int32_t rspq_size;
+ int32_t fl_size[2];
+ int32_t intr_lat;
+ int32_t polling;
+ int32_t lro;
+ int32_t cong_thres;
+ int32_t vector;
+ int32_t qnum;
};
struct ch_pktsched_params {
uint32_t cmd;
- uint8_t sched;
- uint8_t idx;
- uint8_t min;
- uint8_t max;
- uint8_t binding;
+ uint8_t sched;
+ uint8_t idx;
+ uint8_t min;
+ uint8_t max;
+ uint8_t binding;
+};
+
+struct ch_hw_sched {
+ uint32_t cmd;
+ uint8_t sched;
+ int8_t mode;
+ int8_t channel;
+ int32_t kbps; /* rate in Kbps */
+ int32_t class_ipg; /* tenths of nanoseconds */
+ int32_t flow_ipg; /* usec */
};
#ifndef TCB_SIZE
/* TCB size in 32-bit words */
#define TCB_WORDS (TCB_SIZE / 4)
-enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
-
struct ch_mtus {
uint32_t cmd;
uint32_t nmtus;
uint32_t vlan_mask:12;
uint32_t intf:4;
uint32_t intf_mask:4;
- uint8_t proto;
- uint8_t proto_mask;
- uint8_t invert_match:1;
- uint8_t config_tx:1;
- uint8_t config_rx:1;
- uint8_t trace_tx:1;
- uint8_t trace_rx:1;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint8_t invert_match:1;
+ uint8_t config_tx:1;
+ uint8_t config_rx:1;
+ uint8_t trace_tx:1;
+ uint8_t trace_rx:1;
};
#define SIOCCHIOCTL SIOCDEVPRIVATE
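/*
 * Hypothetical user-space sketch: these private commands travel through
 * SIOCCHIOCTL, with the command word leading the payload. The interface
 * name, the register offset, and ch_reg's "val" result field are
 * assumptions made for illustration.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int example_read_reg(int fd, uint32_t addr, uint32_t *out)
{
	struct ch_reg op = { .cmd = CHELSIO_GETREG, .addr = addr };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&op;
	if (ioctl(fd, SIOCCHIOCTL, &ifr) < 0)
		return -1;
	*out = op.val;	/* assumes ch_reg carries the value in "val" */
	return 0;
}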
/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#include <linux/module.h>
+#ifndef LINUX_2_4
#include <linux/moduleparam.h>
+#endif /* LINUX_2_4 */
#include <linux/init.h>
#include <linux/pci.h>
+#ifndef LINUX_2_4
#include <linux/dma-mapping.h>
+#endif /* LINUX_2_4 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
-#include <linux/firmware.h>
#include <asm/uaccess.h>
#include "common.h"
#include "firmware_exports.h"
enum {
- MAX_TXQ_ENTRIES = 16384,
+ MAX_TXQ_ENTRIES = 16384,
MAX_CTRL_TXQ_ENTRIES = 1024,
- MAX_RSPQ_ENTRIES = 16384,
- MAX_RX_BUFFERS = 16384,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
MAX_RX_JUMBO_BUFFERS = 16384,
- MIN_TXQ_ENTRIES = 4,
+ MIN_TXQ_ENTRIES = 4,
MIN_CTRL_TXQ_ENTRIES = 4,
- MIN_RSPQ_ENTRIES = 32,
- MIN_FL_ENTRIES = 32
+ MIN_RSPQ_ENTRIES = 32,
+ MIN_FL_ENTRIES = 32,
+ MIN_FL_JUMBO_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
#define EEPROM_MAGIC 0x38E2F10C
-#define to_net_dev(class) container_of(class, struct net_device, class_dev)
-
#define CH_DEVICE(devid, idx) \
- { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-
-static const struct pci_device_id cxgb3_pci_tbl[] = {
- CH_DEVICE(0x20, 0), /* PE9000 */
- CH_DEVICE(0x21, 1), /* T302E */
- CH_DEVICE(0x22, 2), /* T310E */
- CH_DEVICE(0x23, 3), /* T320X */
- CH_DEVICE(0x24, 1), /* T302X */
- CH_DEVICE(0x25, 3), /* T320E */
- CH_DEVICE(0x26, 2), /* T310X */
- CH_DEVICE(0x30, 2), /* T3B10 */
- CH_DEVICE(0x31, 3), /* T3B20 */
- CH_DEVICE(0x32, 1), /* T3B02 */
- {0,}
+ { \
+ .vendor = PCI_VENDOR_ID_CHELSIO, \
+ .device = (devid), \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (idx) \
+ }
+
+static struct pci_device_id cxgb3_pci_tbl[] = {
+ CH_DEVICE(0x20, 0), /* PE9000 */
+ CH_DEVICE(0x21, 1), /* T302E */
+ CH_DEVICE(0x22, 2), /* T310E */
+ CH_DEVICE(0x23, 3), /* T320X */
+ CH_DEVICE(0x24, 1), /* T302X */
+ CH_DEVICE(0x25, 3), /* T320E */
+ CH_DEVICE(0x26, 2), /* T310X */
+ CH_DEVICE(0x30, 2), /* T3B10 */
+ CH_DEVICE(0x31, 3), /* T3B20 */
+ CH_DEVICE(0x32, 1), /* T3B02 */
+ CH_DEVICE(0x33, 4), /* T3B04 */
+ { 0, }
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
+
/*
* The driver uses the best interrupt scheme available on a platform in the
* order MSI-X, MSI, legacy pin interrupts. This parameter determines which
static int msi = 2;
module_param(msi, int, 0644);
-MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
+MODULE_PARM_DESC(msi, "whether to use MSI-X (2), MSI (1) or Legacy INTx (0)");
/*
* The driver enables offload as a default.
module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
+/*
+ * The driver uses an auto-queue algorithm by default.
+ * To disable it and force a single queue-set per port, use singleq = 1.
+ */
+static int singleq = 0;
+module_param(singleq, int, 0644);
+MODULE_PARM_DESC(singleq, "use a single queue-set per port");
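/*
 * Usage note (editorial sketch): a single queue-set per port can be
 * forced at load time, e.g. "modprobe cxgb3 singleq=1".
 */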
+
/*
* We have work elements that we need to cancel when an interface is taken
* down. Normally the work elements would be executed by keventd but that
* will block keventd as it needs the rtnl lock, and we'll deadlock waiting
* for our work to complete. Get our own work queue to solve this.
*/
+#ifdef LINUX_2_4
+struct workqueue_struct *cxgb3_wq;
+#else
static struct workqueue_struct *cxgb3_wq;
+#endif /* LINUX_2_4 */
+
+#ifndef LINUX_2_4
+static struct dentry *cxgb3_debugfs_root;
+#endif /* LINUX_2_4 */
/**
* link_report - show link status and link speed/duplex
- * @p: the port whose settings are to be reported
+ * @dev: the port whose settings are to be reported
*
* Shows the link status, speed, and duplex of a port.
*/
if (!netif_carrier_ok(dev))
printk(KERN_INFO "%s: link down\n", dev->name);
else {
+ static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
+
const char *s = "10Mbps";
const struct port_info *p = netdev_priv(dev);
break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
- p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ printk(KERN_INFO "%s: link up, %s, %s-duplex, %s PAUSE\n",
+ dev->name, s,
+ p->link_config.duplex == DUPLEX_FULL ? "full" : "half",
+ fc[p->link_config.fc]);
}
}
/**
* t3_os_link_changed - handle link status changes
* @adapter: the adapter associated with the link change
- * @port_id: the port index whose limk status has changed
+ * @port_id: the port index whose link status has changed
* @link_stat: the new status of the link
* @speed: the new speed setting
* @duplex: the new duplex setting
if (link_stat != netif_carrier_ok(dev)) {
if (link_stat) {
- t3_mac_enable(mac, MAC_DIRECTION_RX);
+ msleep(10);
+ t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+ /* Clear errors created by MAC enable */
+ t3_set_reg_field(adapter,
+ A_XGM_STAT_CTRL + pi->mac.offset,
+ F_CLRSTATS, 1);
+
+ if (adapter->params.nports > 2) {
+ pi->max_ofld_bw = speed * 940;
+ t3_config_sched(adapter,
+ pi->max_ofld_bw, port_id);
+ }
netif_carrier_on(dev);
} else {
- netif_carrier_off(dev);
pi->phy.ops->power_down(&pi->phy, 1);
t3_mac_disable(mac, MAC_DIRECTION_RX);
t3_link_start(&pi->phy, mac, &pi->link_config);
+ netif_carrier_off(dev);
}
link_report(dev);
/**
* link_start - enable a port
- * @dev: the device to enable
+ * @dev: the port to enable
*
* Performs the MAC and PHY actions needed to enable a port.
*/
struct cmac *mac = &pi->mac;
init_rx_mode(&rm, dev, dev->mc_list);
- t3_mac_reset(mac);
+ if (!mac->multiport)
+ t3_mac_reset(mac);
t3_mac_set_mtu(mac, dev->mtu);
t3_mac_set_address(mac, 0, dev->dev_addr);
t3_mac_set_rx_mode(mac, &rm);
t3_link_start(&pi->phy, mac, &pi->link_config);
- t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
-static inline void cxgb_disable_msi(struct adapter *adapter)
+static void cxgb_disable_msi(struct adapter *adapter)
{
if (adapter->flags & USING_MSIX) {
pci_disable_msix(adapter->pdev);
/*
* Interrupt handler for asynchronous events used with MSI-X.
*/
-static irqreturn_t t3_async_intr_handler(int irq, void *cookie, struct pt_regs *ptregs)
+DECLARE_INTR_HANDLER(t3_async_intr_handler, irq, cookie, regs)
{
t3_slow_intr_handler(cookie);
return IRQ_HANDLED;
for (i = 0; i < pi->nqsets; i++, msi_idx++) {
snprintf(adap->msix_info[msi_idx].desc, n,
- "%s (queue %d)", d->name, i);
+ "%s (queue %d)", d->name,
+ pi->first_qset + i);
adap->msix_info[msi_idx].desc[n] = 0;
}
}
}
-static int request_msix_data_irqs(struct adapter *adap)
+static int request_msix_data_irqs(adapter_t *adap)
{
int i, j, err, qidx = 0;
for (j = 0; j < nqsets; ++j) {
err = request_irq(adap->msix_info[qidx + 1].vec,
t3_intr_handler(adap,
- adap->sge.qs[qidx].
- rspq.polling,NULL),
+ adap->sge.qs[qidx].rspq.polling),
0, adap->msix_info[qidx + 1].desc,
&adap->sge.qs[qidx]);
if (err) {
}
skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
- greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
+ greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
memset(greq, 0, sizeof(*greq));
greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
* We always configure the RSS mapping for two ports since the mapping
* table has plenty of entries.
*/
-static void setup_rss(struct adapter *adap)
+static void setup_rss(adapter_t *adap)
{
int i;
- unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
- unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
+ unsigned int nq[2];
u8 cpus[SGE_QSETS + 1];
u16 rspq_map[RSS_TABLE_SIZE];
for (i = 0; i < SGE_QSETS; ++i)
cpus[i] = i;
- cpus[SGE_QSETS] = 0xff; /* terminator */
+ cpus[SGE_QSETS] = 0xff; /* terminator */
+
+ nq[0] = nq[1] = 0;
+ for_each_port(adap, i) {
+ const struct port_info *pi = adap2pinfo(adap, i);
+
+ nq[pi->tx_chan] += pi->nqsets;
+ }
for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
- rspq_map[i] = i % nq0;
- rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
+ rspq_map[i] = nq[0] ? i % nq[0] : 0;
+ rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
}
+ /* Calculate the reverse RSS map table */
+ for (i = 0; i < RSS_TABLE_SIZE; ++i)
+ if (adap->rrss_map[rspq_map[i]] == 0xff)
+ adap->rrss_map[rspq_map[i]] = i;
+
t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
- F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
- V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
+ F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
+ F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
+ cpus, rspq_map);
}
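/*
 * Worked example (assumed configuration): two ports, one per TX channel,
 * with two qsets each gives nq[0] = nq[1] = 2. The lower half of the RSS
 * table then cycles over qsets 0-1 and the upper half over qsets 2-3, so
 * hash results on each channel land only on that channel's own response
 * queues.
 */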
/*
* both netdevices representing interfaces and the dummy ones for the extra
* queues.
*/
-static void quiesce_rx(struct adapter *adap)
+static void quiesce_rx(adapter_t *adap)
{
int i;
struct net_device *dev;
struct net_device *dev = adap->port[i];
const struct port_info *pi = netdev_priv(dev);
- for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+ for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
+ ++j, ++qset_idx) {
+ if (!pi->rx_csum_offload)
+ adap->params.sge.qset[qset_idx].lro = 0;
err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 :
irq_idx,
&adap->params.sge.qset[qset_idx], ntxq,
- j == 0 ? dev :
+ j == pi->first_qset ? dev :
adap-> dummy_netdev[dummy_dev_idx++]);
if (err) {
t3_free_sge_resources(adap);
return 0;
}
-static ssize_t attr_show(struct class_device *c, char *buf,
- ssize_t(*format) (struct net_device *, char *))
+#ifndef LINUX_2_4
+static ssize_t attr_show(struct cxgb3_compat_device *d, char *buf,
+ ssize_t (*format)(struct net_device *, char *))
{
ssize_t len;
/* Synchronize with ioctls that may shut down the device */
rtnl_lock();
- len = (*format) (to_net_dev(c), buf);
+ len = (*format)(to_net_dev(d), buf);
rtnl_unlock();
return len;
}
-static ssize_t attr_store(struct class_device *c, const char *buf, size_t len,
- ssize_t(*set) (struct net_device *, unsigned int),
+static ssize_t attr_store(struct cxgb3_compat_device *d,
+ const char *buf, size_t len,
+ ssize_t (*set)(struct net_device *, unsigned int),
unsigned int min_val, unsigned int max_val)
{
char *endp;
return -EINVAL;
rtnl_lock();
- ret = (*set) (to_net_dev(c), val);
+ ret = (*set)(to_net_dev(d), val);
if (!ret)
ret = len;
rtnl_unlock();
struct adapter *adap = pi->adapter; \
return sprintf(buf, "%u\n", val_expr); \
} \
-static ssize_t show_##name(struct class_device *c, char *buf) \
+CXGB3_SHOW_FUNC(show_##name, d, attr, buf) \
{ \
- return attr_show(c, buf, format_##name); \
+ return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = pi->adapter;
- int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
-
- if (adap->flags & FULL_INIT_DONE)
- return -EBUSY;
- if (val && adap->params.rev == 0)
- return -EINVAL;
- if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
- min_tids)
- return -EINVAL;
- adap->params.mc5.nfilters = val;
return 0;
}
-static ssize_t store_nfilters(struct class_device *c,
- const char *buf, size_t len)
+CXGB3_STORE_FUNC(store_nfilters, d, attr, buf, len)
{
- return attr_store(c, buf, len, set_nfilters, 0, ~0);
+ return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
return 0;
}
-static ssize_t store_nservers(struct class_device *c,
- const char *buf, size_t len)
+CXGB3_STORE_FUNC(store_nservers, d, attr, buf, len)
{
- return attr_store(c, buf, len, set_nservers, 0, ~0);
+ return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
-static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+static CXGB3_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
-static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
+static CXGB3_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
static struct attribute *cxgb3_attrs[] = {
- &class_device_attr_cam_size.attr,
- &class_device_attr_nfilters.attr,
- &class_device_attr_nservers.attr,
+ &dev_attr_cam_size.attr,
+ &dev_attr_nfilters.attr,
+ &dev_attr_nservers.attr,
NULL
};
-static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
+static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
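/*
 * Hypothetical usage sketch: the group is registered against each port's
 * sysfs kobject at probe time, e.g.
 *
 *	if (sysfs_create_group(net2kobj(adapter->port[i]),
 *			       &cxgb3_attr_group))
 *		dev_warn(&pdev->dev, "cannot create sysfs group\n");
 */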
+
+static ssize_t reg_attr_show(struct cxgb3_compat_device *d, char *buf, int reg,
+ int shift, unsigned int mask)
+{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ ssize_t len;
+ unsigned int v;
+
+ /* Synchronize with ioctls that may shut down the device */
+ rtnl_lock();
+ v = t3_read_reg(adap, reg);
+ len = sprintf(buf, "%u\n", (v >> shift) & mask);
+ rtnl_unlock();
+ return len;
+}
+
+static ssize_t reg_attr_store(struct cxgb3_compat_device *d, const char *buf,
+ size_t len, int reg, int shift,
+ unsigned int mask, unsigned int min_val,
+ unsigned int max_val)
+{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ char *endp;
+ unsigned int val;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (endp == buf || val < min_val || val > max_val)
+ return -EINVAL;
+
+ rtnl_lock();
+ t3_set_reg_field(adap, reg, mask << shift,
+ val << shift);
+ rtnl_unlock();
+ return len;
+}
+
+#define T3_REG_SHOW(name, reg, shift, mask) \
+CXGB3_SHOW_FUNC(show_##name, d, attr, buf) \
+{ \
+ return reg_attr_show(d, buf, reg, shift, mask); \
+}
+
+#define T3_REG_STORE(name, reg, shift, mask, min_val, max_val) \
+CXGB3_STORE_FUNC(store_##name, d, attr, buf, len) \
+{ \
+	return reg_attr_store(d, buf, len, reg, shift, mask, min_val, \
+			      max_val); \
+}
+
+#define T3_ATTR(name, reg, shift, mask, min_val, max_val) \
+T3_REG_SHOW(name, reg, shift, mask) \
+T3_REG_STORE(name, reg, shift, mask, min_val, max_val) \
+static CXGB3_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
-static ssize_t tm_attr_show(struct class_device *c, char *buf,
- int sched)
+T3_ATTR(tcp_retries1, A_TP_SHIFT_CNT, S_RXTSHIFTMAXR1, M_RXTSHIFTMAXR1, 3, 15);
+T3_ATTR(tcp_retries2, A_TP_SHIFT_CNT, S_RXTSHIFTMAXR2, M_RXTSHIFTMAXR2, 0, 15);
+T3_ATTR(tcp_syn_retries, A_TP_SHIFT_CNT, S_SYNSHIFTMAX, M_SYNSHIFTMAX, 0, 15);
+T3_ATTR(tcp_keepalive_probes, A_TP_SHIFT_CNT, S_KEEPALIVEMAX, M_KEEPALIVEMAX,
+ 1, 15);
+T3_ATTR(tcp_sack, A_TP_TCP_OPTIONS, S_SACKMODE, M_SACKMODE, 0, 1);
+T3_ATTR(tcp_timestamps, A_TP_TCP_OPTIONS, S_TIMESTAMPSMODE, M_TIMESTAMPSMODE,
+ 0, 1);
+
+static ssize_t timer_attr_show(struct cxgb3_compat_device *d, char *buf, int reg)
+{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ unsigned int v, tps;
+ ssize_t len;
+
+ /* Synchronize with ioctls that may shut down the device */
+ rtnl_lock();
+ v = t3_read_reg(adap, reg);
+ tps = (adap->params.vpd.cclk * 1000) >> adap->params.tp.tre;
+ len = sprintf(buf, "%u\n", v / tps);
+ rtnl_unlock();
+ return len;
+}
+
+static ssize_t timer_attr_store(struct cxgb3_compat_device *d, const char *buf,
+ size_t len, int reg, unsigned int min_val,
+ unsigned int max_val)
+{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ char *endp;
+ unsigned int val, tps;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ tps = (adap->params.vpd.cclk * 1000) >> adap->params.tp.tre;
+ val = simple_strtoul(buf, &endp, 0);
+ if (endp == buf || val * tps < min_val || val * tps > max_val)
+ return -EINVAL;
+
+ rtnl_lock();
+ t3_write_reg(adap, reg, val * tps);
+ rtnl_unlock();
+ return len;
+}
+
+#define T3_TIMER_REG_SHOW(name, reg) \
+CXGB3_SHOW_FUNC(show_##name, d, attr, buf) \
+{ \
+ return timer_attr_show(d, buf, reg); \
+}
+
+#define T3_TIMER_REG_STORE(name, reg, min_val, max_val) \
+CXGB3_STORE_FUNC(store_##name, d, attr, buf, len) \
+{ \
+ return timer_attr_store(d, buf, len, reg, min_val, max_val); \
+}
+
+#define T3_TIMER_ATTR(name, reg, min_val, max_val) \
+T3_TIMER_REG_SHOW(name, reg) \
+T3_TIMER_REG_STORE(name, reg, min_val, max_val) \
+static CXGB3_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
+
+T3_TIMER_ATTR(tcp_keepalive_time, A_TP_KEEP_IDLE, 0, M_KEEPALIVEIDLE);
+T3_TIMER_ATTR(tcp_keepalive_intvl, A_TP_KEEP_INTVL, 0, M_KEEPALIVEINTVL);
+T3_TIMER_ATTR(tcp_finwait2_timeout, A_TP_FINWAIT2_TIMER, 0, M_FINWAIT2TIME);
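/*
 * Worked example (assumed numbers): with cclk = 200000 (kHz, a 200 MHz
 * core clock) and tre = 4, tps = (200000 * 1000) >> 4 = 12500000 timer
 * ticks per second, so writing "75" to tcp_keepalive_intvl programs
 * 75 * 12500000 ticks into A_TP_KEEP_INTVL.
 */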
+
+static ssize_t tm_attr_show(struct cxgb3_compat_device *d, char *buf, int sched)
{
- struct port_info *pi = netdev_priv(to_net_dev(c));
+ struct port_info *pi = netdev_priv(to_net_dev(d));
struct adapter *adap = pi->adapter;
- unsigned int v, addr, bpt, cpt;
ssize_t len;
+ unsigned int rate;
- addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
rtnl_lock();
- t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
- v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
- if (sched & 1)
- v >>= 16;
- bpt = (v >> 8) & 0xff;
- cpt = v & 0xff;
- if (!cpt)
+ t3_get_tx_sched(adap, sched, &rate, NULL);
+ if (!rate)
len = sprintf(buf, "disabled\n");
- else {
- v = (adap->params.vpd.cclk * 1000) / cpt;
- len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
- }
+ else
+ len = sprintf(buf, "%u Kbps\n", rate);
rtnl_unlock();
return len;
}
-static ssize_t tm_attr_store(struct class_device *c, const char *buf,
+static ssize_t tm_attr_store(struct cxgb3_compat_device *d, const char *buf,
size_t len, int sched)
{
- struct port_info *pi = netdev_priv(to_net_dev(c));
+ struct port_info *pi = netdev_priv(to_net_dev(d));
struct adapter *adap = pi->adapter;
- unsigned int val;
char *endp;
ssize_t ret;
+ unsigned int val;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
}
#define TM_ATTR(name, sched) \
-static ssize_t show_##name(struct class_device *c, \
- char *buf) \
+CXGB3_SHOW_FUNC(show_##name, d, attr, buf) \
{ \
- return tm_attr_show(c, buf, sched); \
+ return tm_attr_show(d, buf, sched); \
} \
-static ssize_t store_##name(struct class_device *c, \
- const char *buf, size_t len) \
+CXGB3_STORE_FUNC(store_##name, d, attr, buf, len) \
{ \
- return tm_attr_store(c, buf, len, sched); \
+ return tm_attr_store(d, buf, len, sched); \
} \
-static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
+static CXGB3_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
- &class_device_attr_sched0.attr,
- &class_device_attr_sched1.attr,
- &class_device_attr_sched2.attr,
- &class_device_attr_sched3.attr,
- &class_device_attr_sched4.attr,
- &class_device_attr_sched5.attr,
- &class_device_attr_sched6.attr,
- &class_device_attr_sched7.attr,
+ &dev_attr_tcp_retries1.attr,
+ &dev_attr_tcp_retries2.attr,
+ &dev_attr_tcp_syn_retries.attr,
+ &dev_attr_tcp_keepalive_probes.attr,
+ &dev_attr_tcp_sack.attr,
+ &dev_attr_tcp_timestamps.attr,
+ &dev_attr_tcp_keepalive_time.attr,
+ &dev_attr_tcp_keepalive_intvl.attr,
+ &dev_attr_tcp_finwait2_timeout.attr,
+ &dev_attr_sched0.attr,
+ &dev_attr_sched1.attr,
+ &dev_attr_sched2.attr,
+ &dev_attr_sched3.attr,
+ &dev_attr_sched4.attr,
+ &dev_attr_sched5.attr,
+ &dev_attr_sched6.attr,
+ &dev_attr_sched7.attr,
NULL
};
-static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
+static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
+#endif /* ! LINUX_2_4 */
/*
* Sends an sk_buff to an offload queue driver
struct cpl_smt_write_req *req;
struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+	if (!skb)
+		return -ENOMEM;
req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
- req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
+ req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
req->iff = idx;
memset(req->src_mac1, 0, sizeof(req->src_mac1));
memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
int i;
for_each_port(adapter, i)
- write_smt_entry(adapter, i);
+ write_smt_entry(adapter, i);
return 0;
}
for (j = 0; j < pi->nqsets; ++j)
send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
- -1, i);
+ -1, pi->tx_chan);
}
}
#define FW_FNAME "t3fw-%d.%d.%d.bin"
+#define TPEEPROM_NAME "t3%c_tp_eeprom-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
+#ifndef LINUX_2_4
static int upgrade_fw(struct adapter *adap)
{
int ret;
release_firmware(fw);
if (ret == 0)
- dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
- FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+ dev_warn(dev, "successful upgrade to firmware %d.%d.%d\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
else
dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
return ret;
}
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data);
+
static inline char t3rev2char(struct adapter *adapter)
{
- char rev = 0;
+ char rev = 'z';
switch(adapter->params.rev) {
+ case T3_REV_A:
+ rev = 'a';
+ break;
case T3_REV_B:
case T3_REV_B2:
rev = 'b';
ret = t3_set_proto_sram(adap, tpsram->data);
if (ret == 0)
- dev_info(dev,
+ dev_warn(dev,
"successful update of protocol engine "
"to %d.%d.%d\n",
TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
return ret;
}
+#endif /* ! LINUX_2_4 */
/**
* cxgb_up - enable the adapter
- * @adapter: adapter being enabled
+ * @adap: adapter being enabled
*
* Called when the first port is enabled, this function performs the
* actions necessary to make an adapter operational, such as completing
*/
static int cxgb_up(struct adapter *adap)
{
- int err;
+ int err = 0;
int must_load;
if (!(adap->flags & FULL_INIT_DONE)) {
err = t3_check_fw_version(adap, &must_load);
if (err == -EINVAL) {
+#ifndef LINUX_2_4
err = upgrade_fw(adap);
- if (err && must_load)
- goto out;
+#endif
+ /*
+ * We're willing to continue working with firmware
+ * which mismatches to some extent unless we're told
+ * by the check routine that we must load new code.
+ */
+ if (!must_load)
+ err = 0;
}
+ if (err)
+ goto out;
err = t3_check_tpsram_version(adap, &must_load);
if (err == -EINVAL) {
+#ifndef LINUX_2_4
err = update_tpsram(adap);
- if (err && must_load)
- goto out;
+#endif
+ /*
+ * We're willing to continue working with a TP SRAM
+ * which mismatches to some extent unless we're told
+ * by the check routine that we must load new code.
+ */
+ if (!must_load)
+ err = 0;
}
+ if (err)
+ goto out;
err = init_dummy_netdevs(adap);
if (err)
t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
err = setup_sge_qsets(adap);
if (err)
goto out;
goto irq_err;
}
} else if ((err = request_irq(adap->pdev->irq,
- t3_intr_handler(adap,
- adap->sge.qs[0].rspq.
- polling,NULL),
- (adap->flags & USING_MSI) ?
- 0 : IRQF_SHARED,
- adap->name, adap)))
+ t3_intr_handler(adap,
+ adap->sge.qs[0].rspq.polling),
+ (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ adap->name, adap)))
goto irq_err;
t3_sge_start(adap);
if (adap->flags & TP_PARITY_INIT) {
t3_write_reg(adap, A_TP_INT_CAUSE,
F_CMCACHEPERR | F_ARPLUTPERR);
t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
}
- if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
+ if (!(adap->flags & QUEUES_BOUND)) {
bind_qsets(adap);
- adap->flags |= QUEUES_BOUND;
-
+ adap->flags |= QUEUES_BOUND;
+ }
out:
return err;
irq_err:
*/
static void cxgb_down(struct adapter *adapter)
{
+ unsigned long flags;
+
t3_sge_stop(adapter);
- spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
+
+ /* sync with PHY intr task */
+ spin_lock_irqsave(&adapter->work_lock, flags);
t3_intr_disable(adapter);
- spin_unlock_irq(&adapter->work_lock);
+ spin_unlock_irqrestore(&adapter->work_lock, flags);
if (adapter->flags & USING_MSIX) {
int i, n = 0;
free_irq(adapter->msix_info[0].vec, adapter);
for_each_port(adapter, i)
n += adap2pinfo(adapter, i)->nqsets;
for (i = 0; i < n; ++i)
free_irq(adapter->msix_info[i + 1].vec,
unsigned int timeo;
timeo = adap->params.linkpoll_period ?
(HZ * adap->params.linkpoll_period) / 10 :
adap->params.stats_update_period * HZ;
if (timeo)
queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
struct adapter *adapter = pi->adapter;
struct t3cdev *tdev = dev2t3cdev(dev);
int adap_up = adapter->open_device_map & PORT_MASK;
- int err;
+ int err = 0;
if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
return 0;
t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
adapter->params.b_wnd,
adapter->params.rev == 0 ?
adapter->port[0]->mtu : 0xffff);
init_smt(adapter);
+#ifndef LINUX_2_4
/* Never mind if the next step fails */
- sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
+ sysfs_create_group(net2kobj(tdev->lldev), &offload_attr_group);
+#endif /* LINUX_2_4 */
/* Call back all registered clients */
cxgb3_add_clients(tdev);
/* Call back all registered clients */
cxgb3_remove_clients(tdev);
-
- sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
-
+#ifndef LINUX_2_4
+ sysfs_remove_group(net2kobj(tdev->lldev), &offload_attr_group);
+#endif
tdev->lldev = NULL;
cxgb3_set_dummy_ops(tdev);
t3_tp_set_offload_mode(adapter, 0);
return err;
set_bit(pi->port_id, &adapter->open_device_map);
+
if (is_offload(adapter) && !ofld_disable) {
err = offload_open(dev);
if (err)
t3_port_intr_disable(adapter, pi->port_id);
netif_stop_queue(dev);
- pi->phy.ops->power_down(&pi->phy, 1);
netif_carrier_off(dev);
- t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+
+ /* disable pause frames */
+ t3_set_reg_field(adapter, A_XGM_TX_CFG + pi->mac.offset,
+ F_TXPAUSEEN, 0);
+
+ /* Reset RX FIFO HWM */
+ t3_set_reg_field(adapter, A_XGM_RXFIFO_CFG + pi->mac.offset,
+ V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
spin_lock(&adapter->work_lock); /* sync with update task */
clear_bit(pi->port_id, &adapter->open_device_map);
if (!adapter->open_device_map)
cxgb_down(adapter);
+ msleep(100);
+
+ /* Wait for TXFIFO empty */
+ t3_wait_op_done(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
+ F_TXFIFO_EMPTY, 1, 20, 5);
+
+ msleep(100);
+ t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
+
+ pi->phy.ops->power_down(&pi->phy, 1);
+
return 0;
}
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
struct net_device_stats *ns = &pi->netstats;
- const struct mac_stats *pstats;
+ const struct mac_stats *pstats = &pi->mac.stats;
- spin_lock(&adapter->stats_lock);
- pstats = t3_mac_update_stats(&pi->mac);
- spin_unlock(&adapter->stats_lock);
+ if (adapter->flags & FULL_INIT_DONE) {
+ spin_lock(&adapter->stats_lock);
+ t3_mac_update_stats(&pi->mac);
+ spin_unlock(&adapter->stats_lock);
+ }
ns->tx_bytes = pstats->tx_octets;
ns->tx_packets = pstats->tx_frames;
ns->tx_errors = pstats->tx_underrun;
ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
pstats->rx_fifo_ovfl;
/* detailed rx_errors */
ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
"VLANextractions ",
"VLANinsertions ",
"TxCsumOffload ",
+ "TXCoalesceWR ",
+ "TXCoalescePkt ",
"RxCsumGood ",
"RxDrops ",
+ "LroQueued ",
+ "LroFlushed ",
+ "LroExceededSessions",
+
"CheckTXEnToggled ",
"CheckResets ",
-
};
static int get_stats_count(struct net_device *dev)
return T3_REGMAP_SIZE;
}
+#ifndef LINUX_2_4
static int get_eeprom_len(struct net_device *dev)
{
return EEPROMSIZE;
}
+#endif /* LINUX_2_4 */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- u32 fw_vers = 0;
- u32 tp_vers = 0;
+ u32 fw_vers = 0, tp_vers = 0;
t3_get_fw_version(adapter, &fw_vers);
t3_get_tp_version(adapter, &tp_vers);
}
}
-static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
if (stringset == ETH_SS_STATS)
memcpy(data, stats_strings, sizeof(stats_strings));
int i;
unsigned long tot = 0;
- for (i = 0; i < p->nqsets; ++i)
- tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
+ for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
+ tot += adapter->sge.qs[i].port_stats[idx];
return tot;
}
+static void clear_sge_port_stats(struct adapter *adapter, struct port_info *p)
+{
+ int i;
+ struct sge_qset *qs = &adapter->sge.qs[p->first_qset];
+
+ for (i = 0; i < p->nqsets; i++, qs++)
+ memset(qs->port_stats, 0, sizeof(qs->port_stats));
+}
+
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- const struct mac_stats *s;
+ const struct mac_stats *s = &pi->mac.stats;
- spin_lock(&adapter->stats_lock);
- s = t3_mac_update_stats(&pi->mac);
- spin_unlock(&adapter->stats_lock);
+ if (adapter->flags & FULL_INIT_DONE) {
+ spin_lock(&adapter->stats_lock);
+ t3_mac_update_stats(&pi->mac);
+ spin_unlock(&adapter->stats_lock);
+ }
*data++ = s->tx_octets;
*data++ = s->tx_frames;
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_COALESCE_WR);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_COALESCE_PKT);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
*data++ = s->rx_cong_drops;
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_SKB) +
+ collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_PG) +
+ collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_ACK);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_OVFLOW);
*data++ = s->num_toggled;
*data++ = s->num_resets;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
{
u32 *p = buf + start;
for (; start <= end; start += sizeof(u32))
*p++ = t3_read_reg(ap, start);
}
{
struct port_info *pi = netdev_priv(dev);
struct adapter *ap = pi->adapter;
-
/*
* Version scheme:
* bits 0..9: chip version
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
+ int cap;
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_config;
- if (!(lc->supported & SUPPORTED_Autoneg))
- return -EOPNOTSUPP; /* can't change speed/duplex */
+ if (!(lc->supported & SUPPORTED_Autoneg)) {
+ /*
+ * PHY offers a single speed/duplex. See if that's what's
+ * being requested.
+ */
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+ if (lc->supported & cap)
+ return 0;
+ }
+ return -EINVAL;
+ }
if (cmd->autoneg == AUTONEG_DISABLE) {
- int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+ cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
return -EINVAL;
struct port_info *p = netdev_priv(dev);
p->rx_csum_offload = data;
+ if (!data) {
+ struct adapter *adap = p->adapter;
+ int i;
+
+ for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+ adap->params.sge.qset[i].lro = 0;
+ adap->sge.qs[i].lro.enabled = 0;
+ }
+ }
return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adapter = pi->adapter;
const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
e->rx_max_pending = MAX_RX_BUFFERS;
e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
e->rx_pending < MIN_FL_ENTRIES ||
- e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+ e->rx_jumbo_pending < MIN_FL_JUMBO_ENTRIES ||
e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
return -EINVAL;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
- u8 * data)
+ u8 *data)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
e->magic = EEPROM_MAGIC;
for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
- err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
+ err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
if (!err)
memcpy(data, buf + e->offset, e->len);
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
- u8 * data)
+ u8 *data)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
u32 aligned_offset, aligned_len, *p;
u8 *buf;
- int err;
+ int err = 0;
+
if (eeprom->magic != EEPROM_MAGIC)
return -EINVAL;
buf = kmalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
+ err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
if (!err && aligned_len > 4)
err = t3_seeprom_read(adapter,
aligned_offset + aligned_len - 4,
- (u32 *) & buf[aligned_len - 4]);
+ (u32 *)&buf[aligned_len - 4]);
if (err)
goto out;
memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
if (err)
goto out;
- for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
+ for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
err = t3_seeprom_write(adapter, aligned_offset, *p);
aligned_offset += 4;
}
}
static struct ethtool_ops cxgb_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
- .get_drvinfo = get_drvinfo,
- .get_msglevel = get_msglevel,
- .set_msglevel = set_msglevel,
- .get_ringparam = get_sge_param,
- .set_ringparam = set_sge_param,
- .get_coalesce = get_coalesce,
- .set_coalesce = set_coalesce,
- .get_eeprom_len = get_eeprom_len,
- .get_eeprom = get_eeprom,
- .set_eeprom = set_eeprom,
- .get_pauseparam = get_pauseparam,
- .set_pauseparam = set_pauseparam,
- .get_rx_csum = get_rx_csum,
- .set_rx_csum = set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_link = ethtool_op_get_link,
- .get_strings = get_strings,
- .phys_id = cxgb3_phys_id,
- .nway_reset = restart_autoneg,
- .get_stats_count = get_stats_count,
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_drvinfo = get_drvinfo,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_ringparam = get_sge_param,
+ .set_ringparam = set_sge_param,
+ .get_coalesce = get_coalesce,
+ .set_coalesce = set_coalesce,
+#ifndef LINUX_2_4
+ .get_eeprom_len = get_eeprom_len,
+#endif /* LINUX_2_4 */
+ .get_eeprom = get_eeprom,
+ .set_eeprom = set_eeprom,
+ .get_pauseparam = get_pauseparam,
+ .set_pauseparam = set_pauseparam,
+ .get_rx_csum = get_rx_csum,
+ .set_rx_csum = set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+#ifndef LINUX_2_4
+ .set_tx_csum = ethtool_op_set_tx_csum,
+#endif /* LINUX_2_4 */
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_link = ethtool_op_get_link,
+ .get_strings = get_strings,
+ .phys_id = cxgb3_phys_id,
+ .nway_reset = restart_autoneg,
+ .get_stats_count = get_stats_count,
.get_ethtool_stats = get_stats,
- .get_regs_len = get_regs_len,
- .get_regs = get_regs,
- .get_wol = get_wol,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ethtool_op_set_tso,
- .get_perm_addr = ethtool_op_get_perm_addr
+ .get_regs_len = get_regs_len,
+ .get_regs = get_regs,
+ .get_wol = get_wol,
+#ifndef LINUX_2_4
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+#endif /* LINUX_2_4 */
+#ifdef CXGB3_ETHTOOL_GPERMADDR
+ .get_perm_addr = ethtool_op_get_perm_addr
+#endif
+};
+
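+/*
+ * Shared epilogue for the read_proc handlers below. It implements the
+ * offset/count/*eof contract of the legacy procfs read interface, so each
+ * handler only needs to format its data into buf and maintain len.
+ */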
+#define adjust_proc_metrics() do { \
+	if (len <= offset + count) *eof = 1; \
+	*start = buf + offset; \
+	len -= offset; \
+	if (len > count) len = count; \
+	if (len < 0) len = 0; \
+} while (0)
+
+static int snmp_read_proc(char *buf, char **start, off_t offset, int count,
+ int *eof, void *data)
+{
+ struct adapter *adapter = data;
+ struct tp_mib_stats m;
+ int len = 0;
+
+ spin_lock(&adapter->stats_lock);
+ t3_tp_get_mib_stats(adapter, &m);
+ spin_unlock(&adapter->stats_lock);
+
+#define MIB32(s, field) len += sprintf(buf + len, "%-18s %u\n", s, m.field)
+#define MIB64(s, hi, lo) \
+	len += sprintf(buf + len, "%-18s %llu\n", s, \
+		       ((unsigned long long)m.hi << 32) + m.lo)
+
+ MIB64("IPInReceives:", ipInReceive_hi, ipInReceive_lo);
+ MIB64("IPInHdrErrors:", ipInHdrErrors_hi, ipInHdrErrors_lo);
+ MIB64("IPInAddrErrors:", ipInAddrErrors_hi, ipInAddrErrors_lo);
+ MIB64("IPInUnknownProtos:", ipInUnknownProtos_hi,
+ ipInUnknownProtos_lo);
+ MIB64("IPInDiscards:", ipInDiscards_hi, ipInDiscards_lo);
+ MIB64("IPInDelivers:", ipInDelivers_hi, ipInDelivers_lo);
+ MIB64("IPOutRequests:", ipOutRequests_hi, ipOutRequests_lo);
+ MIB64("IPOutDiscards:", ipOutDiscards_hi, ipOutDiscards_lo);
+ MIB64("IPOutNoRoutes:", ipOutNoRoutes_hi, ipOutNoRoutes_lo);
+ MIB32("IPReasmTimeout:", ipReasmTimeout);
+ MIB32("IPReasmReqds:", ipReasmReqds);
+ MIB32("IPReasmOKs:", ipReasmOKs);
+ MIB32("IPReasmFails:", ipReasmFails);
+ MIB32("TCPActiveOpens:", tcpActiveOpens);
+ MIB32("TCPPassiveOpens:", tcpPassiveOpens);
+ MIB32("TCPAttemptFails:", tcpAttemptFails);
+ MIB32("TCPEstabResets:", tcpEstabResets);
+ MIB32("TCPOutRsts:", tcpOutRsts);
+ MIB32("TCPCurrEstab:", tcpCurrEstab);
+ MIB64("TCPInSegs:", tcpInSegs_hi, tcpInSegs_lo);
+ MIB64("TCPOutSegs:", tcpOutSegs_hi, tcpOutSegs_lo);
+ MIB64("TCPRetransSeg:", tcpRetransSeg_hi, tcpRetransSeg_lo);
+ MIB64("TCPInErrs:", tcpInErrs_hi, tcpInErrs_lo);
+ MIB32("TCPRtoMin:", tcpRtoMin);
+ MIB32("TCPRtoMax:", tcpRtoMax);
+
+#undef MIB32
+#undef MIB64
+
+ adjust_proc_metrics();
+ return len;
+}
+
+static int mtus_read_proc(char *buf, char **start, off_t offset, int count,
+ int *eof, void *data)
+{
+ struct adapter *adapter = data;
+ unsigned short hw_mtus[NMTUS];
+ int i, len = 0;
+
+ spin_lock(&adapter->stats_lock);
+ t3_read_hw_mtus(adapter, hw_mtus);
+ spin_unlock(&adapter->stats_lock);
+
+ len += sprintf(buf, "Soft MTU\tEffective MTU\n");
+ for (i = 0; i < NMTUS; ++i)
+ len += sprintf(buf + len, "%8u\t\t%5u\n",
+ adapter->params.mtus[i], hw_mtus[i]);
+
+ adjust_proc_metrics();
+ return len;
+}
+
+static int cong_ctrl_read_proc(char *buf, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ static const char *dec_fac[] = {
+ "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
+ "0.9375" };
+
+ unsigned short incr[NMTUS][NCCTRL_WIN];
+ struct adapter *adapter = data;
+ int i, len = 0;
+
+ t3_get_cong_cntl_tab(adapter, incr);
+
+ for (i = 0; i < NCCTRL_WIN; ++i) {
+ int j;
+
+ for (j = 0; j < NMTUS; ++j)
+ len += sprintf(buf + len, "%5u ", incr[j][i]);
+
+ len += sprintf(buf + len, "%5u %s\n", adapter->params.a_wnd[i],
+ dec_fac[adapter->params.b_wnd[i]]);
+ }
+
+ adjust_proc_metrics();
+ return len;
+}
+
+static int rss_read_proc(char *buf, char **start, off_t offset, int count,
+ int *eof, void *data)
+{
+ u8 lkup_tab[2 * RSS_TABLE_SIZE];
+ u16 map_tab[RSS_TABLE_SIZE];
+ struct adapter *adapter = data;
+ int i, len;
+
+ i = t3_read_rss(adapter, lkup_tab, map_tab);
+ if (i < 0)
+ return i;
+
+ len = sprintf(buf, "Idx\tLookup\tMap\n");
+ for (i = 0; i < RSS_TABLE_SIZE; ++i)
+ len += sprintf(buf + len, "%3u\t %3u\t %u\n", i, lkup_tab[i],
+ map_tab[i]);
+ for (; i < 2 * RSS_TABLE_SIZE; ++i)
+ len += sprintf(buf + len, "%3u\t %3u\n", i, lkup_tab[i]);
+
+ adjust_proc_metrics();
+ return len;
+}
+
+static int sched_read_proc(char *buf, char **start, off_t offset, int count,
+ int *eof, void *data)
+{
+ int i, len;
+ unsigned int map, kbps, ipg;
+ unsigned int pace_tab[NTX_SCHED];
+ struct adapter *adap = data;
+
+ map = t3_read_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP);
+ t3_read_pace_tbl(adap, pace_tab);
+
+ len = sprintf(buf, "Scheduler Mode Channel Rate (Kbps) "
+ "Class IPG (0.1 ns) Flow IPG (us)\n");
+ for (i = 0; i < NTX_SCHED; ++i) {
+ t3_get_tx_sched(adap, i, &kbps, &ipg);
+ len += sprintf(buf + len, " %u %-5s %u ", i,
+ (map & (1 << (S_TX_MOD_TIMER_MODE + i))) ?
+ "flow" : "class", !!(map & (1 << i)));
+ if (kbps)
+ len += sprintf(buf + len, "%9u ", kbps);
+ else
+ len += sprintf(buf + len, " disabled ");
+
+ if (ipg)
+ len += sprintf(buf + len, "%13u ", ipg);
+ else
+ len += sprintf(buf + len, " disabled ");
+
+ if (pace_tab[i])
+ len += sprintf(buf + len, "%10u\n", pace_tab[i] / 1000);
+ else
+ len += sprintf(buf + len, " disabled\n");
+ }
+
+ adjust_proc_metrics();
+ return len;
+}
+
+static int stats_read_proc(char *buf, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int i, len = 0;
+ struct adapter *adapter = data;
+
+ len += sprintf(buf + len, "Interface: ");
+ for (i = 0; i < SGE_QSETS; ++i)
+ len += sprintf(buf + len, " %10s",
+ adapter->sge.qs[i].netdev ?
+ adapter->sge.qs[i].netdev->name : "N/A");
+
+#define C(s, v) do { \
+	len += sprintf(buf + len, "\n%-18s", s); \
+	for (i = 0; i < SGE_QSETS; ++i) \
+		len += sprintf(buf + len, " %10lu", adapter->sge.qs[i].v); \
+} while (0)
+
+ C("RspQEmpty:", rspq.empty);
+ C("FL0Empty:", fl[0].empty);
+ C("FL0AllocFailed:", fl[0].alloc_failed);
+ C("FL1Empty:", fl[1].empty);
+ C("FL1AllocFailed:", fl[1].alloc_failed);
+ C("TxQ0Full:", txq[0].stops);
+ C("TxQ0Restarts:", txq[0].restarts);
+ C("TxQ1Full:", txq[1].stops);
+ C("TxQ1Restarts:", txq[1].restarts);
+ C("TxQ2Full:", txq[2].stops);
+ C("TxQ2Restarts:", txq[2].restarts);
+ C("RxEthPackets:", rspq.eth_pkts);
+ C("TXCoalesceWR:", port_stats[SGE_PSTAT_TX_COALESCE_WR]);
+ C("TXCoalescePkt:", port_stats[SGE_PSTAT_TX_COALESCE_PKT]);
+ C("LROcompleted:", port_stats[SGE_PSTAT_LRO]);
+ C("LROpages:", port_stats[SGE_PSTAT_LRO_PG]);
+ C("LROpackets:", port_stats[SGE_PSTAT_LRO_SKB]);
+ C("LROmergedACKs:", port_stats[SGE_PSTAT_LRO_ACK]);
+ C("LROoverflow:", port_stats[SGE_PSTAT_LRO_OVFLOW]);
+ C("LROcollisions:", port_stats[SGE_PSTAT_LRO_COLSN]);
+ C("RxOffloadPackets:", rspq.offload_pkts);
+ C("RxOffloadBundles:", rspq.offload_bundles);
+ C("PureResponses:", rspq.pure_rsps);
+ C("RxImmediateData:", rspq.imm_data);
+ C("ANE:", rspq.async_notif);
+ C("RxDrops:", rspq.rx_drops);
+ C("RspDeferred:", rspq.nomem);
+ C("UnhandledIntr:", rspq.unhandled_irqs);
+ C("RspStarved:", rspq.starved);
+ C("RspRestarted:", rspq.restarted);
+#undef C
+
+ len += sprintf(buf + len, "\n%-18s %lu\n", "RxCorrectableErr:",
+ adapter->pmrx.stats.corr_err);
+ len += sprintf(buf + len, "%-18s %lu\n", "TxCorrectableErr:",
+ adapter->pmtx.stats.corr_err);
+ len += sprintf(buf + len, "%-18s %lu\n", "CMCorrectableErr:",
+ adapter->cm.stats.corr_err);
+
+ len += sprintf(buf + len, "\n%-18s %lu\n", "ActiveRegionFull:",
+ adapter->mc5.stats.active_rgn_full);
+ len += sprintf(buf + len, "%-18s %lu\n", "NFASearchErr:",
+ adapter->mc5.stats.nfa_srch_err);
+ len += sprintf(buf + len, "%-18s %lu\n", "MC5UnknownCmd:",
+ adapter->mc5.stats.unknown_cmd);
+ len += sprintf(buf + len, "%-18s %lu\n", "MC5DelActEmpty:",
+ adapter->mc5.stats.del_act_empty);
+
+ len += sprintf(buf + len, "\n%-18s %lu\n", "ULPCh0PBLOOB:",
+ adapter->irq_stats[STAT_ULP_CH0_PBL_OOB]);
+ len += sprintf(buf + len, "%-18s %lu\n", "ULPCh1PBLOOB:",
+ adapter->irq_stats[STAT_ULP_CH1_PBL_OOB]);
+ len += sprintf(buf + len, "%-18s %lu\n", "PCICorrectableErr:",
+ adapter->irq_stats[STAT_PCI_CORR_ECC]);
+
+ adjust_proc_metrics();
+ return len;
+}
+
+struct cxgb3_proc_entry {
+ const char *name;
+ read_proc_t *fn;
+};
+
+static struct cxgb3_proc_entry proc_files[] = {
+ { "snmp", snmp_read_proc },
+ { "congestion_control", cong_ctrl_read_proc },
+ { "mtus", mtus_read_proc },
+ { "rss", rss_read_proc },
+ { "sched", sched_read_proc },
+ { "stats", stats_read_proc },
};
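+
+/*
+ * These read-only files are created under the offload device's proc
+ * directory (tdev->proc_dir); the parent directory itself is set up
+ * elsewhere in the driver.
+ */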
+static int __devinit cxgb_proc_setup(struct adapter *adapter,
+ struct proc_dir_entry *dir)
+{
+ int i, created;
+ struct proc_dir_entry *p;
+
+ if (!dir)
+ return -EINVAL;
+
+ /* Create whichever entries we can; fail only if none can be created. */
+ for (created = i = 0; i < ARRAY_SIZE(proc_files); ++i) {
+ p = create_proc_read_entry(proc_files[i].name, 0, dir,
+ proc_files[i].fn, adapter);
+ if (p) {
+ p->owner = THIS_MODULE;
+ created++;
+ } else if (!created)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void cxgb_proc_cleanup(struct proc_dir_entry *dir)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(proc_files); ++i)
+ remove_proc_entry(proc_files[i].name, dir);
+}
+
+static void clear_qset_stats(struct sge_qset *qs)
+{
+ qs->rspq.empty = 0;
+ qs->fl[0].empty = 0;
+ qs->fl[1].empty = 0;
+ qs->txq[0].stops = 0;
+ qs->txq[0].restarts = 0;
+ qs->txq[1].stops = 0;
+ qs->txq[1].restarts = 0;
+ qs->txq[2].stops = 0;
+ qs->txq[2].restarts = 0;
+ qs->rspq.eth_pkts = 0;
+ qs->port_stats[SGE_PSTAT_TX_COALESCE_WR] = 0;
+ qs->port_stats[SGE_PSTAT_TX_COALESCE_PKT] = 0;
+ qs->port_stats[SGE_PSTAT_LRO] = 0;
+ qs->port_stats[SGE_PSTAT_LRO_PG] = 0;
+ qs->port_stats[SGE_PSTAT_LRO_SKB] = 0;
+ qs->port_stats[SGE_PSTAT_LRO_ACK] = 0;
+ qs->port_stats[SGE_PSTAT_LRO_OVFLOW] = 0;
+ qs->port_stats[SGE_PSTAT_LRO_COLSN] = 0;
+ qs->rspq.offload_pkts = 0;
+ qs->rspq.offload_bundles = 0;
+ qs->rspq.pure_rsps = 0;
+ qs->rspq.imm_data = 0;
+ qs->rspq.async_notif = 0;
+ qs->rspq.rx_drops = 0;
+ qs->rspq.nomem = 0;
+ qs->fl[0].alloc_failed = 0;
+ qs->fl[1].alloc_failed = 0;
+ qs->rspq.unhandled_irqs = 0;
+ qs->rspq.starved = 0;
+ qs->rspq.restarted = 0;
+}
+
+static void clear_port_qset_stats(struct adapter *adap,
+ const struct port_info *pi)
+{
+ int i;
+ struct sge_qset *qs = &adap->sge.qs[pi->first_qset];
+
+ for (i = 0; i < pi->nqsets; i++)
+ clear_qset_stats(qs++);
+}
+
static int in_range(int val, int lo, int hi)
{
return val < 0 || (val <= hi && val >= lo);
return -EFAULT;
switch (cmd) {
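+	/*
+	 * Illustrative note: these private ioctls arrive via SIOCCHIOCTL,
+	 * with a command word at the start of the user buffer. The register
+	 * accessors use struct ch_reg (cmd/addr/val); addr must be 4-byte
+	 * aligned and inside the BAR0 window (mmio_len), so arbitrary memory
+	 * cannot be reached through them.
+	 */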
- case CHELSIO_SET_QSET_PARAMS:{
- int i;
- struct qset_params *q;
- struct ch_qset_params t;
+ case CHELSIO_SETREG: {
+ struct ch_reg edata;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user(&t, useraddr, sizeof(t)))
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
- if (t.qset_idx >= SGE_QSETS)
+ if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
return -EINVAL;
- if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
- !in_range(t.cong_thres, 0, 255) ||
- !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
- MAX_TXQ_ENTRIES) ||
- !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
- MAX_TXQ_ENTRIES) ||
- !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
- MAX_CTRL_TXQ_ENTRIES) ||
- !in_range(t.fl_size[0], MIN_FL_ENTRIES,
- MAX_RX_BUFFERS)
- || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
- MAX_RX_JUMBO_BUFFERS)
- || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
- MAX_RSPQ_ENTRIES))
+ writel(edata.val, adapter->regs + edata.addr);
+ break;
+ }
+ case CHELSIO_GETREG: {
+ struct ch_reg edata;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
return -EINVAL;
+ edata.val = readl(adapter->regs + edata.addr);
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_GETTPI: {
+ struct ch_reg edata;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ ret = t3_elmr_blk_read(adapter, edata.addr, &edata.val, 1);
+ if (ret)
+ return ret;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_SETTPI: {
+ struct ch_reg edata;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ ret = t3_elmr_blk_write(adapter, edata.addr, &edata.val, 1);
+ if (ret)
+ return ret;
+ break;
+ }
+ case CHELSIO_GET_SGE_CONTEXT: {
+ struct ch_cntxt ecntxt;
+
+ if (copy_from_user(&ecntxt, useraddr, sizeof(ecntxt)))
+ return -EFAULT;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+ if (ecntxt.cntxt_type == CNTXT_TYPE_EGRESS)
+ ret = t3_sge_read_ecntxt(adapter, ecntxt.cntxt_id,
+ ecntxt.data);
+ else if (ecntxt.cntxt_type == CNTXT_TYPE_FL)
+ ret = t3_sge_read_fl(adapter, ecntxt.cntxt_id,
+ ecntxt.data);
+ else if (ecntxt.cntxt_type == CNTXT_TYPE_RSP)
+ ret = t3_sge_read_rspq(adapter, ecntxt.cntxt_id,
+ ecntxt.data);
+ else if (ecntxt.cntxt_type == CNTXT_TYPE_CQ)
+ ret = t3_sge_read_cq(adapter, ecntxt.cntxt_id,
+ ecntxt.data);
+ else
+ ret = -EINVAL;
+ spin_unlock_irq(&adapter->sge.reg_lock);
+
+ if (ret)
+ return ret;
+ if (copy_to_user(useraddr, &ecntxt, sizeof(ecntxt)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_GET_SGE_DESC: {
+ struct ch_desc edesc;
+
+ if (copy_from_user(&edesc, useraddr, sizeof(edesc)))
+ return -EFAULT;
+
+ if (edesc.queue_num >= SGE_QSETS * 6)
+ return -EINVAL;
+
+ ret = t3_get_desc(&adapter->sge.qs[edesc.queue_num / 6],
+ edesc.queue_num % 6, edesc.idx, edesc.data);
+ if (ret < 0)
+ return ret;
+ edesc.size = ret;
+
+ if (copy_to_user(useraddr, &edesc, sizeof(edesc)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_SET_QSET_PARAMS: {
+ int i;
+ struct qset_params *q;
+ struct ch_qset_params t;
+ int q1 = pi->first_qset;
+ int nqsets = pi->nqsets;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ if (t.qset_idx >= SGE_QSETS)
+ return -EINVAL;
+ if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
+ !in_range(t.cong_thres, 0, 255) ||
+ !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
+ MAX_TXQ_ENTRIES) ||
+ !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
+ MAX_TXQ_ENTRIES) ||
+ !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
+ MAX_CTRL_TXQ_ENTRIES) ||
+ !in_range(t.fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
+ !in_range(t.fl_size[1], MIN_FL_ENTRIES,
+ MAX_RX_JUMBO_BUFFERS) ||
+ !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
+ return -EINVAL;
+
+ if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (t.qset_idx >= pi->first_qset &&
+ t.qset_idx < pi->first_qset + pi->nqsets &&
+ !pi->rx_csum_offload)
+ return -EINVAL;
+ }
+
if ((adapter->flags & FULL_INIT_DONE) &&
(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
t.polling >= 0 || t.cong_thres >= 0))
return -EBUSY;
+ /* Allow setting of any available qset when offload enabled */
+ if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ q1 = 0;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ nqsets = pi->first_qset + pi->nqsets;
+ }
+ }
+
+ if (t.qset_idx < q1)
+ return -EINVAL;
+ if (t.qset_idx > q1 + nqsets - 1)
+ return -EINVAL;
+
q = &adapter->params.sge.qset[t.qset_idx];
if (t.rspq_size >= 0)
if (t.cong_thres >= 0)
q->cong_thres = t.cong_thres;
if (t.intr_lat >= 0) {
- struct sge_qset *qs =
- &adapter->sge.qs[t.qset_idx];
+ struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
q->coalesce_usecs = t.intr_lat;
t3_update_qset_coalesce(qs, q);
else {
/* No polling with INTx for T3A */
if (adapter->params.rev == 0 &&
!(adapter->flags & USING_MSI))
t.polling = 0;
for (i = 0; i < SGE_QSETS; i++) {
- q = &adapter->params.sge.
- qset[i];
+ q = &adapter->params.sge.qset[i];
q->polling = t.polling;
}
}
}
+ if (t.lro >= 0) {
+ struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
+
+ q->lro = t.lro;
+ qs->lro.enabled = t.lro;
+ }
break;
}
- case CHELSIO_GET_QSET_PARAMS:{
+ case CHELSIO_GET_QSET_PARAMS: {
struct qset_params *q;
struct ch_qset_params t;
+ int q1 = pi->first_qset;
+ int nqsets = pi->nqsets;
+ int i;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
- if (t.qset_idx >= SGE_QSETS)
+
+ /* Display qsets for all ports when offload enabled */
+ if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ q1 = 0;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ nqsets = pi->first_qset + pi->nqsets;
+ }
+ }
+
+ if (t.qset_idx >= nqsets)
return -EINVAL;
- q = &adapter->params.sge.qset[t.qset_idx];
- t.rspq_size = q->rspq_size;
+ q = &adapter->params.sge.qset[q1 + t.qset_idx];
+ t.rspq_size = q->rspq_size;
t.txq_size[0] = q->txq_size[0];
t.txq_size[1] = q->txq_size[1];
t.txq_size[2] = q->txq_size[2];
- t.fl_size[0] = q->fl_size;
- t.fl_size[1] = q->jumbo_size;
- t.polling = q->polling;
- t.intr_lat = q->coalesce_usecs;
- t.cong_thres = q->cong_thres;
+ t.fl_size[0] = q->fl_size;
+ t.fl_size[1] = q->jumbo_size;
+ t.polling = q->polling;
+ t.lro = q->lro;
+ t.intr_lat = q->coalesce_usecs;
+ t.cong_thres = q->cong_thres;
+ t.qnum = q1;
+
+ if (adapter->flags & USING_MSIX)
+ t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
+ else
+ t.vector = adapter->pdev->irq;
if (copy_to_user(useraddr, &t, sizeof(t)))
return -EFAULT;
break;
}
- case CHELSIO_SET_QSET_NUM:{
+ case CHELSIO_SET_QSET_NUM: {
struct ch_reg edata;
unsigned int i, first_qset = 0, other_qsets = 0;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
if (edata.val < 1 ||
(edata.val > 1 && !(adapter->flags & USING_MSIX)))
return -EINVAL;
for_each_port(adapter, i)
}
break;
}
- case CHELSIO_GET_QSET_NUM:{
+ case CHELSIO_GET_QSET_NUM: {
struct ch_reg edata;
edata.cmd = CHELSIO_GET_QSET_NUM;
return -EFAULT;
break;
}
- case CHELSIO_LOAD_FW:{
+ case CHELSIO_LOAD_FW: {
u8 *fw_data;
struct ch_mem_range t;
if (!fw_data)
return -ENOMEM;
- if (copy_from_user
- (fw_data, useraddr + sizeof(t), t.len)) {
+ if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
kfree(fw_data);
return -EFAULT;
}
return ret;
break;
}
- case CHELSIO_SETMTUTAB:{
+ case CHELSIO_LOAD_BOOT: {
+ u8 *boot_data;
+ struct ch_mem_range t;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+
+ boot_data = kmalloc(t.len, GFP_KERNEL);
+ if (!boot_data)
+ return -ENOMEM;
+
+ if (copy_from_user(boot_data, useraddr + sizeof(t), t.len)) {
+ kfree(boot_data);
+ return -EFAULT;
+ }
+
+ ret = t3_load_boot(adapter, boot_data, t.len);
+ kfree(boot_data);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ case CHELSIO_CLEAR_STATS: {
+ struct ch_reg edata;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return -EAGAIN;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if ((edata.val & STATS_QUEUE) && edata.addr != -1 &&
+ edata.addr >= pi->nqsets)
+ return -EINVAL;
+ if (edata.val & STATS_PORT) {
+ spin_lock(&adapter->stats_lock);
+ t3_mac_update_stats(&pi->mac);
+ spin_unlock(&adapter->stats_lock);
+ memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
+ clear_sge_port_stats(adapter, pi);
+ }
+ if (edata.val & STATS_QUEUE) {
+ if (edata.addr == -1)
+ clear_port_qset_stats(adapter, pi);
+ else
+ clear_qset_stats(&adapter->sge.qs[edata.addr +
+ pi->first_qset]);
+ }
+ break;
+ }
+
+ case CHELSIO_DEVUP:
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ ret = offload_open(dev);
+ if (ret)
+ return ret;
+
+ break;
+
+#ifdef CONFIG_CHELSIO_T3_CORE
+ case CHELSIO_SETMTUTAB: {
struct ch_mtus m;
int i;
return -EFAULT;
if (m.nmtus != NMTUS)
return -EINVAL;
if (m.mtus[0] < 81) /* accommodate SACK */
return -EINVAL;
/* MTUs must be in ascending order */
for (i = 1; i < NMTUS; ++i)
if (m.mtus[i] < m.mtus[i - 1])
return -EINVAL;
memcpy(adapter->params.mtus, m.mtus,
sizeof(adapter->params.mtus));
break;
}
- case CHELSIO_GET_PM:{
+ case CHELSIO_GETMTUTAB: {
+ struct ch_mtus m;
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+
+ memcpy(m.mtus, adapter->params.mtus, sizeof(m.mtus));
+ m.nmtus = NMTUS;
+
+ if (copy_to_user(useraddr, &m, sizeof(m)))
+ return -EFAULT;
+ break;
+ }
+#endif /* CONFIG_CHELSIO_T3_CORE */
+
+ case CHELSIO_GET_PM: {
struct tp_params *p = &adapter->params.tp;
- struct ch_pm m = {.cmd = CHELSIO_GET_PM };
+ struct ch_pm m = { .cmd = CHELSIO_GET_PM };
if (!is_offload(adapter))
return -EOPNOTSUPP;
- m.tx_pg_sz = p->tx_pg_size;
+ m.tx_pg_sz = p->tx_pg_size;
m.tx_num_pg = p->tx_num_pgs;
- m.rx_pg_sz = p->rx_pg_size;
+ m.rx_pg_sz = p->rx_pg_size;
m.rx_num_pg = p->rx_num_pgs;
- m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
+ m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
if (copy_to_user(useraddr, &m, sizeof(m)))
return -EFAULT;
break;
}
- case CHELSIO_SET_PM:{
+ case CHELSIO_SET_PM: {
struct ch_pm m;
struct tp_params *p = &adapter->params.tp;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
!m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
return -EINVAL; /* not power of 2 */
if (!(m.rx_pg_sz & 0x14000))
return -EINVAL; /* not 16KB or 64KB */
if (!(m.tx_pg_sz & 0x1554000))
return -EINVAL;
if (m.tx_num_pg == -1)
if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
return -EINVAL;
if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
return -EINVAL;
p->rx_pg_size = m.rx_pg_sz;
p->tx_pg_size = m.tx_pg_sz;
p->tx_num_pgs = m.tx_num_pg;
break;
}
- case CHELSIO_GET_MEM:{
+ case CHELSIO_READ_TCAM_WORD: {
+ struct ch_tcam_word t;
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return -EIO; /* need MC5 */
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ ret = t3_read_mc5_range(&adapter->mc5, t.addr, 1, t.buf);
+ if (ret)
+ return ret;
+ if (copy_to_user(useraddr, &t, sizeof(t)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_GET_MEM: {
struct ch_mem_range t;
struct mc7 *mem;
u64 buf[32];
if (!is_offload(adapter))
return -EOPNOTSUPP;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
if ((t.addr & 7) || (t.len & 7))
* Read 256 bytes at a time as len can be large and we don't
* want to use huge intermediate buffers.
*/
useraddr += sizeof(t); /* advance to start of buffer */
while (t.len) {
- unsigned int chunk =
- min_t(unsigned int, t.len, sizeof(buf));
+ unsigned int chunk = min_t(unsigned int, t.len,
+ sizeof(buf));
- ret =
- t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
- buf);
+ ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
if (ret)
return ret;
if (copy_to_user(useraddr, buf, chunk))
}
break;
}
- case CHELSIO_SET_TRACE_FILTER:{
+#ifdef CONFIG_CHELSIO_T3_CORE
+ case CHELSIO_SET_TRACE_FILTER: {
struct ch_trace t;
const struct trace_params *tp;
tp = (const struct trace_params *)&t.sip;
if (t.config_tx)
- t3_config_trace_filter(adapter, tp, 0,
- t.invert_match,
- t.trace_tx);
+ t3_config_trace_filter(adapter, tp, 0, t.invert_match,
+ t.trace_tx);
if (t.config_rx)
- t3_config_trace_filter(adapter, tp, 1,
- t.invert_match,
- t.trace_rx);
+ t3_config_trace_filter(adapter, tp, 1, t.invert_match,
+ t.trace_rx);
break;
}
+#endif
+ case CHELSIO_SET_PKTSCHED: {
+ struct ch_pktsched_params p;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (!adapter->open_device_map)
+ return -EAGAIN; /* uP and SGE must be running */
+ if (copy_from_user(&p, useraddr, sizeof(p)))
+ return -EFAULT;
+ send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
+ p.binding);
+ break;
+ }
+#ifdef CONFIG_CHELSIO_T3_CORE
+ case CHELSIO_SET_HW_SCHED: {
+ struct ch_hw_sched t;
+ unsigned int ticks_per_usec = core_ticks_per_usec(adapter);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return -EAGAIN; /* need TP to be initialized */
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ if (t.sched >= NTX_SCHED || !in_range(t.mode, 0, 1) ||
+ !in_range(t.channel, 0, 1) ||
+ !in_range(t.kbps, 0, 10000000) ||
+ !in_range(t.class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
+ !in_range(t.flow_ipg, 0,
+ dack_ticks_to_usec(adapter, 0x7ff)))
+ return -EINVAL;
+
+ if (t.kbps >= 0) {
+ ret = t3_config_sched(adapter, t.kbps, t.sched);
+ if (ret < 0)
+ return ret;
+ }
+ if (t.class_ipg >= 0)
+ t3_set_sched_ipg(adapter, t.sched, t.class_ipg);
+ if (t.flow_ipg >= 0) {
+ t.flow_ipg *= 1000; /* us -> ns */
+ t3_set_pace_tbl(adapter, &t.flow_ipg, t.sched, 1);
+ }
+ if (t.mode >= 0) {
+ int bit = 1 << (S_TX_MOD_TIMER_MODE + t.sched);
+
+ t3_set_reg_field(adapter, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ bit, t.mode ? bit : 0);
+ }
+ if (t.channel >= 0)
+ t3_set_reg_field(adapter, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ 1 << t.sched, t.channel << t.sched);
+ break;
+ }
+#endif /* CONFIG_CHELSIO_T3_CORE */
default:
return -EOPNOTSUPP;
}
case SIOCGMIIPHY:
data->phy_id = pi->phy.addr;
/* FALLTHRU */
- case SIOCGMIIREG:{
+ case SIOCGMIIREG: {
u32 val;
struct cphy *phy = &pi->phy;
else if (mmd > MDIO_DEV_XGXS)
return -EINVAL;
- ret =
- phy->mdio_read(adapter, data->phy_id & 0x1f,
- mmd, data->reg_num, &val);
+ ret = phy->mdio_read(adapter, data->phy_id & 0x1f, mmd,
+ data->reg_num, &val);
} else
- ret =
- phy->mdio_read(adapter, data->phy_id & 0x1f,
- 0, data->reg_num & 0x1f,
- &val);
+ ret = phy->mdio_read(adapter, data->phy_id & 0x1f, 0,
+ data->reg_num & 0x1f, &val);
if (!ret)
data->val_out = val;
break;
}
- case SIOCSMIIREG:{
+ case SIOCSMIIREG: {
struct cphy *phy = &pi->phy;
if (!capable(CAP_NET_ADMIN))
else if (mmd > MDIO_DEV_XGXS)
return -EINVAL;
- ret =
- phy->mdio_write(adapter,
- data->phy_id & 0x1f, mmd,
- data->reg_num,
- data->val_in);
+ ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
+ mmd, data->reg_num, data->val_in);
} else
- ret =
- phy->mdio_write(adapter,
- data->phy_id & 0x1f, 0,
- data->reg_num & 0x1f,
- data->val_in);
+ ret = phy->mdio_write(adapter, data->phy_id & 0x1f, 0,
+ data->reg_num & 0x1f,
+ data->val_in);
break;
}
case SIOCCHIOCTL:
- return cxgb_extension_ioctl(dev, req->ifr_data);
+ return cxgb_extension_ioctl(dev, (void *)req->ifr_data);
default:
return -EOPNOTSUPP;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
int ret;
if (new_mtu < 81) /* accommodate SACK */
return -EINVAL;
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
+
dev->mtu = new_mtu;
init_port_mtus(adapter);
if (adapter->params.rev == 0 && offload_running(adapter))
t3_load_mtus(adapter, adapter->params.mtus,
adapter->params.a_wnd, adapter->params.b_wnd,
adapter->port[0]->mtu);
+
return 0;
}
t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
if (offload_running(adapter))
write_smt_entry(adapter, pi->port_id);
+
return 0;
}
{
int i;
- for (i = 0; i < p->nqsets; i++) {
- struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
+ for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+ struct sge_rspq *q = &adap->sge.qs[i].rspq;
+ unsigned long flags;
- spin_lock_irq(&q->lock);
- spin_unlock_irq(&q->lock);
+ spin_lock_irqsave(&q->lock, flags);
+ spin_unlock_irqrestore(&q->lock, flags);
}
}
struct adapter *adapter = pi->adapter;
pi->vlan_grp = grp;
+
if (adapter->params.rev > 0)
- t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
+ t3_set_vlan_accel(adapter, 1 << pi->tx_chan, grp != NULL);
else {
/* single control for all ports */
unsigned int i, have_vlans = 0;
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ unsigned long flags;
int qidx;
- for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
- struct sge_qset *qs = &adapter->sge.qs[qidx];
- void *source;
-
- if (adapter->flags & USING_MSIX)
- source = qs;
- else
- source = adapter;
-
- t3_intr_handler(adapter, qs->rspq.polling, NULL) (0, source, NULL);
- }
+ local_irq_save(flags);
+ for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++)
+ t3_poll_handler(adapter, &adapter->sge.qs[qidx]);
+ local_irq_restore(flags);
}
#endif
/*
* Periodic accumulation of MAC statistics.
*/
static void mac_stats_update(struct adapter *adapter)
{
int i;
struct net_device *dev = adapter->port[i];
struct port_info *p = netdev_priv(dev);
- if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
+ if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
t3_link_changed(adapter, i);
}
}
{
int i;
if (!rtnl_trylock()) /* synchronize with ifdown */
return;
for_each_port(adapter, i) {
rtnl_unlock();
}
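+/*
+ * DECLARE_TASK_FUNC, WORK2ADAP and DELWORK2ADAP are compatibility macros
+ * (presumably from cxgb3_compat.h) that paper over the 2.6.20 workqueue
+ * API change, in which work handlers started receiving a struct
+ * work_struct * instead of the void * they were registered with.
+ */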
-static void t3_adap_check_task(void *data)
+DECLARE_TASK_FUNC(t3_adap_check_task, task_param)
{
- struct adapter *adapter = data;
+ struct adapter *adapter = DELWORK2ADAP(task_param, adap_check_task);
const struct adapter_params *p = &adapter->params;
adapter->check_task_cnt++;
/* Accumulate MAC stats if needed */
if (!p->linkpoll_period ||
(adapter->check_task_cnt * p->linkpoll_period) / 10 >=
p->stats_update_period) {
mac_stats_update(adapter);
adapter->check_task_cnt = 0;
}
- if (p->rev == T3_REV_B2)
+ if (p->rev == T3_REV_B2 && p->nports < 4)
check_t3b2_mac(adapter);
/* Schedule the next check update if any port is active. */
/*
* Processes external (PHY) interrupts in process context.
*/
-static void ext_intr_task(void *data)
+DECLARE_TASK_FUNC(ext_intr_task, task_param)
{
- struct adapter *adapter = data;
+ struct adapter *adapter = WORK2ADAP(task_param, ext_intr_handler_task);
+ unsigned long flags;
t3_phy_intr_handler(adapter);
/* Now reenable external interrupts */
- spin_lock_irq(&adapter->work_lock);
+ spin_lock_irqsave(&adapter->work_lock, flags);
if (adapter->slow_intr_mask) {
adapter->slow_intr_mask |= F_T3DBG;
t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
t3_write_reg(adapter, A_PL_INT_ENABLE0,
adapter->slow_intr_mask);
}
- spin_unlock_irq(&adapter->work_lock);
+ spin_unlock_irqrestore(&adapter->work_lock, flags);
}
/*
CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
fw_status[0], fw_status[1],
fw_status[2], fw_status[3]);
-
}
+#if defined(HAS_EEH)
/**
* t3_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
struct adapter *adapter = pci_get_drvdata(pdev);
int i;
+ /* stop DMA engine */
+ t3_sge_stop(adapter);
+
/* Stop all ports */
for_each_port(adapter, i) {
struct net_device *netdev = adapter->port[i];
cxgb_close(netdev);
}
- if (is_offload(adapter) &&
- test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
- offload_close(&adapter->tdev);
+ if (is_offload(adapter)) {
+ cxgb_proc_cleanup(adapter->tdev.proc_dir);
+ if (test_bit(OFFLOAD_DEVMAP_BIT,
+ &adapter->open_device_map))
+ offload_close(&adapter->tdev);
+ }
/* Free sge resources */
t3_free_sge_resources(adapter);
if (is_offload(adapter)) {
__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
+ cxgb_proc_setup(adapter, adapter->tdev.proc_dir);
if (offload_open(adapter->port[0]))
printk(KERN_WARNING
- "Could not bring back offload capabilities\n");
+ "Could not initialize offload capabilities\n");
}
}
.slot_reset = t3_io_slot_reset,
.resume = t3_io_resume,
};
+#endif
+
+/*
+ * Set the number of qsets based on the number of CPUs and the number of
+ * ports, not to exceed the number of available qsets, assuming there are
+ * enough qsets per port in HW.
+ */
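+/*
+ * Worked example (assuming SGE_QSETS == 8): with MSI-X on an 8-CPU system,
+ * a 2-port adapter gets 4 qsets per port (first_qset 0 and 4), while a
+ * 4-port adapter always falls back to 1 qset per port.
+ */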
+static inline void set_nqsets(struct adapter *adap)
+{
+ int i, j = 0;
+ int num_cpus = num_online_cpus();
+ int hwports = adap->params.nports;
+ int nqsets = SGE_QSETS;
+
+ if ((adap->flags & USING_MSIX) && !singleq) {
+ if (hwports == 2 &&
+ (hwports * nqsets > SGE_QSETS ||
+ num_cpus >= nqsets/hwports))
+ nqsets /= hwports;
+ if (nqsets > num_cpus)
+ nqsets = num_cpus;
+ if (nqsets < 1 || hwports == 4)
+ nqsets = 1;
+	} else
+		nqsets = 1;
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = j;
+ pi->nqsets = nqsets;
+ j = pi->first_qset + nqsets;
+
+ dev_info(&adap->pdev->dev,
+ "Port %d using %d queue sets.\n", i, nqsets);
+ }
+}
+
+/*
+ * Interrupt handler used to check if MSI/MSI-X works on this platform.
+ */
+DECLARE_INTR_HANDLER(check_intr_handler, irq, adap, regs)
+{
+ t3_set_reg_field(adap, A_PL_INT_ENABLE0, F_MI1, 0);
+ return IRQ_HANDLED;
+}
+
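+/*
+ * Probe whether MSI/MSI-X delivery really works: arm the MI1 interrupt,
+ * whose handler above disarms it, then wait briefly. If MI1 is still
+ * armed, no interrupt was ever delivered, so fall back to INTx.
+ */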
+static void __devinit check_msi(struct adapter *adap)
+{
+ int vec, mi1;
+
+ if (!(t3_read_reg(adap, A_PL_INT_CAUSE0) & F_MI1))
+ return;
+
+ vec = (adap->flags & USING_MSI) ? adap->pdev->irq :
+ adap->msix_info[0].vec;
+
+ if (request_irq(vec, check_intr_handler, 0, adap->name, adap))
+ return;
+
+ t3_set_reg_field(adap, A_PL_INT_ENABLE0, 0, F_MI1);
+ msleep(10);
+ mi1 = t3_read_reg(adap, A_PL_INT_ENABLE0) & F_MI1;
+ if (mi1)
+ t3_set_reg_field(adap, A_PL_INT_ENABLE0, F_MI1, 0);
+ free_irq(vec, adap);
+
+ if (mi1) {
+ cxgb_disable_msi(adap);
+ dev_info(&adap->pdev->dev,
+ "the kernel believes that MSI is available on this "
+ "platform\nbut the driver's MSI test has failed. "
+ "Proceeding with INTx interrupts.\n");
+ }
+}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
adap->msix_info[i].vec = entries[i].vector;
} else if (err > 0)
dev_info(&adap->pdev->dev,
"only %d MSI-X vectors left, not using MSI-X\n", err);
return err;
}
-static void __devinit print_port_info(struct adapter *adap,
+#ifdef T3_TRACE
+static void __devinit alloc_trace_bufs(adapter_t *adap)
+{
+ int i;
+ char s[32];
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ sprintf(s, "sge_q%d", i);
+ adap->tb[i] = t3_trace_alloc(adap->debugfs_root, s, 512);
+ }
+}
+
+static void free_trace_bufs(adapter_t *adap)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adap->tb); ++i)
+ t3_trace_free(adap->tb[i]);
+}
+#else
+# define alloc_trace_bufs(adapter)
+# define free_trace_bufs(adapter)
+#endif
+
+static void __devinit print_port_info(adapter_t *adap,
const struct adapter_info *ai)
{
static const char *pci_variant[] = {
const struct port_info *pi = netdev_priv(dev);
if (!test_bit(i, &adap->registered_device_map))
continue;
printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
- dev->name, ai->desc, pi->port_type->desc,
+ dev->name, ai->desc, pi->phy.desc,
is_offload(adap) ? "R" : "", adap->params.rev, buf,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
}
}
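+
+/*
+ * Read back and rewrite the upper dwords of the 64-bit BARs on 32-bit
+ * kernels (BARs 1, 3 and 5 hold the high halves of BAR pairs 0/2/4) to
+ * re-synchronize BAR state the platform may have left stale. This note is
+ * descriptive, inferred from the code rather than stated in the source.
+ */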
+static void touch_bars(struct pci_dev *pdev)
+{
+#if BITS_PER_LONG < 64
+ u32 v;
+
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
+#endif
+}
+
static int __devinit init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
}
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pci_enable_device(pdev);
if (err) {
- /* Just info, some other driver may have claimed the device. */
- dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
return err;
}
- err = pci_enable_device(pdev);
+ /*
+ * Can't use pci_request_regions() here because some kernels want to
+ * request the MSI-X BAR in pci_enable_msix. Also no need to request
+ * the doorbell BAR if we are not doing user-space RDMA.
+ * So only request BAR0.
+ */
+ err = pci_request_region(pdev, 0, DRV_NAME);
if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- goto out_release_regions;
+ /*
+ * Some other driver may have already claimed the device.
+ * Report the event but do not disable the device.
+ */
+ printk(KERN_INFO "%s: cannot obtain PCI resources\n",
+ pci_name(pdev));
+ return err;
}
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
- goto out_disable_device;
+ goto out_release_regions;
}
} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_disable_device;
+ goto out_release_regions;
}
+ touch_bars(pdev);
pci_set_master(pdev);
mmio_start = pci_resource_start(pdev, 0);
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
- goto out_disable_device;
+ goto out_release_regions;
}
+ adapter->pdev = pdev;
+
adapter->regs = ioremap_nocache(mmio_start, mmio_len);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
goto out_free_adapter;
}
- adapter->pdev = pdev;
adapter->name = pci_name(pdev);
adapter->msg_enable = dflt_msg_enable;
adapter->mmio_len = mmio_len;
-
- mutex_init(&adapter->mdio_lock);
+ memset(adapter->rrss_map, 0xff, sizeof(adapter->rrss_map));
+ INIT_LIST_HEAD(&adapter->adapter_list);
+ spin_lock_init(&adapter->mdio_lock);
+ spin_lock_init(&adapter->elmer_lock);
spin_lock_init(&adapter->work_lock);
spin_lock_init(&adapter->stats_lock);
- INIT_LIST_HEAD(&adapter->adapter_list);
- INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task, adapter);
- INIT_WORK(&adapter->adap_check_task, t3_adap_check_task, adapter);
+ T3_INIT_WORK(&adapter->ext_intr_handler_task,
+ ext_intr_task, adapter);
+ T3_INIT_DELAYED_WORK(&adapter->adap_check_task,
+ t3_adap_check_task,
+ adapter);
- for (i = 0; i < ai->nports; ++i) {
+ for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
struct net_device *netdev;
netdev = alloc_etherdev(sizeof(struct port_info));
goto out_free_dev;
}
+ spin_lock_init(&netdev->queue_lock);
+
SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
pi = netdev_priv(netdev);
pi->adapter = adapter;
pi->rx_csum_offload = 1;
- pi->nqsets = 1;
- pi->first_qset = i;
- pi->activity = 0;
pi->port_id = i;
+ pi->tx_chan = i >= ai->nports0;
+ pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 :
+ 2 * i;
+ adapter->rxpkt_map[pi->txpkt_intf] = i;
netif_carrier_off(netdev);
netdev->irq = pdev->irq;
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
- netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
netdev->features |= NETIF_F_LLTX;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+		/* TSO stays disabled on 4-port (T304) adapters */
+		if (ai->nports0 + ai->nports1 <= 2)
+			netdev->features |= NETIF_F_TSO;
+
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->vlan_rx_register = vlan_rx_register;
netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
netdev->open = cxgb_open;
netdev->stop = cxgb_close;
netdev->hard_start_xmit = t3_eth_xmit;
+ netdev->tx_queue_len = 10000;
netdev->get_stats = cxgb_get_stats;
netdev->set_multicast_list = cxgb_set_rxmode;
netdev->do_ioctl = cxgb_ioctl;
netdev->weight = 64;
SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
}
pci_set_drvdata(pdev, adapter);
/* Driver's ready. Reflect it on LEDs */
t3_led_ready(adapter);
+#ifndef LINUX_2_4
+ if (cxgb3_debugfs_root) {
+ adapter->debugfs_root = debugfs_create_dir(adapter->name,
+ cxgb3_debugfs_root);
+ if (adapter->debugfs_root)
+ alloc_trace_bufs(adapter);
+ }
+#endif /* LINUX_2_4 */
+
if (is_offload(adapter)) {
__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
cxgb3_adapter_ofld(adapter);
+ cxgb_proc_setup(adapter, adapter->tdev.proc_dir);
}
/* See what interrupts we'll be using */
adapter->flags |= USING_MSIX;
else if (msi > 0 && pci_enable_msi(pdev) == 0)
adapter->flags |= USING_MSI;
+ if (adapter->flags & (USING_MSIX | USING_MSI))
+ check_msi(adapter);
+
+ set_nqsets(adapter);
- err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
- &cxgb3_attr_group);
+#ifndef LINUX_2_4
+ sysfs_create_group(net2kobj(adapter->port[0]),
+ &cxgb3_attr_group);
+#endif /* LINUX_2_4 */
print_port_info(adapter, ai);
return 0;
out_free_dev:
iounmap(adapter->regs);
- for (i = ai->nports - 1; i >= 0; --i)
+ for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
if (adapter->port[i])
free_netdev(adapter->port[i]);
out_free_adapter:
kfree(adapter);
-out_disable_device:
+out_release_regions:
+ pci_release_region(pdev, 0);
pci_disable_device(pdev);
-out_release_regions:
- pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
return err;
}
int i;
t3_sge_stop(adapter);
- sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
+#ifndef LINUX_2_4
+ sysfs_remove_group(net2kobj(adapter->port[0]),
&cxgb3_attr_group);
+#endif /* LINUX_2_4 */
if (is_offload(adapter)) {
- cxgb3_adapter_unofld(adapter);
+ cxgb_proc_cleanup(adapter->tdev.proc_dir);
if (test_bit(OFFLOAD_DEVMAP_BIT,
&adapter->open_device_map))
offload_close(&adapter->tdev);
+ cxgb3_adapter_unofld(adapter);
}
- for_each_port(adapter, i)
- if (test_bit(i, &adapter->registered_device_map))
- unregister_netdev(adapter->port[i]);
+ for_each_port(adapter, i) {
+ if (test_bit(i, &adapter->registered_device_map)) {
+ unregister_netdev(adapter->port[i]);
+ }
+ }
+
+ if (adapter->debugfs_root) {
+ free_trace_bufs(adapter);
+#ifndef LINUX_2_4
+ debugfs_remove(adapter->debugfs_root);
+#endif /* LINUX_2_4 */
+ }
t3_free_sge_resources(adapter);
cxgb_disable_msi(adapter);
iounmap(adapter->regs);
kfree(adapter);
- pci_release_regions(pdev);
+ pci_release_region(pdev, 0);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
static struct pci_driver driver = {
.name = DRV_NAME,
.id_table = cxgb3_pci_tbl,
.probe = init_one,
.remove = __devexit_p(remove_one),
+#if defined(HAS_EEH)
.err_handler = &t3_err_handler,
+#endif
};
static int __init cxgb3_init_module(void)
{
int ret;
+#ifndef LINUX_2_4
+ /* Debugfs support is optional, just warn if this fails */
+ cxgb3_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
+ if (!cxgb3_debugfs_root)
+ printk(KERN_WARNING DRV_NAME
+ ": could not create debugfs entry, continuing\n");
+#endif /* LINUX_2_4 */
+
cxgb3_offload_init();
ret = pci_register_driver(&driver);
+
+#ifndef LINUX_2_4
+ if (ret < 0)
+ debugfs_remove(cxgb3_debugfs_root);
+#else
+ if (ret > 0)
+ ret = 0;
+#endif /* LINUX_2_4 */
return ret;
}
static void __exit cxgb3_cleanup_module(void)
{
pci_unregister_driver(&driver);
- if (cxgb3_wq)
+ if (cxgb3_wq) {
destroy_workqueue(cxgb3_wq);
+ cxgb3_wq = NULL;
+ }
+#ifndef LINUX_2_4
+ debugfs_remove(cxgb3_debugfs_root); /* NULL ok */
+#endif /* LINUX_2_4 */
+ cxgb3_offload_exit();
}
module_init(cxgb3_init_module);
/*
- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
#include <linux/list.h>
-#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
-#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <net/neighbour.h>
#include "common.h"
#include "regs.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"
+#include "cxgb3_compat.h"
+#if defined(NETEVENT)
+#include <net/netevent.h>
+#endif
+
static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);
static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);
+#ifdef LINUX_2_4
+static unsigned int MAX_ATIDS = 64 * 1024;
+#else
static const unsigned int MAX_ATIDS = 64 * 1024;
-static const unsigned int ATID_BASE = 0x10000;
+#endif /* LINUX_2_4 */
+static const unsigned int ATID_BASE = 0x100000;
+
+static struct proc_dir_entry *cxgb3_proc_root;
static inline int offload_activated(struct t3cdev *tdev)
{
- const struct adapter *adapter = tdev2adap(tdev);
+ struct adapter *adapter = tdev2adap(tdev);
return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
}
}
mutex_unlock(&cxgb3_db_lock);
}
-
EXPORT_SYMBOL(cxgb3_register_client);
/**
}
mutex_unlock(&cxgb3_db_lock);
}
-
EXPORT_SYMBOL(cxgb3_unregister_client);
+/* Get the t3cdev associated with a net_device */
+struct t3cdev *dev2t3cdev(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return (struct t3cdev *)pi->adapter;
+}
+EXPORT_SYMBOL(dev2t3cdev);
+
/**
 * cxgb3_add_clients - activate registered clients for an offload device
* @tdev: the offload device
*
 * Calls back all registered clients once an offload device is activated
}
/**
- * cxgb3_remove_clients - deactivates registered clients
- * for an offload device
+ * cxgb3_remove_clients - deactivate registered clients for an offload device
* @tdev: the offload device
*
 * Calls back all registered clients once an offload device is deactivated
mutex_unlock(&cxgb3_db_lock);
}
-static struct net_device *get_iff_from_mac(struct adapter *adapter,
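+/*
+ * Map a net_device to the t3cdev of the adapter that owns it. Handles
+ * plain ports, bonding masters (matched through their slave ports) and
+ * VLAN devices layered on a port; returns NULL if the device is not
+ * backed by a T3 adapter.
+ */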
+static struct t3cdev *dev2tdev(struct net_device *dev)
+{
+ struct adapter *adapter;
+ int port, found = 0;
+ struct net_device *curdev = NULL;
+
+ if (!dev)
+ return NULL;
+
+ read_lock(&adapter_list_lock);
+ list_for_each_entry(adapter, &adapter_list, adapter_list) {
+ for_each_port(adapter, port) {
+ curdev = adapter->port[port];
+ if ((dev->flags & IFF_MASTER) &&
+ (curdev->flags & IFF_SLAVE))
+ found = curdev->master == dev;
+ else if (dev->priv_flags & IFF_802_1Q_VLAN)
+ found = VLAN_DEV_INFO(dev)->real_dev == curdev;
+ else
+ found = dev == curdev;
+
+ if (found)
+ goto out;
+ }
+ }
+out:
+ read_unlock(&adapter_list_lock);
+
+ return found ? dev2t3cdev(curdev) : NULL;
+}
+
+static struct net_device *get_iff_from_mac(adapter_t *adapter,
const unsigned char *mac,
unsigned int vlan)
{
int i;
for_each_port(adapter, i) {
- const struct vlan_group *grp;
+ struct vlan_group *grp;
struct net_device *dev = adapter->port[i];
const struct port_info *p = netdev_priv(dev);
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
if (vlan && vlan != VLAN_VID_MASK) {
grp = p->vlan_grp;
- dev = grp ? grp->vlan_devices[vlan] : NULL;
- } else
+ dev = grp ? vlan_group_get_device(grp, vlan) :
+ NULL;
+ } else
while (dev->master)
dev = dev->master;
return dev;
return NULL;
}
-static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
- void *data)
+static inline void failover_fixup(adapter_t *adapter, int port)
+{
+ struct net_device *dev = adapter->port[port];
+ struct port_info *p = netdev_priv(dev);
+ struct cmac *mac = &p->mac;
+
+ if (!netif_running(dev)) {
+ /* Failover triggered by the interface ifdown */
+ t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset,
+ F_TXEN);
+ t3_read_reg(adapter, A_XGM_TX_CTRL + mac->offset);
+ } else {
+ /* Failover triggered by the interface link down */
+ t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+ t3_read_reg(adapter, A_XGM_RX_CTRL + mac->offset);
+ t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset,
+ F_RXEN);
+ }
+}
+
+static inline int in_bond(int port, struct bond_ports *bond_ports)
+{
+ int i;
+
+ for (i = 0; i < bond_ports->nports; i++)
+ if (port == bond_ports->ports[i])
+ break;
+
+ return (i < bond_ports->nports);
+}
+
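+/*
+ * t3_4ports_failover - rehome L2T entries on a bonding failover event
+ *
+ * Walks the active part of the L2 table and moves entries between the
+ * bonded ports' SMT indices: on PORT_DOWN/PORT_RELEASE traffic is
+ * spread over the surviving ports, on PORT_UP entries return to their
+ * original port, and on ACTIVE_SLAVE everything is pulled onto the new
+ * active slave. Valid entries are rewritten in hardware via
+ * t3_l2t_update_l2e().
+ */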
+static int t3_4ports_failover(struct adapter *adapter, int event,
+ struct bond_ports *bond_ports)
+{
+ int port = bond_ports->port;
+ struct t3cdev *tdev = &adapter->tdev;
+ struct l2t_data *d = L2DATA(tdev);
+ struct l2t_entry *e, *end;
+ int nports = 0, port_idx;
+
+ /* Reassign L2T entries */
+ switch (event) {
+ case FAILOVER_PORT_RELEASE:
+ case FAILOVER_PORT_DOWN:
+ read_lock_bh(&d->lock);
+ port_idx = 0;
+ nports = bond_ports->nports;
+ for (e = &d->l2tab[1], end = d->rover;
+ e != end; ++e) {
+ int newport;
+
+ if (e->smt_idx == port) {
+ newport = bond_ports->ports[port_idx];
+ spin_lock_bh(&e->lock);
+ e->smt_idx = newport;
+ if (e->state == L2T_STATE_VALID)
+ t3_l2t_update_l2e(tdev, e);
+ spin_unlock_bh(&e->lock);
+ port_idx = port_idx < nports - 1 ?
+ port_idx + 1 : 0;
+ }
+ /*
+ * If the port is released, update orig_smt_idx
+ * to the failed-over port.
+ * There are 2 situations:
+ * 1. Port X is the original port and is released.
+ * {orig_smt_idx, smt_idx} follows these steps.
+ * {X, X} -> {X, Y} -> {Y, Y}
+ * 2. Port Z is released, a failover from port X
+ * had happened previously.
+ * {orig_smt_idx, smt_idx} follows these steps:
+ * {X, Z} -> {Z, Z}
+ */
+ if (event == FAILOVER_PORT_RELEASE &&
+ e->orig_smt_idx == port) {
+ spin_lock_bh(&e->lock);
+ e->orig_smt_idx = e->smt_idx;
+ spin_unlock_bh(&e->lock);
+ }
+ }
+ read_unlock_bh(&d->lock);
+ break;
+ case FAILOVER_PORT_UP:
+ read_lock_bh(&d->lock);
+ for (e = &d->l2tab[1], end = d->rover;
+ e != end; ++e) {
+ if (e->orig_smt_idx == port &&
+ in_bond(e->smt_idx, bond_ports)) {
+ spin_lock_bh(&e->lock);
+ e->smt_idx = port;
+ if (e->state == L2T_STATE_VALID)
+ t3_l2t_update_l2e(tdev, e);
+ spin_unlock_bh(&e->lock);
+ }
+ }
+ read_unlock_bh(&d->lock);
+ break;
+ case FAILOVER_ACTIVE_SLAVE:
+ read_lock_bh(&d->lock);
+ for (e = &d->l2tab[1], end = d->rover;
+ e != end; ++e) {
+ if (e->smt_idx != port &&
+ in_bond(e->smt_idx, bond_ports)) {
+ spin_lock_bh(&e->lock);
+ e->smt_idx = port;
+ if (e->state == L2T_STATE_VALID)
+ t3_l2t_update_l2e(tdev, e);
+ spin_unlock_bh(&e->lock);
+ }
+ }
+ read_unlock_bh(&d->lock);
+ break;
+ }
+ return 0;
+}
+
+static int cxgb_ulp_iscsi_ctl(adapter_t *adapter, unsigned int req, void *data)
{
int ret = 0;
struct ulp_iscsi_info *uiip = data;
uiip->max_rxsz = min_t(unsigned int,
adapter->params.tp.rx_pg_size,
(adapter->sge.qs[0].fl[1].buf_size -
- sizeof(struct cpl_rx_data) * 2 -
- sizeof(struct cpl_rx_data_ddp)));
+ sizeof(struct cpl_rx_data) * 2 -
+ sizeof(struct cpl_rx_data_ddp)));
+ /* also check the max rx data length programmed in TP */
+ uiip->max_rxsz = min(uiip->max_rxsz,
+ ((t3_read_reg(adapter, A_TP_PARA_REG2))
+ >> S_MAXRXDATA) & M_MAXRXDATA);
break;
case ULP_ISCSI_SET_PARAMS:
t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
+ /* set MaxRxData and MaxCoalesceSize to 16224 */
+ t3_write_reg(adapter, A_TP_PARA_REG2, 0x3f603f60);
+ /* program the ddp page sizes */
+ {
+ int i;
+ unsigned int val = 0;
+ for (i = 0; i < 4; i++)
+ val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
+ if (val)
+ t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
+ }
break;
default:
ret = -EOPNOTSUPP;
/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0
-static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
+static int cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data)
{
int ret = 0;
switch (req) {
case RDMA_GET_PARAMS: {
- struct rdma_info *rdma = data;
+ struct rdma_info *req = data;
struct pci_dev *pdev = adapter->pdev;
- rdma->udbell_physbase = pci_resource_start(pdev, 2);
- rdma->udbell_len = pci_resource_len(pdev, 2);
- rdma->tpt_base =
- t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
- rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
- rdma->pbl_base =
- t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
- rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
- rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
- rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
- rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
- rdma->pdev = pdev;
+ req->udbell_physbase = pci_resource_start(pdev, 2);
+ req->udbell_len = pci_resource_len(pdev, 2);
+ req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
+ req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+ req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
+ req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+ req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+ req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+ req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+ req->pdev = pdev;
break;
}
- case RDMA_CQ_OP:{
+ case RDMA_CQ_OP: {
unsigned long flags;
- struct rdma_cq_op *rdma = data;
+ struct rdma_cq_op *req = data;
/* may be called in any context */
spin_lock_irqsave(&adapter->sge.reg_lock, flags);
- ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
- rdma->credits);
+ ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
+ req->credits);
spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
break;
}
- case RDMA_GET_MEM:{
+ case RDMA_GET_MEM: {
struct ch_mem_range *t = data;
struct mc7 *mem;
else
return -EINVAL;
- ret =
- t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
- (u64 *) t->buf);
+ ret = t3_mc7_bd_read(mem, t->addr/8, t->len/8, (u64 *)t->buf);
if (ret)
return ret;
break;
}
- case RDMA_CQ_SETUP:{
- struct rdma_cq_setup *rdma = data;
-
- spin_lock_irq(&adapter->sge.reg_lock);
- ret =
- t3_sge_init_cqcntxt(adapter, rdma->id,
- rdma->base_addr, rdma->size,
- ASYNC_NOTIF_RSPQ,
- rdma->ovfl_mode, rdma->credits,
- rdma->credit_thres);
- spin_unlock_irq(&adapter->sge.reg_lock);
+ case RDMA_CQ_SETUP: {
+ struct rdma_cq_setup *req = data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->sge.reg_lock, flags);
+ ret = t3_sge_init_cqcntxt(adapter, req->id, req->base_addr,
+ req->size, ASYNC_NOTIF_RSPQ,
+ req->ovfl_mode, req->credits,
+ req->credit_thres);
+ spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
break;
}
- case RDMA_CQ_DISABLE:
- spin_lock_irq(&adapter->sge.reg_lock);
+ case RDMA_CQ_DISABLE: {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->sge.reg_lock, flags);
ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
- spin_unlock_irq(&adapter->sge.reg_lock);
+ spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
break;
- case RDMA_CTRL_QP_SETUP:{
- struct rdma_ctrlqp_setup *rdma = data;
+ }
+ case RDMA_CTRL_QP_SETUP: {
+ struct rdma_ctrlqp_setup *req = data;
+ unsigned long flags;
- spin_lock_irq(&adapter->sge.reg_lock);
+ spin_lock_irqsave(&adapter->sge.reg_lock, flags);
ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
- SGE_CNTXT_RDMA,
- ASYNC_NOTIF_RSPQ,
- rdma->base_addr, rdma->size,
- FW_RI_TID_START, 1, 0);
- spin_unlock_irq(&adapter->sge.reg_lock);
+ SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
+ req->base_addr, req->size,
+ FW_RI_TID_START, 1, 0);
+ spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
break;
}
default:
struct iff_mac *iffmacp;
struct ddp_params *ddpp;
struct adap_ports *ports;
+ struct port_array *pap;
struct ofld_page_info *rx_page_info;
struct tp_params *tp = &adapter->params.tp;
- int i;
+ struct bond_ports *bond_ports;
+ int port;
switch (req) {
case GET_MAX_OUTSTANDING_WR:
*(unsigned int *)data = WR_FLITS;
break;
case GET_TX_MAX_CHUNK:
- *(unsigned int *)data = 1 << 20; /* 1MB */
+ *(unsigned int *)data = 1 << 20; /* 1MB */
break;
case GET_TID_RANGE:
tid = data;
tid->num = t3_mc5_size(&adapter->mc5) -
- adapter->params.mc5.nroutes -
- adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
+ adapter->params.mc5.nroutes -
+ adapter->params.mc5.nfilters -
+ adapter->params.mc5.nservers;
tid->base = 0;
break;
case GET_STID_RANGE:
tid = data;
tid->num = adapter->params.mc5.nservers;
tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
- adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
+ adapter->params.mc5.nfilters -
+ adapter->params.mc5.nroutes;
break;
case GET_L2T_CAPACITY:
*(unsigned int *)data = 2048;
break;
+ case GET_CPUIDX_OF_QSET: {
+ unsigned int qset = *(unsigned int *)data;
+
+ if (qset >= SGE_QSETS ||
+ adapter->rrss_map[qset] >= RSS_TABLE_SIZE)
+ return -EINVAL;
+ *(unsigned int *)data = adapter->rrss_map[qset];
+ break;
+ }
+ case GET_PORT_SCHED: {
+ struct port_sched *p = data;
+
+ if (adapter->params.nports > 2) {
+ const struct port_info *pi = netdev_priv(p->dev);
+ p->sched = pi->port_id;
+ } else
+ p->sched = -1;
+ break;
+ }
case GET_MTUS:
mtup = data;
mtup->size = NMTUS;
case GET_IFF_FROM_MAC:
iffmacp = data;
iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
- iffmacp->vlan_tag &
- VLAN_VID_MASK);
+ iffmacp->vlan_tag & VLAN_VID_MASK);
break;
case GET_DDP_PARAMS:
ddpp = data;
ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
+ ddpp->pdev = adapter->pdev;
break;
case GET_PORTS:
ports = data;
- ports->nports = adapter->params.nports;
- for_each_port(adapter, i)
- ports->lldevs[i] = adapter->port[i];
+ ports->nports = adapter->params.nports;
+ for_each_port(adapter, port)
+ ports->lldevs[port] = adapter->port[port];
+ break;
+ case GET_PORT_ARRAY:
+ pap = data;
+ pap->nports = adapter->params.nports;
+ pap->lldevs = adapter->port;
+ break;
+ case FAILOVER:
+ port = *(int *)data;
+ t3_port_failover(adapter, port);
+ failover_fixup(adapter, !port);
+ break;
+ case FAILOVER_DONE:
+ port = *(int *)data;
+ t3_failover_done(adapter, port);
+ break;
+ case FAILOVER_CLEAR:
+ t3_failover_clear(adapter);
+ break;
+ case FAILOVER_ACTIVE_SLAVE:
+ case FAILOVER_PORT_DOWN:
+ case FAILOVER_PORT_UP:
+ case FAILOVER_PORT_RELEASE:
+ bond_ports = data;
+ t3_4ports_failover(adapter, req, bond_ports);
+ break;
+ case GET_RX_PAGE_INFO:
+ rx_page_info = data;
+ rx_page_info->page_size = tp->rx_pg_size;
+ rx_page_info->num = tp->rx_num_pgs;
break;
case ULP_ISCSI_GET_PARAMS:
case ULP_ISCSI_SET_PARAMS:
if (!offload_running(adapter))
return -EAGAIN;
return cxgb_rdma_ctl(adapter, req, data);
- case GET_RX_PAGE_INFO:
- rx_page_info = data;
- rx_page_info->page_size = tp->rx_pg_size;
- rx_page_info->num = tp->rx_num_pgs;
- break;
default:
return -EOPNOTSUPP;
}
void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
- dev->recv = rx_offload_blackhole;
+ dev->recv = rx_offload_blackhole;
dev->neigh_update = dummy_neigh_update;
}
return ctx;
}
-
EXPORT_SYMBOL(cxgb3_free_atid);
/*
t->stids_in_use--;
spin_unlock_bh(&t->stid_lock);
}
-
EXPORT_SYMBOL(cxgb3_free_stid);
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
- void *ctx, unsigned int tid)
+ void *ctx, unsigned int tid)
{
struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
t->tid_tab[tid].ctx = ctx;
atomic_inc(&t->tids_in_use);
}
-
EXPORT_SYMBOL(cxgb3_insert_tid);
/*
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
-static void t3_process_tid_release_list(void *work)
+DECLARE_TASK_FUNC(t3_process_tid_release_list, task_param)
{
- struct t3c_data *td = work;
struct sk_buff *skb;
+ struct t3c_data *td = WORK2T3CDATA(task_param, tid_release_task);
struct t3cdev *tdev = td->dev;
-
spin_lock_bh(&td->tid_release_lock);
while (td->tid_release_list) {
struct t3c_tid_entry *p = td->tid_release_list;
schedule_work(&td->tid_release_task);
spin_unlock_bh(&td->tid_release_lock);
}
-
EXPORT_SYMBOL(cxgb3_queue_tid_release);
/*
}
atomic_dec(&t->tids_in_use);
}
-
EXPORT_SYMBOL(cxgb3_remove_tid);
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
spin_unlock_bh(&t->atid_lock);
return atid;
}
-
EXPORT_SYMBOL(cxgb3_alloc_atid);
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
spin_unlock_bh(&t->stid_lock);
return stid;
}
-
EXPORT_SYMBOL(cxgb3_alloc_stid);
-/* Get the t3cdev associated with a net_device */
-struct t3cdev *dev2t3cdev(struct net_device *dev)
-{
- const struct port_info *pi = netdev_priv(dev);
-
- return (struct t3cdev *)pi->adapter;
-}
-
-EXPORT_SYMBOL(dev2t3cdev);
-
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_smt_write_rpl *rpl = cplhdr(skb);
t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
- t3c_tid->
- ctx);
+ t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
- dev->name, CPL_ACT_OPEN_RPL);
+ CH_MSG(tdev2adap(dev), DEBUG, OFLD,
+ "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_ACT_OPEN_RPL);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
- return t3c_tid->client->handlers[p->opcode] (dev, skb,
- t3c_tid->ctx);
+ return t3c_tid->client->handlers[p->opcode] (dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
- dev->name, p->opcode);
+ CH_MSG(tdev2adap(dev), DEBUG, OFLD,
+ "%s: received clientless CPL command 0x%x\n",
+ dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
- t3c_tid->client->handlers[p->opcode]) {
+ t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
- (dev, skb, t3c_tid->ctx);
+ (dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
- dev->name, p->opcode);
+ CH_MSG(tdev2adap(dev), DEBUG, OFLD,
+ "%s: received clientless CPL command 0x%x\n",
+ dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
- (dev, skb, t3c_tid->ctx);
+ (dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
- dev->name, CPL_PASS_ACCEPT_REQ);
+ CH_MSG(tdev2adap(dev), DEBUG, OFLD,
+ "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_PASS_ACCEPT_REQ);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
* the buffer.
*/
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
- gfp_t gfp)
+ int gfp)
{
if (likely(!skb_cloned(skb))) {
BUG_ON(skb->len < len);
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
- t3c_tid->client->handlers[p->opcode]) {
+ t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
- (dev, skb, t3c_tid->ctx);
+ (dev, skb, t3c_tid->ctx);
} else {
struct cpl_abort_req_rss *req = cplhdr(skb);
struct cpl_abort_rpl *rpl;
unsigned int tid = GET_TID(req);
u8 cmd = req->status;
+ WARN_ON(dev->type == T3B);
+
if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
req->status == CPL_ERR_PERSIST_NEG_ADVICE)
goto out;
reply_skb = cxgb3_get_cpl_reply_skb(skb,
- sizeof(struct
- cpl_abort_rpl),
+ sizeof(struct cpl_abort_rpl),
GFP_ATOMIC);
if (!reply_skb) {
goto out;
}
reply_skb->priority = CPL_PRIORITY_DATA;
- __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
rpl = cplhdr(reply_skb);
rpl->wr.wr_hi =
- htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+ htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
rpl->wr.wr_lo = htonl(V_WR_TID(tid));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ OPCODE_TID(rpl) =
+ htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
rpl->cmd = cmd;
cxgb3_ofld_send(dev, reply_skb);
-out:
+ out:
return CPL_RET_BUF_DONE;
}
}
if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
- (dev, skb, t3c_tid->ctx);
+ (dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
- dev->name, CPL_ACT_ESTABLISH);
+ CH_MSG(tdev2adap(dev), DEBUG, OFLD,
+ "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_ACT_ESTABLISH);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_trace_pkt *p = cplhdr(skb);
+ struct adapter *adapter = tdev2adap(dev);
skb->protocol = htons(0xffff);
skb->dev = dev->lldev;
- skb_pull(skb, sizeof(*p));
- skb->mac.raw = skb->data;
+ if (adapter->params.nports > 2)
+ skb_pull(skb, sizeof(*p) + 8); /* pull CPL + preamble */
+ else
+ skb_pull(skb, sizeof(*p)); /* pull CPL */
+ skb_reset_mac_header(skb);
netif_receive_skb(skb);
return 0;
}
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
- t3c_tid->client->handlers[opcode]) {
- return t3c_tid->client->handlers[opcode] (dev, skb,
- t3c_tid->ctx);
+ t3c_tid->client->handlers[opcode]) {
+ return t3c_tid->client->handlers[opcode](dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
- dev->name, opcode);
+ CH_MSG(tdev2adap(dev), DEBUG, OFLD,
+ "%s: received clientless CPL command 0x%x\n",
+ dev->name, opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
+#if defined(NETEVENT)
static int nb_callback(struct notifier_block *self, unsigned long event,
- void *ctx)
+ void *ctx)
{
switch (event) {
- case (NETEVENT_NEIGH_UPDATE):{
- cxgb_neigh_update((struct neighbour *)ctx);
- break;
+ case (NETEVENT_NEIGH_UPDATE): {
+ cxgb_neigh_update((struct neighbour *)ctx);
+ break;
+ }
+#ifdef DIVY /* XXX Divy no NETEVENT_ROUTE_UPDATE definition */
+ case (NETEVENT_ROUTE_UPDATE):
+ break;
+#endif
+ case (NETEVENT_PMTU_UPDATE):
+ break;
+ case (NETEVENT_REDIRECT): {
+ struct netevent_redirect *nr = ctx;
+ cxgb_redirect(nr->old, nr->new);
+ cxgb_neigh_update(nr->new->neighbour);
+ break;
+ }
+ default:
+ break;
}
- case (NETEVENT_PMTU_UPDATE):
- break;
- case (NETEVENT_REDIRECT):{
- struct netevent_redirect *nr = ctx;
- cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(nr->new->neighbour);
- break;
+ return 0;
+}
+#else
+static int nb_callback(struct notifier_block *self, unsigned long event,
+ void *ctx)
+{
+ return 0;
+}
+
+#if defined(OFLD_USE_KPROBES)
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/autoconf.h>
+#endif
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <net/arp.h>
+
+static int (*orig_arp_constructor)(struct neighbour *);
+
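+/*
+ * neigh_suspect(), neigh_connect() and neigh_max_probes() below are
+ * local copies of static helpers in net/core/neighbour.c, which are
+ * not exported to modules.
+ */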
+static void neigh_suspect(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+
+ neigh->output = neigh->ops->output;
+
+ for (hh = neigh->hh; hh; hh = hh->hh_next)
+ hh->hh_output = neigh->ops->output;
+}
+
+static void neigh_connect(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+
+ neigh->output = neigh->ops->connected_output;
+
+ for (hh = neigh->hh; hh; hh = hh->hh_next)
+ hh->hh_output = neigh->ops->hh_output;
+}
+
+static inline int neigh_max_probes(const struct neighbour *n)
+{
+ const struct neigh_parms *p = n->parms;
+ return (n->nud_state & NUD_PROBE ?
+ p->ucast_probes :
+ p->ucast_probes + p->app_probes + p->mcast_probes);
+}
+
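+/*
+ * Offload-aware copy of the stock neigh_timer_handler(): the state
+ * machine is unchanged, but cxgb_neigh_update() is called on the
+ * REACHABLE->STALE, DELAY->REACHABLE and ->FAILED transitions so that
+ * the HW L2 table tracks the software neighbour state.
+ */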
+static void neigh_timer_handler_offload(unsigned long arg)
+{
+ unsigned long now, next;
+ struct neighbour *neigh = (struct neighbour *)arg;
+ unsigned state;
+ int notify = 0;
+
+ write_lock(&neigh->lock);
+
+ state = neigh->nud_state;
+ now = jiffies;
+ next = now + HZ;
+
+ if (!(state & NUD_IN_TIMER)) {
+#ifndef CONFIG_SMP
+ printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
+#endif
+ goto out;
}
- default:
- break;
+
+ if (state & NUD_REACHABLE) {
+ if (time_before_eq(now,
+ neigh->confirmed +
+ neigh->parms->reachable_time)) {
+ next = neigh->confirmed + neigh->parms->reachable_time;
+ } else if (time_before_eq(now,
+ neigh->used +
+ neigh->parms->delay_probe_time)) {
+ neigh->nud_state = NUD_DELAY;
+ neigh->updated = jiffies;
+ neigh_suspect(neigh);
+ next = now + neigh->parms->delay_probe_time;
+ } else {
+ neigh->nud_state = NUD_STALE;
+ neigh->updated = jiffies;
+ neigh_suspect(neigh);
+ cxgb_neigh_update(neigh);
+ }
+ } else if (state & NUD_DELAY) {
+ if (time_before_eq(now,
+ neigh->confirmed +
+ neigh->parms->delay_probe_time)) {
+ neigh->nud_state = NUD_REACHABLE;
+ neigh->updated = jiffies;
+ neigh_connect(neigh);
+ cxgb_neigh_update(neigh);
+ next = neigh->confirmed + neigh->parms->reachable_time;
+ } else {
+ neigh->nud_state = NUD_PROBE;
+ neigh->updated = jiffies;
+ atomic_set(&neigh->probes, 0);
+ next = now + neigh->parms->retrans_time;
+ }
+ } else {
+ /* NUD_PROBE|NUD_INCOMPLETE */
+ next = now + neigh->parms->retrans_time;
+ }
+
+ if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
+ atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
+ struct sk_buff *skb;
+
+ neigh->nud_state = NUD_FAILED;
+ neigh->updated = jiffies;
+ notify = 1;
+ cxgb_neigh_update(neigh);
+ NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
+
+ /* It is very thin place. report_unreachable is very
+ complicated routine. Particularly, it can hit the same
+ neighbour entry!
+ So that, we try to be accurate and avoid dead loop. --ANK
+ */
+ while (neigh->nud_state == NUD_FAILED &&
+ (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
+ write_unlock(&neigh->lock);
+ neigh->ops->error_report(neigh, skb);
+ write_lock(&neigh->lock);
+ }
+ skb_queue_purge(&neigh->arp_queue);
}
+
+ if (neigh->nud_state & NUD_IN_TIMER) {
+ if (time_before(next, jiffies + HZ/2))
+ next = jiffies + HZ/2;
+ if (!mod_timer(&neigh->timer, next))
+ neigh_hold(neigh);
+ }
+ if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
+ struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+ /* keep skb alive even if arp_queue overflows */
+ if (skb)
+ skb_get(skb);
+ write_unlock(&neigh->lock);
+ neigh->ops->solicit(neigh, skb);
+ atomic_inc(&neigh->probes);
+ if (skb)
+ kfree_skb(skb);
+ } else {
+out:
+ write_unlock(&neigh->lock);
+ }
+
+#ifdef CONFIG_ARPD
+ if (notify && neigh->parms->app_probes)
+ neigh_app_notify(neigh);
+#endif
+ neigh_release(neigh);
+}
+
+static int arp_constructor_offload(struct neighbour *neigh)
+{
+ if (dev2tdev(neigh->dev))
+ neigh->timer.function = neigh_timer_handler_offload;
+ return orig_arp_constructor(neigh);
+}
+
+/*
+ * This must match exactly the signature of neigh_update for jprobes to work.
+ * It runs from a trap handler with interrupts off so don't disable BH.
+ */
+static int neigh_update_offload(struct neighbour *neigh, const u8 *lladdr,
+ u8 new, u32 flags)
+{
+ write_lock(&neigh->lock);
+ cxgb_neigh_update(neigh);
+ write_unlock(&neigh->lock);
+ jprobe_return();
+ /* NOTREACHED */
return 0;
}
+static struct jprobe neigh_update_jprobe = {
+ .entry = (kprobe_opcode_t *) neigh_update_offload,
+ .kp.addr = (kprobe_opcode_t *) neigh_update
+};
+
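+/*
+ * Divert the stock ARP code while offload is in use: the jprobe above
+ * mirrors neigh_update() calls into the HW L2 table, and wrapping
+ * arp_tbl.constructor makes neighbours on offload-capable interfaces
+ * use neigh_timer_handler_offload().
+ */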
+static int prepare_arp_with_t3core(void)
+{
+ int err;
+
+ err = register_jprobe(&neigh_update_jprobe);
+ if (err) {
+ printk(KERN_ERR "Could not install neigh_update jprobe, "
+ "error %d\n", err);
+ return err;
+ }
+
+ orig_arp_constructor = arp_tbl.constructor;
+ arp_tbl.constructor = arp_constructor_offload;
+
+ return 0;
+}
+
+static void restore_arp_sans_t3core(void)
+{
+ arp_tbl.constructor = orig_arp_constructor;
+ unregister_jprobe(&neigh_update_jprobe);
+}
+
+#else /* Module support */
+
+static inline int prepare_arp_with_t3core(void)
+{
+ return 0;
+}
+
+static inline void restore_arp_sans_t3core(void)
+{}
+#endif
+#endif /* netevent */
+
static struct notifier_block nb = {
.notifier_call = nb_callback
};
printk(KERN_ERR "T3C: handler registration for "
"opcode %x failed\n", opcode);
}
-
EXPORT_SYMBOL(t3_register_cpl_handler);
/*
int r;
local_bh_disable();
+#if defined(CONFIG_CHELSIO_T3)
+ if (unlikely(netdev_nit)) { /* deal with active taps */
+ skb->nh.raw = skb->data;
+ if (!skb->dev)
+ skb->dev = dev->lldev;
+ dev_queue_xmit_nit(skb, skb->dev);
+ }
+#endif
r = dev->send(dev, skb);
+
local_bh_enable();
return r;
}
-
EXPORT_SYMBOL(cxgb3_ofld_send);
-static int is_offloading(struct net_device *dev)
+/**
+ * cxgb3_ofld_recv - process n received offload packets
+ * @dev: the offload device
+ * @skb: an array of offload packets
+ * @n: the number of offload packets
+ *
+ * Process an array of ingress offload packets. Each packet is forwarded
+ * to any active network taps and then passed to the offload device's receive
+ * method. We optimize passing packets to the receive method by passing
+ * it the whole array at once except when there are active taps.
+ */
+int cxgb3_ofld_recv(struct t3cdev *dev, struct sk_buff **skb, int n)
{
- struct adapter *adapter;
- int i;
-
- read_lock_bh(&adapter_list_lock);
- list_for_each_entry(adapter, &adapter_list, adapter_list) {
- for_each_port(adapter, i) {
- if (dev == adapter->port[i]) {
- read_unlock_bh(&adapter_list_lock);
- return 1;
- }
- }
+#if defined(CONFIG_CHELSIO_T3)
+ if (likely(!netdev_nit))
+ return dev->recv(dev, skb, n);
+
+ for ( ; n; n--, skb++) {
+ skb[0]->dev = dev->lldev;
+ dev_queue_xmit_nit(skb[0], dev->lldev);
+ skb[0]->dev = NULL;
+ dev->recv(dev, skb, 1);
}
- read_unlock_bh(&adapter_list_lock);
return 0;
+#else
+ return dev->recv(dev, skb, n);
+#endif
}
void cxgb_neigh_update(struct neighbour *neigh)
{
- struct net_device *dev = neigh->dev;
+ struct t3cdev *tdev = dev2tdev(neigh->dev);
- if (dev && (is_offloading(dev))) {
- struct t3cdev *tdev = dev2t3cdev(dev);
-
- BUG_ON(!tdev);
+ if (tdev)
t3_l2t_update(tdev, neigh);
- }
}
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = 0;
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(W_TCB_L2T_IX);
req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
- struct net_device *olddev, *newdev;
struct tid_info *ti;
- struct t3cdev *tdev;
+ struct t3cdev *old_tdev, *new_tdev;
u32 tid;
int update_tcb;
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = old->neighbour->dev;
- newdev = new->neighbour->dev;
- if (!is_offloading(olddev))
+ old_tdev = dev2tdev(old->neighbour->dev);
+ new_tdev = dev2tdev(new->neighbour->dev);
+
+ if (!old_tdev)
return;
- if (!is_offloading(newdev)) {
- printk(KERN_WARNING "%s: Redirect to non-offload "
+ if (!new_tdev) {
+ printk(KERN_WARNING "%s: Redirect to non-offload "
"device ignored.\n", __FUNCTION__);
return;
}
- tdev = dev2t3cdev(olddev);
- BUG_ON(!tdev);
- if (tdev != dev2t3cdev(newdev)) {
+
+ if (old_tdev != new_tdev) {
printk(KERN_WARNING "%s: Redirect to different "
"offload device ignored.\n", __FUNCTION__);
return;
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new->neighbour, newdev);
+ e = t3_l2t_get(new_tdev, new->neighbour, new->neighbour->dev);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__FUNCTION__);
}
/* Walk tid table and notify clients of dst change. */
- ti = &(T3C_DATA(tdev))->tid_maps;
+ ti = &(T3C_DATA(new_tdev))->tid_maps;
for (tid = 0; tid < ti->ntids; tid++) {
te = lookup_tid(ti, tid);
BUG_ON(!te);
if (te && te->ctx && te->client && te->client->redirect) {
- update_tcb = te->client->redirect(te->ctx, old, new, e);
- if (update_tcb) {
- l2t_hold(L2DATA(tdev), e);
- set_l2t_ix(tdev, tid, e);
+ update_tcb = te->client->redirect(te->ctx, old, new, e);
+ if (update_tcb) {
+ l2t_hold(L2DATA(new_tdev), e);
+ set_l2t_ix(new_tdev, tid, e);
}
}
}
- l2t_release(L2DATA(tdev), e);
+ l2t_release(L2DATA(new_tdev), e);
}
/*
}
/*
- * Free memory allocated through t3_alloc_mem().
+ * Free memory allocated through cxgb3_alloc_mem().
*/
void cxgb_free_mem(void *addr)
{
- unsigned long p = (unsigned long)addr;
+ unsigned long p = (unsigned long) addr;
if (p >= VMALLOC_START && p < VMALLOC_END)
 vfree(addr);
 else
 kfree(addr);
}
+static int offload_info_read_proc(char *buf, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ struct t3c_data *d = data;
+ struct tid_info *t = &d->tid_maps;
+ int len;
+
+ len = sprintf(buf, "TID range: 0..%d, in use: %u\n"
+ "STID range: %d..%d, in use: %u\n"
+ "ATID range: %d..%d, in use: %u\n"
+ "MSS: %u\n",
+ t->ntids - 1, atomic_read(&t->tids_in_use), t->stid_base,
+ t->stid_base + t->nstids - 1, t->stids_in_use,
+ t->atid_base, t->atid_base + t->natids - 1,
+ t->atids_in_use, d->tx_max_chunk);
+ if (len > length)
+ len = length;
+ *eof = 1;
+ return len;
+}
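+/*
+ * Example "info" output (values are illustrative only):
+ *
+ *    TID range: 0..16383, in use: 2
+ *    STID range: 15360..15423, in use: 1
+ *    ATID range: 16384..16447, in use: 0
+ *    MSS: 16384
+ */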
+
+static int offload_info_proc_setup(struct proc_dir_entry *dir,
+ struct t3c_data *d)
+{
+ struct proc_dir_entry *p;
+
+ if (!dir)
+ return -EINVAL;
+
+ p = create_proc_read_entry("info", 0, dir, offload_info_read_proc, d);
+ if (!p)
+ return -ENOMEM;
+
+ p->owner = THIS_MODULE;
+ return 0;
+}
+
+static void offload_proc_dev_setup(struct t3cdev *dev)
+{
+ t3_l2t_proc_setup(dev->proc_dir, L2DATA(dev));
+ offload_info_proc_setup(dev->proc_dir, T3C_DATA(dev));
+}
+
+static void offload_info_proc_free(struct proc_dir_entry *dir)
+{
+ if (dir)
+ remove_proc_entry("info", dir);
+}
+
+static void offload_proc_dev_cleanup(struct t3cdev *dev)
+{
+ t3_l2t_proc_free(dev->proc_dir);
+ offload_info_proc_free(dev->proc_dir);
+}
+
/*
* Allocate and initialize the TID tables. Returns 0 on success.
*/
cxgb_free_mem(t->tid_tab);
}
-static inline void add_adapter(struct adapter *adap)
+static inline void add_adapter(adapter_t *adap)
{
- write_lock_bh(&adapter_list_lock);
+ write_lock(&adapter_list_lock);
list_add_tail(&adap->adapter_list, &adapter_list);
- write_unlock_bh(&adapter_list_lock);
+ write_unlock(&adapter_list_lock);
}
-static inline void remove_adapter(struct adapter *adap)
+static inline void remove_adapter(adapter_t *adap)
{
- write_lock_bh(&adapter_list_lock);
+ write_lock(&adapter_list_lock);
list_del(&adap->adapter_list);
- write_unlock_bh(&adapter_list_lock);
+ write_unlock(&adapter_list_lock);
}
int cxgb3_offload_activate(struct adapter *adapter)
t->mtus = mtutab.mtus;
t->nmtus = mtutab.size;
- INIT_WORK(&t->tid_release_task,t3_process_tid_release_list,t);
spin_lock_init(&t->tid_release_lock);
INIT_LIST_HEAD(&t->list_node);
t->dev = dev;
dev->recv = process_rx;
dev->neigh_update = t3_l2t_update;
+ T3_INIT_WORK(&t->tid_release_task, t3_process_tid_release_list, t);
+
+ offload_proc_dev_setup(dev);
+
/* Register netevent handler once */
- if (list_empty(&adapter_list))
+ if (list_empty(&adapter_list)) {
+#if defined(NETEVENT)
register_netevent_notifier(&nb);
-
+#else
+#if defined(OFLD_USE_KPROBES)
+ if (prepare_arp_with_t3core())
+ printk(KERN_ERR "Unable to set offload capabilities\n");
+#endif
+#endif
+ }
add_adapter(adapter);
return 0;
struct t3cdev *tdev = &adapter->tdev;
struct t3c_data *t = T3C_DATA(tdev);
+ offload_proc_dev_cleanup(tdev);
remove_adapter(adapter);
- if (list_empty(&adapter_list))
+ if (list_empty(&adapter_list)) {
+#if defined(NETEVENT)
unregister_netevent_notifier(&nb);
-
+#else
+#if defined(OFLD_USE_KPROBES)
+ restore_arp_sans_t3core();
+#endif
+#endif
+ }
free_tid_maps(&t->tid_maps);
T3C_DATA(tdev) = NULL;
t3_free_l2t(L2DATA(tdev));
kfree(t);
}
+static void __devexit offload_proc_dev_exit(struct t3cdev *tdev)
+{
+ remove_proc_entry(tdev->name, cxgb3_proc_root);
+ tdev->proc_dir = NULL;
+}
+
+static void __devinit offload_proc_dev_init(struct t3cdev *tdev)
+{
+ if (!cxgb3_proc_root) {
+ printk("%s: root proc idr is null\n", __func__);
+ return;
+ }
+
+ tdev->proc_dir = proc_mkdir(tdev->name, cxgb3_proc_root);
+ if (!tdev->proc_dir) {
+ printk(KERN_WARNING "Unable to create /proc/net/cxgb3/%s dir\n",
+ tdev->name);
+ return;
+ }
+ tdev->proc_dir->owner = THIS_MODULE;
+}
+
static inline void register_tdev(struct t3cdev *tdev)
{
static int unit;
mutex_unlock(&cxgb3_db_lock);
}
+static inline int adap2type(struct adapter *adapter)
+{
+ int type = 0;
+
+ switch (adapter->params.rev) {
+ case T3_REV_A:
+ type = T3A;
+ break;
+ case T3_REV_B:
+ case T3_REV_B2:
+ type = T3B;
+ break;
+ case T3_REV_C:
+ type = T3C;
+ break;
+ }
+ return type;
+}
+
void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
cxgb3_set_dummy_ops(tdev);
tdev->send = t3_offload_tx;
tdev->ctl = cxgb_offload_ctl;
- tdev->type = adapter->params.rev == 0 ? T3A : T3B;
+ tdev->type = adap2type(adapter);
register_tdev(tdev);
+ offload_proc_dev_init(tdev);
}
void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
+ offload_proc_dev_exit(tdev);
tdev->recv = NULL;
tdev->neigh_update = NULL;
unregister_tdev(tdev);
}
+static int offload_devices_read_proc(char *buf, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ int i, len = 0;
+ struct t3cdev *tdev;
+ struct net_device *ndev;
+ struct adapter *adapter;
+
+ len += sprintf(buf, "Device Interfaces\n");
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+ len += sprintf(buf + len, "%-16s", tdev->name);
+ adapter = tdev2adap(tdev);
+ for (i = 0; i < adapter->params.nports; i++) {
+ ndev = adapter->port[i];
+ len += sprintf(buf + len, " %s", ndev->name);
+ }
+ len += sprintf(buf + len, "\n");
+ if (len >= length)
+ break;
+ }
+ mutex_unlock(&cxgb3_db_lock);
+
+ if (len > length)
+ len = length;
+ *eof = 1;
+ return len;
+}
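+/*
+ * Example "devices" output (names are illustrative only):
+ *
+ *    Device Interfaces
+ *    ofld0            eth2 eth3
+ */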
+
+static void offload_proc_cleanup(void)
+{
+ remove_proc_entry("devices", cxgb3_proc_root);
+ remove_proc_entry("cxgb3", proc_net);
+ cxgb3_proc_root = NULL;
+}
+
+static int offload_proc_init(void)
+{
+ struct proc_dir_entry *d;
+
+ cxgb3_proc_root = proc_mkdir("cxgb3", proc_net);
+ if (!cxgb3_proc_root)
+ return -ENOMEM;
+ cxgb3_proc_root->owner = THIS_MODULE;
+
+ d = create_proc_read_entry("devices", 0, cxgb3_proc_root,
+ offload_devices_read_proc, NULL);
+
+ if (!d)
+ goto cleanup;
+ d->owner = THIS_MODULE;
+ return 0;
+
+cleanup:
+ offload_proc_cleanup();
+ return -ENOMEM;
+}
+
void __init cxgb3_offload_init(void)
{
int i;
t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
- t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
- t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
+ /* for iSCSI */
t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
+
+ if (offload_proc_init())
+ printk(KERN_WARNING "Unable to create /proc/net/cxgb3 dir\n");
+}
+
+void __exit cxgb3_offload_exit(void)
+{
+ offload_proc_cleanup();
}
/*
- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2003-2007 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#ifndef _CXGB3_OFFLOAD_H
#define _CXGB3_OFFLOAD_H
#include <linux/list.h>
#include <linux/skbuff.h>
+#include "tcb.h"
#include "l2t.h"
#include "t3cdev.h"
struct adapter;
void cxgb3_offload_init(void);
+void cxgb3_offload_exit(void);
void cxgb3_adapter_ofld(struct adapter *adapter);
void cxgb3_adapter_unofld(struct adapter *adapter);
int cxgb3_offload_activate(struct adapter *adapter);
void cxgb3_offload_deactivate(struct adapter *adapter);
+int cxgb3_ofld_recv(struct t3cdev *dev, struct sk_buff **skb, int n);
+
void cxgb3_set_dummy_ops(struct t3cdev *dev);
struct t3cdev *dev2t3cdev(struct net_device *dev);
struct sk_buff *skb, void *ctx);
struct cxgb3_client {
- char *name;
- void (*add) (struct t3cdev *);
- void (*remove) (struct t3cdev *);
- cxgb3_cpl_handler_func *handlers;
- int (*redirect)(void *ctx, struct dst_entry *old,
- struct dst_entry *new, struct l2t_entry *l2t);
- struct list_head client_list;
+ char *name;
+ void (*add) (struct t3cdev *);
+ void (*remove) (struct t3cdev *);
+ cxgb3_cpl_handler_func *handlers;
+ int (*redirect)(void *ctx, struct dst_entry *old,
+ struct dst_entry *new,
+ struct l2t_entry *l2t);
+ struct list_head client_list;
};
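+/*
+ * Example registration (illustrative only): an upper-layer module
+ * fills in a cxgb3_client and registers it to receive add()/remove()
+ * callbacks for each offload-capable adapter:
+ *
+ *    static struct cxgb3_client my_client = {
+ *        .name     = "my_ulp",
+ *        .add      = my_add,
+ *        .remove   = my_remove,
+ *        .handlers = my_cpl_handlers,
+ *    };
+ *
+ *    cxgb3_register_client(&my_client);
+ */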
/*
void *cxgb3_free_atid(struct t3cdev *dev, int atid);
void cxgb3_free_stid(struct t3cdev *dev, int stid);
void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
- void *ctx, unsigned int tid);
+ void *ctx,
+ unsigned int tid);
void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
struct t3c_tid_entry {
- struct cxgb3_client *client;
- void *ctx;
+ struct cxgb3_client *client;
+ void *ctx;
};
/* CPL message priority levels */
enum {
- CPL_PRIORITY_DATA = 0, /* data messages */
- CPL_PRIORITY_SETUP = 1, /* connection setup messages */
- CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
- CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
- CPL_PRIORITY_ACK = 1, /* RX ACK messages */
- CPL_PRIORITY_CONTROL = 1 /* offload control messages */
+ CPL_PRIORITY_DATA = 0, /* data messages */
+ CPL_PRIORITY_SETUP = 1, /* connection setup messages */
+ CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
+ CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
+ CPL_PRIORITY_ACK = 1, /* RX ACK messages */
+ CPL_PRIORITY_CONTROL = 1 /* offload control messages */
};
/* Flags for return value of CPL message handlers */
enum {
- CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
- CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
- CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
+ CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
+ CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
+ CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
};
typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
struct t3c_data {
struct list_head list_node;
struct t3cdev *dev;
- unsigned int tx_max_chunk; /* max payload for TX_DATA */
- unsigned int max_wrs; /* max in-flight WRs per connection */
+ unsigned int tx_max_chunk; /* max payload for TX_DATA */
+ unsigned int max_wrs; /* max in-flight WRs per connection */
unsigned int nmtus;
const unsigned short *mtus;
struct tid_info tid_maps;
-/*
- * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+/*
+ * ----------------------------------------------------------------------------
+ * >>>>>>>>>>>>>>>>>>>>>>>>>>>>> COPYRIGHT NOTICE <<<<<<<<<<<<<<<<<<<<<<<<<<<<<
+ * ----------------------------------------------------------------------------
+ * Copyright 2004 (C) Chelsio Communications, Inc. (Chelsio)
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
+ * Chelsio Communications, Inc. owns the sole copyright to this software.
+ * You may not make a copy, you may not derive works herefrom, and you may
+ * not distribute this work to others. Other restrictions of rights may apply
+ * as well. This is unpublished, confidential information. All rights reserved.
+ * This software contains confidential information and trade secrets of Chelsio
+ * Communications, Inc. Use, disclosure, or reproduction is prohibited without
+ * the prior express written permission of Chelsio Communications, Inc.
+ * ----------------------------------------------------------------------------
+ * >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Warranty <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
+ * ----------------------------------------------------------------------------
+ * CHELSIO MAKES NO WARRANTY OF ANY KIND WITH REGARD TO THE USE OF THIS
+ * SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * ----------------------------------------------------------------------------
*
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
+ * This is the firmware_exports.h header file, firmware interface defines.
*
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Written January 2005 by felix marti (felix@chelsio.com)
*/
#ifndef _FIRMWARE_EXPORTS_H_
#define _FIRMWARE_EXPORTS_H_
#define FW_WROPCODE_MNGT 0x1D
#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
+#define FW_MNGTOPCODE_WRC_SET 0x01
+#define FW_MNGTOPCODE_TUNNEL_CR_FLUSH 0x02
+
-/* Maximum size of a WR sent from the host, limited by the SGE.
+/* Maximum size of a WR sent from the host, limited by the SGE.
*
- * Note: WR coming from ULP or TP are only limited by CIM.
+ * Note: WR coming from ULP or TP are only limited by CIM.
*/
#define FW_WR_SIZE 128
/* Maximum number of outstanding WRs sent from the host. Value must be
- * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
+ * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
* offload modules to limit the number of WRs per connection.
*/
#define FW_T3_WR_NUM 16
* queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
* start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
*
- * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
+ * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
* to RESP Queue[i].
*/
#define FW_TUNNEL_NUM 8
#define FW_TUNNEL_SGEEC_START 8
#define FW_TUNNEL_TID_START 65544
+
/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
* must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
* (or 'uP Token') FW_CTRL_TID_START.
*
* Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
- */
+ */
#define FW_CTRL_NUM 8
#define FW_CTRL_SGEEC_START 65528
#define FW_CTRL_TID_START 65536
-/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
- * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
- *
- * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
+/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
+ * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
+ *
+ * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
* OFFLOAD Queues, as the host is responsible for providing the correct TID in
* every WR.
*
#define FW_RI_TID_START 65552
/*
- * The RX_PKT_TID
+ * The RX_PKT_TID
*/
#define FW_RX_PKT_NUM 1
#define FW_RX_PKT_TID_START 65553
#define G_FW_VERSION_MICRO(x) \
(((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
-#endif /* _FIRMWARE_EXPORTS_H_ */
+#endif /* _FIRMWARE_EXPORTS_H_ */
/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <net/neighbour.h>
-#include "common.h"
+#include "cxgb3_compat.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
e->neigh = n;
}
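+/*
+ * Program one HW L2 table entry: builds a CPL_L2T_WRITE_REQ on the
+ * given skb with the entry's index, SMT index, VLAN and priority,
+ * copies in the destination MAC and sends it as a control message.
+ */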
+static void setup_l2e(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ struct cpl_l2t_write_req *req;
+
+ req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
+ req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
+ V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
+ V_L2T_W_PRIO(vlan_prio(e)));
+ req->port_idx = e->smt_idx;
+ memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+ skb->priority = CPL_PRIORITY_CONTROL;
+ cxgb3_ofld_send(dev, skb);
+}
+
/*
* Set up an L2T entry and send any packets waiting in the arp queue. The
* supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
- struct cpl_l2t_write_req *req;
if (!skb) {
- skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(struct cpl_l2t_write_req), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
}
- req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
- req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
- V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
- V_L2T_W_PRIO(vlan_prio(e)));
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
- memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
- skb->priority = CPL_PRIORITY_CONTROL;
- cxgb3_ofld_send(dev, skb);
+ setup_l2e(dev, skb, e);
+
while (e->arpq_head) {
skb = e->arpq_head;
e->arpq_head = skb->next;
return 0;
}
+/*
+ * Update an L2T entry.
+ * Must be called with the entry locked.
+ */
+int t3_l2t_update_l2e(struct t3cdev *dev, struct l2t_entry *e)
+{
+ struct sk_buff *skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
+ GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ setup_l2e(dev, skb, e);
+
+ return 0;
+}
+
/*
* Add a packet to the an L2T entry's queue of packets awaiting resolution.
* Must be called with the entry's lock held.
{
again:
switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
- case L2T_STATE_VALID: /* fast-path, send the packet on */
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
spin_lock_bh(&e->lock);
- if (e->state != L2T_STATE_RESOLVING) {
- /* ARP already completed */
+ if (e->state != L2T_STATE_RESOLVING) { /* ARP already completed */
spin_unlock_bh(&e->lock);
goto again;
}
spin_lock_bh(&e->lock);
if (e->arpq_head)
setup_l2e_send_pending(dev, skb, e);
- else /* we lost the race */
+ else /* we lost the race */
__kfree_skb(skb);
spin_unlock_bh(&e->lock);
}
}
return 0;
}
-
EXPORT_SYMBOL(t3_l2t_send_slow);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE) {
}
spin_unlock_bh(&e->lock);
return;
- case L2T_STATE_VALID: /* fast-path, send the packet on */
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
return;
case L2T_STATE_RESOLVING:
spin_lock_bh(&e->lock);
- if (e->state != L2T_STATE_RESOLVING) {
- /* ARP already completed */
+ if (e->state != L2T_STATE_RESOLVING) { /* ARP already completed */
spin_unlock_bh(&e->lock);
goto again;
}
}
return;
}
-
EXPORT_SYMBOL(t3_l2t_send_event);
/*
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
spin_lock_bh(&e->lock);
- if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
neigh_release(e->neigh);
e->neigh = NULL;
spin_unlock_bh(&e->lock);
atomic_inc(&d->nfree);
}
-
EXPORT_SYMBOL(t3_l2e_free);
/*
{
unsigned int nud_state;
- spin_lock(&e->lock); /* avoid race with t3_l2t_free */
+ spin_lock(&e->lock); /* avoid race with t3_l2t_free */
if (neigh != e->neigh)
neigh_replace(e, neigh);
/* Need to allocate a new entry */
e = alloc_l2e(d);
if (e) {
- spin_lock(&e->lock); /* avoid race with t3_l2t_free */
+ spin_lock(&e->lock); /* avoid race with t3_l2t_free */
e->next = d->l2tab[hash].first;
d->l2tab[hash].first = e;
e->state = L2T_STATE_RESOLVING;
e->addr = addr;
e->ifindex = ifidx;
e->smt_idx = smt_idx;
+ e->orig_smt_idx = smt_idx;
+ e->chan_idx = p->txpkt_intf & 1;
atomic_set(&e->refcnt, 1);
neigh_replace(e, neigh);
if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
write_unlock_bh(&d->lock);
return e;
}
-
EXPORT_SYMBOL(t3_l2t_get);
/*
}
}
+#if defined(NETEVENT) || !defined(OFLD_USE_KPROBES)
/*
* Called when the host's ARP layer makes a change to some entry that is
* loaded into the HW L2 table.
setup_l2e_send_pending(dev, NULL, e);
} else {
e->state = neigh_is_connected(neigh) ?
- L2T_STATE_VALID : L2T_STATE_STALE;
+ L2T_STATE_VALID : L2T_STATE_STALE;
if (memcmp(e->dmac, neigh->ha, 6))
setup_l2e_send_pending(dev, NULL, e);
}
if (arpq)
handle_failed_resolution(dev, arpq);
}
+#else
+/*
+ * Called from a kprobe, interrupts are off.
+ */
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+ struct l2t_entry *e;
+ struct l2t_data *d = L2DATA(dev);
+ u32 addr = *(u32 *) neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ int hash = arp_hash(addr, ifidx, d);
+
+ read_lock(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (e->addr == addr && e->ifindex == ifidx) {
+ spin_lock(&e->lock);
+ if (atomic_read(&e->refcnt)) {
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+ e->tdev = dev;
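+ /* kprobe context: IRQs are off, so defer the HW update to the
+ * timer callback, which runs in softirq context */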
+ mod_timer(&e->update_timer, jiffies + 1);
+ }
+ spin_unlock(&e->lock);
+ break;
+ }
+ read_unlock(&d->lock);
+}
+
+static void update_timer_cb(unsigned long data)
+{
+ struct sk_buff *arpq = NULL;
+ struct l2t_entry *e = (struct l2t_entry *)data;
+ struct neighbour *neigh = e->neigh;
+ struct t3cdev *dev = e->tdev;
+
+ barrier();
+ if (!atomic_read(&e->refcnt))
+ return;
+
+ read_lock(&neigh->lock);
+ spin_lock(&e->lock);
+
+ if (atomic_read(&e->refcnt)) {
+ if (e->state == L2T_STATE_RESOLVING) {
+ if (neigh->nud_state & NUD_FAILED) {
+ arpq = e->arpq_head;
+ e->arpq_head = e->arpq_tail = NULL;
+ } else if ((neigh->nud_state &
+ (NUD_CONNECTED|NUD_STALE)) && e->arpq_head)
+ setup_l2e_send_pending(dev, NULL, e);
+ } else {
+ e->state = neigh_is_connected(neigh) ?
+ L2T_STATE_VALID : L2T_STATE_STALE;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
+ setup_l2e_send_pending(dev, NULL, e);
+ }
+ }
+ spin_unlock(&e->lock);
+ read_unlock(&neigh->lock);
+
+ if (arpq)
+ handle_failed_resolution(dev, arpq);
+}
+#endif
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
d->l2tab[i].state = L2T_STATE_UNUSED;
spin_lock_init(&d->l2tab[i].lock);
atomic_set(&d->l2tab[i].refcnt, 0);
+#ifndef NETEVENT
+#ifdef OFLD_USE_KPROBES
+ setup_timer(&d->l2tab[i].update_timer, update_timer_cb,
+ (unsigned long)&d->l2tab[i]);
+#endif
+#endif
}
return d;
}
void t3_free_l2t(struct l2t_data *d)
{
+#ifndef NETEVENT
+#ifdef OFLD_USE_KPROBES
+ int i;
+
+ /* Stop all L2T timers */
+ for (i = 0; i < d->nentries; ++i)
+ del_timer_sync(&d->l2tab[i].update_timer);
+#endif
+#endif
cxgb_free_mem(d);
}
+#ifdef CONFIG_PROC_FS
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct l2t_data *d = seq->private;
+
+ return pos >= d->nentries ? NULL : &d->l2tab[pos];
+}
+
+static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? l2t_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ v = l2t_get_idx(seq, *pos + 1);
+ if (v)
+ ++*pos;
+ return v;
+}
+
+static void l2t_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static char l2e_state(const struct l2t_entry *e)
+{
+ switch (e->state) {
+ case L2T_STATE_VALID: return 'V'; /* valid, fast-path entry */
+ case L2T_STATE_STALE: return 'S'; /* needs revalidation, but usable */
+ case L2T_STATE_RESOLVING:
+ return e->arpq_head ? 'A' : 'R';
+ default:
+ return 'U';
+ }
+}
+
+static int l2t_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Index IP address Ethernet address VLAN "
+ "Prio State Users SMTIDX Port\n");
+ else {
+ char ip[20];
+ struct l2t_entry *e = v;
+
+ spin_lock_bh(&e->lock);
+ sprintf(ip, "%u.%u.%u.%u", NIPQUAD(e->addr));
+ seq_printf(seq, "%-5u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
+ " %3u %c %7u %4u %s\n",
+ e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
+ e->dmac[3], e->dmac[4], e->dmac[5],
+ e->vlan & VLAN_VID_MASK, vlan_prio(e),
+ l2e_state(e), atomic_read(&e->refcnt), e->smt_idx,
+ e->neigh ? e->neigh->dev->name : "");
+ spin_unlock_bh(&e->lock);
+ }
+ return 0;
+}
+
+static struct seq_operations l2t_seq_ops = {
+ .start = l2t_seq_start,
+ .next = l2t_seq_next,
+ .stop = l2t_seq_stop,
+ .show = l2t_seq_show
+};
+
+static int l2t_seq_open(struct inode *inode, struct file *file)
+{
+ int rc = seq_open(file, &l2t_seq_ops);
+
+ if (!rc) {
+ struct proc_dir_entry *dp = PDE(inode);
+ struct seq_file *seq = file->private_data;
+
+ seq->private = dp->data;
+ }
+ return rc;
+}
+
+static struct file_operations l2t_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = l2t_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Create the proc entries for the L2 table under dir.
+ */
+int t3_l2t_proc_setup(struct proc_dir_entry *dir, struct l2t_data *d)
+{
+ struct proc_dir_entry *p;
+
+ if (!dir)
+ return -EINVAL;
+
+ p = create_proc_entry("l2t", S_IRUGO, dir);
+ if (!p)
+ return -ENOMEM;
+
+ p->proc_fops = &l2t_seq_fops;
+ p->data = d;
+ return 0;
+}
+
+void t3_l2t_proc_free(struct proc_dir_entry *dir)
+{
+ if (dir)
+ remove_proc_entry("l2t", dir);
+}
+#endif
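A usage sketch for the new proc hooks (the per-device proc directory and the call sites are assumed, not shown in this patch):

    /* Hypothetical wiring: create the "l2t" entry at offload init time
     * and remove it on teardown. */
    static int init_l2t_proc(struct t3cdev *tdev, struct proc_dir_entry *dir)
    {
    	return t3_l2t_proc_setup(dir, L2DATA(tdev));
    }

    static void cleanup_l2t_proc(struct proc_dir_entry *dir)
    {
    	t3_l2t_proc_free(dir);
    }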
/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H
+#ifndef AUTOCONF_INCLUDED
+#include <linux/autoconf.h>
+#endif
#include <linux/spinlock.h>
#include "t3cdev.h"
#include <asm/atomic.h>
enum {
- L2T_STATE_VALID, /* entry is up to date */
- L2T_STATE_STALE, /* entry may be used but needs revalidation */
- L2T_STATE_RESOLVING, /* entry needs address resolution */
- L2T_STATE_UNUSED /* entry not in use */
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_UNUSED /* entry not in use */
};
struct neighbour;
* first element in its chain through its first pointer.
*/
struct l2t_entry {
- u16 state; /* entry state */
- u16 idx; /* entry index */
- u32 addr; /* dest IP address */
- int ifindex; /* neighbor's net_device's ifindex */
- u16 smt_idx; /* SMT index */
- u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */
- struct neighbour *neigh; /* associated neighbour */
- struct l2t_entry *first; /* start of hash chain */
- struct l2t_entry *next; /* next l2t_entry on chain */
- struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
+ u16 state; /* entry state */
+ u16 idx; /* entry index */
+ u32 addr; /* dest IP address */
+ int ifindex; /* neighbor's net_device's ifindex */
+ u16 smt_idx; /* SMT index */
+ u16 orig_smt_idx; /* original SMT index in a bond */
+ u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15) */
+ struct neighbour *neigh; /* associated neighbour */
+ struct l2t_entry *first; /* start of hash chain */
+ struct l2t_entry *next; /* next l2t_entry on chain */
+ struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
struct sk_buff *arpq_tail;
spinlock_t lock;
- atomic_t refcnt; /* entry reference count */
- u8 dmac[6]; /* neighbour's MAC address */
+ atomic_t refcnt; /* entry reference count */
+ u8 dmac[6]; /* neighbour's MAC address */
+ u8 chan_idx; /* channel index */
+#ifndef NETEVENT
+#ifdef OFLD_USE_KPROBES
+ struct timer_list update_timer;
+ struct t3cdev *tdev;
+#endif
+#endif
};
struct l2t_data {
- unsigned int nentries; /* number of entries */
- struct l2t_entry *rover; /* starting point for next allocation */
- atomic_t nfree; /* number of free entries */
+ unsigned int nentries; /* number of entries */
+ struct l2t_entry *rover; /* starting point for next allocation */
+ atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct l2t_entry l2tab[0];
};
-typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
- struct sk_buff * skb);
+typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
+ struct sk_buff *skb);
/*
* Callback stored in an skb to handle address resolution failure.
*/
#define L2DATA(dev) ((dev)->l2opt)
-#define W_TCB_L2T_IX 0
-#define S_TCB_L2T_IX 7
-#define M_TCB_L2T_IX 0x7ffULL
-#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
-
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
void t3_free_l2t(struct l2t_data *d);
+int t3_l2t_update_l2e(struct t3cdev *dev, struct l2t_entry *e);
+
+#ifdef CONFIG_PROC_FS
+int t3_l2t_proc_setup(struct proc_dir_entry *dir, struct l2t_data *d);
+void t3_l2t_proc_free(struct proc_dir_entry *dir);
+#else
+#define t3_l2t_proc_setup(dir, d) 0
+#define t3_l2t_proc_free(dir)
+#endif
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
- if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
+ if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
atomic_dec(&d->nfree);
}
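l2t_hold() pairs with a release helper of the following shape, consistent with t3_l2e_free() above (the name l2t_release is assumed here):

    static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
    {
    	if (atomic_dec_and_test(&e->refcnt))	/* 1 -> 0 transition */
    		t3_l2e_free(d, e);		/* recycle entry, bump nfree */
    }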
/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#include "common.h"
#include "regs.h"
* Issue a command to the TCAM and wait for its completion. The address and
* any data required by the command must have been setup by the caller.
*/
-static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
+static int mc5_cmd_write(adapter_t *adapter, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
-static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
- u32 v3)
+static inline void dbgi_wr_addr3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
}
-static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
- u32 v3)
+static inline void dbgi_wr_data3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
}
-static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
- u32 *v3)
+static inline void dbgi_rd_rsp3(adapter_t *adapter, u32 *v1, u32 *v2, u32 *v3)
{
*v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
*v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
* command cmd. The data to be written must have been set up by the caller.
* Returns -1 on failure, 0 on success.
*/
-static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
+static int mc5_write(adapter_t *adapter, u32 addr_lo, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
if (mc5_cmd_write(adapter, cmd) == 0)
return 0;
- CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
- addr_lo);
+ CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n", addr_lo);
return -1;
}
static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
u32 data_array_base, u32 write_cmd,
- int addr_shift)
+ int addr_shift)
{
unsigned int i;
- struct adapter *adap = mc5->adapter;
+ adapter_t *adap = mc5->adapter;
/*
* We need the size of the TCAM data and mask arrays in terms of
unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
if (mc5->mode == MC5_MODE_144_BIT) {
- size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
+ size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
server_base *= 2;
}
return -1;
/* Initialize the mask array. */
- dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
- for (i = 0; i < size72; i++) {
- if (i == server_base) /* entering server or routing region */
- t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
- mc5->mode == MC5_MODE_144_BIT ?
- 0xfffffff9 : 0xfffffffd);
+ for (i = 0; i < server_base; i++) {
+ dbgi_wr_data3(adap, 0x3fffffff, 0xfff80000, 0xff);
+ if (mc5_write(adap, mask_array_base + (i << addr_shift),
+ write_cmd))
+ return -1;
+ i++;
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
if (mc5_write(adap, mask_array_base + (i << addr_shift),
write_cmd))
return -1;
}
+
+ dbgi_wr_data3(adap,
+ mc5->mode == MC5_MODE_144_BIT ? 0xfffffff9 : 0xfffffffd,
+ 0xffffffff, 0xff);
+ for (; i < size72; i++)
+ if (mc5_write(adap, mask_array_base + (i << addr_shift),
+ write_cmd))
+ return -1;
+
return 0;
}
static int init_idt52100(struct mc5 *mc5)
{
int i;
- struct adapter *adap = mc5->adapter;
+ adapter_t *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
-err:
+ err:
return -EIO;
}
static int init_idt43102(struct mc5 *mc5)
{
int i;
- struct adapter *adap = mc5->adapter;
+ adapter_t *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
- V_RDLAT(0xd) | V_SRCHLAT(0x12));
+ V_RDLAT(0xd) | V_SRCHLAT(0x12));
/*
* Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
-err:
+ err:
return -EIO;
}
/* Put MC5 in DBGI mode. */
static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
{
- t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
- V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
+ t3_set_reg_field(mc5->adapter, A_MC5_DB_CONFIG, F_PRTYEN | F_MBUSEN,
+ F_DBGIEN);
}
/* Put MC5 in M-Bus mode. */
static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
{
- t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
- V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
- V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
- V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
+ t3_set_reg_field(mc5->adapter, A_MC5_DB_CONFIG, F_DBGIEN,
+ V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
}
-/*
- * Initialization that requires the OS and protocol layers to already
- * be intialized goes here.
+/**
+ * t3_mc5_init - initialize MC5 and the TCAM
+ * @mc5: the MC5 handle
+ * @nservers: desired number of TCP servers (listening ports)
+ * @nfilters: desired number of HW filters (classifiers)
+ * @nroutes: desired number of routes
+ *
+ * Initialize MC5 and the TCAM and partition the TCAM for the requested
+ * number of servers, filters, and routes. The number of routes is
+ * typically 0 except for specialized uses of the T3 adapters.
*/
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes)
{
- u32 cfg;
int err;
unsigned int tcam_size = mc5->tcam_size;
- struct adapter *adap = mc5->adapter;
+ unsigned int mode72 = mc5->mode == MC5_MODE_72_BIT;
+ adapter_t *adap = mc5->adapter;
if (!tcam_size)
return 0;
return -EINVAL;
/* Reset the TCAM */
- cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
- cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
- t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
+ t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_TMMODE | F_COMPEN,
+ V_COMPEN(mode72) | V_TMMODE(mode72) | F_TMRST);
if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
CH_ERR(adap, "TCAM reset timed out\n");
return -1;
t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
tcam_size - nroutes - nfilters - nservers);
- mc5->parity_enabled = 1;
-
/* All the TCAM addresses we access have only the low 32 bits non 0 */
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
return err;
}
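As a usage sketch of the partitioning described in the kernel-doc above (call site and values illustrative):

    /* Typical call: no routing region, per the comment above. */
    err = t3_mc5_init(&adapter->mc5, nservers, nfilters, 0);
    if (err)
    	CH_ERR(adapter, "MC5 initialization failed, error %d\n", err);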
-/*
+/**
* read_mc5_range - dump a part of the memory managed by MC5
* @mc5: the MC5 handle
* @start: the start address for the dump
{
u32 read_cmd;
int err = 0;
- struct adapter *adap = mc5->adapter;
+ adapter_t *adap = mc5->adapter;
if (mc5->part_type == IDT75P52100)
read_cmd = IDT_CMD_READ;
}
mc5_dbgi_mode_disable(mc5);
- return 0;
+ return err;
}
#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
-/*
- * MC5 interrupt handler
+/**
+ * t3_mc5_intr_handler - MC5 interrupt handler
+ * @mc5: the MC5 handle
+ *
+ * The MC5 interrupt handler.
*/
void t3_mc5_intr_handler(struct mc5 *mc5)
{
- struct adapter *adap = mc5->adapter;
+ adapter_t *adap = mc5->adapter;
u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
if ((cause & F_PARITYERR) && mc5->parity_enabled) {
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
-void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
+/**
+ * t3_mc5_prep - initialize the SW state for MC5
+ * @adapter: the adapter
+ * @mc5: the MC5 handle
+ * @mode: whether the TCAM will be in 72- or 144-bit mode
+ *
+ * Initialize the SW state associated with MC5. Among other things
+ * this determines the size of the attached TCAM.
+ */
+void __devinit t3_mc5_prep(adapter_t *adapter, struct mc5 *mc5, int mode)
{
#define K * 1024
- static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
+ static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
64 K, 128 K, 256 K, 32 K
};
u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
mc5->adapter = adapter;
- mc5->mode = (unsigned char)mode;
- mc5->part_type = (unsigned char)G_TMTYPE(cfg);
+ mc5->parity_enabled = 1;
+ mc5->mode = (unsigned char) mode;
+ mc5->part_type = (unsigned char) G_TMTYPE(cfg);
if (cfg & F_TMTYPEHI)
mc5->part_type |= 4;
--- /dev/null
+/*
+ * This file is part of the Chelsio T3 Ethernet driver.
+ *
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#include "common.h"
+
+/* Marvell PHY interrupt status bits. */
+#define MV_INTR_JABBER 0x0001
+#define MV_INTR_POLARITY_CHNG 0x0002
+#define MV_INTR_ENG_DETECT_CHNG 0x0010
+#define MV_INTR_DOWNSHIFT 0x0020
+#define MV_INTR_MDI_XOVER_CHNG 0x0040
+#define MV_INTR_FIFO_OVER_UNDER 0x0080
+#define MV_INTR_FALSE_CARRIER 0x0100
+#define MV_INTR_SYMBOL_ERROR 0x0200
+#define MV_INTR_LINK_CHNG 0x0400
+#define MV_INTR_AUTONEG_DONE 0x0800
+#define MV_INTR_PAGE_RECV 0x1000
+#define MV_INTR_DUPLEX_CHNG 0x2000
+#define MV_INTR_SPEED_CHNG 0x4000
+#define MV_INTR_AUTONEG_ERR 0x8000
+
+/* Marvell PHY specific registers. */
+#define MV88E1XXX_SPECIFIC_CNTRL 16
+#define MV88E1XXX_SPECIFIC_STATUS 17
+#define MV88E1XXX_INTR_ENABLE 18
+#define MV88E1XXX_INTR_STATUS 19
+#define MV88E1XXX_EXT_SPECIFIC_CNTRL 20
+#define MV88E1XXX_RECV_ERR 21
+#define MV88E1XXX_EXT_ADDR 22
+#define MV88E1XXX_GLOBAL_STATUS 23
+#define MV88E1XXX_LED_CNTRL 24
+#define MV88E1XXX_LED_OVERRIDE 25
+#define MV88E1XXX_EXT_SPECIFIC_CNTRL2 26
+#define MV88E1XXX_EXT_SPECIFIC_STATUS 27
+#define MV88E1XXX_VIRTUAL_CABLE_TESTER 28
+#define MV88E1XXX_EXTENDED_ADDR 29
+#define MV88E1XXX_EXTENDED_DATA 30
+
+/* PHY specific control register fields */
+#define S_PSCR_MDI_XOVER_MODE 5
+#define M_PSCR_MDI_XOVER_MODE 0x3
+#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
+
+/* Extended PHY specific control register fields */
+#define S_DOWNSHIFT_ENABLE 8
+#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
+
+#define S_DOWNSHIFT_CNT 9
+#define M_DOWNSHIFT_CNT 0x7
+#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
+
+/* PHY specific status register fields */
+#define S_PSSR_JABBER 0
+#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
+
+#define S_PSSR_POLARITY 1
+#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
+
+#define S_PSSR_RX_PAUSE 2
+#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
+
+#define S_PSSR_TX_PAUSE 3
+#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
+
+#define S_PSSR_ENERGY_DETECT 4
+#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
+
+#define S_PSSR_DOWNSHIFT_STATUS 5
+#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
+
+#define S_PSSR_MDI 6
+#define V_PSSR_MDI (1 << S_PSSR_MDI)
+
+#define S_PSSR_CABLE_LEN 7
+#define M_PSSR_CABLE_LEN 0x7
+#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
+#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
+
+#define S_PSSR_LINK 10
+#define V_PSSR_LINK (1 << S_PSSR_LINK)
+
+#define S_PSSR_STATUS_RESOLVED 11
+#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
+
+#define S_PSSR_PAGE_RECEIVED 12
+#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
+
+#define S_PSSR_DUPLEX 13
+#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
+
+#define S_PSSR_SPEED 14
+#define M_PSSR_SPEED 0x3
+#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
+#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
+
+/* MV88E1XXX MDI crossover register values */
+#define CROSSOVER_MDI 0
+#define CROSSOVER_MDIX 1
+#define CROSSOVER_AUTO 3
+
+#define INTR_ENABLE_MASK (MV_INTR_SPEED_CHNG | MV_INTR_DUPLEX_CHNG | \
+ MV_INTR_AUTONEG_DONE | MV_INTR_LINK_CHNG | MV_INTR_FIFO_OVER_UNDER | \
+ MV_INTR_ENG_DETECT_CHNG)
+
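These headers follow a uniform field-macro convention: for a field FOO, S_FOO is the bit offset, M_FOO the unshifted mask, V_FOO(x) shifts a value into position, F_FOO is the single-bit value, and G_FOO(x) extracts the field. A short sketch using the PHY fields defined above:

    /* Insert a 2-bit crossover mode into a specific-control value and
     * extract the resolved speed code from a specific-status value. */
    static inline u32 pscr_set_xover(u32 pscr, int mode)
    {
    	pscr &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE);
    	return pscr | V_PSCR_MDI_XOVER_MODE(mode);  /* e.g. CROSSOVER_AUTO */
    }

    static inline int pssr_speed(u32 status)
    {
    	return G_PSSR_SPEED(status);	/* 0: 10M, 1: 100M, else 1000M */
    }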
+/*
+ * Reset the PHY. If 'wait' is set wait until the reset completes.
+ */
+static int mv88e1xxx_reset(struct cphy *cphy, int wait)
+{
+ return t3_phy_reset(cphy, 0, wait);
+}
+
+static int mv88e1xxx_intr_enable(struct cphy *cphy)
+{
+ return mdio_write(cphy, 0, MV88E1XXX_INTR_ENABLE, INTR_ENABLE_MASK);
+}
+
+static int mv88e1xxx_intr_disable(struct cphy *cphy)
+{
+ return mdio_write(cphy, 0, MV88E1XXX_INTR_ENABLE, 0);
+}
+
+static int mv88e1xxx_intr_clear(struct cphy *cphy)
+{
+ u32 val;
+
+ /* Clear PHY interrupts by reading the register. */
+ return mdio_read(cphy, 0, MV88E1XXX_INTR_STATUS, &val);
+}
+
+static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
+{
+ return t3_mdio_change_bits(cphy, 0, MV88E1XXX_SPECIFIC_CNTRL,
+ V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE),
+ V_PSCR_MDI_XOVER_MODE(crossover));
+}
+
+static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
+{
+ mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);
+
+ /* restart autoneg for change to take effect */
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANRESTART);
+}
+
+static int mv88e1xxx_set_loopback(struct cphy *cphy, int mmd, int dir, int on)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_LOOPBACK,
+ on ? BMCR_LOOPBACK : 0);
+}
+
+static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ u32 status;
+ int sp = -1, dplx = -1, pause = 0;
+
+ mdio_read(cphy, 0, MV88E1XXX_SPECIFIC_STATUS, &status);
+ if ((status & V_PSSR_STATUS_RESOLVED) != 0) {
+ if (status & V_PSSR_RX_PAUSE)
+ pause |= PAUSE_RX;
+ if (status & V_PSSR_TX_PAUSE)
+ pause |= PAUSE_TX;
+ dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ sp = G_PSSR_SPEED(status);
+ if (sp == 0)
+ sp = SPEED_10;
+ else if (sp == 1)
+ sp = SPEED_100;
+ else
+ sp = SPEED_1000;
+ }
+ if (link_ok)
+ *link_ok = (status & V_PSSR_LINK) != 0;
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ int err = t3_set_phy_speed_duplex(phy, speed, duplex);
+
+ /* PHY needs reset for new settings to take effect */
+ if (!err)
+ err = mv88e1xxx_reset(phy, 0);
+ return err;
+}
+
+static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
+{
+ /*
+ * Set the downshift counter to 2 so we try to establish Gb link
+ * twice before downshifting.
+ */
+ return t3_mdio_change_bits(cphy, 0, MV88E1XXX_EXT_SPECIFIC_CNTRL,
+ V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT),
+ downshift_enable ? V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2) : 0);
+}
+
+static int mv88e1xxx_power_down(struct cphy *cphy, int enable)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
+ enable ? BMCR_PDOWN : 0);
+}
+
+static int mv88e1xxx_intr_handler(struct cphy *cphy)
+{
+ const u32 link_change_intrs = MV_INTR_LINK_CHNG |
+ MV_INTR_AUTONEG_DONE | MV_INTR_DUPLEX_CHNG |
+ MV_INTR_SPEED_CHNG | MV_INTR_DOWNSHIFT;
+
+ u32 cause;
+ int cphy_cause = 0;
+
+ mdio_read(cphy, 0, MV88E1XXX_INTR_STATUS, &cause);
+ cause &= INTR_ENABLE_MASK;
+ if (cause & link_change_intrs)
+ cphy_cause |= cphy_cause_link_change;
+ if (cause & MV_INTR_FIFO_OVER_UNDER)
+ cphy_cause |= cphy_cause_fifo_error;
+ return cphy_cause;
+}
+
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops mv88e1xxx_ops = {
+ mv88e1xxx_reset,
+ mv88e1xxx_intr_enable,
+ mv88e1xxx_intr_disable,
+ mv88e1xxx_intr_clear,
+ mv88e1xxx_intr_handler,
+ mv88e1xxx_autoneg_enable,
+ mv88e1xxx_autoneg_restart,
+ t3_phy_advertise,
+ mv88e1xxx_set_loopback,
+ mv88e1xxx_set_speed_duplex,
+ mv88e1xxx_get_link_status,
+ mv88e1xxx_power_down,
+};
+#else
+static struct cphy_ops mv88e1xxx_ops = {
+ .reset = mv88e1xxx_reset,
+ .intr_enable = mv88e1xxx_intr_enable,
+ .intr_disable = mv88e1xxx_intr_disable,
+ .intr_clear = mv88e1xxx_intr_clear,
+ .intr_handler = mv88e1xxx_intr_handler,
+ .autoneg_enable = mv88e1xxx_autoneg_enable,
+ .autoneg_restart = mv88e1xxx_autoneg_restart,
+ .advertise = t3_phy_advertise,
+ .set_loopback = mv88e1xxx_set_loopback,
+ .set_speed_duplex = mv88e1xxx_set_speed_duplex,
+ .get_link_status = mv88e1xxx_get_link_status,
+ .power_down = mv88e1xxx_power_down,
+};
+#endif
+
+int t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ int err;
+
+ cphy_init(phy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops,
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII |
+ SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
+
+ /* Configure copper PHY transmitter as class A to reduce EMI. */
+ err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_ADDR, 0xb);
+ if (!err)
+ err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_DATA, 0x8004);
+
+ if (!err)
+ err = mv88e1xxx_downshift_set(phy, 1); /* Enable downshift */
+ return err;
+}
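A hedged attach sketch (the port-info and MDIO-ops variable names are assumed; board setup supplies the real accessors):

    /* Illustrative PHY attach during port initialization. */
    err = t3_mv88e1xxx_phy_prep(&pi->phy, adapter, phy_addr, mdio_ops);
    if (err)
    	CH_ERR(adapter, "PHY initialization failed, error %d\n", err);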
--- /dev/null
+/*
+ * This file is part of the Chelsio T3 Ethernet driver.
+ *
+ * Copyright (C) 2003-2007 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#ifndef __CHELSIO_OSDEP_H
+#define __CHELSIO_OSDEP_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include "version.h"
+
+#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) \
+ dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
+
+/*
+ * More powerful macro that selectively prints messages based on msg_enable.
+ * For info and debugging messages.
+ */
+#define CH_MSG(adapter, level, category, fmt, ...) do { \
+ if ((adapter)->msg_enable & NETIF_MSG_##category) \
+ dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
+ ## __VA_ARGS__); \
+} while (0)
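For example (NETIF_MSG_LINK is one of the standard netdevice message-level flags; the message itself is illustrative):

    /* Emitted only when NETIF_MSG_LINK is set in adapter->msg_enable. */
    CH_MSG(adapter, INFO, LINK, "port %d link up, %u Mbps\n", port_id, speed);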
+
+#ifdef DEBUG
+# define CH_DBG(adapter, category, fmt, ...) \
+ CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+#else
+# define CH_DBG(adapter, category, fmt, ...)
+#endif
+
+/* Additional NETIF_MSG_* categories */
+#define NETIF_MSG_OFLD 0x4000000
+#define NETIF_MSG_MMIO 0x8000000
+
+#define IFF_FILTER_ETH_P_SLOW 0x4
+
+typedef struct adapter adapter_t;
+
+/**
+ * struct t3_rx_mode - encapsulates the Rx mode for a port
+ * @dev: the net_device associated with the port
+ * @mclist: the multicast address list for the port
+ * @idx: current position within the multicast list
+ *
+ * This structure is passed to the MAC routines that configure the Rx mode
+ * of a port.  It is opaque to the common code, which accesses it only
+ * through a few accessors: promisc_rx_mode() returns whether the port
+ * should be in promiscuous mode, allmulti_rx_mode() returns whether it
+ * should be in ALLMULTI mode, and t3_get_next_mcaddr() returns the
+ * port's multicast addresses one at a time.
+ */
+struct t3_rx_mode {
+ struct net_device *dev;
+ struct dev_mc_list *mclist;
+ unsigned int idx;
+};
+
+static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
+ struct dev_mc_list *mclist)
+{
+ p->dev = dev;
+ p->mclist = mclist;
+ p->idx = 0;
+}
+
+#define promisc_rx_mode(rm) ((rm)->dev->flags & IFF_PROMISC)
+#define allmulti_rx_mode(rm) ((rm)->dev->flags & IFF_ALLMULTI)
+
+/**
+ * t3_get_next_mcaddr - return the next L2 multicast address for a port
+ * @rm: the Rx mode info
+ *
+ * Returns the next Ethernet multicast address for a port or %NULL if there are
+ * no more.
+ */
+static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
+{
+ u8 *addr = NULL;
+
+ if (rm->mclist && rm->idx < rm->dev->mc_count) {
+ addr = rm->mclist->dmi_addr;
+ rm->mclist = rm->mclist->next;
+ rm->idx++;
+ }
+ return addr;
+}
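A sketch of the iteration protocol described above (the hash-programming helper is hypothetical):

    static void program_rx_mode(struct t3_rx_mode *rm)
    {
    	u8 *addr;

    	if (promisc_rx_mode(rm))
    		return;			/* hardware accepts everything */
    	while ((addr = t3_get_next_mcaddr(rm)) != NULL)
    		mac_add_mcast_hash(addr);	/* hypothetical MAC update */
    }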
+
+enum {
+ TP_TMR_RES = 200, /* TP timer resolution in usec */
+ MAX_NPORTS = 4, /* max # of ports */
+ TP_SRAM_OFFSET = 4096, /* TP SRAM content offset in eeprom */
+ TP_SRAM_LEN = 2112, /* size of TP SRAM content in eeprom */
+};
+
+/* compatibility stuff for older kernels */
+#ifndef PCI_EXP_LNKSTA
+#define PCI_EXP_LNKSTA 18 /* Link Status */
+#endif
+
+#ifndef PCI_EXP_LNKCTL
+#define PCI_EXP_LNKCTL 16 /* Link Control */
+#endif
+
+#ifndef PCI_EXP_LNKCAP
+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#endif
+
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8 /* Device Control */
+#endif
+
+#ifndef PCI_EXP_DEVCTL_PAYLOAD
+#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#endif
+
+#ifndef BMCR_SPEED1000
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+#endif
+
+#ifndef MII_CTRL1000
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+#endif
+
+#ifndef ADVERTISE_PAUSE_CAP
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+#endif
+
+#ifndef ADVERTISED_Pause
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0 /* driver took care of packet */
+#define NETDEV_TX_BUSY 1 /* driver tx path was busy */
+#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
+#endif
+
+/* Note: cxgb3_compat.h assumes that struct adapter is already defined.
+ * delayed_work is used in struct adapter definition, hence backporting
+ * its definition here.
+ */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+#define delayed_work work_struct
+#endif
+
+#ifdef LINUX_2_4
+#include "linux_2_4_compat.h"
+#include "linux_2_4_compat_workqueue.h"
+#endif
+
+#ifdef CONFIG_XEN
+#define CHELSIO_FREE_TXBUF_ASAP 1 /* VMs need TX bufs freed ASAP */
+#endif
+
+#endif /* !__CHELSIO_OSDEP_H */
+/* This file is automatically generated --- do not edit */
+
+/* registers for module SGE3 */
+#define SGE3_BASE_ADDR 0x0
+
#define A_SG_CONTROL 0x0
#define S_CONGMODE 29
#define V_FATLPERREN(x) ((x) << S_FATLPERREN)
#define F_FATLPERREN V_FATLPERREN(1U)
+#define S_URGTNL 26
+#define V_URGTNL(x) ((x) << S_URGTNL)
+#define F_URGTNL V_URGTNL(1U)
+
+#define S_NEWNOTIFY 25
+#define V_NEWNOTIFY(x) ((x) << S_NEWNOTIFY)
+#define F_NEWNOTIFY V_NEWNOTIFY(1U)
+
+#define S_AVOIDCQOVFL 24
+#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
+#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
+
+#define S_OPTONEINTMULTQ 23
+#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
+#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
+
+#define S_CQCRDTCTRL 22
+#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
+#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
+
+#define S_EGRENUPBP 21
+#define V_EGRENUPBP(x) ((x) << S_EGRENUPBP)
+#define F_EGRENUPBP V_EGRENUPBP(1U)
+
#define S_DROPPKT 20
#define V_DROPPKT(x) ((x) << S_DROPPKT)
#define F_DROPPKT V_DROPPKT(1U)
#define S_USERSPACESIZE 14
#define M_USERSPACESIZE 0x1f
#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
+#define G_USERSPACESIZE(x) (((x) >> S_USERSPACESIZE) & M_USERSPACESIZE)
#define S_HOSTPAGESIZE 11
#define M_HOSTPAGESIZE 0x7
#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
+#define G_HOSTPAGESIZE(x) (((x) >> S_HOSTPAGESIZE) & M_HOSTPAGESIZE)
+
+#define S_PCIRELAX 10
+#define V_PCIRELAX(x) ((x) << S_PCIRELAX)
+#define F_PCIRELAX V_PCIRELAX(1U)
#define S_FLMODE 9
#define V_FLMODE(x) ((x) << S_FLMODE)
#define S_PKTSHIFT 6
#define M_PKTSHIFT 0x7
#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT)
#define S_ONEINTMULTQ 5
#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
+#define S_FLPICKAVAIL 4
+#define V_FLPICKAVAIL(x) ((x) << S_FLPICKAVAIL)
+#define F_FLPICKAVAIL V_FLPICKAVAIL(1U)
+
+#define S_BIGENDIANEGRESS 3
+#define V_BIGENDIANEGRESS(x) ((x) << S_BIGENDIANEGRESS)
+#define F_BIGENDIANEGRESS V_BIGENDIANEGRESS(1U)
+
#define S_BIGENDIANINGRESS 2
#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
#define F_GLOBALENABLE V_GLOBALENABLE(1U)
-#define S_AVOIDCQOVFL 24
-#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
-#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
-
-#define S_OPTONEINTMULTQ 23
-#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
-#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
-
-#define S_CQCRDTCTRL 22
-#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
-#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
-
#define A_SG_KDOORBELL 0x4
#define S_SELEGRCNTX 31
#define S_EGRCNTX 0
#define M_EGRCNTX 0xffff
#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
+#define G_EGRCNTX(x) (((x) >> S_EGRCNTX) & M_EGRCNTX)
#define A_SG_GTS 0x8
#define S_NEWTIMER 16
#define M_NEWTIMER 0x1fff
#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
+#define G_NEWTIMER(x) (((x) >> S_NEWTIMER) & M_NEWTIMER)
#define S_NEWINDEX 0
#define M_NEWINDEX 0xffff
#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
+#define G_NEWINDEX(x) (((x) >> S_NEWINDEX) & M_NEWINDEX)
#define A_SG_CONTEXT_CMD 0xc
#define S_CONTEXT_CMD_OPCODE 28
#define M_CONTEXT_CMD_OPCODE 0xf
#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
+#define G_CONTEXT_CMD_OPCODE(x) (((x) >> S_CONTEXT_CMD_OPCODE) & M_CONTEXT_CMD_OPCODE)
#define S_CONTEXT_CMD_BUSY 27
#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
#define S_CQ_CREDIT 20
-
#define M_CQ_CREDIT 0x7f
-
#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
-
#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
#define S_CQ 19
-
#define V_CQ(x) ((x) << S_CQ)
#define F_CQ V_CQ(1U)
#define S_CONTEXT 0
#define M_CONTEXT 0xffff
#define V_CONTEXT(x) ((x) << S_CONTEXT)
-
#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
#define A_SG_CONTEXT_DATA0 0x10
-
#define A_SG_CONTEXT_DATA1 0x14
-
#define A_SG_CONTEXT_DATA2 0x18
-
#define A_SG_CONTEXT_DATA3 0x1c
-
#define A_SG_CONTEXT_MASK0 0x20
-
#define A_SG_CONTEXT_MASK1 0x24
-
#define A_SG_CONTEXT_MASK2 0x28
-
#define A_SG_CONTEXT_MASK3 0x2c
-
#define A_SG_RSPQ_CREDIT_RETURN 0x30
#define S_CREDITS 0
#define M_CREDITS 0xffff
#define V_CREDITS(x) ((x) << S_CREDITS)
+#define G_CREDITS(x) (((x) >> S_CREDITS) & M_CREDITS)
#define A_SG_DATA_INTR 0x34
#define V_ERRINTR(x) ((x) << S_ERRINTR)
#define F_ERRINTR V_ERRINTR(1U)
+#define S_DATAINTR 0
+#define M_DATAINTR 0xff
+#define V_DATAINTR(x) ((x) << S_DATAINTR)
+#define G_DATAINTR(x) (((x) >> S_DATAINTR) & M_DATAINTR)
+
#define A_SG_HI_DRB_HI_THRSH 0x38
+#define S_HIDRBHITHRSH 0
+#define M_HIDRBHITHRSH 0x3ff
+#define V_HIDRBHITHRSH(x) ((x) << S_HIDRBHITHRSH)
+#define G_HIDRBHITHRSH(x) (((x) >> S_HIDRBHITHRSH) & M_HIDRBHITHRSH)
+
#define A_SG_HI_DRB_LO_THRSH 0x3c
+#define S_HIDRBLOTHRSH 0
+#define M_HIDRBLOTHRSH 0x3ff
+#define V_HIDRBLOTHRSH(x) ((x) << S_HIDRBLOTHRSH)
+#define G_HIDRBLOTHRSH(x) (((x) >> S_HIDRBLOTHRSH) & M_HIDRBLOTHRSH)
+
#define A_SG_LO_DRB_HI_THRSH 0x40
+#define S_LODRBHITHRSH 0
+#define M_LODRBHITHRSH 0x3ff
+#define V_LODRBHITHRSH(x) ((x) << S_LODRBHITHRSH)
+#define G_LODRBHITHRSH(x) (((x) >> S_LODRBHITHRSH) & M_LODRBHITHRSH)
+
#define A_SG_LO_DRB_LO_THRSH 0x44
+#define S_LODRBLOTHRSH 0
+#define M_LODRBLOTHRSH 0x3ff
+#define V_LODRBLOTHRSH(x) ((x) << S_LODRBLOTHRSH)
+#define G_LODRBLOTHRSH(x) (((x) >> S_LODRBLOTHRSH) & M_LODRBLOTHRSH)
+
+#define A_SG_ONE_INT_MULT_Q_COALESCING_TIMER 0x48
#define A_SG_RSPQ_FL_STATUS 0x4c
+#define S_RSPQ0STARVED 0
+#define V_RSPQ0STARVED(x) ((x) << S_RSPQ0STARVED)
+#define F_RSPQ0STARVED V_RSPQ0STARVED(1U)
+
+#define S_RSPQ1STARVED 1
+#define V_RSPQ1STARVED(x) ((x) << S_RSPQ1STARVED)
+#define F_RSPQ1STARVED V_RSPQ1STARVED(1U)
+
+#define S_RSPQ2STARVED 2
+#define V_RSPQ2STARVED(x) ((x) << S_RSPQ2STARVED)
+#define F_RSPQ2STARVED V_RSPQ2STARVED(1U)
+
+#define S_RSPQ3STARVED 3
+#define V_RSPQ3STARVED(x) ((x) << S_RSPQ3STARVED)
+#define F_RSPQ3STARVED V_RSPQ3STARVED(1U)
+
+#define S_RSPQ4STARVED 4
+#define V_RSPQ4STARVED(x) ((x) << S_RSPQ4STARVED)
+#define F_RSPQ4STARVED V_RSPQ4STARVED(1U)
+
+#define S_RSPQ5STARVED 5
+#define V_RSPQ5STARVED(x) ((x) << S_RSPQ5STARVED)
+#define F_RSPQ5STARVED V_RSPQ5STARVED(1U)
+
+#define S_RSPQ6STARVED 6
+#define V_RSPQ6STARVED(x) ((x) << S_RSPQ6STARVED)
+#define F_RSPQ6STARVED V_RSPQ6STARVED(1U)
+
+#define S_RSPQ7STARVED 7
+#define V_RSPQ7STARVED(x) ((x) << S_RSPQ7STARVED)
+#define F_RSPQ7STARVED V_RSPQ7STARVED(1U)
+
#define S_RSPQ0DISABLED 8
+#define V_RSPQ0DISABLED(x) ((x) << S_RSPQ0DISABLED)
+#define F_RSPQ0DISABLED V_RSPQ0DISABLED(1U)
+
+#define S_RSPQ1DISABLED 9
+#define V_RSPQ1DISABLED(x) ((x) << S_RSPQ1DISABLED)
+#define F_RSPQ1DISABLED V_RSPQ1DISABLED(1U)
+
+#define S_RSPQ2DISABLED 10
+#define V_RSPQ2DISABLED(x) ((x) << S_RSPQ2DISABLED)
+#define F_RSPQ2DISABLED V_RSPQ2DISABLED(1U)
+
+#define S_RSPQ3DISABLED 11
+#define V_RSPQ3DISABLED(x) ((x) << S_RSPQ3DISABLED)
+#define F_RSPQ3DISABLED V_RSPQ3DISABLED(1U)
+
+#define S_RSPQ4DISABLED 12
+#define V_RSPQ4DISABLED(x) ((x) << S_RSPQ4DISABLED)
+#define F_RSPQ4DISABLED V_RSPQ4DISABLED(1U)
+
+#define S_RSPQ5DISABLED 13
+#define V_RSPQ5DISABLED(x) ((x) << S_RSPQ5DISABLED)
+#define F_RSPQ5DISABLED V_RSPQ5DISABLED(1U)
+
+#define S_RSPQ6DISABLED 14
+#define V_RSPQ6DISABLED(x) ((x) << S_RSPQ6DISABLED)
+#define F_RSPQ6DISABLED V_RSPQ6DISABLED(1U)
+
+#define S_RSPQ7DISABLED 15
+#define V_RSPQ7DISABLED(x) ((x) << S_RSPQ7DISABLED)
+#define F_RSPQ7DISABLED V_RSPQ7DISABLED(1U)
+
+#define S_FL0EMPTY 16
+#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY)
+#define F_FL0EMPTY V_FL0EMPTY(1U)
+
+#define S_FL1EMPTY 17
+#define V_FL1EMPTY(x) ((x) << S_FL1EMPTY)
+#define F_FL1EMPTY V_FL1EMPTY(1U)
+
+#define S_FL2EMPTY 18
+#define V_FL2EMPTY(x) ((x) << S_FL2EMPTY)
+#define F_FL2EMPTY V_FL2EMPTY(1U)
+
+#define S_FL3EMPTY 19
+#define V_FL3EMPTY(x) ((x) << S_FL3EMPTY)
+#define F_FL3EMPTY V_FL3EMPTY(1U)
+
+#define S_FL4EMPTY 20
+#define V_FL4EMPTY(x) ((x) << S_FL4EMPTY)
+#define F_FL4EMPTY V_FL4EMPTY(1U)
+
+#define S_FL5EMPTY 21
+#define V_FL5EMPTY(x) ((x) << S_FL5EMPTY)
+#define F_FL5EMPTY V_FL5EMPTY(1U)
+
+#define S_FL6EMPTY 22
+#define V_FL6EMPTY(x) ((x) << S_FL6EMPTY)
+#define F_FL6EMPTY V_FL6EMPTY(1U)
+
+#define S_FL7EMPTY 23
+#define V_FL7EMPTY(x) ((x) << S_FL7EMPTY)
+#define F_FL7EMPTY V_FL7EMPTY(1U)
+
+#define S_FL8EMPTY 24
+#define V_FL8EMPTY(x) ((x) << S_FL8EMPTY)
+#define F_FL8EMPTY V_FL8EMPTY(1U)
+
+#define S_FL9EMPTY 25
+#define V_FL9EMPTY(x) ((x) << S_FL9EMPTY)
+#define F_FL9EMPTY V_FL9EMPTY(1U)
+
+#define S_FL10EMPTY 26
+#define V_FL10EMPTY(x) ((x) << S_FL10EMPTY)
+#define F_FL10EMPTY V_FL10EMPTY(1U)
+
+#define S_FL11EMPTY 27
+#define V_FL11EMPTY(x) ((x) << S_FL11EMPTY)
+#define F_FL11EMPTY V_FL11EMPTY(1U)
+
+#define S_FL12EMPTY 28
+#define V_FL12EMPTY(x) ((x) << S_FL12EMPTY)
+#define F_FL12EMPTY V_FL12EMPTY(1U)
+
+#define S_FL13EMPTY 29
+#define V_FL13EMPTY(x) ((x) << S_FL13EMPTY)
+#define F_FL13EMPTY V_FL13EMPTY(1U)
+
+#define S_FL14EMPTY 30
+#define V_FL14EMPTY(x) ((x) << S_FL14EMPTY)
+#define F_FL14EMPTY V_FL14EMPTY(1U)
+
+#define S_FL15EMPTY 31
+#define V_FL15EMPTY(x) ((x) << S_FL15EMPTY)
+#define F_FL15EMPTY V_FL15EMPTY(1U)
+
+#define A_SG_EGR_PRI_CNT 0x50
+
+#define S_EGRERROPCODE 24
+#define M_EGRERROPCODE 0xff
+#define V_EGRERROPCODE(x) ((x) << S_EGRERROPCODE)
+#define G_EGRERROPCODE(x) (((x) >> S_EGRERROPCODE) & M_EGRERROPCODE)
+
+#define S_EGRHIOPCODE 16
+#define M_EGRHIOPCODE 0xff
+#define V_EGRHIOPCODE(x) ((x) << S_EGRHIOPCODE)
+#define G_EGRHIOPCODE(x) (((x) >> S_EGRHIOPCODE) & M_EGRHIOPCODE)
+
+#define S_EGRLOOPCODE 8
+#define M_EGRLOOPCODE 0xff
+#define V_EGRLOOPCODE(x) ((x) << S_EGRLOOPCODE)
+#define G_EGRLOOPCODE(x) (((x) >> S_EGRLOOPCODE) & M_EGRLOOPCODE)
+
+#define S_EGRPRICNT 0
+#define M_EGRPRICNT 0x1f
+#define V_EGRPRICNT(x) ((x) << S_EGRPRICNT)
+#define G_EGRPRICNT(x) (((x) >> S_EGRPRICNT) & M_EGRPRICNT)
#define A_SG_EGR_RCQ_DRB_THRSH 0x54
#define S_HIRCQDRBTHRSH 16
#define M_HIRCQDRBTHRSH 0x7ff
#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
+#define G_HIRCQDRBTHRSH(x) (((x) >> S_HIRCQDRBTHRSH) & M_HIRCQDRBTHRSH)
#define S_LORCQDRBTHRSH 0
#define M_LORCQDRBTHRSH 0x7ff
#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
+#define G_LORCQDRBTHRSH(x) (((x) >> S_LORCQDRBTHRSH) & M_LORCQDRBTHRSH)
#define A_SG_EGR_CNTX_BADDR 0x58
+#define S_EGRCNTXBADDR 5
+#define M_EGRCNTXBADDR 0x7ffffff
+#define V_EGRCNTXBADDR(x) ((x) << S_EGRCNTXBADDR)
+#define G_EGRCNTXBADDR(x) (((x) >> S_EGRCNTXBADDR) & M_EGRCNTXBADDR)
+
#define A_SG_INT_CAUSE 0x5c
#define S_HIRCQPARITYERROR 31
#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+#define S_HICRDTUNDFLOWERR 9
+#define V_HICRDTUNDFLOWERR(x) ((x) << S_HICRDTUNDFLOWERR)
+#define F_HICRDTUNDFLOWERR V_HICRDTUNDFLOWERR(1U)
+
+#define S_LOCRDTUNDFLOWERR 8
+#define V_LOCRDTUNDFLOWERR(x) ((x) << S_LOCRDTUNDFLOWERR)
+#define F_LOCRDTUNDFLOWERR V_LOCRDTUNDFLOWERR(1U)
+
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
#define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U)
#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
-#define A_SG_INT_ENABLE 0x60
+#define S_FLEMPTY 1
+#define V_FLEMPTY(x) ((x) << S_FLEMPTY)
+#define F_FLEMPTY V_FLEMPTY(1U)
+#define S_RSPQSTARVE 0
+#define V_RSPQSTARVE(x) ((x) << S_RSPQSTARVE)
+#define F_RSPQSTARVE V_RSPQSTARVE(1U)
+
+#define A_SG_INT_ENABLE 0x60
#define A_SG_CMDQ_CREDIT_TH 0x64
#define S_TIMEOUT 8
#define M_TIMEOUT 0xffffff
#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
+#define G_TIMEOUT(x) (((x) >> S_TIMEOUT) & M_TIMEOUT)
#define S_THRESHOLD 0
#define M_THRESHOLD 0xff
#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+#define G_THRESHOLD(x) (((x) >> S_THRESHOLD) & M_THRESHOLD)
#define A_SG_TIMER_TICK 0x68
-
#define A_SG_CQ_CONTEXT_BADDR 0x6c
+#define S_BASEADDR 5
+#define M_BASEADDR 0x7ffffff
+#define V_BASEADDR(x) ((x) << S_BASEADDR)
+#define G_BASEADDR(x) (((x) >> S_BASEADDR) & M_BASEADDR)
+
#define A_SG_OCO_BASE 0x70
#define S_BASE1 16
#define M_BASE1 0xffff
#define V_BASE1(x) ((x) << S_BASE1)
+#define G_BASE1(x) (((x) >> S_BASE1) & M_BASE1)
+
+#define S_BASE0 0
+#define M_BASE0 0xffff
+#define V_BASE0(x) ((x) << S_BASE0)
+#define G_BASE0(x) (((x) >> S_BASE0) & M_BASE0)
#define A_SG_DRB_PRI_THRESH 0x74
+#define S_DRBPRITHRSH 0
+#define M_DRBPRITHRSH 0xffff
+#define V_DRBPRITHRSH(x) ((x) << S_DRBPRITHRSH)
+#define G_DRBPRITHRSH(x) (((x) >> S_DRBPRITHRSH) & M_DRBPRITHRSH)
+
+#define A_SG_DEBUG_INDEX 0x78
+#define A_SG_DEBUG_DATA 0x7c
+
+/* registers for module PCIX1 */
+#define PCIX1_BASE_ADDR 0x80
+
#define A_PCIX_INT_ENABLE 0x80
#define S_MSIXPARERR 22
#define M_MSIXPARERR 0x7
-
#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
+#define G_MSIXPARERR(x) (((x) >> S_MSIXPARERR) & M_MSIXPARERR)
#define S_CFPARERR 18
#define M_CFPARERR 0xf
-
#define V_CFPARERR(x) ((x) << S_CFPARERR)
+#define G_CFPARERR(x) (((x) >> S_CFPARERR) & M_CFPARERR)
#define S_RFPARERR 14
#define M_RFPARERR 0xf
-
#define V_RFPARERR(x) ((x) << S_RFPARERR)
+#define G_RFPARERR(x) (((x) >> S_RFPARERR) & M_RFPARERR)
#define S_WFPARERR 12
#define M_WFPARERR 0x3
-
#define V_WFPARERR(x) ((x) << S_WFPARERR)
+#define G_WFPARERR(x) (((x) >> S_WFPARERR) & M_WFPARERR)
#define S_PIOPARERR 11
#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
#define F_MSTDETPARERR V_MSTDETPARERR(1U)
#define A_PCIX_INT_CAUSE 0x84
-
#define A_PCIX_CFG 0x88
#define S_DMASTOPEN 19
#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
#define F_CLIDECEN V_CLIDECEN(1U)
+#define S_LATTMRDIS 17
+#define V_LATTMRDIS(x) ((x) << S_LATTMRDIS)
+#define F_LATTMRDIS V_LATTMRDIS(1U)
+
+#define S_LOWPWREN 16
+#define V_LOWPWREN(x) ((x) << S_LOWPWREN)
+#define F_LOWPWREN V_LOWPWREN(1U)
+
+#define S_ASYNCINTVEC 11
+#define M_ASYNCINTVEC 0x1f
+#define V_ASYNCINTVEC(x) ((x) << S_ASYNCINTVEC)
+#define G_ASYNCINTVEC(x) (((x) >> S_ASYNCINTVEC) & M_ASYNCINTVEC)
+
+#define S_MAXSPLTRNC 8
+#define M_MAXSPLTRNC 0x7
+#define V_MAXSPLTRNC(x) ((x) << S_MAXSPLTRNC)
+#define G_MAXSPLTRNC(x) (((x) >> S_MAXSPLTRNC) & M_MAXSPLTRNC)
+
+#define S_MAXSPLTRNR 5
+#define M_MAXSPLTRNR 0x7
+#define V_MAXSPLTRNR(x) ((x) << S_MAXSPLTRNR)
+#define G_MAXSPLTRNR(x) (((x) >> S_MAXSPLTRNR) & M_MAXSPLTRNR)
+
+#define S_MAXWRBYTECNT 3
+#define M_MAXWRBYTECNT 0x3
+#define V_MAXWRBYTECNT(x) ((x) << S_MAXWRBYTECNT)
+#define G_MAXWRBYTECNT(x) (((x) >> S_MAXWRBYTECNT) & M_MAXWRBYTECNT)
+
+#define S_WRREQATOMICEN 2
+#define V_WRREQATOMICEN(x) ((x) << S_WRREQATOMICEN)
+#define F_WRREQATOMICEN V_WRREQATOMICEN(1U)
+
+#define S_RSTWRMMODE 1
+#define V_RSTWRMMODE(x) ((x) << S_RSTWRMMODE)
+#define F_RSTWRMMODE V_RSTWRMMODE(1U)
+
+#define S_PIOACK64EN 0
+#define V_PIOACK64EN(x) ((x) << S_PIOACK64EN)
+#define F_PIOACK64EN V_PIOACK64EN(1U)
+
#define A_PCIX_MODE 0x8c
#define S_PCLKRANGE 6
#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
+#define S_66MHZ 1
+#define V_66MHZ(x) ((x) << S_66MHZ)
+#define F_66MHZ V_66MHZ(1U)
+
#define S_64BIT 0
#define V_64BIT(x) ((x) << S_64BIT)
#define F_64BIT V_64BIT(1U)
+#define A_PCIX_CAL 0x90
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define S_PERCALDIV 22
+#define M_PERCALDIV 0xff
+#define V_PERCALDIV(x) ((x) << S_PERCALDIV)
+#define G_PERCALDIV(x) (((x) >> S_PERCALDIV) & M_PERCALDIV)
+
+#define S_PERCALEN 21
+#define V_PERCALEN(x) ((x) << S_PERCALEN)
+#define F_PERCALEN V_PERCALEN(1U)
+
+#define S_SGLCALEN 20
+#define V_SGLCALEN(x) ((x) << S_SGLCALEN)
+#define F_SGLCALEN V_SGLCALEN(1U)
+
+#define S_ZINUPDMODE 19
+#define V_ZINUPDMODE(x) ((x) << S_ZINUPDMODE)
+#define F_ZINUPDMODE V_ZINUPDMODE(1U)
+
+#define S_ZINSEL 18
+#define V_ZINSEL(x) ((x) << S_ZINSEL)
+#define F_ZINSEL V_ZINSEL(1U)
+
+#define S_ZPDMAN 15
+#define M_ZPDMAN 0x7
+#define V_ZPDMAN(x) ((x) << S_ZPDMAN)
+#define G_ZPDMAN(x) (((x) >> S_ZPDMAN) & M_ZPDMAN)
+
+#define S_ZPUMAN 12
+#define M_ZPUMAN 0x7
+#define V_ZPUMAN(x) ((x) << S_ZPUMAN)
+#define G_ZPUMAN(x) (((x) >> S_ZPUMAN) & M_ZPUMAN)
+
+#define S_ZPDOUT 9
+#define M_ZPDOUT 0x7
+#define V_ZPDOUT(x) ((x) << S_ZPDOUT)
+#define G_ZPDOUT(x) (((x) >> S_ZPDOUT) & M_ZPDOUT)
+
+#define S_ZPUOUT 6
+#define M_ZPUOUT 0x7
+#define V_ZPUOUT(x) ((x) << S_ZPUOUT)
+#define G_ZPUOUT(x) (((x) >> S_ZPUOUT) & M_ZPUOUT)
+
+#define S_ZPDIN 3
+#define M_ZPDIN 0x7
+#define V_ZPDIN(x) ((x) << S_ZPDIN)
+#define G_ZPDIN(x) (((x) >> S_ZPDIN) & M_ZPDIN)
+
+#define S_ZPUIN 0
+#define M_ZPUIN 0x7
+#define V_ZPUIN(x) ((x) << S_ZPUIN)
+#define G_ZPUIN(x) (((x) >> S_ZPUIN) & M_ZPUIN)
+
+#define A_PCIX_WOL 0x94
+
+#define S_WAKEUP1 3
+#define V_WAKEUP1(x) ((x) << S_WAKEUP1)
+#define F_WAKEUP1 V_WAKEUP1(1U)
+
+#define S_WAKEUP0 2
+#define V_WAKEUP0(x) ((x) << S_WAKEUP0)
+#define F_WAKEUP0 V_WAKEUP0(1U)
+
+#define S_SLEEPMODE1 1
+#define V_SLEEPMODE1(x) ((x) << S_SLEEPMODE1)
+#define F_SLEEPMODE1 V_SLEEPMODE1(1U)
+
+#define S_SLEEPMODE0 0
+#define V_SLEEPMODE0(x) ((x) << S_SLEEPMODE0)
+#define F_SLEEPMODE0 V_SLEEPMODE0(1U)
+
+#define A_PCIX_STAT0 0x98
+
+#define S_PIOREQFIFOLEVEL 26
+#define M_PIOREQFIFOLEVEL 0x3f
+#define V_PIOREQFIFOLEVEL(x) ((x) << S_PIOREQFIFOLEVEL)
+#define G_PIOREQFIFOLEVEL(x) (((x) >> S_PIOREQFIFOLEVEL) & M_PIOREQFIFOLEVEL)
+
+#define S_RFINIST 24
+#define M_RFINIST 0x3
+#define V_RFINIST(x) ((x) << S_RFINIST)
+#define G_RFINIST(x) (((x) >> S_RFINIST) & M_RFINIST)
+
+#define S_RFRESPRDST 22
+#define M_RFRESPRDST 0x3
+#define V_RFRESPRDST(x) ((x) << S_RFRESPRDST)
+#define G_RFRESPRDST(x) (((x) >> S_RFRESPRDST) & M_RFRESPRDST)
+
+#define S_TARCST 19
+#define M_TARCST 0x7
+#define V_TARCST(x) ((x) << S_TARCST)
+#define G_TARCST(x) (((x) >> S_TARCST) & M_TARCST)
+
+#define S_TARXST 16
+#define M_TARXST 0x7
+#define V_TARXST(x) ((x) << S_TARXST)
+#define G_TARXST(x) (((x) >> S_TARXST) & M_TARXST)
+
+#define S_WFREQWRST 13
+#define M_WFREQWRST 0x7
+#define V_WFREQWRST(x) ((x) << S_WFREQWRST)
+#define G_WFREQWRST(x) (((x) >> S_WFREQWRST) & M_WFREQWRST)
+
+#define S_WFRESPFIFOEMPTY 12
+#define V_WFRESPFIFOEMPTY(x) ((x) << S_WFRESPFIFOEMPTY)
+#define F_WFRESPFIFOEMPTY V_WFRESPFIFOEMPTY(1U)
+
+#define S_WFREQFIFOEMPTY 11
+#define V_WFREQFIFOEMPTY(x) ((x) << S_WFREQFIFOEMPTY)
+#define F_WFREQFIFOEMPTY V_WFREQFIFOEMPTY(1U)
+
+#define S_RFRESPFIFOEMPTY 10
+#define V_RFRESPFIFOEMPTY(x) ((x) << S_RFRESPFIFOEMPTY)
+#define F_RFRESPFIFOEMPTY V_RFRESPFIFOEMPTY(1U)
+
+#define S_RFREQFIFOEMPTY 9
+#define V_RFREQFIFOEMPTY(x) ((x) << S_RFREQFIFOEMPTY)
+#define F_RFREQFIFOEMPTY V_RFREQFIFOEMPTY(1U)
+
+#define S_PIORESPFIFOLEVEL 7
+#define M_PIORESPFIFOLEVEL 0x3
+#define V_PIORESPFIFOLEVEL(x) ((x) << S_PIORESPFIFOLEVEL)
+#define G_PIORESPFIFOLEVEL(x) (((x) >> S_PIORESPFIFOLEVEL) & M_PIORESPFIFOLEVEL)
+
+#define S_CFRESPFIFOEMPTY 6
+#define V_CFRESPFIFOEMPTY(x) ((x) << S_CFRESPFIFOEMPTY)
+#define F_CFRESPFIFOEMPTY V_CFRESPFIFOEMPTY(1U)
+
+#define S_CFREQFIFOEMPTY 5
+#define V_CFREQFIFOEMPTY(x) ((x) << S_CFREQFIFOEMPTY)
+#define F_CFREQFIFOEMPTY V_CFREQFIFOEMPTY(1U)
+
+#define S_VPDRESPFIFOEMPTY 4
+#define V_VPDRESPFIFOEMPTY(x) ((x) << S_VPDRESPFIFOEMPTY)
+#define F_VPDRESPFIFOEMPTY V_VPDRESPFIFOEMPTY(1U)
+
+#define S_VPDREQFIFOEMPTY 3
+#define V_VPDREQFIFOEMPTY(x) ((x) << S_VPDREQFIFOEMPTY)
+#define F_VPDREQFIFOEMPTY V_VPDREQFIFOEMPTY(1U)
+
+#define S_PIO_RSPPND 2
+#define V_PIO_RSPPND(x) ((x) << S_PIO_RSPPND)
+#define F_PIO_RSPPND V_PIO_RSPPND(1U)
+
+#define S_DLYTRNPND 1
+#define V_DLYTRNPND(x) ((x) << S_DLYTRNPND)
+#define F_DLYTRNPND V_DLYTRNPND(1U)
+
+#define S_SPLTRNPND 0
+#define V_SPLTRNPND(x) ((x) << S_SPLTRNPND)
+#define F_SPLTRNPND V_SPLTRNPND(1U)
+
+#define A_PCIX_STAT1 0x9c
+
+#define S_WFINIST 26
+#define M_WFINIST 0xf
+#define V_WFINIST(x) ((x) << S_WFINIST)
+#define G_WFINIST(x) (((x) >> S_WFINIST) & M_WFINIST)
+
+#define S_ARBST 23
+#define M_ARBST 0x7
+#define V_ARBST(x) ((x) << S_ARBST)
+#define G_ARBST(x) (((x) >> S_ARBST) & M_ARBST)
+
+#define S_PMIST 21
+#define M_PMIST 0x3
+#define V_PMIST(x) ((x) << S_PMIST)
+#define G_PMIST(x) (((x) >> S_PMIST) & M_PMIST)
+
+#define S_CALST 19
+#define M_CALST 0x3
+#define V_CALST(x) ((x) << S_CALST)
+#define G_CALST(x) (((x) >> S_CALST) & M_CALST)
+
+#define S_CFREQRDST 17
+#define M_CFREQRDST 0x3
+#define V_CFREQRDST(x) ((x) << S_CFREQRDST)
+#define G_CFREQRDST(x) (((x) >> S_CFREQRDST) & M_CFREQRDST)
+
+#define S_CFINIST 15
+#define M_CFINIST 0x3
+#define V_CFINIST(x) ((x) << S_CFINIST)
+#define G_CFINIST(x) (((x) >> S_CFINIST) & M_CFINIST)
+
+#define S_CFRESPRDST 13
+#define M_CFRESPRDST 0x3
+#define V_CFRESPRDST(x) ((x) << S_CFRESPRDST)
+#define G_CFRESPRDST(x) (((x) >> S_CFRESPRDST) & M_CFRESPRDST)
+
+#define S_INICST 10
+#define M_INICST 0x7
+#define V_INICST(x) ((x) << S_INICST)
+#define G_INICST(x) (((x) >> S_INICST) & M_INICST)
+
+#define S_INIXST 7
+#define M_INIXST 0x7
+#define V_INIXST(x) ((x) << S_INIXST)
+#define G_INIXST(x) (((x) >> S_INIXST) & M_INIXST)
+
+#define S_INTST 4
+#define M_INTST 0x7
+#define V_INTST(x) ((x) << S_INTST)
+#define G_INTST(x) (((x) >> S_INTST) & M_INTST)
+
+#define S_PIOST 2
+#define M_PIOST 0x3
+#define V_PIOST(x) ((x) << S_PIOST)
+#define G_PIOST(x) (((x) >> S_PIOST) & M_PIOST)
+
+#define S_RFREQRDST 0
+#define M_RFREQRDST 0x3
+#define V_RFREQRDST(x) ((x) << S_RFREQRDST)
+#define G_RFREQRDST(x) (((x) >> S_RFREQRDST) & M_RFREQRDST)
+
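+/*
+ * Usage note (illustrative sketch, not taken from the hardware spec): every
+ * field above follows one naming pattern.  S_<NAME> is the bit offset,
+ * M_<NAME> the unshifted mask, V_<NAME>(x) shifts a value into place,
+ * G_<NAME>(x) extracts a field from a register word, and F_<NAME> is the
+ * one-bit flag form.  Assuming the driver's t3_read_reg() accessor,
+ * decoding A_PCIX_STAT1 looks like:
+ *
+ *     u32 stat = t3_read_reg(adapter, A_PCIX_STAT1);
+ *     unsigned int wf_init = G_WFINIST(stat);   // 4-bit state at bit 26
+ *     unsigned int arb_st  = G_ARBST(stat);     // 3-bit state at bit 23
+ */
+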
+/* registers for module PCIE0 */
+#define PCIE0_BASE_ADDR 0x80
+
#define A_PCIE_INT_ENABLE 0x80
-#define S_BISTERR 15
+#define S_BISTERR 19
#define M_BISTERR 0xff
-
#define V_BISTERR(x) ((x) << S_BISTERR)
+#define G_BISTERR(x) (((x) >> S_BISTERR) & M_BISTERR)
#define S_TXPARERR 18
#define V_TXPARERR(x) ((x) << S_TXPARERR)
#define S_PCIE_MSIXPARERR 12
#define M_PCIE_MSIXPARERR 0x7
-
#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
+#define G_PCIE_MSIXPARERR(x) (((x) >> S_PCIE_MSIXPARERR) & M_PCIE_MSIXPARERR)
#define S_PCIE_CFPARERR 11
#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
+#define S_VPDADDRCHNG 5
+#define V_VPDADDRCHNG(x) ((x) << S_VPDADDRCHNG)
+#define F_VPDADDRCHNG V_VPDADDRCHNG(1U)
+
+#define S_BUSMSTREN 4
+#define V_BUSMSTREN(x) ((x) << S_BUSMSTREN)
+#define F_BUSMSTREN V_BUSMSTREN(1U)
+
+#define S_PMSTCHNG 3
+#define V_PMSTCHNG(x) ((x) << S_PMSTCHNG)
+#define F_PMSTCHNG V_PMSTCHNG(1U)
+
+#define S_PEXMSG 2
+#define V_PEXMSG(x) ((x) << S_PEXMSG)
+#define F_PEXMSG V_PEXMSG(1U)
+
+#define S_ZEROLENRD 1
+#define V_ZEROLENRD(x) ((x) << S_ZEROLENRD)
+#define F_ZEROLENRD V_ZEROLENRD(1U)
+
#define S_PEXERR 0
#define V_PEXERR(x) ((x) << S_PEXERR)
#define F_PEXERR V_PEXERR(1U)
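+
+/*
+ * Illustrative sketch only: the F_/V_ forms compose with '|' when arming
+ * A_PCIE_INT_ENABLE.  Assuming the driver's t3_write_reg() accessor, a
+ * caller enabling a set of error interrupts might write:
+ *
+ *     t3_write_reg(adapter, A_PCIE_INT_ENABLE,
+ *                  F_PEXERR | F_UNXSPLCPLERRR |
+ *                  V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) |
+ *                  V_BISTERR(M_BISTERR));
+ */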
#define A_PCIE_INT_CAUSE 0x84
+#define A_PCIE_CFG 0x88
#define S_PCIE_DMASTOPEN 24
#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN)
#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U)
-#define A_PCIE_CFG 0x88
+#define S_PRIORITYINTA 23
+#define V_PRIORITYINTA(x) ((x) << S_PRIORITYINTA)
+#define F_PRIORITYINTA V_PRIORITYINTA(1U)
+
+#define S_INIFULLPKT 22
+#define V_INIFULLPKT(x) ((x) << S_INIFULLPKT)
+#define F_INIFULLPKT V_INIFULLPKT(1U)
+
+#define S_ENABLELINKDWNDRST 21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST 20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U)
+
+#define S_ENABLEHOTRST 19
+#define V_ENABLEHOTRST(x) ((x) << S_ENABLEHOTRST)
+#define F_ENABLEHOTRST V_ENABLEHOTRST(1U)
+
+#define S_INIWAITFORGNT 18
+#define V_INIWAITFORGNT(x) ((x) << S_INIWAITFORGNT)
+#define F_INIWAITFORGNT V_INIWAITFORGNT(1U)
+
+#define S_INIBEDIS 17
+#define V_INIBEDIS(x) ((x) << S_INIBEDIS)
+#define F_INIBEDIS V_INIBEDIS(1U)
#define S_PCIE_CLIDECEN 16
#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
+#define S_PCIE_MAXSPLTRNC 7
+#define M_PCIE_MAXSPLTRNC 0xf
+#define V_PCIE_MAXSPLTRNC(x) ((x) << S_PCIE_MAXSPLTRNC)
+#define G_PCIE_MAXSPLTRNC(x) (((x) >> S_PCIE_MAXSPLTRNC) & M_PCIE_MAXSPLTRNC)
+
+#define S_PCIE_MAXSPLTRNR 1
+#define M_PCIE_MAXSPLTRNR 0x3f
+#define V_PCIE_MAXSPLTRNR(x) ((x) << S_PCIE_MAXSPLTRNR)
+#define G_PCIE_MAXSPLTRNR(x) (((x) >> S_PCIE_MAXSPLTRNR) & M_PCIE_MAXSPLTRNR)
+
#define S_CRSTWRMMODE 0
#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
#define A_PCIE_MODE 0x8c
+#define S_TAR_STATE 29
+#define M_TAR_STATE 0x7
+#define V_TAR_STATE(x) ((x) << S_TAR_STATE)
+#define G_TAR_STATE(x) (((x) >> S_TAR_STATE) & M_TAR_STATE)
+
+#define S_RF_STATEINI 26
+#define M_RF_STATEINI 0x7
+#define V_RF_STATEINI(x) ((x) << S_RF_STATEINI)
+#define G_RF_STATEINI(x) (((x) >> S_RF_STATEINI) & M_RF_STATEINI)
+
+#define S_CF_STATEINI 23
+#define M_CF_STATEINI 0x7
+#define V_CF_STATEINI(x) ((x) << S_CF_STATEINI)
+#define G_CF_STATEINI(x) (((x) >> S_CF_STATEINI) & M_CF_STATEINI)
+
+#define S_PIO_STATEPL 20
+#define M_PIO_STATEPL 0x7
+#define V_PIO_STATEPL(x) ((x) << S_PIO_STATEPL)
+#define G_PIO_STATEPL(x) (((x) >> S_PIO_STATEPL) & M_PIO_STATEPL)
+
+#define S_PIO_STATEISC 18
+#define M_PIO_STATEISC 0x3
+#define V_PIO_STATEISC(x) ((x) << S_PIO_STATEISC)
+#define G_PIO_STATEISC(x) (((x) >> S_PIO_STATEISC) & M_PIO_STATEISC)
+
#define S_NUMFSTTRNSEQRX 10
#define M_NUMFSTTRNSEQRX 0xff
#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
+#define S_LNKCNTLSTATE 2
+#define M_LNKCNTLSTATE 0xff
+#define V_LNKCNTLSTATE(x) ((x) << S_LNKCNTLSTATE)
+#define G_LNKCNTLSTATE(x) (((x) >> S_LNKCNTLSTATE) & M_LNKCNTLSTATE)
+
+#define S_VC0UP 1
+#define V_VC0UP(x) ((x) << S_VC0UP)
+#define F_VC0UP V_VC0UP(1U)
+
+#define S_LNKINITIAL 0
+#define V_LNKINITIAL(x) ((x) << S_LNKINITIAL)
+#define F_LNKINITIAL V_LNKINITIAL(1U)
+
+#define A_PCIE_STAT 0x90
+
+#define S_INI_STATE 28
+#define M_INI_STATE 0xf
+#define V_INI_STATE(x) ((x) << S_INI_STATE)
+#define G_INI_STATE(x) (((x) >> S_INI_STATE) & M_INI_STATE)
+
+#define S_WF_STATEINI 24
+#define M_WF_STATEINI 0xf
+#define V_WF_STATEINI(x) ((x) << S_WF_STATEINI)
+#define G_WF_STATEINI(x) (((x) >> S_WF_STATEINI) & M_WF_STATEINI)
+
+#define S_PLM_REQFIFOCNT 22
+#define M_PLM_REQFIFOCNT 0x3
+#define V_PLM_REQFIFOCNT(x) ((x) << S_PLM_REQFIFOCNT)
+#define G_PLM_REQFIFOCNT(x) (((x) >> S_PLM_REQFIFOCNT) & M_PLM_REQFIFOCNT)
+
+#define S_ER_REQFIFOEMPTY 21
+#define V_ER_REQFIFOEMPTY(x) ((x) << S_ER_REQFIFOEMPTY)
+#define F_ER_REQFIFOEMPTY V_ER_REQFIFOEMPTY(1U)
+
+#define S_WF_RSPFIFOEMPTY 20
+#define V_WF_RSPFIFOEMPTY(x) ((x) << S_WF_RSPFIFOEMPTY)
+#define F_WF_RSPFIFOEMPTY V_WF_RSPFIFOEMPTY(1U)
+
+#define S_WF_REQFIFOEMPTY 19
+#define V_WF_REQFIFOEMPTY(x) ((x) << S_WF_REQFIFOEMPTY)
+#define F_WF_REQFIFOEMPTY V_WF_REQFIFOEMPTY(1U)
+
+#define S_RF_RSPFIFOEMPTY 18
+#define V_RF_RSPFIFOEMPTY(x) ((x) << S_RF_RSPFIFOEMPTY)
+#define F_RF_RSPFIFOEMPTY V_RF_RSPFIFOEMPTY(1U)
+
+#define S_RF_REQFIFOEMPTY 17
+#define V_RF_REQFIFOEMPTY(x) ((x) << S_RF_REQFIFOEMPTY)
+#define F_RF_REQFIFOEMPTY V_RF_REQFIFOEMPTY(1U)
+
+#define S_RF_ACTEMPTY 16
+#define V_RF_ACTEMPTY(x) ((x) << S_RF_ACTEMPTY)
+#define F_RF_ACTEMPTY V_RF_ACTEMPTY(1U)
+
+#define S_PIO_RSPFIFOCNT 11
+#define M_PIO_RSPFIFOCNT 0x1f
+#define V_PIO_RSPFIFOCNT(x) ((x) << S_PIO_RSPFIFOCNT)
+#define G_PIO_RSPFIFOCNT(x) (((x) >> S_PIO_RSPFIFOCNT) & M_PIO_RSPFIFOCNT)
+
+#define S_PIO_REQFIFOCNT 5
+#define M_PIO_REQFIFOCNT 0x3f
+#define V_PIO_REQFIFOCNT(x) ((x) << S_PIO_REQFIFOCNT)
+#define G_PIO_REQFIFOCNT(x) (((x) >> S_PIO_REQFIFOCNT) & M_PIO_REQFIFOCNT)
+
+#define S_CF_RSPFIFOEMPTY 4
+#define V_CF_RSPFIFOEMPTY(x) ((x) << S_CF_RSPFIFOEMPTY)
+#define F_CF_RSPFIFOEMPTY V_CF_RSPFIFOEMPTY(1U)
+
+#define S_CF_REQFIFOEMPTY 3
+#define V_CF_REQFIFOEMPTY(x) ((x) << S_CF_REQFIFOEMPTY)
+#define F_CF_REQFIFOEMPTY V_CF_REQFIFOEMPTY(1U)
+
+#define S_CF_ACTEMPTY 2
+#define V_CF_ACTEMPTY(x) ((x) << S_CF_ACTEMPTY)
+#define F_CF_ACTEMPTY V_CF_ACTEMPTY(1U)
+
+#define S_VPD_RSPFIFOEMPTY 1
+#define V_VPD_RSPFIFOEMPTY(x) ((x) << S_VPD_RSPFIFOEMPTY)
+#define F_VPD_RSPFIFOEMPTY V_VPD_RSPFIFOEMPTY(1U)
+
+#define S_VPD_REQFIFOEMPTY 0
+#define V_VPD_REQFIFOEMPTY(x) ((x) << S_VPD_REQFIFOEMPTY)
+#define F_VPD_REQFIFOEMPTY V_VPD_REQFIFOEMPTY(1U)
+
+#define A_PCIE_CAL 0x90
+
+#define S_CALBUSY 31
+#define V_CALBUSY(x) ((x) << S_CALBUSY)
+#define F_CALBUSY V_CALBUSY(1U)
+
+#define S_CALFAULT 30
+#define V_CALFAULT(x) ((x) << S_CALFAULT)
+#define F_CALFAULT V_CALFAULT(1U)
+
+#define S_PCIE_ZINSEL 11
+#define V_PCIE_ZINSEL(x) ((x) << S_PCIE_ZINSEL)
+#define F_PCIE_ZINSEL V_PCIE_ZINSEL(1U)
+
+#define S_ZMAN 8
+#define M_ZMAN 0x7
+#define V_ZMAN(x) ((x) << S_ZMAN)
+#define G_ZMAN(x) (((x) >> S_ZMAN) & M_ZMAN)
+
+#define S_ZOUT 3
+#define M_ZOUT 0x1f
+#define V_ZOUT(x) ((x) << S_ZOUT)
+#define G_ZOUT(x) (((x) >> S_ZOUT) & M_ZOUT)
+
+#define S_ZIN 0
+#define M_ZIN 0x7
+#define V_ZIN(x) ((x) << S_ZIN)
+#define G_ZIN(x) (((x) >> S_ZIN) & M_ZIN)
+
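+/*
+ * Illustrative sketch: A_PCIE_CAL reports impedance calibration state.
+ * A caller might wait for F_CALBUSY to clear and then read the result,
+ * assuming t3_read_reg() and udelay():
+ *
+ *     u32 cal;
+ *
+ *     do {
+ *             udelay(10);
+ *             cal = t3_read_reg(adapter, A_PCIE_CAL);
+ *     } while (cal & F_CALBUSY);
+ *     if (!(cal & F_CALFAULT))
+ *             pr_debug("ZOUT %u\n", G_ZOUT(cal));
+ */
+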
+#define A_PCIE_WOL 0x94
+
+#define S_CF_RSPSTATE 12
+#define M_CF_RSPSTATE 0x3
+#define V_CF_RSPSTATE(x) ((x) << S_CF_RSPSTATE)
+#define G_CF_RSPSTATE(x) (((x) >> S_CF_RSPSTATE) & M_CF_RSPSTATE)
+
+#define S_RF_RSPSTATE 10
+#define M_RF_RSPSTATE 0x3
+#define V_RF_RSPSTATE(x) ((x) << S_RF_RSPSTATE)
+#define G_RF_RSPSTATE(x) (((x) >> S_RF_RSPSTATE) & M_RF_RSPSTATE)
+
+#define S_PME_STATE 7
+#define M_PME_STATE 0x7
+#define V_PME_STATE(x) ((x) << S_PME_STATE)
+#define G_PME_STATE(x) (((x) >> S_PME_STATE) & M_PME_STATE)
+
+#define S_INT_STATE 4
+#define M_INT_STATE 0x7
+#define V_INT_STATE(x) ((x) << S_INT_STATE)
+#define G_INT_STATE(x) (((x) >> S_INT_STATE) & M_INT_STATE)
+
#define A_PCIE_PEX_CTRL0 0x98
+#define S_CPLTIMEOUTRETRY 31
+#define V_CPLTIMEOUTRETRY(x) ((x) << S_CPLTIMEOUTRETRY)
+#define F_CPLTIMEOUTRETRY V_CPLTIMEOUTRETRY(1U)
+
+#define S_STRICTTSMN 30
+#define V_STRICTTSMN(x) ((x) << S_STRICTTSMN)
+#define F_STRICTTSMN V_STRICTTSMN(1U)
+
#define S_NUMFSTTRNSEQ 22
#define M_NUMFSTTRNSEQ 0xff
#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
#define S_REPLAYLMT 2
#define M_REPLAYLMT 0xfffff
-
#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
+#define G_REPLAYLMT(x) (((x) >> S_REPLAYLMT) & M_REPLAYLMT)
+
+#define S_TXPNDCHKEN 1
+#define V_TXPNDCHKEN(x) ((x) << S_TXPNDCHKEN)
+#define F_TXPNDCHKEN V_TXPNDCHKEN(1U)
+
+#define S_CPLPNDCHKEN 0
+#define V_CPLPNDCHKEN(x) ((x) << S_CPLPNDCHKEN)
+#define F_CPLPNDCHKEN V_CPLPNDCHKEN(1U)
#define A_PCIE_PEX_CTRL1 0x9c
-#define S_T3A_ACKLAT 0
-#define M_T3A_ACKLAT 0x7ff
+#define S_RXPHYERREN 31
+#define V_RXPHYERREN(x) ((x) << S_RXPHYERREN)
+#define F_RXPHYERREN V_RXPHYERREN(1U)
-#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
+#define S_DLLPTIMEOUTLMT 13
+#define M_DLLPTIMEOUTLMT 0x3ffff
+#define V_DLLPTIMEOUTLMT(x) ((x) << S_DLLPTIMEOUTLMT)
+#define G_DLLPTIMEOUTLMT(x) (((x) >> S_DLLPTIMEOUTLMT) & M_DLLPTIMEOUTLMT)
#define S_ACKLAT 0
#define M_ACKLAT 0x1fff
-
#define V_ACKLAT(x) ((x) << S_ACKLAT)
+#define G_ACKLAT(x) (((x) >> S_ACKLAT) & M_ACKLAT)
-#define A_PCIE_PEX_ERR 0xa4
+#define S_T3A_DLLPTIMEOUTLMT 11
+#define M_T3A_DLLPTIMEOUTLMT 0xfffff
+#define V_T3A_DLLPTIMEOUTLMT(x) ((x) << S_T3A_DLLPTIMEOUTLMT)
+#define G_T3A_DLLPTIMEOUTLMT(x) (((x) >> S_T3A_DLLPTIMEOUTLMT) & M_T3A_DLLPTIMEOUTLMT)
-#define A_T3DBG_GPIO_EN 0xd0
+#define S_T3A_ACKLAT 0
+#define M_T3A_ACKLAT 0x7ff
+#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
+#define G_T3A_ACKLAT(x) (((x) >> S_T3A_ACKLAT) & M_T3A_ACKLAT)
-#define S_GPIO11_OEN 27
-#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
-#define F_GPIO11_OEN V_GPIO11_OEN(1U)
+#define A_PCIE_PEX_CTRL2 0xa0
-#define S_GPIO10_OEN 26
-#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
-#define F_GPIO10_OEN V_GPIO10_OEN(1U)
+#define S_LNKCNTLDETDIR 30
+#define V_LNKCNTLDETDIR(x) ((x) << S_LNKCNTLDETDIR)
+#define F_LNKCNTLDETDIR V_LNKCNTLDETDIR(1U)
-#define S_GPIO7_OEN 23
-#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
-#define F_GPIO7_OEN V_GPIO7_OEN(1U)
+#define S_ENTERL1REN 29
+#define V_ENTERL1REN(x) ((x) << S_ENTERL1REN)
+#define F_ENTERL1REN V_ENTERL1REN(1U)
-#define S_GPIO6_OEN 22
-#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
-#define F_GPIO6_OEN V_GPIO6_OEN(1U)
+#define S_PMEXITL1REQ 28
+#define V_PMEXITL1REQ(x) ((x) << S_PMEXITL1REQ)
+#define F_PMEXITL1REQ V_PMEXITL1REQ(1U)
-#define S_GPIO5_OEN 21
-#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
-#define F_GPIO5_OEN V_GPIO5_OEN(1U)
+#define S_PMTXIDLE 27
+#define V_PMTXIDLE(x) ((x) << S_PMTXIDLE)
+#define F_PMTXIDLE V_PMTXIDLE(1U)
-#define S_GPIO4_OEN 20
-#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
-#define F_GPIO4_OEN V_GPIO4_OEN(1U)
+#define S_PCIMODELOOP 26
+#define V_PCIMODELOOP(x) ((x) << S_PCIMODELOOP)
+#define F_PCIMODELOOP V_PCIMODELOOP(1U)
-#define S_GPIO2_OEN 18
-#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
-#define F_GPIO2_OEN V_GPIO2_OEN(1U)
+#define S_L1ASPMTXRXL0STIME 14
+#define M_L1ASPMTXRXL0STIME 0xfff
+#define V_L1ASPMTXRXL0STIME(x) ((x) << S_L1ASPMTXRXL0STIME)
+#define G_L1ASPMTXRXL0STIME(x) (((x) >> S_L1ASPMTXRXL0STIME) & M_L1ASPMTXRXL0STIME)
-#define S_GPIO1_OEN 17
-#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
-#define F_GPIO1_OEN V_GPIO1_OEN(1U)
+#define S_L0SIDLETIME 3
+#define M_L0SIDLETIME 0x7ff
+#define V_L0SIDLETIME(x) ((x) << S_L0SIDLETIME)
+#define G_L0SIDLETIME(x) (((x) >> S_L0SIDLETIME) & M_L0SIDLETIME)
-#define S_GPIO0_OEN 16
-#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
-#define F_GPIO0_OEN V_GPIO0_OEN(1U)
+#define S_ENTERL1ASPMEN 2
+#define V_ENTERL1ASPMEN(x) ((x) << S_ENTERL1ASPMEN)
+#define F_ENTERL1ASPMEN V_ENTERL1ASPMEN(1U)
-#define S_GPIO10_OUT_VAL 10
-#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
-#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
+#define S_ENTERL1EN 1
+#define V_ENTERL1EN(x) ((x) << S_ENTERL1EN)
+#define F_ENTERL1EN V_ENTERL1EN(1U)
-#define S_GPIO7_OUT_VAL 7
-#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
-#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
+#define S_ENTERL0SEN 0
+#define V_ENTERL0SEN(x) ((x) << S_ENTERL0SEN)
+#define F_ENTERL0SEN V_ENTERL0SEN(1U)
-#define S_GPIO6_OUT_VAL 6
-#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
-#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
+#define S_ENTERL23 3
+#define V_ENTERL23(x) ((x) << S_ENTERL23)
+#define F_ENTERL23 V_ENTERL23(1U)
-#define S_GPIO5_OUT_VAL 5
-#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
-#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
+#define A_PCIE_PEX_ERR 0xa4
-#define S_GPIO4_OUT_VAL 4
-#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
-#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
+#define S_CPLTIMEOUTID 18
+#define M_CPLTIMEOUTID 0x7f
+#define V_CPLTIMEOUTID(x) ((x) << S_CPLTIMEOUTID)
+#define G_CPLTIMEOUTID(x) (((x) >> S_CPLTIMEOUTID) & M_CPLTIMEOUTID)
-#define S_GPIO2_OUT_VAL 2
-#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
-#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
+#define S_FLOWCTLOFLOWERR 17
+#define V_FLOWCTLOFLOWERR(x) ((x) << S_FLOWCTLOFLOWERR)
+#define F_FLOWCTLOFLOWERR V_FLOWCTLOFLOWERR(1U)
-#define S_GPIO1_OUT_VAL 1
-#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
-#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
+#define S_REPLAYTIMEOUT 16
+#define V_REPLAYTIMEOUT(x) ((x) << S_REPLAYTIMEOUT)
+#define F_REPLAYTIMEOUT V_REPLAYTIMEOUT(1U)
-#define S_GPIO0_OUT_VAL 0
-#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
-#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+#define S_REPLAYROLLOVER 15
+#define V_REPLAYROLLOVER(x) ((x) << S_REPLAYROLLOVER)
+#define F_REPLAYROLLOVER V_REPLAYROLLOVER(1U)
-#define A_T3DBG_INT_ENABLE 0xd8
+#define S_BADDLLP 14
+#define V_BADDLLP(x) ((x) << S_BADDLLP)
+#define F_BADDLLP V_BADDLLP(1U)
-#define S_GPIO11 11
-#define V_GPIO11(x) ((x) << S_GPIO11)
-#define F_GPIO11 V_GPIO11(1U)
+#define S_DLLPERR 13
+#define V_DLLPERR(x) ((x) << S_DLLPERR)
+#define F_DLLPERR V_DLLPERR(1U)
-#define S_GPIO10 10
-#define V_GPIO10(x) ((x) << S_GPIO10)
-#define F_GPIO10 V_GPIO10(1U)
+#define S_FLOWCTLPROTERR 12
+#define V_FLOWCTLPROTERR(x) ((x) << S_FLOWCTLPROTERR)
+#define F_FLOWCTLPROTERR V_FLOWCTLPROTERR(1U)
-#define S_GPIO7 7
-#define V_GPIO7(x) ((x) << S_GPIO7)
-#define F_GPIO7 V_GPIO7(1U)
+#define S_CPLTIMEOUT 11
+#define V_CPLTIMEOUT(x) ((x) << S_CPLTIMEOUT)
+#define F_CPLTIMEOUT V_CPLTIMEOUT(1U)
+
+#define S_PHYRCVERR 10
+#define V_PHYRCVERR(x) ((x) << S_PHYRCVERR)
+#define F_PHYRCVERR V_PHYRCVERR(1U)
+
+#define S_DISTLP 9
+#define V_DISTLP(x) ((x) << S_DISTLP)
+#define F_DISTLP V_DISTLP(1U)
+
+#define S_BADECRC 8
+#define V_BADECRC(x) ((x) << S_BADECRC)
+#define F_BADECRC V_BADECRC(1U)
+
+#define S_BADTLP 7
+#define V_BADTLP(x) ((x) << S_BADTLP)
+#define F_BADTLP V_BADTLP(1U)
+
+#define S_MALTLP 6
+#define V_MALTLP(x) ((x) << S_MALTLP)
+#define F_MALTLP V_MALTLP(1U)
+
+#define S_UNXCPL 5
+#define V_UNXCPL(x) ((x) << S_UNXCPL)
+#define F_UNXCPL V_UNXCPL(1U)
+
+#define S_UNSREQ 4
+#define V_UNSREQ(x) ((x) << S_UNSREQ)
+#define F_UNSREQ V_UNSREQ(1U)
+
+#define S_PSNREQ 3
+#define V_PSNREQ(x) ((x) << S_PSNREQ)
+#define F_PSNREQ V_PSNREQ(1U)
+
+#define S_UNSCPL 2
+#define V_UNSCPL(x) ((x) << S_UNSCPL)
+#define F_UNSCPL V_UNSCPL(1U)
+
+#define S_CPLABT 1
+#define V_CPLABT(x) ((x) << S_CPLABT)
+#define F_CPLABT V_CPLABT(1U)
+
+#define S_PSNCPL 0
+#define V_PSNCPL(x) ((x) << S_PSNCPL)
+#define F_PSNCPL V_PSNCPL(1U)
+
+#define A_PCIE_SERDES_CTRL 0xa8
+
+#define S_PMASEL 3
+#define V_PMASEL(x) ((x) << S_PMASEL)
+#define F_PMASEL V_PMASEL(1U)
+
+#define S_LANE 0
+#define M_LANE 0x7
+#define V_LANE(x) ((x) << S_LANE)
+#define G_LANE(x) (((x) >> S_LANE) & M_LANE)
+
+#define A_PCIE_PIPE_CTRL 0xa8
+
+#define S_RECDETUSEC 19
+#define M_RECDETUSEC 0x7
+#define V_RECDETUSEC(x) ((x) << S_RECDETUSEC)
+#define G_RECDETUSEC(x) (((x) >> S_RECDETUSEC) & M_RECDETUSEC)
+
+#define S_PLLLCKCYC 6
+#define M_PLLLCKCYC 0x1fff
+#define V_PLLLCKCYC(x) ((x) << S_PLLLCKCYC)
+#define G_PLLLCKCYC(x) (((x) >> S_PLLLCKCYC) & M_PLLLCKCYC)
+
+#define S_ELECIDLEDETCYC 3
+#define M_ELECIDLEDETCYC 0x7
+#define V_ELECIDLEDETCYC(x) ((x) << S_ELECIDLEDETCYC)
+#define G_ELECIDLEDETCYC(x) (((x) >> S_ELECIDLEDETCYC) & M_ELECIDLEDETCYC)
+
+#define S_USECDRLOS 2
+#define V_USECDRLOS(x) ((x) << S_USECDRLOS)
+#define F_USECDRLOS V_USECDRLOS(1U)
+
+#define S_PCLKREQINP1 1
+#define V_PCLKREQINP1(x) ((x) << S_PCLKREQINP1)
+#define F_PCLKREQINP1 V_PCLKREQINP1(1U)
+
+#define S_PCLKOFFINP1 0
+#define V_PCLKOFFINP1(x) ((x) << S_PCLKOFFINP1)
+#define F_PCLKOFFINP1 V_PCLKOFFINP1(1U)
+
+#define A_PCIE_SERDES_QUAD_CTRL0 0xac
+
+#define S_TESTSIG 10
+#define M_TESTSIG 0x7ffff
+#define V_TESTSIG(x) ((x) << S_TESTSIG)
+#define G_TESTSIG(x) (((x) >> S_TESTSIG) & M_TESTSIG)
+
+#define S_OFFSET 2
+#define M_OFFSET 0xff
+#define V_OFFSET(x) ((x) << S_OFFSET)
+#define G_OFFSET(x) (((x) >> S_OFFSET) & M_OFFSET)
+
+#define S_OFFSETEN 1
+#define V_OFFSETEN(x) ((x) << S_OFFSETEN)
+#define F_OFFSETEN V_OFFSETEN(1U)
+
+#define S_IDDQB 0
+#define V_IDDQB(x) ((x) << S_IDDQB)
+#define F_IDDQB V_IDDQB(1U)
+
+#define S_MANMODE 31
+#define V_MANMODE(x) ((x) << S_MANMODE)
+#define F_MANMODE V_MANMODE(1U)
+
+#define S_MANLPBKEN 29
+#define M_MANLPBKEN 0x3
+#define V_MANLPBKEN(x) ((x) << S_MANLPBKEN)
+#define G_MANLPBKEN(x) (((x) >> S_MANLPBKEN) & M_MANLPBKEN)
+
+#define S_MANTXRECDETEN 28
+#define V_MANTXRECDETEN(x) ((x) << S_MANTXRECDETEN)
+#define F_MANTXRECDETEN V_MANTXRECDETEN(1U)
+
+#define S_MANTXBEACON 27
+#define V_MANTXBEACON(x) ((x) << S_MANTXBEACON)
+#define F_MANTXBEACON V_MANTXBEACON(1U)
+
+#define S_MANTXEI 26
+#define V_MANTXEI(x) ((x) << S_MANTXEI)
+#define F_MANTXEI V_MANTXEI(1U)
+
+#define S_MANRXPOLARITY 25
+#define V_MANRXPOLARITY(x) ((x) << S_MANRXPOLARITY)
+#define F_MANRXPOLARITY V_MANRXPOLARITY(1U)
+
+#define S_MANTXRST 24
+#define V_MANTXRST(x) ((x) << S_MANTXRST)
+#define F_MANTXRST V_MANTXRST(1U)
+
+#define S_MANRXRST 23
+#define V_MANRXRST(x) ((x) << S_MANRXRST)
+#define F_MANRXRST V_MANRXRST(1U)
+
+#define S_MANTXEN 22
+#define V_MANTXEN(x) ((x) << S_MANTXEN)
+#define F_MANTXEN V_MANTXEN(1U)
+
+#define S_MANRXEN 21
+#define V_MANRXEN(x) ((x) << S_MANRXEN)
+#define F_MANRXEN V_MANRXEN(1U)
+
+#define S_MANEN 20
+#define V_MANEN(x) ((x) << S_MANEN)
+#define F_MANEN V_MANEN(1U)
+
+#define S_PCIE_CMURANGE 17
+#define M_PCIE_CMURANGE 0x7
+#define V_PCIE_CMURANGE(x) ((x) << S_PCIE_CMURANGE)
+#define G_PCIE_CMURANGE(x) (((x) >> S_PCIE_CMURANGE) & M_PCIE_CMURANGE)
+
+#define S_PCIE_BGENB 16
+#define V_PCIE_BGENB(x) ((x) << S_PCIE_BGENB)
+#define F_PCIE_BGENB V_PCIE_BGENB(1U)
+
+#define S_PCIE_ENSKPDROP 15
+#define V_PCIE_ENSKPDROP(x) ((x) << S_PCIE_ENSKPDROP)
+#define F_PCIE_ENSKPDROP V_PCIE_ENSKPDROP(1U)
+
+#define S_PCIE_ENCOMMA 14
+#define V_PCIE_ENCOMMA(x) ((x) << S_PCIE_ENCOMMA)
+#define F_PCIE_ENCOMMA V_PCIE_ENCOMMA(1U)
+
+#define S_PCIE_EN8B10B 13
+#define V_PCIE_EN8B10B(x) ((x) << S_PCIE_EN8B10B)
+#define F_PCIE_EN8B10B V_PCIE_EN8B10B(1U)
+
+#define S_PCIE_ENELBUF 12
+#define V_PCIE_ENELBUF(x) ((x) << S_PCIE_ENELBUF)
+#define F_PCIE_ENELBUF V_PCIE_ENELBUF(1U)
+
+#define S_PCIE_GAIN 7
+#define M_PCIE_GAIN 0x1f
+#define V_PCIE_GAIN(x) ((x) << S_PCIE_GAIN)
+#define G_PCIE_GAIN(x) (((x) >> S_PCIE_GAIN) & M_PCIE_GAIN)
+
+#define S_PCIE_BANDGAP 3
+#define M_PCIE_BANDGAP 0xf
+#define V_PCIE_BANDGAP(x) ((x) << S_PCIE_BANDGAP)
+#define G_PCIE_BANDGAP(x) (((x) >> S_PCIE_BANDGAP) & M_PCIE_BANDGAP)
+
+#define S_RXCOMADJ 2
+#define V_RXCOMADJ(x) ((x) << S_RXCOMADJ)
+#define F_RXCOMADJ V_RXCOMADJ(1U)
+
+#define S_PREEMPH 0
+#define M_PREEMPH 0x3
+#define V_PREEMPH(x) ((x) << S_PREEMPH)
+#define G_PREEMPH(x) (((x) >> S_PREEMPH) & M_PREEMPH)
+
+#define A_PCIE_SERDES_QUAD_CTRL1 0xb0
+
+#define S_FASTINIT 28
+#define V_FASTINIT(x) ((x) << S_FASTINIT)
+#define F_FASTINIT V_FASTINIT(1U)
+
+#define S_CTCDISABLE 27
+#define V_CTCDISABLE(x) ((x) << S_CTCDISABLE)
+#define F_CTCDISABLE V_CTCDISABLE(1U)
+
+#define S_MANRESETPLL 26
+#define V_MANRESETPLL(x) ((x) << S_MANRESETPLL)
+#define F_MANRESETPLL V_MANRESETPLL(1U)
+
+#define S_MANL2PWRDN 25
+#define V_MANL2PWRDN(x) ((x) << S_MANL2PWRDN)
+#define F_MANL2PWRDN V_MANL2PWRDN(1U)
+
+#define S_MANQUADEN 24
+#define V_MANQUADEN(x) ((x) << S_MANQUADEN)
+#define F_MANQUADEN V_MANQUADEN(1U)
+
+#define S_RXEQCTL 22
+#define M_RXEQCTL 0x3
+#define V_RXEQCTL(x) ((x) << S_RXEQCTL)
+#define G_RXEQCTL(x) (((x) >> S_RXEQCTL) & M_RXEQCTL)
+
+#define S_HIVMODE 21
+#define V_HIVMODE(x) ((x) << S_HIVMODE)
+#define F_HIVMODE V_HIVMODE(1U)
+
+#define S_REFSEL 19
+#define M_REFSEL 0x3
+#define V_REFSEL(x) ((x) << S_REFSEL)
+#define G_REFSEL(x) (((x) >> S_REFSEL) & M_REFSEL)
+
+#define S_RXTERMADJ 17
+#define M_RXTERMADJ 0x3
+#define V_RXTERMADJ(x) ((x) << S_RXTERMADJ)
+#define G_RXTERMADJ(x) (((x) >> S_RXTERMADJ) & M_RXTERMADJ)
+
+#define S_TXTERMADJ 15
+#define M_TXTERMADJ 0x3
+#define V_TXTERMADJ(x) ((x) << S_TXTERMADJ)
+#define G_TXTERMADJ(x) (((x) >> S_TXTERMADJ) & M_TXTERMADJ)
+
+#define S_DEQ 11
+#define M_DEQ 0xf
+#define V_DEQ(x) ((x) << S_DEQ)
+#define G_DEQ(x) (((x) >> S_DEQ) & M_DEQ)
+
+#define S_DTX 7
+#define M_DTX 0xf
+#define V_DTX(x) ((x) << S_DTX)
+#define G_DTX(x) (((x) >> S_DTX) & M_DTX)
+
+#define S_LODRV 6
+#define V_LODRV(x) ((x) << S_LODRV)
+#define F_LODRV V_LODRV(1U)
+
+#define S_HIDRV 5
+#define V_HIDRV(x) ((x) << S_HIDRV)
+#define F_HIDRV V_HIDRV(1U)
+
+#define S_INTPARRESET 4
+#define V_INTPARRESET(x) ((x) << S_INTPARRESET)
+#define F_INTPARRESET V_INTPARRESET(1U)
+
+#define S_INTPARLPBK 3
+#define V_INTPARLPBK(x) ((x) << S_INTPARLPBK)
+#define F_INTPARLPBK V_INTPARLPBK(1U)
+
+#define S_INTSERLPBKWDRV 2
+#define V_INTSERLPBKWDRV(x) ((x) << S_INTSERLPBKWDRV)
+#define F_INTSERLPBKWDRV V_INTSERLPBKWDRV(1U)
+
+#define S_PW 1
+#define V_PW(x) ((x) << S_PW)
+#define F_PW V_PW(1U)
+
+#define S_PCLKDETECT 0
+#define V_PCLKDETECT(x) ((x) << S_PCLKDETECT)
+#define F_PCLKDETECT V_PCLKDETECT(1U)
+
+#define A_PCIE_SERDES_STATUS0 0xb0
+
+#define S_RXERRLANE7 21
+#define M_RXERRLANE7 0x7
+#define V_RXERRLANE7(x) ((x) << S_RXERRLANE7)
+#define G_RXERRLANE7(x) (((x) >> S_RXERRLANE7) & M_RXERRLANE7)
+
+#define S_RXERRLANE6 18
+#define M_RXERRLANE6 0x7
+#define V_RXERRLANE6(x) ((x) << S_RXERRLANE6)
+#define G_RXERRLANE6(x) (((x) >> S_RXERRLANE6) & M_RXERRLANE6)
+
+#define S_RXERRLANE5 15
+#define M_RXERRLANE5 0x7
+#define V_RXERRLANE5(x) ((x) << S_RXERRLANE5)
+#define G_RXERRLANE5(x) (((x) >> S_RXERRLANE5) & M_RXERRLANE5)
+
+#define S_RXERRLANE4 12
+#define M_RXERRLANE4 0x7
+#define V_RXERRLANE4(x) ((x) << S_RXERRLANE4)
+#define G_RXERRLANE4(x) (((x) >> S_RXERRLANE4) & M_RXERRLANE4)
+
+#define S_PCIE_RXERRLANE3 9
+#define M_PCIE_RXERRLANE3 0x7
+#define V_PCIE_RXERRLANE3(x) ((x) << S_PCIE_RXERRLANE3)
+#define G_PCIE_RXERRLANE3(x) (((x) >> S_PCIE_RXERRLANE3) & M_PCIE_RXERRLANE3)
+
+#define S_PCIE_RXERRLANE2 6
+#define M_PCIE_RXERRLANE2 0x7
+#define V_PCIE_RXERRLANE2(x) ((x) << S_PCIE_RXERRLANE2)
+#define G_PCIE_RXERRLANE2(x) (((x) >> S_PCIE_RXERRLANE2) & M_PCIE_RXERRLANE2)
+
+#define S_PCIE_RXERRLANE1 3
+#define M_PCIE_RXERRLANE1 0x7
+#define V_PCIE_RXERRLANE1(x) ((x) << S_PCIE_RXERRLANE1)
+#define G_PCIE_RXERRLANE1(x) (((x) >> S_PCIE_RXERRLANE1) & M_PCIE_RXERRLANE1)
+
+#define S_PCIE_RXERRLANE0 0
+#define M_PCIE_RXERRLANE0 0x7
+#define V_PCIE_RXERRLANE0(x) ((x) << S_PCIE_RXERRLANE0)
+#define G_PCIE_RXERRLANE0(x) (((x) >> S_PCIE_RXERRLANE0) & M_PCIE_RXERRLANE0)
+
+#define A_PCIE_SERDES_LANE_CTRL 0xb4
+
+#define S_EXTBISTCHKERRCLR 22
+#define V_EXTBISTCHKERRCLR(x) ((x) << S_EXTBISTCHKERRCLR)
+#define F_EXTBISTCHKERRCLR V_EXTBISTCHKERRCLR(1U)
+
+#define S_EXTBISTCHKEN 21
+#define V_EXTBISTCHKEN(x) ((x) << S_EXTBISTCHKEN)
+#define F_EXTBISTCHKEN V_EXTBISTCHKEN(1U)
+
+#define S_EXTBISTGENEN 20
+#define V_EXTBISTGENEN(x) ((x) << S_EXTBISTGENEN)
+#define F_EXTBISTGENEN V_EXTBISTGENEN(1U)
+
+#define S_EXTBISTPAT 17
+#define M_EXTBISTPAT 0x7
+#define V_EXTBISTPAT(x) ((x) << S_EXTBISTPAT)
+#define G_EXTBISTPAT(x) (((x) >> S_EXTBISTPAT) & M_EXTBISTPAT)
+
+#define S_EXTPARRESET 16
+#define V_EXTPARRESET(x) ((x) << S_EXTPARRESET)
+#define F_EXTPARRESET V_EXTPARRESET(1U)
+
+#define S_EXTPARLPBK 15
+#define V_EXTPARLPBK(x) ((x) << S_EXTPARLPBK)
+#define F_EXTPARLPBK V_EXTPARLPBK(1U)
+
+#define S_MANRXTERMEN 14
+#define V_MANRXTERMEN(x) ((x) << S_MANRXTERMEN)
+#define F_MANRXTERMEN V_MANRXTERMEN(1U)
+
+#define S_MANBEACONTXEN 13
+#define V_MANBEACONTXEN(x) ((x) << S_MANBEACONTXEN)
+#define F_MANBEACONTXEN V_MANBEACONTXEN(1U)
+
+#define S_MANRXDETECTEN 12
+#define V_MANRXDETECTEN(x) ((x) << S_MANRXDETECTEN)
+#define F_MANRXDETECTEN V_MANRXDETECTEN(1U)
+
+#define S_MANTXIDLEEN 11
+#define V_MANTXIDLEEN(x) ((x) << S_MANTXIDLEEN)
+#define F_MANTXIDLEEN V_MANTXIDLEEN(1U)
+
+#define S_MANRXIDLEEN 10
+#define V_MANRXIDLEEN(x) ((x) << S_MANRXIDLEEN)
+#define F_MANRXIDLEEN V_MANRXIDLEEN(1U)
+
+#define S_MANL1PWRDN 9
+#define V_MANL1PWRDN(x) ((x) << S_MANL1PWRDN)
+#define F_MANL1PWRDN V_MANL1PWRDN(1U)
+
+#define S_MANRESET 8
+#define V_MANRESET(x) ((x) << S_MANRESET)
+#define F_MANRESET V_MANRESET(1U)
+
+#define S_MANFMOFFSET 3
+#define M_MANFMOFFSET 0x1f
+#define V_MANFMOFFSET(x) ((x) << S_MANFMOFFSET)
+#define G_MANFMOFFSET(x) (((x) >> S_MANFMOFFSET) & M_MANFMOFFSET)
+
+#define S_MANFMOFFSETEN 2
+#define V_MANFMOFFSETEN(x) ((x) << S_MANFMOFFSETEN)
+#define F_MANFMOFFSETEN V_MANFMOFFSETEN(1U)
+
+#define S_MANLANEEN 1
+#define V_MANLANEEN(x) ((x) << S_MANLANEEN)
+#define F_MANLANEEN V_MANLANEEN(1U)
+
+#define S_INTSERLPBK 0
+#define V_INTSERLPBK(x) ((x) << S_INTSERLPBK)
+#define F_INTSERLPBK V_INTSERLPBK(1U)
+
+#define A_PCIE_SERDES_STATUS1 0xb4
+
+#define S_CMULOCK 31
+#define V_CMULOCK(x) ((x) << S_CMULOCK)
+#define F_CMULOCK V_CMULOCK(1U)
+
+#define S_RXKLOCKLANE7 23
+#define V_RXKLOCKLANE7(x) ((x) << S_RXKLOCKLANE7)
+#define F_RXKLOCKLANE7 V_RXKLOCKLANE7(1U)
+
+#define S_RXKLOCKLANE6 22
+#define V_RXKLOCKLANE6(x) ((x) << S_RXKLOCKLANE6)
+#define F_RXKLOCKLANE6 V_RXKLOCKLANE6(1U)
+
+#define S_RXKLOCKLANE5 21
+#define V_RXKLOCKLANE5(x) ((x) << S_RXKLOCKLANE5)
+#define F_RXKLOCKLANE5 V_RXKLOCKLANE5(1U)
+
+#define S_RXKLOCKLANE4 20
+#define V_RXKLOCKLANE4(x) ((x) << S_RXKLOCKLANE4)
+#define F_RXKLOCKLANE4 V_RXKLOCKLANE4(1U)
+
+#define S_PCIE_RXKLOCKLANE3 19
+#define V_PCIE_RXKLOCKLANE3(x) ((x) << S_PCIE_RXKLOCKLANE3)
+#define F_PCIE_RXKLOCKLANE3 V_PCIE_RXKLOCKLANE3(1U)
+
+#define S_PCIE_RXKLOCKLANE2 18
+#define V_PCIE_RXKLOCKLANE2(x) ((x) << S_PCIE_RXKLOCKLANE2)
+#define F_PCIE_RXKLOCKLANE2 V_PCIE_RXKLOCKLANE2(1U)
+
+#define S_PCIE_RXKLOCKLANE1 17
+#define V_PCIE_RXKLOCKLANE1(x) ((x) << S_PCIE_RXKLOCKLANE1)
+#define F_PCIE_RXKLOCKLANE1 V_PCIE_RXKLOCKLANE1(1U)
+
+#define S_PCIE_RXKLOCKLANE0 16
+#define V_PCIE_RXKLOCKLANE0(x) ((x) << S_PCIE_RXKLOCKLANE0)
+#define F_PCIE_RXKLOCKLANE0 V_PCIE_RXKLOCKLANE0(1U)
+
+#define S_RXUFLOWLANE7 15
+#define V_RXUFLOWLANE7(x) ((x) << S_RXUFLOWLANE7)
+#define F_RXUFLOWLANE7 V_RXUFLOWLANE7(1U)
+
+#define S_RXUFLOWLANE6 14
+#define V_RXUFLOWLANE6(x) ((x) << S_RXUFLOWLANE6)
+#define F_RXUFLOWLANE6 V_RXUFLOWLANE6(1U)
+
+#define S_RXUFLOWLANE5 13
+#define V_RXUFLOWLANE5(x) ((x) << S_RXUFLOWLANE5)
+#define F_RXUFLOWLANE5 V_RXUFLOWLANE5(1U)
+
+#define S_RXUFLOWLANE4 12
+#define V_RXUFLOWLANE4(x) ((x) << S_RXUFLOWLANE4)
+#define F_RXUFLOWLANE4 V_RXUFLOWLANE4(1U)
+
+#define S_PCIE_RXUFLOWLANE3 11
+#define V_PCIE_RXUFLOWLANE3(x) ((x) << S_PCIE_RXUFLOWLANE3)
+#define F_PCIE_RXUFLOWLANE3 V_PCIE_RXUFLOWLANE3(1U)
+
+#define S_PCIE_RXUFLOWLANE2 10
+#define V_PCIE_RXUFLOWLANE2(x) ((x) << S_PCIE_RXUFLOWLANE2)
+#define F_PCIE_RXUFLOWLANE2 V_PCIE_RXUFLOWLANE2(1U)
+
+#define S_PCIE_RXUFLOWLANE1 9
+#define V_PCIE_RXUFLOWLANE1(x) ((x) << S_PCIE_RXUFLOWLANE1)
+#define F_PCIE_RXUFLOWLANE1 V_PCIE_RXUFLOWLANE1(1U)
+
+#define S_PCIE_RXUFLOWLANE0 8
+#define V_PCIE_RXUFLOWLANE0(x) ((x) << S_PCIE_RXUFLOWLANE0)
+#define F_PCIE_RXUFLOWLANE0 V_PCIE_RXUFLOWLANE0(1U)
+
+#define S_RXOFLOWLANE7 7
+#define V_RXOFLOWLANE7(x) ((x) << S_RXOFLOWLANE7)
+#define F_RXOFLOWLANE7 V_RXOFLOWLANE7(1U)
+
+#define S_RXOFLOWLANE6 6
+#define V_RXOFLOWLANE6(x) ((x) << S_RXOFLOWLANE6)
+#define F_RXOFLOWLANE6 V_RXOFLOWLANE6(1U)
+
+#define S_RXOFLOWLANE5 5
+#define V_RXOFLOWLANE5(x) ((x) << S_RXOFLOWLANE5)
+#define F_RXOFLOWLANE5 V_RXOFLOWLANE5(1U)
+
+#define S_RXOFLOWLANE4 4
+#define V_RXOFLOWLANE4(x) ((x) << S_RXOFLOWLANE4)
+#define F_RXOFLOWLANE4 V_RXOFLOWLANE4(1U)
+
+#define S_PCIE_RXOFLOWLANE3 3
+#define V_PCIE_RXOFLOWLANE3(x) ((x) << S_PCIE_RXOFLOWLANE3)
+#define F_PCIE_RXOFLOWLANE3 V_PCIE_RXOFLOWLANE3(1U)
+
+#define S_PCIE_RXOFLOWLANE2 2
+#define V_PCIE_RXOFLOWLANE2(x) ((x) << S_PCIE_RXOFLOWLANE2)
+#define F_PCIE_RXOFLOWLANE2 V_PCIE_RXOFLOWLANE2(1U)
+
+#define S_PCIE_RXOFLOWLANE1 1
+#define V_PCIE_RXOFLOWLANE1(x) ((x) << S_PCIE_RXOFLOWLANE1)
+#define F_PCIE_RXOFLOWLANE1 V_PCIE_RXOFLOWLANE1(1U)
+
+#define S_PCIE_RXOFLOWLANE0 0
+#define V_PCIE_RXOFLOWLANE0(x) ((x) << S_PCIE_RXOFLOWLANE0)
+#define F_PCIE_RXOFLOWLANE0 V_PCIE_RXOFLOWLANE0(1U)
+
+#define A_PCIE_SERDES_LANE_STAT 0xb8
+
+#define S_EXTBISTCHKERRCNT 8
+#define M_EXTBISTCHKERRCNT 0xffffff
+#define V_EXTBISTCHKERRCNT(x) ((x) << S_EXTBISTCHKERRCNT)
+#define G_EXTBISTCHKERRCNT(x) (((x) >> S_EXTBISTCHKERRCNT) & M_EXTBISTCHKERRCNT)
+
+#define S_EXTBISTCHKFMD 7
+#define V_EXTBISTCHKFMD(x) ((x) << S_EXTBISTCHKFMD)
+#define F_EXTBISTCHKFMD V_EXTBISTCHKFMD(1U)
+
+#define S_BEACONDETECTCHG 6
+#define V_BEACONDETECTCHG(x) ((x) << S_BEACONDETECTCHG)
+#define F_BEACONDETECTCHG V_BEACONDETECTCHG(1U)
+
+#define S_RXDETECTCHG 5
+#define V_RXDETECTCHG(x) ((x) << S_RXDETECTCHG)
+#define F_RXDETECTCHG V_RXDETECTCHG(1U)
+
+#define S_TXIDLEDETECTCHG 4
+#define V_TXIDLEDETECTCHG(x) ((x) << S_TXIDLEDETECTCHG)
+#define F_TXIDLEDETECTCHG V_TXIDLEDETECTCHG(1U)
+
+#define S_BEACONDETECT 2
+#define V_BEACONDETECT(x) ((x) << S_BEACONDETECT)
+#define F_BEACONDETECT V_BEACONDETECT(1U)
+
+#define S_RXDETECT 1
+#define V_RXDETECT(x) ((x) << S_RXDETECT)
+#define F_RXDETECT V_RXDETECT(1U)
+
+#define S_TXIDLEDETECT 0
+#define V_TXIDLEDETECT(x) ((x) << S_TXIDLEDETECT)
+#define F_TXIDLEDETECT V_TXIDLEDETECT(1U)
+
+#define A_PCIE_SERDES_STATUS2 0xb8
+
+#define S_TXRECDETLANE7 31
+#define V_TXRECDETLANE7(x) ((x) << S_TXRECDETLANE7)
+#define F_TXRECDETLANE7 V_TXRECDETLANE7(1U)
+
+#define S_TXRECDETLANE6 30
+#define V_TXRECDETLANE6(x) ((x) << S_TXRECDETLANE6)
+#define F_TXRECDETLANE6 V_TXRECDETLANE6(1U)
+
+#define S_TXRECDETLANE5 29
+#define V_TXRECDETLANE5(x) ((x) << S_TXRECDETLANE5)
+#define F_TXRECDETLANE5 V_TXRECDETLANE5(1U)
+
+#define S_TXRECDETLANE4 28
+#define V_TXRECDETLANE4(x) ((x) << S_TXRECDETLANE4)
+#define F_TXRECDETLANE4 V_TXRECDETLANE4(1U)
+
+#define S_TXRECDETLANE3 27
+#define V_TXRECDETLANE3(x) ((x) << S_TXRECDETLANE3)
+#define F_TXRECDETLANE3 V_TXRECDETLANE3(1U)
+
+#define S_TXRECDETLANE2 26
+#define V_TXRECDETLANE2(x) ((x) << S_TXRECDETLANE2)
+#define F_TXRECDETLANE2 V_TXRECDETLANE2(1U)
+
+#define S_TXRECDETLANE1 25
+#define V_TXRECDETLANE1(x) ((x) << S_TXRECDETLANE1)
+#define F_TXRECDETLANE1 V_TXRECDETLANE1(1U)
+
+#define S_TXRECDETLANE0 24
+#define V_TXRECDETLANE0(x) ((x) << S_TXRECDETLANE0)
+#define F_TXRECDETLANE0 V_TXRECDETLANE0(1U)
+
+#define S_RXEIDLANE7 23
+#define V_RXEIDLANE7(x) ((x) << S_RXEIDLANE7)
+#define F_RXEIDLANE7 V_RXEIDLANE7(1U)
+
+#define S_RXEIDLANE6 22
+#define V_RXEIDLANE6(x) ((x) << S_RXEIDLANE6)
+#define F_RXEIDLANE6 V_RXEIDLANE6(1U)
+
+#define S_RXEIDLANE5 21
+#define V_RXEIDLANE5(x) ((x) << S_RXEIDLANE5)
+#define F_RXEIDLANE5 V_RXEIDLANE5(1U)
+
+#define S_RXEIDLANE4 20
+#define V_RXEIDLANE4(x) ((x) << S_RXEIDLANE4)
+#define F_RXEIDLANE4 V_RXEIDLANE4(1U)
+
+#define S_RXEIDLANE3 19
+#define V_RXEIDLANE3(x) ((x) << S_RXEIDLANE3)
+#define F_RXEIDLANE3 V_RXEIDLANE3(1U)
+
+#define S_RXEIDLANE2 18
+#define V_RXEIDLANE2(x) ((x) << S_RXEIDLANE2)
+#define F_RXEIDLANE2 V_RXEIDLANE2(1U)
+
+#define S_RXEIDLANE1 17
+#define V_RXEIDLANE1(x) ((x) << S_RXEIDLANE1)
+#define F_RXEIDLANE1 V_RXEIDLANE1(1U)
+
+#define S_RXEIDLANE0 16
+#define V_RXEIDLANE0(x) ((x) << S_RXEIDLANE0)
+#define F_RXEIDLANE0 V_RXEIDLANE0(1U)
+
+#define S_RXREMSKIPLANE7 15
+#define V_RXREMSKIPLANE7(x) ((x) << S_RXREMSKIPLANE7)
+#define F_RXREMSKIPLANE7 V_RXREMSKIPLANE7(1U)
+
+#define S_RXREMSKIPLANE6 14
+#define V_RXREMSKIPLANE6(x) ((x) << S_RXREMSKIPLANE6)
+#define F_RXREMSKIPLANE6 V_RXREMSKIPLANE6(1U)
+
+#define S_RXREMSKIPLANE5 13
+#define V_RXREMSKIPLANE5(x) ((x) << S_RXREMSKIPLANE5)
+#define F_RXREMSKIPLANE5 V_RXREMSKIPLANE5(1U)
+
+#define S_RXREMSKIPLANE4 12
+#define V_RXREMSKIPLANE4(x) ((x) << S_RXREMSKIPLANE4)
+#define F_RXREMSKIPLANE4 V_RXREMSKIPLANE4(1U)
+
+#define S_PCIE_RXREMSKIPLANE3 11
+#define V_PCIE_RXREMSKIPLANE3(x) ((x) << S_PCIE_RXREMSKIPLANE3)
+#define F_PCIE_RXREMSKIPLANE3 V_PCIE_RXREMSKIPLANE3(1U)
+
+#define S_PCIE_RXREMSKIPLANE2 10
+#define V_PCIE_RXREMSKIPLANE2(x) ((x) << S_PCIE_RXREMSKIPLANE2)
+#define F_PCIE_RXREMSKIPLANE2 V_PCIE_RXREMSKIPLANE2(1U)
+
+#define S_PCIE_RXREMSKIPLANE1 9
+#define V_PCIE_RXREMSKIPLANE1(x) ((x) << S_PCIE_RXREMSKIPLANE1)
+#define F_PCIE_RXREMSKIPLANE1 V_PCIE_RXREMSKIPLANE1(1U)
+
+#define S_PCIE_RXREMSKIPLANE0 8
+#define V_PCIE_RXREMSKIPLANE0(x) ((x) << S_PCIE_RXREMSKIPLANE0)
+#define F_PCIE_RXREMSKIPLANE0 V_PCIE_RXREMSKIPLANE0(1U)
+
+#define S_RXADDSKIPLANE7 7
+#define V_RXADDSKIPLANE7(x) ((x) << S_RXADDSKIPLANE7)
+#define F_RXADDSKIPLANE7 V_RXADDSKIPLANE7(1U)
+
+#define S_RXADDSKIPLANE6 6
+#define V_RXADDSKIPLANE6(x) ((x) << S_RXADDSKIPLANE6)
+#define F_RXADDSKIPLANE6 V_RXADDSKIPLANE6(1U)
+
+#define S_RXADDSKIPLANE5 5
+#define V_RXADDSKIPLANE5(x) ((x) << S_RXADDSKIPLANE5)
+#define F_RXADDSKIPLANE5 V_RXADDSKIPLANE5(1U)
+
+#define S_RXADDSKIPLANE4 4
+#define V_RXADDSKIPLANE4(x) ((x) << S_RXADDSKIPLANE4)
+#define F_RXADDSKIPLANE4 V_RXADDSKIPLANE4(1U)
+
+#define S_PCIE_RXADDSKIPLANE3 3
+#define V_PCIE_RXADDSKIPLANE3(x) ((x) << S_PCIE_RXADDSKIPLANE3)
+#define F_PCIE_RXADDSKIPLANE3 V_PCIE_RXADDSKIPLANE3(1U)
+
+#define S_PCIE_RXADDSKIPLANE2 2
+#define V_PCIE_RXADDSKIPLANE2(x) ((x) << S_PCIE_RXADDSKIPLANE2)
+#define F_PCIE_RXADDSKIPLANE2 V_PCIE_RXADDSKIPLANE2(1U)
+
+#define S_PCIE_RXADDSKIPLANE1 1
+#define V_PCIE_RXADDSKIPLANE1(x) ((x) << S_PCIE_RXADDSKIPLANE1)
+#define F_PCIE_RXADDSKIPLANE1 V_PCIE_RXADDSKIPLANE1(1U)
+
+#define S_PCIE_RXADDSKIPLANE0 0
+#define V_PCIE_RXADDSKIPLANE0(x) ((x) << S_PCIE_RXADDSKIPLANE0)
+#define F_PCIE_RXADDSKIPLANE0 V_PCIE_RXADDSKIPLANE0(1U)
+
+#define A_PCIE_PEX_WMARK 0xbc
+
+#define S_P_WMARK 18
+#define M_P_WMARK 0x7ff
+#define V_P_WMARK(x) ((x) << S_P_WMARK)
+#define G_P_WMARK(x) (((x) >> S_P_WMARK) & M_P_WMARK)
+
+#define S_NP_WMARK 11
+#define M_NP_WMARK 0x7f
+#define V_NP_WMARK(x) ((x) << S_NP_WMARK)
+#define G_NP_WMARK(x) (((x) >> S_NP_WMARK) & M_NP_WMARK)
+
+#define S_CPL_WMARK 0
+#define M_CPL_WMARK 0x7ff
+#define V_CPL_WMARK(x) ((x) << S_CPL_WMARK)
+#define G_CPL_WMARK(x) (((x) >> S_CPL_WMARK) & M_CPL_WMARK)
+
+#define A_PCIE_SERDES_BIST 0xbc
+
+#define S_PCIE_BISTDONE 24
+#define M_PCIE_BISTDONE 0xff
+#define V_PCIE_BISTDONE(x) ((x) << S_PCIE_BISTDONE)
+#define G_PCIE_BISTDONE(x) (((x) >> S_PCIE_BISTDONE) & M_PCIE_BISTDONE)
+
+#define S_PCIE_BISTCYCLETHRESH 3
+#define M_PCIE_BISTCYCLETHRESH 0xffff
+#define V_PCIE_BISTCYCLETHRESH(x) ((x) << S_PCIE_BISTCYCLETHRESH)
+#define G_PCIE_BISTCYCLETHRESH(x) (((x) >> S_PCIE_BISTCYCLETHRESH) & M_PCIE_BISTCYCLETHRESH)
+
+#define S_BISTMODE 0
+#define M_BISTMODE 0x7
+#define V_BISTMODE(x) ((x) << S_BISTMODE)
+#define G_BISTMODE(x) (((x) >> S_BISTMODE) & M_BISTMODE)
+
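+/*
+ * Illustrative, fully hypothetical sequence: a SerDes BIST run would select
+ * a mode with V_BISTMODE() (mode encodings are not given here) and then
+ * watch the per-lane done mask, assuming t3_read_reg()/t3_write_reg():
+ *
+ *     t3_write_reg(adapter, A_PCIE_SERDES_BIST,
+ *                  V_BISTMODE(1) | V_PCIE_BISTCYCLETHRESH(0xffff));
+ *     while (G_PCIE_BISTDONE(t3_read_reg(adapter, A_PCIE_SERDES_BIST)) !=
+ *            M_PCIE_BISTDONE)
+ *             cpu_relax();
+ */
+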
+/* registers for module T3DBG */
+#define T3DBG_BASE_ADDR 0xc0
+
+#define A_T3DBG_DBG0_CFG 0xc0
+
+#define S_REGSELECT 9
+#define M_REGSELECT 0xff
+#define V_REGSELECT(x) ((x) << S_REGSELECT)
+#define G_REGSELECT(x) (((x) >> S_REGSELECT) & M_REGSELECT)
+
+#define S_MODULESELECT 4
+#define M_MODULESELECT 0x1f
+#define V_MODULESELECT(x) ((x) << S_MODULESELECT)
+#define G_MODULESELECT(x) (((x) >> S_MODULESELECT) & M_MODULESELECT)
+
+#define S_CLKSELECT 0
+#define M_CLKSELECT 0xf
+#define V_CLKSELECT(x) ((x) << S_CLKSELECT)
+#define G_CLKSELECT(x) (((x) >> S_CLKSELECT) & M_CLKSELECT)
+
+#define A_T3DBG_DBG0_EN 0xc4
+
+#define S_SDRBYTE0 8
+#define V_SDRBYTE0(x) ((x) << S_SDRBYTE0)
+#define F_SDRBYTE0 V_SDRBYTE0(1U)
+
+#define S_DDREN 4
+#define V_DDREN(x) ((x) << S_DDREN)
+#define F_DDREN V_DDREN(1U)
+
+#define S_PORTEN 0
+#define V_PORTEN(x) ((x) << S_PORTEN)
+#define F_PORTEN V_PORTEN(1U)
+
+#define A_T3DBG_DBG1_CFG 0xc8
+#define A_T3DBG_DBG1_EN 0xcc
+#define A_T3DBG_GPIO_EN 0xd0
+
+#define S_GPIO11_OEN 27
+#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
+#define F_GPIO11_OEN V_GPIO11_OEN(1U)
+
+#define S_GPIO10_OEN 26
+#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
+#define F_GPIO10_OEN V_GPIO10_OEN(1U)
+
+#define S_GPIO9_OEN 25
+#define V_GPIO9_OEN(x) ((x) << S_GPIO9_OEN)
+#define F_GPIO9_OEN V_GPIO9_OEN(1U)
+
+#define S_GPIO8_OEN 24
+#define V_GPIO8_OEN(x) ((x) << S_GPIO8_OEN)
+#define F_GPIO8_OEN V_GPIO8_OEN(1U)
+
+#define S_GPIO7_OEN 23
+#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
+#define F_GPIO7_OEN V_GPIO7_OEN(1U)
+
+#define S_GPIO6_OEN 22
+#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
+#define F_GPIO6_OEN V_GPIO6_OEN(1U)
+
+#define S_GPIO5_OEN 21
+#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
+#define F_GPIO5_OEN V_GPIO5_OEN(1U)
+
+#define S_GPIO4_OEN 20
+#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
+#define F_GPIO4_OEN V_GPIO4_OEN(1U)
+
+#define S_GPIO3_OEN 19
+#define V_GPIO3_OEN(x) ((x) << S_GPIO3_OEN)
+#define F_GPIO3_OEN V_GPIO3_OEN(1U)
+
+#define S_GPIO2_OEN 18
+#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
+#define F_GPIO2_OEN V_GPIO2_OEN(1U)
+
+#define S_GPIO1_OEN 17
+#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
+#define F_GPIO1_OEN V_GPIO1_OEN(1U)
+
+#define S_GPIO0_OEN 16
+#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
+#define F_GPIO0_OEN V_GPIO0_OEN(1U)
+
+#define S_GPIO11_OUT_VAL 11
+#define V_GPIO11_OUT_VAL(x) ((x) << S_GPIO11_OUT_VAL)
+#define F_GPIO11_OUT_VAL V_GPIO11_OUT_VAL(1U)
+
+#define S_GPIO10_OUT_VAL 10
+#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
+#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
+
+#define S_GPIO9_OUT_VAL 9
+#define V_GPIO9_OUT_VAL(x) ((x) << S_GPIO9_OUT_VAL)
+#define F_GPIO9_OUT_VAL V_GPIO9_OUT_VAL(1U)
+
+#define S_GPIO8_OUT_VAL 8
+#define V_GPIO8_OUT_VAL(x) ((x) << S_GPIO8_OUT_VAL)
+#define F_GPIO8_OUT_VAL V_GPIO8_OUT_VAL(1U)
+
+#define S_GPIO7_OUT_VAL 7
+#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
+#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
+
+#define S_GPIO6_OUT_VAL 6
+#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
+#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
+
+#define S_GPIO5_OUT_VAL 5
+#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
+#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
+
+#define S_GPIO4_OUT_VAL 4
+#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
+#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
+
+#define S_GPIO3_OUT_VAL 3
+#define V_GPIO3_OUT_VAL(x) ((x) << S_GPIO3_OUT_VAL)
+#define F_GPIO3_OUT_VAL V_GPIO3_OUT_VAL(1U)
+
+#define S_GPIO2_OUT_VAL 2
+#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
+#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
+
+#define S_GPIO1_OUT_VAL 1
+#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
+#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
+
+#define S_GPIO0_OUT_VAL 0
+#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
+#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+
+#define A_T3DBG_GPIO_IN 0xd4
+
+#define S_GPIO11_CHG_DET 27
+#define V_GPIO11_CHG_DET(x) ((x) << S_GPIO11_CHG_DET)
+#define F_GPIO11_CHG_DET V_GPIO11_CHG_DET(1U)
+
+#define S_GPIO10_CHG_DET 26
+#define V_GPIO10_CHG_DET(x) ((x) << S_GPIO10_CHG_DET)
+#define F_GPIO10_CHG_DET V_GPIO10_CHG_DET(1U)
+
+#define S_GPIO9_CHG_DET 25
+#define V_GPIO9_CHG_DET(x) ((x) << S_GPIO9_CHG_DET)
+#define F_GPIO9_CHG_DET V_GPIO9_CHG_DET(1U)
+
+#define S_GPIO8_CHG_DET 24
+#define V_GPIO8_CHG_DET(x) ((x) << S_GPIO8_CHG_DET)
+#define F_GPIO8_CHG_DET V_GPIO8_CHG_DET(1U)
+
+#define S_GPIO7_CHG_DET 23
+#define V_GPIO7_CHG_DET(x) ((x) << S_GPIO7_CHG_DET)
+#define F_GPIO7_CHG_DET V_GPIO7_CHG_DET(1U)
+
+#define S_GPIO6_CHG_DET 22
+#define V_GPIO6_CHG_DET(x) ((x) << S_GPIO6_CHG_DET)
+#define F_GPIO6_CHG_DET V_GPIO6_CHG_DET(1U)
+
+#define S_GPIO5_CHG_DET 21
+#define V_GPIO5_CHG_DET(x) ((x) << S_GPIO5_CHG_DET)
+#define F_GPIO5_CHG_DET V_GPIO5_CHG_DET(1U)
+
+#define S_GPIO4_CHG_DET 20
+#define V_GPIO4_CHG_DET(x) ((x) << S_GPIO4_CHG_DET)
+#define F_GPIO4_CHG_DET V_GPIO4_CHG_DET(1U)
+
+#define S_GPIO3_CHG_DET 19
+#define V_GPIO3_CHG_DET(x) ((x) << S_GPIO3_CHG_DET)
+#define F_GPIO3_CHG_DET V_GPIO3_CHG_DET(1U)
+
+#define S_GPIO2_CHG_DET 18
+#define V_GPIO2_CHG_DET(x) ((x) << S_GPIO2_CHG_DET)
+#define F_GPIO2_CHG_DET V_GPIO2_CHG_DET(1U)
+
+#define S_GPIO1_CHG_DET 17
+#define V_GPIO1_CHG_DET(x) ((x) << S_GPIO1_CHG_DET)
+#define F_GPIO1_CHG_DET V_GPIO1_CHG_DET(1U)
+
+#define S_GPIO0_CHG_DET 16
+#define V_GPIO0_CHG_DET(x) ((x) << S_GPIO0_CHG_DET)
+#define F_GPIO0_CHG_DET V_GPIO0_CHG_DET(1U)
+
+#define S_GPIO11_IN 11
+#define V_GPIO11_IN(x) ((x) << S_GPIO11_IN)
+#define F_GPIO11_IN V_GPIO11_IN(1U)
+
+#define S_GPIO10_IN 10
+#define V_GPIO10_IN(x) ((x) << S_GPIO10_IN)
+#define F_GPIO10_IN V_GPIO10_IN(1U)
+
+#define S_GPIO9_IN 9
+#define V_GPIO9_IN(x) ((x) << S_GPIO9_IN)
+#define F_GPIO9_IN V_GPIO9_IN(1U)
+
+#define S_GPIO8_IN 8
+#define V_GPIO8_IN(x) ((x) << S_GPIO8_IN)
+#define F_GPIO8_IN V_GPIO8_IN(1U)
+
+#define S_GPIO7_IN 7
+#define V_GPIO7_IN(x) ((x) << S_GPIO7_IN)
+#define F_GPIO7_IN V_GPIO7_IN(1U)
+
+#define S_GPIO6_IN 6
+#define V_GPIO6_IN(x) ((x) << S_GPIO6_IN)
+#define F_GPIO6_IN V_GPIO6_IN(1U)
+
+#define S_GPIO5_IN 5
+#define V_GPIO5_IN(x) ((x) << S_GPIO5_IN)
+#define F_GPIO5_IN V_GPIO5_IN(1U)
+
+#define S_GPIO4_IN 4
+#define V_GPIO4_IN(x) ((x) << S_GPIO4_IN)
+#define F_GPIO4_IN V_GPIO4_IN(1U)
+
+#define S_GPIO3_IN 3
+#define V_GPIO3_IN(x) ((x) << S_GPIO3_IN)
+#define F_GPIO3_IN V_GPIO3_IN(1U)
+
+#define S_GPIO2_IN 2
+#define V_GPIO2_IN(x) ((x) << S_GPIO2_IN)
+#define F_GPIO2_IN V_GPIO2_IN(1U)
+
+#define S_GPIO1_IN 1
+#define V_GPIO1_IN(x) ((x) << S_GPIO1_IN)
+#define F_GPIO1_IN V_GPIO1_IN(1U)
+
+#define S_GPIO0_IN 0
+#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN)
+#define F_GPIO0_IN V_GPIO0_IN(1U)
+
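+/*
+ * Illustrative sketch: to drive GPIO 5 high, enable its output and set its
+ * value in A_T3DBG_GPIO_EN; inputs are sampled through A_T3DBG_GPIO_IN.
+ * Assumes the driver's t3_set_reg_field() and t3_read_reg() helpers:
+ *
+ *     int gpio3_high;
+ *
+ *     t3_set_reg_field(adapter, A_T3DBG_GPIO_EN,
+ *                      F_GPIO5_OEN | F_GPIO5_OUT_VAL,
+ *                      F_GPIO5_OEN | F_GPIO5_OUT_VAL);
+ *     gpio3_high = !!(t3_read_reg(adapter, A_T3DBG_GPIO_IN) & F_GPIO3_IN);
+ */
+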
+#define A_T3DBG_INT_ENABLE 0xd8
+
+#define S_C_LOCK 21
+#define V_C_LOCK(x) ((x) << S_C_LOCK)
+#define F_C_LOCK V_C_LOCK(1U)
+
+#define S_M_LOCK 20
+#define V_M_LOCK(x) ((x) << S_M_LOCK)
+#define F_M_LOCK V_M_LOCK(1U)
+
+#define S_U_LOCK 19
+#define V_U_LOCK(x) ((x) << S_U_LOCK)
+#define F_U_LOCK V_U_LOCK(1U)
+
+#define S_R_LOCK 18
+#define V_R_LOCK(x) ((x) << S_R_LOCK)
+#define F_R_LOCK V_R_LOCK(1U)
+
+#define S_PX_LOCK 17
+#define V_PX_LOCK(x) ((x) << S_PX_LOCK)
+#define F_PX_LOCK V_PX_LOCK(1U)
+
+#define S_GPIO11 11
+#define V_GPIO11(x) ((x) << S_GPIO11)
+#define F_GPIO11 V_GPIO11(1U)
+
+#define S_GPIO10 10
+#define V_GPIO10(x) ((x) << S_GPIO10)
+#define F_GPIO10 V_GPIO10(1U)
+
+#define S_GPIO9 9
+#define V_GPIO9(x) ((x) << S_GPIO9)
+#define F_GPIO9 V_GPIO9(1U)
+
+#define S_GPIO8 8
+#define V_GPIO8(x) ((x) << S_GPIO8)
+#define F_GPIO8 V_GPIO8(1U)
+
+#define S_GPIO7 7
+#define V_GPIO7(x) ((x) << S_GPIO7)
+#define F_GPIO7 V_GPIO7(1U)
#define S_GPIO6 6
#define V_GPIO6(x) ((x) << S_GPIO6)
#define V_GPIO0(x) ((x) << S_GPIO0)
#define F_GPIO0 V_GPIO0(1U)
+#define S_PE_LOCK 16
+#define V_PE_LOCK(x) ((x) << S_PE_LOCK)
+#define F_PE_LOCK V_PE_LOCK(1U)
+
#define A_T3DBG_INT_CAUSE 0xdc
+#define A_T3DBG_DBG0_RST_VALUE 0xe0
+
+#define S_DEBUGDATA 0
+#define M_DEBUGDATA 0xff
+#define V_DEBUGDATA(x) ((x) << S_DEBUGDATA)
+#define G_DEBUGDATA(x) (((x) >> S_DEBUGDATA) & M_DEBUGDATA)
+
+#define A_T3DBG_PLL_OCLK_PAD_EN 0xe4
+
+#define S_PCIE_OCLK_EN 20
+#define V_PCIE_OCLK_EN(x) ((x) << S_PCIE_OCLK_EN)
+#define F_PCIE_OCLK_EN V_PCIE_OCLK_EN(1U)
+
+#define S_PCLKTREE_DBG_EN 17
+#define V_PCLKTREE_DBG_EN(x) ((x) << S_PCLKTREE_DBG_EN)
+#define F_PCLKTREE_DBG_EN V_PCLKTREE_DBG_EN(1U)
+
+#define S_PCIX_OCLK_EN 16
+#define V_PCIX_OCLK_EN(x) ((x) << S_PCIX_OCLK_EN)
+#define F_PCIX_OCLK_EN V_PCIX_OCLK_EN(1U)
+
+#define S_U_OCLK_EN 12
+#define V_U_OCLK_EN(x) ((x) << S_U_OCLK_EN)
+#define F_U_OCLK_EN V_U_OCLK_EN(1U)
+
+#define S_R_OCLK_EN 8
+#define V_R_OCLK_EN(x) ((x) << S_R_OCLK_EN)
+#define F_R_OCLK_EN V_R_OCLK_EN(1U)
+
+#define S_M_OCLK_EN 4
+#define V_M_OCLK_EN(x) ((x) << S_M_OCLK_EN)
+#define F_M_OCLK_EN V_M_OCLK_EN(1U)
+
+#define S_C_OCLK_EN 0
+#define V_C_OCLK_EN(x) ((x) << S_C_OCLK_EN)
+#define F_C_OCLK_EN V_C_OCLK_EN(1U)
+
+#define A_T3DBG_PLL_LOCK 0xe8
+
+#define S_PCIX_LOCK 16
+#define V_PCIX_LOCK(x) ((x) << S_PCIX_LOCK)
+#define F_PCIX_LOCK V_PCIX_LOCK(1U)
+
+#define S_PLL_U_LOCK 12
+#define V_PLL_U_LOCK(x) ((x) << S_PLL_U_LOCK)
+#define F_PLL_U_LOCK V_PLL_U_LOCK(1U)
+
+#define S_PLL_R_LOCK 8
+#define V_PLL_R_LOCK(x) ((x) << S_PLL_R_LOCK)
+#define F_PLL_R_LOCK V_PLL_R_LOCK(1U)
+
+#define S_PLL_M_LOCK 4
+#define V_PLL_M_LOCK(x) ((x) << S_PLL_M_LOCK)
+#define F_PLL_M_LOCK V_PLL_M_LOCK(1U)
+
+#define S_PLL_C_LOCK 0
+#define V_PLL_C_LOCK(x) ((x) << S_PLL_C_LOCK)
+#define F_PLL_C_LOCK V_PLL_C_LOCK(1U)
+
+#define S_PCIE_LOCK 20
+#define V_PCIE_LOCK(x) ((x) << S_PCIE_LOCK)
+#define F_PCIE_LOCK V_PCIE_LOCK(1U)
+
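+/*
+ * Illustrative sketch: A_T3DBG_PLL_LOCK collects the per-domain lock bits,
+ * so a bring-up path can verify every clock domain before proceeding.
+ * Hypothetical helper, assuming t3_read_reg():
+ *
+ *     static int t3_plls_locked(struct adapter *adap)
+ *     {
+ *             u32 locks = F_PLL_C_LOCK | F_PLL_M_LOCK | F_PLL_R_LOCK |
+ *                         F_PLL_U_LOCK | F_PCIX_LOCK;
+ *
+ *             return (t3_read_reg(adap, A_T3DBG_PLL_LOCK) & locks) == locks;
+ *     }
+ */
+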
+#define A_T3DBG_SERDES_RBC_CFG 0xec
+
+#define S_X_RBC_LANE_SEL 16
+#define M_X_RBC_LANE_SEL 0x3
+#define V_X_RBC_LANE_SEL(x) ((x) << S_X_RBC_LANE_SEL)
+#define G_X_RBC_LANE_SEL(x) (((x) >> S_X_RBC_LANE_SEL) & M_X_RBC_LANE_SEL)
+
+#define S_X_RBC_DBG_EN 12
+#define V_X_RBC_DBG_EN(x) ((x) << S_X_RBC_DBG_EN)
+#define F_X_RBC_DBG_EN V_X_RBC_DBG_EN(1U)
+
+#define S_X_SERDES_SEL 8
+#define V_X_SERDES_SEL(x) ((x) << S_X_SERDES_SEL)
+#define F_X_SERDES_SEL V_X_SERDES_SEL(1U)
+
+#define S_PE_RBC_LANE_SEL 4
+#define M_PE_RBC_LANE_SEL 0x7
+#define V_PE_RBC_LANE_SEL(x) ((x) << S_PE_RBC_LANE_SEL)
+#define G_PE_RBC_LANE_SEL(x) (((x) >> S_PE_RBC_LANE_SEL) & M_PE_RBC_LANE_SEL)
+
+#define S_PE_RBC_DBG_EN 0
+#define V_PE_RBC_DBG_EN(x) ((x) << S_PE_RBC_DBG_EN)
+#define F_PE_RBC_DBG_EN V_PE_RBC_DBG_EN(1U)
#define A_T3DBG_GPIO_ACT_LOW 0xf0
+#define S_C_LOCK_ACT_LOW 21
+#define V_C_LOCK_ACT_LOW(x) ((x) << S_C_LOCK_ACT_LOW)
+#define F_C_LOCK_ACT_LOW V_C_LOCK_ACT_LOW(1U)
+
+#define S_M_LOCK_ACT_LOW 20
+#define V_M_LOCK_ACT_LOW(x) ((x) << S_M_LOCK_ACT_LOW)
+#define F_M_LOCK_ACT_LOW V_M_LOCK_ACT_LOW(1U)
+
+#define S_U_LOCK_ACT_LOW 19
+#define V_U_LOCK_ACT_LOW(x) ((x) << S_U_LOCK_ACT_LOW)
+#define F_U_LOCK_ACT_LOW V_U_LOCK_ACT_LOW(1U)
+
+#define S_R_LOCK_ACT_LOW 18
+#define V_R_LOCK_ACT_LOW(x) ((x) << S_R_LOCK_ACT_LOW)
+#define F_R_LOCK_ACT_LOW V_R_LOCK_ACT_LOW(1U)
+
+#define S_PX_LOCK_ACT_LOW 17
+#define V_PX_LOCK_ACT_LOW(x) ((x) << S_PX_LOCK_ACT_LOW)
+#define F_PX_LOCK_ACT_LOW V_PX_LOCK_ACT_LOW(1U)
+
+#define S_GPIO11_ACT_LOW 11
+#define V_GPIO11_ACT_LOW(x) ((x) << S_GPIO11_ACT_LOW)
+#define F_GPIO11_ACT_LOW V_GPIO11_ACT_LOW(1U)
+
+#define S_GPIO10_ACT_LOW 10
+#define V_GPIO10_ACT_LOW(x) ((x) << S_GPIO10_ACT_LOW)
+#define F_GPIO10_ACT_LOW V_GPIO10_ACT_LOW(1U)
+
+#define S_GPIO9_ACT_LOW 9
+#define V_GPIO9_ACT_LOW(x) ((x) << S_GPIO9_ACT_LOW)
+#define F_GPIO9_ACT_LOW V_GPIO9_ACT_LOW(1U)
+
+#define S_GPIO8_ACT_LOW 8
+#define V_GPIO8_ACT_LOW(x) ((x) << S_GPIO8_ACT_LOW)
+#define F_GPIO8_ACT_LOW V_GPIO8_ACT_LOW(1U)
+
+#define S_GPIO7_ACT_LOW 7
+#define V_GPIO7_ACT_LOW(x) ((x) << S_GPIO7_ACT_LOW)
+#define F_GPIO7_ACT_LOW V_GPIO7_ACT_LOW(1U)
+
+#define S_GPIO6_ACT_LOW 6
+#define V_GPIO6_ACT_LOW(x) ((x) << S_GPIO6_ACT_LOW)
+#define F_GPIO6_ACT_LOW V_GPIO6_ACT_LOW(1U)
+
+#define S_GPIO5_ACT_LOW 5
+#define V_GPIO5_ACT_LOW(x) ((x) << S_GPIO5_ACT_LOW)
+#define F_GPIO5_ACT_LOW V_GPIO5_ACT_LOW(1U)
+
+#define S_GPIO4_ACT_LOW 4
+#define V_GPIO4_ACT_LOW(x) ((x) << S_GPIO4_ACT_LOW)
+#define F_GPIO4_ACT_LOW V_GPIO4_ACT_LOW(1U)
+
+#define S_GPIO3_ACT_LOW 3
+#define V_GPIO3_ACT_LOW(x) ((x) << S_GPIO3_ACT_LOW)
+#define F_GPIO3_ACT_LOW V_GPIO3_ACT_LOW(1U)
+
+#define S_GPIO2_ACT_LOW 2
+#define V_GPIO2_ACT_LOW(x) ((x) << S_GPIO2_ACT_LOW)
+#define F_GPIO2_ACT_LOW V_GPIO2_ACT_LOW(1U)
+
+#define S_GPIO1_ACT_LOW 1
+#define V_GPIO1_ACT_LOW(x) ((x) << S_GPIO1_ACT_LOW)
+#define F_GPIO1_ACT_LOW V_GPIO1_ACT_LOW(1U)
+
+#define S_GPIO0_ACT_LOW 0
+#define V_GPIO0_ACT_LOW(x) ((x) << S_GPIO0_ACT_LOW)
+#define F_GPIO0_ACT_LOW V_GPIO0_ACT_LOW(1U)
+
+#define S_PE_LOCK_ACT_LOW 16
+#define V_PE_LOCK_ACT_LOW(x) ((x) << S_PE_LOCK_ACT_LOW)
+#define F_PE_LOCK_ACT_LOW V_PE_LOCK_ACT_LOW(1U)
+
+#define A_T3DBG_PMON_CFG 0xf4
+
+#define S_PMON_DONE 29
+#define V_PMON_DONE(x) ((x) << S_PMON_DONE)
+#define F_PMON_DONE V_PMON_DONE(1U)
+
+#define S_PMON_FAIL 28
+#define V_PMON_FAIL(x) ((x) << S_PMON_FAIL)
+#define F_PMON_FAIL V_PMON_FAIL(1U)
+
+#define S_PMON_FDEL_AUTO 22
+#define M_PMON_FDEL_AUTO 0x3f
+#define V_PMON_FDEL_AUTO(x) ((x) << S_PMON_FDEL_AUTO)
+#define G_PMON_FDEL_AUTO(x) (((x) >> S_PMON_FDEL_AUTO) & M_PMON_FDEL_AUTO)
+
+#define S_PMON_CDEL_AUTO 16
+#define M_PMON_CDEL_AUTO 0x3f
+#define V_PMON_CDEL_AUTO(x) ((x) << S_PMON_CDEL_AUTO)
+#define G_PMON_CDEL_AUTO(x) (((x) >> S_PMON_CDEL_AUTO) & M_PMON_CDEL_AUTO)
+
+#define S_PMON_FDEL_MANUAL 10
+#define M_PMON_FDEL_MANUAL 0x3f
+#define V_PMON_FDEL_MANUAL(x) ((x) << S_PMON_FDEL_MANUAL)
+#define G_PMON_FDEL_MANUAL(x) (((x) >> S_PMON_FDEL_MANUAL) & M_PMON_FDEL_MANUAL)
+
+#define S_PMON_CDEL_MANUAL 4
+#define M_PMON_CDEL_MANUAL 0x3f
+#define V_PMON_CDEL_MANUAL(x) ((x) << S_PMON_CDEL_MANUAL)
+#define G_PMON_CDEL_MANUAL(x) (((x) >> S_PMON_CDEL_MANUAL) & M_PMON_CDEL_MANUAL)
+
+#define S_PMON_MANUAL 1
+#define V_PMON_MANUAL(x) ((x) << S_PMON_MANUAL)
+#define F_PMON_MANUAL V_PMON_MANUAL(1U)
+
+#define S_PMON_AUTO 0
+#define V_PMON_AUTO(x) ((x) << S_PMON_AUTO)
+#define F_PMON_AUTO V_PMON_AUTO(1U)
+
+#define A_T3DBG_SERDES_REFCLK_CFG 0xf8
+
+#define S_PE_REFCLK_DBG_EN 12
+#define V_PE_REFCLK_DBG_EN(x) ((x) << S_PE_REFCLK_DBG_EN)
+#define F_PE_REFCLK_DBG_EN V_PE_REFCLK_DBG_EN(1U)
+
+#define S_X_REFCLK_DBG_EN 8
+#define V_X_REFCLK_DBG_EN(x) ((x) << S_X_REFCLK_DBG_EN)
+#define F_X_REFCLK_DBG_EN V_X_REFCLK_DBG_EN(1U)
+
+#define S_PE_REFCLK_TERMADJ 5
+#define M_PE_REFCLK_TERMADJ 0x3
+#define V_PE_REFCLK_TERMADJ(x) ((x) << S_PE_REFCLK_TERMADJ)
+#define G_PE_REFCLK_TERMADJ(x) (((x) >> S_PE_REFCLK_TERMADJ) & M_PE_REFCLK_TERMADJ)
+
+#define S_PE_REFCLK_PD 4
+#define V_PE_REFCLK_PD(x) ((x) << S_PE_REFCLK_PD)
+#define F_PE_REFCLK_PD V_PE_REFCLK_PD(1U)
+
+#define S_X_REFCLK_TERMADJ 1
+#define M_X_REFCLK_TERMADJ 0x3
+#define V_X_REFCLK_TERMADJ(x) ((x) << S_X_REFCLK_TERMADJ)
+#define G_X_REFCLK_TERMADJ(x) (((x) >> S_X_REFCLK_TERMADJ) & M_X_REFCLK_TERMADJ)
+
+#define S_X_REFCLK_PD 0
+#define V_X_REFCLK_PD(x) ((x) << S_X_REFCLK_PD)
+#define F_X_REFCLK_PD V_X_REFCLK_PD(1U)
+
+#define A_T3DBG_PCIE_PMA_BSPIN_CFG 0xfc
+
+#define S_BSMODEQUAD1 31
+#define V_BSMODEQUAD1(x) ((x) << S_BSMODEQUAD1)
+#define F_BSMODEQUAD1 V_BSMODEQUAD1(1U)
+
+#define S_BSINSELLANE7 29
+#define M_BSINSELLANE7 0x3
+#define V_BSINSELLANE7(x) ((x) << S_BSINSELLANE7)
+#define G_BSINSELLANE7(x) (((x) >> S_BSINSELLANE7) & M_BSINSELLANE7)
+
+#define S_BSENLANE7 28
+#define V_BSENLANE7(x) ((x) << S_BSENLANE7)
+#define F_BSENLANE7 V_BSENLANE7(1U)
+
+#define S_BSINSELLANE6 25
+#define M_BSINSELLANE6 0x3
+#define V_BSINSELLANE6(x) ((x) << S_BSINSELLANE6)
+#define G_BSINSELLANE6(x) (((x) >> S_BSINSELLANE6) & M_BSINSELLANE6)
+
+#define S_BSENLANE6 24
+#define V_BSENLANE6(x) ((x) << S_BSENLANE6)
+#define F_BSENLANE6 V_BSENLANE6(1U)
+
+#define S_BSINSELLANE5 21
+#define M_BSINSELLANE5 0x3
+#define V_BSINSELLANE5(x) ((x) << S_BSINSELLANE5)
+#define G_BSINSELLANE5(x) (((x) >> S_BSINSELLANE5) & M_BSINSELLANE5)
+
+#define S_BSENLANE5 20
+#define V_BSENLANE5(x) ((x) << S_BSENLANE5)
+#define F_BSENLANE5 V_BSENLANE5(1U)
+
+#define S_BSINSELLANE4 17
+#define M_BSINSELLANE4 0x3
+#define V_BSINSELLANE4(x) ((x) << S_BSINSELLANE4)
+#define G_BSINSELLANE4(x) (((x) >> S_BSINSELLANE4) & M_BSINSELLANE4)
+
+#define S_BSENLANE4 16
+#define V_BSENLANE4(x) ((x) << S_BSENLANE4)
+#define F_BSENLANE4 V_BSENLANE4(1U)
+
+#define S_BSMODEQUAD0 15
+#define V_BSMODEQUAD0(x) ((x) << S_BSMODEQUAD0)
+#define F_BSMODEQUAD0 V_BSMODEQUAD0(1U)
+
+#define S_BSINSELLANE3 13
+#define M_BSINSELLANE3 0x3
+#define V_BSINSELLANE3(x) ((x) << S_BSINSELLANE3)
+#define G_BSINSELLANE3(x) (((x) >> S_BSINSELLANE3) & M_BSINSELLANE3)
+
+#define S_BSENLANE3 12
+#define V_BSENLANE3(x) ((x) << S_BSENLANE3)
+#define F_BSENLANE3 V_BSENLANE3(1U)
+
+#define S_BSINSELLANE2 9
+#define M_BSINSELLANE2 0x3
+#define V_BSINSELLANE2(x) ((x) << S_BSINSELLANE2)
+#define G_BSINSELLANE2(x) (((x) >> S_BSINSELLANE2) & M_BSINSELLANE2)
+
+#define S_BSENLANE2 8
+#define V_BSENLANE2(x) ((x) << S_BSENLANE2)
+#define F_BSENLANE2 V_BSENLANE2(1U)
+
+#define S_BSINSELLANE1 5
+#define M_BSINSELLANE1 0x3
+#define V_BSINSELLANE1(x) ((x) << S_BSINSELLANE1)
+#define G_BSINSELLANE1(x) (((x) >> S_BSINSELLANE1) & M_BSINSELLANE1)
+
+#define S_BSENLANE1 4
+#define V_BSENLANE1(x) ((x) << S_BSENLANE1)
+#define F_BSENLANE1 V_BSENLANE1(1U)
+
+#define S_BSINSELLANE0 1
+#define M_BSINSELLANE0 0x3
+#define V_BSINSELLANE0(x) ((x) << S_BSINSELLANE0)
+#define G_BSINSELLANE0(x) (((x) >> S_BSINSELLANE0) & M_BSINSELLANE0)
+
+#define S_BSENLANE0 0
+#define V_BSENLANE0(x) ((x) << S_BSENLANE0)
+#define F_BSENLANE0 V_BSENLANE0(1U)
+
+/* registers for module MC7_PMRX */
#define MC7_PMRX_BASE_ADDR 0x100
#define A_MC7_CFG 0x100
+#define S_IMPSETUPDATE 14
+#define V_IMPSETUPDATE(x) ((x) << S_IMPSETUPDATE)
+#define F_IMPSETUPDATE V_IMPSETUPDATE(1U)
+
#define S_IFEN 13
#define V_IFEN(x) ((x) << S_IFEN)
#define F_IFEN V_IFEN(1U)
+#define S_TERM300 12
+#define V_TERM300(x) ((x) << S_TERM300)
+#define F_TERM300 V_TERM300(1U)
+
#define S_TERM150 11
#define V_TERM150(x) ((x) << S_TERM150)
#define F_TERM150 V_TERM150(1U)
#define V_WIDTH(x) ((x) << S_WIDTH)
#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
+#define S_ODTEN 7
+#define V_ODTEN(x) ((x) << S_ODTEN)
+#define F_ODTEN V_ODTEN(1U)
+
#define S_BKS 6
#define V_BKS(x) ((x) << S_BKS)
#define F_BKS V_BKS(1U)
#define A_MC7_MODE 0x104
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
+#define S_MODE 0
+#define M_MODE 0xffff
+#define V_MODE(x) ((x) << S_MODE)
+#define G_MODE(x) (((x) >> S_MODE) & M_MODE)
#define A_MC7_EXT_MODE1 0x108
+#define S_OCDADJUSTMODE 20
+#define V_OCDADJUSTMODE(x) ((x) << S_OCDADJUSTMODE)
+#define F_OCDADJUSTMODE V_OCDADJUSTMODE(1U)
+
+#define S_OCDCODE 16
+#define M_OCDCODE 0xf
+#define V_OCDCODE(x) ((x) << S_OCDCODE)
+#define G_OCDCODE(x) (((x) >> S_OCDCODE) & M_OCDCODE)
+
+#define S_EXTMODE1 0
+#define M_EXTMODE1 0xffff
+#define V_EXTMODE1(x) ((x) << S_EXTMODE1)
+#define G_EXTMODE1(x) (((x) >> S_EXTMODE1) & M_EXTMODE1)
+
#define A_MC7_EXT_MODE2 0x10c
+#define S_EXTMODE2 0
+#define M_EXTMODE2 0xffff
+#define V_EXTMODE2(x) ((x) << S_EXTMODE2)
+#define G_EXTMODE2(x) (((x) >> S_EXTMODE2) & M_EXTMODE2)
+
#define A_MC7_EXT_MODE3 0x110
-#define A_MC7_PRE 0x114
+#define S_EXTMODE3 0
+#define M_EXTMODE3 0xffff
+#define V_EXTMODE3(x) ((x) << S_EXTMODE3)
+#define G_EXTMODE3(x) (((x) >> S_EXTMODE3) & M_EXTMODE3)
+
+#define A_MC7_PRE 0x114
#define A_MC7_REF 0x118
#define S_PREREFDIV 1
#define M_PREREFDIV 0x3fff
#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
+#define G_PREREFDIV(x) (((x) >> S_PREREFDIV) & M_PREREFDIV)
#define S_PERREFEN 0
#define V_PERREFEN(x) ((x) << S_PERREFEN)
#define A_MC7_DLL 0x11c
+#define S_DLLLOCK 31
+#define V_DLLLOCK(x) ((x) << S_DLLLOCK)
+#define F_DLLLOCK V_DLLLOCK(1U)
+
+#define S_DLLDELTA 24
+#define M_DLLDELTA 0x7f
+#define V_DLLDELTA(x) ((x) << S_DLLDELTA)
+#define G_DLLDELTA(x) (((x) >> S_DLLDELTA) & M_DLLDELTA)
+
+#define S_MANDELTA 3
+#define M_MANDELTA 0x7f
+#define V_MANDELTA(x) ((x) << S_MANDELTA)
+#define G_MANDELTA(x) (((x) >> S_MANDELTA) & M_MANDELTA)
+
+#define S_DLLDELTASEL 2
+#define V_DLLDELTASEL(x) ((x) << S_DLLDELTASEL)
+#define F_DLLDELTASEL V_DLLDELTASEL(1U)
+
#define S_DLLENB 1
#define V_DLLENB(x) ((x) << S_DLLENB)
#define F_DLLENB V_DLLENB(1U)
#define S_ACTTOPREDLY 26
#define M_ACTTOPREDLY 0xf
#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
+#define G_ACTTOPREDLY(x) (((x) >> S_ACTTOPREDLY) & M_ACTTOPREDLY)
#define S_ACTTORDWRDLY 23
#define M_ACTTORDWRDLY 0x7
#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
+#define G_ACTTORDWRDLY(x) (((x) >> S_ACTTORDWRDLY) & M_ACTTORDWRDLY)
#define S_PRECYC 20
#define M_PRECYC 0x7
#define V_PRECYC(x) ((x) << S_PRECYC)
+#define G_PRECYC(x) (((x) >> S_PRECYC) & M_PRECYC)
#define S_REFCYC 13
#define M_REFCYC 0x7f
#define V_REFCYC(x) ((x) << S_REFCYC)
+#define G_REFCYC(x) (((x) >> S_REFCYC) & M_REFCYC)
#define S_BKCYC 8
#define M_BKCYC 0x1f
#define V_BKCYC(x) ((x) << S_BKCYC)
+#define G_BKCYC(x) (((x) >> S_BKCYC) & M_BKCYC)
#define S_WRTORDDLY 4
#define M_WRTORDDLY 0xf
#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
+#define G_WRTORDDLY(x) (((x) >> S_WRTORDDLY) & M_WRTORDDLY)
#define S_RDTOWRDLY 0
#define M_RDTOWRDLY 0xf
#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
+#define G_RDTOWRDLY(x) (((x) >> S_RDTOWRDLY) & M_RDTOWRDLY)
-#define A_MC7_CAL 0x128
+#define A_MC7_HWM_WRR 0x124
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
+#define S_MEM_HWM 26
+#define M_MEM_HWM 0x3f
+#define V_MEM_HWM(x) ((x) << S_MEM_HWM)
+#define G_MEM_HWM(x) (((x) >> S_MEM_HWM) & M_MEM_HWM)
+
+#define S_ULP_HWM 22
+#define M_ULP_HWM 0xf
+#define V_ULP_HWM(x) ((x) << S_ULP_HWM)
+#define G_ULP_HWM(x) (((x) >> S_ULP_HWM) & M_ULP_HWM)
+
+#define S_TOT_RLD_WT 14
+#define M_TOT_RLD_WT 0xff
+#define V_TOT_RLD_WT(x) ((x) << S_TOT_RLD_WT)
+#define G_TOT_RLD_WT(x) (((x) >> S_TOT_RLD_WT) & M_TOT_RLD_WT)
+
+#define S_MEM_RLD_WT 7
+#define M_MEM_RLD_WT 0x7f
+#define V_MEM_RLD_WT(x) ((x) << S_MEM_RLD_WT)
+#define G_MEM_RLD_WT(x) (((x) >> S_MEM_RLD_WT) & M_MEM_RLD_WT)
+
+#define S_ULP_RLD_WT 0
+#define M_ULP_RLD_WT 0x7f
+#define V_ULP_RLD_WT(x) ((x) << S_ULP_RLD_WT)
+#define G_ULP_RLD_WT(x) (((x) >> S_ULP_RLD_WT) & M_ULP_RLD_WT)
+
+#define A_MC7_CAL 0x128
#define S_BUSY 31
#define V_BUSY(x) ((x) << S_BUSY)
#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
#define F_CAL_FAULT V_CAL_FAULT(1U)
+#define S_PER_CAL_DIV 22
+#define M_PER_CAL_DIV 0xff
+#define V_PER_CAL_DIV(x) ((x) << S_PER_CAL_DIV)
+#define G_PER_CAL_DIV(x) (((x) >> S_PER_CAL_DIV) & M_PER_CAL_DIV)
+
+#define S_PER_CAL_EN 21
+#define V_PER_CAL_EN(x) ((x) << S_PER_CAL_EN)
+#define F_PER_CAL_EN V_PER_CAL_EN(1U)
+
#define S_SGL_CAL_EN 20
#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
+#define S_IMP_UPD_MODE 19
+#define V_IMP_UPD_MODE(x) ((x) << S_IMP_UPD_MODE)
+#define F_IMP_UPD_MODE V_IMP_UPD_MODE(1U)
+
+#define S_IMP_SEL 18
+#define V_IMP_SEL(x) ((x) << S_IMP_SEL)
+#define F_IMP_SEL V_IMP_SEL(1U)
+
+#define S_IMP_MAN_PD 15
+#define M_IMP_MAN_PD 0x7
+#define V_IMP_MAN_PD(x) ((x) << S_IMP_MAN_PD)
+#define G_IMP_MAN_PD(x) (((x) >> S_IMP_MAN_PD) & M_IMP_MAN_PD)
+
+#define S_IMP_MAN_PU 12
+#define M_IMP_MAN_PU 0x7
+#define V_IMP_MAN_PU(x) ((x) << S_IMP_MAN_PU)
+#define G_IMP_MAN_PU(x) (((x) >> S_IMP_MAN_PU) & M_IMP_MAN_PU)
+
+#define S_IMP_CAL_PD 9
+#define M_IMP_CAL_PD 0x7
+#define V_IMP_CAL_PD(x) ((x) << S_IMP_CAL_PD)
+#define G_IMP_CAL_PD(x) (((x) >> S_IMP_CAL_PD) & M_IMP_CAL_PD)
+
+#define S_IMP_CAL_PU 6
+#define M_IMP_CAL_PU 0x7
+#define V_IMP_CAL_PU(x) ((x) << S_IMP_CAL_PU)
+#define G_IMP_CAL_PU(x) (((x) >> S_IMP_CAL_PU) & M_IMP_CAL_PU)
+
+#define S_IMP_SET_PD 3
+#define M_IMP_SET_PD 0x7
+#define V_IMP_SET_PD(x) ((x) << S_IMP_SET_PD)
+#define G_IMP_SET_PD(x) (((x) >> S_IMP_SET_PD) & M_IMP_SET_PD)
+
+#define S_IMP_SET_PU 0
+#define M_IMP_SET_PU 0x7
+#define V_IMP_SET_PU(x) ((x) << S_IMP_SET_PU)
+#define G_IMP_SET_PU(x) (((x) >> S_IMP_SET_PU) & M_IMP_SET_PU)
+
#define A_MC7_ERR_ADDR 0x12c
+#define S_ERRADDRESS 3
+#define M_ERRADDRESS 0x1fffffff
+#define V_ERRADDRESS(x) ((x) << S_ERRADDRESS)
+#define G_ERRADDRESS(x) (((x) >> S_ERRADDRESS) & M_ERRADDRESS)
+
+#define S_ERRAGENT 1
+#define M_ERRAGENT 0x3
+#define V_ERRAGENT(x) ((x) << S_ERRAGENT)
+#define G_ERRAGENT(x) (((x) >> S_ERRAGENT) & M_ERRAGENT)
+
+#define S_ERROP 0
+#define V_ERROP(x) ((x) << S_ERROP)
+#define F_ERROP V_ERROP(1U)
+
#define A_MC7_ECC 0x130
+#define S_UECNT 10
+#define M_UECNT 0xff
+#define V_UECNT(x) ((x) << S_UECNT)
+#define G_UECNT(x) (((x) >> S_UECNT) & M_UECNT)
+
+#define S_CECNT 2
+#define M_CECNT 0xff
+#define V_CECNT(x) ((x) << S_CECNT)
+#define G_CECNT(x) (((x) >> S_CECNT) & M_CECNT)
+
#define S_ECCCHKEN 1
#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
#define F_ECCCHKEN V_ECCCHKEN(1U)
#define F_ECCGENEN V_ECCGENEN(1U)
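+
+/*
+ * Note: UECNT/CECNT above count uncorrectable/correctable ECC errors for
+ * this MC7 instance and are typically sampled with G_UECNT()/G_CECNT()
+ * from the MC7 interrupt handler.
+ */
+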
#define A_MC7_CE_ADDR 0x134
-
#define A_MC7_CE_DATA0 0x138
-
#define A_MC7_CE_DATA1 0x13c
-
#define A_MC7_CE_DATA2 0x140
#define S_DATA 0
#define M_DATA 0xff
-
+#define V_DATA(x) ((x) << S_DATA)
#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
#define A_MC7_UE_ADDR 0x144
-
#define A_MC7_UE_DATA0 0x148
-
#define A_MC7_UE_DATA1 0x14c
-
#define A_MC7_UE_DATA2 0x150
-
#define A_MC7_BD_ADDR 0x154
#define S_ADDR 3
-
#define M_ADDR 0x1fffffff
+#define V_ADDR(x) ((x) << S_ADDR)
+#define G_ADDR(x) (((x) >> S_ADDR) & M_ADDR)
#define A_MC7_BD_DATA0 0x158
-
#define A_MC7_BD_DATA1 0x15c
-
+#define A_MC7_BD_DATA2 0x160
#define A_MC7_BD_OP 0x164
#define S_OP 0
-
#define V_OP(x) ((x) << S_OP)
#define F_OP V_OP(1U)
-#define F_OP V_OP(1U)
-#define A_SF_OP 0x6dc
-
#define A_MC7_BIST_ADDR_BEG 0x168
+#define S_ADDRBEG 5
+#define M_ADDRBEG 0x7ffffff
+#define V_ADDRBEG(x) ((x) << S_ADDRBEG)
+#define G_ADDRBEG(x) (((x) >> S_ADDRBEG) & M_ADDRBEG)
+
#define A_MC7_BIST_ADDR_END 0x16c
-#define A_MC7_BIST_DATA 0x170
+#define S_ADDREND 5
+#define M_ADDREND 0x7ffffff
+#define V_ADDREND(x) ((x) << S_ADDREND)
+#define G_ADDREND(x) (((x) >> S_ADDREND) & M_ADDREND)
+#define A_MC7_BIST_DATA 0x170
#define A_MC7_BIST_OP 0x174
+#define S_GAP 4
+#define M_GAP 0x1f
+#define V_GAP(x) ((x) << S_GAP)
+#define G_GAP(x) (((x) >> S_GAP) & M_GAP)
+
#define S_CONT 3
#define V_CONT(x) ((x) << S_CONT)
#define F_CONT V_CONT(1U)
-#define F_CONT V_CONT(1U)
+#define S_DATAPAT 1
+#define M_DATAPAT 0x3
+#define V_DATAPAT(x) ((x) << S_DATAPAT)
+#define G_DATAPAT(x) (((x) >> S_DATAPAT) & M_DATAPAT)
#define A_MC7_INT_ENABLE 0x178
#define S_PE 2
#define M_PE 0x7fff
-
#define V_PE(x) ((x) << S_PE)
-
#define G_PE(x) (((x) >> S_PE) & M_PE)
#define S_UE 1
#define A_MC7_INT_CAUSE 0x17c
+/* registers for module MC7_PMTX */
#define MC7_PMTX_BASE_ADDR 0x180
+/* registers for module MC7_CM */
#define MC7_CM_BASE_ADDR 0x200
+/* registers for module CIM */
+#define CIM_BASE_ADDR 0x280
+
#define A_CIM_BOOT_CFG 0x280
#define S_BOOTADDR 2
#define M_BOOTADDR 0x3fffffff
#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
+#define G_BOOTADDR(x) (((x) >> S_BOOTADDR) & M_BOOTADDR)
+
+#define S_BOOTSDRAM 1
+#define V_BOOTSDRAM(x) ((x) << S_BOOTSDRAM)
+#define F_BOOTSDRAM V_BOOTSDRAM(1U)
+
+#define S_UPCRST 0
+#define V_UPCRST(x) ((x) << S_UPCRST)
+#define F_UPCRST V_UPCRST(1U)
+
+#define A_CIM_FLASH_BASE_ADDR 0x284
+
+#define S_FLASHBASEADDR 2
+#define M_FLASHBASEADDR 0x3fffff
+#define V_FLASHBASEADDR(x) ((x) << S_FLASHBASEADDR)
+#define G_FLASHBASEADDR(x) (((x) >> S_FLASHBASEADDR) & M_FLASHBASEADDR)
+
+#define A_CIM_FLASH_ADDR_SIZE 0x288
+
+#define S_FLASHADDRSIZE 2
+#define M_FLASHADDRSIZE 0x3fffff
+#define V_FLASHADDRSIZE(x) ((x) << S_FLASHADDRSIZE)
+#define G_FLASHADDRSIZE(x) (((x) >> S_FLASHADDRSIZE) & M_FLASHADDRSIZE)
#define A_CIM_SDRAM_BASE_ADDR 0x28c
+#define S_SDRAMBASEADDR 2
+#define M_SDRAMBASEADDR 0x3fffffff
+#define V_SDRAMBASEADDR(x) ((x) << S_SDRAMBASEADDR)
+#define G_SDRAMBASEADDR(x) (((x) >> S_SDRAMBASEADDR) & M_SDRAMBASEADDR)
+
#define A_CIM_SDRAM_ADDR_SIZE 0x290
+#define S_SDRAMADDRSIZE 2
+#define M_SDRAMADDRSIZE 0x3fffffff
+#define V_SDRAMADDRSIZE(x) ((x) << S_SDRAMADDRSIZE)
+#define G_SDRAMADDRSIZE(x) (((x) >> S_SDRAMADDRSIZE) & M_SDRAMADDRSIZE)
+
+#define A_CIM_UP_SPARE_INT 0x294
+
+#define S_UPSPAREINT 0
+#define M_UPSPAREINT 0x7
+#define V_UPSPAREINT(x) ((x) << S_UPSPAREINT)
+#define G_UPSPAREINT(x) (((x) >> S_UPSPAREINT) & M_UPSPAREINT)
+
#define A_CIM_HOST_INT_ENABLE 0x298
#define S_DTAGPARERR 28
#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR)
#define F_DRAMPARERR V_DRAMPARERR(1U)
+#define S_TIMER1INTEN 15
+#define V_TIMER1INTEN(x) ((x) << S_TIMER1INTEN)
+#define F_TIMER1INTEN V_TIMER1INTEN(1U)
+
+#define S_TIMER0INTEN 14
+#define V_TIMER0INTEN(x) ((x) << S_TIMER0INTEN)
+#define F_TIMER0INTEN V_TIMER0INTEN(1U)
+
+#define S_PREFDROPINTEN 13
+#define V_PREFDROPINTEN(x) ((x) << S_PREFDROPINTEN)
+#define F_PREFDROPINTEN V_PREFDROPINTEN(1U)
+
+#define S_BLKWRPLINTEN 12
+#define V_BLKWRPLINTEN(x) ((x) << S_BLKWRPLINTEN)
+#define F_BLKWRPLINTEN V_BLKWRPLINTEN(1U)
+
+#define S_BLKRDPLINTEN 11
+#define V_BLKRDPLINTEN(x) ((x) << S_BLKRDPLINTEN)
+#define F_BLKRDPLINTEN V_BLKRDPLINTEN(1U)
+
+#define S_BLKWRCTLINTEN 10
+#define V_BLKWRCTLINTEN(x) ((x) << S_BLKWRCTLINTEN)
+#define F_BLKWRCTLINTEN V_BLKWRCTLINTEN(1U)
+
+#define S_BLKRDCTLINTEN 9
+#define V_BLKRDCTLINTEN(x) ((x) << S_BLKRDCTLINTEN)
+#define F_BLKRDCTLINTEN V_BLKRDCTLINTEN(1U)
+
+#define S_BLKWRFLASHINTEN 8
+#define V_BLKWRFLASHINTEN(x) ((x) << S_BLKWRFLASHINTEN)
+#define F_BLKWRFLASHINTEN V_BLKWRFLASHINTEN(1U)
+
+#define S_BLKRDFLASHINTEN 7
+#define V_BLKRDFLASHINTEN(x) ((x) << S_BLKRDFLASHINTEN)
+#define F_BLKRDFLASHINTEN V_BLKRDFLASHINTEN(1U)
+
+#define S_SGLWRFLASHINTEN 6
+#define V_SGLWRFLASHINTEN(x) ((x) << S_SGLWRFLASHINTEN)
+#define F_SGLWRFLASHINTEN V_SGLWRFLASHINTEN(1U)
+
+#define S_WRBLKFLASHINTEN 5
+#define V_WRBLKFLASHINTEN(x) ((x) << S_WRBLKFLASHINTEN)
+#define F_WRBLKFLASHINTEN V_WRBLKFLASHINTEN(1U)
+
+#define S_BLKWRBOOTINTEN 4
+#define V_BLKWRBOOTINTEN(x) ((x) << S_BLKWRBOOTINTEN)
+#define F_BLKWRBOOTINTEN V_BLKWRBOOTINTEN(1U)
+
+#define S_BLKRDBOOTINTEN 3
+#define V_BLKRDBOOTINTEN(x) ((x) << S_BLKRDBOOTINTEN)
+#define F_BLKRDBOOTINTEN V_BLKRDBOOTINTEN(1U)
+
+#define S_FLASHRANGEINTEN 2
+#define V_FLASHRANGEINTEN(x) ((x) << S_FLASHRANGEINTEN)
+#define F_FLASHRANGEINTEN V_FLASHRANGEINTEN(1U)
+
+#define S_SDRAMRANGEINTEN 1
+#define V_SDRAMRANGEINTEN(x) ((x) << S_SDRAMRANGEINTEN)
+#define F_SDRAMRANGEINTEN V_SDRAMRANGEINTEN(1U)
+
+#define S_RSVDSPACEINTEN 0
+#define V_RSVDSPACEINTEN(x) ((x) << S_RSVDSPACEINTEN)
+#define F_RSVDSPACEINTEN V_RSVDSPACEINTEN(1U)
+
#define A_CIM_HOST_INT_CAUSE 0x29c
+#define S_TIMER1INT 15
+#define V_TIMER1INT(x) ((x) << S_TIMER1INT)
+#define F_TIMER1INT V_TIMER1INT(1U)
+
+#define S_TIMER0INT 14
+#define V_TIMER0INT(x) ((x) << S_TIMER0INT)
+#define F_TIMER0INT V_TIMER0INT(1U)
+
+#define S_PREFDROPINT 13
+#define V_PREFDROPINT(x) ((x) << S_PREFDROPINT)
+#define F_PREFDROPINT V_PREFDROPINT(1U)
+
#define S_BLKWRPLINT 12
#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
#define F_BLKWRPLINT V_BLKWRPLINT(1U)
#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
+#define S_BLKRDBOOTINT 3
+#define V_BLKRDBOOTINT(x) ((x) << S_BLKRDBOOTINT)
+#define F_BLKRDBOOTINT V_BLKRDBOOTINT(1U)
+
#define S_FLASHRANGEINT 2
#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+#define A_CIM_UP_INT_ENABLE 0x2a0
+
+#define S_MSTPLINTEN 16
+#define V_MSTPLINTEN(x) ((x) << S_MSTPLINTEN)
+#define F_MSTPLINTEN V_MSTPLINTEN(1U)
+
+#define A_CIM_UP_INT_CAUSE 0x2a4
+
+#define S_MSTPLINT 16
+#define V_MSTPLINT(x) ((x) << S_MSTPLINT)
+#define F_MSTPLINT V_MSTPLINT(1U)
+
+#define A_CIM_IBQ_FULLA_THRSH 0x2a8
+
+#define S_IBQ0FULLTHRSH 0
+#define M_IBQ0FULLTHRSH 0x1ff
+#define V_IBQ0FULLTHRSH(x) ((x) << S_IBQ0FULLTHRSH)
+#define G_IBQ0FULLTHRSH(x) (((x) >> S_IBQ0FULLTHRSH) & M_IBQ0FULLTHRSH)
+
+#define S_IBQ1FULLTHRSH 16
+#define M_IBQ1FULLTHRSH 0x1ff
+#define V_IBQ1FULLTHRSH(x) ((x) << S_IBQ1FULLTHRSH)
+#define G_IBQ1FULLTHRSH(x) (((x) >> S_IBQ1FULLTHRSH) & M_IBQ1FULLTHRSH)
+
+#define A_CIM_IBQ_FULLB_THRSH 0x2ac
+
+#define S_IBQ2FULLTHRSH 0
+#define M_IBQ2FULLTHRSH 0x1ff
+#define V_IBQ2FULLTHRSH(x) ((x) << S_IBQ2FULLTHRSH)
+#define G_IBQ2FULLTHRSH(x) (((x) >> S_IBQ2FULLTHRSH) & M_IBQ2FULLTHRSH)
+
+#define S_IBQ3FULLTHRSH 16
+#define M_IBQ3FULLTHRSH 0x1ff
+#define V_IBQ3FULLTHRSH(x) ((x) << S_IBQ3FULLTHRSH)
+#define G_IBQ3FULLTHRSH(x) (((x) >> S_IBQ3FULLTHRSH) & M_IBQ3FULLTHRSH)
+
#define A_CIM_HOST_ACC_CTRL 0x2b0
#define S_HOSTBUSY 17
#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
#define F_HOSTBUSY V_HOSTBUSY(1U)
-#define A_CIM_HOST_ACC_DATA 0x2b4
+#define S_HOSTWRITE 16
+#define V_HOSTWRITE(x) ((x) << S_HOSTWRITE)
+#define F_HOSTWRITE V_HOSTWRITE(1U)
+#define S_HOSTADDR 0
+#define M_HOSTADDR 0xffff
+#define V_HOSTADDR(x) ((x) << S_HOSTADDR)
+#define G_HOSTADDR(x) (((x) >> S_HOSTADDR) & M_HOSTADDR)
+
+#define A_CIM_HOST_ACC_DATA 0x2b4
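+
+/*
+ * HOST_ACC_CTRL/HOST_ACC_DATA form the usual indirect access window:
+ * write the target address (plus F_HOSTWRITE for writes) to the control
+ * register, wait for F_HOSTBUSY to clear, then transfer the word through
+ * A_CIM_HOST_ACC_DATA.  A minimal read sketch, assuming t3_read_reg(),
+ * t3_write_reg() and t3_wait_op_done() from the driver core:
+ *
+ *	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, V_HOSTADDR(addr));
+ *	if (!t3_wait_op_done(adapter, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ *			     0, 5, 2))
+ *		val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
+ */
+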
#define A_CIM_IBQ_DBG_CFG 0x2c0
#define S_IBQDBGADDR 16
#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
#define F_IBQDBGEN V_IBQDBGEN(1U)
+#define A_CIM_OBQ_DBG_CFG 0x2c4
+
+#define S_OBQDBGADDR 16
+#define M_OBQDBGADDR 0x1ff
+#define V_OBQDBGADDR(x) ((x) << S_OBQDBGADDR)
+#define G_OBQDBGADDR(x) (((x) >> S_OBQDBGADDR) & M_OBQDBGADDR)
+
+#define S_OBQDBGQID 3
+#define M_OBQDBGQID 0x3
+#define V_OBQDBGQID(x) ((x) << S_OBQDBGQID)
+#define G_OBQDBGQID(x) (((x) >> S_OBQDBGQID) & M_OBQDBGQID)
+
+#define S_OBQDBGWR 2
+#define V_OBQDBGWR(x) ((x) << S_OBQDBGWR)
+#define F_OBQDBGWR V_OBQDBGWR(1U)
+
+#define S_OBQDBGBUSY 1
+#define V_OBQDBGBUSY(x) ((x) << S_OBQDBGBUSY)
+#define F_OBQDBGBUSY V_OBQDBGBUSY(1U)
+
+#define S_OBQDBGEN 0
+#define V_OBQDBGEN(x) ((x) << S_OBQDBGEN)
+#define F_OBQDBGEN V_OBQDBGEN(1U)
+
#define A_CIM_IBQ_DBG_DATA 0x2c8
+#define A_CIM_OBQ_DBG_DATA 0x2cc
+#define A_CIM_CDEBUGDATA 0x2d0
+
+#define S_CDEBUGDATAH 16
+#define M_CDEBUGDATAH 0xffff
+#define V_CDEBUGDATAH(x) ((x) << S_CDEBUGDATAH)
+#define G_CDEBUGDATAH(x) (((x) >> S_CDEBUGDATAH) & M_CDEBUGDATAH)
+
+#define S_CDEBUGDATAL 0
+#define M_CDEBUGDATAL 0xffff
+#define V_CDEBUGDATAL(x) ((x) << S_CDEBUGDATAL)
+#define G_CDEBUGDATAL(x) (((x) >> S_CDEBUGDATAL) & M_CDEBUGDATAL)
+
+#define A_CIM_DEBUGCFG 0x2e0
+
+#define S_POLADBGRDPTR 23
+#define M_POLADBGRDPTR 0x1ff
+#define V_POLADBGRDPTR(x) ((x) << S_POLADBGRDPTR)
+#define G_POLADBGRDPTR(x) (((x) >> S_POLADBGRDPTR) & M_POLADBGRDPTR)
+
+#define S_PILADBGRDPTR 14
+#define M_PILADBGRDPTR 0x1ff
+#define V_PILADBGRDPTR(x) ((x) << S_PILADBGRDPTR)
+#define G_PILADBGRDPTR(x) (((x) >> S_PILADBGRDPTR) & M_PILADBGRDPTR)
+
+#define S_CIM_LADBGEN 12
+#define V_CIM_LADBGEN(x) ((x) << S_CIM_LADBGEN)
+#define F_CIM_LADBGEN V_CIM_LADBGEN(1U)
+
+#define S_DEBUGSELHI 5
+#define M_DEBUGSELHI 0x1f
+#define V_DEBUGSELHI(x) ((x) << S_DEBUGSELHI)
+#define G_DEBUGSELHI(x) (((x) >> S_DEBUGSELHI) & M_DEBUGSELHI)
+
+#define S_DEBUGSELLO 0
+#define M_DEBUGSELLO 0x1f
+#define V_DEBUGSELLO(x) ((x) << S_DEBUGSELLO)
+#define G_DEBUGSELLO(x) (((x) >> S_DEBUGSELLO) & M_DEBUGSELLO)
+
+#define A_CIM_DEBUGSTS 0x2e4
+
+#define S_POLADBGWRPTR 16
+#define M_POLADBGWRPTR 0x1ff
+#define V_POLADBGWRPTR(x) ((x) << S_POLADBGWRPTR)
+#define G_POLADBGWRPTR(x) (((x) >> S_POLADBGWRPTR) & M_POLADBGWRPTR)
+
+#define S_PILADBGWRPTR 0
+#define M_PILADBGWRPTR 0x1ff
+#define V_PILADBGWRPTR(x) ((x) << S_PILADBGWRPTR)
+#define G_PILADBGWRPTR(x) (((x) >> S_PILADBGWRPTR) & M_PILADBGWRPTR)
+
+#define A_CIM_PO_LA_DEBUGDATA 0x2e8
+#define A_CIM_PI_LA_DEBUGDATA 0x2ec
+
+/* registers for module TP1 */
+#define TP1_BASE_ADDR 0x300
#define A_TP_IN_CONFIG 0x300
#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO)
#define F_TXFBARBPRIO V_TXFBARBPRIO(1U)
+#define S_DBMAXOPCNT 16
+#define M_DBMAXOPCNT 0xff
+#define V_DBMAXOPCNT(x) ((x) << S_DBMAXOPCNT)
+#define G_DBMAXOPCNT(x) (((x) >> S_DBMAXOPCNT) & M_DBMAXOPCNT)
+
+#define S_IPV6ENABLE 15
+#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
+#define F_IPV6ENABLE V_IPV6ENABLE(1U)
+
#define S_NICMODE 14
#define V_NICMODE(x) ((x) << S_NICMODE)
#define F_NICMODE V_NICMODE(1U)
-#define F_NICMODE V_NICMODE(1U)
+#define S_ECHECKSUMCHECKTCP 13
+#define V_ECHECKSUMCHECKTCP(x) ((x) << S_ECHECKSUMCHECKTCP)
+#define F_ECHECKSUMCHECKTCP V_ECHECKSUMCHECKTCP(1U)
-#define S_IPV6ENABLE 15
-#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
-#define F_IPV6ENABLE V_IPV6ENABLE(1U)
+#define S_ECHECKSUMCHECKIP 12
+#define V_ECHECKSUMCHECKIP(x) ((x) << S_ECHECKSUMCHECKIP)
+#define F_ECHECKSUMCHECKIP V_ECHECKSUMCHECKIP(1U)
+
+#define S_ECPL 10
+#define V_ECPL(x) ((x) << S_ECPL)
+#define F_ECPL V_ECPL(1U)
+
+#define S_EETHERNET 8
+#define V_EETHERNET(x) ((x) << S_EETHERNET)
+#define F_EETHERNET V_EETHERNET(1U)
+
+#define S_ETUNNEL 7
+#define V_ETUNNEL(x) ((x) << S_ETUNNEL)
+#define F_ETUNNEL V_ETUNNEL(1U)
+
+#define S_CCHECKSUMCHECKTCP 6
+#define V_CCHECKSUMCHECKTCP(x) ((x) << S_CCHECKSUMCHECKTCP)
+#define F_CCHECKSUMCHECKTCP V_CCHECKSUMCHECKTCP(1U)
+
+#define S_CCHECKSUMCHECKIP 5
+#define V_CCHECKSUMCHECKIP(x) ((x) << S_CCHECKSUMCHECKIP)
+#define F_CCHECKSUMCHECKIP V_CCHECKSUMCHECKIP(1U)
+
+#define S_CCPL 3
+#define V_CCPL(x) ((x) << S_CCPL)
+#define F_CCPL V_CCPL(1U)
+
+#define S_CETHERNET 1
+#define V_CETHERNET(x) ((x) << S_CETHERNET)
+#define F_CETHERNET V_CETHERNET(1U)
+
+#define S_CTUNNEL 0
+#define V_CTUNNEL(x) ((x) << S_CTUNNEL)
+#define F_CTUNNEL V_CTUNNEL(1U)
#define A_TP_OUT_CONFIG 0x304
+#define S_IPIDSPLITMODE 16
+#define V_IPIDSPLITMODE(x) ((x) << S_IPIDSPLITMODE)
+#define F_IPIDSPLITMODE V_IPIDSPLITMODE(1U)
+
+#define S_VLANEXTRACTIONENABLE2NDPORT 13
+#define V_VLANEXTRACTIONENABLE2NDPORT(x) ((x) << S_VLANEXTRACTIONENABLE2NDPORT)
+#define F_VLANEXTRACTIONENABLE2NDPORT V_VLANEXTRACTIONENABLE2NDPORT(1U)
+
#define S_VLANEXTRACTIONENABLE 12
+#define V_VLANEXTRACTIONENABLE(x) ((x) << S_VLANEXTRACTIONENABLE)
+#define F_VLANEXTRACTIONENABLE V_VLANEXTRACTIONENABLE(1U)
+
+#define S_ECHECKSUMGENERATETCP 11
+#define V_ECHECKSUMGENERATETCP(x) ((x) << S_ECHECKSUMGENERATETCP)
+#define F_ECHECKSUMGENERATETCP V_ECHECKSUMGENERATETCP(1U)
+
+#define S_ECHECKSUMGENERATEIP 10
+#define V_ECHECKSUMGENERATEIP(x) ((x) << S_ECHECKSUMGENERATEIP)
+#define F_ECHECKSUMGENERATEIP V_ECHECKSUMGENERATEIP(1U)
+
+#define S_OUT_ECPL 8
+#define V_OUT_ECPL(x) ((x) << S_OUT_ECPL)
+#define F_OUT_ECPL V_OUT_ECPL(1U)
+
+#define S_OUT_EETHERNET 6
+#define V_OUT_EETHERNET(x) ((x) << S_OUT_EETHERNET)
+#define F_OUT_EETHERNET V_OUT_EETHERNET(1U)
+
+#define S_CCHECKSUMGENERATETCP 5
+#define V_CCHECKSUMGENERATETCP(x) ((x) << S_CCHECKSUMGENERATETCP)
+#define F_CCHECKSUMGENERATETCP V_CCHECKSUMGENERATETCP(1U)
+
+#define S_CCHECKSUMGENERATEIP 4
+#define V_CCHECKSUMGENERATEIP(x) ((x) << S_CCHECKSUMGENERATEIP)
+#define F_CCHECKSUMGENERATEIP V_CCHECKSUMGENERATEIP(1U)
+
+#define S_OUT_CCPL 2
+#define V_OUT_CCPL(x) ((x) << S_OUT_CCPL)
+#define F_OUT_CCPL V_OUT_CCPL(1U)
+
+#define S_OUT_CETHERNET 0
+#define V_OUT_CETHERNET(x) ((x) << S_OUT_CETHERNET)
+#define F_OUT_CETHERNET V_OUT_CETHERNET(1U)
#define A_TP_GLOBAL_CONFIG 0x308
+#define S_SYNCOOKIEPARAMS 26
+#define M_SYNCOOKIEPARAMS 0x3f
+#define V_SYNCOOKIEPARAMS(x) ((x) << S_SYNCOOKIEPARAMS)
+#define G_SYNCOOKIEPARAMS(x) (((x) >> S_SYNCOOKIEPARAMS) & M_SYNCOOKIEPARAMS)
+
+#define S_RXFLOWCONTROLDISABLE 25
+#define V_RXFLOWCONTROLDISABLE(x) ((x) << S_RXFLOWCONTROLDISABLE)
+#define F_RXFLOWCONTROLDISABLE V_RXFLOWCONTROLDISABLE(1U)
+
#define S_TXPACINGENABLE 24
#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
+#define S_ATTACKFILTERENABLE 23
+#define V_ATTACKFILTERENABLE(x) ((x) << S_ATTACKFILTERENABLE)
+#define F_ATTACKFILTERENABLE V_ATTACKFILTERENABLE(1U)
+
+#define S_SYNCOOKIENOOPTIONS 22
+#define V_SYNCOOKIENOOPTIONS(x) ((x) << S_SYNCOOKIENOOPTIONS)
+#define F_SYNCOOKIENOOPTIONS V_SYNCOOKIENOOPTIONS(1U)
+
+#define S_PROTECTEDMODE 21
+#define V_PROTECTEDMODE(x) ((x) << S_PROTECTEDMODE)
+#define F_PROTECTEDMODE V_PROTECTEDMODE(1U)
+
+#define S_PINGDROP 20
+#define V_PINGDROP(x) ((x) << S_PINGDROP)
+#define F_PINGDROP V_PINGDROP(1U)
+
+#define S_FRAGMENTDROP 19
+#define V_FRAGMENTDROP(x) ((x) << S_FRAGMENTDROP)
+#define F_FRAGMENTDROP V_FRAGMENTDROP(1U)
+
+#define S_FIVETUPLELOOKUP 17
+#define M_FIVETUPLELOOKUP 0x3
+#define V_FIVETUPLELOOKUP(x) ((x) << S_FIVETUPLELOOKUP)
+#define G_FIVETUPLELOOKUP(x) (((x) >> S_FIVETUPLELOOKUP) & M_FIVETUPLELOOKUP)
+
#define S_PATHMTU 15
#define V_PATHMTU(x) ((x) << S_PATHMTU)
#define F_PATHMTU V_PATHMTU(1U)
+#define S_IPIDENTSPLIT 14
+#define V_IPIDENTSPLIT(x) ((x) << S_IPIDENTSPLIT)
+#define F_IPIDENTSPLIT V_IPIDENTSPLIT(1U)
+
#define S_IPCHECKSUMOFFLOAD 13
#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
+#define S_QOSMAPPING 10
+#define V_QOSMAPPING(x) ((x) << S_QOSMAPPING)
+#define F_QOSMAPPING V_QOSMAPPING(1U)
+
+#define S_TCAMSERVERUSE 8
+#define M_TCAMSERVERUSE 0x3
+#define V_TCAMSERVERUSE(x) ((x) << S_TCAMSERVERUSE)
+#define G_TCAMSERVERUSE(x) (((x) >> S_TCAMSERVERUSE) & M_TCAMSERVERUSE)
+
#define S_IPTTL 0
#define M_IPTTL 0xff
#define V_IPTTL(x) ((x) << S_IPTTL)
+#define G_IPTTL(x) (((x) >> S_IPTTL) & M_IPTTL)
+
+#define A_TP_GLOBAL_RX_CREDIT 0x30c
+#define A_TP_CMM_SIZE 0x310
+
+#define S_CMMEMMGRSIZE 0
+#define M_CMMEMMGRSIZE 0xfffffff
+#define V_CMMEMMGRSIZE(x) ((x) << S_CMMEMMGRSIZE)
+#define G_CMMEMMGRSIZE(x) (((x) >> S_CMMEMMGRSIZE) & M_CMMEMMGRSIZE)
#define A_TP_CMM_MM_BASE 0x314
+#define S_CMMEMMGRBASE 0
+#define M_CMMEMMGRBASE 0xfffffff
+#define V_CMMEMMGRBASE(x) ((x) << S_CMMEMMGRBASE)
+#define G_CMMEMMGRBASE(x) (((x) >> S_CMMEMMGRBASE) & M_CMMEMMGRBASE)
+
#define A_TP_CMM_TIMER_BASE 0x318
#define S_CMTIMERMAXNUM 28
#define M_CMTIMERMAXNUM 0x3
#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
+#define G_CMTIMERMAXNUM(x) (((x) >> S_CMTIMERMAXNUM) & M_CMTIMERMAXNUM)
+
+#define S_CMTIMERBASE 0
+#define M_CMTIMERBASE 0xfffffff
+#define V_CMTIMERBASE(x) ((x) << S_CMTIMERBASE)
+#define G_CMTIMERBASE(x) (((x) >> S_CMTIMERBASE) & M_CMTIMERBASE)
#define A_TP_PMM_SIZE 0x31c
-#define A_TP_PMM_TX_BASE 0x320
+#define S_PMSIZE 0
+#define M_PMSIZE 0xfffffff
+#define V_PMSIZE(x) ((x) << S_PMSIZE)
+#define G_PMSIZE(x) (((x) >> S_PMSIZE) & M_PMSIZE)
+#define A_TP_PMM_TX_BASE 0x320
+#define A_TP_PMM_DEFRAG_BASE 0x324
#define A_TP_PMM_RX_BASE 0x328
-
#define A_TP_PMM_RX_PAGE_SIZE 0x32c
-
#define A_TP_PMM_RX_MAX_PAGE 0x330
-#define A_TP_PMM_TX_PAGE_SIZE 0x334
+#define S_PMRXMAXPAGE 0
+#define M_PMRXMAXPAGE 0x1fffff
+#define V_PMRXMAXPAGE(x) ((x) << S_PMRXMAXPAGE)
+#define G_PMRXMAXPAGE(x) (((x) >> S_PMRXMAXPAGE) & M_PMRXMAXPAGE)
+#define A_TP_PMM_TX_PAGE_SIZE 0x334
#define A_TP_PMM_TX_MAX_PAGE 0x338
+#define S_PMTXMAXPAGE 0
+#define M_PMTXMAXPAGE 0x1fffff
+#define V_PMTXMAXPAGE(x) ((x) << S_PMTXMAXPAGE)
+#define G_PMTXMAXPAGE(x) (((x) >> S_PMTXMAXPAGE) & M_PMTXMAXPAGE)
+
#define A_TP_TCP_OPTIONS 0x340
#define S_MTUDEFAULT 16
#define M_MTUDEFAULT 0xffff
#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
+#define G_MTUDEFAULT(x) (((x) >> S_MTUDEFAULT) & M_MTUDEFAULT)
#define S_MTUENABLE 10
#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
#define F_MTUENABLE V_MTUENABLE(1U)
+#define S_SACKTX 9
+#define V_SACKTX(x) ((x) << S_SACKTX)
+#define F_SACKTX V_SACKTX(1U)
+
#define S_SACKRX 8
#define V_SACKRX(x) ((x) << S_SACKRX)
#define F_SACKRX V_SACKRX(1U)
#define S_SACKMODE 4
-
#define M_SACKMODE 0x3
-
#define V_SACKMODE(x) ((x) << S_SACKMODE)
+#define G_SACKMODE(x) (((x) >> S_SACKMODE) & M_SACKMODE)
#define S_WINDOWSCALEMODE 2
#define M_WINDOWSCALEMODE 0x3
#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
+#define G_WINDOWSCALEMODE(x) (((x) >> S_WINDOWSCALEMODE) & M_WINDOWSCALEMODE)
#define S_TIMESTAMPSMODE 0
-
#define M_TIMESTAMPSMODE 0x3
-
#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
+#define G_TIMESTAMPSMODE(x) (((x) >> S_TIMESTAMPSMODE) & M_TIMESTAMPSMODE)
#define A_TP_DACK_CONFIG 0x344
#define S_AUTOSTATE3 30
#define M_AUTOSTATE3 0x3
#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
+#define G_AUTOSTATE3(x) (((x) >> S_AUTOSTATE3) & M_AUTOSTATE3)
#define S_AUTOSTATE2 28
#define M_AUTOSTATE2 0x3
#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
+#define G_AUTOSTATE2(x) (((x) >> S_AUTOSTATE2) & M_AUTOSTATE2)
#define S_AUTOSTATE1 26
#define M_AUTOSTATE1 0x3
#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
+#define G_AUTOSTATE1(x) (((x) >> S_AUTOSTATE1) & M_AUTOSTATE1)
#define S_BYTETHRESHOLD 5
#define M_BYTETHRESHOLD 0xfffff
#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
+#define G_BYTETHRESHOLD(x) (((x) >> S_BYTETHRESHOLD) & M_BYTETHRESHOLD)
#define S_MSSTHRESHOLD 3
#define M_MSSTHRESHOLD 0x3
#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
+#define G_MSSTHRESHOLD(x) (((x) >> S_MSSTHRESHOLD) & M_MSSTHRESHOLD)
#define S_AUTOCAREFUL 2
#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
#define A_TP_PC_CONFIG 0x348
+#define S_CMCACHEDISABLE 31
+#define V_CMCACHEDISABLE(x) ((x) << S_CMCACHEDISABLE)
+#define F_CMCACHEDISABLE V_CMCACHEDISABLE(1U)
+
+#define S_ENABLEOCSPIFULL 30
+#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
+#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
+
+#define S_ENABLEFLMERRORDDP 29
+#define V_ENABLEFLMERRORDDP(x) ((x) << S_ENABLEFLMERRORDDP)
+#define F_ENABLEFLMERRORDDP V_ENABLEFLMERRORDDP(1U)
+
+#define S_LOCKTID 28
+#define V_LOCKTID(x) ((x) << S_LOCKTID)
+#define F_LOCKTID V_LOCKTID(1U)
+
+#define S_FIXRCVWND 27
+#define V_FIXRCVWND(x) ((x) << S_FIXRCVWND)
+#define F_FIXRCVWND V_FIXRCVWND(1U)
+
#define S_TXTOSQUEUEMAPMODE 26
#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
+#define S_RDDPCONGEN 25
+#define V_RDDPCONGEN(x) ((x) << S_RDDPCONGEN)
+#define F_RDDPCONGEN V_RDDPCONGEN(1U)
+
+#define S_ENABLEONFLYPDU 24
+#define V_ENABLEONFLYPDU(x) ((x) << S_ENABLEONFLYPDU)
+#define F_ENABLEONFLYPDU V_ENABLEONFLYPDU(1U)
+
#define S_ENABLEEPCMDAFULL 23
#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
+#define S_TXDATAACKRATEENABLE 21
+#define V_TXDATAACKRATEENABLE(x) ((x) << S_TXDATAACKRATEENABLE)
+#define F_TXDATAACKRATEENABLE V_TXDATAACKRATEENABLE(1U)
+
#define S_TXDEFERENABLE 20
#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
+#define S_HEARBEATONCEDACK 18
+#define V_HEARBEATONCEDACK(x) ((x) << S_HEARBEATONCEDACK)
+#define F_HEARBEATONCEDACK V_HEARBEATONCEDACK(1U)
+
+#define S_HEARBEATONCEHEAP 17
+#define V_HEARBEATONCEHEAP(x) ((x) << S_HEARBEATONCEHEAP)
+#define F_HEARBEATONCEHEAP V_HEARBEATONCEHEAP(1U)
+
#define S_HEARBEATDACK 16
#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
#define F_HEARBEATDACK V_HEARBEATDACK(1U)
#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
-#define S_ENABLEOCSPIFULL 30
-#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
-#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
+#define S_ACCEPTLATESTRCVADV 14
+#define V_ACCEPTLATESTRCVADV(x) ((x) << S_ACCEPTLATESTRCVADV)
+#define F_ACCEPTLATESTRCVADV V_ACCEPTLATESTRCVADV(1U)
-#define S_LOCKTID 28
-#define V_LOCKTID(x) ((x) << S_LOCKTID)
-#define F_LOCKTID V_LOCKTID(1U)
+#define S_DISABLESYNDATA 13
+#define V_DISABLESYNDATA(x) ((x) << S_DISABLESYNDATA)
+#define F_DISABLESYNDATA V_DISABLESYNDATA(1U)
+
+#define S_DISABLEWINDOWPSH 12
+#define V_DISABLEWINDOWPSH(x) ((x) << S_DISABLEWINDOWPSH)
+#define F_DISABLEWINDOWPSH V_DISABLEWINDOWPSH(1U)
+
+#define S_DISABLEFINOLDDATA 11
+#define V_DISABLEFINOLDDATA(x) ((x) << S_DISABLEFINOLDDATA)
+#define F_DISABLEFINOLDDATA V_DISABLEFINOLDDATA(1U)
+
+#define S_ENABLEFLMERROR 10
+#define V_ENABLEFLMERROR(x) ((x) << S_ENABLEFLMERROR)
+#define F_ENABLEFLMERROR V_ENABLEFLMERROR(1U)
+
+#define S_DISABLENEXTMTU 9
+#define V_DISABLENEXTMTU(x) ((x) << S_DISABLENEXTMTU)
+#define F_DISABLENEXTMTU V_DISABLENEXTMTU(1U)
+
+#define S_FILTERPEERFIN 8
+#define V_FILTERPEERFIN(x) ((x) << S_FILTERPEERFIN)
+#define F_FILTERPEERFIN V_FILTERPEERFIN(1U)
+
+#define S_ENABLEFEEDBACKSEND 7
+#define V_ENABLEFEEDBACKSEND(x) ((x) << S_ENABLEFEEDBACKSEND)
+#define F_ENABLEFEEDBACKSEND V_ENABLEFEEDBACKSEND(1U)
+
+#define S_ENABLERDMAERROR 6
+#define V_ENABLERDMAERROR(x) ((x) << S_ENABLERDMAERROR)
+#define F_ENABLERDMAERROR V_ENABLERDMAERROR(1U)
+
+#define S_ENABLEDDPFLOWCONTROL 5
+#define V_ENABLEDDPFLOWCONTROL(x) ((x) << S_ENABLEDDPFLOWCONTROL)
+#define F_ENABLEDDPFLOWCONTROL V_ENABLEDDPFLOWCONTROL(1U)
+
+#define S_DISABLEHELDFIN 4
+#define V_DISABLEHELDFIN(x) ((x) << S_DISABLEHELDFIN)
+#define F_DISABLEHELDFIN V_DISABLEHELDFIN(1U)
#define S_TABLELATENCYDELTA 0
#define M_TABLELATENCYDELTA 0xf
#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA)
#define G_TABLELATENCYDELTA(x) \
	(((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA)
#define A_TP_PC_CONFIG2 0x34c
#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS)
#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U)
+#define S_ENABLEDROPRQEMPTYPKT 10
+#define V_ENABLEDROPRQEMPTYPKT(x) ((x) << S_ENABLEDROPRQEMPTYPKT)
+#define F_ENABLEDROPRQEMPTYPKT V_ENABLEDROPRQEMPTYPKT(1U)
+
+#define S_ENABLETXPORTFROMDA2 9
+#define V_ENABLETXPORTFROMDA2(x) ((x) << S_ENABLETXPORTFROMDA2)
+#define F_ENABLETXPORTFROMDA2 V_ENABLETXPORTFROMDA2(1U)
+
+#define S_ENABLERXPKTTMSTPRSS 8
+#define V_ENABLERXPKTTMSTPRSS(x) ((x) << S_ENABLERXPKTTMSTPRSS)
+#define F_ENABLERXPKTTMSTPRSS V_ENABLERXPKTTMSTPRSS(1U)
+
+#define S_ENABLESNDUNAINRXDATA 7
+#define V_ENABLESNDUNAINRXDATA(x) ((x) << S_ENABLESNDUNAINRXDATA)
+#define F_ENABLESNDUNAINRXDATA V_ENABLESNDUNAINRXDATA(1U)
+
+#define S_ENABLERXPORTFROMADDR 6
+#define V_ENABLERXPORTFROMADDR(x) ((x) << S_ENABLERXPORTFROMADDR)
+#define F_ENABLERXPORTFROMADDR V_ENABLERXPORTFROMADDR(1U)
+
+#define S_ENABLETXPORTFROMDA 5
+#define V_ENABLETXPORTFROMDA(x) ((x) << S_ENABLETXPORTFROMDA)
+#define F_ENABLETXPORTFROMDA V_ENABLETXPORTFROMDA(1U)
+
+#define S_ENABLECHDRAFULL 4
+#define V_ENABLECHDRAFULL(x) ((x) << S_ENABLECHDRAFULL)
+#define F_ENABLECHDRAFULL V_ENABLECHDRAFULL(1U)
+
+#define S_ENABLENONOFDSCBBIT 3
+#define V_ENABLENONOFDSCBBIT(x) ((x) << S_ENABLENONOFDSCBBIT)
+#define F_ENABLENONOFDSCBBIT V_ENABLENONOFDSCBBIT(1U)
+
+#define S_ENABLENONOFDTIDRSS 2
+#define V_ENABLENONOFDTIDRSS(x) ((x) << S_ENABLENONOFDTIDRSS)
+#define F_ENABLENONOFDTIDRSS V_ENABLENONOFDTIDRSS(1U)
+
+#define S_ENABLENONOFDTCBRSS 1
+#define V_ENABLENONOFDTCBRSS(x) ((x) << S_ENABLENONOFDTCBRSS)
+#define F_ENABLENONOFDTCBRSS V_ENABLENONOFDTCBRSS(1U)
+
+#define S_ENABLEOLDRXFORWARD 0
+#define V_ENABLEOLDRXFORWARD(x) ((x) << S_ENABLEOLDRXFORWARD)
+#define F_ENABLEOLDRXFORWARD V_ENABLEOLDRXFORWARD(1U)
+
#define S_CHDRAFULL 4
#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
#define F_CHDRAFULL V_CHDRAFULL(1U)
#define A_TP_TCP_BACKOFF_REG0 0x350
+#define S_TIMERBACKOFFINDEX3 24
+#define M_TIMERBACKOFFINDEX3 0xff
+#define V_TIMERBACKOFFINDEX3(x) ((x) << S_TIMERBACKOFFINDEX3)
+#define G_TIMERBACKOFFINDEX3(x) (((x) >> S_TIMERBACKOFFINDEX3) & M_TIMERBACKOFFINDEX3)
+
+#define S_TIMERBACKOFFINDEX2 16
+#define M_TIMERBACKOFFINDEX2 0xff
+#define V_TIMERBACKOFFINDEX2(x) ((x) << S_TIMERBACKOFFINDEX2)
+#define G_TIMERBACKOFFINDEX2(x) (((x) >> S_TIMERBACKOFFINDEX2) & M_TIMERBACKOFFINDEX2)
+
+#define S_TIMERBACKOFFINDEX1 8
+#define M_TIMERBACKOFFINDEX1 0xff
+#define V_TIMERBACKOFFINDEX1(x) ((x) << S_TIMERBACKOFFINDEX1)
+#define G_TIMERBACKOFFINDEX1(x) (((x) >> S_TIMERBACKOFFINDEX1) & M_TIMERBACKOFFINDEX1)
+
+#define S_TIMERBACKOFFINDEX0 0
+#define M_TIMERBACKOFFINDEX0 0xff
+#define V_TIMERBACKOFFINDEX0(x) ((x) << S_TIMERBACKOFFINDEX0)
+#define G_TIMERBACKOFFINDEX0(x) (((x) >> S_TIMERBACKOFFINDEX0) & M_TIMERBACKOFFINDEX0)
+
#define A_TP_TCP_BACKOFF_REG1 0x354
+#define S_TIMERBACKOFFINDEX7 24
+#define M_TIMERBACKOFFINDEX7 0xff
+#define V_TIMERBACKOFFINDEX7(x) ((x) << S_TIMERBACKOFFINDEX7)
+#define G_TIMERBACKOFFINDEX7(x) (((x) >> S_TIMERBACKOFFINDEX7) & M_TIMERBACKOFFINDEX7)
+
+#define S_TIMERBACKOFFINDEX6 16
+#define M_TIMERBACKOFFINDEX6 0xff
+#define V_TIMERBACKOFFINDEX6(x) ((x) << S_TIMERBACKOFFINDEX6)
+#define G_TIMERBACKOFFINDEX6(x) (((x) >> S_TIMERBACKOFFINDEX6) & M_TIMERBACKOFFINDEX6)
+
+#define S_TIMERBACKOFFINDEX5 8
+#define M_TIMERBACKOFFINDEX5 0xff
+#define V_TIMERBACKOFFINDEX5(x) ((x) << S_TIMERBACKOFFINDEX5)
+#define G_TIMERBACKOFFINDEX5(x) (((x) >> S_TIMERBACKOFFINDEX5) & M_TIMERBACKOFFINDEX5)
+
+#define S_TIMERBACKOFFINDEX4 0
+#define M_TIMERBACKOFFINDEX4 0xff
+#define V_TIMERBACKOFFINDEX4(x) ((x) << S_TIMERBACKOFFINDEX4)
+#define G_TIMERBACKOFFINDEX4(x) (((x) >> S_TIMERBACKOFFINDEX4) & M_TIMERBACKOFFINDEX4)
+
#define A_TP_TCP_BACKOFF_REG2 0x358
+#define S_TIMERBACKOFFINDEX11 24
+#define M_TIMERBACKOFFINDEX11 0xff
+#define V_TIMERBACKOFFINDEX11(x) ((x) << S_TIMERBACKOFFINDEX11)
+#define G_TIMERBACKOFFINDEX11(x) (((x) >> S_TIMERBACKOFFINDEX11) & M_TIMERBACKOFFINDEX11)
+
+#define S_TIMERBACKOFFINDEX10 16
+#define M_TIMERBACKOFFINDEX10 0xff
+#define V_TIMERBACKOFFINDEX10(x) ((x) << S_TIMERBACKOFFINDEX10)
+#define G_TIMERBACKOFFINDEX10(x) (((x) >> S_TIMERBACKOFFINDEX10) & M_TIMERBACKOFFINDEX10)
+
+#define S_TIMERBACKOFFINDEX9 8
+#define M_TIMERBACKOFFINDEX9 0xff
+#define V_TIMERBACKOFFINDEX9(x) ((x) << S_TIMERBACKOFFINDEX9)
+#define G_TIMERBACKOFFINDEX9(x) (((x) >> S_TIMERBACKOFFINDEX9) & M_TIMERBACKOFFINDEX9)
+
+#define S_TIMERBACKOFFINDEX8 0
+#define M_TIMERBACKOFFINDEX8 0xff
+#define V_TIMERBACKOFFINDEX8(x) ((x) << S_TIMERBACKOFFINDEX8)
+#define G_TIMERBACKOFFINDEX8(x) (((x) >> S_TIMERBACKOFFINDEX8) & M_TIMERBACKOFFINDEX8)
+
#define A_TP_TCP_BACKOFF_REG3 0x35c
+#define S_TIMERBACKOFFINDEX15 24
+#define M_TIMERBACKOFFINDEX15 0xff
+#define V_TIMERBACKOFFINDEX15(x) ((x) << S_TIMERBACKOFFINDEX15)
+#define G_TIMERBACKOFFINDEX15(x) (((x) >> S_TIMERBACKOFFINDEX15) & M_TIMERBACKOFFINDEX15)
+
+#define S_TIMERBACKOFFINDEX14 16
+#define M_TIMERBACKOFFINDEX14 0xff
+#define V_TIMERBACKOFFINDEX14(x) ((x) << S_TIMERBACKOFFINDEX14)
+#define G_TIMERBACKOFFINDEX14(x) (((x) >> S_TIMERBACKOFFINDEX14) & M_TIMERBACKOFFINDEX14)
+
+#define S_TIMERBACKOFFINDEX13 8
+#define M_TIMERBACKOFFINDEX13 0xff
+#define V_TIMERBACKOFFINDEX13(x) ((x) << S_TIMERBACKOFFINDEX13)
+#define G_TIMERBACKOFFINDEX13(x) (((x) >> S_TIMERBACKOFFINDEX13) & M_TIMERBACKOFFINDEX13)
+
+#define S_TIMERBACKOFFINDEX12 0
+#define M_TIMERBACKOFFINDEX12 0xff
+#define V_TIMERBACKOFFINDEX12(x) ((x) << S_TIMERBACKOFFINDEX12)
+#define G_TIMERBACKOFFINDEX12(x) (((x) >> S_TIMERBACKOFFINDEX12) & M_TIMERBACKOFFINDEX12)
+
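+/*
+ * BACKOFF_REG0..REG3 together hold the 16 TCP retransmit backoff table
+ * indices, packed four 8-bit entries per register (index N lives in byte
+ * N % 4 of register N / 4).
+ */
+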
+#define A_TP_PARA_REG0 0x360
+
+#define S_INITCWND 24
+#define M_INITCWND 0x7
+#define V_INITCWND(x) ((x) << S_INITCWND)
+#define G_INITCWND(x) (((x) >> S_INITCWND) & M_INITCWND)
+
+#define S_DUPACKTHRESH 20
+#define M_DUPACKTHRESH 0xf
+#define V_DUPACKTHRESH(x) ((x) << S_DUPACKTHRESH)
+#define G_DUPACKTHRESH(x) (((x) >> S_DUPACKTHRESH) & M_DUPACKTHRESH)
+
+#define A_TP_PARA_REG1 0x364
+
+#define S_INITRWND 16
+#define M_INITRWND 0xffff
+#define V_INITRWND(x) ((x) << S_INITRWND)
+#define G_INITRWND(x) (((x) >> S_INITRWND) & M_INITRWND)
+
+#define S_INITIALSSTHRESH 0
+#define M_INITIALSSTHRESH 0xffff
+#define V_INITIALSSTHRESH(x) ((x) << S_INITIALSSTHRESH)
+#define G_INITIALSSTHRESH(x) (((x) >> S_INITIALSSTHRESH) & M_INITIALSSTHRESH)
+
#define A_TP_PARA_REG2 0x368
#define S_MAXRXDATA 16
#define M_MAXRXDATA 0xffff
#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
+#define G_MAXRXDATA(x) (((x) >> S_MAXRXDATA) & M_MAXRXDATA)
#define S_RXCOALESCESIZE 0
#define M_RXCOALESCESIZE 0xffff
#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
+#define G_RXCOALESCESIZE(x) (((x) >> S_RXCOALESCESIZE) & M_RXCOALESCESIZE)
#define A_TP_PARA_REG3 0x36c
+#define S_TUNNELCNGDROP1 21
+#define V_TUNNELCNGDROP1(x) ((x) << S_TUNNELCNGDROP1)
+#define F_TUNNELCNGDROP1 V_TUNNELCNGDROP1(1U)
+
+#define S_TUNNELCNGDROP0 20
+#define V_TUNNELCNGDROP0(x) ((x) << S_TUNNELCNGDROP0)
+#define F_TUNNELCNGDROP0 V_TUNNELCNGDROP0(1U)
+
#define S_TXDATAACKIDX 16
#define M_TXDATAACKIDX 0xf
+#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
+#define G_TXDATAACKIDX(x) (((x) >> S_TXDATAACKIDX) & M_TXDATAACKIDX)
+
+#define S_RXFRAGENABLE 12
+#define M_RXFRAGENABLE 0x7
+#define V_RXFRAGENABLE(x) ((x) << S_RXFRAGENABLE)
+#define G_RXFRAGENABLE(x) (((x) >> S_RXFRAGENABLE) & M_RXFRAGENABLE)
+
+#define S_TXPACEFIXEDSTRICT 11
+#define V_TXPACEFIXEDSTRICT(x) ((x) << S_TXPACEFIXEDSTRICT)
+#define F_TXPACEFIXEDSTRICT V_TXPACEFIXEDSTRICT(1U)
+
+#define S_TXPACEAUTOSTRICT 10
+#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
+#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
+
+#define S_TXPACEFIXED 9
+#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
+#define F_TXPACEFIXED V_TXPACEFIXED(1U)
+
+#define S_TXPACEAUTO 8
+#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
+#define F_TXPACEAUTO V_TXPACEAUTO(1U)
+
+#define S_RXURGTUNNEL 6
+#define V_RXURGTUNNEL(x) ((x) << S_RXURGTUNNEL)
+#define F_RXURGTUNNEL V_RXURGTUNNEL(1U)
+
+#define S_RXURGMODE 5
+#define V_RXURGMODE(x) ((x) << S_RXURGMODE)
+#define F_RXURGMODE V_RXURGMODE(1U)
+
+#define S_TXURGMODE 4
+#define V_TXURGMODE(x) ((x) << S_TXURGMODE)
+#define F_TXURGMODE V_TXURGMODE(1U)
+
+#define S_CNGCTRLMODE 2
+#define M_CNGCTRLMODE 0x3
+#define V_CNGCTRLMODE(x) ((x) << S_CNGCTRLMODE)
+#define G_CNGCTRLMODE(x) (((x) >> S_CNGCTRLMODE) & M_CNGCTRLMODE)
+
+#define S_RXCOALESCEENABLE 1
+#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
+#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
+
+#define S_RXCOALESCEPSHEN 0
+#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
+#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
+
+#define A_TP_PARA_REG4 0x370
+
+#define S_HIGHSPEEDCFG 24
+#define M_HIGHSPEEDCFG 0xff
+#define V_HIGHSPEEDCFG(x) ((x) << S_HIGHSPEEDCFG)
+#define G_HIGHSPEEDCFG(x) (((x) >> S_HIGHSPEEDCFG) & M_HIGHSPEEDCFG)
+
+#define S_NEWRENOCFG 16
+#define M_NEWRENOCFG 0xff
+#define V_NEWRENOCFG(x) ((x) << S_NEWRENOCFG)
+#define G_NEWRENOCFG(x) (((x) >> S_NEWRENOCFG) & M_NEWRENOCFG)
+
+#define S_TAHOECFG 8
+#define M_TAHOECFG 0xff
+#define V_TAHOECFG(x) ((x) << S_TAHOECFG)
+#define G_TAHOECFG(x) (((x) >> S_TAHOECFG) & M_TAHOECFG)
+
+#define S_RENOCFG 0
+#define M_RENOCFG 0xff
+#define V_RENOCFG(x) ((x) << S_RENOCFG)
+#define G_RENOCFG(x) (((x) >> S_RENOCFG) & M_RENOCFG)
+
+#define A_TP_PARA_REG5 0x374
+
+#define S_INDICATESIZE 16
+#define M_INDICATESIZE 0xffff
+#define V_INDICATESIZE(x) ((x) << S_INDICATESIZE)
+#define G_INDICATESIZE(x) (((x) >> S_INDICATESIZE) & M_INDICATESIZE)
+
+#define S_SCHDENABLE 8
+#define V_SCHDENABLE(x) ((x) << S_SCHDENABLE)
+#define F_SCHDENABLE V_SCHDENABLE(1U)
+
+#define S_RXDDPOFFINIT 3
+#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT)
+#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U)
+
+#define S_ONFLYDDPENABLE 2
+#define V_ONFLYDDPENABLE(x) ((x) << S_ONFLYDDPENABLE)
+#define F_ONFLYDDPENABLE V_ONFLYDDPENABLE(1U)
+
+#define S_DACKTIMERSPIN 1
+#define V_DACKTIMERSPIN(x) ((x) << S_DACKTIMERSPIN)
+#define F_DACKTIMERSPIN V_DACKTIMERSPIN(1U)
+
+#define S_PUSHTIMERENABLE 0
+#define V_PUSHTIMERENABLE(x) ((x) << S_PUSHTIMERENABLE)
+#define F_PUSHTIMERENABLE V_PUSHTIMERENABLE(1U)
+
+#define A_TP_PARA_REG6 0x378
+
+#define S_TXPDUSIZEADJ 16
+#define M_TXPDUSIZEADJ 0xff
+#define V_TXPDUSIZEADJ(x) ((x) << S_TXPDUSIZEADJ)
+#define G_TXPDUSIZEADJ(x) (((x) >> S_TXPDUSIZEADJ) & M_TXPDUSIZEADJ)
+
+#define S_ENABLEDEFERACK 12
+#define V_ENABLEDEFERACK(x) ((x) << S_ENABLEDEFERACK)
+#define F_ENABLEDEFERACK V_ENABLEDEFERACK(1U)
+
+#define S_ENABLEESND 11
+#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
+#define F_ENABLEESND V_ENABLEESND(1U)
+
+#define S_ENABLECSND 10
+#define V_ENABLECSND(x) ((x) << S_ENABLECSND)
+#define F_ENABLECSND V_ENABLECSND(1U)
-#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
+#define S_ENABLEPDUE 9
+#define V_ENABLEPDUE(x) ((x) << S_ENABLEPDUE)
+#define F_ENABLEPDUE V_ENABLEPDUE(1U)
-#define S_TXPACEAUTOSTRICT 10
-#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
-#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
+#define S_ENABLEPDUC 8
+#define V_ENABLEPDUC(x) ((x) << S_ENABLEPDUC)
+#define F_ENABLEPDUC V_ENABLEPDUC(1U)
-#define S_TXPACEFIXED 9
-#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
-#define F_TXPACEFIXED V_TXPACEFIXED(1U)
+#define S_ENABLEBUFI 7
+#define V_ENABLEBUFI(x) ((x) << S_ENABLEBUFI)
+#define F_ENABLEBUFI V_ENABLEBUFI(1U)
-#define S_TXPACEAUTO 8
-#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
-#define F_TXPACEAUTO V_TXPACEAUTO(1U)
+#define S_ENABLEBUFE 6
+#define V_ENABLEBUFE(x) ((x) << S_ENABLEBUFE)
+#define F_ENABLEBUFE V_ENABLEBUFE(1U)
-#define S_RXCOALESCEENABLE 1
-#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
-#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
+#define S_ENABLEDEFER 5
+#define V_ENABLEDEFER(x) ((x) << S_ENABLEDEFER)
+#define F_ENABLEDEFER V_ENABLEDEFER(1U)
-#define S_RXCOALESCEPSHEN 0
-#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
-#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
+#define S_ENABLECLEARRXMTOOS 4
+#define V_ENABLECLEARRXMTOOS(x) ((x) << S_ENABLECLEARRXMTOOS)
+#define F_ENABLECLEARRXMTOOS V_ENABLECLEARRXMTOOS(1U)
-#define A_TP_PARA_REG4 0x370
+#define S_DISABLEPDUCNG 3
+#define V_DISABLEPDUCNG(x) ((x) << S_DISABLEPDUCNG)
+#define F_DISABLEPDUCNG V_DISABLEPDUCNG(1U)
-#define A_TP_PARA_REG5 0x374
+#define S_DISABLEPDUTIMEOUT 2
+#define V_DISABLEPDUTIMEOUT(x) ((x) << S_DISABLEPDUTIMEOUT)
+#define F_DISABLEPDUTIMEOUT V_DISABLEPDUTIMEOUT(1U)
-#define S_RXDDPOFFINIT 3
-#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT)
-#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U)
+#define S_DISABLEPDURXMT 1
+#define V_DISABLEPDURXMT(x) ((x) << S_DISABLEPDURXMT)
+#define F_DISABLEPDURXMT V_DISABLEPDURXMT(1U)
-#define A_TP_PARA_REG6 0x378
+#define S_DISABLEPDUXMT 0
+#define V_DISABLEPDUXMT(x) ((x) << S_DISABLEPDUXMT)
+#define F_DISABLEPDUXMT V_DISABLEPDUXMT(1U)
+
+#define S_ENABLEEPDU 14
+#define V_ENABLEEPDU(x) ((x) << S_ENABLEEPDU)
+#define F_ENABLEEPDU V_ENABLEEPDU(1U)
#define S_T3A_ENABLEESND 13
#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
-#define S_ENABLEESND 11
-#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
-#define F_ENABLEESND V_ENABLEESND(1U)
+#define S_T3A_ENABLECSND 12
+#define V_T3A_ENABLECSND(x) ((x) << S_T3A_ENABLECSND)
+#define F_T3A_ENABLECSND V_T3A_ENABLECSND(1U)
+
+#define S_T3A_ENABLEDEFERACK 9
+#define V_T3A_ENABLEDEFERACK(x) ((x) << S_T3A_ENABLEDEFERACK)
+#define F_T3A_ENABLEDEFERACK V_T3A_ENABLEDEFERACK(1U)
+
+#define S_ENABLEPDUI 7
+#define V_ENABLEPDUI(x) ((x) << S_ENABLEPDUI)
+#define F_ENABLEPDUI V_ENABLEPDUI(1U)
+
+#define S_T3A_ENABLEPDUE 6
+#define V_T3A_ENABLEPDUE(x) ((x) << S_T3A_ENABLEPDUE)
+#define F_T3A_ENABLEPDUE V_T3A_ENABLEPDUE(1U)
#define A_TP_PARA_REG7 0x37c
#define S_PMMAXXFERLEN1 16
#define M_PMMAXXFERLEN1 0xffff
#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
+#define G_PMMAXXFERLEN1(x) (((x) >> S_PMMAXXFERLEN1) & M_PMMAXXFERLEN1)
#define S_PMMAXXFERLEN0 0
#define M_PMMAXXFERLEN0 0xffff
#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
+#define G_PMMAXXFERLEN0(x) (((x) >> S_PMMAXXFERLEN0) & M_PMMAXXFERLEN0)
#define A_TP_TIMER_RESOLUTION 0x390
#define S_TIMERRESOLUTION 16
#define M_TIMERRESOLUTION 0xff
#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION)
#define S_TIMESTAMPRESOLUTION 8
#define M_TIMESTAMPRESOLUTION 0xff
#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
+#define G_TIMESTAMPRESOLUTION(x) (((x) >> S_TIMESTAMPRESOLUTION) & M_TIMESTAMPRESOLUTION)
#define S_DELAYEDACKRESOLUTION 0
#define M_DELAYEDACKRESOLUTION 0xff
#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & M_DELAYEDACKRESOLUTION)
#define A_TP_MSL 0x394
+#define S_MSL 0
+#define M_MSL 0x3fffffff
+#define V_MSL(x) ((x) << S_MSL)
+#define G_MSL(x) (((x) >> S_MSL) & M_MSL)
+
#define A_TP_RXT_MIN 0x398
+#define S_RXTMIN 0
+#define M_RXTMIN 0x3fffffff
+#define V_RXTMIN(x) ((x) << S_RXTMIN)
+#define G_RXTMIN(x) (((x) >> S_RXTMIN) & M_RXTMIN)
+
#define A_TP_RXT_MAX 0x39c
+#define S_RXTMAX 0
+#define M_RXTMAX 0x3fffffff
+#define V_RXTMAX(x) ((x) << S_RXTMAX)
+#define G_RXTMAX(x) (((x) >> S_RXTMAX) & M_RXTMAX)
+
#define A_TP_PERS_MIN 0x3a0
+#define S_PERSMIN 0
+#define M_PERSMIN 0x3fffffff
+#define V_PERSMIN(x) ((x) << S_PERSMIN)
+#define G_PERSMIN(x) (((x) >> S_PERSMIN) & M_PERSMIN)
+
#define A_TP_PERS_MAX 0x3a4
+#define S_PERSMAX 0
+#define M_PERSMAX 0x3fffffff
+#define V_PERSMAX(x) ((x) << S_PERSMAX)
+#define G_PERSMAX(x) (((x) >> S_PERSMAX) & M_PERSMAX)
+
#define A_TP_KEEP_IDLE 0x3a8
+#define S_KEEPALIVEIDLE 0
+#define M_KEEPALIVEIDLE 0x3fffffff
+#define V_KEEPALIVEIDLE(x) ((x) << S_KEEPALIVEIDLE)
+#define G_KEEPALIVEIDLE(x) (((x) >> S_KEEPALIVEIDLE) & M_KEEPALIVEIDLE)
+
#define A_TP_KEEP_INTVL 0x3ac
+#define S_KEEPALIVEINTVL 0
+#define M_KEEPALIVEINTVL 0x3fffffff
+#define V_KEEPALIVEINTVL(x) ((x) << S_KEEPALIVEINTVL)
+#define G_KEEPALIVEINTVL(x) (((x) >> S_KEEPALIVEINTVL) & M_KEEPALIVEINTVL)
+
#define A_TP_INIT_SRTT 0x3b0
+#define S_INITSRTT 0
+#define M_INITSRTT 0xffff
+#define V_INITSRTT(x) ((x) << S_INITSRTT)
+#define G_INITSRTT(x) (((x) >> S_INITSRTT) & M_INITSRTT)
+
#define A_TP_DACK_TIMER 0x3b4
+#define S_DACKTIME 0
+#define M_DACKTIME 0xfff
+#define V_DACKTIME(x) ((x) << S_DACKTIME)
+#define G_DACKTIME(x) (((x) >> S_DACKTIME) & M_DACKTIME)
+
#define A_TP_FINWAIT2_TIMER 0x3b8
+#define S_FINWAIT2TIME 0
+#define M_FINWAIT2TIME 0x3fffffff
+#define V_FINWAIT2TIME(x) ((x) << S_FINWAIT2TIME)
+#define G_FINWAIT2TIME(x) (((x) >> S_FINWAIT2TIME) & M_FINWAIT2TIME)
+
+#define A_TP_FAST_FINWAIT2_TIMER 0x3bc
+
+#define S_FASTFINWAIT2TIME 0
+#define M_FASTFINWAIT2TIME 0x3fffffff
+#define V_FASTFINWAIT2TIME(x) ((x) << S_FASTFINWAIT2TIME)
+#define G_FASTFINWAIT2TIME(x) (((x) >> S_FASTFINWAIT2TIME) & M_FASTFINWAIT2TIME)
+
#define A_TP_SHIFT_CNT 0x3c0
#define S_SYNSHIFTMAX 24
-
#define M_SYNSHIFTMAX 0xff
-
#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
+#define G_SYNSHIFTMAX(x) (((x) >> S_SYNSHIFTMAX) & M_SYNSHIFTMAX)
#define S_RXTSHIFTMAXR1 20
-
#define M_RXTSHIFTMAXR1 0xf
-
#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
+#define G_RXTSHIFTMAXR1(x) (((x) >> S_RXTSHIFTMAXR1) & M_RXTSHIFTMAXR1)
#define S_RXTSHIFTMAXR2 16
-
#define M_RXTSHIFTMAXR2 0xf
-
#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
+#define G_RXTSHIFTMAXR2(x) (((x) >> S_RXTSHIFTMAXR2) & M_RXTSHIFTMAXR2)
#define S_PERSHIFTBACKOFFMAX 12
#define M_PERSHIFTBACKOFFMAX 0xf
#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
+#define G_PERSHIFTBACKOFFMAX(x) (((x) >> S_PERSHIFTBACKOFFMAX) & M_PERSHIFTBACKOFFMAX)
#define S_PERSHIFTMAX 8
#define M_PERSHIFTMAX 0xf
#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
+#define G_PERSHIFTMAX(x) (((x) >> S_PERSHIFTMAX) & M_PERSHIFTMAX)
#define S_KEEPALIVEMAX 0
-
#define M_KEEPALIVEMAX 0xff
-
#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
+#define G_KEEPALIVEMAX(x) (((x) >> S_KEEPALIVEMAX) & M_KEEPALIVEMAX)
+#define A_TP_TIME_HI 0x3c8
+#define A_TP_TIME_LO 0x3cc
#define A_TP_MTU_PORT_TABLE 0x3d0
+#define S_PORT1MTUVALUE 16
+#define M_PORT1MTUVALUE 0xffff
+#define V_PORT1MTUVALUE(x) ((x) << S_PORT1MTUVALUE)
+#define G_PORT1MTUVALUE(x) (((x) >> S_PORT1MTUVALUE) & M_PORT1MTUVALUE)
+
+#define S_PORT0MTUVALUE 0
+#define M_PORT0MTUVALUE 0xffff
+#define V_PORT0MTUVALUE(x) ((x) << S_PORT0MTUVALUE)
+#define G_PORT0MTUVALUE(x) (((x) >> S_PORT0MTUVALUE) & M_PORT0MTUVALUE)
+
+#define A_TP_ULP_TABLE 0x3d4
+
+#define S_ULPTYPE7FIELD 28
+#define M_ULPTYPE7FIELD 0xf
+#define V_ULPTYPE7FIELD(x) ((x) << S_ULPTYPE7FIELD)
+#define G_ULPTYPE7FIELD(x) (((x) >> S_ULPTYPE7FIELD) & M_ULPTYPE7FIELD)
+
+#define S_ULPTYPE6FIELD 24
+#define M_ULPTYPE6FIELD 0xf
+#define V_ULPTYPE6FIELD(x) ((x) << S_ULPTYPE6FIELD)
+#define G_ULPTYPE6FIELD(x) (((x) >> S_ULPTYPE6FIELD) & M_ULPTYPE6FIELD)
+
+#define S_ULPTYPE5FIELD 20
+#define M_ULPTYPE5FIELD 0xf
+#define V_ULPTYPE5FIELD(x) ((x) << S_ULPTYPE5FIELD)
+#define G_ULPTYPE5FIELD(x) (((x) >> S_ULPTYPE5FIELD) & M_ULPTYPE5FIELD)
+
+#define S_ULPTYPE4FIELD 16
+#define M_ULPTYPE4FIELD 0xf
+#define V_ULPTYPE4FIELD(x) ((x) << S_ULPTYPE4FIELD)
+#define G_ULPTYPE4FIELD(x) (((x) >> S_ULPTYPE4FIELD) & M_ULPTYPE4FIELD)
+
+#define S_ULPTYPE3FIELD 12
+#define M_ULPTYPE3FIELD 0xf
+#define V_ULPTYPE3FIELD(x) ((x) << S_ULPTYPE3FIELD)
+#define G_ULPTYPE3FIELD(x) (((x) >> S_ULPTYPE3FIELD) & M_ULPTYPE3FIELD)
+
+#define S_ULPTYPE2FIELD 8
+#define M_ULPTYPE2FIELD 0xf
+#define V_ULPTYPE2FIELD(x) ((x) << S_ULPTYPE2FIELD)
+#define G_ULPTYPE2FIELD(x) (((x) >> S_ULPTYPE2FIELD) & M_ULPTYPE2FIELD)
+
+#define S_ULPTYPE1FIELD 4
+#define M_ULPTYPE1FIELD 0xf
+#define V_ULPTYPE1FIELD(x) ((x) << S_ULPTYPE1FIELD)
+#define G_ULPTYPE1FIELD(x) (((x) >> S_ULPTYPE1FIELD) & M_ULPTYPE1FIELD)
+
+#define S_ULPTYPE0FIELD 0
+#define M_ULPTYPE0FIELD 0xf
+#define V_ULPTYPE0FIELD(x) ((x) << S_ULPTYPE0FIELD)
+#define G_ULPTYPE0FIELD(x) (((x) >> S_ULPTYPE0FIELD) & M_ULPTYPE0FIELD)
+
+#define A_TP_PACE_TABLE 0x3d8
#define A_TP_CCTRL_TABLE 0x3dc
-
+#define A_TP_TOS_TABLE 0x3e0
#define A_TP_MTU_TABLE 0x3e4
-
#define A_TP_RSS_MAP_TABLE 0x3e8
-
#define A_TP_RSS_LKP_TABLE 0x3ec
-
#define A_TP_RSS_CONFIG 0x3f0
#define S_TNL4TUPEN 29
#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
#define F_TNLLKPEN V_TNLLKPEN(1U)
+#define S_OFD4TUPEN 21
+#define V_OFD4TUPEN(x) ((x) << S_OFD4TUPEN)
+#define F_OFD4TUPEN V_OFD4TUPEN(1U)
+
+#define S_OFD2TUPEN 20
+#define V_OFD2TUPEN(x) ((x) << S_OFD2TUPEN)
+#define F_OFD2TUPEN V_OFD2TUPEN(1U)
+
+#define S_OFDMAPEN 17
+#define V_OFDMAPEN(x) ((x) << S_OFDMAPEN)
+#define F_OFDMAPEN V_OFDMAPEN(1U)
+
+#define S_OFDLKPEN 16
+#define V_OFDLKPEN(x) ((x) << S_OFDLKPEN)
+#define F_OFDLKPEN V_OFDLKPEN(1U)
+
+#define S_SYN4TUPEN 13
+#define V_SYN4TUPEN(x) ((x) << S_SYN4TUPEN)
+#define F_SYN4TUPEN V_SYN4TUPEN(1U)
+
+#define S_SYN2TUPEN 12
+#define V_SYN2TUPEN(x) ((x) << S_SYN2TUPEN)
+#define F_SYN2TUPEN V_SYN2TUPEN(1U)
+
+#define S_SYNMAPEN 9
+#define V_SYNMAPEN(x) ((x) << S_SYNMAPEN)
+#define F_SYNMAPEN V_SYNMAPEN(1U)
+
+#define S_SYNLKPEN 8
+#define V_SYNLKPEN(x) ((x) << S_SYNLKPEN)
+#define F_SYNLKPEN V_SYNLKPEN(1U)
+
#define S_RRCPLMAPEN 7
#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN)
#define F_RRCPLMAPEN V_RRCPLMAPEN(1U)
#define S_RRCPLCPUSIZE 4
#define M_RRCPLCPUSIZE 0x7
#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
+#define G_RRCPLCPUSIZE(x) (((x) >> S_RRCPLCPUSIZE) & M_RRCPLCPUSIZE)
#define S_RQFEEDBACKENABLE 3
#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ)
#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U)
-#define S_DISABLE 0
+#define S_HASHSAVE 1
+#define V_HASHSAVE(x) ((x) << S_HASHSAVE)
+#define F_HASHSAVE V_HASHSAVE(1U)
+#define S_DISABLE 0
+#define V_DISABLE(x) ((x) << S_DISABLE)
+#define F_DISABLE V_DISABLE(1U)
+
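+/*
+ * The paired *4TUPEN/*2TUPEN/*MAPEN/*LKPEN bits above appear to select,
+ * per traffic class (tunnel, offload, SYN), whether the RSS hash covers
+ * the 4-tuple or the 2-tuple and whether the map and lookup tables are
+ * consulted; the CONFIG_TNL/OFD/SYN registers below then supply the
+ * per-class defaults.
+ */
+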
+#define A_TP_RSS_CONFIG_TNL 0x3f4
+
+#define S_MASKSIZE 28
+#define M_MASKSIZE 0x7
+#define V_MASKSIZE(x) ((x) << S_MASKSIZE)
+#define G_MASKSIZE(x) (((x) >> S_MASKSIZE) & M_MASKSIZE)
+
+#define S_DEFAULTCPUBASE 22
+#define M_DEFAULTCPUBASE 0x3f
+#define V_DEFAULTCPUBASE(x) ((x) << S_DEFAULTCPUBASE)
+#define G_DEFAULTCPUBASE(x) (((x) >> S_DEFAULTCPUBASE) & M_DEFAULTCPUBASE)
+
+#define S_DEFAULTCPU 16
+#define M_DEFAULTCPU 0x3f
+#define V_DEFAULTCPU(x) ((x) << S_DEFAULTCPU)
+#define G_DEFAULTCPU(x) (((x) >> S_DEFAULTCPU) & M_DEFAULTCPU)
+
+#define S_DEFAULTQUEUE 0
+#define M_DEFAULTQUEUE 0xffff
+#define V_DEFAULTQUEUE(x) ((x) << S_DEFAULTQUEUE)
+#define G_DEFAULTQUEUE(x) (((x) >> S_DEFAULTQUEUE) & M_DEFAULTQUEUE)
+
+#define A_TP_RSS_CONFIG_OFD 0x3f8
+#define A_TP_RSS_CONFIG_SYN 0x3fc
+#define A_TP_RSS_SECRET_KEY0 0x400
+#define A_TP_RSS_SECRET_KEY1 0x404
+#define A_TP_RSS_SECRET_KEY2 0x408
+#define A_TP_RSS_SECRET_KEY3 0x40c
#define A_TP_TM_PIO_ADDR 0x418
-
#define A_TP_TM_PIO_DATA 0x41c
-
#define A_TP_TX_MOD_QUE_TABLE 0x420
-
#define A_TP_TX_RESOURCE_LIMIT 0x424
+#define S_TX_RESOURCE_LIMIT_CH1_PC 24
+#define M_TX_RESOURCE_LIMIT_CH1_PC 0xff
+#define V_TX_RESOURCE_LIMIT_CH1_PC(x) ((x) << S_TX_RESOURCE_LIMIT_CH1_PC)
+#define G_TX_RESOURCE_LIMIT_CH1_PC(x) (((x) >> S_TX_RESOURCE_LIMIT_CH1_PC) & M_TX_RESOURCE_LIMIT_CH1_PC)
+
+#define S_TX_RESOURCE_LIMIT_CH1_NON_PC 16
+#define M_TX_RESOURCE_LIMIT_CH1_NON_PC 0xff
+#define V_TX_RESOURCE_LIMIT_CH1_NON_PC(x) ((x) << S_TX_RESOURCE_LIMIT_CH1_NON_PC)
+#define G_TX_RESOURCE_LIMIT_CH1_NON_PC(x) (((x) >> S_TX_RESOURCE_LIMIT_CH1_NON_PC) & M_TX_RESOURCE_LIMIT_CH1_NON_PC)
+
+#define S_TX_RESOURCE_LIMIT_CH0_PC 8
+#define M_TX_RESOURCE_LIMIT_CH0_PC 0xff
+#define V_TX_RESOURCE_LIMIT_CH0_PC(x) ((x) << S_TX_RESOURCE_LIMIT_CH0_PC)
+#define G_TX_RESOURCE_LIMIT_CH0_PC(x) (((x) >> S_TX_RESOURCE_LIMIT_CH0_PC) & M_TX_RESOURCE_LIMIT_CH0_PC)
+
+#define S_TX_RESOURCE_LIMIT_CH0_NON_PC 0
+#define M_TX_RESOURCE_LIMIT_CH0_NON_PC 0xff
+#define V_TX_RESOURCE_LIMIT_CH0_NON_PC(x) ((x) << S_TX_RESOURCE_LIMIT_CH0_NON_PC)
+#define G_TX_RESOURCE_LIMIT_CH0_NON_PC(x) (((x) >> S_TX_RESOURCE_LIMIT_CH0_NON_PC) & M_TX_RESOURCE_LIMIT_CH0_NON_PC)
+
#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
+#define S_RX_MOD_WEIGHT 24
+#define M_RX_MOD_WEIGHT 0xff
+#define V_RX_MOD_WEIGHT(x) ((x) << S_RX_MOD_WEIGHT)
+#define G_RX_MOD_WEIGHT(x) (((x) >> S_RX_MOD_WEIGHT) & M_RX_MOD_WEIGHT)
+
+#define S_TX_MOD_WEIGHT 16
+#define M_TX_MOD_WEIGHT 0xff
+#define V_TX_MOD_WEIGHT(x) ((x) << S_TX_MOD_WEIGHT)
+#define G_TX_MOD_WEIGHT(x) (((x) >> S_TX_MOD_WEIGHT) & M_TX_MOD_WEIGHT)
+
+#define S_TX_MOD_TIMER_MODE 8
+#define M_TX_MOD_TIMER_MODE 0xff
+#define V_TX_MOD_TIMER_MODE(x) ((x) << S_TX_MOD_TIMER_MODE)
+#define G_TX_MOD_TIMER_MODE(x) (((x) >> S_TX_MOD_TIMER_MODE) & M_TX_MOD_TIMER_MODE)
+
#define S_TX_MOD_QUEUE_REQ_MAP 0
#define M_TX_MOD_QUEUE_REQ_MAP 0xff
#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+#define G_TX_MOD_QUEUE_REQ_MAP(x) (((x) >> S_TX_MOD_QUEUE_REQ_MAP) & M_TX_MOD_QUEUE_REQ_MAP)
#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
+#define S_TP_TX_MODQ_WGHT7 24
+#define M_TP_TX_MODQ_WGHT7 0xff
+#define V_TP_TX_MODQ_WGHT7(x) ((x) << S_TP_TX_MODQ_WGHT7)
+#define G_TP_TX_MODQ_WGHT7(x) (((x) >> S_TP_TX_MODQ_WGHT7) & M_TP_TX_MODQ_WGHT7)
+
+#define S_TP_TX_MODQ_WGHT6 16
+#define M_TP_TX_MODQ_WGHT6 0xff
+#define V_TP_TX_MODQ_WGHT6(x) ((x) << S_TP_TX_MODQ_WGHT6)
+#define G_TP_TX_MODQ_WGHT6(x) (((x) >> S_TP_TX_MODQ_WGHT6) & M_TP_TX_MODQ_WGHT6)
+
+#define S_TP_TX_MODQ_WGHT5 8
+#define M_TP_TX_MODQ_WGHT5 0xff
+#define V_TP_TX_MODQ_WGHT5(x) ((x) << S_TP_TX_MODQ_WGHT5)
+#define G_TP_TX_MODQ_WGHT5(x) (((x) >> S_TP_TX_MODQ_WGHT5) & M_TP_TX_MODQ_WGHT5)
+
+#define S_TP_TX_MODQ_WGHT4 0
+#define M_TP_TX_MODQ_WGHT4 0xff
+#define V_TP_TX_MODQ_WGHT4(x) ((x) << S_TP_TX_MODQ_WGHT4)
+#define G_TP_TX_MODQ_WGHT4(x) (((x) >> S_TP_TX_MODQ_WGHT4) & M_TP_TX_MODQ_WGHT4)
+
#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
+#define S_TP_TX_MODQ_WGHT3 24
+#define M_TP_TX_MODQ_WGHT3 0xff
+#define V_TP_TX_MODQ_WGHT3(x) ((x) << S_TP_TX_MODQ_WGHT3)
+#define G_TP_TX_MODQ_WGHT3(x) (((x) >> S_TP_TX_MODQ_WGHT3) & M_TP_TX_MODQ_WGHT3)
+
+#define S_TP_TX_MODQ_WGHT2 16
+#define M_TP_TX_MODQ_WGHT2 0xff
+#define V_TP_TX_MODQ_WGHT2(x) ((x) << S_TP_TX_MODQ_WGHT2)
+#define G_TP_TX_MODQ_WGHT2(x) (((x) >> S_TP_TX_MODQ_WGHT2) & M_TP_TX_MODQ_WGHT2)
+
+#define S_TP_TX_MODQ_WGHT1 8
+#define M_TP_TX_MODQ_WGHT1 0xff
+#define V_TP_TX_MODQ_WGHT1(x) ((x) << S_TP_TX_MODQ_WGHT1)
+#define G_TP_TX_MODQ_WGHT1(x) (((x) >> S_TP_TX_MODQ_WGHT1) & M_TP_TX_MODQ_WGHT1)
+
+#define S_TP_TX_MODQ_WGHT0 0
+#define M_TP_TX_MODQ_WGHT0 0xff
+#define V_TP_TX_MODQ_WGHT0(x) ((x) << S_TP_TX_MODQ_WGHT0)
+#define G_TP_TX_MODQ_WGHT0(x) (((x) >> S_TP_TX_MODQ_WGHT0) & M_TP_TX_MODQ_WGHT0)
+
#define A_TP_MOD_CHANNEL_WEIGHT 0x434
+#define S_RX_MOD_CHANNEL_WEIGHT1 24
+#define M_RX_MOD_CHANNEL_WEIGHT1 0xff
+#define V_RX_MOD_CHANNEL_WEIGHT1(x) ((x) << S_RX_MOD_CHANNEL_WEIGHT1)
+#define G_RX_MOD_CHANNEL_WEIGHT1(x) (((x) >> S_RX_MOD_CHANNEL_WEIGHT1) & M_RX_MOD_CHANNEL_WEIGHT1)
+
+#define S_RX_MOD_CHANNEL_WEIGHT0 16
+#define M_RX_MOD_CHANNEL_WEIGHT0 0xff
+#define V_RX_MOD_CHANNEL_WEIGHT0(x) ((x) << S_RX_MOD_CHANNEL_WEIGHT0)
+#define G_RX_MOD_CHANNEL_WEIGHT0(x) (((x) >> S_RX_MOD_CHANNEL_WEIGHT0) & M_RX_MOD_CHANNEL_WEIGHT0)
+
+#define S_TX_MOD_CHANNEL_WEIGHT1 8
+#define M_TX_MOD_CHANNEL_WEIGHT1 0xff
+#define V_TX_MOD_CHANNEL_WEIGHT1(x) ((x) << S_TX_MOD_CHANNEL_WEIGHT1)
+#define G_TX_MOD_CHANNEL_WEIGHT1(x) (((x) >> S_TX_MOD_CHANNEL_WEIGHT1) & M_TX_MOD_CHANNEL_WEIGHT1)
+
+#define S_TX_MOD_CHANNEL_WEIGHT0 0
+#define M_TX_MOD_CHANNEL_WEIGHT0 0xff
+#define V_TX_MOD_CHANNEL_WEIGHT0(x) ((x) << S_TX_MOD_CHANNEL_WEIGHT0)
+#define G_TX_MOD_CHANNEL_WEIGHT0(x) (((x) >> S_TX_MOD_CHANNEL_WEIGHT0) & M_TX_MOD_CHANNEL_WEIGHT0)
+
#define A_TP_MOD_RATE_LIMIT 0x438
-#define A_TP_PIO_ADDR 0x440
+#define S_RX_MOD_RATE_LIMIT_INC 24
+#define M_RX_MOD_RATE_LIMIT_INC 0xff
+#define V_RX_MOD_RATE_LIMIT_INC(x) ((x) << S_RX_MOD_RATE_LIMIT_INC)
+#define G_RX_MOD_RATE_LIMIT_INC(x) (((x) >> S_RX_MOD_RATE_LIMIT_INC) & M_RX_MOD_RATE_LIMIT_INC)
-#define A_TP_PIO_DATA 0x444
+#define S_RX_MOD_RATE_LIMIT_TICK 16
+#define M_RX_MOD_RATE_LIMIT_TICK 0xff
+#define V_RX_MOD_RATE_LIMIT_TICK(x) ((x) << S_RX_MOD_RATE_LIMIT_TICK)
+#define G_RX_MOD_RATE_LIMIT_TICK(x) (((x) >> S_RX_MOD_RATE_LIMIT_TICK) & M_RX_MOD_RATE_LIMIT_TICK)
+
+#define S_TX_MOD_RATE_LIMIT_INC 8
+#define M_TX_MOD_RATE_LIMIT_INC 0xff
+#define V_TX_MOD_RATE_LIMIT_INC(x) ((x) << S_TX_MOD_RATE_LIMIT_INC)
+#define G_TX_MOD_RATE_LIMIT_INC(x) (((x) >> S_TX_MOD_RATE_LIMIT_INC) & M_TX_MOD_RATE_LIMIT_INC)
+
+#define S_TX_MOD_RATE_LIMIT_TICK 0
+#define M_TX_MOD_RATE_LIMIT_TICK 0xff
+#define V_TX_MOD_RATE_LIMIT_TICK(x) ((x) << S_TX_MOD_RATE_LIMIT_TICK)
+#define G_TX_MOD_RATE_LIMIT_TICK(x) (((x) >> S_TX_MOD_RATE_LIMIT_TICK) & M_TX_MOD_RATE_LIMIT_TICK)
+
+#define A_TP_PIO_ADDR 0x440
+#define A_TP_PIO_DATA 0x444
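+
+/*
+ * A_TP_PIO_ADDR/A_TP_PIO_DATA form an indirect access window into TP:
+ * write the TP-internal register number to A_TP_PIO_ADDR, then read or
+ * write its contents through A_TP_PIO_DATA. A sketch only; the helper
+ * name is illustrative rather than part of this patch:
+ *
+ *	static void tp_wr_indirect(struct adapter *adap, u32 addr, u32 val)
+ *	{
+ *		t3_write_reg(adap, A_TP_PIO_ADDR, addr);
+ *		t3_write_reg(adap, A_TP_PIO_DATA, val);
+ *	}
+ */
+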
#define A_TP_RESET 0x44c
#define S_FLSTINITENABLE 1
#define V_TPRESET(x) ((x) << S_TPRESET)
#define F_TPRESET V_TPRESET(1U)
+#define A_TP_MIB_INDEX 0x450
+#define A_TP_MIB_RDATA 0x454
+#define A_TP_SYNC_TIME_HI 0x458
+#define A_TP_SYNC_TIME_LO 0x45c
#define A_TP_CMM_MM_RX_FLST_BASE 0x460
+#define S_CMRXFLSTBASE 0
+#define M_CMRXFLSTBASE 0xfffffff
+#define V_CMRXFLSTBASE(x) ((x) << S_CMRXFLSTBASE)
+#define G_CMRXFLSTBASE(x) (((x) >> S_CMRXFLSTBASE) & M_CMRXFLSTBASE)
+
#define A_TP_CMM_MM_TX_FLST_BASE 0x464
-#define A_TP_CMM_MM_PS_FLST_BASE 0x468
+#define S_CMTXFLSTBASE 0
+#define M_CMTXFLSTBASE 0xfffffff
+#define V_CMTXFLSTBASE(x) ((x) << S_CMTXFLSTBASE)
+#define G_CMTXFLSTBASE(x) (((x) >> S_CMTXFLSTBASE) & M_CMTXFLSTBASE)
-#define A_TP_MIB_INDEX 0x450
+#define A_TP_CMM_MM_PS_FLST_BASE 0x468
-#define A_TP_MIB_RDATA 0x454
+#define S_CMPSFLSTBASE 0
+#define M_CMPSFLSTBASE 0xfffffff
+#define V_CMPSFLSTBASE(x) ((x) << S_CMPSFLSTBASE)
+#define G_CMPSFLSTBASE(x) (((x) >> S_CMPSFLSTBASE) & M_CMPSFLSTBASE)
#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
+#define S_CMMAXPSTRUCT 0
+#define M_CMMAXPSTRUCT 0x1fffff
+#define V_CMMAXPSTRUCT(x) ((x) << S_CMMAXPSTRUCT)
+#define G_CMMAXPSTRUCT(x) (((x) >> S_CMMAXPSTRUCT) & M_CMMAXPSTRUCT)
+
#define A_TP_INT_ENABLE 0x470
#define S_FLMTXFLSTEMPTY 30
#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY)
#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U)
+#define S_FLMPERRSET 28
+#define V_FLMPERRSET(x) ((x) << S_FLMPERRSET)
+#define F_FLMPERRSET V_FLMPERRSET(1U)
+
+#define S_PROTOCOLSRAMPERR 27
+#define V_PROTOCOLSRAMPERR(x) ((x) << S_PROTOCOLSRAMPERR)
+#define F_PROTOCOLSRAMPERR V_PROTOCOLSRAMPERR(1U)
+
#define S_ARPLUTPERR 26
#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR)
#define F_ARPLUTPERR V_ARPLUTPERR(1U)
+#define S_CMRCFOPPERR 25
+#define V_CMRCFOPPERR(x) ((x) << S_CMRCFOPPERR)
+#define F_CMRCFOPPERR V_CMRCFOPPERR(1U)
+
#define S_CMCACHEPERR 24
#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR)
#define F_CMCACHEPERR V_CMCACHEPERR(1U)
+#define S_CMRCFDATAPERR 23
+#define V_CMRCFDATAPERR(x) ((x) << S_CMRCFDATAPERR)
+#define F_CMRCFDATAPERR V_CMRCFDATAPERR(1U)
+
+#define S_DBL2TLUTPERR 22
+#define V_DBL2TLUTPERR(x) ((x) << S_DBL2TLUTPERR)
+#define F_DBL2TLUTPERR V_DBL2TLUTPERR(1U)
+
+#define S_DBTXTIDPERR 21
+#define V_DBTXTIDPERR(x) ((x) << S_DBTXTIDPERR)
+#define F_DBTXTIDPERR V_DBTXTIDPERR(1U)
+
+#define S_DBEXTPERR 20
+#define V_DBEXTPERR(x) ((x) << S_DBEXTPERR)
+#define F_DBEXTPERR V_DBEXTPERR(1U)
+
+#define S_DBOPPERR 19
+#define V_DBOPPERR(x) ((x) << S_DBOPPERR)
+#define F_DBOPPERR V_DBOPPERR(1U)
+
+#define S_TMCACHEPERR 18
+#define V_TMCACHEPERR(x) ((x) << S_TMCACHEPERR)
+#define F_TMCACHEPERR V_TMCACHEPERR(1U)
+
+#define S_ETPOUTCPLFIFOPERR 17
+#define V_ETPOUTCPLFIFOPERR(x) ((x) << S_ETPOUTCPLFIFOPERR)
+#define F_ETPOUTCPLFIFOPERR V_ETPOUTCPLFIFOPERR(1U)
+
+#define S_ETPOUTTCPFIFOPERR 16
+#define V_ETPOUTTCPFIFOPERR(x) ((x) << S_ETPOUTTCPFIFOPERR)
+#define F_ETPOUTTCPFIFOPERR V_ETPOUTTCPFIFOPERR(1U)
+
+#define S_ETPOUTIPFIFOPERR 15
+#define V_ETPOUTIPFIFOPERR(x) ((x) << S_ETPOUTIPFIFOPERR)
+#define F_ETPOUTIPFIFOPERR V_ETPOUTIPFIFOPERR(1U)
+
+#define S_ETPOUTETHFIFOPERR 14
+#define V_ETPOUTETHFIFOPERR(x) ((x) << S_ETPOUTETHFIFOPERR)
+#define F_ETPOUTETHFIFOPERR V_ETPOUTETHFIFOPERR(1U)
+
+#define S_ETPINCPLFIFOPERR 13
+#define V_ETPINCPLFIFOPERR(x) ((x) << S_ETPINCPLFIFOPERR)
+#define F_ETPINCPLFIFOPERR V_ETPINCPLFIFOPERR(1U)
+
+#define S_ETPINTCPOPTFIFOPERR 12
+#define V_ETPINTCPOPTFIFOPERR(x) ((x) << S_ETPINTCPOPTFIFOPERR)
+#define F_ETPINTCPOPTFIFOPERR V_ETPINTCPOPTFIFOPERR(1U)
+
+#define S_ETPINTCPFIFOPERR 11
+#define V_ETPINTCPFIFOPERR(x) ((x) << S_ETPINTCPFIFOPERR)
+#define F_ETPINTCPFIFOPERR V_ETPINTCPFIFOPERR(1U)
+
+#define S_ETPINIPFIFOPERR 10
+#define V_ETPINIPFIFOPERR(x) ((x) << S_ETPINIPFIFOPERR)
+#define F_ETPINIPFIFOPERR V_ETPINIPFIFOPERR(1U)
+
+#define S_ETPINETHFIFOPERR 9
+#define V_ETPINETHFIFOPERR(x) ((x) << S_ETPINETHFIFOPERR)
+#define F_ETPINETHFIFOPERR V_ETPINETHFIFOPERR(1U)
+
+#define S_CTPOUTCPLFIFOPERR 8
+#define V_CTPOUTCPLFIFOPERR(x) ((x) << S_CTPOUTCPLFIFOPERR)
+#define F_CTPOUTCPLFIFOPERR V_CTPOUTCPLFIFOPERR(1U)
+
+#define S_CTPOUTTCPFIFOPERR 7
+#define V_CTPOUTTCPFIFOPERR(x) ((x) << S_CTPOUTTCPFIFOPERR)
+#define F_CTPOUTTCPFIFOPERR V_CTPOUTTCPFIFOPERR(1U)
+
+#define S_CTPOUTIPFIFOPERR 6
+#define V_CTPOUTIPFIFOPERR(x) ((x) << S_CTPOUTIPFIFOPERR)
+#define F_CTPOUTIPFIFOPERR V_CTPOUTIPFIFOPERR(1U)
+
+#define S_CTPOUTETHFIFOPERR 5
+#define V_CTPOUTETHFIFOPERR(x) ((x) << S_CTPOUTETHFIFOPERR)
+#define F_CTPOUTETHFIFOPERR V_CTPOUTETHFIFOPERR(1U)
+
+#define S_CTPINCPLFIFOPERR 4
+#define V_CTPINCPLFIFOPERR(x) ((x) << S_CTPINCPLFIFOPERR)
+#define F_CTPINCPLFIFOPERR V_CTPINCPLFIFOPERR(1U)
+
+#define S_CTPINTCPOPFIFOPERR 3
+#define V_CTPINTCPOPFIFOPERR(x) ((x) << S_CTPINTCPOPFIFOPERR)
+#define F_CTPINTCPOPFIFOPERR V_CTPINTCPOPFIFOPERR(1U)
+
+#define S_CTPINTCPFIFOPERR 2
+#define V_CTPINTCPFIFOPERR(x) ((x) << S_CTPINTCPFIFOPERR)
+#define F_CTPINTCPFIFOPERR V_CTPINTCPFIFOPERR(1U)
+
+#define S_CTPINIPFIFOPERR 1
+#define V_CTPINIPFIFOPERR(x) ((x) << S_CTPINIPFIFOPERR)
+#define F_CTPINIPFIFOPERR V_CTPINIPFIFOPERR(1U)
+
+#define S_CTPINETHFIFOPERR 0
+#define V_CTPINETHFIFOPERR(x) ((x) << S_CTPINETHFIFOPERR)
+#define F_CTPINETHFIFOPERR V_CTPINETHFIFOPERR(1U)
+
#define A_TP_INT_CAUSE 0x474
+#define A_TP_FLM_FREE_PS_CNT 0x480
+
+#define S_FREEPSTRUCTCOUNT 0
+#define M_FREEPSTRUCTCOUNT 0x1fffff
+#define V_FREEPSTRUCTCOUNT(x) ((x) << S_FREEPSTRUCTCOUNT)
+#define G_FREEPSTRUCTCOUNT(x) (((x) >> S_FREEPSTRUCTCOUNT) & M_FREEPSTRUCTCOUNT)
+
+#define A_TP_FLM_FREE_RX_CNT 0x484
+
+#define S_FREERXPAGECOUNT 0
+#define M_FREERXPAGECOUNT 0x1fffff
+#define V_FREERXPAGECOUNT(x) ((x) << S_FREERXPAGECOUNT)
+#define G_FREERXPAGECOUNT(x) (((x) >> S_FREERXPAGECOUNT) & M_FREERXPAGECOUNT)
+
+#define A_TP_FLM_FREE_TX_CNT 0x488
+
+#define S_FREETXPAGECOUNT 0
+#define M_FREETXPAGECOUNT 0x1fffff
+#define V_FREETXPAGECOUNT(x) ((x) << S_FREETXPAGECOUNT)
+#define G_FREETXPAGECOUNT(x) (((x) >> S_FREETXPAGECOUNT) & M_FREETXPAGECOUNT)
+
+#define A_TP_TM_HEAP_PUSH_CNT 0x48c
+#define A_TP_TM_HEAP_POP_CNT 0x490
+#define A_TP_TM_DACK_PUSH_CNT 0x494
+#define A_TP_TM_DACK_POP_CNT 0x498
+#define A_TP_TM_MOD_PUSH_CNT 0x49c
+#define A_TP_MOD_POP_CNT 0x4a0
+#define A_TP_TIMER_SEPARATOR 0x4a4
+#define A_TP_DEBUG_SEL 0x4a8
+#define A_TP_DEBUG_FLAGS 0x4ac
+
+#define S_RXTIMERDACKFIRST 26
+#define V_RXTIMERDACKFIRST(x) ((x) << S_RXTIMERDACKFIRST)
+#define F_RXTIMERDACKFIRST V_RXTIMERDACKFIRST(1U)
+
+#define S_RXTIMERDACK 25
+#define V_RXTIMERDACK(x) ((x) << S_RXTIMERDACK)
+#define F_RXTIMERDACK V_RXTIMERDACK(1U)
+
+#define S_RXTIMERHEARTBEAT 24
+#define V_RXTIMERHEARTBEAT(x) ((x) << S_RXTIMERHEARTBEAT)
+#define F_RXTIMERHEARTBEAT V_RXTIMERHEARTBEAT(1U)
+
+#define S_RXPAWSDROP 23
+#define V_RXPAWSDROP(x) ((x) << S_RXPAWSDROP)
+#define F_RXPAWSDROP V_RXPAWSDROP(1U)
+
+#define S_RXURGDATADROP 22
+#define V_RXURGDATADROP(x) ((x) << S_RXURGDATADROP)
+#define F_RXURGDATADROP V_RXURGDATADROP(1U)
+
+#define S_RXFUTUREDATA 21
+#define V_RXFUTUREDATA(x) ((x) << S_RXFUTUREDATA)
+#define F_RXFUTUREDATA V_RXFUTUREDATA(1U)
+
+#define S_RXRCVRXMDATA 20
+#define V_RXRCVRXMDATA(x) ((x) << S_RXRCVRXMDATA)
+#define F_RXRCVRXMDATA V_RXRCVRXMDATA(1U)
+
+#define S_RXRCVOOODATAFIN 19
+#define V_RXRCVOOODATAFIN(x) ((x) << S_RXRCVOOODATAFIN)
+#define F_RXRCVOOODATAFIN V_RXRCVOOODATAFIN(1U)
+
+#define S_RXRCVOOODATA 18
+#define V_RXRCVOOODATA(x) ((x) << S_RXRCVOOODATA)
+#define F_RXRCVOOODATA V_RXRCVOOODATA(1U)
+
+#define S_RXRCVWNDZERO 17
+#define V_RXRCVWNDZERO(x) ((x) << S_RXRCVWNDZERO)
+#define F_RXRCVWNDZERO V_RXRCVWNDZERO(1U)
+
+#define S_RXRCVWNDLTMSS 16
+#define V_RXRCVWNDLTMSS(x) ((x) << S_RXRCVWNDLTMSS)
+#define F_RXRCVWNDLTMSS V_RXRCVWNDLTMSS(1U)
+
+#define S_TXDUPACKINC 11
+#define V_TXDUPACKINC(x) ((x) << S_TXDUPACKINC)
+#define F_TXDUPACKINC V_TXDUPACKINC(1U)
+
+#define S_TXRXMURG 10
+#define V_TXRXMURG(x) ((x) << S_TXRXMURG)
+#define F_TXRXMURG V_TXRXMURG(1U)
+
+#define S_TXRXMFIN 9
+#define V_TXRXMFIN(x) ((x) << S_TXRXMFIN)
+#define F_TXRXMFIN V_TXRXMFIN(1U)
+
+#define S_TXRXMSYN 8
+#define V_TXRXMSYN(x) ((x) << S_TXRXMSYN)
+#define F_TXRXMSYN V_TXRXMSYN(1U)
+
+#define S_TXRXMNEWRENO 7
+#define V_TXRXMNEWRENO(x) ((x) << S_TXRXMNEWRENO)
+#define F_TXRXMNEWRENO V_TXRXMNEWRENO(1U)
+
+#define S_TXRXMFAST 6
+#define V_TXRXMFAST(x) ((x) << S_TXRXMFAST)
+#define F_TXRXMFAST V_TXRXMFAST(1U)
+
+#define S_TXRXMTIMER 5
+#define V_TXRXMTIMER(x) ((x) << S_TXRXMTIMER)
+#define F_TXRXMTIMER V_TXRXMTIMER(1U)
+
+#define S_TXRXMTIMERKEEPALIVE 4
+#define V_TXRXMTIMERKEEPALIVE(x) ((x) << S_TXRXMTIMERKEEPALIVE)
+#define F_TXRXMTIMERKEEPALIVE V_TXRXMTIMERKEEPALIVE(1U)
+
+#define S_TXRXMTIMERPERSIST 3
+#define V_TXRXMTIMERPERSIST(x) ((x) << S_TXRXMTIMERPERSIST)
+#define F_TXRXMTIMERPERSIST V_TXRXMTIMERPERSIST(1U)
+
+#define S_TXRCVADVSHRUNK 2
+#define V_TXRCVADVSHRUNK(x) ((x) << S_TXRCVADVSHRUNK)
+#define F_TXRCVADVSHRUNK V_TXRCVADVSHRUNK(1U)
+
+#define S_TXRCVADVZERO 1
+#define V_TXRCVADVZERO(x) ((x) << S_TXRCVADVZERO)
+#define F_TXRCVADVZERO V_TXRCVADVZERO(1U)
+
+#define S_TXRCVADVLTMSS 0
+#define V_TXRCVADVLTMSS(x) ((x) << S_TXRCVADVLTMSS)
+#define F_TXRCVADVLTMSS V_TXRCVADVLTMSS(1U)
+
+#define S_RXDEBUGFLAGS 16
+#define M_RXDEBUGFLAGS 0xffff
+#define V_RXDEBUGFLAGS(x) ((x) << S_RXDEBUGFLAGS)
+#define G_RXDEBUGFLAGS(x) (((x) >> S_RXDEBUGFLAGS) & M_RXDEBUGFLAGS)
+
+#define S_TXDEBUGFLAGS 0
+#define M_TXDEBUGFLAGS 0xffff
+#define V_TXDEBUGFLAGS(x) ((x) << S_TXDEBUGFLAGS)
+#define G_TXDEBUGFLAGS(x) (((x) >> S_TXDEBUGFLAGS) & M_TXDEBUGFLAGS)
+
+#define A_TP_PROXY_FLOW_CNTL 0x4b0
+#define A_TP_CM_FLOW_CNTL_MODE 0x4b0
+
+#define S_CMFLOWCACHEDISABLE 0
+#define V_CMFLOWCACHEDISABLE(x) ((x) << S_CMFLOWCACHEDISABLE)
+#define F_CMFLOWCACHEDISABLE V_CMFLOWCACHEDISABLE(1U)
+
+#define A_TP_PC_CONGESTION_CNTL 0x4b4
+
+#define S_EDROPTUNNEL 19
+#define V_EDROPTUNNEL(x) ((x) << S_EDROPTUNNEL)
+#define F_EDROPTUNNEL V_EDROPTUNNEL(1U)
+
+#define S_CDROPTUNNEL 18
+#define V_CDROPTUNNEL(x) ((x) << S_CDROPTUNNEL)
+#define F_CDROPTUNNEL V_CDROPTUNNEL(1U)
+
+#define S_ETHRESHOLD 12
+#define M_ETHRESHOLD 0x3f
+#define V_ETHRESHOLD(x) ((x) << S_ETHRESHOLD)
+#define G_ETHRESHOLD(x) (((x) >> S_ETHRESHOLD) & M_ETHRESHOLD)
+
+#define S_CTHRESHOLD 6
+#define M_CTHRESHOLD 0x3f
+#define V_CTHRESHOLD(x) ((x) << S_CTHRESHOLD)
+#define G_CTHRESHOLD(x) (((x) >> S_CTHRESHOLD) & M_CTHRESHOLD)
+
+#define S_TXTHRESHOLD 0
+#define M_TXTHRESHOLD 0x3f
+#define V_TXTHRESHOLD(x) ((x) << S_TXTHRESHOLD)
+#define G_TXTHRESHOLD(x) (((x) >> S_TXTHRESHOLD) & M_TXTHRESHOLD)
+
+#define A_TP_TX_DROP_COUNT 0x4bc
+#define A_TP_CLEAR_DEBUG 0x4c0
+
+#define S_CLRDEBUG 0
+#define V_CLRDEBUG(x) ((x) << S_CLRDEBUG)
+#define F_CLRDEBUG V_CLRDEBUG(1U)
+
+#define A_TP_DEBUG_VEC 0x4c4
+#define A_TP_DEBUG_VEC2 0x4c8
+#define A_TP_DEBUG_REG_SEL 0x4cc
+#define A_TP_DEBUG 0x4d0
+#define A_TP_DBG_LA_CONFIG 0x4d4
+#define A_TP_DBG_LA_DATAH 0x4d8
+#define A_TP_DBG_LA_DATAL 0x4dc
+#define A_TP_EMBED_OP_FIELD0 0x4e8
+#define A_TP_EMBED_OP_FIELD1 0x4ec
+#define A_TP_EMBED_OP_FIELD2 0x4f0
+#define A_TP_EMBED_OP_FIELD3 0x4f4
+#define A_TP_EMBED_OP_FIELD4 0x4f8
+#define A_TP_EMBED_OP_FIELD5 0x4fc
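+
+/*
+ * The low-numbered definitions that follow (0x0-0x9, and the TRC/IPMI/
+ * QOS groups after them) appear to be TP_PIO indirect register numbers:
+ * they are reached through the A_TP_PIO_ADDR/A_TP_PIO_DATA window above
+ * rather than being mapped directly into the register BAR.
+ */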
+#define A_TP_TX_MOD_Q7_Q6_TIMER_SEPARATOR 0x0
+#define A_TP_TX_MOD_Q5_Q4_TIMER_SEPARATOR 0x1
+#define A_TP_TX_MOD_Q3_Q2_TIMER_SEPARATOR 0x2
+#define A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR 0x3
+#define A_TP_RX_MOD_Q1_Q0_TIMER_SEPARATOR 0x4
+#define A_TP_TX_MOD_Q7_Q6_RATE_LIMIT 0x5
+#define A_TP_TX_MOD_Q5_Q4_RATE_LIMIT 0x6
+#define A_TP_TX_MOD_Q3_Q2_RATE_LIMIT 0x7
#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
+#define A_TP_RX_MOD_Q1_Q0_RATE_LIMIT 0x9
+#define A_TP_TX_TRC_KEY0 0x20
+#define A_TP_TX_TRC_MASK0 0x21
+#define A_TP_TX_TRC_KEY1 0x22
+#define A_TP_TX_TRC_MASK1 0x23
+#define A_TP_TX_TRC_KEY2 0x24
+#define A_TP_TX_TRC_MASK2 0x25
+#define A_TP_TX_TRC_KEY3 0x26
+#define A_TP_TX_TRC_MASK3 0x27
+#define A_TP_IPMI_CFG1 0x28
+
+#define S_VLANENABLE 31
+#define V_VLANENABLE(x) ((x) << S_VLANENABLE)
+#define F_VLANENABLE V_VLANENABLE(1U)
+
+#define S_PRIMARYPORTENABLE 30
+#define V_PRIMARYPORTENABLE(x) ((x) << S_PRIMARYPORTENABLE)
+#define F_PRIMARYPORTENABLE V_PRIMARYPORTENABLE(1U)
+
+#define S_SECUREPORTENABLE 29
+#define V_SECUREPORTENABLE(x) ((x) << S_SECUREPORTENABLE)
+#define F_SECUREPORTENABLE V_SECUREPORTENABLE(1U)
+
+#define S_ARPENABLE 28
+#define V_ARPENABLE(x) ((x) << S_ARPENABLE)
+#define F_ARPENABLE V_ARPENABLE(1U)
+
+#define S_VLAN 0
+#define M_VLAN 0xffff
+#define V_VLAN(x) ((x) << S_VLAN)
+#define G_VLAN(x) (((x) >> S_VLAN) & M_VLAN)
+
+#define A_TP_IPMI_CFG2 0x29
+
+#define S_SECUREPORT 16
+#define M_SECUREPORT 0xffff
+#define V_SECUREPORT(x) ((x) << S_SECUREPORT)
+#define G_SECUREPORT(x) (((x) >> S_SECUREPORT) & M_SECUREPORT)
+
+#define S_PRIMARYPORT 0
+#define M_PRIMARYPORT 0xffff
+#define V_PRIMARYPORT(x) ((x) << S_PRIMARYPORT)
+#define G_PRIMARYPORT(x) (((x) >> S_PRIMARYPORT) & M_PRIMARYPORT)
+
+#define A_TP_RX_TRC_KEY0 0x120
+#define A_TP_RX_TRC_MASK0 0x121
+#define A_TP_RX_TRC_KEY1 0x122
+#define A_TP_RX_TRC_MASK1 0x123
+#define A_TP_RX_TRC_KEY2 0x124
+#define A_TP_RX_TRC_MASK2 0x125
+#define A_TP_RX_TRC_KEY3 0x126
+#define A_TP_RX_TRC_MASK3 0x127
+#define A_TP_QOS_RX_TOS_MAP_H 0x128
+#define A_TP_QOS_RX_TOS_MAP_L 0x129
+#define A_TP_QOS_RX_MAP_MODE 0x12a
+
+#define S_DEFAULTCH 11
+#define V_DEFAULTCH(x) ((x) << S_DEFAULTCH)
+#define F_DEFAULTCH V_DEFAULTCH(1U)
+
+#define S_RXMAPMODE 8
+#define M_RXMAPMODE 0x7
+#define V_RXMAPMODE(x) ((x) << S_RXMAPMODE)
+#define G_RXMAPMODE(x) (((x) >> S_RXMAPMODE) & M_RXMAPMODE)
+
+#define S_RXVLANMAP 7
+#define V_RXVLANMAP(x) ((x) << S_RXVLANMAP)
+#define F_RXVLANMAP V_RXVLANMAP(1U)
#define A_TP_TX_DROP_CFG_CH0 0x12b
+#define S_TIMERENABLED 31
+#define V_TIMERENABLED(x) ((x) << S_TIMERENABLED)
+#define F_TIMERENABLED V_TIMERENABLED(1U)
+
+#define S_TIMERERRORENABLE 30
+#define V_TIMERERRORENABLE(x) ((x) << S_TIMERERRORENABLE)
+#define F_TIMERERRORENABLE V_TIMERERRORENABLE(1U)
+
+#define S_TIMERTHRESHOLD 4
+#define M_TIMERTHRESHOLD 0x3ffffff
+#define V_TIMERTHRESHOLD(x) ((x) << S_TIMERTHRESHOLD)
+#define G_TIMERTHRESHOLD(x) (((x) >> S_TIMERTHRESHOLD) & M_TIMERTHRESHOLD)
+
+#define S_PACKETDROPS 0
+#define M_PACKETDROPS 0xf
+#define V_PACKETDROPS(x) ((x) << S_PACKETDROPS)
+#define G_PACKETDROPS(x) (((x) >> S_PACKETDROPS) & M_PACKETDROPS)
+
+#define A_TP_TX_DROP_CFG_CH1 0x12c
+#define A_TP_TX_DROP_CNT_CH0 0x12d
+
+#define S_TXDROPCNTCH0SENT 16
+#define M_TXDROPCNTCH0SENT 0xffff
+#define V_TXDROPCNTCH0SENT(x) ((x) << S_TXDROPCNTCH0SENT)
+#define G_TXDROPCNTCH0SENT(x) (((x) >> S_TXDROPCNTCH0SENT) & M_TXDROPCNTCH0SENT)
+
+#define S_TXDROPCNTCH0RCVD 0
+#define M_TXDROPCNTCH0RCVD 0xffff
+#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
+#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & M_TXDROPCNTCH0RCVD)
+
+#define A_TP_TX_DROP_CNT_CH1 0x12e
+
+#define S_TXDROPCNTCH1SENT 16
+#define M_TXDROPCNTCH1SENT 0xffff
+#define V_TXDROPCNTCH1SENT(x) ((x) << S_TXDROPCNTCH1SENT)
+#define G_TXDROPCNTCH1SENT(x) (((x) >> S_TXDROPCNTCH1SENT) & M_TXDROPCNTCH1SENT)
+
+#define S_TXDROPCNTCH1RCVD 0
+#define M_TXDROPCNTCH1RCVD 0xffff
+#define V_TXDROPCNTCH1RCVD(x) ((x) << S_TXDROPCNTCH1RCVD)
+#define G_TXDROPCNTCH1RCVD(x) (((x) >> S_TXDROPCNTCH1RCVD) & M_TXDROPCNTCH1RCVD)
+
#define A_TP_TX_DROP_MODE 0x12f
+#define S_TXDROPMODECH1 1
+#define V_TXDROPMODECH1(x) ((x) << S_TXDROPMODECH1)
+#define F_TXDROPMODECH1 V_TXDROPMODECH1(1U)
+
+#define S_TXDROPMODECH0 0
+#define V_TXDROPMODECH0(x) ((x) << S_TXDROPMODECH0)
+#define F_TXDROPMODECH0 V_TXDROPMODECH0(1U)
+
+#define A_TP_VLAN_PRI_MAP 0x137
+
+#define S_VLANPRIMAP7 14
+#define M_VLANPRIMAP7 0x3
+#define V_VLANPRIMAP7(x) ((x) << S_VLANPRIMAP7)
+#define G_VLANPRIMAP7(x) (((x) >> S_VLANPRIMAP7) & M_VLANPRIMAP7)
+
+#define S_VLANPRIMAP6 12
+#define M_VLANPRIMAP6 0x3
+#define V_VLANPRIMAP6(x) ((x) << S_VLANPRIMAP6)
+#define G_VLANPRIMAP6(x) (((x) >> S_VLANPRIMAP6) & M_VLANPRIMAP6)
+
+#define S_VLANPRIMAP5 10
+#define M_VLANPRIMAP5 0x3
+#define V_VLANPRIMAP5(x) ((x) << S_VLANPRIMAP5)
+#define G_VLANPRIMAP5(x) (((x) >> S_VLANPRIMAP5) & M_VLANPRIMAP5)
+
+#define S_VLANPRIMAP4 8
+#define M_VLANPRIMAP4 0x3
+#define V_VLANPRIMAP4(x) ((x) << S_VLANPRIMAP4)
+#define G_VLANPRIMAP4(x) (((x) >> S_VLANPRIMAP4) & M_VLANPRIMAP4)
+
+#define S_VLANPRIMAP3 6
+#define M_VLANPRIMAP3 0x3
+#define V_VLANPRIMAP3(x) ((x) << S_VLANPRIMAP3)
+#define G_VLANPRIMAP3(x) (((x) >> S_VLANPRIMAP3) & M_VLANPRIMAP3)
+
+#define S_VLANPRIMAP2 4
+#define M_VLANPRIMAP2 0x3
+#define V_VLANPRIMAP2(x) ((x) << S_VLANPRIMAP2)
+#define G_VLANPRIMAP2(x) (((x) >> S_VLANPRIMAP2) & M_VLANPRIMAP2)
+
+#define S_VLANPRIMAP1 2
+#define M_VLANPRIMAP1 0x3
+#define V_VLANPRIMAP1(x) ((x) << S_VLANPRIMAP1)
+#define G_VLANPRIMAP1(x) (((x) >> S_VLANPRIMAP1) & M_VLANPRIMAP1)
+
+#define S_VLANPRIMAP0 0
+#define M_VLANPRIMAP0 0x3
+#define V_VLANPRIMAP0(x) ((x) << S_VLANPRIMAP0)
+#define G_VLANPRIMAP0(x) (((x) >> S_VLANPRIMAP0) & M_VLANPRIMAP0)
+
+#define A_TP_MAC_MATCH_MAP0 0x138
+
+#define S_MACMATCHMAP7 21
+#define M_MACMATCHMAP7 0x7
+#define V_MACMATCHMAP7(x) ((x) << S_MACMATCHMAP7)
+#define G_MACMATCHMAP7(x) (((x) >> S_MACMATCHMAP7) & M_MACMATCHMAP7)
+
+#define S_MACMATCHMAP6 18
+#define M_MACMATCHMAP6 0x7
+#define V_MACMATCHMAP6(x) ((x) << S_MACMATCHMAP6)
+#define G_MACMATCHMAP6(x) (((x) >> S_MACMATCHMAP6) & M_MACMATCHMAP6)
+
+#define S_MACMATCHMAP5 15
+#define M_MACMATCHMAP5 0x7
+#define V_MACMATCHMAP5(x) ((x) << S_MACMATCHMAP5)
+#define G_MACMATCHMAP5(x) (((x) >> S_MACMATCHMAP5) & M_MACMATCHMAP5)
+
+#define S_MACMATCHMAP4 12
+#define M_MACMATCHMAP4 0x7
+#define V_MACMATCHMAP4(x) ((x) << S_MACMATCHMAP4)
+#define G_MACMATCHMAP4(x) (((x) >> S_MACMATCHMAP4) & M_MACMATCHMAP4)
+
+#define S_MACMATCHMAP3 9
+#define M_MACMATCHMAP3 0x7
+#define V_MACMATCHMAP3(x) ((x) << S_MACMATCHMAP3)
+#define G_MACMATCHMAP3(x) (((x) >> S_MACMATCHMAP3) & M_MACMATCHMAP3)
+
+#define S_MACMATCHMAP2 6
+#define M_MACMATCHMAP2 0x7
+#define V_MACMATCHMAP2(x) ((x) << S_MACMATCHMAP2)
+#define G_MACMATCHMAP2(x) (((x) >> S_MACMATCHMAP2) & M_MACMATCHMAP2)
+
+#define S_MACMATCHMAP1 3
+#define M_MACMATCHMAP1 0x7
+#define V_MACMATCHMAP1(x) ((x) << S_MACMATCHMAP1)
+#define G_MACMATCHMAP1(x) (((x) >> S_MACMATCHMAP1) & M_MACMATCHMAP1)
+
+#define S_MACMATCHMAP0 0
+#define M_MACMATCHMAP0 0x7
+#define V_MACMATCHMAP0(x) ((x) << S_MACMATCHMAP0)
+#define G_MACMATCHMAP0(x) (((x) >> S_MACMATCHMAP0) & M_MACMATCHMAP0)
+
+#define A_TP_MAC_MATCH_MAP1 0x139
+#define A_TP_INGRESS_CONFIG 0x141
+
+#define S_LOOKUPEVERYPKT 28
+#define V_LOOKUPEVERYPKT(x) ((x) << S_LOOKUPEVERYPKT)
+#define F_LOOKUPEVERYPKT V_LOOKUPEVERYPKT(1U)
+
+#define S_ENABLEINSERTIONSFD 27
+#define V_ENABLEINSERTIONSFD(x) ((x) << S_ENABLEINSERTIONSFD)
+#define F_ENABLEINSERTIONSFD V_ENABLEINSERTIONSFD(1U)
+
+#define S_ENABLEINSERTION 26
+#define V_ENABLEINSERTION(x) ((x) << S_ENABLEINSERTION)
+#define F_ENABLEINSERTION V_ENABLEINSERTION(1U)
+
+#define S_ENABLEEXTRACTIONSFD 25
+#define V_ENABLEEXTRACTIONSFD(x) ((x) << S_ENABLEEXTRACTIONSFD)
+#define F_ENABLEEXTRACTIONSFD V_ENABLEEXTRACTIONSFD(1U)
+
+#define S_ENABLEEXTRACT 24
+#define V_ENABLEEXTRACT(x) ((x) << S_ENABLEEXTRACT)
+#define F_ENABLEEXTRACT V_ENABLEEXTRACT(1U)
+
+#define S_BITPOS3 18
+#define M_BITPOS3 0x3f
+#define V_BITPOS3(x) ((x) << S_BITPOS3)
+#define G_BITPOS3(x) (((x) >> S_BITPOS3) & M_BITPOS3)
+
+#define S_BITPOS2 12
+#define M_BITPOS2 0x3f
+#define V_BITPOS2(x) ((x) << S_BITPOS2)
+#define G_BITPOS2(x) (((x) >> S_BITPOS2) & M_BITPOS2)
+
+#define S_BITPOS1 6
+#define M_BITPOS1 0x3f
+#define V_BITPOS1(x) ((x) << S_BITPOS1)
+#define G_BITPOS1(x) (((x) >> S_BITPOS1) & M_BITPOS1)
+
+#define S_BITPOS0 0
+#define M_BITPOS0 0x3f
+#define V_BITPOS0(x) ((x) << S_BITPOS0)
+#define G_BITPOS0(x) (((x) >> S_BITPOS0) & M_BITPOS0)
+
+#define A_TP_PREAMBLE_MSB 0x142
+#define A_TP_PREAMBLE_LSB 0x143
#define A_TP_EGRESS_CONFIG 0x145
#define S_REWRITEFORCETOSIZE 0
#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
-#define A_TP_TX_TRC_KEY0 0x20
+#define A_TP_INTF_FROM_TX_PKT 0x244
-#define A_TP_RX_TRC_KEY0 0x120
+#define S_INTFFROMTXPKT 0
+#define V_INTFFROMTXPKT(x) ((x) << S_INTFFROMTXPKT)
+#define F_INTFFROMTXPKT V_INTFFROMTXPKT(1U)
-#define A_TP_TX_DROP_CNT_CH0 0x12d
+#define A_TP_FIFO_CONFIG 0x8c0
-#define S_TXDROPCNTCH0RCVD 0
-#define M_TXDROPCNTCH0RCVD 0xffff
-#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
-#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
- M_TXDROPCNTCH0RCVD)
+#define S_RXFIFOCONFIG 10
+#define M_RXFIFOCONFIG 0x3f
+#define V_RXFIFOCONFIG(x) ((x) << S_RXFIFOCONFIG)
+#define G_RXFIFOCONFIG(x) (((x) >> S_RXFIFOCONFIG) & M_RXFIFOCONFIG)
-#define A_TP_PROXY_FLOW_CNTL 0x4b0
+#define S_TXFIFOCONFIG 2
+#define M_TXFIFOCONFIG 0x3f
+#define V_TXFIFOCONFIG(x) ((x) << S_TXFIFOCONFIG)
+#define G_TXFIFOCONFIG(x) (((x) >> S_TXFIFOCONFIG) & M_TXFIFOCONFIG)
-#define A_TP_EMBED_OP_FIELD0 0x4e8
-#define A_TP_EMBED_OP_FIELD1 0x4ec
-#define A_TP_EMBED_OP_FIELD2 0x4f0
-#define A_TP_EMBED_OP_FIELD3 0x4f4
-#define A_TP_EMBED_OP_FIELD4 0x4f8
-#define A_TP_EMBED_OP_FIELD5 0x4fc
+/* registers for module ULP2_RX */
+#define ULP2_RX_BASE_ADDR 0x500
#define A_ULPRX_CTL 0x500
+#define S_PCMD1THRESHOLD 24
+#define M_PCMD1THRESHOLD 0xff
+#define V_PCMD1THRESHOLD(x) ((x) << S_PCMD1THRESHOLD)
+#define G_PCMD1THRESHOLD(x) (((x) >> S_PCMD1THRESHOLD) & M_PCMD1THRESHOLD)
+
+#define S_PCMD0THRESHOLD 16
+#define M_PCMD0THRESHOLD 0xff
+#define V_PCMD0THRESHOLD(x) ((x) << S_PCMD0THRESHOLD)
+#define G_PCMD0THRESHOLD(x) (((x) >> S_PCMD0THRESHOLD) & M_PCMD0THRESHOLD)
+
#define S_ROUND_ROBIN 4
#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
+#define S_RDMA_PERMISSIVE_MODE 3
+#define V_RDMA_PERMISSIVE_MODE(x) ((x) << S_RDMA_PERMISSIVE_MODE)
+#define F_RDMA_PERMISSIVE_MODE V_RDMA_PERMISSIVE_MODE(1U)
+
+#define S_PAGEPODME 2
+#define V_PAGEPODME(x) ((x) << S_PAGEPODME)
+#define F_PAGEPODME V_PAGEPODME(1U)
+
+#define S_ISCSITAGTCB 1
+#define V_ISCSITAGTCB(x) ((x) << S_ISCSITAGTCB)
+#define F_ISCSITAGTCB V_ISCSITAGTCB(1U)
+
+#define S_TDDPTAGTCB 0
+#define V_TDDPTAGTCB(x) ((x) << S_TDDPTAGTCB)
+#define F_TDDPTAGTCB V_TDDPTAGTCB(1U)
+
#define A_ULPRX_INT_ENABLE 0x504
#define S_DATASELFRAMEERR0 7
#define V_PARERRDATA(x) ((x) << S_PARERRDATA)
#define F_PARERRDATA V_PARERRDATA(1U)
-#define A_ULPRX_INT_CAUSE 0x508
+#define S_PARERR 0
+#define V_PARERR(x) ((x) << S_PARERR)
+#define F_PARERR V_PARERR(1U)
+
+#define A_ULPRX_INT_CAUSE 0x508
#define A_ULPRX_ISCSI_LLIMIT 0x50c
+#define S_ISCSILLIMIT 6
+#define M_ISCSILLIMIT 0x3ffffff
+#define V_ISCSILLIMIT(x) ((x) << S_ISCSILLIMIT)
+#define G_ISCSILLIMIT(x) (((x) >> S_ISCSILLIMIT) & M_ISCSILLIMIT)
+
#define A_ULPRX_ISCSI_ULIMIT 0x510
+#define S_ISCSIULIMIT 6
+#define M_ISCSIULIMIT 0x3ffffff
+#define V_ISCSIULIMIT(x) ((x) << S_ISCSIULIMIT)
+#define G_ISCSIULIMIT(x) (((x) >> S_ISCSIULIMIT) & M_ISCSIULIMIT)
+
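+/*
+ * The ULP_RX LLIMIT/ULIMIT pairs bound the memory region available to
+ * each ULP object type. Note the field shift of 6: the registers hold
+ * 64-byte-aligned byte addresses, with the low six bits implied zero.
+ * (A reading of the layout; the patch itself does not spell this out.)
+ */
+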
#define A_ULPRX_ISCSI_TAGMASK 0x514
+#define S_ISCSITAGMASK 6
+#define M_ISCSITAGMASK 0x3ffffff
+#define V_ISCSITAGMASK(x) ((x) << S_ISCSITAGMASK)
+#define G_ISCSITAGMASK(x) (((x) >> S_ISCSITAGMASK) & M_ISCSITAGMASK)
+
+#define A_ULPRX_ISCSI_PSZ 0x518
+
+#define S_HPZ3 24
+#define M_HPZ3 0xf
+#define V_HPZ3(x) ((x) << S_HPZ3)
+#define G_HPZ3(x) (((x) >> S_HPZ3) & M_HPZ3)
+
+#define S_HPZ2 16
+#define M_HPZ2 0xf
+#define V_HPZ2(x) ((x) << S_HPZ2)
+#define G_HPZ2(x) (((x) >> S_HPZ2) & M_HPZ2)
+
+#define S_HPZ1 8
+#define M_HPZ1 0xf
+#define V_HPZ1(x) ((x) << S_HPZ1)
+#define G_HPZ1(x) (((x) >> S_HPZ1) & M_HPZ1)
+
#define S_HPZ0 0
#define M_HPZ0 0xf
#define V_HPZ0(x) ((x) << S_HPZ0)
#define A_ULPRX_TDDP_LLIMIT 0x51c
+#define S_TDDPLLIMIT 6
+#define M_TDDPLLIMIT 0x3ffffff
+#define V_TDDPLLIMIT(x) ((x) << S_TDDPLLIMIT)
+#define G_TDDPLLIMIT(x) (((x) >> S_TDDPLLIMIT) & M_TDDPLLIMIT)
+
#define A_ULPRX_TDDP_ULIMIT 0x520
-#define A_ULPRX_TDDP_PSZ 0x528
-#define A_ULPRX_STAG_LLIMIT 0x52c
+#define S_TDDPULIMIT 6
+#define M_TDDPULIMIT 0x3ffffff
+#define V_TDDPULIMIT(x) ((x) << S_TDDPULIMIT)
+#define G_TDDPULIMIT(x) (((x) >> S_TDDPULIMIT) & M_TDDPULIMIT)
-#define A_ULPRX_STAG_ULIMIT 0x530
+#define A_ULPRX_TDDP_TAGMASK 0x524
-#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
+#define S_TDDPTAGMASK 6
+#define M_TDDPTAGMASK 0x3ffffff
+#define V_TDDPTAGMASK(x) ((x) << S_TDDPTAGMASK)
+#define G_TDDPTAGMASK(x) (((x) >> S_TDDPTAGMASK) & M_TDDPTAGMASK)
+#define A_ULPRX_TDDP_PSZ 0x528
+#define A_ULPRX_STAG_LLIMIT 0x52c
+#define A_ULPRX_STAG_ULIMIT 0x530
+#define A_ULPRX_RQ_LLIMIT 0x534
#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
-
#define A_ULPRX_PBL_LLIMIT 0x53c
-
-#define A_ULPRX_PBL_ULIMIT 0x540
#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_TDDP_TAGMASK 0x524
-
-#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
-
-#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
-
-#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
+/* registers for module ULP2_TX */
+#define ULP2_TX_BASE_ADDR 0x580
#define A_ULPTX_CONFIG 0x580
#define A_ULPTX_INT_ENABLE 0x584
+#define S_CMD_FIFO_PERR_SET1 7
+#define V_CMD_FIFO_PERR_SET1(x) ((x) << S_CMD_FIFO_PERR_SET1)
+#define F_CMD_FIFO_PERR_SET1 V_CMD_FIFO_PERR_SET1(1U)
+
+#define S_CMD_FIFO_PERR_SET0 6
+#define V_CMD_FIFO_PERR_SET0(x) ((x) << S_CMD_FIFO_PERR_SET0)
+#define F_CMD_FIFO_PERR_SET0 V_CMD_FIFO_PERR_SET0(1U)
+
+#define S_LSO_HDR_SRAM_PERR_SET1 5
+#define V_LSO_HDR_SRAM_PERR_SET1(x) ((x) << S_LSO_HDR_SRAM_PERR_SET1)
+#define F_LSO_HDR_SRAM_PERR_SET1 V_LSO_HDR_SRAM_PERR_SET1(1U)
+
+#define S_LSO_HDR_SRAM_PERR_SET0 4
+#define V_LSO_HDR_SRAM_PERR_SET0(x) ((x) << S_LSO_HDR_SRAM_PERR_SET0)
+#define F_LSO_HDR_SRAM_PERR_SET0 V_LSO_HDR_SRAM_PERR_SET0(1U)
+
+#define S_IMM_DATA_PERR_SET_CH1 3
+#define V_IMM_DATA_PERR_SET_CH1(x) ((x) << S_IMM_DATA_PERR_SET_CH1)
+#define F_IMM_DATA_PERR_SET_CH1 V_IMM_DATA_PERR_SET_CH1(1U)
+
+#define S_IMM_DATA_PERR_SET_CH0 2
+#define V_IMM_DATA_PERR_SET_CH0(x) ((x) << S_IMM_DATA_PERR_SET_CH0)
+#define F_IMM_DATA_PERR_SET_CH0 V_IMM_DATA_PERR_SET_CH0(1U)
+
#define S_PBL_BOUND_ERR_CH1 1
#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
#define A_ULPTX_INT_CAUSE 0x588
-
#define A_ULPTX_TPT_LLIMIT 0x58c
-
#define A_ULPTX_TPT_ULIMIT 0x590
-
#define A_ULPTX_PBL_LLIMIT 0x594
-
#define A_ULPTX_PBL_ULIMIT 0x598
+#define A_ULPTX_CPL_ERR_OFFSET 0x59c
+#define A_ULPTX_CPL_ERR_MASK 0x5a0
+#define A_ULPTX_CPL_ERR_VALUE 0x5a4
+#define A_ULPTX_CPL_PACK_SIZE 0x5a8
+
+#define S_VALUE 24
+#define M_VALUE 0xff
+#define V_VALUE(x) ((x) << S_VALUE)
+#define G_VALUE(x) (((x) >> S_VALUE) & M_VALUE)
+
+#define S_CH1SIZE2 24
+#define M_CH1SIZE2 0xff
+#define V_CH1SIZE2(x) ((x) << S_CH1SIZE2)
+#define G_CH1SIZE2(x) (((x) >> S_CH1SIZE2) & M_CH1SIZE2)
+
+#define S_CH1SIZE1 16
+#define M_CH1SIZE1 0xff
+#define V_CH1SIZE1(x) ((x) << S_CH1SIZE1)
+#define G_CH1SIZE1(x) (((x) >> S_CH1SIZE1) & M_CH1SIZE1)
+
+#define S_CH0SIZE2 8
+#define M_CH0SIZE2 0xff
+#define V_CH0SIZE2(x) ((x) << S_CH0SIZE2)
+#define G_CH0SIZE2(x) (((x) >> S_CH0SIZE2) & M_CH0SIZE2)
+
+#define S_CH0SIZE1 0
+#define M_CH0SIZE1 0xff
+#define V_CH0SIZE1(x) ((x) << S_CH0SIZE1)
+#define G_CH0SIZE1(x) (((x) >> S_CH0SIZE1) & M_CH0SIZE1)
#define A_ULPTX_DMA_WEIGHT 0x5ac
#define S_D1_WEIGHT 16
#define M_D1_WEIGHT 0xffff
#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
+#define G_D1_WEIGHT(x) (((x) >> S_D1_WEIGHT) & M_D1_WEIGHT)
#define S_D0_WEIGHT 0
#define M_D0_WEIGHT 0xffff
#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
+#define G_D0_WEIGHT(x) (((x) >> S_D0_WEIGHT) & M_D0_WEIGHT)
+
+/* registers for module PM1_RX */
+#define PM1_RX_BASE_ADDR 0x5c0
#define A_PM1_RX_CFG 0x5c0
#define A_PM1_RX_MODE 0x5c4
+#define S_STAT_CHANNEL 1
+#define V_STAT_CHANNEL(x) ((x) << S_STAT_CHANNEL)
+#define F_STAT_CHANNEL V_STAT_CHANNEL(1U)
+
+#define S_PRIORITY_CH 0
+#define V_PRIORITY_CH(x) ((x) << S_PRIORITY_CH)
+#define F_PRIORITY_CH V_PRIORITY_CH(1U)
+
+#define A_PM1_RX_STAT_CONFIG 0x5c8
+#define A_PM1_RX_STAT_COUNT 0x5cc
+#define A_PM1_RX_STAT_MSB 0x5d0
+#define A_PM1_RX_STAT_LSB 0x5d4
#define A_PM1_RX_INT_ENABLE 0x5d8
#define S_ZERO_E_CMD_ERROR 18
#define S_IESPI_PAR_ERROR 3
#define M_IESPI_PAR_ERROR 0x7
-
#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
+#define G_IESPI_PAR_ERROR(x) (((x) >> S_IESPI_PAR_ERROR) & M_IESPI_PAR_ERROR)
#define S_OCSPI_PAR_ERROR 0
#define M_OCSPI_PAR_ERROR 0x7
-
#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
+#define G_OCSPI_PAR_ERROR(x) (((x) >> S_OCSPI_PAR_ERROR) & M_OCSPI_PAR_ERROR)
#define A_PM1_RX_INT_CAUSE 0x5dc
+/* registers for module PM1_TX */
+#define PM1_TX_BASE_ADDR 0x5e0
+
#define A_PM1_TX_CFG 0x5e0
#define A_PM1_TX_MODE 0x5e4
-
+#define A_PM1_TX_STAT_CONFIG 0x5e8
+#define A_PM1_TX_STAT_COUNT 0x5ec
+#define A_PM1_TX_STAT_MSB 0x5f0
+#define A_PM1_TX_STAT_LSB 0x5f4
#define A_PM1_TX_INT_ENABLE 0x5f8
#define S_ZERO_C_CMD_ERROR 18
#define S_ICSPI_PAR_ERROR 3
#define M_ICSPI_PAR_ERROR 0x7
-
#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
+#define G_ICSPI_PAR_ERROR(x) (((x) >> S_ICSPI_PAR_ERROR) & M_ICSPI_PAR_ERROR)
#define S_OESPI_PAR_ERROR 0
#define M_OESPI_PAR_ERROR 0x7
-
#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
+#define G_OESPI_PAR_ERROR(x) (((x) >> S_OESPI_PAR_ERROR) & M_OESPI_PAR_ERROR)
#define A_PM1_TX_INT_CAUSE 0x5fc
+/* registers for module MPS0 */
+#define MPS0_BASE_ADDR 0x600
+
#define A_MPS_CFG 0x600
+#define S_ENFORCEPKT 11
+#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
+#define F_ENFORCEPKT V_ENFORCEPKT(1U)
+
+#define S_SGETPQID 8
+#define M_SGETPQID 0x7
+#define V_SGETPQID(x) ((x) << S_SGETPQID)
+#define G_SGETPQID(x) (((x) >> S_SGETPQID) & M_SGETPQID)
+
+#define S_TPRXPORTSIZE 7
+#define V_TPRXPORTSIZE(x) ((x) << S_TPRXPORTSIZE)
+#define F_TPRXPORTSIZE V_TPRXPORTSIZE(1U)
+
+#define S_TPTXPORT1SIZE 6
+#define V_TPTXPORT1SIZE(x) ((x) << S_TPTXPORT1SIZE)
+#define F_TPTXPORT1SIZE V_TPTXPORT1SIZE(1U)
+
+#define S_TPTXPORT0SIZE 5
+#define V_TPTXPORT0SIZE(x) ((x) << S_TPTXPORT0SIZE)
+#define F_TPTXPORT0SIZE V_TPTXPORT0SIZE(1U)
+
#define S_TPRXPORTEN 4
#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
#define F_TPRXPORTEN V_TPRXPORTEN(1U)
#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
-#define S_ENFORCEPKT 11
-#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
-#define F_ENFORCEPKT V_ENFORCEPKT(1U)
+#define A_MPS_DRR_CFG1 0x604
+
+#define S_RLDWTTPD1 11
+#define M_RLDWTTPD1 0x7ff
+#define V_RLDWTTPD1(x) ((x) << S_RLDWTTPD1)
+#define G_RLDWTTPD1(x) (((x) >> S_RLDWTTPD1) & M_RLDWTTPD1)
+
+#define S_RLDWTTPD0 0
+#define M_RLDWTTPD0 0x7ff
+#define V_RLDWTTPD0(x) ((x) << S_RLDWTTPD0)
+#define G_RLDWTTPD0(x) (((x) >> S_RLDWTTPD0) & M_RLDWTTPD0)
+
+#define A_MPS_DRR_CFG2 0x608
+
+#define S_RLDWTTOTAL 0
+#define M_RLDWTTOTAL 0xfff
+#define V_RLDWTTOTAL(x) ((x) << S_RLDWTTOTAL)
+#define G_RLDWTTOTAL(x) (((x) >> S_RLDWTTOTAL) & M_RLDWTTOTAL)
+
+#define A_MPS_MCA_STATUS 0x60c
+
+#define S_MCAPKTCNT 12
+#define M_MCAPKTCNT 0xfffff
+#define V_MCAPKTCNT(x) ((x) << S_MCAPKTCNT)
+#define G_MCAPKTCNT(x) (((x) >> S_MCAPKTCNT) & M_MCAPKTCNT)
+
+#define S_MCADEPTH 0
+#define M_MCADEPTH 0xfff
+#define V_MCADEPTH(x) ((x) << S_MCADEPTH)
+#define G_MCADEPTH(x) (((x) >> S_MCADEPTH) & M_MCADEPTH)
+
+#define A_MPS_TX0_TP_CNT 0x610
+
+#define S_TX0TPDISCNT 24
+#define M_TX0TPDISCNT 0xff
+#define V_TX0TPDISCNT(x) ((x) << S_TX0TPDISCNT)
+#define G_TX0TPDISCNT(x) (((x) >> S_TX0TPDISCNT) & M_TX0TPDISCNT)
+
+#define S_TX0TPCNT 0
+#define M_TX0TPCNT 0xffffff
+#define V_TX0TPCNT(x) ((x) << S_TX0TPCNT)
+#define G_TX0TPCNT(x) (((x) >> S_TX0TPCNT) & M_TX0TPCNT)
+
+#define A_MPS_TX1_TP_CNT 0x614
+
+#define S_TX1TPDISCNT 24
+#define M_TX1TPDISCNT 0xff
+#define V_TX1TPDISCNT(x) ((x) << S_TX1TPDISCNT)
+#define G_TX1TPDISCNT(x) (((x) >> S_TX1TPDISCNT) & M_TX1TPDISCNT)
+
+#define S_TX1TPCNT 0
+#define M_TX1TPCNT 0xffffff
+#define V_TX1TPCNT(x) ((x) << S_TX1TPCNT)
+#define G_TX1TPCNT(x) (((x) >> S_TX1TPCNT) & M_TX1TPCNT)
+
+#define A_MPS_RX_TP_CNT 0x618
+
+#define S_RXTPDISCNT 24
+#define M_RXTPDISCNT 0xff
+#define V_RXTPDISCNT(x) ((x) << S_RXTPDISCNT)
+#define G_RXTPDISCNT(x) (((x) >> S_RXTPDISCNT) & M_RXTPDISCNT)
+
+#define S_RXTPCNT 0
+#define M_RXTPCNT 0xffffff
+#define V_RXTPCNT(x) ((x) << S_RXTPCNT)
+#define G_RXTPCNT(x) (((x) >> S_RXTPCNT) & M_RXTPCNT)
#define A_MPS_INT_ENABLE 0x61c
#define S_MCAPARERRENB 6
#define M_MCAPARERRENB 0x7
-
#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
+#define G_MCAPARERRENB(x) (((x) >> S_MCAPARERRENB) & M_MCAPARERRENB)
#define S_RXTPPARERRENB 4
#define M_RXTPPARERRENB 0x3
-
#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
+#define G_RXTPPARERRENB(x) (((x) >> S_RXTPPARERRENB) & M_RXTPPARERRENB)
#define S_TX1TPPARERRENB 2
#define M_TX1TPPARERRENB 0x3
-
#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
+#define G_TX1TPPARERRENB(x) (((x) >> S_TX1TPPARERRENB) & M_TX1TPPARERRENB)
#define S_TX0TPPARERRENB 0
#define M_TX0TPPARERRENB 0x3
-
#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
+#define G_TX0TPPARERRENB(x) (((x) >> S_TX0TPPARERRENB) & M_TX0TPPARERRENB)
#define A_MPS_INT_CAUSE 0x620
#define S_MCAPARERR 6
#define M_MCAPARERR 0x7
-
#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
+#define G_MCAPARERR(x) (((x) >> S_MCAPARERR) & M_MCAPARERR)
#define S_RXTPPARERR 4
#define M_RXTPPARERR 0x3
-
#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
+#define G_RXTPPARERR(x) (((x) >> S_RXTPPARERR) & M_RXTPPARERR)
#define S_TX1TPPARERR 2
#define M_TX1TPPARERR 0x3
-
#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
+#define G_TX1TPPARERR(x) (((x) >> S_TX1TPPARERR) & M_TX1TPPARERR)
#define S_TX0TPPARERR 0
#define M_TX0TPPARERR 0x3
-
#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
+#define G_TX0TPPARERR(x) (((x) >> S_TX0TPPARERR) & M_TX0TPPARERR)
+
+/* registers for module CPL_SWITCH */
+#define CPL_SWITCH_BASE_ADDR 0x640
#define A_CPL_SWITCH_CNTRL 0x640
+#define S_CPL_PKT_TID 8
+#define M_CPL_PKT_TID 0xffffff
+#define V_CPL_PKT_TID(x) ((x) << S_CPL_PKT_TID)
+#define G_CPL_PKT_TID(x) (((x) >> S_CPL_PKT_TID) & M_CPL_PKT_TID)
+
+#define S_CIM_TO_UP_FULL_SIZE 4
+#define V_CIM_TO_UP_FULL_SIZE(x) ((x) << S_CIM_TO_UP_FULL_SIZE)
+#define F_CIM_TO_UP_FULL_SIZE V_CIM_TO_UP_FULL_SIZE(1U)
+
+#define S_CPU_NO_3F_CIM_ENABLE 3
+#define V_CPU_NO_3F_CIM_ENABLE(x) ((x) << S_CPU_NO_3F_CIM_ENABLE)
+#define F_CPU_NO_3F_CIM_ENABLE V_CPU_NO_3F_CIM_ENABLE(1U)
+
+#define S_SWITCH_TABLE_ENABLE 2
+#define V_SWITCH_TABLE_ENABLE(x) ((x) << S_SWITCH_TABLE_ENABLE)
+#define F_SWITCH_TABLE_ENABLE V_SWITCH_TABLE_ENABLE(1U)
+
+#define S_SGE_ENABLE 1
+#define V_SGE_ENABLE(x) ((x) << S_SGE_ENABLE)
+#define F_SGE_ENABLE V_SGE_ENABLE(1U)
+
+#define S_CIM_ENABLE 0
+#define V_CIM_ENABLE(x) ((x) << S_CIM_ENABLE)
+#define F_CIM_ENABLE V_CIM_ENABLE(1U)
+
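+/*
+ * Illustrative bring-up of the CPL switch; the exact flag combination
+ * below is an example, not a recommendation taken from this patch:
+ *
+ *	t3_write_reg(adap, A_CPL_SWITCH_CNTRL,
+ *		     F_CIM_ENABLE | F_SGE_ENABLE | F_SWITCH_TABLE_ENABLE);
+ */
+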
+#define A_CPL_SWITCH_TBL_IDX 0x644
+
+#define S_SWITCH_TBL_IDX 0
+#define M_SWITCH_TBL_IDX 0xf
+#define V_SWITCH_TBL_IDX(x) ((x) << S_SWITCH_TBL_IDX)
+#define G_SWITCH_TBL_IDX(x) (((x) >> S_SWITCH_TBL_IDX) & M_SWITCH_TBL_IDX)
+
+#define A_CPL_SWITCH_TBL_DATA 0x648
+#define A_CPL_SWITCH_ZERO_ERROR 0x64c
+
+#define S_ZERO_CMD 0
+#define M_ZERO_CMD 0xff
+#define V_ZERO_CMD(x) ((x) << S_ZERO_CMD)
+#define G_ZERO_CMD(x) (((x) >> S_ZERO_CMD) & M_ZERO_CMD)
+
#define A_CPL_INTR_ENABLE 0x650
#define S_CIM_OP_MAP_PERR 5
#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
#define A_CPL_INTR_CAUSE 0x654
+#define A_CPL_MAP_TBL_IDX 0x658
+
+#define S_CPL_MAP_TBL_IDX 0
+#define M_CPL_MAP_TBL_IDX 0xff
+#define V_CPL_MAP_TBL_IDX(x) ((x) << S_CPL_MAP_TBL_IDX)
+#define G_CPL_MAP_TBL_IDX(x) (((x) >> S_CPL_MAP_TBL_IDX) & M_CPL_MAP_TBL_IDX)
#define A_CPL_MAP_TBL_DATA 0x65c
+#define S_CPL_MAP_TBL_DATA 0
+#define M_CPL_MAP_TBL_DATA 0xff
+#define V_CPL_MAP_TBL_DATA(x) ((x) << S_CPL_MAP_TBL_DATA)
+#define G_CPL_MAP_TBL_DATA(x) (((x) >> S_CPL_MAP_TBL_DATA) & M_CPL_MAP_TBL_DATA)
+
+/* registers for module SMB0 */
+#define SMB0_BASE_ADDR 0x660
+
#define A_SMB_GLOBAL_TIME_CFG 0x660
+#define S_LADBGWRPTR 24
+#define M_LADBGWRPTR 0xff
+#define V_LADBGWRPTR(x) ((x) << S_LADBGWRPTR)
+#define G_LADBGWRPTR(x) (((x) >> S_LADBGWRPTR) & M_LADBGWRPTR)
+
+#define S_LADBGRDPTR 16
+#define M_LADBGRDPTR 0xff
+#define V_LADBGRDPTR(x) ((x) << S_LADBGRDPTR)
+#define G_LADBGRDPTR(x) (((x) >> S_LADBGRDPTR) & M_LADBGRDPTR)
+
+#define S_LADBGEN 13
+#define V_LADBGEN(x) ((x) << S_LADBGEN)
+#define F_LADBGEN V_LADBGEN(1U)
+
+#define S_MACROCNTCFG 8
+#define M_MACROCNTCFG 0x1f
+#define V_MACROCNTCFG(x) ((x) << S_MACROCNTCFG)
+#define G_MACROCNTCFG(x) (((x) >> S_MACROCNTCFG) & M_MACROCNTCFG)
+
+#define S_MICROCNTCFG 0
+#define M_MICROCNTCFG 0xff
+#define V_MICROCNTCFG(x) ((x) << S_MICROCNTCFG)
+#define G_MICROCNTCFG(x) (((x) >> S_MICROCNTCFG) & M_MICROCNTCFG)
+
+#define A_SMB_MST_TIMEOUT_CFG 0x664
+
+#define S_DEBUGSELH 28
+#define M_DEBUGSELH 0xf
+#define V_DEBUGSELH(x) ((x) << S_DEBUGSELH)
+#define G_DEBUGSELH(x) (((x) >> S_DEBUGSELH) & M_DEBUGSELH)
+
+#define S_DEBUGSELL 24
+#define M_DEBUGSELL 0xf
+#define V_DEBUGSELL(x) ((x) << S_DEBUGSELL)
+#define G_DEBUGSELL(x) (((x) >> S_DEBUGSELL) & M_DEBUGSELL)
+
+#define S_MSTTIMEOUTCFG 0
+#define M_MSTTIMEOUTCFG 0xffffff
+#define V_MSTTIMEOUTCFG(x) ((x) << S_MSTTIMEOUTCFG)
+#define G_MSTTIMEOUTCFG(x) (((x) >> S_MSTTIMEOUTCFG) & M_MSTTIMEOUTCFG)
+
+#define A_SMB_MST_CTL_CFG 0x668
+
+#define S_MSTFIFODBG 31
+#define V_MSTFIFODBG(x) ((x) << S_MSTFIFODBG)
+#define F_MSTFIFODBG V_MSTFIFODBG(1U)
+
+#define S_MSTFIFODBGCLR 30
+#define V_MSTFIFODBGCLR(x) ((x) << S_MSTFIFODBGCLR)
+#define F_MSTFIFODBGCLR V_MSTFIFODBGCLR(1U)
+
+#define S_MSTRXBYTECFG 12
+#define M_MSTRXBYTECFG 0x3f
+#define V_MSTRXBYTECFG(x) ((x) << S_MSTRXBYTECFG)
+#define G_MSTRXBYTECFG(x) (((x) >> S_MSTRXBYTECFG) & M_MSTRXBYTECFG)
+
+#define S_MSTTXBYTECFG 6
+#define M_MSTTXBYTECFG 0x3f
+#define V_MSTTXBYTECFG(x) ((x) << S_MSTTXBYTECFG)
+#define G_MSTTXBYTECFG(x) (((x) >> S_MSTTXBYTECFG) & M_MSTTXBYTECFG)
+
+#define S_MSTRESET 1
+#define V_MSTRESET(x) ((x) << S_MSTRESET)
+#define F_MSTRESET V_MSTRESET(1U)
+
+#define S_MSTCTLEN 0
+#define V_MSTCTLEN(x) ((x) << S_MSTCTLEN)
+#define F_MSTCTLEN V_MSTCTLEN(1U)
+
+#define A_SMB_MST_CTL_STS 0x66c
+
+#define S_MSTRXBYTECNT 12
+#define M_MSTRXBYTECNT 0x3f
+#define V_MSTRXBYTECNT(x) ((x) << S_MSTRXBYTECNT)
+#define G_MSTRXBYTECNT(x) (((x) >> S_MSTRXBYTECNT) & M_MSTRXBYTECNT)
+
+#define S_MSTTXBYTECNT 6
+#define M_MSTTXBYTECNT 0x3f
+#define V_MSTTXBYTECNT(x) ((x) << S_MSTTXBYTECNT)
+#define G_MSTTXBYTECNT(x) (((x) >> S_MSTTXBYTECNT) & M_MSTTXBYTECNT)
+
+#define S_MSTBUSYSTS 0
+#define V_MSTBUSYSTS(x) ((x) << S_MSTBUSYSTS)
+#define F_MSTBUSYSTS V_MSTBUSYSTS(1U)
+
+#define A_SMB_MST_TX_FIFO_RDWR 0x670
+#define A_SMB_MST_RX_FIFO_RDWR 0x674
+#define A_SMB_SLV_TIMEOUT_CFG 0x678
+
+#define S_SLVTIMEOUTCFG 0
+#define M_SLVTIMEOUTCFG 0xffffff
+#define V_SLVTIMEOUTCFG(x) ((x) << S_SLVTIMEOUTCFG)
+#define G_SLVTIMEOUTCFG(x) (((x) >> S_SLVTIMEOUTCFG) & M_SLVTIMEOUTCFG)
+
+#define A_SMB_SLV_CTL_CFG 0x67c
+
+#define S_SLVFIFODBG 31
+#define V_SLVFIFODBG(x) ((x) << S_SLVFIFODBG)
+#define F_SLVFIFODBG V_SLVFIFODBG(1U)
+
+#define S_SLVFIFODBGCLR 30
+#define V_SLVFIFODBGCLR(x) ((x) << S_SLVFIFODBGCLR)
+#define F_SLVFIFODBGCLR V_SLVFIFODBGCLR(1U)
+
+#define S_SLVADDRCFG 4
+#define M_SLVADDRCFG 0x7f
+#define V_SLVADDRCFG(x) ((x) << S_SLVADDRCFG)
+#define G_SLVADDRCFG(x) (((x) >> S_SLVADDRCFG) & M_SLVADDRCFG)
+
+#define S_SLVALRTSET 2
+#define V_SLVALRTSET(x) ((x) << S_SLVALRTSET)
+#define F_SLVALRTSET V_SLVALRTSET(1U)
+
+#define S_SLVRESET 1
+#define V_SLVRESET(x) ((x) << S_SLVRESET)
+#define F_SLVRESET V_SLVRESET(1U)
+
+#define S_SLVCTLEN 0
+#define V_SLVCTLEN(x) ((x) << S_SLVCTLEN)
+#define F_SLVCTLEN V_SLVCTLEN(1U)
+
+#define A_SMB_SLV_CTL_STS 0x680
+
+#define S_SLVFIFOTXCNT 12
+#define M_SLVFIFOTXCNT 0x3f
+#define V_SLVFIFOTXCNT(x) ((x) << S_SLVFIFOTXCNT)
+#define G_SLVFIFOTXCNT(x) (((x) >> S_SLVFIFOTXCNT) & M_SLVFIFOTXCNT)
+
+#define S_SLVFIFOCNT 6
+#define M_SLVFIFOCNT 0x3f
+#define V_SLVFIFOCNT(x) ((x) << S_SLVFIFOCNT)
+#define G_SLVFIFOCNT(x) (((x) >> S_SLVFIFOCNT) & M_SLVFIFOCNT)
+
+#define S_SLVALRTSTS 2
+#define V_SLVALRTSTS(x) ((x) << S_SLVALRTSTS)
+#define F_SLVALRTSTS V_SLVALRTSTS(1U)
+
+#define S_SLVBUSYSTS 0
+#define V_SLVBUSYSTS(x) ((x) << S_SLVBUSYSTS)
+#define F_SLVBUSYSTS V_SLVBUSYSTS(1U)
+
+#define A_SMB_SLV_FIFO_RDWR 0x684
+#define A_SMB_SLV_CMD_FIFO_RDWR 0x688
+#define A_SMB_INT_ENABLE 0x68c
+
+#define S_SLVTIMEOUTINTEN 7
+#define V_SLVTIMEOUTINTEN(x) ((x) << S_SLVTIMEOUTINTEN)
+#define F_SLVTIMEOUTINTEN V_SLVTIMEOUTINTEN(1U)
+
+#define S_SLVERRINTEN 6
+#define V_SLVERRINTEN(x) ((x) << S_SLVERRINTEN)
+#define F_SLVERRINTEN V_SLVERRINTEN(1U)
+
+#define S_SLVDONEINTEN 5
+#define V_SLVDONEINTEN(x) ((x) << S_SLVDONEINTEN)
+#define F_SLVDONEINTEN V_SLVDONEINTEN(1U)
+
+#define S_SLVRXRDYINTEN 4
+#define V_SLVRXRDYINTEN(x) ((x) << S_SLVRXRDYINTEN)
+#define F_SLVRXRDYINTEN V_SLVRXRDYINTEN(1U)
+
+#define S_MSTTIMEOUTINTEN 3
+#define V_MSTTIMEOUTINTEN(x) ((x) << S_MSTTIMEOUTINTEN)
+#define F_MSTTIMEOUTINTEN V_MSTTIMEOUTINTEN(1U)
+
+#define S_MSTNACKINTEN 2
+#define V_MSTNACKINTEN(x) ((x) << S_MSTNACKINTEN)
+#define F_MSTNACKINTEN V_MSTNACKINTEN(1U)
+
+#define S_MSTLOSTARBINTEN 1
+#define V_MSTLOSTARBINTEN(x) ((x) << S_MSTLOSTARBINTEN)
+#define F_MSTLOSTARBINTEN V_MSTLOSTARBINTEN(1U)
+
+#define S_MSTDONEINTEN 0
+#define V_MSTDONEINTEN(x) ((x) << S_MSTDONEINTEN)
+#define F_MSTDONEINTEN V_MSTDONEINTEN(1U)
+
+#define A_SMB_INT_CAUSE 0x690
+
+#define S_SLVTIMEOUTINT 7
+#define V_SLVTIMEOUTINT(x) ((x) << S_SLVTIMEOUTINT)
+#define F_SLVTIMEOUTINT V_SLVTIMEOUTINT(1U)
+
+#define S_SLVERRINT 6
+#define V_SLVERRINT(x) ((x) << S_SLVERRINT)
+#define F_SLVERRINT V_SLVERRINT(1U)
+
+#define S_SLVDONEINT 5
+#define V_SLVDONEINT(x) ((x) << S_SLVDONEINT)
+#define F_SLVDONEINT V_SLVDONEINT(1U)
+
+#define S_SLVRXRDYINT 4
+#define V_SLVRXRDYINT(x) ((x) << S_SLVRXRDYINT)
+#define F_SLVRXRDYINT V_SLVRXRDYINT(1U)
+
+#define S_MSTTIMEOUTINT 3
+#define V_MSTTIMEOUTINT(x) ((x) << S_MSTTIMEOUTINT)
+#define F_MSTTIMEOUTINT V_MSTTIMEOUTINT(1U)
+
+#define S_MSTNACKINT 2
+#define V_MSTNACKINT(x) ((x) << S_MSTNACKINT)
+#define F_MSTNACKINT V_MSTNACKINT(1U)
+
+#define S_MSTLOSTARBINT 1
+#define V_MSTLOSTARBINT(x) ((x) << S_MSTLOSTARBINT)
+#define F_MSTLOSTARBINT V_MSTLOSTARBINT(1U)
+
+#define S_MSTDONEINT 0
+#define V_MSTDONEINT(x) ((x) << S_MSTDONEINT)
+#define F_MSTDONEINT V_MSTDONEINT(1U)
+
+#define A_SMB_DEBUG_DATA 0x694
+
+#define S_DEBUGDATAH 16
+#define M_DEBUGDATAH 0xffff
+#define V_DEBUGDATAH(x) ((x) << S_DEBUGDATAH)
+#define G_DEBUGDATAH(x) (((x) >> S_DEBUGDATAH) & M_DEBUGDATAH)
+
+#define S_DEBUGDATAL 0
+#define M_DEBUGDATAL 0xffff
+#define V_DEBUGDATAL(x) ((x) << S_DEBUGDATAL)
+#define G_DEBUGDATAL(x) (((x) >> S_DEBUGDATAL) & M_DEBUGDATAL)
+
+#define A_SMB_DEBUG_LA 0x69c
+
+#define S_DEBUGLAREQADDR 0
+#define M_DEBUGLAREQADDR 0x3ff
+#define V_DEBUGLAREQADDR(x) ((x) << S_DEBUGLAREQADDR)
+#define G_DEBUGLAREQADDR(x) (((x) >> S_DEBUGLAREQADDR) & M_DEBUGLAREQADDR)
+
+/* registers for module I2CM0 */
+#define I2CM0_BASE_ADDR 0x6a0
+
#define A_I2C_CFG 0x6a0
#define S_I2C_CLKDIV 0
#define M_I2C_CLKDIV 0xfff
#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
+#define G_I2C_CLKDIV(x) (((x) >> S_I2C_CLKDIV) & M_I2C_CLKDIV)
+
+#define A_I2C_DATA 0x6a4
+#define A_I2C_OP 0x6a8
+
+#define S_ACK 30
+#define V_ACK(x) ((x) << S_ACK)
+#define F_ACK V_ACK(1U)
+
+#define S_I2C_CONT 1
+#define V_I2C_CONT(x) ((x) << S_I2C_CONT)
+#define F_I2C_CONT V_I2C_CONT(1U)
+
+/* registers for module MI1 */
+#define MI1_BASE_ADDR 0x6b0
#define A_MI1_CFG 0x6b0
#define S_CLKDIV 5
#define M_CLKDIV 0xff
#define V_CLKDIV(x) ((x) << S_CLKDIV)
+#define G_CLKDIV(x) (((x) >> S_CLKDIV) & M_CLKDIV)
#define S_ST 3
-
#define M_ST 0x3
-
#define V_ST(x) ((x) << S_ST)
-
#define G_ST(x) (((x) >> S_ST) & M_ST)
#define S_PREEN 2
#define S_PHYADDR 5
#define M_PHYADDR 0x1f
#define V_PHYADDR(x) ((x) << S_PHYADDR)
+#define G_PHYADDR(x) (((x) >> S_PHYADDR) & M_PHYADDR)
#define S_REGADDR 0
#define M_REGADDR 0x1f
#define V_REGADDR(x) ((x) << S_REGADDR)
+#define G_REGADDR(x) (((x) >> S_REGADDR) & M_REGADDR)
#define A_MI1_DATA 0x6b8
+#define S_MDI_DATA 0
+#define M_MDI_DATA 0xffff
+#define V_MDI_DATA(x) ((x) << S_MDI_DATA)
+#define G_MDI_DATA(x) (((x) >> S_MDI_DATA) & M_MDI_DATA)
+
#define A_MI1_OP 0x6bc
+#define S_INC 2
+#define V_INC(x) ((x) << S_INC)
+#define F_INC V_INC(1U)
+
#define S_MDI_OP 0
#define M_MDI_OP 0x3
#define V_MDI_OP(x) ((x) << S_MDI_OP)
+#define G_MDI_OP(x) (((x) >> S_MDI_OP) & M_MDI_OP)
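+
+/*
+ * Clause-22 MDIO read sketch through the MI1 block. A_MI1_ADDR (0x6b4)
+ * and the MI1 busy flag are assumed from the unchanged part of this
+ * header; the op encoding (2 = read) follows the driver's own usage:
+ *
+ *	t3_write_reg(adap, A_MI1_ADDR, V_PHYADDR(phy) | V_REGADDR(reg));
+ *	t3_write_reg(adap, A_MI1_OP, V_MDI_OP(2));
+ *	... poll A_MI1_OP until the busy bit clears ...
+ *	val = G_MDI_DATA(t3_read_reg(adap, A_MI1_DATA));
+ */
+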
-#define A_SF_DATA 0x6d8
+/* registers for module JM1 */
+#define JM1_BASE_ADDR 0x6c0
+
+#define A_JM_CFG 0x6c0
+
+#define S_JM_CLKDIV 2
+#define M_JM_CLKDIV 0xff
+#define V_JM_CLKDIV(x) ((x) << S_JM_CLKDIV)
+#define G_JM_CLKDIV(x) (((x) >> S_JM_CLKDIV) & M_JM_CLKDIV)
+
+#define S_TRST 1
+#define V_TRST(x) ((x) << S_TRST)
+#define F_TRST V_TRST(1U)
+
+#define S_EN 0
+#define V_EN(x) ((x) << S_EN)
+#define F_EN V_EN(1U)
+
+#define A_JM_MODE 0x6c4
+#define A_JM_DATA 0x6c8
+#define A_JM_OP 0x6cc
+
+#define S_CNT 0
+#define M_CNT 0x1f
+#define V_CNT(x) ((x) << S_CNT)
+#define G_CNT(x) (((x) >> S_CNT) & M_CNT)
+
+/* registers for module SF1 */
+#define SF1_BASE_ADDR 0x6d8
+
+#define A_SF_DATA 0x6d8
#define A_SF_OP 0x6dc
#define S_BYTECNT 1
#define M_BYTECNT 0x3
#define V_BYTECNT(x) ((x) << S_BYTECNT)
+#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+
+/* registers for module PL3 */
+#define PL3_BASE_ADDR 0x6e0
#define A_PL_INT_ENABLE0 0x6e0
+#define S_SW 25
+#define V_SW(x) ((x) << S_SW)
+#define F_SW V_SW(1U)
+
+#define S_EXT 24
+#define V_EXT(x) ((x) << S_EXT)
+#define F_EXT V_EXT(1U)
+
#define S_T3DBG 23
#define V_T3DBG(x) ((x) << S_T3DBG)
#define F_T3DBG V_T3DBG(1U)
#define V_MC5A(x) ((x) << S_MC5A)
#define F_MC5A V_MC5A(1U)
+#define S_SF1 17
+#define V_SF1(x) ((x) << S_SF1)
+#define F_SF1 V_SF1(1U)
+
+#define S_SMB0 15
+#define V_SMB0(x) ((x) << S_SMB0)
+#define F_SMB0 V_SMB0(1U)
+
+#define S_I2CM0 14
+#define V_I2CM0(x) ((x) << S_I2CM0)
+#define F_I2CM0 V_I2CM0(1U)
+
+#define S_MI1 13
+#define V_MI1(x) ((x) << S_MI1)
+#define F_MI1 V_MI1(1U)
+
#define S_CPL_SWITCH 12
#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
#define F_CPL_SWITCH V_CPL_SWITCH(1U)
#define F_SGE3 V_SGE3(1U)
#define A_PL_INT_CAUSE0 0x6e4
-
+#define A_PL_INT_ENABLE1 0x6e8
+#define A_PL_INT_CAUSE1 0x6ec
#define A_PL_RST 0x6f0
+#define S_FATALPERREN 4
+#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
+#define F_FATALPERREN V_FATALPERREN(1U)
+
+#define S_SWINT1 3
+#define V_SWINT1(x) ((x) << S_SWINT1)
+#define F_SWINT1 V_SWINT1(1U)
+
+#define S_SWINT0 2
+#define V_SWINT0(x) ((x) << S_SWINT0)
+#define F_SWINT0 V_SWINT0(1U)
+
#define S_CRSTWRM 1
#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
#define F_CRSTWRM V_CRSTWRM(1U)
#define A_PL_REV 0x6f4
+#define S_REV 0
+#define M_REV 0xf
+#define V_REV(x) ((x) << S_REV)
+#define G_REV(x) (((x) >> S_REV) & M_REV)
+
#define A_PL_CLI 0x6f8
+#define A_PL_LCK 0x6fc
+
+#define S_LCK 0
+#define M_LCK 0x3
+#define V_LCK(x) ((x) << S_LCK)
+#define G_LCK(x) (((x) >> S_LCK) & M_LCK)
+
+/* registers for module MC5A */
+#define MC5A_BASE_ADDR 0x700
+
+#define A_MC5_BUF_CONFIG 0x700
+
+#define S_TERM300_240 31
+#define V_TERM300_240(x) ((x) << S_TERM300_240)
+#define F_TERM300_240 V_TERM300_240(1U)
+
+#define S_MC5_TERM150 30
+#define V_MC5_TERM150(x) ((x) << S_MC5_TERM150)
+#define F_MC5_TERM150 V_MC5_TERM150(1U)
+
+#define S_TERM60 29
+#define V_TERM60(x) ((x) << S_TERM60)
+#define F_TERM60 V_TERM60(1U)
+
+#define S_GDDRIII 28
+#define V_GDDRIII(x) ((x) << S_GDDRIII)
+#define F_GDDRIII V_GDDRIII(1U)
+
+#define S_GDDRII 27
+#define V_GDDRII(x) ((x) << S_GDDRII)
+#define F_GDDRII V_GDDRII(1U)
+
+#define S_GDDRI 26
+#define V_GDDRI(x) ((x) << S_GDDRI)
+#define F_GDDRI V_GDDRI(1U)
+
+#define S_READ 25
+#define V_READ(x) ((x) << S_READ)
+#define F_READ V_READ(1U)
+
+#define S_IMP_SET_UPDATE 24
+#define V_IMP_SET_UPDATE(x) ((x) << S_IMP_SET_UPDATE)
+#define F_IMP_SET_UPDATE V_IMP_SET_UPDATE(1U)
+
+#define S_CAL_UPDATE 23
+#define V_CAL_UPDATE(x) ((x) << S_CAL_UPDATE)
+#define F_CAL_UPDATE V_CAL_UPDATE(1U)
+
+#define S_CAL_BUSY 22
+#define V_CAL_BUSY(x) ((x) << S_CAL_BUSY)
+#define F_CAL_BUSY V_CAL_BUSY(1U)
+
+#define S_CAL_ERROR 21
+#define V_CAL_ERROR(x) ((x) << S_CAL_ERROR)
+#define F_CAL_ERROR V_CAL_ERROR(1U)
+
+#define S_SGL_CAL_EN 20
+#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
+#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
+
+#define S_IMP_UPD_MODE 19
+#define V_IMP_UPD_MODE(x) ((x) << S_IMP_UPD_MODE)
+#define F_IMP_UPD_MODE V_IMP_UPD_MODE(1U)
+
+#define S_IMP_SEL 18
+#define V_IMP_SEL(x) ((x) << S_IMP_SEL)
+#define F_IMP_SEL V_IMP_SEL(1U)
+
+#define S_MAN_PU 15
+#define M_MAN_PU 0x7
+#define V_MAN_PU(x) ((x) << S_MAN_PU)
+#define G_MAN_PU(x) (((x) >> S_MAN_PU) & M_MAN_PU)
+
+#define S_MAN_PD 12
+#define M_MAN_PD 0x7
+#define V_MAN_PD(x) ((x) << S_MAN_PD)
+#define G_MAN_PD(x) (((x) >> S_MAN_PD) & M_MAN_PD)
+
+#define S_CAL_PU 9
+#define M_CAL_PU 0x7
+#define V_CAL_PU(x) ((x) << S_CAL_PU)
+#define G_CAL_PU(x) (((x) >> S_CAL_PU) & M_CAL_PU)
+
+#define S_CAL_PD 6
+#define M_CAL_PD 0x7
+#define V_CAL_PD(x) ((x) << S_CAL_PD)
+#define G_CAL_PD(x) (((x) >> S_CAL_PD) & M_CAL_PD)
+
+#define S_SET_PU 3
+#define M_SET_PU 0x7
+#define V_SET_PU(x) ((x) << S_SET_PU)
+#define G_SET_PU(x) (((x) >> S_SET_PU) & M_SET_PU)
+
+#define S_SET_PD 0
+#define M_SET_PD 0x7
+#define V_SET_PD(x) ((x) << S_SET_PD)
+#define G_SET_PD(x) (((x) >> S_SET_PD) & M_SET_PD)
+
+#define S_CAL_IMP_UPD 23
+#define V_CAL_IMP_UPD(x) ((x) << S_CAL_IMP_UPD)
+#define F_CAL_IMP_UPD V_CAL_IMP_UPD(1U)
#define A_MC5_DB_CONFIG 0x704
+#define S_TMCFGWRLOCK 31
+#define V_TMCFGWRLOCK(x) ((x) << S_TMCFGWRLOCK)
+#define F_TMCFGWRLOCK V_TMCFGWRLOCK(1U)
+
#define S_TMTYPEHI 30
#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
#define F_TMTYPEHI V_TMTYPEHI(1U)
#define V_TMTYPE(x) ((x) << S_TMTYPE)
#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
+#define S_TMPARTCOUNT 24
+#define M_TMPARTCOUNT 0x3
+#define V_TMPARTCOUNT(x) ((x) << S_TMPARTCOUNT)
+#define G_TMPARTCOUNT(x) (((x) >> S_TMPARTCOUNT) & M_TMPARTCOUNT)
+
+#define S_NLIP 18
+#define M_NLIP 0x3f
+#define V_NLIP(x) ((x) << S_NLIP)
+#define G_NLIP(x) (((x) >> S_NLIP) & M_NLIP)
+
#define S_COMPEN 17
#define V_COMPEN(x) ((x) << S_COMPEN)
#define F_COMPEN V_COMPEN(1U)
+#define S_BUILD 16
+#define V_BUILD(x) ((x) << S_BUILD)
+#define F_BUILD V_BUILD(1U)
+
+#define S_FILTEREN 11
+#define V_FILTEREN(x) ((x) << S_FILTEREN)
+#define F_FILTEREN V_FILTEREN(1U)
+
+#define S_CLIPUPDATE 10
+#define V_CLIPUPDATE(x) ((x) << S_CLIPUPDATE)
+#define F_CLIPUPDATE V_CLIPUPDATE(1U)
+
+#define S_TM_IO_PDOWN 9
+#define V_TM_IO_PDOWN(x) ((x) << S_TM_IO_PDOWN)
+#define F_TM_IO_PDOWN V_TM_IO_PDOWN(1U)
+
+#define S_SYNMODE 7
+#define M_SYNMODE 0x3
+#define V_SYNMODE(x) ((x) << S_SYNMODE)
+#define G_SYNMODE(x) (((x) >> S_SYNMODE) & M_SYNMODE)
+
#define S_PRTYEN 6
#define V_PRTYEN(x) ((x) << S_PRTYEN)
#define F_PRTYEN V_PRTYEN(1U)
#define V_DBGIEN(x) ((x) << S_DBGIEN)
#define F_DBGIEN V_DBGIEN(1U)
+#define S_TCMCFGOVR 3
+#define V_TCMCFGOVR(x) ((x) << S_TCMCFGOVR)
+#define F_TCMCFGOVR V_TCMCFGOVR(1U)
+
#define S_TMRDY 2
#define V_TMRDY(x) ((x) << S_TMRDY)
#define F_TMRDY V_TMRDY(1U)
#define V_TMMODE(x) ((x) << S_TMMODE)
#define F_TMMODE V_TMMODE(1U)
-#define F_TMMODE V_TMMODE(1U)
+#define A_MC5_MISC 0x708
+
+#define S_LIP_CMP_UNAVAILABLE 0
+#define M_LIP_CMP_UNAVAILABLE 0xf
+#define V_LIP_CMP_UNAVAILABLE(x) ((x) << S_LIP_CMP_UNAVAILABLE)
+#define G_LIP_CMP_UNAVAILABLE(x) (((x) >> S_LIP_CMP_UNAVAILABLE) & M_LIP_CMP_UNAVAILABLE)
#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
+#define S_RTINDX 0
+#define M_RTINDX 0x3fffff
+#define V_RTINDX(x) ((x) << S_RTINDX)
+#define G_RTINDX(x) (((x) >> S_RTINDX) & M_RTINDX)
+
#define A_MC5_DB_FILTER_TABLE 0x710
+#define S_SRINDX 0
+#define M_SRINDX 0x3fffff
+#define V_SRINDX(x) ((x) << S_SRINDX)
+#define G_SRINDX(x) (((x) >> S_SRINDX) & M_SRINDX)
+
#define A_MC5_DB_SERVER_INDEX 0x714
+#define A_MC5_DB_LIP_RAM_ADDR 0x718
+
+#define S_RAMWR 8
+#define V_RAMWR(x) ((x) << S_RAMWR)
+#define F_RAMWR V_RAMWR(1U)
+
+#define S_RAMADDR 0
+#define M_RAMADDR 0x3f
+#define V_RAMADDR(x) ((x) << S_RAMADDR)
+#define G_RAMADDR(x) (((x) >> S_RAMADDR) & M_RAMADDR)
+#define A_MC5_DB_LIP_RAM_DATA 0x71c
#define A_MC5_DB_RSP_LATENCY 0x720
#define S_RDLAT 16
#define M_RDLAT 0x1f
#define V_RDLAT(x) ((x) << S_RDLAT)
+#define G_RDLAT(x) (((x) >> S_RDLAT) & M_RDLAT)
#define S_LRNLAT 8
#define M_LRNLAT 0x1f
#define V_LRNLAT(x) ((x) << S_LRNLAT)
+#define G_LRNLAT(x) (((x) >> S_LRNLAT) & M_LRNLAT)
#define S_SRCHLAT 0
#define M_SRCHLAT 0x1f
#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT)
+
+#define A_MC5_DB_PARITY_LATENCY 0x724
+
+#define S_PARLAT 0
+#define M_PARLAT 0xf
+#define V_PARLAT(x) ((x) << S_PARLAT)
+#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT)
+
+#define A_MC5_DB_WR_LRN_VERIFY 0x728
+
+#define S_VWVEREN 2
+#define V_VWVEREN(x) ((x) << S_VWVEREN)
+#define F_VWVEREN V_VWVEREN(1U)
+
+#define S_LRNVEREN 1
+#define V_LRNVEREN(x) ((x) << S_LRNVEREN)
+#define F_LRNVEREN V_LRNVEREN(1U)
+
+#define S_POVEREN 0
+#define V_POVEREN(x) ((x) << S_POVEREN)
+#define F_POVEREN V_POVEREN(1U)
#define A_MC5_DB_PART_ID_INDEX 0x72c
+#define S_IDINDEX 0
+#define M_IDINDEX 0xf
+#define V_IDINDEX(x) ((x) << S_IDINDEX)
+#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX)
+
+#define A_MC5_DB_RESET_MAX 0x730
+
+#define S_RSTMAX 0
+#define M_RSTMAX 0xf
+#define V_RSTMAX(x) ((x) << S_RSTMAX)
+#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX)
+
+#define A_MC5_DB_ACT_CNT 0x734
+
+#define S_ACTCNT 0
+#define M_ACTCNT 0xfffff
+#define V_ACTCNT(x) ((x) << S_ACTCNT)
+#define G_ACTCNT(x) (((x) >> S_ACTCNT) & M_ACTCNT)
+
+#define A_MC5_DB_CLIP_MAP 0x738
+
+#define S_CLIPMAPOP 31
+#define V_CLIPMAPOP(x) ((x) << S_CLIPMAPOP)
+#define F_CLIPMAPOP V_CLIPMAPOP(1U)
+
+#define S_CLIPMAPVAL 16
+#define M_CLIPMAPVAL 0x3f
+#define V_CLIPMAPVAL(x) ((x) << S_CLIPMAPVAL)
+#define G_CLIPMAPVAL(x) (((x) >> S_CLIPMAPVAL) & M_CLIPMAPVAL)
+
+#define S_CLIPMAPADDR 0
+#define M_CLIPMAPADDR 0x3f
+#define V_CLIPMAPADDR(x) ((x) << S_CLIPMAPADDR)
+#define G_CLIPMAPADDR(x) (((x) >> S_CLIPMAPADDR) & M_CLIPMAPADDR)
+
+#define A_MC5_DB_SIZE 0x73c
#define A_MC5_DB_INT_ENABLE 0x740
+#define S_MSGSEL 28
+#define M_MSGSEL 0xf
+#define V_MSGSEL(x) ((x) << S_MSGSEL)
+#define G_MSGSEL(x) (((x) >> S_MSGSEL) & M_MSGSEL)
+
#define S_DELACTEMPTY 18
#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
#define F_DELACTEMPTY V_DELACTEMPTY(1U)
#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
+#define S_SYNCOOKIEOFF 11
+#define V_SYNCOOKIEOFF(x) ((x) << S_SYNCOOKIEOFF)
+#define F_SYNCOOKIEOFF V_SYNCOOKIEOFF(1U)
+
+#define S_SYNCOOKIEBAD 10
+#define V_SYNCOOKIEBAD(x) ((x) << S_SYNCOOKIEBAD)
+#define F_SYNCOOKIEBAD V_SYNCOOKIEBAD(1U)
+
+#define S_SYNCOOKIE 9
+#define V_SYNCOOKIE(x) ((x) << S_SYNCOOKIE)
+#define F_SYNCOOKIE V_SYNCOOKIE(1U)
+
#define S_NFASRCHFAIL 8
#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
#define V_PARITYERR(x) ((x) << S_PARITYERR)
#define F_PARITYERR V_PARITYERR(1U)
+#define S_LIPMISS 5
+#define V_LIPMISS(x) ((x) << S_LIPMISS)
+#define F_LIPMISS V_LIPMISS(1U)
+
+#define S_LIP0 4
+#define V_LIP0(x) ((x) << S_LIP0)
+#define F_LIP0 V_LIP0(1U)
+
+#define S_MISS 3
+#define V_MISS(x) ((x) << S_MISS)
+#define F_MISS V_MISS(1U)
+
+#define S_ROUTINGHIT 2
+#define V_ROUTINGHIT(x) ((x) << S_ROUTINGHIT)
+#define F_ROUTINGHIT V_ROUTINGHIT(1U)
+
+#define S_ACTIVEHIT 1
+#define V_ACTIVEHIT(x) ((x) << S_ACTIVEHIT)
+#define F_ACTIVEHIT V_ACTIVEHIT(1U)
+
+#define S_ACTIVEOUTHIT 0
+#define V_ACTIVEOUTHIT(x) ((x) << S_ACTIVEOUTHIT)
+#define F_ACTIVEOUTHIT V_ACTIVEOUTHIT(1U)
+
#define A_MC5_DB_INT_CAUSE 0x744
+#define A_MC5_DB_INT_TID 0x748
+
+#define S_INTTID 0
+#define M_INTTID 0xfffff
+#define V_INTTID(x) ((x) << S_INTTID)
+#define G_INTTID(x) (((x) >> S_INTTID) & M_INTTID)
+
+#define A_MC5_DB_INT_PTID 0x74c
+
+#define S_INTPTID 0
+#define M_INTPTID 0xfffff
+#define V_INTPTID(x) ((x) << S_INTPTID)
+#define G_INTPTID(x) (((x) >> S_INTPTID) & M_INTPTID)
#define A_MC5_DB_DBGI_CONFIG 0x774
+#define S_WRREQSIZE 22
+#define M_WRREQSIZE 0x3ff
+#define V_WRREQSIZE(x) ((x) << S_WRREQSIZE)
+#define G_WRREQSIZE(x) (((x) >> S_WRREQSIZE) & M_WRREQSIZE)
+
+#define S_SADRSEL 4
+#define V_SADRSEL(x) ((x) << S_SADRSEL)
+#define F_SADRSEL V_SADRSEL(1U)
+
+#define S_CMDMODE 0
+#define M_CMDMODE 0x7
+#define V_CMDMODE(x) ((x) << S_CMDMODE)
+#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE)
+
#define A_MC5_DB_DBGI_REQ_CMD 0x778
-#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
-#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
+#define S_MBUSCMD 0
+#define M_MBUSCMD 0xf
+#define V_MBUSCMD(x) ((x) << S_MBUSCMD)
+#define G_MBUSCMD(x) (((x) >> S_MBUSCMD) & M_MBUSCMD)
+
+#define S_IDTCMDHI 11
+#define M_IDTCMDHI 0x7
+#define V_IDTCMDHI(x) ((x) << S_IDTCMDHI)
+#define G_IDTCMDHI(x) (((x) >> S_IDTCMDHI) & M_IDTCMDHI)
+
+#define S_IDTCMDLO 0
+#define M_IDTCMDLO 0xf
+#define V_IDTCMDLO(x) ((x) << S_IDTCMDLO)
+#define G_IDTCMDLO(x) (((x) >> S_IDTCMDLO) & M_IDTCMDLO)
+
+#define S_IDTCMD 0
+#define M_IDTCMD 0xfffff
+#define V_IDTCMD(x) ((x) << S_IDTCMD)
+#define G_IDTCMD(x) (((x) >> S_IDTCMD) & M_IDTCMD)
+
+#define S_LCMDB 16
+#define M_LCMDB 0x7ff
+#define V_LCMDB(x) ((x) << S_LCMDB)
+#define G_LCMDB(x) (((x) >> S_LCMDB) & M_LCMDB)
+#define S_LCMDA 0
+#define M_LCMDA 0x7ff
+#define V_LCMDA(x) ((x) << S_LCMDA)
+#define G_LCMDA(x) (((x) >> S_LCMDA) & M_LCMDA)
+
+#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
+#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
-#define A_MC5_DB_DBGI_REQ_DATA0 0x788
+#define S_DBGIREQADRHI 0
+#define M_DBGIREQADRHI 0xff
+#define V_DBGIREQADRHI(x) ((x) << S_DBGIREQADRHI)
+#define G_DBGIREQADRHI(x) (((x) >> S_DBGIREQADRHI) & M_DBGIREQADRHI)
+#define A_MC5_DB_DBGI_REQ_DATA0 0x788
#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
-
#define A_MC5_DB_DBGI_REQ_DATA2 0x790
+#define A_MC5_DB_DBGI_REQ_DATA3 0x794
+#define A_MC5_DB_DBGI_REQ_DATA4 0x798
+
+#define S_DBGIREQDATA4 0
+#define M_DBGIREQDATA4 0xffff
+#define V_DBGIREQDATA4(x) ((x) << S_DBGIREQDATA4)
+#define G_DBGIREQDATA4(x) (((x) >> S_DBGIREQDATA4) & M_DBGIREQDATA4)
+
+#define A_MC5_DB_DBGI_REQ_MASK0 0x79c
+#define A_MC5_DB_DBGI_REQ_MASK1 0x7a0
+#define A_MC5_DB_DBGI_REQ_MASK2 0x7a4
+#define A_MC5_DB_DBGI_REQ_MASK3 0x7a8
+#define A_MC5_DB_DBGI_REQ_MASK4 0x7ac
+
+#define S_DBGIREQMSK4 0
+#define M_DBGIREQMSK4 0xffff
+#define V_DBGIREQMSK4(x) ((x) << S_DBGIREQMSK4)
+#define G_DBGIREQMSK4(x) (((x) >> S_DBGIREQMSK4) & M_DBGIREQMSK4)
#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
+#define S_DBGIRSPMSG 8
+#define M_DBGIRSPMSG 0xf
+#define V_DBGIRSPMSG(x) ((x) << S_DBGIRSPMSG)
+#define G_DBGIRSPMSG(x) (((x) >> S_DBGIRSPMSG) & M_DBGIRSPMSG)
+
+#define S_DBGIRSPMSGVLD 2
+#define V_DBGIRSPMSGVLD(x) ((x) << S_DBGIRSPMSGVLD)
+#define F_DBGIRSPMSGVLD V_DBGIRSPMSGVLD(1U)
+
+#define S_DBGIRSPHIT 1
+#define V_DBGIRSPHIT(x) ((x) << S_DBGIRSPHIT)
+#define F_DBGIRSPHIT V_DBGIRSPHIT(1U)
+
#define S_DBGIRSPVALID 0
#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
-
#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
-
#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
+#define A_MC5_DB_DBGI_RSP_DATA3 0x7c0
+#define A_MC5_DB_DBGI_RSP_DATA4 0x7c4
+
+#define S_DBGIRSPDATA3 0
+#define M_DBGIRSPDATA3 0xffff
+#define V_DBGIRSPDATA3(x) ((x) << S_DBGIRSPDATA3)
+#define G_DBGIRSPDATA3(x) (((x) >> S_DBGIRSPDATA3) & M_DBGIRSPDATA3)
+
+#define A_MC5_DB_DBGI_RSP_LAST_CMD 0x7c8
+
+#define S_LASTCMDB 16
+#define M_LASTCMDB 0x7ff
+#define V_LASTCMDB(x) ((x) << S_LASTCMDB)
+#define G_LASTCMDB(x) (((x) >> S_LASTCMDB) & M_LASTCMDB)
+
+#define S_LASTCMDA 0
+#define M_LASTCMDA 0x7ff
+#define V_LASTCMDA(x) ((x) << S_LASTCMDA)
+#define G_LASTCMDA(x) (((x) >> S_LASTCMDA) & M_LASTCMDA)
#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
+#define S_PO_DWR 0
+#define M_PO_DWR 0xfffff
+#define V_PO_DWR(x) ((x) << S_PO_DWR)
+#define G_PO_DWR(x) (((x) >> S_PO_DWR) & M_PO_DWR)
+
#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
+#define S_PO_MWR 0
+#define M_PO_MWR 0xfffff
+#define V_PO_MWR(x) ((x) << S_PO_MWR)
+#define G_PO_MWR(x) (((x) >> S_PO_MWR) & M_PO_MWR)
+
#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
+#define S_AO_SRCH 0
+#define M_AO_SRCH 0xfffff
+#define V_AO_SRCH(x) ((x) << S_AO_SRCH)
+#define G_AO_SRCH(x) (((x) >> S_AO_SRCH) & M_AO_SRCH)
+
#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
+#define S_AO_LRN 0
+#define M_AO_LRN 0xfffff
+#define V_AO_LRN(x) ((x) << S_AO_LRN)
+#define G_AO_LRN(x) (((x) >> S_AO_LRN) & M_AO_LRN)
+
#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
+#define S_SYN_SRCH 0
+#define M_SYN_SRCH 0xfffff
+#define V_SYN_SRCH(x) ((x) << S_SYN_SRCH)
+#define G_SYN_SRCH(x) (((x) >> S_SYN_SRCH) & M_SYN_SRCH)
+
#define A_MC5_DB_SYN_LRN_CMD 0x7e0
+#define S_SYN_LRN 0
+#define M_SYN_LRN 0xfffff
+#define V_SYN_LRN(x) ((x) << S_SYN_LRN)
+#define G_SYN_LRN(x) (((x) >> S_SYN_LRN) & M_SYN_LRN)
+
#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
+#define S_ACK_SRCH 0
+#define M_ACK_SRCH 0xfffff
+#define V_ACK_SRCH(x) ((x) << S_ACK_SRCH)
+#define G_ACK_SRCH(x) (((x) >> S_ACK_SRCH) & M_ACK_SRCH)
+
#define A_MC5_DB_ACK_LRN_CMD 0x7e8
+#define S_ACK_LRN 0
+#define M_ACK_LRN 0xfffff
+#define V_ACK_LRN(x) ((x) << S_ACK_LRN)
+#define G_ACK_LRN(x) (((x) >> S_ACK_LRN) & M_ACK_LRN)
+
#define A_MC5_DB_ILOOKUP_CMD 0x7ec
+#define S_I_SRCH 0
+#define M_I_SRCH 0xfffff
+#define V_I_SRCH(x) ((x) << S_I_SRCH)
+#define G_I_SRCH(x) (((x) >> S_I_SRCH) & M_I_SRCH)
+
#define A_MC5_DB_ELOOKUP_CMD 0x7f0
+#define S_E_SRCH 0
+#define M_E_SRCH 0xfffff
+#define V_E_SRCH(x) ((x) << S_E_SRCH)
+#define G_E_SRCH(x) (((x) >> S_E_SRCH) & M_E_SRCH)
+
#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
+#define S_WRITE 0
+#define M_WRITE 0xfffff
+#define V_WRITE(x) ((x) << S_WRITE)
+#define G_WRITE(x) (((x) >> S_WRITE) & M_WRITE)
+
#define A_MC5_DB_DATA_READ_CMD 0x7f8
+#define S_READCMD 0
+#define M_READCMD 0xfffff
+#define V_READCMD(x) ((x) << S_READCMD)
+#define G_READCMD(x) (((x) >> S_READCMD) & M_READCMD)
+
+#define A_MC5_DB_MASK_WRITE_CMD 0x7fc
+
+#define S_MASKWR 0
+#define M_MASKWR 0xffff
+#define V_MASKWR(x) ((x) << S_MASKWR)
+#define G_MASKWR(x) (((x) >> S_MASKWR) & M_MASKWR)
+
+/* registers for module XGMAC0_0 */
#define XGMAC0_0_BASE_ADDR 0x800
#define A_XGM_TX_CTRL 0x800
+#define S_SENDPAUSE 2
+#define V_SENDPAUSE(x) ((x) << S_SENDPAUSE)
+#define F_SENDPAUSE V_SENDPAUSE(1U)
+
+#define S_SENDZEROPAUSE 1
+#define V_SENDZEROPAUSE(x) ((x) << S_SENDZEROPAUSE)
+#define F_SENDZEROPAUSE V_SENDZEROPAUSE(1U)
+
#define S_TXEN 0
#define V_TXEN(x) ((x) << S_TXEN)
#define F_TXEN V_TXEN(1U)
#define A_XGM_TX_CFG 0x804
+#define S_CFGCLKSPEED 2
+#define M_CFGCLKSPEED 0x7
+#define V_CFGCLKSPEED(x) ((x) << S_CFGCLKSPEED)
+#define G_CFGCLKSPEED(x) (((x) >> S_CFGCLKSPEED) & M_CFGCLKSPEED)
+
+#define S_STRETCHMODE 1
+#define V_STRETCHMODE(x) ((x) << S_STRETCHMODE)
+#define F_STRETCHMODE V_STRETCHMODE(1U)
+
#define S_TXPAUSEEN 0
#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
#define F_TXPAUSEEN V_TXPAUSEEN(1U)
#define A_XGM_TX_PAUSE_QUANTA 0x808
+#define S_TXPAUSEQUANTA 0
+#define M_TXPAUSEQUANTA 0xffff
+#define V_TXPAUSEQUANTA(x) ((x) << S_TXPAUSEQUANTA)
+#define G_TXPAUSEQUANTA(x) (((x) >> S_TXPAUSEQUANTA) & M_TXPAUSEQUANTA)
+
#define A_XGM_RX_CTRL 0x80c
#define S_RXEN 0
#define A_XGM_RX_CFG 0x810
+#define S_CON802_3PREAMBLE 12
+#define V_CON802_3PREAMBLE(x) ((x) << S_CON802_3PREAMBLE)
+#define F_CON802_3PREAMBLE V_CON802_3PREAMBLE(1U)
+
+#define S_ENNON802_3PREAMBLE 11
+#define V_ENNON802_3PREAMBLE(x) ((x) << S_ENNON802_3PREAMBLE)
+#define F_ENNON802_3PREAMBLE V_ENNON802_3PREAMBLE(1U)
+
+#define S_COPYPREAMBLE 10
+#define V_COPYPREAMBLE(x) ((x) << S_COPYPREAMBLE)
+#define F_COPYPREAMBLE V_COPYPREAMBLE(1U)
+
#define S_DISPAUSEFRAMES 9
#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
#define V_RMFCS(x) ((x) << S_RMFCS)
#define F_RMFCS V_RMFCS(1U)
+#define S_DISNONVLAN 5
+#define V_DISNONVLAN(x) ((x) << S_DISNONVLAN)
+#define F_DISNONVLAN V_DISNONVLAN(1U)
+
+#define S_ENEXTMATCH 4
+#define V_ENEXTMATCH(x) ((x) << S_ENEXTMATCH)
+#define F_ENEXTMATCH V_ENEXTMATCH(1U)
+
+#define S_ENHASHUCAST 3
+#define V_ENHASHUCAST(x) ((x) << S_ENHASHUCAST)
+#define F_ENHASHUCAST V_ENHASHUCAST(1U)
+
#define S_ENHASHMCAST 2
#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
#define F_ENHASHMCAST V_ENHASHMCAST(1U)
-#define S_COPYALLFRAMES 0
-#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
-#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
-
#define S_DISBCAST 1
#define V_DISBCAST(x) ((x) << S_DISBCAST)
#define F_DISBCAST V_DISBCAST(1U)
-#define A_XGM_RX_HASH_LOW 0x814
+#define S_COPYALLFRAMES 0
+#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
+#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
+#define A_XGM_RX_HASH_LOW 0x814
#define A_XGM_RX_HASH_HIGH 0x818
-
#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
-
#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
-#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
+#define S_ADDRESS_HIGH 0
+#define M_ADDRESS_HIGH 0xffff
+#define V_ADDRESS_HIGH(x) ((x) << S_ADDRESS_HIGH)
+#define G_ADDRESS_HIGH(x) (((x) >> S_ADDRESS_HIGH) & M_ADDRESS_HIGH)
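+
+/*
+ * Each exact-match LOW/HIGH register pair holds one unicast MAC filter:
+ * LOW carries the first four address octets and HIGH the remaining two
+ * (masked by M_ADDRESS_HIGH above); the byte packing is a driver
+ * convention, see t3_mac_set_address() in xgmac.c.
+ */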
+#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
+#define A_XGM_RX_EXACT_MATCH_HIGH_2 0x828
#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
-
+#define A_XGM_RX_EXACT_MATCH_HIGH_3 0x830
#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
-
+#define A_XGM_RX_EXACT_MATCH_HIGH_4 0x838
#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
-
+#define A_XGM_RX_EXACT_MATCH_HIGH_5 0x840
#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
-
+#define A_XGM_RX_EXACT_MATCH_HIGH_6 0x848
#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
-
+#define A_XGM_RX_EXACT_MATCH_HIGH_7 0x850
#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
+#define A_XGM_RX_EXACT_MATCH_HIGH_8 0x858
+#define A_XGM_RX_TYPE_MATCH_1 0x85c
+
+#define S_ENTYPEMATCH 31
+#define V_ENTYPEMATCH(x) ((x) << S_ENTYPEMATCH)
+#define F_ENTYPEMATCH V_ENTYPEMATCH(1U)
+
+#define S_TYPE 0
+#define M_TYPE 0xffff
+#define V_TYPE(x) ((x) << S_TYPE)
+#define G_TYPE(x) (((x) >> S_TYPE) & M_TYPE)
+
+#define A_XGM_RX_TYPE_MATCH_2 0x860
+#define A_XGM_RX_TYPE_MATCH_3 0x864
+#define A_XGM_RX_TYPE_MATCH_4 0x868
+#define A_XGM_INT_STATUS 0x86c
+
+#define S_XGMIIEXTINT 10
+#define V_XGMIIEXTINT(x) ((x) << S_XGMIIEXTINT)
+#define F_XGMIIEXTINT V_XGMIIEXTINT(1U)
+
+#define S_LINKFAULTCHANGE 9
+#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE)
+#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U)
+
+#define S_PHYFRAMECOMPLETE 8
+#define V_PHYFRAMECOMPLETE(x) ((x) << S_PHYFRAMECOMPLETE)
+#define F_PHYFRAMECOMPLETE V_PHYFRAMECOMPLETE(1U)
+
+#define S_PAUSEFRAMETXMT 7
+#define V_PAUSEFRAMETXMT(x) ((x) << S_PAUSEFRAMETXMT)
+#define F_PAUSEFRAMETXMT V_PAUSEFRAMETXMT(1U)
+
+#define S_PAUSECNTRTIMEOUT 6
+#define V_PAUSECNTRTIMEOUT(x) ((x) << S_PAUSECNTRTIMEOUT)
+#define F_PAUSECNTRTIMEOUT V_PAUSECNTRTIMEOUT(1U)
+
+#define S_NON0PAUSERCVD 5
+#define V_NON0PAUSERCVD(x) ((x) << S_NON0PAUSERCVD)
+#define F_NON0PAUSERCVD V_NON0PAUSERCVD(1U)
+
+#define S_STATOFLOW 4
+#define V_STATOFLOW(x) ((x) << S_STATOFLOW)
+#define F_STATOFLOW V_STATOFLOW(1U)
+
+#define S_TXERRFIFO 3
+#define V_TXERRFIFO(x) ((x) << S_TXERRFIFO)
+#define F_TXERRFIFO V_TXERRFIFO(1U)
+
+#define S_TXUFLOW 2
+#define V_TXUFLOW(x) ((x) << S_TXUFLOW)
+#define F_TXUFLOW V_TXUFLOW(1U)
+
+#define S_FRAMETXMT 1
+#define V_FRAMETXMT(x) ((x) << S_FRAMETXMT)
+#define F_FRAMETXMT V_FRAMETXMT(1U)
+
+#define S_FRAMERCVD 0
+#define V_FRAMERCVD(x) ((x) << S_FRAMERCVD)
+#define F_FRAMERCVD V_FRAMERCVD(1U)
+
+#define A_XGM_XGM_INT_MASK 0x870
+#define A_XGM_XGM_INT_ENABLE 0x874
+#define A_XGM_XGM_INT_DISABLE 0x878
+#define A_XGM_TX_PAUSE_TIMER 0x87c
+
+#define S_CURPAUSETIMER 0
+#define M_CURPAUSETIMER 0xffff
+#define V_CURPAUSETIMER(x) ((x) << S_CURPAUSETIMER)
+#define G_CURPAUSETIMER(x) (((x) >> S_CURPAUSETIMER) & M_CURPAUSETIMER)
+
#define A_XGM_STAT_CTRL 0x880
+
+#define S_READSNPSHOT 4
+#define V_READSNPSHOT(x) ((x) << S_READSNPSHOT)
+#define F_READSNPSHOT V_READSNPSHOT(1U)
+
+#define S_TAKESNPSHOT 3
+#define V_TAKESNPSHOT(x) ((x) << S_TAKESNPSHOT)
+#define F_TAKESNPSHOT V_TAKESNPSHOT(1U)
#define S_CLRSTATS 2
#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
#define F_CLRSTATS V_CLRSTATS(1U)
+#define S_INCRSTATS 1
+#define V_INCRSTATS(x) ((x) << S_INCRSTATS)
+#define F_INCRSTATS V_INCRSTATS(1U)
+
+#define S_ENTESTMODEWR 0
+#define V_ENTESTMODEWR(x) ((x) << S_ENTESTMODEWR)
+#define F_ENTESTMODEWR V_ENTESTMODEWR(1U)
+
#define A_XGM_RXFIFO_CFG 0x884
#define S_RXFIFO_EMPTY 31
#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
+#define S_RXFIFO_FULL 30
+#define V_RXFIFO_FULL(x) ((x) << S_RXFIFO_FULL)
+#define F_RXFIFO_FULL V_RXFIFO_FULL(1U)
+
#define S_RXFIFOPAUSEHWM 17
#define M_RXFIFOPAUSEHWM 0xfff
-
#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
-
#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
#define S_RXFIFOPAUSELWM 5
#define M_RXFIFOPAUSELWM 0xfff
-
#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
-
#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+#define S_FORCEDPAUSE 4
+#define V_FORCEDPAUSE(x) ((x) << S_FORCEDPAUSE)
+#define F_FORCEDPAUSE V_FORCEDPAUSE(1U)
+
+#define S_EXTERNLOOPBACK 3
+#define V_EXTERNLOOPBACK(x) ((x) << S_EXTERNLOOPBACK)
+#define F_EXTERNLOOPBACK V_EXTERNLOOPBACK(1U)
+
+#define S_RXBYTESWAP 2
+#define V_RXBYTESWAP(x) ((x) << S_RXBYTESWAP)
+#define F_RXBYTESWAP V_RXBYTESWAP(1U)
+
#define S_RXSTRFRWRD 1
#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
#define A_XGM_TXFIFO_CFG 0x888
+#define S_TXFIFO_EMPTY 31
+#define V_TXFIFO_EMPTY(x) ((x) << S_TXFIFO_EMPTY)
+#define F_TXFIFO_EMPTY V_TXFIFO_EMPTY(1U)
+
+#define S_TXFIFO_FULL 30
+#define V_TXFIFO_FULL(x) ((x) << S_TXFIFO_FULL)
+#define F_TXFIFO_FULL V_TXFIFO_FULL(1U)
+
#define S_UNDERUNFIX 22
#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
#define F_UNDERUNFIX V_UNDERUNFIX(1U)
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
#define S_TXIPG 13
#define M_TXIPG 0xff
#define V_TXIPG(x) ((x) << S_TXIPG)
#define S_TXFIFOTHRESH 4
#define M_TXFIFOTHRESH 0x1ff
-
#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+#define G_TXFIFOTHRESH(x) (((x) >> S_TXFIFOTHRESH) & M_TXFIFOTHRESH)
-#define S_ENDROPPKT 21
-#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
-#define F_ENDROPPKT V_ENDROPPKT(1U)
+#define S_INTERNLOOPBACK 3
+#define V_INTERNLOOPBACK(x) ((x) << S_INTERNLOOPBACK)
+#define F_INTERNLOOPBACK V_INTERNLOOPBACK(1U)
+
+#define S_TXBYTESWAP 2
+#define V_TXBYTESWAP(x) ((x) << S_TXBYTESWAP)
+#define F_TXBYTESWAP V_TXBYTESWAP(1U)
+
+#define S_DISCRC 1
+#define V_DISCRC(x) ((x) << S_DISCRC)
+#define F_DISCRC V_DISCRC(1U)
+
+#define S_DISPREAMBLE 0
+#define V_DISPREAMBLE(x) ((x) << S_DISPREAMBLE)
+#define F_DISPREAMBLE V_DISPREAMBLE(1U)
+
+#define A_XGM_SLOW_TIMER 0x88c
+
+#define S_PAUSESLOWTIMEREN 31
+#define V_PAUSESLOWTIMEREN(x) ((x) << S_PAUSESLOWTIMEREN)
+#define F_PAUSESLOWTIMEREN V_PAUSESLOWTIMEREN(1U)
+
+#define S_PAUSESLOWTIMER 0
+#define M_PAUSESLOWTIMER 0xfffff
+#define V_PAUSESLOWTIMER(x) ((x) << S_PAUSESLOWTIMER)
+#define G_PAUSESLOWTIMER(x) (((x) >> S_PAUSESLOWTIMER) & M_PAUSESLOWTIMER)
+
+#define A_XGM_PAUSE_TIMER 0x890
+
+#define S_PAUSETIMER 0
+#define M_PAUSETIMER 0xfffff
+#define V_PAUSETIMER(x) ((x) << S_PAUSETIMER)
+#define G_PAUSETIMER(x) (((x) >> S_PAUSETIMER) & M_PAUSETIMER)
#define A_XGM_SERDES_CTRL 0x890
-#define A_XGM_SERDES_CTRL0 0x8e0
+
+#define S_SERDESEN 25
+#define V_SERDESEN(x) ((x) << S_SERDESEN)
+#define F_SERDESEN V_SERDESEN(1U)
#define S_SERDESRESET_ 24
#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
#define F_SERDESRESET_ V_SERDESRESET_(1U)
+#define S_CMURANGE 21
+#define M_CMURANGE 0x7
+#define V_CMURANGE(x) ((x) << S_CMURANGE)
+#define G_CMURANGE(x) (((x) >> S_CMURANGE) & M_CMURANGE)
+
+#define S_BGENB 20
+#define V_BGENB(x) ((x) << S_BGENB)
+#define F_BGENB V_BGENB(1U)
+
+#define S_ENSKPDROP 19
+#define V_ENSKPDROP(x) ((x) << S_ENSKPDROP)
+#define F_ENSKPDROP V_ENSKPDROP(1U)
+
+#define S_ENCOMMA 18
+#define V_ENCOMMA(x) ((x) << S_ENCOMMA)
+#define F_ENCOMMA V_ENCOMMA(1U)
+
+#define S_EN8B10B 17
+#define V_EN8B10B(x) ((x) << S_EN8B10B)
+#define F_EN8B10B V_EN8B10B(1U)
+
+#define S_ENELBUF 16
+#define V_ENELBUF(x) ((x) << S_ENELBUF)
+#define F_ENELBUF V_ENELBUF(1U)
+
+#define S_GAIN 11
+#define M_GAIN 0x1f
+#define V_GAIN(x) ((x) << S_GAIN)
+#define G_GAIN(x) (((x) >> S_GAIN) & M_GAIN)
+
+#define S_BANDGAP 7
+#define M_BANDGAP 0xf
+#define V_BANDGAP(x) ((x) << S_BANDGAP)
+#define G_BANDGAP(x) (((x) >> S_BANDGAP) & M_BANDGAP)
+
+#define S_LPBKEN 5
+#define M_LPBKEN 0x3
+#define V_LPBKEN(x) ((x) << S_LPBKEN)
+#define G_LPBKEN(x) (((x) >> S_LPBKEN) & M_LPBKEN)
+
#define S_RXENABLE 4
#define V_RXENABLE(x) ((x) << S_RXENABLE)
#define F_RXENABLE V_RXENABLE(1U)
#define V_TXENABLE(x) ((x) << S_TXENABLE)
#define F_TXENABLE V_TXENABLE(1U)
-#define A_XGM_PAUSE_TIMER 0x890
+#define A_XGM_XAUI_PCS_TEST 0x894
+
+#define S_TESTPATTERN 1
+#define M_TESTPATTERN 0x3
+#define V_TESTPATTERN(x) ((x) << S_TESTPATTERN)
+#define G_TESTPATTERN(x) (((x) >> S_TESTPATTERN) & M_TESTPATTERN)
+
+#define S_ENTEST 0
+#define V_ENTEST(x) ((x) << S_ENTEST)
+#define F_ENTEST V_ENTEST(1U)
+
+#define A_XGM_RGMII_CTRL 0x898
+
+#define S_PHALIGNFIFOTHRESH 1
+#define M_PHALIGNFIFOTHRESH 0x3
+#define V_PHALIGNFIFOTHRESH(x) ((x) << S_PHALIGNFIFOTHRESH)
+#define G_PHALIGNFIFOTHRESH(x) (((x) >> S_PHALIGNFIFOTHRESH) & M_PHALIGNFIFOTHRESH)
+
+#define S_TXCLK90SHIFT 0
+#define V_TXCLK90SHIFT(x) ((x) << S_TXCLK90SHIFT)
+#define F_TXCLK90SHIFT V_TXCLK90SHIFT(1U)
#define A_XGM_RGMII_IMP 0x89c
+#define S_CALRESET 8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET V_CALRESET(1U)
+
+#define S_CALUPDATE 7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE V_CALUPDATE(1U)
+
#define S_XGM_IMPSETUPDATE 6
#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
#define S_RGMIIIMPPD 3
#define M_RGMIIIMPPD 0x7
#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+#define G_RGMIIIMPPD(x) (((x) >> S_RGMIIIMPPD) & M_RGMIIIMPPD)
#define S_RGMIIIMPPU 0
#define M_RGMIIIMPPU 0x7
#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
-
-#define S_CALRESET 8
-#define V_CALRESET(x) ((x) << S_CALRESET)
-#define F_CALRESET V_CALRESET(1U)
-
-#define S_CALUPDATE 7
-#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
-#define F_CALUPDATE V_CALUPDATE(1U)
+#define G_RGMIIIMPPU(x) (((x) >> S_RGMIIIMPPU) & M_RGMIIIMPPU)
#define A_XGM_XAUI_IMP 0x8a0
-#define S_CALBUSY 31
-#define V_CALBUSY(x) ((x) << S_CALBUSY)
-#define F_CALBUSY V_CALBUSY(1U)
-
#define S_XGM_CALFAULT 29
#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
#define S_XAUIIMP 0
#define M_XAUIIMP 0x7
#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
+#define G_XAUIIMP(x) (((x) >> S_XAUIIMP) & M_XAUIIMP)
+
+#define A_XGM_SERDES_BIST 0x8a4
+
+#define S_BISTDONE 28
+#define M_BISTDONE 0xf
+#define V_BISTDONE(x) ((x) << S_BISTDONE)
+#define G_BISTDONE(x) (((x) >> S_BISTDONE) & M_BISTDONE)
+
+#define S_BISTCYCLETHRESH 3
+#define M_BISTCYCLETHRESH 0x1ffff
+#define V_BISTCYCLETHRESH(x) ((x) << S_BISTCYCLETHRESH)
+#define G_BISTCYCLETHRESH(x) (((x) >> S_BISTCYCLETHRESH) & M_BISTCYCLETHRESH)
#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+#define S_RXENERRORGATHER 16
+#define V_RXENERRORGATHER(x) ((x) << S_RXENERRORGATHER)
+#define F_RXENERRORGATHER V_RXENERRORGATHER(1U)
+
+#define S_RXENSINGLEFLIT 15
+#define V_RXENSINGLEFLIT(x) ((x) << S_RXENSINGLEFLIT)
+#define F_RXENSINGLEFLIT V_RXENSINGLEFLIT(1U)
+
#define S_RXENFRAMER 14
#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
#define F_RXENFRAMER V_RXENFRAMER(1U)
#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
#define F_MAC_RESET_ V_MAC_RESET_(1U)
+#define A_XGM_XAUI1G_CTRL 0x8b0
+
+#define S_XAUI1GLINKID 0
+#define M_XAUI1GLINKID 0x3
+#define V_XAUI1GLINKID(x) ((x) << S_XAUI1GLINKID)
+#define G_XAUI1GLINKID(x) (((x) >> S_XAUI1GLINKID) & M_XAUI1GLINKID)
+
+#define A_XGM_SERDES_LANE_CTRL 0x8b4
+
+#define S_LANEREVERSAL 8
+#define V_LANEREVERSAL(x) ((x) << S_LANEREVERSAL)
+#define F_LANEREVERSAL V_LANEREVERSAL(1U)
+
+#define S_TXPOLARITY 4
+#define M_TXPOLARITY 0xf
+#define V_TXPOLARITY(x) ((x) << S_TXPOLARITY)
+#define G_TXPOLARITY(x) (((x) >> S_TXPOLARITY) & M_TXPOLARITY)
+
+#define S_RXPOLARITY 0
+#define M_RXPOLARITY 0xf
+#define V_RXPOLARITY(x) ((x) << S_RXPOLARITY)
+#define G_RXPOLARITY(x) (((x) >> S_RXPOLARITY) & M_RXPOLARITY)
+
#define A_XGM_PORT_CFG 0x8b8
+#define S_SAFESPEEDCHANGE 4
+#define V_SAFESPEEDCHANGE(x) ((x) << S_SAFESPEEDCHANGE)
+#define F_SAFESPEEDCHANGE V_SAFESPEEDCHANGE(1U)
+
#define S_CLKDIVRESET_ 3
#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
#define S_PORTSPEED 1
#define M_PORTSPEED 0x3
-
#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
+#define G_PORTSPEED(x) (((x) >> S_PORTSPEED) & M_PORTSPEED)
#define S_ENRGMII 0
#define V_ENRGMII(x) ((x) << S_ENRGMII)
#define F_ENRGMII V_ENRGMII(1U)
+#define A_XGM_EPIO_DATA0 0x8c0
+#define A_XGM_EPIO_DATA1 0x8c4
+#define A_XGM_EPIO_DATA2 0x8c8
+#define A_XGM_EPIO_DATA3 0x8cc
+#define A_XGM_EPIO_OP 0x8d0
+
+#define S_PIO_READY 31
+#define V_PIO_READY(x) ((x) << S_PIO_READY)
+#define F_PIO_READY V_PIO_READY(1U)
+
+#define S_PIO_WRRD 24
+#define V_PIO_WRRD(x) ((x) << S_PIO_WRRD)
+#define F_PIO_WRRD V_PIO_WRRD(1U)
+
+#define S_PIO_ADDRESS 0
+#define M_PIO_ADDRESS 0xff
+#define V_PIO_ADDRESS(x) ((x) << S_PIO_ADDRESS)
+#define G_PIO_ADDRESS(x) (((x) >> S_PIO_ADDRESS) & M_PIO_ADDRESS)
+
#define A_XGM_INT_ENABLE 0x8d4
+#define S_XAUIPCSDECERR 24
+#define V_XAUIPCSDECERR(x) ((x) << S_XAUIPCSDECERR)
+#define F_XAUIPCSDECERR V_XAUIPCSDECERR(1U)
+
+#define S_RGMIIRXFIFOOVERFLOW 23
+#define V_RGMIIRXFIFOOVERFLOW(x) ((x) << S_RGMIIRXFIFOOVERFLOW)
+#define F_RGMIIRXFIFOOVERFLOW V_RGMIIRXFIFOOVERFLOW(1U)
+
+#define S_RGMIIRXFIFOUNDERFLOW 22
+#define V_RGMIIRXFIFOUNDERFLOW(x) ((x) << S_RGMIIRXFIFOUNDERFLOW)
+#define F_RGMIIRXFIFOUNDERFLOW V_RGMIIRXFIFOUNDERFLOW(1U)
+
+#define S_RXPKTSIZEERROR 21
+#define V_RXPKTSIZEERROR(x) ((x) << S_RXPKTSIZEERROR)
+#define F_RXPKTSIZEERROR V_RXPKTSIZEERROR(1U)
+
+#define S_WOLPATDETECTED 20
+#define V_WOLPATDETECTED(x) ((x) << S_WOLPATDETECTED)
+#define F_WOLPATDETECTED V_WOLPATDETECTED(1U)
+
#define S_TXFIFO_PRTY_ERR 17
#define M_TXFIFO_PRTY_ERR 0x7
-
#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
+#define G_TXFIFO_PRTY_ERR(x) (((x) >> S_TXFIFO_PRTY_ERR) & M_TXFIFO_PRTY_ERR)
#define S_RXFIFO_PRTY_ERR 14
#define M_RXFIFO_PRTY_ERR 0x7
-
#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
+#define G_RXFIFO_PRTY_ERR(x) (((x) >> S_RXFIFO_PRTY_ERR) & M_RXFIFO_PRTY_ERR)
#define S_TXFIFO_UNDERRUN 13
#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
-#define S_SERDES_LOS 4
-#define M_SERDES_LOS 0xf
-#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
+#define S_SERDESBISTERR 8
+#define M_SERDESBISTERR 0xf
+#define V_SERDESBISTERR(x) ((x) << S_SERDESBISTERR)
+#define G_SERDESBISTERR(x) (((x) >> S_SERDESBISTERR) & M_SERDESBISTERR)
+
+#define S_SERDESLOWSIGCHANGE 4
+#define M_SERDESLOWSIGCHANGE 0xf
+#define V_SERDESLOWSIGCHANGE(x) ((x) << S_SERDESLOWSIGCHANGE)
+#define G_SERDESLOWSIGCHANGE(x) (((x) >> S_SERDESLOWSIGCHANGE) & M_SERDESLOWSIGCHANGE)
#define S_XAUIPCSCTCERR 3
#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
-#define A_XGM_INT_CAUSE 0x8d8
+#define S_RGMIILINKSTSCHANGE 1
+#define V_RGMIILINKSTSCHANGE(x) ((x) << S_RGMIILINKSTSCHANGE)
+#define F_RGMIILINKSTSCHANGE V_RGMIILINKSTSCHANGE(1U)
+
+#define S_XGM_INT 0
+#define V_XGM_INT(x) ((x) << S_XGM_INT)
+#define F_XGM_INT V_XGM_INT(1U)
+
+#define S_SERDESCMULOCK_LOSS 24
+#define V_SERDESCMULOCK_LOSS(x) ((x) << S_SERDESCMULOCK_LOSS)
+#define F_SERDESCMULOCK_LOSS V_SERDESCMULOCK_LOSS(1U)
+
+#define S_SERDESBIST_ERR 8
+#define M_SERDESBIST_ERR 0xf
+#define V_SERDESBIST_ERR(x) ((x) << S_SERDESBIST_ERR)
+#define G_SERDESBIST_ERR(x) (((x) >> S_SERDESBIST_ERR) & M_SERDESBIST_ERR)
+
+#define S_SERDES_LOS 4
+#define M_SERDES_LOS 0xf
+#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
+#define G_SERDES_LOS(x) (((x) >> S_SERDES_LOS) & M_SERDES_LOS)
+
+#define A_XGM_INT_CAUSE 0x8d8
#define A_XGM_XAUI_ACT_CTRL 0x8dc
#define S_TXACTENABLE 1
#define A_XGM_SERDES_CTRL0 0x8e0
+#define S_INTSERLPBK3 27
+#define V_INTSERLPBK3(x) ((x) << S_INTSERLPBK3)
+#define F_INTSERLPBK3 V_INTSERLPBK3(1U)
+
+#define S_INTSERLPBK2 26
+#define V_INTSERLPBK2(x) ((x) << S_INTSERLPBK2)
+#define F_INTSERLPBK2 V_INTSERLPBK2(1U)
+
+#define S_INTSERLPBK1 25
+#define V_INTSERLPBK1(x) ((x) << S_INTSERLPBK1)
+#define F_INTSERLPBK1 V_INTSERLPBK1(1U)
+
+#define S_INTSERLPBK0 24
+#define V_INTSERLPBK0(x) ((x) << S_INTSERLPBK0)
+#define F_INTSERLPBK0 V_INTSERLPBK0(1U)
+
#define S_RESET3 23
#define V_RESET3(x) ((x) << S_RESET3)
#define F_RESET3 V_RESET3(1U)
#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
#define F_RESETPLL01 V_RESETPLL01(1U)
+#define S_PW23 12
+#define M_PW23 0x3
+#define V_PW23(x) ((x) << S_PW23)
+#define G_PW23(x) (((x) >> S_PW23) & M_PW23)
+
+#define S_PW01 10
+#define M_PW01 0x3
+#define V_PW01(x) ((x) << S_PW01)
+#define G_PW01(x) (((x) >> S_PW01) & M_PW01)
+
+#define S_XGM_DEQ 6
+#define M_XGM_DEQ 0xf
+#define V_XGM_DEQ(x) ((x) << S_XGM_DEQ)
+#define G_XGM_DEQ(x) (((x) >> S_XGM_DEQ) & M_XGM_DEQ)
+
+#define S_XGM_DTX 2
+#define M_XGM_DTX 0xf
+#define V_XGM_DTX(x) ((x) << S_XGM_DTX)
+#define G_XGM_DTX(x) (((x) >> S_XGM_DTX) & M_XGM_DTX)
+
+#define S_XGM_LODRV 1
+#define V_XGM_LODRV(x) ((x) << S_XGM_LODRV)
+#define F_XGM_LODRV V_XGM_LODRV(1U)
+
+#define S_XGM_HIDRV 0
+#define V_XGM_HIDRV(x) ((x) << S_XGM_HIDRV)
+#define F_XGM_HIDRV V_XGM_HIDRV(1U)
+
+#define A_XGM_SERDES_CTRL1 0x8e4
+
+#define S_FMOFFSET3 19
+#define M_FMOFFSET3 0x1f
+#define V_FMOFFSET3(x) ((x) << S_FMOFFSET3)
+#define G_FMOFFSET3(x) (((x) >> S_FMOFFSET3) & M_FMOFFSET3)
+
+#define S_FMOFFSETEN3 18
+#define V_FMOFFSETEN3(x) ((x) << S_FMOFFSETEN3)
+#define F_FMOFFSETEN3 V_FMOFFSETEN3(1U)
+
+#define S_FMOFFSET2 13
+#define M_FMOFFSET2 0x1f
+#define V_FMOFFSET2(x) ((x) << S_FMOFFSET2)
+#define G_FMOFFSET2(x) (((x) >> S_FMOFFSET2) & M_FMOFFSET2)
+
+#define S_FMOFFSETEN2 12
+#define V_FMOFFSETEN2(x) ((x) << S_FMOFFSETEN2)
+#define F_FMOFFSETEN2 V_FMOFFSETEN2(1U)
+
+#define S_FMOFFSET1 7
+#define M_FMOFFSET1 0x1f
+#define V_FMOFFSET1(x) ((x) << S_FMOFFSET1)
+#define G_FMOFFSET1(x) (((x) >> S_FMOFFSET1) & M_FMOFFSET1)
+
+#define S_FMOFFSETEN1 6
+#define V_FMOFFSETEN1(x) ((x) << S_FMOFFSETEN1)
+#define F_FMOFFSETEN1 V_FMOFFSETEN1(1U)
+
+#define S_FMOFFSET0 1
+#define M_FMOFFSET0 0x1f
+#define V_FMOFFSET0(x) ((x) << S_FMOFFSET0)
+#define G_FMOFFSET0(x) (((x) >> S_FMOFFSET0) & M_FMOFFSET0)
+
+#define S_FMOFFSETEN0 0
+#define V_FMOFFSETEN0(x) ((x) << S_FMOFFSETEN0)
+#define F_FMOFFSETEN0 V_FMOFFSETEN0(1U)
+
+#define A_XGM_SERDES_CTRL2 0x8e8
+
+#define S_DNIN3 11
+#define V_DNIN3(x) ((x) << S_DNIN3)
+#define F_DNIN3 V_DNIN3(1U)
+
+#define S_UPIN3 10
+#define V_UPIN3(x) ((x) << S_UPIN3)
+#define F_UPIN3 V_UPIN3(1U)
+
+#define S_RXSLAVE3 9
+#define V_RXSLAVE3(x) ((x) << S_RXSLAVE3)
+#define F_RXSLAVE3 V_RXSLAVE3(1U)
+
+#define S_DNIN2 8
+#define V_DNIN2(x) ((x) << S_DNIN2)
+#define F_DNIN2 V_DNIN2(1U)
+
+#define S_UPIN2 7
+#define V_UPIN2(x) ((x) << S_UPIN2)
+#define F_UPIN2 V_UPIN2(1U)
+
+#define S_RXSLAVE2 6
+#define V_RXSLAVE2(x) ((x) << S_RXSLAVE2)
+#define F_RXSLAVE2 V_RXSLAVE2(1U)
+
+#define S_DNIN1 5
+#define V_DNIN1(x) ((x) << S_DNIN1)
+#define F_DNIN1 V_DNIN1(1U)
+
+#define S_UPIN1 4
+#define V_UPIN1(x) ((x) << S_UPIN1)
+#define F_UPIN1 V_UPIN1(1U)
+
+#define S_RXSLAVE1 3
+#define V_RXSLAVE1(x) ((x) << S_RXSLAVE1)
+#define F_RXSLAVE1 V_RXSLAVE1(1U)
+
+#define S_DNIN0 2
+#define V_DNIN0(x) ((x) << S_DNIN0)
+#define F_DNIN0 V_DNIN0(1U)
+
+#define S_UPIN0 1
+#define V_UPIN0(x) ((x) << S_UPIN0)
+#define F_UPIN0 V_UPIN0(1U)
+
+#define S_RXSLAVE0 0
+#define V_RXSLAVE0(x) ((x) << S_RXSLAVE0)
+#define F_RXSLAVE0 V_RXSLAVE0(1U)
+
+#define A_XGM_SERDES_CTRL3 0x8ec
+
+#define S_EXTBISTCHKERRCLR3 31
+#define V_EXTBISTCHKERRCLR3(x) ((x) << S_EXTBISTCHKERRCLR3)
+#define F_EXTBISTCHKERRCLR3 V_EXTBISTCHKERRCLR3(1U)
+
+#define S_EXTBISTCHKEN3 30
+#define V_EXTBISTCHKEN3(x) ((x) << S_EXTBISTCHKEN3)
+#define F_EXTBISTCHKEN3 V_EXTBISTCHKEN3(1U)
+
+#define S_EXTBISTGENEN3 29
+#define V_EXTBISTGENEN3(x) ((x) << S_EXTBISTGENEN3)
+#define F_EXTBISTGENEN3 V_EXTBISTGENEN3(1U)
+
+#define S_EXTBISTPAT3 26
+#define M_EXTBISTPAT3 0x7
+#define V_EXTBISTPAT3(x) ((x) << S_EXTBISTPAT3)
+#define G_EXTBISTPAT3(x) (((x) >> S_EXTBISTPAT3) & M_EXTBISTPAT3)
+
+#define S_EXTPARRESET3 25
+#define V_EXTPARRESET3(x) ((x) << S_EXTPARRESET3)
+#define F_EXTPARRESET3 V_EXTPARRESET3(1U)
+
+#define S_EXTPARLPBK3 24
+#define V_EXTPARLPBK3(x) ((x) << S_EXTPARLPBK3)
+#define F_EXTPARLPBK3 V_EXTPARLPBK3(1U)
+
+#define S_EXTBISTCHKERRCLR2 23
+#define V_EXTBISTCHKERRCLR2(x) ((x) << S_EXTBISTCHKERRCLR2)
+#define F_EXTBISTCHKERRCLR2 V_EXTBISTCHKERRCLR2(1U)
+
+#define S_EXTBISTCHKEN2 22
+#define V_EXTBISTCHKEN2(x) ((x) << S_EXTBISTCHKEN2)
+#define F_EXTBISTCHKEN2 V_EXTBISTCHKEN2(1U)
+
+#define S_EXTBISTGENEN2 21
+#define V_EXTBISTGENEN2(x) ((x) << S_EXTBISTGENEN2)
+#define F_EXTBISTGENEN2 V_EXTBISTGENEN2(1U)
+
+#define S_EXTBISTPAT2 18
+#define M_EXTBISTPAT2 0x7
+#define V_EXTBISTPAT2(x) ((x) << S_EXTBISTPAT2)
+#define G_EXTBISTPAT2(x) (((x) >> S_EXTBISTPAT2) & M_EXTBISTPAT2)
+
+#define S_EXTPARRESET2 17
+#define V_EXTPARRESET2(x) ((x) << S_EXTPARRESET2)
+#define F_EXTPARRESET2 V_EXTPARRESET2(1U)
+
+#define S_EXTPARLPBK2 16
+#define V_EXTPARLPBK2(x) ((x) << S_EXTPARLPBK2)
+#define F_EXTPARLPBK2 V_EXTPARLPBK2(1U)
+
+#define S_EXTBISTCHKERRCLR1 15
+#define V_EXTBISTCHKERRCLR1(x) ((x) << S_EXTBISTCHKERRCLR1)
+#define F_EXTBISTCHKERRCLR1 V_EXTBISTCHKERRCLR1(1U)
+
+#define S_EXTBISTCHKEN1 14
+#define V_EXTBISTCHKEN1(x) ((x) << S_EXTBISTCHKEN1)
+#define F_EXTBISTCHKEN1 V_EXTBISTCHKEN1(1U)
+
+#define S_EXTBISTGENEN1 13
+#define V_EXTBISTGENEN1(x) ((x) << S_EXTBISTGENEN1)
+#define F_EXTBISTGENEN1 V_EXTBISTGENEN1(1U)
+
+#define S_EXTBISTPAT1 10
+#define M_EXTBISTPAT1 0x7
+#define V_EXTBISTPAT1(x) ((x) << S_EXTBISTPAT1)
+#define G_EXTBISTPAT1(x) (((x) >> S_EXTBISTPAT1) & M_EXTBISTPAT1)
+
+#define S_EXTPARRESET1 9
+#define V_EXTPARRESET1(x) ((x) << S_EXTPARRESET1)
+#define F_EXTPARRESET1 V_EXTPARRESET1(1U)
+
+#define S_EXTPARLPBK1 8
+#define V_EXTPARLPBK1(x) ((x) << S_EXTPARLPBK1)
+#define F_EXTPARLPBK1 V_EXTPARLPBK1(1U)
+
+#define S_EXTBISTCHKERRCLR0 7
+#define V_EXTBISTCHKERRCLR0(x) ((x) << S_EXTBISTCHKERRCLR0)
+#define F_EXTBISTCHKERRCLR0 V_EXTBISTCHKERRCLR0(1U)
+
+#define S_EXTBISTCHKEN0 6
+#define V_EXTBISTCHKEN0(x) ((x) << S_EXTBISTCHKEN0)
+#define F_EXTBISTCHKEN0 V_EXTBISTCHKEN0(1U)
+
+#define S_EXTBISTGENEN0 5
+#define V_EXTBISTGENEN0(x) ((x) << S_EXTBISTGENEN0)
+#define F_EXTBISTGENEN0 V_EXTBISTGENEN0(1U)
+
+#define S_EXTBISTPAT0 2
+#define M_EXTBISTPAT0 0x7
+#define V_EXTBISTPAT0(x) ((x) << S_EXTBISTPAT0)
+#define G_EXTBISTPAT0(x) (((x) >> S_EXTBISTPAT0) & M_EXTBISTPAT0)
+
+#define S_EXTPARRESET0 1
+#define V_EXTPARRESET0(x) ((x) << S_EXTPARRESET0)
+#define F_EXTPARRESET0 V_EXTPARRESET0(1U)
+
+#define S_EXTPARLPBK0 0
+#define V_EXTPARLPBK0(x) ((x) << S_EXTPARLPBK0)
+#define F_EXTPARLPBK0 V_EXTPARLPBK0(1U)
+
#define A_XGM_SERDES_STAT0 0x8f0
-#define A_XGM_SERDES_STAT1 0x8f4
-#define A_XGM_SERDES_STAT2 0x8f8
+
+#define S_EXTBISTCHKERRCNT0 4
+#define M_EXTBISTCHKERRCNT0 0xffffff
+#define V_EXTBISTCHKERRCNT0(x) ((x) << S_EXTBISTCHKERRCNT0)
+#define G_EXTBISTCHKERRCNT0(x) (((x) >> S_EXTBISTCHKERRCNT0) & M_EXTBISTCHKERRCNT0)
+
+#define S_EXTBISTCHKFMD0 3
+#define V_EXTBISTCHKFMD0(x) ((x) << S_EXTBISTCHKFMD0)
+#define F_EXTBISTCHKFMD0 V_EXTBISTCHKFMD0(1U)
+
+#define S_LOWSIGFORCEEN0 2
+#define V_LOWSIGFORCEEN0(x) ((x) << S_LOWSIGFORCEEN0)
+#define F_LOWSIGFORCEEN0 V_LOWSIGFORCEEN0(1U)
+
+#define S_LOWSIGFORCEVALUE0 1
+#define V_LOWSIGFORCEVALUE0(x) ((x) << S_LOWSIGFORCEVALUE0)
+#define F_LOWSIGFORCEVALUE0 V_LOWSIGFORCEVALUE0(1U)
#define S_LOWSIG0 0
#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
#define F_LOWSIG0 V_LOWSIG0(1U)
+#define A_XGM_SERDES_STAT1 0x8f4
+
+#define S_EXTBISTCHKERRCNT1 4
+#define M_EXTBISTCHKERRCNT1 0xffffff
+#define V_EXTBISTCHKERRCNT1(x) ((x) << S_EXTBISTCHKERRCNT1)
+#define G_EXTBISTCHKERRCNT1(x) (((x) >> S_EXTBISTCHKERRCNT1) & M_EXTBISTCHKERRCNT1)
+
+#define S_EXTBISTCHKFMD1 3
+#define V_EXTBISTCHKFMD1(x) ((x) << S_EXTBISTCHKFMD1)
+#define F_EXTBISTCHKFMD1 V_EXTBISTCHKFMD1(1U)
+
+#define S_LOWSIGFORCEEN1 2
+#define V_LOWSIGFORCEEN1(x) ((x) << S_LOWSIGFORCEEN1)
+#define F_LOWSIGFORCEEN1 V_LOWSIGFORCEEN1(1U)
+
+#define S_LOWSIGFORCEVALUE1 1
+#define V_LOWSIGFORCEVALUE1(x) ((x) << S_LOWSIGFORCEVALUE1)
+#define F_LOWSIGFORCEVALUE1 V_LOWSIGFORCEVALUE1(1U)
+
+#define S_LOWSIG1 0
+#define V_LOWSIG1(x) ((x) << S_LOWSIG1)
+#define F_LOWSIG1 V_LOWSIG1(1U)
+
+#define A_XGM_SERDES_STAT2 0x8f8
+
+#define S_EXTBISTCHKERRCNT2 4
+#define M_EXTBISTCHKERRCNT2 0xffffff
+#define V_EXTBISTCHKERRCNT2(x) ((x) << S_EXTBISTCHKERRCNT2)
+#define G_EXTBISTCHKERRCNT2(x) (((x) >> S_EXTBISTCHKERRCNT2) & M_EXTBISTCHKERRCNT2)
+
+#define S_EXTBISTCHKFMD2 3
+#define V_EXTBISTCHKFMD2(x) ((x) << S_EXTBISTCHKFMD2)
+#define F_EXTBISTCHKFMD2 V_EXTBISTCHKFMD2(1U)
+
+#define S_LOWSIGFORCEEN2 2
+#define V_LOWSIGFORCEEN2(x) ((x) << S_LOWSIGFORCEEN2)
+#define F_LOWSIGFORCEEN2 V_LOWSIGFORCEEN2(1U)
+
+#define S_LOWSIGFORCEVALUE2 1
+#define V_LOWSIGFORCEVALUE2(x) ((x) << S_LOWSIGFORCEVALUE2)
+#define F_LOWSIGFORCEVALUE2 V_LOWSIGFORCEVALUE2(1U)
+
+#define S_LOWSIG2 0
+#define V_LOWSIG2(x) ((x) << S_LOWSIG2)
+#define F_LOWSIG2 V_LOWSIG2(1U)
+
#define A_XGM_SERDES_STAT3 0x8fc
-#define A_XGM_STAT_TX_BYTE_LOW 0x900
+#define S_EXTBISTCHKERRCNT3 4
+#define M_EXTBISTCHKERRCNT3 0xffffff
+#define V_EXTBISTCHKERRCNT3(x) ((x) << S_EXTBISTCHKERRCNT3)
+#define G_EXTBISTCHKERRCNT3(x) (((x) >> S_EXTBISTCHKERRCNT3) & M_EXTBISTCHKERRCNT3)
+
+#define S_EXTBISTCHKFMD3 3
+#define V_EXTBISTCHKFMD3(x) ((x) << S_EXTBISTCHKFMD3)
+#define F_EXTBISTCHKFMD3 V_EXTBISTCHKFMD3(1U)
+
+#define S_LOWSIGFORCEEN3 2
+#define V_LOWSIGFORCEEN3(x) ((x) << S_LOWSIGFORCEEN3)
+#define F_LOWSIGFORCEEN3 V_LOWSIGFORCEEN3(1U)
+
+#define S_LOWSIGFORCEVALUE3 1
+#define V_LOWSIGFORCEVALUE3(x) ((x) << S_LOWSIGFORCEVALUE3)
+#define F_LOWSIGFORCEVALUE3 V_LOWSIGFORCEVALUE3(1U)
+
+#define S_LOWSIG3 0
+#define V_LOWSIG3(x) ((x) << S_LOWSIG3)
+#define F_LOWSIG3 V_LOWSIG3(1U)
+
+#define A_XGM_STAT_TX_BYTE_LOW 0x900
#define A_XGM_STAT_TX_BYTE_HIGH 0x904
-#define A_XGM_STAT_TX_FRAME_LOW 0x908
+#define S_TXBYTES_HIGH 0
+#define M_TXBYTES_HIGH 0x1fff
+#define V_TXBYTES_HIGH(x) ((x) << S_TXBYTES_HIGH)
+#define G_TXBYTES_HIGH(x) (((x) >> S_TXBYTES_HIGH) & M_TXBYTES_HIGH)
+#define A_XGM_STAT_TX_FRAME_LOW 0x908
#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
-#define A_XGM_STAT_TX_BCAST 0x910
+#define S_TXFRAMES_HIGH 0
+#define M_TXFRAMES_HIGH 0xf
+#define V_TXFRAMES_HIGH(x) ((x) << S_TXFRAMES_HIGH)
+#define G_TXFRAMES_HIGH(x) (((x) >> S_TXFRAMES_HIGH) & M_TXFRAMES_HIGH)
+#define A_XGM_STAT_TX_BCAST 0x910
#define A_XGM_STAT_TX_MCAST 0x914
-
#define A_XGM_STAT_TX_PAUSE 0x918
-
#define A_XGM_STAT_TX_64B_FRAMES 0x91c
-
#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
-
#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
-
#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
-
#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
-
#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
-
#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
-
#define A_XGM_STAT_TX_ERR_FRAMES 0x938
-
#define A_XGM_STAT_RX_BYTES_LOW 0x93c
-
#define A_XGM_STAT_RX_BYTES_HIGH 0x940
-#define A_XGM_STAT_RX_FRAMES_LOW 0x944
+#define S_RXBYTES_HIGH 0
+#define M_RXBYTES_HIGH 0x1fff
+#define V_RXBYTES_HIGH(x) ((x) << S_RXBYTES_HIGH)
+#define G_RXBYTES_HIGH(x) (((x) >> S_RXBYTES_HIGH) & M_RXBYTES_HIGH)
+#define A_XGM_STAT_RX_FRAMES_LOW 0x944
#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
-#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
+#define S_RXFRAMES_HIGH 0
+#define M_RXFRAMES_HIGH 0xf
+#define V_RXFRAMES_HIGH(x) ((x) << S_RXFRAMES_HIGH)
+#define G_RXFRAMES_HIGH(x) (((x) >> S_RXFRAMES_HIGH) & M_RXFRAMES_HIGH)
+#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
-
#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
-#define A_XGM_STAT_RX_64B_FRAMES 0x958
+#define S_RXPAUSEFRAMES 0
+#define M_RXPAUSEFRAMES 0xffff
+#define V_RXPAUSEFRAMES(x) ((x) << S_RXPAUSEFRAMES)
+#define G_RXPAUSEFRAMES(x) (((x) >> S_RXPAUSEFRAMES) & M_RXPAUSEFRAMES)
+#define A_XGM_STAT_RX_64B_FRAMES 0x958
#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
-
#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
-
#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
-
#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
-
#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
-
#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
-
#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
+#define S_RXSHORTFRAMES 0
+#define M_RXSHORTFRAMES 0xffff
+#define V_RXSHORTFRAMES(x) ((x) << S_RXSHORTFRAMES)
+#define G_RXSHORTFRAMES(x) (((x) >> S_RXSHORTFRAMES) & M_RXSHORTFRAMES)
+
#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
+#define S_RXOVERSIZEFRAMES 0
+#define M_RXOVERSIZEFRAMES 0xffff
+#define V_RXOVERSIZEFRAMES(x) ((x) << S_RXOVERSIZEFRAMES)
+#define G_RXOVERSIZEFRAMES(x) (((x) >> S_RXOVERSIZEFRAMES) & M_RXOVERSIZEFRAMES)
+
#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
+#define S_RXJABBERFRAMES 0
+#define M_RXJABBERFRAMES 0xffff
+#define V_RXJABBERFRAMES(x) ((x) << S_RXJABBERFRAMES)
+#define G_RXJABBERFRAMES(x) (((x) >> S_RXJABBERFRAMES) & M_RXJABBERFRAMES)
+
#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
+#define S_RXCRCERRFRAMES 0
+#define M_RXCRCERRFRAMES 0xffff
+#define V_RXCRCERRFRAMES(x) ((x) << S_RXCRCERRFRAMES)
+#define G_RXCRCERRFRAMES(x) (((x) >> S_RXCRCERRFRAMES) & M_RXCRCERRFRAMES)
+
#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
+#define S_RXLENGTHERRFRAMES 0
+#define M_RXLENGTHERRFRAMES 0xffff
+#define V_RXLENGTHERRFRAMES(x) ((x) << S_RXLENGTHERRFRAMES)
+#define G_RXLENGTHERRFRAMES(x) (((x) >> S_RXLENGTHERRFRAMES) & M_RXLENGTHERRFRAMES)
+
#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
+#define S_RXSYMCODEERRFRAMES 0
+#define M_RXSYMCODEERRFRAMES 0xffff
+#define V_RXSYMCODEERRFRAMES(x) ((x) << S_RXSYMCODEERRFRAMES)
+#define G_RXSYMCODEERRFRAMES(x) (((x) >> S_RXSYMCODEERRFRAMES) & M_RXSYMCODEERRFRAMES)
+
#define A_XGM_SERDES_STATUS0 0x98c
+#define S_RXERRLANE3 9
+#define M_RXERRLANE3 0x7
+#define V_RXERRLANE3(x) ((x) << S_RXERRLANE3)
+#define G_RXERRLANE3(x) (((x) >> S_RXERRLANE3) & M_RXERRLANE3)
+
+#define S_RXERRLANE2 6
+#define M_RXERRLANE2 0x7
+#define V_RXERRLANE2(x) ((x) << S_RXERRLANE2)
+#define G_RXERRLANE2(x) (((x) >> S_RXERRLANE2) & M_RXERRLANE2)
+
+#define S_RXERRLANE1 3
+#define M_RXERRLANE1 0x7
+#define V_RXERRLANE1(x) ((x) << S_RXERRLANE1)
+#define G_RXERRLANE1(x) (((x) >> S_RXERRLANE1) & M_RXERRLANE1)
+
+#define S_RXERRLANE0 0
+#define M_RXERRLANE0 0x7
+#define V_RXERRLANE0(x) ((x) << S_RXERRLANE0)
+#define G_RXERRLANE0(x) (((x) >> S_RXERRLANE0) & M_RXERRLANE0)
+
#define A_XGM_SERDES_STATUS1 0x990
-#define S_CMULOCK 31
-#define V_CMULOCK(x) ((x) << S_CMULOCK)
-#define F_CMULOCK V_CMULOCK(1U)
-#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+#define S_RXKLOCKLANE3 11
+#define V_RXKLOCKLANE3(x) ((x) << S_RXKLOCKLANE3)
+#define F_RXKLOCKLANE3 V_RXKLOCKLANE3(1U)
+
+#define S_RXKLOCKLANE2 10
+#define V_RXKLOCKLANE2(x) ((x) << S_RXKLOCKLANE2)
+#define F_RXKLOCKLANE2 V_RXKLOCKLANE2(1U)
+
+#define S_RXKLOCKLANE1 9
+#define V_RXKLOCKLANE1(x) ((x) << S_RXKLOCKLANE1)
+#define F_RXKLOCKLANE1 V_RXKLOCKLANE1(1U)
+
+#define S_RXKLOCKLANE0 8
+#define V_RXKLOCKLANE0(x) ((x) << S_RXKLOCKLANE0)
+#define F_RXKLOCKLANE0 V_RXKLOCKLANE0(1U)
+
+#define S_RXUFLOWLANE3 7
+#define V_RXUFLOWLANE3(x) ((x) << S_RXUFLOWLANE3)
+#define F_RXUFLOWLANE3 V_RXUFLOWLANE3(1U)
+
+#define S_RXUFLOWLANE2 6
+#define V_RXUFLOWLANE2(x) ((x) << S_RXUFLOWLANE2)
+#define F_RXUFLOWLANE2 V_RXUFLOWLANE2(1U)
+
+#define S_RXUFLOWLANE1 5
+#define V_RXUFLOWLANE1(x) ((x) << S_RXUFLOWLANE1)
+#define F_RXUFLOWLANE1 V_RXUFLOWLANE1(1U)
+
+#define S_RXUFLOWLANE0 4
+#define V_RXUFLOWLANE0(x) ((x) << S_RXUFLOWLANE0)
+#define F_RXUFLOWLANE0 V_RXUFLOWLANE0(1U)
+
+#define S_RXOFLOWLANE3 3
+#define V_RXOFLOWLANE3(x) ((x) << S_RXOFLOWLANE3)
+#define F_RXOFLOWLANE3 V_RXOFLOWLANE3(1U)
+
+#define S_RXOFLOWLANE2 2
+#define V_RXOFLOWLANE2(x) ((x) << S_RXOFLOWLANE2)
+#define F_RXOFLOWLANE2 V_RXOFLOWLANE2(1U)
+
+#define S_RXOFLOWLANE1 1
+#define V_RXOFLOWLANE1(x) ((x) << S_RXOFLOWLANE1)
+#define F_RXOFLOWLANE1 V_RXOFLOWLANE1(1U)
+
+#define S_RXOFLOWLANE0 0
+#define V_RXOFLOWLANE0(x) ((x) << S_RXOFLOWLANE0)
+#define F_RXOFLOWLANE0 V_RXOFLOWLANE0(1U)
+
+#define A_XGM_SERDES_STATUS2 0x994
+
+#define S_XGM_RXEIDLANE3 11
+#define V_XGM_RXEIDLANE3(x) ((x) << S_XGM_RXEIDLANE3)
+#define F_XGM_RXEIDLANE3 V_XGM_RXEIDLANE3(1U)
+
+#define S_XGM_RXEIDLANE2 10
+#define V_XGM_RXEIDLANE2(x) ((x) << S_XGM_RXEIDLANE2)
+#define F_XGM_RXEIDLANE2 V_XGM_RXEIDLANE2(1U)
+
+#define S_XGM_RXEIDLANE1 9
+#define V_XGM_RXEIDLANE1(x) ((x) << S_XGM_RXEIDLANE1)
+#define F_XGM_RXEIDLANE1 V_XGM_RXEIDLANE1(1U)
+
+#define S_XGM_RXEIDLANE0 8
+#define V_XGM_RXEIDLANE0(x) ((x) << S_XGM_RXEIDLANE0)
+#define F_XGM_RXEIDLANE0 V_XGM_RXEIDLANE0(1U)
+
+#define S_RXREMSKIPLANE3 7
+#define V_RXREMSKIPLANE3(x) ((x) << S_RXREMSKIPLANE3)
+#define F_RXREMSKIPLANE3 V_RXREMSKIPLANE3(1U)
+
+#define S_RXREMSKIPLANE2 6
+#define V_RXREMSKIPLANE2(x) ((x) << S_RXREMSKIPLANE2)
+#define F_RXREMSKIPLANE2 V_RXREMSKIPLANE2(1U)
+
+#define S_RXREMSKIPLANE1 5
+#define V_RXREMSKIPLANE1(x) ((x) << S_RXREMSKIPLANE1)
+#define F_RXREMSKIPLANE1 V_RXREMSKIPLANE1(1U)
+
+#define S_RXREMSKIPLANE0 4
+#define V_RXREMSKIPLANE0(x) ((x) << S_RXREMSKIPLANE0)
+#define F_RXREMSKIPLANE0 V_RXREMSKIPLANE0(1U)
+
+#define S_RXADDSKIPLANE3 3
+#define V_RXADDSKIPLANE3(x) ((x) << S_RXADDSKIPLANE3)
+#define F_RXADDSKIPLANE3 V_RXADDSKIPLANE3(1U)
+
+#define S_RXADDSKIPLANE2 2
+#define V_RXADDSKIPLANE2(x) ((x) << S_RXADDSKIPLANE2)
+#define F_RXADDSKIPLANE2 V_RXADDSKIPLANE2(1U)
+
+#define S_RXADDSKIPLANE1 1
+#define V_RXADDSKIPLANE1(x) ((x) << S_RXADDSKIPLANE1)
+#define F_RXADDSKIPLANE1 V_RXADDSKIPLANE1(1U)
+
+#define S_RXADDSKIPLANE0 0
+#define V_RXADDSKIPLANE0(x) ((x) << S_RXADDSKIPLANE0)
+#define F_RXADDSKIPLANE0 V_RXADDSKIPLANE0(1U)
+
+#define A_XGM_XAUI_PCS_ERR 0x998
+
+#define S_PCS_SYNCSTATUS 5
+#define M_PCS_SYNCSTATUS 0xf
+#define V_PCS_SYNCSTATUS(x) ((x) << S_PCS_SYNCSTATUS)
+#define G_PCS_SYNCSTATUS(x) (((x) >> S_PCS_SYNCSTATUS) & M_PCS_SYNCSTATUS)
+
+#define S_PCS_CTCFIFOERR 1
+#define M_PCS_CTCFIFOERR 0xf
+#define V_PCS_CTCFIFOERR(x) ((x) << S_PCS_CTCFIFOERR)
+#define G_PCS_CTCFIFOERR(x) (((x) >> S_PCS_CTCFIFOERR) & M_PCS_CTCFIFOERR)
+
+#define S_PCS_NOTALIGNED 0
+#define V_PCS_NOTALIGNED(x) ((x) << S_PCS_NOTALIGNED)
+#define F_PCS_NOTALIGNED V_PCS_NOTALIGNED(1U)
+
+#define A_XGM_RGMII_STATUS 0x99c
+
+#define S_GMIIDUPLEX 3
+#define V_GMIIDUPLEX(x) ((x) << S_GMIIDUPLEX)
+#define F_GMIIDUPLEX V_GMIIDUPLEX(1U)
+
+#define S_GMIISPEED 1
+#define M_GMIISPEED 0x3
+#define V_GMIISPEED(x) ((x) << S_GMIISPEED)
+#define G_GMIISPEED(x) (((x) >> S_GMIISPEED) & M_GMIISPEED)
+
+#define S_GMIILINKSTATUS 0
+#define V_GMIILINKSTATUS(x) ((x) << S_GMIILINKSTATUS)
+#define F_GMIILINKSTATUS V_GMIILINKSTATUS(1U)
+
+#define A_XGM_WOL_STATUS 0x9a0
+
+#define S_PATDETECTED 31
+#define V_PATDETECTED(x) ((x) << S_PATDETECTED)
+#define F_PATDETECTED V_PATDETECTED(1U)
+
+#define S_MATCHEDFILTER 0
+#define M_MATCHEDFILTER 0x7
+#define V_MATCHEDFILTER(x) ((x) << S_MATCHEDFILTER)
+#define G_MATCHEDFILTER(x) (((x) >> S_MATCHEDFILTER) & M_MATCHEDFILTER)
+
+#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
#define S_TXSPI4SOPCNT 16
#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+#define S_TXSPI4EOPCNT 0
+#define M_TXSPI4EOPCNT 0xffff
+#define V_TXSPI4EOPCNT(x) ((x) << S_TXSPI4EOPCNT)
+#define G_TXSPI4EOPCNT(x) (((x) >> S_TXSPI4EOPCNT) & M_TXSPI4EOPCNT)
+
#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
+#define S_RXSPI4SOPCNT 16
+#define M_RXSPI4SOPCNT 0xffff
+#define V_RXSPI4SOPCNT(x) ((x) << S_RXSPI4SOPCNT)
+#define G_RXSPI4SOPCNT(x) (((x) >> S_RXSPI4SOPCNT) & M_RXSPI4SOPCNT)
+
+#define S_RXSPI4EOPCNT 0
+#define M_RXSPI4EOPCNT 0xffff
+#define V_RXSPI4EOPCNT(x) ((x) << S_RXSPI4EOPCNT)
+#define G_RXSPI4EOPCNT(x) (((x) >> S_RXSPI4EOPCNT) & M_RXSPI4EOPCNT)
+
+/* registers for module XGMAC0_1 */
#define XGMAC0_1_BASE_ADDR 0xa00
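+
+/*
+ * XGMAC0_1 repeats the XGMAC0_0 register layout 0x200 above
+ * XGMAC0_0_BASE_ADDR, so per-port MAC code adds a port offset to the
+ * XGMAC0_0 register names instead of duplicating every definition.
+ * A sketch, assuming the MAC state carries its register offset:
+ *
+ *	unsigned int oft = mac->offset;    (0 for port 0, 0x200 for port 1)
+ *	t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
+ */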
/*
- * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This file is part of the Chelsio T3 Ethernet driver.
+ *
+ * Copyright (C) 2005-2008 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
+#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <net/tcp.h>
+#ifndef LINUX_2_4
#include <linux/dma-mapping.h>
+#endif
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
+#include "cxgb3_offload.h"
#include "firmware_exports.h"
+#include "cxgb3_compat.h"
+
#define USE_GTS 0
#define SGE_RX_SM_BUF_SIZE 1536
-
#define SGE_RX_COPY_THRES 256
#define SGE_RX_PULL_LEN 128
/* Values for sge_txq.flags */
enum {
- TXQ_RUNNING = 1 << 0, /* fetch engine is running */
- TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
+ TXQ_RUNNING = 1 << 0, /* fetch engine is running */
+ TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
};
struct tx_desc {
- __be64 flit[TX_DESC_FLITS];
+ u64 flit[TX_DESC_FLITS];
};
struct rx_desc {
__be32 addr_hi;
};
-struct tx_sw_desc { /* SW state per Tx descriptor */
+/*
+ * A single WR can reference up to 7 wire packets when we coalesce egress
+ * packets.  Instead of growing the shared Tx SW descriptor we allocate a
+ * separate coalesce SW descriptor queue; the generic Tx SW descriptor
+ * records whether the coalesce descriptor is in use.
+ */
+#define ETH_COALESCE_PKT_NUM 7
+#define ETH_COALESCE_DUMMY_SKB ((struct sk_buff *)1)
+
+enum { LAST_PKT_DESC = 1, PKT_COALESCE_WR = 2 };
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
struct sk_buff *skb;
- u8 eop; /* set if last descriptor for packet */
- u8 addr_idx; /* buffer index of first SGL entry in descriptor */
+ u8 eop_coalesce; /* 1 if last descriptor for pkt, 2 if coalesce wr */
+ u8 addr_idx_coalesce_num; /* buffer index of first SGL entry in
+ descriptor, # of coalesced pkts */
u8 fragidx; /* first page fragment associated with descriptor */
s8 sflit; /* start flit of first SGL entry in descriptor */
};
+struct eth_coalesce_sw_desc { /* SW state for a Coalesce WR descriptor */
+ struct sk_buff *skb[ETH_COALESCE_PKT_NUM];
+};
+
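+/*
+ * A sketch (not actual driver code) of how the transmit path would tag a
+ * coalesce WR so the reclaim logic in free_tx_desc() below can tell it
+ * apart from an ordinary packet descriptor:
+ *
+ *	struct tx_sw_desc *sd = &q->sdesc[pidx];
+ *
+ *	sd->skb = ETH_COALESCE_DUMMY_SKB;	   non-NULL: SGL to unmap
+ *	sd->eop_coalesce = PKT_COALESCE_WR;	   reclaim via coalesce path
+ *	sd->addr_idx_coalesce_num = npkts;	   # of packets in the WR
+ */
+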
struct rx_sw_desc { /* SW state per Rx descriptor */
union {
struct sk_buff *skb;
DECLARE_PCI_UNMAP_ADDR(dma_addr);
};
-struct rsp_desc { /* response queue descriptor */
+struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;
__be32 flags;
__be32 len_cq;
* Replenishes a response queue by making the supplied number of responses
* available to HW.
*/
-static inline void refill_rspq(struct adapter *adapter,
- const struct sge_rspq *q, unsigned int credits)
+static inline void refill_rspq(adapter_t *adapter, const struct sge_rspq *q,
+ unsigned int credits)
{
- rmb();
t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
{
const struct sg_ent *sgp;
struct tx_sw_desc *d = &q->sdesc[cidx];
- int nfrags, frag_idx, curflit, j = d->addr_idx;
+ int nfrags, frag_idx, curflit, j = d->addr_idx_coalesce_num;
sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
frag_idx = d->fragidx;
if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
d = cidx + 1 == q->size ? q->sdesc : d + 1;
d->fragidx = frag_idx;
- d->addr_idx = j;
+ d->addr_idx_coalesce_num = j;
d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
}
}
+static inline void unmap_tx_pkt_coalesce_wr(struct sge_txq *q,
+ unsigned int cidx,
+ unsigned int num,
+ struct pci_dev *pdev)
+{
+ struct eth_coalesce_sw_desc *csd = &q->eth_coalesce_sdesc[cidx];
+ struct tx_pkt_coalesce_wr *wr =
+ (struct tx_pkt_coalesce_wr *)&q->desc[cidx];
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct cpl_tx_pkt_coalesce *cpl = &wr->cpl[i];
+ unsigned int len = csd->skb[i]->len;
+
+ if (skb_headlen(csd->skb[i]))
+ pci_unmap_single(pdev, be64_to_cpu(cpl->addr),
+ len, PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pdev, be64_to_cpu(cpl->addr), len,
+ PCI_DMA_TODEVICE);
+ }
+}
+
/**
* free_tx_desc - reclaims Tx descriptors and their buffers
* @adapter: the adapter
* Reclaims Tx descriptors from an SGE Tx queue and frees the associated
* Tx buffers. Called with the Tx queue lock held.
*/
-static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
- unsigned int n)
+static void free_tx_desc(adapter_t *adapter, struct sge_txq *q, unsigned int n)
{
struct tx_sw_desc *d;
struct pci_dev *pdev = adapter->pdev;
- unsigned int cidx = q->cidx;
+ unsigned int cidx = q->cidx, i;
const int need_unmap = need_skb_unmap() &&
q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+#ifdef T3_TRACE
+ T3_TRACE3(adapter->tb[q->cntxt_id & 7],
+ "reclaiming %u Tx descriptors at cidx %u (used %u)", n,
+ cidx, q->in_use - n);
+#endif
d = &q->sdesc[cidx];
while (n--) {
- if (d->skb) { /* an SGL is present */
- if (need_unmap)
- unmap_skb(d->skb, q, cidx, pdev);
- if (d->eop)
- kfree_skb(d->skb);
+ if (d->skb) { /* an SGL is present */
+ if (need_unmap) {
+ if (d->eop_coalesce == PKT_COALESCE_WR)
+ unmap_tx_pkt_coalesce_wr(q, cidx,
+ d->addr_idx_coalesce_num, pdev);
+ else
+ unmap_skb(d->skb, q, cidx, pdev);
+ }
+
+			if (d->eop_coalesce == PKT_COALESCE_WR) {
+				for (i = 0; i < d->addr_idx_coalesce_num; i++) {
+					struct eth_coalesce_sw_desc *csd =
+						&q->eth_coalesce_sdesc[cidx];
+
+					dev_kfree_skb_any(csd->skb[i]);
+				}
+			} else if (d->eop_coalesce)
+				dev_kfree_skb_any(d->skb);
}
++d;
if (++cidx == q->size) {
* and frees the associated buffers if possible. Called with the Tx
* queue's lock held.
*/
-static inline void reclaim_completed_tx(struct adapter *adapter,
- struct sge_txq *q)
+static inline void reclaim_completed_tx(adapter_t *adapter, struct sge_txq *q)
{
unsigned int reclaim = q->processed - q->cleaned;
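+
+	/* cap per-call reclaim work; leftovers are freed on later calls */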
+ reclaim = min(16U, reclaim);
if (reclaim) {
free_tx_desc(adapter, q, reclaim);
q->cleaned += reclaim;
/**
* free_rx_bufs - free the Rx buffers on an SGE free list
* @pdev: the PCI device associated with the adapter
- * @rxq: the SGE free list to clean up
+ * @q: the SGE free list to clean up
*
* Release the buffers on an SGE free-buffer Rx queue. HW fetching from
* this queue should be stopped before calling this function.
/**
* add_one_rx_buf - add a packet buffer to a free-buffer list
- * @va: buffer start VA
+ * @va: buffer start VA
* @len: the buffer length
* @d: the HW Rx descriptor to write
* @sd: the SW Rx descriptor to write
* Add a buffer of the given length to the supplied HW and SW Rx
* descriptors.
*/
-static inline void add_one_rx_buf(void *va, unsigned int len,
+static inline int add_one_rx_buf(void *va, unsigned int len,
struct rx_desc *d, struct rx_sw_desc *sd,
- unsigned int gen, struct pci_dev *pdev)
+ unsigned int gen, adapter_t *adapter)
{
dma_addr_t mapping;
- mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
+ mapping = pci_map_single(adapter->pdev, va, len, PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(mapping)))
+ return -ENOMEM;
+
pci_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
- d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+ d->addr_hi = cpu_to_be32((u64)mapping >> 32);
wmb();
d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
}
static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
return 0;
}
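+
+/*
+ * Defer the free-list doorbell until at least a quarter of the list's
+ * credits are pending, batching new-buffer notifications into fewer
+ * MMIO writes.
+ */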
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+	if (q->pend_cred >= q->credits / 4) {
+		q->pend_cred = 0;
+		wmb();	/* write descriptors before telling HW */
+		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+	}
+}
+
/**
* refill_fl - refill an SGE free-buffer list
- * @adapter: the adapter
+ * @adap: the adapter
* @q: the free-list to refill
* @n: the number of new buffers to allocate
* @gfp: the gfp flags for allocating new buffers
* allocated with the supplied gfp flags. The caller must assure that
- *	@n does not exceed the queue's capacity.
+ *	@n does not exceed the queue's capacity.  Returns the number of
+ *	buffers actually added to the free list.
*/
-static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+static unsigned int refill_fl(adapter_t *adap, struct sge_fl *q, int n, gfp_t gfp)
{
void *buf_start;
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
+ unsigned int count = 0;
+ int err;
while (n--) {
if (q->use_pages) {
buf_start = skb->data;
}
- add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
- adap->pdev);
+ err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
+ adap);
+ if (unlikely(err)) {
+ if (q->use_pages) {
+			if (q->pg_chunk.page) {
+				/*
+				 * Drop the free list's reference only;
+				 * put_page() frees the page when the count
+				 * hits zero, so a further __free_page()
+				 * would be a double-free.
+				 */
+				put_page(q->pg_chunk.page);
+				q->pg_chunk.page = sd->pg_chunk.page = NULL;
+			}
+ } else {
+ kfree_skb(sd->skb);
+ sd->skb = NULL;
+ }
+ break;
+ }
+
d++;
sd++;
if (++q->pidx == q->size) {
sd = q->sdesc;
d = q->desc;
}
- q->credits++;
+ count++;
}
- wmb();
- t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+ q->credits += count;
+ q->pend_cred += count;
+ ring_fl_db(adap, q);
+ return count;
}
-static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+static inline void __refill_fl(adapter_t *adap, struct sge_fl *fl)
{
refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
}
/**
* recycle_rx_buf - recycle a receive buffer
- * @adapter: the adapter
+ * @adap: the adapter
* @q: the SGE free list
* @idx: index of buffer to recycle
*
* Recycles the specified buffer on the given free list by adding it at
* the next available slot on the list.
*/
-static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
- unsigned int idx)
+static void recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
{
struct rx_desc *from = &q->desc[idx];
- struct rx_desc *to = &q->desc[q->pidx];
+ struct rx_desc *to = &q->desc[q->pidx];
q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
wmb();
to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
* of the SW ring.
*/
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
- size_t sw_size, dma_addr_t * phys, void *metadata)
+ size_t sw_size, dma_addr_t *phys, void *metadata)
{
size_t len = nelem * elem_size;
void *s = NULL;
+#ifndef LINUX_2_4
void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+#else
+ void *p = pci_alloc_consistent(pdev, len, phys);
+#endif
if (!p)
return NULL;
s = kcalloc(nelem, sw_size, GFP_KERNEL);
if (!s) {
+#ifndef LINUX_2_4
dma_free_coherent(&pdev->dev, len, p, *phys);
+#else
+ pci_free_consistent(pdev, len, p, *phys);
+#endif
return NULL;
}
}
* as HW contexts, packet buffers, and descriptor rings. Traffic to the
* queue set must be quiesced prior to calling this.
*/
-static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+void t3_free_qset(adapter_t *adapter, struct sge_qset *q)
{
int i;
struct pci_dev *pdev = adapter->pdev;
spin_unlock(&adapter->sge.reg_lock);
free_rx_bufs(pdev, &q->fl[i]);
kfree(q->fl[i].sdesc);
+#ifndef LINUX_2_4
dma_free_coherent(&pdev->dev,
- q->fl[i].size *
- sizeof(struct rx_desc), q->fl[i].desc,
- q->fl[i].phys_addr);
+#else
+ pci_free_consistent(pdev,
+#endif
+ q->fl[i].size * sizeof(struct rx_desc),
+ q->fl[i].desc, q->fl[i].phys_addr);
}
for (i = 0; i < SGE_TXQ_PER_SET; ++i)
q->txq[i].in_use);
kfree(q->txq[i].sdesc);
}
+#ifndef LINUX_2_4
dma_free_coherent(&pdev->dev,
- q->txq[i].size *
- sizeof(struct tx_desc),
- q->txq[i].desc, q->txq[i].phys_addr);
+#else
+ pci_free_consistent(pdev,
+#endif
+ q->txq[i].size * sizeof(struct tx_desc),
+ q->txq[i].desc, q->txq[i].phys_addr);
__skb_queue_purge(&q->txq[i].sendq);
+
+ if (i == TXQ_ETH)
+ kfree(q->txq[TXQ_ETH].eth_coalesce_sdesc);
}
if (q->rspq.desc) {
spin_lock(&adapter->sge.reg_lock);
t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
spin_unlock(&adapter->sge.reg_lock);
+#ifndef LINUX_2_4
dma_free_coherent(&pdev->dev,
+#else
+ pci_free_consistent(pdev,
+#endif
q->rspq.size * sizeof(struct rsp_desc),
q->rspq.desc, q->rspq.phys_addr);
}
*/
static inline unsigned int sgl_len(unsigned int n)
{
 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
return (3 * n) / 2 + (n & 1);
}
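+
+/*
+ * Worked example for sgl_len(): a pair of SGL entries packs into 3 flits
+ * (two 8-byte addresses plus two 32-bit lengths), while an odd final entry
+ * still occupies 2 flits, so n = 1, 2, 3, 8 yield 2, 3, 5 and 12 flits.
+ */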
* threshold and the packet is too big to copy, or (b) the packet should
* be copied but there is no memory for the copy.
*/
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+static struct sk_buff *get_packet(adapter_t *adap, struct sge_fl *fl,
unsigned int len, unsigned int drop_thres)
{
struct sk_buff *skb = NULL;
pci_dma_sync_single_for_cpu(adap->pdev,
pci_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
- memcpy(skb->data, sd->skb->data, len);
+ skb_copy_from_linear_data(sd->skb, skb->data, len);
pci_dma_sync_single_for_device(adap->pdev,
pci_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
* Note: this function is similar to @get_packet but deals with Rx buffers
* that are page chunks rather than sk_buffs.
*/
-static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+static struct sk_buff *get_packet_pg(adapter_t *adap, struct sge_fl *fl,
unsigned int len, unsigned int drop_thres)
{
struct sk_buff *skb = NULL;
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
- memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
+ skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
}
return skb;
}
return 1;
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
+#ifndef NETIF_F_TSO_FAKE
+ /* TSO supported */
if (skb_shinfo(skb)->gso_size)
flits++;
+#endif
return flits_to_desc(flits);
}
*
* When GTS is disabled we unconditionally ring the doorbell.
*/
-static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
+static inline void check_ring_tx_db(adapter_t *adap, struct sge_txq *q)
{
#if USE_GTS
clear_bit(TXQ_LAST_PKT_DB, &q->flags);
if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
set_bit(TXQ_LAST_PKT_DB, &q->flags);
+#ifdef T3_TRACE
+ T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
+ q->cntxt_id);
+#endif
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
#else
- wmb(); /* write descriptors before telling HW */
+ wmb(); /* write descriptors before telling HW */
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
const struct sge_txq *q,
const struct sg_ent *sgl,
unsigned int flits, unsigned int sgl_flits,
- unsigned int gen, __be32 wr_hi,
- __be32 wr_lo)
+ unsigned int gen, unsigned int wr_hi,
+ unsigned int wr_lo)
{
struct work_request_hdr *wrp = (struct work_request_hdr *)d;
struct tx_sw_desc *sd = &q->sdesc[pidx];
sd->skb = skb;
if (need_skb_unmap()) {
sd->fragidx = 0;
- sd->addr_idx = 0;
+ sd->addr_idx_coalesce_num = 0;
sd->sflit = flits;
}
if (likely(ndesc == 1)) {
- sd->eop = 1;
+ sd->eop_coalesce = LAST_PKT_DESC;
wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
V_WR_SGLSFLT(flits)) | wr_hi;
wmb();
fp += avail;
d++;
- sd->eop = 0;
+ sd->eop_coalesce = 0;
sd++;
if (++pidx == q->size) {
pidx = 0;
wr_gen2(d, gen);
flits = 1;
}
- sd->eop = 1;
+ sd->eop_coalesce = LAST_PKT_DESC;
wrp->wr_hi |= htonl(F_WR_EOP);
wmb();
wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
* write_tx_pkt_wr - write a TX_PKT work request
* @adap: the adapter
* @skb: the packet to send
- * @pi: the egress interface
+ * @pi: the egress interface port structure
* @pidx: index of the first Tx descriptor to write
* @gen: the generation value to use
* @q: the Tx queue
*
* Generate a TX_PKT work request to send the supplied packet.
*/
-static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
+static void write_tx_pkt_wr(adapter_t *adap, struct sk_buff *skb,
const struct port_info *pi,
unsigned int pidx, unsigned int gen,
struct sge_txq *q, unsigned int ndesc,
struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
cpl->len = htonl(skb->len | 0x80000000);
- cntrl = V_TXPKT_INTF(pi->port_id);
+ cntrl = V_TXPKT_INTF(pi->txpkt_intf);
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+#ifdef NETIF_F_TSO_FAKE
+ /* TSO not supported */
+ tso_info = 0;
+#else
+ /* TSO supported */
tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
+#endif
if (tso_info) {
int eth_type;
- struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
+ struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *) cpl;
d->flit[2] = 0;
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
hdr->cntrl = htonl(cntrl);
- eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
- CPL_ETH_II : CPL_ETH_II_VLAN;
+ eth_type = skb_network_offset(skb) == ETH_HLEN ?
+ CPL_ETH_II : CPL_ETH_II_VLAN;
tso_info |= V_LSO_ETH_TYPE(eth_type) |
- V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
- V_LSO_TCPHDR_WORDS(skb->h.th->doff);
+ V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+ V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
hdr->lso_info = htonl(tso_info);
flits = 3;
} else {
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
- cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
- cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_HW);
+ cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
+ cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
cpl->cntrl = htonl(cntrl);
if (skb->len <= WR_LEN - sizeof(*cpl)) {
q->sdesc[pidx].skb = NULL;
if (!skb->data_len)
- memcpy(&d->flit[2], skb->data, skb->len);
+ skb_copy_from_linear_data(skb, &d->flit[2],
+ skb->len);
else
skb_copy_bits(skb, 0, &d->flit[2], skb->len);
flits = (skb->len + 7) / 8 + 2;
cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
- V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
- | F_WR_SOP | F_WR_EOP | compl);
+ V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
+ F_WR_SOP | F_WR_EOP | compl);
wmb();
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));
}
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
+ sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb),
+ adap->pdev);
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
htonl(V_WR_TID(q->token)));
}
+/**
+ * finalize_tx_pkt_coalesce_wr - complete a tx pkt coalesce wr
+ * @q: the Tx queue
+ */
+static inline void finalize_tx_pkt_coalesce_wr(struct sge_txq *q)
+{
+ struct work_request_hdr *wrp =
+ (struct work_request_hdr *)&q->desc[q->pidx];
+
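+ /* WR length: one header flit plus two flits per coalesced packet */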
+ wmb();
+ wrp->wr_lo =
+ htonl(V_WR_GEN(q->gen) | V_WR_TID(q->token) |
+ V_WR_LEN(1 + (q->eth_coalesce_idx << 1)));
+ wr_gen2((struct tx_desc *)wrp, q->gen);
+}
+
+/**
+ * ship_tx_pkt_coalesce_wr - ship a tx pkt coalesce wr
+ * @adap: the adapter
+ * @q: the Tx queue
+ */
+static inline void ship_tx_pkt_coalesce_wr(adapter_t *adap, struct sge_txq *q)
+{
+ finalize_tx_pkt_coalesce_wr(q);
+ check_ring_tx_db(adap, q);
+
+ q->eth_coalesce_idx = 0;
+ q->eth_coalesce_bytes = 0;
+
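+ /* advance the producer index; wrapping around flips the generation bit */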
+ q->pidx++;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+}
+
+/**
+ * try_finalize_tx_pkt_coalesce_wr - try sending a pend. tx pkt coalesce wr
+ * @adap: the adapter
+ * @q: the Tx queue
+ */
+static void try_finalize_tx_pkt_coalesce_wr(adapter_t *adap, struct sge_txq *q)
+{
+ if (spin_trylock(&q->lock)) {
+ if (q->eth_coalesce_idx)
+ ship_tx_pkt_coalesce_wr(adap, q);
+
+ spin_unlock(&q->lock);
+ }
+}
+
+/**
+ * should_finalize_tx_pkt_coalescing - is it time to stop coalescing
+ * @q: the Tx queue
+ */
+static inline int should_finalize_tx_pkt_coalescing(const struct sge_txq *q)
+{
+ unsigned int r = q->processed - q->cleaned;
+
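+ /*
+ * r descriptors have completed but have not yet been reclaimed; once
+ * the descriptors still truly in flight drop below 1/8 of the ring,
+ * the ring is no longer congested and pending coalesce WRs should be
+ * shipped rather than held back.
+ */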
+ return q->in_use - r < (q->size >> 3);
+}
+
+/**
+ * write_tx_pkt_coalesce_wr - write a TX_PKT coalesce work request
+ * @adap: the adapter
+ * @skb: the packet to send
+ * @pi: the egress interface port structure
+ * @pidx: index of the first Tx descriptor to write
+ * @gen: the generation value to use
+ * @q: the Tx queue
+ * @compl: the value of the COMPL bit to use
+ * @coalesce_idx: idx in the coalesce WR
+ *
+ * Generate a TX_PKT work request to send the supplied packet.
+ */
+static inline void write_tx_pkt_coalesce_wr(adapter_t *adap,
+ struct sk_buff *skb,
+ const struct port_info *pi,
+ unsigned int pidx,
+ unsigned int gen,
+ struct sge_txq *q,
+ unsigned int compl,
+ unsigned int coalesce_idx)
+{
+ struct tx_pkt_coalesce_wr *wr =
+ (struct tx_pkt_coalesce_wr *)&q->desc[pidx];
+ struct cpl_tx_pkt_coalesce *cpl = &wr->cpl[coalesce_idx];
+ struct tx_sw_desc *sd = &q->sdesc[pidx];
+ unsigned int cntrl, len = skb->len;
+
+ if (!coalesce_idx) {
+ wr->wr.wr_hi =
+ htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | F_WR_SOP | F_WR_EOP |
+ V_WR_DATATYPE(1) | compl);
+ sd->eop_coalesce = PKT_COALESCE_WR;
+ sd->skb = ETH_COALESCE_DUMMY_SKB;
+ }
+ sd->addr_idx_coalesce_num = coalesce_idx + 1;
+ q->eth_coalesce_sdesc[pidx].skb[coalesce_idx] = skb;
+
+ cntrl =
+ V_TXPKT_OPCODE(CPL_TX_PKT) | V_TXPKT_INTF(pi->txpkt_intf) |
+ F_TXPKT_IPCSUM_DIS |
+ V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
+
+ if (vlan_tx_tag_present(skb) && pi->vlan_grp)
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+
+ cpl->cntrl = htonl(cntrl);
+ cpl->len = htonl(len | 0x81000000);
+
+ if (skb_headlen(skb)) {
+ cpl->addr =
+ cpu_to_be64(pci_map_single(adap->pdev, skb->data, len,
+ PCI_DMA_TODEVICE));
+ } else {
+ skb_frag_t *frag = skb_shinfo(skb)->frags;
+
+ cpl->addr =
+ cpu_to_be64(pci_map_page(adap->pdev, frag->page,
+ frag->page_offset, len, PCI_DMA_TODEVICE));
+ }
+}
+
/**
* eth_xmit - add a packet to the Ethernet Tx queue
* @skb: the packet
*/
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned int ndesc, pidx, credits, gen, compl;
+ unsigned int ndesc, pidx, pidx_ndesc, credits, gen, compl,
+ len = skb->len;
+ int coalesce_idx = -1;
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
struct sge_qset *qs = dev2qset(dev);
* The chip min packet length is 9 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
- if (unlikely(skb->len < ETH_HLEN)) {
+ if (unlikely(len < ETH_HLEN)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- spin_lock(&q->lock);
- reclaim_completed_tx(adap, q);
+ if (spin_trylock(&q->lock))
+ reclaim_completed_tx(adap, q);
+ else
+ return NETDEV_TX_LOCKED;
credits = q->size - q->in_use;
- ndesc = calc_tx_descs(skb);
+
+#ifdef T3_TRACE
+ T3_TRACE5(adap->tb[q->cntxt_id & 7],
+ "t3_eth_xmit: len %u headlen %u frags %u idx %u bytes %u",
+ len, skb_headlen(skb), skb_shinfo(skb)->nr_frags,
+ q->eth_coalesce_idx, q->eth_coalesce_bytes);
+#endif
+ /*
+ * If the Tx descriptor ring is filling up, try to coalesce small
+ * outgoing packets into a single WR. The coalesce WR format doesn't
+ * handle fragmented skbs, but that is unlikely for small packets
+ * anyway. The benefits of coalescing are manifold: it is more
+ * efficient on the IO bus and it cuts per-packet processing in the
+ * T3 silicon.
+ */
+ if ((skb_shinfo(skb)->nr_frags < 2) &&
+ ((skb_shinfo(skb)->nr_frags == 1) ^ !!skb_headlen(skb)) &&
+ ((q->eth_coalesce_idx || credits < (q->size >> 1)) &&
+ (q->eth_coalesce_bytes + len < 11000))) {
+
+ q->eth_coalesce_bytes += len;
+ coalesce_idx = q->eth_coalesce_idx++;
+
+ if (!coalesce_idx) {
+ ndesc = 1;
+ qs->port_stats[SGE_PSTAT_TX_COALESCE_WR]++;
+ } else
+ ndesc = 0;
+
+ qs->port_stats[SGE_PSTAT_TX_COALESCE_PKT]++;
+ pidx_ndesc = 0;
+ } else {
+ if (q->eth_coalesce_idx)
+ ship_tx_pkt_coalesce_wr(adap, q);
+
+ ndesc = pidx_ndesc = calc_tx_descs(skb);
+ }
if (unlikely(credits < ndesc)) {
+ q->eth_coalesce_idx = 0;
+ q->eth_coalesce_bytes = 0;
+
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
set_bit(TXQ_ETH, &qs->txq_stopped);
gen = q->gen;
q->unacked += ndesc;
- compl = (q->unacked & 8) << (S_WR_COMPL - 3);
- q->unacked &= 7;
+#ifdef CHELSIO_FREE_TXBUF_ASAP
+ /*
+ * Some Guest OS clients get terrible performance when they have bad
+ * message size / socket send buffer space parameters. For instance,
+ * if an application selects an 8KB message size and an 8KB send
+ * socket buffer size. This forces the application into a single
+ * packet stop-and-go mode where it's only willing to have a single
+ * message outstanding. The next message is only sent when the
+ * previous message is noted as having been sent. Until we issue a
+ * kfree_skb() against the TX skb, the skb is charged against the
+ * application's send buffer space. We only free up TX skbs when we
+ * get a TX credit return from the hardware / firmware which is fairly
+ * lazy about this. So we request a TX WR Completion Notification on
+ * every TX descriptor in order to accelerate TX credit returns. See
+ * also the change in handle_rsp_cntrl_info() to free up TX skb's when
+ * we receive the TX WR Completion Notifications ...
+ */
+ compl = F_WR_COMPL;
+#else
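+ /* request a WR completion once every 32 unacknowledged WRs */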
+ compl = (q->unacked & 32) << (S_WR_COMPL - 5);
+#endif
+ q->unacked &= 31;
+
pidx = q->pidx;
- q->pidx += ndesc;
+ q->pidx += pidx_ndesc;
if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
}
/* update port statistics */
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
qs->port_stats[SGE_PSTAT_TX_CSUM]++;
+#ifndef NETIF_F_TSO_FAKE
+ /* TSO supported */
if (skb_shinfo(skb)->gso_size)
qs->port_stats[SGE_PSTAT_TSO]++;
+#endif
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
qs->port_stats[SGE_PSTAT_VLANINS]++;
dev->trans_start = jiffies;
- spin_unlock(&q->lock);
+
+ if (coalesce_idx < 0)
+ spin_unlock(&q->lock);
/*
* We do not use Tx completion interrupts to free DMAd Tx packets.
*/
if (likely(!skb_shared(skb)))
skb_orphan(skb);
+ if (coalesce_idx >= 0) {
+ write_tx_pkt_coalesce_wr(adap, skb, pi, pidx, gen, q,
+ compl, coalesce_idx);
- write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
- check_ring_tx_db(adap, q);
+ if (coalesce_idx == ETH_COALESCE_PKT_NUM - 1)
+ ship_tx_pkt_coalesce_wr(adap, q);
+
+ spin_unlock(&q->lock);
+ } else {
+ write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
+ check_ring_tx_db(adap, q);
+ }
return NETDEV_TX_OK;
}
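+
+/*
+ * Editor's sketch (illustrative only, not driver code): the coalesce
+ * eligibility test from t3_eth_xmit() in isolation. A packet may join a
+ * coalesce WR only if its payload lives in a single piece (either the
+ * linear area or exactly one page fragment, hence the XOR), a coalesce WR
+ * is already open or the ring is at least half full, and the WR stays
+ * under its ~11KB byte budget.
+ */
+static inline int coalesce_eligible_sketch(unsigned int nr_frags,
+ unsigned int headlen,
+ int wr_open, unsigned int credits,
+ unsigned int ring_size,
+ unsigned int wr_bytes,
+ unsigned int pkt_len)
+{
+ return nr_frags < 2 &&
+ ((nr_frags == 1) ^ (headlen != 0)) &&
+ (wr_open || credits < (ring_size >> 1)) &&
+ wr_bytes + pkt_len < 11000;
+}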
* needs to retry because there weren't enough descriptors at the
* beginning of the call but some freed up in the mean time.
*/
-static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
+static inline int check_desc_avail(adapter_t *adap, struct sge_txq *q,
struct sk_buff *skb, unsigned int ndesc,
unsigned int qid)
{
if (unlikely(!skb_queue_empty(&q->sendq))) {
- addq_exit:__skb_queue_tail(&q->sendq, skb);
+addq_exit: __skb_queue_tail(&q->sendq, skb);
return 1;
}
if (unlikely(q->size - q->in_use < ndesc)) {
q->cleaned += reclaim;
}
+/**
+ * immediate - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ * Currently this happens if the packet fits in one Tx descriptor.
+ */
static inline int immediate(const struct sk_buff *skb)
{
return skb->len <= WR_LEN;
* a control queue must fit entirely as immediate data in a single Tx
* descriptor and have no page fragments.
*/
-static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
- struct sk_buff *skb)
+static int ctrl_xmit(adapter_t *adap, struct sge_txq *q, struct sk_buff *skb)
{
int ret;
struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
wrp->wr_lo = htonl(V_WR_TID(q->token));
spin_lock(&q->lock);
- again:reclaim_completed_tx_imm(q);
+again: reclaim_completed_tx_imm(q);
ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
if (unlikely(ret)) {
/**
* restart_ctrlq - restart a suspended control queue
- * @qs: the queue set cotaining the control queue
+ * @data: the queue set containing the control queue
*
* Resumes transmission on a suspended Tx control queue.
*/
struct adapter *adap = pi->adapter;
spin_lock(&q->lock);
- again:reclaim_completed_tx_imm(q);
+again: reclaim_completed_tx_imm(q);
- while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
+ while (q->in_use < q->size &&
+ (skb = __skb_dequeue(&q->sendq)) != NULL) {
write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
-/*
- * Send a management message through control queue 0
+/**
+ * t3_mgmt_tx - send a management message
+ * @adap: the adapter
+ * @skb: the packet containing the management message
+ *
+ * Send a management message through control queue 0.
*/
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
- int ret;
+ int ret;
+
local_bh_disable();
ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
local_bh_enable();
-
return ret;
}
static void deferred_unmap_destructor(struct sk_buff *skb)
{
int i;
- const dma_addr_t *p;
const struct skb_shared_info *si;
+ const dma_addr_t *p;
const struct deferred_unmap_info *dui;
dui = (struct deferred_unmap_info *)skb->head;
p = dui->addr;
- if (skb->tail - skb->h.raw)
+ if (skb->tail - skb->transport_header)
pci_unmap_single(dui->pdev, *p++,
- skb->tail - skb->h.raw,
+ skb->tail - skb->transport_header,
PCI_DMA_TODEVICE);
si = skb_shinfo(skb);
* Write an offload work request to send the supplied packet. The packet
* data already carry the work request with most fields populated.
*/
-static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
+static void write_ofld_wr(adapter_t *adap, struct sk_buff *skb,
struct sge_txq *q, unsigned int pidx,
unsigned int gen, unsigned int ndesc)
{
/* Only TX_DATA builds SGLs */
from = (struct work_request_hdr *)skb->data;
- memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
+ memcpy(&d->flit[1], &from[1],
+ skb_transport_offset(skb) - sizeof(*from));
- flits = (skb->h.raw - skb->data) / 8;
+ flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
- adap->pdev);
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb->transport_header,
+ adap->pdev);
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;
}
-
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
gen, from->wr_hi, from->wr_lo);
}
unsigned int flits, cnt;
if (skb->len <= WR_LEN)
- return 1; /* packet fits as immediate data */
+ return 1; /* packet fits as immediate data */
- flits = (skb->h.raw - skb->data) / 8; /* headers */
+ flits = skb_transport_offset(skb) / 8; /* headers */
cnt = skb_shinfo(skb)->nr_frags;
- if (skb->tail != skb->h.raw)
+ if (skb->tail != skb->transport_header)
cnt++;
return flits_to_desc(flits + sgl_len(cnt));
}
*
* Send an offload packet through an SGE offload queue.
*/
-static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
- struct sk_buff *skb)
+static int ofld_xmit(adapter_t *adap, struct sge_txq *q, struct sk_buff *skb)
{
int ret;
unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
spin_lock(&q->lock);
- again:reclaim_completed_tx(adap, q);
+again: reclaim_completed_tx(adap, q);
ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
if (unlikely(ret)) {
if (ret == 1) {
- skb->priority = ndesc; /* save for restart */
+ skb->priority = ndesc; /* save for restart */
spin_unlock(&q->lock);
return NET_XMIT_CN;
}
q->pidx -= q->size;
q->gen ^= 1;
}
+#ifdef T3_TRACE
+ T3_TRACE5(adap->tb[q->cntxt_id & 7],
+ "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u",
+ ndesc, pidx, skb->len, skb->len - skb->data_len,
+ skb_shinfo(skb)->nr_frags);
+#endif
spin_unlock(&q->lock);
write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
/**
* restart_offloadq - restart a suspended offload queue
- * @qs: the queue set cotaining the offload queue
+ * @data: the queue set containing the offload queue
*
* Resumes transmission on a suspended Tx offload queue.
*/
struct adapter *adap = pi->adapter;
spin_lock(&q->lock);
- again:reclaim_completed_tx(adap, q);
+again: reclaim_completed_tx(adap, q);
while ((skb = skb_peek(&q->sendq)) != NULL) {
unsigned int gen, pidx;
*/
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
- struct adapter *adap = tdev2adap(tdev);
+ adapter_t *adap = tdev2adap(tdev);
struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
if (unlikely(is_ctrl_pkt(skb)))
{
if (n) {
q->offload_bundles++;
- tdev->recv(tdev, skbs, n);
+ cxgb3_ofld_recv(tdev, skbs, n);
}
}
while (avail) {
struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
int ngathered;
+ unsigned long flags;
- spin_lock_irq(&q->lock);
+ spin_lock_irqsave(&q->lock, flags);
head = q->rx_head;
if (!head) {
work_done = limit - avail;
*budget -= work_done;
dev->quota -= work_done;
__netif_rx_complete(dev);
- spin_unlock_irq(&q->lock);
+ spin_unlock_irqrestore(&q->lock, flags);
return 0;
}
tail = q->rx_tail;
q->rx_head = q->rx_tail = NULL;
- spin_unlock_irq(&q->lock);
+ spin_unlock_irqrestore(&q->lock, flags);
for (ngathered = 0; avail && head; avail--) {
prefetch(head->data);
skbs[ngathered]->next = NULL;
if (++ngathered == RX_BUNDLE_SIZE) {
q->offload_bundles++;
- adapter->tdev.recv(&adapter->tdev, skbs,
- ngathered);
+ cxgb3_ofld_recv(&adapter->tdev, skbs,
+ ngathered);
ngathered = 0;
}
}
- if (head) { /* splice remaining packets back onto Rx queue */
- spin_lock_irq(&q->lock);
+ if (head) { /* splice remaining packets back onto Rx queue */
+ spin_lock_irqsave(&q->lock, flags);
tail->next = q->rx_head;
if (!q->rx_head)
q->rx_tail = tail;
q->rx_head = head;
- spin_unlock_irq(&q->lock);
+ spin_unlock_irqrestore(&q->lock, flags);
}
deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
}
struct sk_buff *skb, struct sk_buff *rx_gather[],
unsigned int gather_idx)
{
- skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
if (rq->polling) {
rx_gather[gather_idx++] = skb;
if (gather_idx == RX_BUNDLE_SIZE) {
- tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
+ cxgb3_ofld_recv(tdev, rx_gather, RX_BUNDLE_SIZE);
gather_idx = 0;
rq->offload_bundles++;
}
}
}
+
/**
* rx_eth - process an ingress ethernet packet
* @adap: the adapter
* @rq: the response queue that received the packet
* @skb: the packet
* @pad: amount of padding at the start of the buffer
+ * @npkts: number of packets aggregated in the skb (>= 1 for LRO)
*
 * Process an ingress Ethernet packet and deliver it to the stack.
* The padding is 2 if the packet was delivered in an Rx buffer and 0
- * if it was immediate data in a response.
+ * if it was immediate data in a response. @npkts represents the number
+ * of Ethernet packets as seen by the device that have been collected in
+ * the @skb; it's > 1 only in the case of LRO.
*/
-static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
- struct sk_buff *skb, int pad)
+static void rx_eth(adapter_t *adap, struct sge_rspq *rq,
+ struct sk_buff *skb, int pad, int npkts)
{
struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
struct port_info *pi;
+ rq->eth_pkts += npkts;
skb_pull(skb, sizeof(*p) + pad);
- skb->dev = adap->port[p->iff];
- skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->dev = adap->port[adap->rxpkt_map[p->iff]];
skb->dev->last_rx = jiffies;
+ skb->protocol = eth_type_trans(skb, skb->dev);
pi = netdev_priv(skb->dev);
+
if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
!p->fragment) {
- rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD] += npkts;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb->ip_summed = CHECKSUM_NONE;
if (unlikely(p->vlan_valid)) {
struct vlan_group *grp = pi->vlan_grp;
- rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
- if (likely(grp))
+ rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX] += npkts;
+ if (likely(grp != NULL))
__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
rq->polling);
else
netif_rx(skb);
}
+static inline int is_eth_tcp(u32 rss)
+{
+ return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+static inline int lro_active(const struct lro_session *s)
+{
+ return s->head != NULL;
+}
+
+/**
+ * lro_match - check if a new packet matches an existing LRO packet
+ * @skb: LRO packet
+ * @iph: pointer to IP header of new packet
+ *
+ * Determine whether a new packet with the given IP header belongs
+ * to the same connection as an existing LRO packet by checking that the
+ * two packets have the same 4-tuple. Note that LRO assumes no IP options.
+ */
+static inline int lro_match(const struct sk_buff *skb, const struct iphdr *iph)
+{
+ const struct iphdr *s_iph = ip_hdr(skb);
+ const struct tcphdr *s_tcph = (const struct tcphdr *)(s_iph + 1);
+ const struct tcphdr *tcph = (const struct tcphdr *)(iph + 1);
+
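+ /* source and destination ports are adjacent; compare both in one load */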
+ return *(u32 *)&tcph->source == *(u32 *)&s_tcph->source &&
+ iph->saddr == s_iph->saddr && iph->daddr == s_iph->daddr;
+}
+
+/**
+ * lro_lookup - find an LRO session
+ * @p: the LRO state
+ * @idx: index of first session to try
+ * @iph: IP header supplying the session information to look up
+ *
+ * Returns an existing LRO session that matches the TCP/IP information in
+ * the supplied IP header. @idx is a hint suggesting the first session
+ * to try. If no matching session is found %NULL is returned.
+ */
+static inline struct lro_session *lro_lookup(struct lro_state *p, int idx,
+ const struct iphdr *iph)
+{
+ struct lro_session *s;
+ unsigned int active = p->nactive;
+
+ while (active) {
+ s = &p->sess[idx];
+ if (s->head) {
+ if (lro_match(s->head, iph))
+ return s;
+ active--;
+ }
+ idx = (idx + 1) & (MAX_LRO_SES - 1);
+ }
+ return NULL; /* no active session matches the 4-tuple */
+}
+
+#define IPH_OFFSET (2 + ETH_HLEN + sizeof(struct cpl_rx_pkt))
+
+/**
+ * lro_init_session - initialize an LRO session
+ * @s: LRO session to initialize
+ * @skb: first packet for the session
+ * @iph: pointer to start of IP header
+ * @vlan: session vlan
+ * @plen: TCP payload length
+ *
+ * Initialize an LRO session with the given packet.
+ */
+static void lro_init_session(struct lro_session *s, struct sk_buff *skb,
+ struct iphdr *iph, __be32 vlan, int plen)
+{
+ const struct tcphdr *tcph = (struct tcphdr *)(iph + 1);
+
+ cxgb3_set_skb_header(skb, iph, IPH_OFFSET);
+ s->head = s->tail = skb;
+ s->iplen = ntohs(iph->tot_len);
+ s->mss = plen;
+ s->seq = ntohl(tcph->seq) + plen;
+ s->vlan = vlan;
+ s->npkts = 1;
+}
+
+/**
+ * lro_flush_session - complete an LRO session
+ * @adap: the adapter
+ * @qs: the queue set associated with the LRO session
+ * @s: the LRO session
+ *
+ * Complete an active LRO session and send the packet it has been building
+ * upstream.
+ */
+static void lro_flush_session(struct adapter *adap, struct sge_qset *qs,
+ struct lro_session *s)
+{
+ struct iphdr *iph = ip_hdr(s->head);
+
+ if (iph->tot_len != htons(s->iplen)) {
+ /* IP length has changed, fix up IP header */
+ iph->tot_len = htons(s->iplen);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+#ifndef NETIF_F_TSO_FAKE
+ /* TSO supported */
+ /* tcp_measure_rcv_mss in recent kernels looks at gso_size */
+ skb_shinfo(s->head)->gso_size = s->mss;
+#ifdef GSO_TYPE
+ skb_shinfo(s->head)->gso_type = SKB_GSO_TCPV4;
+#endif
+#endif
+ }
+
+ qs->port_stats[SGE_PSTAT_LRO]++;
+ rx_eth(adap, &qs->rspq, s->head, 2, s->npkts);
+ s->head = NULL;
+ qs->lro.nactive--;
+}
+
+/**
+ * lro_flush - flush all active LRO sessions
+ * @adap: the adapter
+ * @qs: associated queue set
+ * @state: the LRO state
+ *
+ * Flush all active LRO sessions and reset the LRO state.
+ */
+static void lro_flush(struct adapter *adap, struct sge_qset *qs,
+ struct lro_state *state)
+{
+ unsigned int idx = state->active_idx;
+
+ while (state->nactive) {
+ struct lro_session *s = &state->sess[idx];
+
+ if (s->head)
+ lro_flush_session(adap, qs, s);
+ idx = (idx + 1) & (MAX_LRO_SES - 1);
+ }
+}
+
+/**
+ * lro_alloc_session - allocate a new LRO session
+ * @adap: the adapter
+ * @qs: associated queue set
+ * @hash: hash value for the connection to be associated with the session
+ *
+ * Allocate a new LRO session. If there are no more session slots one of
+ * the existing active sessions is completed and taken over.
+ */
+static struct lro_session *lro_alloc_session(struct adapter *adap,
+ struct sge_qset *qs, unsigned int hash)
+{
+ struct lro_state *state = &qs->lro;
+ unsigned int idx = hash & (MAX_LRO_SES - 1);
+ struct lro_session *s = &state->sess[idx];
+
+ if (likely(!s->head)) /* session currently inactive, use it */
+ goto done;
+
+ if (unlikely(state->nactive == MAX_LRO_SES)) {
+ lro_flush_session(adap, qs, s);
+ qs->port_stats[SGE_PSTAT_LRO_OVFLOW]++;
+ } else {
+ qs->port_stats[SGE_PSTAT_LRO_COLSN]++;
+ do {
+ idx = (idx + 1) & (MAX_LRO_SES - 1);
+ s = &state->sess[idx];
+ } while (s->head);
+ }
+
+done: state->nactive++;
+ state->active_idx = idx;
+ return s;
+}
+
+/**
+ * lro_frame_ok - check if an ingress packet is eligible for LRO
+ * @p: the CPL header of the packet
+ *
+ * Returns true if a received packet is eligible for LRO.
+ * The following conditions must be true:
+ * - packet is TCP/IP Ethernet II (checked elsewhere)
+ * - not an IP fragment
+ * - no IP options
+ * - TCP/IP checksums are correct
+ * - the packet is for this host
+ */
+static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
+{
+ const struct ethhdr *eh = (struct ethhdr *)(p + 1);
+ const struct iphdr *ih = (struct iphdr *)(eh + 1);
+
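+ /*
+ * Byte 1 of the CPL packs the Rx flag bits: the 0x90 mask selects
+ * csum_valid (0x10) and fragment (0x80), so this requires a valid
+ * checksum on a non-fragment.
+ */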
+ return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
+ eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
+}
+
+#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
+ TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
+ TCP_FLAG_SYN | TCP_FLAG_FIN)
+#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
+ (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
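+
+/*
+ * TSTAMP_WORD is the canonical aligned timestamp option prefix
+ * (NOP, NOP, kind 8, length 10), i.e. the 32-bit value 0x0101080a.
+ */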
+
+/**
+ * lro_segment_ok - check if a TCP segment is eligible for LRO
+ * @tcph: the TCP header of the packet
+ *
+ * Returns true if a TCP packet is eligible for LRO. This requires that
+ * the packet have only the ACK flag set and no TCP options besides
+ * time stamps.
+ */
+static inline int lro_segment_ok(const struct tcphdr *tcph)
+{
+ int optlen;
+
+ if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
+ return 0;
+
+ optlen = (tcph->doff << 2) - sizeof(*tcph);
+ if (optlen) {
+ const u32 *opt = (const u32 *)(tcph + 1);
+
+ if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
+ *opt != htonl(TSTAMP_WORD) || !opt[2])
+ return 0;
+ }
+ return 1;
+}
+
+static inline int lro_update_session(struct lro_session *s,
+ const struct iphdr *iph, __be16 vlan, int plen)
+{
+ struct sk_buff *skb;
+ const struct tcphdr *tcph;
+ struct tcphdr *s_tcph;
+
+ if (unlikely(vlan != s->vlan))
+ return -1;
+
+ tcph = (const struct tcphdr *)(iph + 1);
+ if (unlikely(ntohl(tcph->seq) != s->seq || plen > 65535 - s->iplen))
+ return -1;
+
+ skb = s->head;
+ s_tcph = (struct tcphdr *)(ip_hdr(skb) + 1);
+
+ if (tcph->doff != sizeof(*tcph) / 4) { /* TCP options present */
+ const u32 *opt = (u32 *)(tcph + 1);
+ u32 *s_opt = (u32 *)(s_tcph + 1);
+
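+ /*
+ * opt[0] is the NOP/NOP/TS prefix, opt[1] the TSval and opt[2]
+ * the TSecr; drop segments whose TSval went backwards, otherwise
+ * adopt the newest timestamps.
+ */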
+ if (unlikely(ntohl(s_opt[1]) > ntohl(opt[1])))
+ return -1;
+ s_opt[1] = opt[1];
+ s_opt[2] = opt[2];
+ }
+ s_tcph->ack_seq = tcph->ack_seq;
+ s_tcph->window = tcph->window;
+
+ s->seq += plen;
+ s->iplen += plen;
+ if (plen > s->mss)
+ s->mss = plen;
+ s->npkts++;
+ skb->len += plen;
+ skb->data_len += plen;
+ return 0;
+}
+
+/*
+ * Length of the packet prefix examined by LRO; it extends up to and
+ * including the TCP timestamps. This part of the packet must be made
+ * memory coherent for CPU accesses.
+ */
+#define LRO_PEEK_LEN (IPH_OFFSET + sizeof(struct iphdr) + \
+ sizeof(struct tcphdr) + 12)
+
+/**
+ * lro_add_page - add a page chunk to an LRO session
+ * @adap: the adapter
+ * @qs: the associated queue set
+ * @fl: the free list containing the page chunk to add
+ * @hash: hash value for the packet
+ *
+ * Add a received packet contained in a page chunk to an existing LRO
+ * session. There are four possible outcomes:
+ * - packet is not eligible for LRO; return -1
+ * - packet is eligible but there's no appropriate session; return 1
+ * - packet is added and the page chunk consumed; return 0
+ * - packet is added but the page chunk isn't needed; return 0
+ */
+static int lro_add_page(struct adapter *adap, struct sge_qset *qs,
+ struct sge_fl *fl, u32 hash)
+{
+ int tcpiplen, plen, ret;
+ struct lro_session *s;
+ const struct iphdr *iph;
+ const struct tcphdr *tcph;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ const struct cpl_rx_pkt *cpl = sd->pg_chunk.va + 2;
+
+ pci_dma_sync_single_for_cpu(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ LRO_PEEK_LEN, PCI_DMA_FROMDEVICE);
+
+ if (!lro_frame_ok(cpl)) {
+ret_1: ret = -1;
+sync: pci_dma_sync_single_for_device(adap->pdev,
+ pci_unmap_addr(sd, dma_addr),
+ LRO_PEEK_LEN, PCI_DMA_FROMDEVICE);
+ return ret;
+ }
+
+ iph = (const struct iphdr *)(sd->pg_chunk.va + IPH_OFFSET);
+ s = lro_lookup(&qs->lro, hash & (MAX_LRO_SES - 1), iph);
+ if (!s) {
+ ret = 1;
+ goto sync;
+ }
+
+ tcph = (const struct tcphdr *)(iph + 1);
+ tcpiplen = sizeof(*iph) + (tcph->doff << 2);
+ plen = ntohs(iph->tot_len) - tcpiplen;
+
+ if (!lro_segment_ok(tcph) ||
+ lro_update_session(s, iph,
+ cpl->vlan_valid ? cpl->vlan : htons(0xffff),
+ plen)) {
+ lro_flush_session(adap, qs, s);
+ goto ret_1;
+ }
+
+ fl->credits--;
+ if (plen) {
+ struct sk_buff *tskb = s->tail;
+ struct skb_shared_info *shinfo = skb_shinfo(tskb);
+
+ pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ fl->buf_size, PCI_DMA_FROMDEVICE);
+ skb_fill_page_desc(tskb, shinfo->nr_frags, sd->pg_chunk.page,
+ sd->pg_chunk.offset + IPH_OFFSET + tcpiplen,
+ plen);
+ s->head->truesize += plen;
+ if (s->head != tskb) {
+ /*
+ * lro_update_session updates the sizes of the head skb,
+ * do the same here for the component skb the fragment
+ * was actually added to.
+ */
+ tskb->len += plen;
+ tskb->data_len += plen;
+ tskb->truesize += plen;
+ }
+ if (unlikely(shinfo->nr_frags == MAX_SKB_FRAGS))
+ lro_flush_session(adap, qs, s);
+ /* No refill, caller does it. */
+ qs->port_stats[SGE_PSTAT_LRO_PG]++;
+ } else {
+ pci_dma_sync_single_for_device(adap->pdev,
+ pci_unmap_addr(sd, dma_addr),
+ LRO_PEEK_LEN, PCI_DMA_FROMDEVICE);
+ recycle_rx_buf(adap, fl, fl->cidx);
+ qs->port_stats[SGE_PSTAT_LRO_ACK]++;
+ }
+
+ return 0;
+}
+
+/**
+ * lro_add_skb - add an sk_buff to an LRO session
+ * @adap: the adapter
+ * @qs: the associated queue set
+ * @skb: the sk_buff to add
+ * @hash: hash value for the packet
+ *
+ * Add a received packet contained in an sk_buff to an existing LRO
+ * session. Returns -1 if the packet is not eligible for LRO, and 0
+ * if it is added successfully.
+ */
+static int lro_add_skb(struct adapter *adap, struct sge_qset *qs,
+ struct sk_buff *skb, u32 hash)
+{
+ __be16 vlan;
+ int tcpiplen, plen;
+ struct lro_session *s;
+ struct iphdr *iph;
+ const struct tcphdr *tcph;
+ const struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(skb->data + 2);
+
+ if (!lro_frame_ok(cpl))
+ return -1;
+
+ iph = (struct iphdr *)(skb->data + IPH_OFFSET);
+ s = lro_lookup(&qs->lro, hash & (MAX_LRO_SES - 1), iph);
+
+ tcph = (struct tcphdr *)(iph + 1);
+ if (!lro_segment_ok(tcph)) {
+ if (s)
+ lro_flush_session(adap, qs, s);
+ return -1;
+ }
+
+ tcpiplen = sizeof(*iph) + (tcph->doff << 2);
+ plen = ntohs(iph->tot_len) - tcpiplen;
+ vlan = cpl->vlan_valid ? cpl->vlan : htons(0xffff);
+ if (likely(s && !lro_update_session(s, iph, vlan, plen))) {
+ /*
+ * Pure ACKs have nothing useful left and can be freed.
+ */
+ if (plen) {
+ skb_pull(skb, IPH_OFFSET + tcpiplen);
+ s->head->truesize += skb->truesize;
+
+ /* TP trims IP packets, no skb_trim needed */
+ if (s->head == s->tail)
+ skb_shinfo(s->head)->frag_list = skb;
+ else
+ s->tail->next = skb;
+ s->tail = skb;
+ qs->port_stats[SGE_PSTAT_LRO_SKB]++;
+ } else {
+ __kfree_skb(skb); /* no destructors, ok from irq */
+ qs->port_stats[SGE_PSTAT_LRO_ACK]++;
+ }
+ } else {
+ if (s)
+ lro_flush_session(adap, qs, s);
+ s = lro_alloc_session(adap, qs, hash);
+ lro_init_session(s, skb, iph, vlan, plen);
+ qs->port_stats[SGE_PSTAT_LRO_SKB]++;
+ }
+ return 0;
+}
+
/**
* handle_rsp_cntrl_info - handles control information in a response
* @qs: the queue set corresponding to the response
#endif
credits = G_RSPD_TXQ0_CR(flags);
- if (credits)
+ if (credits) {
qs->txq[TXQ_ETH].processed += credits;
+#ifdef CHELSIO_FREE_TXBUF_ASAP
+ /*
+ * In the normal Linux driver t3_eth_xmit() routine, we call
+ * skb_orphan() on unshared TX skb. This results in a call to
+ * the destructor for the skb which frees up the send buffer
+ * space it was holding down. This, in turn, allows the
+ * application to make forward progress generating more data
+ * which is important at 10Gb/s. For Virtual Machine Guest
+ * Operating Systems this doesn't work since the send buffer
+ * space is being held down in the Virtual Machine. Thus we
+ * need to get the TX skb's freed up as soon as possible in
+ * order to prevent applications from stalling.
+ *
+ * This code is largely copied from the corresponding code in
+ * sge_timer_cb() and should probably be kept in sync with any
+ * changes there.
+ */
+ if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
+ struct sge_txq *q = &qs->txq[TXQ_ETH];
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+
+ if (q->eth_coalesce_idx)
+ ship_tx_pkt_coalesce_wr(adap, q);
+
+ reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
+ spin_unlock(&qs->txq[TXQ_ETH].lock);
+ }
+#endif
+ }
credits = G_RSPD_TXQ2_CR(flags);
if (credits)
/**
* check_ring_db - check if we need to ring any doorbells
- * @adapter: the adapter
+ * @adap: the adapter
* @qs: the queue set whose Tx queues are to be examined
* @sleeping: indicates which Tx queue sent GTS
*
* to resume transmission after idling while they still have unprocessed
* descriptors.
*/
-static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
+static void check_ring_db(adapter_t *adap, struct sge_qset *qs,
unsigned int sleeping)
{
if (sleeping & F_RSPD_TXQ0_GTS) {
if (txq->cleaned + txq->in_use != txq->processed &&
!test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
set_bit(TXQ_RUNNING, &txq->flags);
+#ifdef T3_TRACE
+ T3_TRACE0(adap->tb[txq->cntxt_id & 7], "doorbell ETH");
+#endif
t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
V_EGRCNTX(txq->cntxt_id));
}
if (txq->cleaned + txq->in_use != txq->processed &&
!test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
set_bit(TXQ_RUNNING, &txq->flags);
+#ifdef T3_TRACE
+ T3_TRACE0(adap->tb[txq->cntxt_id & 7],
+ "doorbell offload");
+#endif
t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
V_EGRCNTX(txq->cntxt_id));
}
* on this queue. If the system is under memory shortage use a fairly
* long delay to help recovery.
*/
-static int process_responses(struct adapter *adap, struct sge_qset *qs,
- int budget)
+static int process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
{
struct sge_rspq *q = &qs->rspq;
struct rsp_desc *r = &q->desc[q->cidx];
q->next_holdoff = q->holdoff_tmr;
while (likely(budget_left && is_new_response(r, q))) {
- int eth, ethpad = 2;
+ int eth, ethpad = 2, lro = qs->lro.enabled;
struct sk_buff *skb = NULL;
u32 len, flags = ntohl(r->flags);
u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
+#ifdef T3_TRACE
+ T3_TRACE5(adap->tb[q->cntxt_id],
+ "response: RSS 0x%x flags 0x%x len %u, type 0x%x rss hash 0x%x",
+ ntohl(rss_hi), flags, ntohl(r->len_cq),
+ r->rss_hdr.hash_type, ntohl(rss_lo));
+#endif
+
eth = r->rss_hdr.opcode == CPL_RX_PKT;
if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
} else if ((len = ntohl(r->len_cq)) != 0) {
struct sge_fl *fl;
+ if (eth)
+ lro = qs->lro.enabled & is_eth_tcp(rss_hi);
+
fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
#endif
__refill_fl(adap, fl);
+ if (lro > 0) {
+ lro = lro_add_page(adap, qs, fl,
+ rss_lo);
+ if (!lro)
+ goto next_fl;
+ }
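+ /*
+ * lro_add_page() returned -1 (frame not
+ * LRO-able) or 1 (no matching session); fall
+ * through to normal skb delivery, where
+ * lro_add_skb() may still start a session.
+ */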
skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
eth ? SGE_RX_DROP_THRES : 0);
} else
q->rx_drops++;
} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
__skb_pull(skb, 2);
-
+next_fl:
if (++fl->cidx == fl->size)
fl->cidx = 0;
} else
}
if (likely(skb != NULL)) {
- if (eth)
- rx_eth(adap, q, skb, ethpad);
- else {
+ if (eth) {
+ if (lro <= 0 ||
+ lro_add_skb(adap, qs, skb, rss_lo))
+ rx_eth(adap, q, skb, ethpad, 1);
+ } else {
q->offload_pkts++;
/* Preserve the RSS info in csum & priority */
skb->csum = rss_hi;
ngathered);
}
}
+
--budget_left;
}
deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+ lro_flush(adap, qs, &qs->lro);
+
if (sleeping)
check_ring_db(adap, qs, sleeping);
- smp_mb(); /* commit Tx queue .processed updates */
+ smp_mb(); /* commit Tx queue .processed updates */
if (unlikely(qs->txq_stopped != 0))
restart_tx(qs);
+ if (qs->txq[TXQ_ETH].eth_coalesce_idx &&
+ should_finalize_tx_pkt_coalescing(&qs->txq[TXQ_ETH]))
+ try_finalize_tx_pkt_coalesce_wr(adap, &qs->txq[TXQ_ETH]);
+
budget -= budget_left;
+#ifdef T3_TRACE
+ T3_TRACE4(adap->tb[q->cntxt_id],
+ "process_responses: <- cidx %u gen %u ret %u credit %u",
+ q->cidx, q->gen, budget, q->credits);
+#endif
return budget;
}
* @dev: the net device
* @budget: how many packets we can process in this round
*
- * Handler for new data events when using NAPI.
+ * Handler for new data events when using NAPI. This does not need any
+ * locking or protection from interrupts as data interrupts are off at
+ * this point and other adapter interrupts do not interfere (the latter
+ * is not a concern at all with MSI-X as non-data interrupts then have
+ * a separate handler).
*/
static int napi_rx_handler(struct net_device *dev, int *budget)
{
struct adapter *adap = pi->adapter;
struct sge_qset *qs = dev2qset(dev);
int effective_budget = min(*budget, dev->quota);
-
int work_done = process_responses(adap, qs, effective_budget);
+
*budget -= work_done;
dev->quota -= work_done;
*
* Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
*/
-static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
+static int process_pure_responses(adapter_t *adap, struct sge_qset *qs,
struct rsp_desc *r)
{
struct sge_rspq *q = &qs->rspq;
do {
u32 flags = ntohl(r->flags);
+#ifdef T3_TRACE
+ T3_TRACE2(adap->tb[q->cntxt_id],
+ "pure response: RSS 0x%x flags 0x%x",
+ ntohl(*(u32 *)r), flags);
+#endif
r++;
if (unlikely(++q->cidx == q->size)) {
q->cidx = 0;
if (sleeping)
check_ring_db(adap, qs, sleeping);
- smp_mb(); /* commit Tx queue .processed updates */
+ smp_mb(); /* commit Tx queue .processed updates */
if (unlikely(qs->txq_stopped != 0))
restart_tx(qs);
+ if (qs->txq[TXQ_ETH].eth_coalesce_idx &&
+ should_finalize_tx_pkt_coalescing(&qs->txq[TXQ_ETH]))
+ try_finalize_tx_pkt_coalesce_wr(adap, &qs->txq[TXQ_ETH]);
+
return is_new_response(r, q);
}
*
* The caller must ascertain NAPI is not already running.
*/
-static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
+static inline int handle_responses(adapter_t *adap, struct sge_rspq *q)
{
struct sge_qset *qs = rspq_to_qset(q);
struct rsp_desc *r = &q->desc[q->cidx];
return -1;
if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
- V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
+ V_NEWTIMER(q->holdoff_tmr) |
+ V_NEWINDEX(q->cidx));
return 0;
}
if (likely(__netif_rx_schedule_prep(qs->netdev)))
* The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
* (i.e., response queue serviced in hard interrupt).
*/
-irqreturn_t t3_sge_intr_msix(int irq, void *cookie, struct pt_regs *regs)
+DECLARE_INTR_HANDLER(t3_sge_intr_msix, irq, cookie, regs)
{
struct sge_qset *qs = cookie;
const struct port_info *pi = netdev_priv(qs->netdev);
* The MSI-X interrupt handler for an SGE response queue for the NAPI case
* (i.e., response queue serviced by NAPI polling).
*/
-static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie, struct pt_regs *regs)
+DECLARE_INTR_HANDLER(t3_sge_intr_msix_napi, irq, cookie, regs)
{
struct sge_qset *qs = cookie;
const struct port_info *pi = netdev_priv(qs->netdev);
struct sge_rspq *q = &qs->rspq;
spin_lock(&q->lock);
-
if (handle_responses(adap, q) < 0)
q->unhandled_irqs++;
spin_unlock(&q->lock);
* the same MSI vector. We use one SGE response queue per port in this mode
* and protect all response queues with queue 0's lock.
*/
-static irqreturn_t t3_intr_msi(int irq, void *cookie, struct pt_regs *regs)
+DECLARE_INTR_HANDLER(t3_intr_msi, irq, cookie, regs)
{
- int new_packets = 0;
- struct adapter *adap = cookie;
+ int i, new_packets = 0;
+ adapter_t *adap = cookie;
struct sge_rspq *q = &adap->sge.qs[0].rspq;
spin_lock(&q->lock);
- if (process_responses(adap, &adap->sge.qs[0], -1)) {
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
- V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
- new_packets = 1;
- }
-
- if (adap->params.nports == 2 &&
- process_responses(adap, &adap->sge.qs[1], -1)) {
- struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+ for_each_port(adap, i)
+ if (process_responses(adap, &adap->sge.qs[i], -1)) {
+ struct sge_rspq *q1 = &adap->sge.qs[i].rspq;
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
- V_NEWTIMER(q1->next_holdoff) |
- V_NEWINDEX(q1->cidx));
- new_packets = 1;
- }
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
+ V_NEWTIMER(q1->next_holdoff) |
+ V_NEWINDEX(q1->cidx));
+ new_packets = 1;
+ }
if (!new_packets && t3_slow_intr_handler(adap) == 0)
q->unhandled_irqs++;
* one SGE response queue per port in this mode and protect all response
* queues with queue 0's lock.
*/
-static irqreturn_t t3_intr_msi_napi(int irq, void *cookie, struct pt_regs *regs)
+DECLARE_INTR_HANDLER(t3_intr_msi_napi, irq, cookie, regs)
{
- int new_packets;
- struct adapter *adap = cookie;
+ int i, new_packets = 0;
+ adapter_t *adap = cookie;
struct sge_rspq *q = &adap->sge.qs[0].rspq;
spin_lock(&q->lock);
- new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
- if (adap->params.nports == 2)
- new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
- &adap->sge.qs[1].rspq);
+ for_each_port(adap, i) {
+ new_packets += rspq_check_napi(adap->sge.qs[i].netdev,
+ &adap->sge.qs[i].rspq);
+ }
+
if (!new_packets && t3_slow_intr_handler(adap) == 0)
q->unhandled_irqs++;
/*
* A helper function that processes responses and issues GTS.
*/
-static inline int process_responses_gts(struct adapter *adap,
- struct sge_rspq *rq)
+static inline int process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
{
int work;
* the same interrupt pin. We use one SGE response queue per port in this mode
* and protect all response queues with queue 0's lock.
*/
-static irqreturn_t t3_intr(int irq, void *cookie, struct pt_regs *regs)
+DECLARE_INTR_HANDLER(t3_intr, irq, cookie, regs)
{
int work_done, w0, w1;
- struct adapter *adap = cookie;
+ adapter_t *adap = cookie;
struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
w0 = is_new_response(&q0->desc[q0->cidx], q0);
w1 = adap->params.nports == 2 &&
- is_new_response(&q1->desc[q1->cidx], q1);
+ is_new_response(&q1->desc[q1->cidx], q1);
if (likely(w0 | w1)) {
t3_write_reg(adap, A_PL_CLI, 0);
- t3_read_reg(adap, A_PL_CLI); /* flush */
+ (void) t3_read_reg(adap, A_PL_CLI); /* flush */
if (likely(w0))
process_responses_gts(adap, q0);
* response queue per port in this mode and protect all response queues with
* queue 0's lock.
*/
-static irqreturn_t t3b_intr(int irq, void *cookie, struct pt_regs *regs)
+DECLARE_INTR_HANDLER(t3b_intr, irq, cookie, regs)
{
- u32 map;
- struct adapter *adap = cookie;
+ u32 i, map;
+ adapter_t *adap = cookie;
struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
t3_write_reg(adap, A_PL_CLI, 0);
map = t3_read_reg(adap, A_SG_DATA_INTR);
- if (unlikely(!map)) /* shared interrupt, most likely */
+ if (unlikely(!map)) /* shared interrupt, most likely */
return IRQ_NONE;
spin_lock(&q0->lock);
if (unlikely(map & F_ERRINTR))
t3_slow_intr_handler(adap);
- if (likely(map & 1))
- process_responses_gts(adap, q0);
-
- if (map & 2)
- process_responses_gts(adap, &adap->sge.qs[1].rspq);
+ for_each_port(adap, i)
+ if (map & (1 << i))
+ process_responses_gts(adap, &adap->sge.qs[i].rspq);
spin_unlock(&q0->lock);
return IRQ_HANDLED;
* response queue per port in this mode and protect all response queues with
* queue 0's lock.
*/
-static irqreturn_t t3b_intr_napi(int irq, void *cookie, struct pt_regs *regs)
+DECLARE_INTR_HANDLER(t3b_intr_napi, irq, cookie, regs)
{
- u32 map;
- struct net_device *dev;
- struct adapter *adap = cookie;
+ u32 i, map;
+ adapter_t *adap = cookie;
struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
t3_write_reg(adap, A_PL_CLI, 0);
map = t3_read_reg(adap, A_SG_DATA_INTR);
- if (unlikely(!map)) /* shared interrupt, most likely */
+ if (unlikely(!map)) /* shared interrupt, most likely */
return IRQ_NONE;
spin_lock(&q0->lock);
if (unlikely(map & F_ERRINTR))
t3_slow_intr_handler(adap);
- if (likely(map & 1)) {
- dev = adap->sge.qs[0].netdev;
-
- if (likely(__netif_rx_schedule_prep(dev)))
- __netif_rx_schedule(dev);
- }
- if (map & 2) {
- dev = adap->sge.qs[1].netdev;
+ for_each_port(adap, i)
+ if (map & (1 << i)) {
+ struct net_device *dev = adap->sge.qs[i].netdev;
- if (likely(__netif_rx_schedule_prep(dev)))
- __netif_rx_schedule(dev);
- }
+ if (likely(__netif_rx_schedule_prep(dev)))
+ __netif_rx_schedule(dev);
+ }
spin_unlock(&q0->lock);
return IRQ_HANDLED;
* (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
* response queues.
*/
-intr_handler_t t3_intr_handler(struct adapter *adap, int polling, struct pt_regs *ptregs)
+intr_handler_t t3_intr_handler(adapter_t *adap, int polling)
{
if (adap->flags & USING_MSIX)
return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
*
* Interrupt handler for SGE asynchronous (non-data) events.
*/
-void t3_sge_err_intr_handler(struct adapter *adapter)
+void t3_sge_err_intr_handler(adapter_t *adapter)
{
unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
if (status & SGE_FRAMINGERR)
CH_ALERT(adapter, "SGE framing error (0x%x)\n",
status & SGE_FRAMINGERR);
-
if (status & F_RSPQCREDITOVERFOW)
CH_ALERT(adapter, "SGE response queue credit overflow\n");
v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
CH_ALERT(adapter,
- "packet delivered to disabled response queue "
- "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
+ "packet delivered to disabled response queue (0x%x)\n",
+ (v >> S_RSPQ0DISABLED) & 0xff);
}
if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
status & F_HIPIODRBDROPERR ? "high" : "lo");
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
- if (status & SGE_FATALERR)
+ if (status & SGE_FATALERR)
t3_fatal_err(adapter);
}
+/* Update offload traffic scheduler for a particular port */
+static void update_max_bw(struct sge_qset *qs, struct port_info *pi)
+{
+ struct sge_txq *q = &qs->txq[TXQ_ETH];
+ int max_bw, update_bw;
+
+ if (!netif_carrier_ok(qs->netdev))
+ return;
+
+ if ((q->cntxt_id - FW_TUNNEL_SGEEC_START) != pi->first_qset)
+ return;
+
+ max_bw = pi->link_config.speed * 940;
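+ /* 940/1000 of line rate, i.e. cap offload traffic at ~94% */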
+
+ /* use q->in_use as an indicator of ongoing NIC traffic */
+ update_bw = ((q->in_use && pi->max_ofld_bw == max_bw) ||
+ (!q->in_use && pi->max_ofld_bw < max_bw));
+
+ if (update_bw) {
+ pi->max_ofld_bw = q->in_use ?
+ pi->link_config.speed * 470 :
+ pi->link_config.speed * 940;
+ t3_config_sched(pi->adapter, pi->max_ofld_bw, pi->port_id);
+#ifdef T3_TRACE
+ T3_TRACE3(pi->adapter->tb[q->cntxt_id & 7],
+ "%s: updating max bw to %d for port %d",
+ __func__, pi->max_ofld_bw, pi->port_id);
+#endif
+ }
+}
+
/**
* sge_timer_cb - perform periodic maintenance of an SGE qset
* @data: the SGE queue set to maintain
* when out of memory a queue can become empty. We try to add only a few
* buffers here, the queue will be replenished fully as these new buffers
* are used up if memory shortage has subsided.
+ *
+ * c) Return coalesced response queue credits in case a response queue is
+ * starved.
+ *
+ * d) Ring doorbells for T304 tunnel queues since we have seen doorbell
+ * fifo overflows and the FW doesn't implement any recovery scheme yet.
*/
static void sge_timer_cb(unsigned long data)
{
spinlock_t *lock;
+ unsigned long flags;
struct sge_qset *qs = (struct sge_qset *)data;
- const struct port_info *pi = netdev_priv(qs->netdev);
+ struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
+ /*
+ * Disable interrupts while we deal with sensitive structures ...
+ */
+ local_irq_save(flags);
+
if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
+ struct sge_txq *q = &qs->txq[TXQ_ETH];
+
+ if (q->eth_coalesce_idx)
+ ship_tx_pkt_coalesce_wr(adap, q);
+
reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
spin_unlock(&qs->txq[TXQ_ETH].lock);
}
spin_unlock(&qs->txq[TXQ_OFLD].lock);
}
lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
- &adap->sge.qs[0].rspq.lock;
- if (spin_trylock_irq(lock)) {
+ &adap->sge.qs[0].rspq.lock;
+ if (spin_trylock(lock)) {
if (!napi_is_scheduled(qs->netdev)) {
u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
}
}
}
- spin_unlock_irq(lock);
+ spin_unlock(lock);
+ }
+
+ if (adap->params.nports > 2)
+ update_max_bw(qs, pi);
+
+ local_irq_restore(flags);
+
+ if (adap->params.nports > 2) {
+ int i;
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ const struct port_info *pi = netdev_priv(dev);
+
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX |
+ (FW_TUNNEL_SGEEC_START + pi->first_qset));
+ }
}
+
mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
if (!qs->netdev)
return;
 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
qs->rspq.polling = p->polling;
qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
}
* Tx queues. The Tx queues are assigned roles in the order Ethernet
* queue, offload queue, and control queue.
*/
-int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
- int irq_vec_idx, const struct qset_params *p,
+int t3_sge_alloc_qset(adapter_t *adapter, unsigned int id, int nports,
+ int irq_vec_idx, const struct qset_params *p,
int ntxq, struct net_device *netdev)
{
- int i, ret = -ENOMEM;
+ int i, avail, ret = -ENOMEM;
struct sge_qset *q = &adapter->sge.qs[id];
init_qset_cntxt(q, id);
sizeof(struct rx_desc),
sizeof(struct rx_sw_desc),
&q->fl[0].phys_addr, &q->fl[0].sdesc);
- if (!q->fl[0].desc)
+ if (!q->fl[0].desc && p->fl_size)
goto err;
q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
sizeof(struct rx_desc),
sizeof(struct rx_sw_desc),
&q->fl[1].phys_addr, &q->fl[1].sdesc);
- if (!q->fl[1].desc)
+ if (!q->fl[1].desc && p->jumbo_size)
goto err;
q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
sizeof(struct rsp_desc), 0,
&q->rspq.phys_addr, NULL);
- if (!q->rspq.desc)
+ if (!q->rspq.desc && p->rspq_size)
goto err;
for (i = 0; i < ntxq; ++i) {
if (!q->txq[i].desc)
goto err;
+ if (i == TXQ_ETH) {
+ q->txq[TXQ_ETH].eth_coalesce_sdesc =
+ kcalloc(p->txq_size[TXQ_ETH],
+ sizeof(struct eth_coalesce_sw_desc),
+ GFP_KERNEL);
+ if (!q->txq[TXQ_ETH].eth_coalesce_sdesc)
+ goto err;
+ }
+
q->txq[i].gen = 1;
q->txq[i].size = p->txq_size[i];
spin_lock_init(&q->txq[i].lock);
spin_lock_init(&q->rspq.lock);
q->txq[TXQ_ETH].stop_thres = nports *
- flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
+ flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
#if FL0_PG_CHUNK_SIZE > 0
q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
spin_unlock(&adapter->sge.reg_lock);
q->netdev = netdev;
t3_update_qset_coalesce(q, p);
+ q->lro.enabled = p->lro;
/*
* We use atalk_ptr as a backpointer to a qset. In case a device is
if (netdev->atalk_ptr == NULL)
netdev->atalk_ptr = q;
- refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
- refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+ avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
+ if (!avail) {
+ CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+ goto err;
+ }
+ if (avail < q->fl[0].size)
+ CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+ avail);
+ avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+ if (avail < q->fl[1].size)
+ CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+ avail);
refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
return 0;
- err_unlock:
+err_unlock:
spin_unlock(&adapter->sge.reg_lock);
- err:
+err:
t3_free_qset(adapter, q);
return ret;
}
*
* Frees resources used by the SGE queue sets.
*/
-void t3_free_sge_resources(struct adapter *adap)
+void t3_free_sge_resources(adapter_t *adap)
{
int i;
* Enables the SGE for DMAs. This is the last step in starting packet
* transfers.
*/
-void t3_sge_start(struct adapter *adap)
+void t3_sge_start(adapter_t *adap)
{
t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}
* later from process context, at which time the tasklets will be stopped
* if they are still running.
*/
-void t3_sge_stop(struct adapter *adap)
+void t3_sge_stop(adapter_t *adap)
{
t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
if (!in_interrupt()) {
* top-level must request those individually. We also do not enable DMA
* here, that should be done after the queues have been set up.
*/
-void t3_sge_init(struct adapter *adap, struct sge_params *p)
+void t3_sge_init(adapter_t *adap, struct sge_params *p)
{
unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
- F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
- V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
- V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
+ F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
+ V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
+ V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
ctrl |= F_EGRGENCTRL;
#endif
* defaults for the assorted SGE parameters, which admins can change until
* they are used to initialize the SGE.
*/
-void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
+void __devinit t3_sge_prep(adapter_t *adap, struct sge_params *p)
{
int i;
p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
for (i = 0; i < SGE_QSETS; ++i) {
struct qset_params *q = p->qset + i;
+ if (adap->params.nports > 2)
+ q->coalesce_usecs = 50;
+ else
+ q->coalesce_usecs = 5;
+
q->polling = adap->params.rev > 0;
- q->coalesce_usecs = 5;
+ q->lro = 1;
q->rspq_size = 1024;
q->fl_size = 1024;
q->jumbo_size = 512;
#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
-#endif /* _SGE_DEFS_H */
+#endif /* _SGE_DEFS_H */
/*
- * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
+ * Definitions of the CPL 5 commands and status codes.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2004-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
+ * Written by Dimitris Michailidis (dm@chelsio.com)
*
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#ifndef T3_CPL_H
#define T3_CPL_H
#endif
enum CPL_opcode {
- CPL_PASS_OPEN_REQ = 0x1,
- CPL_PASS_ACCEPT_RPL = 0x2,
- CPL_ACT_OPEN_REQ = 0x3,
- CPL_SET_TCB = 0x4,
- CPL_SET_TCB_FIELD = 0x5,
- CPL_GET_TCB = 0x6,
- CPL_PCMD = 0x7,
- CPL_CLOSE_CON_REQ = 0x8,
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_ACCEPT_RPL = 0x2,
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB = 0x4,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_GET_TCB = 0x6,
+ CPL_PCMD = 0x7,
+ CPL_CLOSE_CON_REQ = 0x8,
CPL_CLOSE_LISTSRV_REQ = 0x9,
- CPL_ABORT_REQ = 0xA,
- CPL_ABORT_RPL = 0xB,
- CPL_TX_DATA = 0xC,
- CPL_RX_DATA_ACK = 0xD,
- CPL_TX_PKT = 0xE,
- CPL_RTE_DELETE_REQ = 0xF,
- CPL_RTE_WRITE_REQ = 0x10,
- CPL_RTE_READ_REQ = 0x11,
- CPL_L2T_WRITE_REQ = 0x12,
- CPL_L2T_READ_REQ = 0x13,
- CPL_SMT_WRITE_REQ = 0x14,
- CPL_SMT_READ_REQ = 0x15,
- CPL_TX_PKT_LSO = 0x16,
- CPL_PCMD_READ = 0x17,
- CPL_BARRIER = 0x18,
- CPL_TID_RELEASE = 0x1A,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TX_DATA = 0xC,
+ CPL_RX_DATA_ACK = 0xD,
+ CPL_TX_PKT = 0xE,
+ CPL_RTE_DELETE_REQ = 0xF,
+ CPL_RTE_WRITE_REQ = 0x10,
+ CPL_RTE_READ_REQ = 0x11,
+ CPL_L2T_WRITE_REQ = 0x12,
+ CPL_L2T_READ_REQ = 0x13,
+ CPL_SMT_WRITE_REQ = 0x14,
+ CPL_SMT_READ_REQ = 0x15,
+ CPL_TX_PKT_LSO = 0x16,
+ CPL_PCMD_READ = 0x17,
+ CPL_BARRIER = 0x18,
+ CPL_TID_RELEASE = 0x1A,
CPL_CLOSE_LISTSRV_RPL = 0x20,
- CPL_ERROR = 0x21,
- CPL_GET_TCB_RPL = 0x22,
- CPL_L2T_WRITE_RPL = 0x23,
- CPL_PCMD_READ_RPL = 0x24,
- CPL_PCMD_RPL = 0x25,
- CPL_PEER_CLOSE = 0x26,
- CPL_RTE_DELETE_RPL = 0x27,
- CPL_RTE_WRITE_RPL = 0x28,
- CPL_RX_DDP_COMPLETE = 0x29,
- CPL_RX_PHYS_ADDR = 0x2A,
- CPL_RX_PKT = 0x2B,
- CPL_RX_URG_NOTIFY = 0x2C,
- CPL_SET_TCB_RPL = 0x2D,
- CPL_SMT_WRITE_RPL = 0x2E,
- CPL_TX_DATA_ACK = 0x2F,
-
- CPL_ABORT_REQ_RSS = 0x30,
- CPL_ABORT_RPL_RSS = 0x31,
- CPL_CLOSE_CON_RPL = 0x32,
- CPL_ISCSI_HDR = 0x33,
- CPL_L2T_READ_RPL = 0x34,
- CPL_RDMA_CQE = 0x35,
+ CPL_ERROR = 0x21,
+ CPL_GET_TCB_RPL = 0x22,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_PCMD_READ_RPL = 0x24,
+ CPL_PCMD_RPL = 0x25,
+ CPL_PEER_CLOSE = 0x26,
+ CPL_RTE_DELETE_RPL = 0x27,
+ CPL_RTE_WRITE_RPL = 0x28,
+ CPL_RX_DDP_COMPLETE = 0x29,
+ CPL_RX_PHYS_ADDR = 0x2A,
+ CPL_RX_PKT = 0x2B,
+ CPL_RX_URG_NOTIFY = 0x2C,
+ CPL_SET_TCB_RPL = 0x2D,
+ CPL_SMT_WRITE_RPL = 0x2E,
+ CPL_TX_DATA_ACK = 0x2F,
+
+ CPL_ABORT_REQ_RSS = 0x30,
+ CPL_ABORT_RPL_RSS = 0x31,
+ CPL_CLOSE_CON_RPL = 0x32,
+ CPL_ISCSI_HDR = 0x33,
+ CPL_L2T_READ_RPL = 0x34,
+ CPL_RDMA_CQE = 0x35,
CPL_RDMA_CQE_READ_RSP = 0x36,
- CPL_RDMA_CQE_ERR = 0x37,
- CPL_RTE_READ_RPL = 0x38,
- CPL_RX_DATA = 0x39,
+ CPL_RDMA_CQE_ERR = 0x37,
+ CPL_RTE_READ_RPL = 0x38,
+ CPL_RX_DATA = 0x39,
- CPL_ACT_OPEN_RPL = 0x40,
- CPL_PASS_OPEN_RPL = 0x41,
- CPL_RX_DATA_DDP = 0x42,
- CPL_SMT_READ_RPL = 0x43,
+ CPL_ACT_OPEN_RPL = 0x40,
+ CPL_PASS_OPEN_RPL = 0x41,
+ CPL_RX_DATA_DDP = 0x42,
+ CPL_SMT_READ_RPL = 0x43,
- CPL_ACT_ESTABLISH = 0x50,
- CPL_PASS_ESTABLISH = 0x51,
+ CPL_ACT_ESTABLISH = 0x50,
+ CPL_PASS_ESTABLISH = 0x51,
- CPL_PASS_ACCEPT_REQ = 0x70,
+ CPL_PASS_ACCEPT_REQ = 0x70,
- CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
+ CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
- CPL_TX_DMA_ACK = 0xA0,
- CPL_RDMA_READ_REQ = 0xA1,
- CPL_RDMA_TERMINATE = 0xA2,
- CPL_TRACE_PKT = 0xA3,
- CPL_RDMA_EC_STATUS = 0xA5,
+ CPL_TX_DMA_ACK = 0xA0,
+ CPL_RDMA_READ_REQ = 0xA1,
+ CPL_RDMA_TERMINATE = 0xA2,
+ CPL_TRACE_PKT = 0xA3,
+ CPL_RDMA_EC_STATUS = 0xA5,
+ CPL_SGE_EC_CR_RETURN = 0xA6,
- NUM_CPL_CMDS /* must be last and previous entries must be sorted */
+ NUM_CPL_CMDS /* must be last and previous entries must be sorted */
};
enum CPL_error {
- CPL_ERR_NONE = 0,
- CPL_ERR_TCAM_PARITY = 1,
- CPL_ERR_TCAM_FULL = 3,
- CPL_ERR_CONN_RESET = 20,
- CPL_ERR_CONN_EXIST = 22,
- CPL_ERR_ARP_MISS = 23,
- CPL_ERR_BAD_SYN = 24,
- CPL_ERR_CONN_TIMEDOUT = 30,
- CPL_ERR_XMIT_TIMEDOUT = 31,
- CPL_ERR_PERSIST_TIMEDOUT = 32,
- CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
- CPL_ERR_RTX_NEG_ADVICE = 35,
+ CPL_ERR_RTX_NEG_ADVICE = 35,
CPL_ERR_PERSIST_NEG_ADVICE = 36,
- CPL_ERR_ABORT_FAILED = 42,
- CPL_ERR_GENERAL = 99
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_GENERAL = 99
};
enum {
CPL_CONN_POLICY_AUTO = 0,
- CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_FILTER = 2,
CPL_CONN_POLICY_DENY = 3
};
enum {
- ULP_MODE_NONE = 0,
- ULP_MODE_ISCSI = 2,
- ULP_MODE_RDMA = 4,
- ULP_MODE_TCPDDP = 5
+ ULP_MODE_NONE = 0,
+ ULP_MODE_TCP_DDP = 1,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_RDMA = 4,
+ ULP_MODE_TCPDDP = 5
};
enum {
ULP_CRC_HEADER = 1 << 0,
- ULP_CRC_DATA = 1 << 1
+ ULP_CRC_DATA = 1 << 1
};
enum {
CPL_PASS_OPEN_ACCEPT,
- CPL_PASS_OPEN_REJECT
+ CPL_PASS_OPEN_REJECT,
+ CPL_PASS_OPEN_ACCEPT_TNL
};
enum {
CPL_ABORT_POST_CLOSE_REQ = 2
};
-enum { /* TX_PKT_LSO ethernet types */
+enum { /* TX_PKT_LSO ethernet types */
CPL_ETH_II,
CPL_ETH_II_VLAN,
CPL_ETH_802_3,
CPL_ETH_802_3_VLAN
};
-enum { /* TCP congestion control algorithms */
+enum { /* TCP congestion control algorithms */
CONG_ALG_RENO,
CONG_ALG_TAHOE,
CONG_ALG_NEWRENO,
CONG_ALG_HIGHSPEED
};
+enum { /* RSS hash type */
+ RSS_HASH_NONE = 0,
+ RSS_HASH_2_TUPLE = 1,
+ RSS_HASH_4_TUPLE = 2,
+ RSS_HASH_TCPV6 = 3
+};
+
union opcode_tid {
__be32 opcode_tid;
__u8 opcode;
#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
#define G_TID(x) ((x) & 0xFFFFFF)
+#define S_HASHTYPE 22
+#define M_HASHTYPE 0x3
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+
+#define S_QNUM 0
+#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
+
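+/* Illustrative sketch (not part of the driver): given the big-endian RSS
+ * hash/qnum word of an ingress packet in a hypothetical @rss_word, the
+ * hash type and destination queue decode as:
+ *
+ *	u32 w = ntohl(rss_word);
+ *	unsigned int qidx = 0;
+ *
+ *	if (G_HASHTYPE(w) == RSS_HASH_4_TUPLE)
+ *		qidx = G_QNUM(w);
+ */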
/* tid is assumed to be 24-bits */
#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
__be16 mss;
__u8 wsf;
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8:5;
+ __u8 :5;
__u8 ecn:1;
__u8 sack:1;
__u8 tstamp:1;
__u8 tstamp:1;
__u8 sack:1;
__u8 ecn:1;
- __u8:5;
+ __u8 :5;
#endif
};
#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
+/* Applicable to BYPASS WRs only: the uP will add a CPL_BARRIER before
+ * and after the BYPASS WR if the ATOMIC bit is set.
+ */
+#define S_WR_ATOMIC 16
+#define V_WR_ATOMIC(x) ((x) << S_WR_ATOMIC)
+#define F_WR_ATOMIC V_WR_ATOMIC(1U)
+
+/* Applicable to BYPASS WRs only: the uP will flush buffered
+ * non-abort-related WRs.
+ */
+#define S_WR_FLUSH 17
+#define V_WR_FLUSH(x) ((x) << S_WR_FLUSH)
+#define F_WR_FLUSH V_WR_FLUSH(1U)
+
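+/* Illustrative sketch only (it assumes wr_hi is the big-endian flags word
+ * of the WR header): a sender wanting the uP to bracket a BYPASS WR with
+ * CPL_BARRIERs would OR in the flag, e.g.
+ *
+ *	wr->wr_hi |= htonl(F_WR_ATOMIC);
+ */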
#define S_WR_DATATYPE 20
#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
#define F_WR_DATATYPE V_WR_DATATYPE(1U)
#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
+#define S_OPT1_VLAN 6
+#define M_OPT1_VLAN 0xFFF
+#define V_OPT1_VLAN(x) ((x) << S_OPT1_VLAN)
+#define G_OPT1_VLAN(x) (((x) >> S_OPT1_VLAN) & M_OPT1_VLAN)
+
#define S_MAC_MATCH_VALID 18
#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
};
struct cpl_pass_open_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
};
struct cpl_pass_establish {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
struct cpl_pass_accept_req {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be32 tos_tid;
struct tcp_options tcp_options;
- __u8 dst_mac[6];
+ __u8 dst_mac[6];
__be16 vlan_tag;
- __u8 src_mac[6];
+ __u8 src_mac[6];
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8:3;
- __u8 addr_idx:3;
- __u8 port_idx:1;
- __u8 exact_match:1;
+ __u8 :3;
+ __u8 addr_idx:3;
+ __u8 port_idx:1;
+ __u8 exact_match:1;
#else
- __u8 exact_match:1;
- __u8 port_idx:1;
- __u8 addr_idx:3;
- __u8:3;
+ __u8 exact_match:1;
+ __u8 port_idx:1;
+ __u8 addr_idx:3;
+ __u8 :3;
#endif
- __u8 rsvd;
+ __u8 rsvd;
__be32 rcv_isn;
__be32 rsvd2;
};
#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
struct cpl_act_open_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
__be32 peer_ip;
__be32 atid;
- __u8 rsvd[3];
- __u8 status;
+ __u8 rsvd[3];
+ __u8 status;
};
struct cpl_act_establish {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be32 local_ip;
};
struct cpl_get_tcb_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 rsvd;
__u8 status;
__be16 len;
struct cpl_set_tcb {
WR_HDR;
union opcode_tid ot;
- __u8 reply;
- __u8 cpu_idx;
+ __u8 reply;
+ __u8 cpu_idx;
__be16 len;
};
struct cpl_set_tcb_field {
WR_HDR;
union opcode_tid ot;
- __u8 reply;
- __u8 cpu_idx;
+ __u8 reply;
+ __u8 cpu_idx;
__be16 word;
__be64 mask;
__be64 val;
};
struct cpl_set_tcb_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 rsvd[3];
__u8 status;
};
__u8 src:1;
__u8 bundle:1;
__u8 channel:1;
- __u8:5;
+ __u8 :5;
#else
- __u8:5;
+ __u8 :5;
__u8 channel:1;
__u8 bundle:1;
__u8 src:1;
};
struct cpl_pcmd_reply {
- RSS_HDR union opcode_tid ot;
- __u8 status;
- __u8 rsvd;
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd;
__be16 len;
};
};
struct cpl_close_con_rpl {
- RSS_HDR union opcode_tid ot;
- __u8 rsvd[3];
- __u8 status;
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
__be32 snd_nxt;
__be32 rcv_nxt;
};
struct cpl_close_listserv_req {
WR_HDR;
union opcode_tid ot;
- __u8 rsvd0;
- __u8 cpu_idx;
+ __u8 rsvd0;
+ __u8 cpu_idx;
__be16 rsvd1;
};
struct cpl_close_listserv_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 rsvd[3];
__u8 status;
};
struct cpl_abort_req_rss {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be32 rsvd0;
- __u8 rsvd1;
- __u8 status;
- __u8 rsvd2[6];
+ __u8 rsvd1;
+ __u8 status;
+ __u8 rsvd2[6];
};
struct cpl_abort_req {
WR_HDR;
union opcode_tid ot;
__be32 rsvd0;
- __u8 rsvd1;
- __u8 cmd;
- __u8 rsvd2[6];
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
};
struct cpl_abort_rpl_rss {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be32 rsvd0;
- __u8 rsvd1;
- __u8 status;
- __u8 rsvd2[6];
+ __u8 rsvd1;
+ __u8 status;
+ __u8 rsvd2[6];
};
struct cpl_abort_rpl {
WR_HDR;
union opcode_tid ot;
__be32 rsvd0;
- __u8 rsvd1;
- __u8 cmd;
- __u8 rsvd2[6];
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
};
struct cpl_peer_close {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be32 rcv_nxt;
};
__be32 param;
};
+/* tx_data_wr.flags fields */
+#define S_TX_ACK_PAGES 21
+#define M_TX_ACK_PAGES 0x7
+#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
+#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
+
/* tx_data_wr.param fields */
#define S_TX_PORT 0
#define M_TX_PORT 0x7
#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
struct cpl_tx_data_ack {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be32 ack_seq;
};
struct cpl_wr_ack {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 credits;
__be16 rsvd;
__be32 snd_nxt;
__be32 snd_una;
};
+struct cpl_sge_ec_cr_return {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 sge_ec_id;
+ __u8 cr;
+ __u8 rsvd;
+};
+
struct cpl_rdma_ec_status {
- RSS_HDR union opcode_tid ot;
- __u8 rsvd[3];
- __u8 status;
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
};
struct mngt_pktsched_wr {
__be32 wr_hi;
__be32 wr_lo;
- __u8 mngt_opcode;
- __u8 rsvd[7];
- __u8 sched;
- __u8 idx;
- __u8 min;
- __u8 max;
- __u8 binding;
- __u8 rsvd1[3];
+ __u8 mngt_opcode;
+ __u8 rsvd[7];
+ __u8 sched;
+ __u8 idx;
+ __u8 min;
+ __u8 max;
+ __u8 binding;
+ __u8 rsvd1[3];
};
struct cpl_iscsi_hdr {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 pdu_len_ddp;
__be16 len;
__be32 seq;
__be16 urg;
- __u8 rsvd;
- __u8 status;
+ __u8 rsvd;
+ __u8 status;
};
/* cpl_iscsi_hdr.pdu_len_ddp fields */
#define F_ISCSI_DDP V_ISCSI_DDP(1U)
struct cpl_rx_data {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 rsvd;
__be16 len;
__be32 seq;
__be16 urg;
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 dack_mode:2;
- __u8 psh:1;
- __u8 heartbeat:1;
- __u8:4;
+ __u8 dack_mode:2;
+ __u8 psh:1;
+ __u8 heartbeat:1;
+ __u8 :4;
#else
- __u8:4;
- __u8 heartbeat:1;
- __u8 psh:1;
- __u8 dack_mode:2;
+ __u8 :4;
+ __u8 heartbeat:1;
+ __u8 psh:1;
+ __u8 dack_mode:2;
#endif
- __u8 status;
+ __u8 status;
};
struct cpl_rx_data_ack {
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
struct cpl_rx_urg_notify {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be32 seq;
};
struct cpl_rx_ddp_complete {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be32 ddp_report;
};
struct cpl_rx_data_ddp {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be16 urg;
__be16 len;
__be32 seq;
#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
+#define S_DDP_DACK_MODE 22
+#define M_DDP_DACK_MODE 0x3
+#define V_DDP_DACK_MODE(x) ((x) << S_DDP_DACK_MODE)
+#define G_DDP_DACK_MODE(x) (((x) >> S_DDP_DACK_MODE) & M_DDP_DACK_MODE)
+
#define S_DDP_URG 24
#define V_DDP_URG(x) ((x) << S_DDP_URG)
#define F_DDP_URG V_DDP_URG(1U)
__be32 len;
};
+struct cpl_tx_pkt_coalesce {
+ __be32 cntrl;
+ __be32 len;
+ __be64 addr;
+};
+
+struct tx_pkt_coalesce_wr {
+ WR_HDR;
+ struct cpl_tx_pkt_coalesce cpl[0];
+};
+
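+/* Sketch, for illustration only: a coalesced Tx WR is a WR header
+ * followed by a variable number of entries (cpl[0] is the pre-C99
+ * flexible-array idiom), so the entry count follows from the WR length:
+ *
+ *	n = (wr_len_bytes - sizeof(struct tx_pkt_coalesce_wr)) /
+ *	    sizeof(struct cpl_tx_pkt_coalesce);
+ */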
struct cpl_tx_pkt_lso {
WR_HDR;
__be32 cntrl;
__u8 rss_opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 err:1;
- __u8:7;
+ __u8 :7;
#else
- __u8:7;
+ __u8 :7;
__u8 err:1;
#endif
__u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 qid:4;
- __u8:4;
+ __u8 :4;
#else
- __u8:4;
+ __u8 :4;
__u8 qid:4;
#endif
__be32 tstamp;
-#endif /* CHELSIO_FW */
+#endif /* CHELSIO_FW */
- __u8 opcode;
+ __u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 iff:4;
- __u8:4;
+ __u8 iff:4;
+ __u8 :4;
#else
- __u8:4;
- __u8 iff:4;
+ __u8 :4;
+ __u8 iff:4;
#endif
- __u8 rsvd[4];
+ __u8 rsvd[4];
__be16 len;
};
struct cpl_rx_pkt {
- RSS_HDR __u8 opcode;
+ RSS_HDR
+ __u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 iff:4;
__u8 csum_valid:1;
WR_HDR;
union opcode_tid ot;
__be32 params;
- __u8 rsvd[2];
- __u8 dst_mac[6];
+ __u8 rsvd;
+ __u8 port_idx;
+ __u8 dst_mac[6];
};
/* cpl_l2t_write_req.params fields */
#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
struct cpl_l2t_write_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 status;
__u8 rsvd[3];
};
};
struct cpl_l2t_read_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__be32 params;
__u8 rsvd[2];
__u8 dst_mac[6];
#endif
__be16 rsvd2;
__be16 rsvd3;
- __u8 src_mac1[6];
+ __u8 src_mac1[6];
__be16 rsvd4;
- __u8 src_mac0[6];
+ __u8 src_mac0[6];
};
struct cpl_smt_write_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 status;
__u8 rsvd[3];
};
union opcode_tid ot;
__u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8:4;
+ __u8 :4;
__u8 iff:4;
#else
__u8 iff:4;
- __u8:4;
+ __u8 :4;
#endif
__be16 rsvd2;
};
struct cpl_smt_read_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 status;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 mtu_idx:4;
- __u8:4;
+ __u8 :4;
#else
- __u8:4;
+ __u8 :4;
__u8 mtu_idx:4;
#endif
__be16 rsvd2;
__be16 rsvd3;
- __u8 src_mac1[6];
+ __u8 src_mac1[6];
__be16 rsvd4;
- __u8 src_mac0[6];
+ __u8 src_mac0[6];
};
struct cpl_rte_delete_req {
#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
struct cpl_rte_delete_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 status;
__u8 rsvd[3];
};
WR_HDR;
union opcode_tid ot;
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8:6;
+ __u8 :6;
__u8 write_tcam:1;
__u8 write_l2t_lut:1;
#else
__u8 write_l2t_lut:1;
__u8 write_tcam:1;
- __u8:6;
+ __u8 :6;
#endif
__u8 rsvd[3];
__be32 lut_params;
#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
struct cpl_rte_write_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 status;
__u8 rsvd[3];
};
};
struct cpl_rte_read_rpl {
- RSS_HDR union opcode_tid ot;
+ RSS_HDR
+ union opcode_tid ot;
__u8 status;
__u8 rsvd0;
__be16 l2t_idx;
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8:7;
+ __u8 :7;
__u8 select:1;
#else
__u8 select:1;
- __u8:7;
+ __u8 :7;
#endif
__u8 rsvd2[3];
__be32 addr;
__u8 rsvd[2];
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 rspq:3;
- __u8:5;
+ __u8 :5;
#else
- __u8:5;
+ __u8 :5;
__u8 rspq:3;
#endif
__be32 tid_len;
#endif
__be32 msn;
__be32 mo;
- __u8 data[0];
+ __u8 data[0];
};
/* cpl_rdma_terminate.tid_len fields */
#define M_TERM_TID 0xFFFFF
#define V_TERM_TID(x) ((x) << S_TERM_TID)
#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
-#endif /* T3_CPL_H */
+
+/* ULP_TX opcodes */
+enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
+
+#define S_ULPTX_CMD 28
+#define M_ULPTX_CMD 0xF
+#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
+
+#define S_ULPTX_NFLITS 0
+#define M_ULPTX_NFLITS 0xFF
+#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
+
+struct ulp_mem_io {
+ WR_HDR;
+ __be32 cmd_lock_addr;
+ __be32 len;
+};
+
+/* ulp_mem_io.cmd_lock_addr fields */
+#define S_ULP_MEMIO_ADDR 0
+#define M_ULP_MEMIO_ADDR 0x7FFFFFF
+#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
+
+#define S_ULP_MEMIO_LOCK 27
+#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
+#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U)
+
+/* ulp_mem_io.len fields */
+#define S_ULP_MEMIO_DATA_LEN 28
+#define M_ULP_MEMIO_DATA_LEN 0xF
+#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
+
+struct ulp_txpkt {
+ __be32 cmd_dest;
+ __be32 len;
+};
+
+/* ulp_txpkt.cmd_dest fields */
+#define S_ULP_TXPKT_DEST 24
+#define M_ULP_TXPKT_DEST 0xF
+#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+
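+/* Hypothetical example, assuming @req points at a struct ulp_mem_io:
+ * composing the command word of a locked ULP memory write.  The 32-byte
+ * address granularity implied by ">> 5" is an assumption of this sketch,
+ * not something this header specifies:
+ *
+ *	req->cmd_lock_addr = htonl(V_ULPTX_CMD(ULP_MEM_WRITE) |
+ *				   V_ULP_MEMIO_LOCK(1) |
+ *				   V_ULP_MEMIO_ADDR(addr >> 5));
+ */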
+#endif /* T3_CPL_H */
/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This file is part of the Chelsio T3 Ethernet driver.
+ *
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
* Wait until an operation is completed by checking a bit in a register
* up to @attempts times. If @valp is not NULL the value of the register
* at the time it indicated completion is stored there. Returns 0 if the
- * operation completes and -EAGAIN otherwise.
+ * operation completes and -EAGAIN otherwise.
*/
-
-int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
- int polarity, int attempts, int delay, u32 *valp)
+int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
+ int attempts, int delay, u32 *valp)
{
while (1) {
u32 val = t3_read_reg(adapter, reg);
* value to the corresponding register. Register addresses are adjusted
* by the supplied offset.
*/
-void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
- int n, unsigned int offset)
+void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
+ unsigned int offset)
{
while (n--) {
t3_write_reg(adapter, p->reg_addr + offset, p->val);
* Sets a register field specified by the supplied mask to the
* given value.
*/
-void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
- u32 val)
+void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
{
u32 v = t3_read_reg(adapter, addr) & ~mask;
t3_write_reg(adapter, addr, v | val);
- t3_read_reg(adapter, addr); /* flush */
+ (void) t3_read_reg(adapter, addr); /* flush */
}
/**
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, u32 *vals,
- unsigned int nregs, unsigned int start_idx)
+void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx)
{
while (nregs--) {
t3_write_reg(adap, addr_reg, start_idx);
* accesses.
*/
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
- u64 *buf)
+ u64 *buf)
{
- static const int shift[] = { 0, 0, 16, 24 };
- static const int step[] = { 0, 32, 16, 8 };
+ static int shift[] = { 0, 0, 16, 24 };
+ static int step[] = { 0, 32, 16, 8 };
- unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
- struct adapter *adap = mc7->adapter;
+ unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
+ adapter_t *adap = mc7->adapter;
if (start >= size64 || start + n > size64)
return -EINVAL;
int attempts = 10;
u32 val;
- t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
+ t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
+ start);
t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
while ((val & F_BUSY) && attempts--)
val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
if (mc7->width == 0) {
val64 = t3_read_reg(adap,
- mc7->offset +
- A_MC7_BD_DATA0);
- val64 |= (u64) val << 32;
+ mc7->offset + A_MC7_BD_DATA0);
+ val64 |= (u64)val << 32;
} else {
if (mc7->width > 1)
val >>= shift[mc7->width];
- val64 |= (u64) val << (step[mc7->width] * i);
+ val64 |= (u64)val << (step[mc7->width] * i);
}
start += 8;
}
/*
* Initialize MI1.
*/
-static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
+static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
{
- u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
- u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
- V_CLKDIV(clkdiv);
+ u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
+ u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
+ V_CLKDIV(clkdiv);
if (!(ai->caps & SUPPORTED_10000baseT_Full))
val |= V_ST(1);
- t3_write_reg(adap, A_MI1_CFG, val);
+ t3_write_reg(adap, A_MI1_CFG, val);
}
-#define MDIO_ATTEMPTS 10
+#define MDIO_ATTEMPTS 20
/*
* MI1 read/write operations for direct-addressed PHYs.
*/
-static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
+static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *valp)
{
int ret;
if (mmd_addr)
return -EINVAL;
- mutex_lock(&adapter->mdio_lock);
+ MDIO_LOCK(adapter);
t3_write_reg(adapter, A_MI1_ADDR, addr);
t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
- ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
if (!ret)
*valp = t3_read_reg(adapter, A_MI1_DATA);
- mutex_unlock(&adapter->mdio_lock);
+ MDIO_UNLOCK(adapter);
return ret;
}
-static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
+static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val)
{
int ret;
if (mmd_addr)
return -EINVAL;
- mutex_lock(&adapter->mdio_lock);
+ MDIO_LOCK(adapter);
t3_write_reg(adapter, A_MI1_ADDR, addr);
t3_write_reg(adapter, A_MI1_DATA, val);
t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
- ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
- mutex_unlock(&adapter->mdio_lock);
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+ MDIO_UNLOCK(adapter);
return ret;
}
-static const struct mdio_ops mi1_mdio_ops = {
+static struct mdio_ops mi1_mdio_ops = {
mi1_read,
mi1_write
};
/*
* MI1 read/write operations for indirect-addressed PHYs.
*/
-static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
+static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *valp)
{
int ret;
u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
- mutex_lock(&adapter->mdio_lock);
+ MDIO_LOCK(adapter);
t3_write_reg(adapter, A_MI1_ADDR, addr);
t3_write_reg(adapter, A_MI1_DATA, reg_addr);
t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
- ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
if (!ret) {
t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
- MDIO_ATTEMPTS, 20);
+ MDIO_ATTEMPTS, 10);
if (!ret)
*valp = t3_read_reg(adapter, A_MI1_DATA);
}
- mutex_unlock(&adapter->mdio_lock);
+ MDIO_UNLOCK(adapter);
return ret;
}
-static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
+static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val)
{
int ret;
u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
- mutex_lock(&adapter->mdio_lock);
+ MDIO_LOCK(adapter);
t3_write_reg(adapter, A_MI1_ADDR, addr);
t3_write_reg(adapter, A_MI1_DATA, reg_addr);
t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
- ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
if (!ret) {
t3_write_reg(adapter, A_MI1_DATA, val);
t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
- MDIO_ATTEMPTS, 20);
+ MDIO_ATTEMPTS, 10);
}
- mutex_unlock(&adapter->mdio_lock);
+ MDIO_UNLOCK(adapter);
return ret;
}
-static const struct mdio_ops mi1_mdio_ext_ops = {
+static struct mdio_ops mi1_mdio_ext_ops = {
mi1_ext_read,
mi1_ext_write
};
return mdio_write(phy, 0, MII_ADVERTISE, val);
}
+/**
+ * t3_phy_advertise_fiber - set fiber PHY advertisement register
+ * @phy: the PHY to operate on
+ * @advert: bitmap of capabilities the PHY should advertise
+ *
+ * Sets a fiber PHY's advertisement register to advertise the
+ * requested capabilities.
+ */
+int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
+{
+ unsigned int val = 0;
+
+ if (advert & ADVERTISED_1000baseT_Half)
+ val |= ADVERTISE_1000XHALF;
+ if (advert & ADVERTISED_1000baseT_Full)
+ val |= ADVERTISE_1000XFULL;
+ if (advert & ADVERTISED_Pause)
+ val |= ADVERTISE_1000XPAUSE;
+ if (advert & ADVERTISED_Asym_Pause)
+ val |= ADVERTISE_1000XPSE_ASYM;
+ return mdio_write(phy, 0, MII_ADVERTISE, val);
+}
+
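+/* Usage sketch (illustrative only): advertising full-duplex 1000BASE-X
+ * with symmetric pause would look like
+ *
+ *	t3_phy_advertise_fiber(phy, ADVERTISED_1000baseT_Full |
+ *			       ADVERTISED_Pause);
+ */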
/**
* t3_set_phy_speed_duplex - force PHY speed and duplex
* @phy: the PHY to operate on
if (duplex == DUPLEX_FULL)
ctl |= BMCR_FULLDPLX;
}
- if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
+ if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
ctl |= BMCR_ANENABLE;
return mdio_write(phy, 0, MII_BMCR, ctl);
}
-static const struct adapter_info t3_adap_info[] = {
- {2, 0, 0, 0,
- F_GPIO2_OEN | F_GPIO4_OEN |
- F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
- 0,
- &mi1_mdio_ops, "Chelsio PE9000"},
- {2, 0, 0, 0,
- F_GPIO2_OEN | F_GPIO4_OEN |
- F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
- 0,
- &mi1_mdio_ops, "Chelsio T302"},
- {1, 0, 0, 0,
- F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
- F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
- 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
- &mi1_mdio_ext_ops, "Chelsio T310"},
- {2, 0, 0, 0,
- F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
- F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
- F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
- SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
- &mi1_mdio_ext_ops, "Chelsio T320"},
+static struct adapter_info t3_adap_info[] = {
+ { 1, 1, 0, 0, 0,
+ F_GPIO2_OEN | F_GPIO4_OEN |
+ F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
+ 0,
+ &mi1_mdio_ops, "Chelsio PE9000" },
+ { 1, 1, 0, 0, 0,
+ F_GPIO2_OEN | F_GPIO4_OEN |
+ F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
+ 0,
+ &mi1_mdio_ops, "Chelsio T302" },
+ { 1, 0, 0, 0, 0,
+ F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
+ F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+ 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio T310" },
+ { 1, 1, 0, 0, 0,
+ F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
+ F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
+ F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio T320" },
+ { 4, 0, 0, 0, 0,
+ F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
+ F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
+ F_GPIO1 | F_GPIO2 | F_GPIO3 | F_GPIO4, SUPPORTED_AUI,
+ &mi1_mdio_ops, "Chelsio T304" },
};
/*
return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}
-#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
- SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
-#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
-
-static const struct port_type_info port_types[] = {
- {NULL},
- {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
- "10GBASE-XR"},
- {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
- "10/100/1000BASE-T"},
- {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
- "10/100/1000BASE-T"},
- {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
- {NULL, CAPS_10G, "10GBASE-KX4"},
- {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
- {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
- "10GBASE-SR"},
- {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
+static struct port_type_info port_types[] = {
+ { NULL },
+ { t3_ael1002_phy_prep },
+ { t3_vsc8211_phy_prep },
+ { t3_mv88e1xxx_phy_prep },
+ { t3_xaui_direct_phy_prep },
+ { NULL },
+ { t3_qt2045_phy_prep },
+ { t3_ael1006_phy_prep },
+ { NULL },
};
-#undef CAPS_1G
-#undef CAPS_10G
-
#define VPD_ENTRY(name, len) \
u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
* VPD-R sections.
*/
struct t3_vpd {
- u8 id_tag;
- u8 id_len[2];
- u8 id_data[16];
- u8 vpdr_tag;
- u8 vpdr_len[2];
- VPD_ENTRY(pn, 16); /* part number */
- VPD_ENTRY(ec, 16); /* EC level */
- VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
- VPD_ENTRY(na, 12); /* MAC address base */
- VPD_ENTRY(cclk, 6); /* core clock */
- VPD_ENTRY(mclk, 6); /* mem clock */
- VPD_ENTRY(uclk, 6); /* uP clk */
- VPD_ENTRY(mdc, 6); /* MDIO clk */
- VPD_ENTRY(mt, 2); /* mem timing */
- VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
- VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
- VPD_ENTRY(port0, 2); /* PHY0 complex */
- VPD_ENTRY(port1, 2); /* PHY1 complex */
- VPD_ENTRY(port2, 2); /* PHY2 complex */
- VPD_ENTRY(port3, 2); /* PHY3 complex */
- VPD_ENTRY(rv, 1); /* csum */
- u32 pad; /* for multiple-of-4 sizing and alignment */
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[16];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+ VPD_ENTRY(pn, 16); /* part number */
+ VPD_ENTRY(ec, 16); /* EC level */
+ VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
+ VPD_ENTRY(na, 12); /* MAC address base */
+ VPD_ENTRY(cclk, 6); /* core clock */
+ VPD_ENTRY(mclk, 6); /* mem clock */
+ VPD_ENTRY(uclk, 6); /* uP clk */
+ VPD_ENTRY(mdc, 6); /* MDIO clk */
+ VPD_ENTRY(mt, 2); /* mem timing */
+ VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
+ VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
+ VPD_ENTRY(port0, 2); /* PHY0 complex */
+ VPD_ENTRY(port1, 2); /* PHY1 complex */
+ VPD_ENTRY(port2, 2); /* PHY2 complex */
+ VPD_ENTRY(port3, 2); /* PHY3 complex */
+ VPD_ENTRY(rv, 1); /* csum */
+ u32 pad; /* for multiple-of-4 sizing and alignment */
};
-#define EEPROM_MAX_POLL 4
+#define EEPROM_MAX_POLL 40
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00
 * address is written to the control register. The hardware device will
* set the flag to 1 when 4 bytes have been read into the data register.
*/
-int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
+int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
u16 val;
int attempts = EEPROM_MAX_POLL;
if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
return -EINVAL;
- pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
+ t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
do {
udelay(10);
- pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+ t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
} while (!(val & PCI_VPD_ADDR_F) && --attempts);
if (!(val & PCI_VPD_ADDR_F)) {
CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
return -EIO;
}
- pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
+ t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
*data = le32_to_cpu(*data);
return 0;
}
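+/* Usage sketch (illustrative only): reading the first 32-bit word of the
+ * VPD structure at EEPROM offset VPD_BASE:
+ *
+ *	u32 word;
+ *	int err = t3_seeprom_read(adapter, VPD_BASE, &word);
+ */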
* Write a 32-bit word to a location in VPD EEPROM using the card's PCI
* VPD ROM capability.
*/
-int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
+int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
u16 val;
int attempts = EEPROM_MAX_POLL;
if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
return -EINVAL;
- pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
- cpu_to_le32(data));
- pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
- addr | PCI_VPD_ADDR_F);
+ t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
+ cpu_to_le32(data));
+ t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
+ (u16)addr | PCI_VPD_ADDR_F);
do {
msleep(1);
- pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+ t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
} while ((val & PCI_VPD_ADDR_F) && --attempts);
if (val & PCI_VPD_ADDR_F) {
*
* Enables or disables write protection on the serial EEPROM.
*/
-int t3_seeprom_wp(struct adapter *adapter, int enable)
+int t3_seeprom_wp(adapter_t *adapter, int enable)
{
return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
*
* Reads card parameters stored in VPD EEPROM.
*/
-static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
int i, addr, ret;
struct t3_vpd vpd;
p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
} else {
- p->port_type[0] = hex2int(vpd.port0_data[0]);
- p->port_type[1] = hex2int(vpd.port1_data[0]);
+ p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
+ p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
+ p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
+ p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
}
return 0;
}
+/* BIOS boot header */
+typedef struct boot_header_s {
+ u8 signature[2]; /* signature */
+	u8	length;		/* image length in 512B units (includes header) */
+ u8 offset[4]; /* initialization vector */
+ u8 reserved[19]; /* reserved */
+ u8 exheader[2]; /* offset to expansion header */
+} boot_header_t;
+
/* serial flash and firmware constants */
enum {
- SF_ATTEMPTS = 5, /* max retries for SF1 operations */
- SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
- SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
+ SF_ATTEMPTS = 5, /* max retries for SF1 operations */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+ SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
/* flash command opcodes */
- SF_PROG_PAGE = 2, /* program page */
- SF_WR_DISABLE = 4, /* disable writes */
- SF_RD_STATUS = 5, /* read status register */
- SF_WR_ENABLE = 6, /* enable writes */
- SF_RD_DATA_FAST = 0xb, /* read flash */
- SF_ERASE_SECTOR = 0xd8, /* erase sector */
-
- FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
- FW_MIN_SIZE = 8 /* at least version and csum */
+ FW_MIN_SIZE = 8, /* at least version and csum */
+ FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
+
+	BOOT_FLASH_BOOT_ADDR = 0x0, /* start address of boot image in flash */
+	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
+	BOOT_SIZE_INC = 512,	   /* image size measured in 512B chunks */
+	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
+	BOOT_MAX_SIZE = 0xff * BOOT_SIZE_INC /* max for the 1-byte length field */
};
/**
* the read needs to be specified prior to calling this by issuing the
* appropriate commands to the serial flash.
*/
-static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
u32 *valp)
{
int ret;
* the write needs to be specified prior to calling this by issuing the
* appropriate commands to the serial flash.
*/
-static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
u32 val)
{
if (!byte_cnt || byte_cnt > 4)
*
* Wait for a flash operation to complete by polling the status register.
*/
-static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
{
int ret;
u32 status;
* (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
*/
-int t3_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented)
+int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
+ u32 *data, int byte_oriented)
{
int ret;
(ret = sf1_read(adapter, 1, 1, data)) != 0)
return ret;
- for (; nwords; nwords--, data++) {
+ for ( ; nwords; nwords--, data++) {
ret = sf1_read(adapter, 4, nwords > 1, data);
if (ret)
return ret;
* @addr: the start address to write
* @n: length of data to write
* @data: the data to write
+ * @byte_oriented: whether to store data as bytes or as words
*
* Writes up to a page of data (256 bytes) to the serial flash starting
* at the given address.
+ *	If @byte_oriented is set the write data is stored as a 32-bit
+ *	big-endian array, otherwise in the processor's native endianness.
+ */
-static int t3_write_flash(struct adapter *adapter, unsigned int addr,
- unsigned int n, const u8 *data)
+static int t3_write_flash(adapter_t *adapter, unsigned int addr,
+ unsigned int n, const u8 *data,
+ int byte_oriented)
{
int ret;
u32 buf[64];
- unsigned int i, c, left, val, offset = addr & 0xff;
+ unsigned int c, left, val, offset = addr & 0xff;
if (addr + n > SF_SIZE || offset + n > 256)
return -EINVAL;
for (left = n; left; left -= c) {
c = min(left, 4U);
- for (val = 0, i = 0; i < c; ++i)
- val = (val << 8) + *data++;
+ val = *(u32*)data;
+ data += c;
+ if (byte_oriented)
+ val = htonl(val);
ret = sf1_write(adapter, c, c != left, val);
if (ret)
return ret;
/* Read the page to verify the write succeeded */
- ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
+ byte_oriented);
if (ret)
return ret;
- if (memcmp(data - n, (u8 *) buf + offset, n))
+ if (memcmp(data - n, (u8 *)buf + offset, n))
return -EIO;
return 0;
}
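+/* As used later in this patch: firmware images are written byte-oriented
+ * (stored big-endian), while boot images are written in native order:
+ *
+ *	t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
+ *	t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
+ */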
*
* Reads the protocol sram version from sram.
*/
-int t3_get_tp_version(struct adapter *adapter, u32 *vers)
+int t3_get_tp_version(adapter_t *adapter, u32 *vers)
{
int ret;
1, 1, 5, 1);
if (ret)
return ret;
-
+
*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
return 0;
/**
* t3_check_tpsram_version - read the tp sram version
* @adapter: the adapter
 * @must_load: set to 1 if loading a new microcode image is required
 *
- * Reads the protocol sram version from flash.
+ * Reads the protocol SRAM version and checks it against the version
+ * this driver was compiled for.
*/
-int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
+int t3_check_tpsram_version(adapter_t *adapter, int *must_load)
{
int ret;
u32 vers;
ret = t3_get_tp_version(adapter, &vers);
if (ret)
return ret;
+
+ vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
major = G_TP_VERSION_MAJOR(vers);
minor = G_TP_VERSION_MINOR(vers);
- if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+ if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
return 0;
if (major != TP_VERSION_MAJOR)
}
/**
- * t3_check_tpsram - check if provided protocol SRAM
+ * t3_check_tpsram - check if provided protocol SRAM
* is compatible with this driver
* @adapter: the adapter
* @tp_sram: the firmware image to write
* Checks if an adapter's tp sram is compatible with the driver.
* Returns 0 if the versions are compatible, a negative error otherwise.
*/
-int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
+int t3_check_tpsram(adapter_t *adapter, u8 *tp_sram, unsigned int size)
{
u32 csum;
unsigned int i;
*
* Reads the FW version from flash.
*/
-int t3_get_fw_version(struct adapter *adapter, u32 *vers)
+int t3_get_fw_version(adapter_t *adapter, u32 *vers)
{
return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
/**
* t3_check_fw_version - check if the FW is compatible with this driver
* @adapter: the adapter
 * @must_load: set to 1 if loading a new FW image is required
-
+ *
* Checks if an adapter's FW is compatible with the driver. Returns 0
* if the versions are compatible, a negative error otherwise.
*/
-int t3_check_fw_version(struct adapter *adapter, int *must_load)
+int t3_check_fw_version(adapter_t *adapter, int *must_load)
{
int ret;
u32 vers;
CH_ERR(adapter, "found wrong FW version(%u.%u), "
"driver needs version %u.%u\n", major, minor,
FW_VERSION_MAJOR, FW_VERSION_MINOR);
- else if (minor < FW_VERSION_MINOR) {
+ else if ((int)minor < FW_VERSION_MINOR) {
*must_load = 0;
CH_WARN(adapter, "found old FW minor version(%u.%u), "
"driver compiled for version %u.%u\n", major, minor,
*
* Erases the sectors in the given range.
*/
-static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
+static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
{
while (start <= end) {
int ret;
* data, followed by 4 bytes of FW version, followed by the 32-bit
* 1's complement checksum of the whole image.
*/
-int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
+int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
{
u32 csum;
unsigned int i;
if ((size & 3) || size < FW_MIN_SIZE)
return -EINVAL;
- if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
+ if (size - 8 > FW_MAX_SIZE)
return -EFBIG;
for (csum = 0, i = 0; i < size / sizeof(csum); i++)
if (ret)
goto out;
- size -= 8; /* trim off version and checksum */
- for (addr = FW_FLASH_BOOT_ADDR; size;) {
+ size -= 8; /* trim off version and checksum */
+ for (addr = FW_FLASH_BOOT_ADDR; size; ) {
unsigned int chunk_size = min(size, 256U);
- ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
+ ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
if (ret)
goto out;
size -= chunk_size;
}
- ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
+ ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data, 1);
out:
if (ret)
CH_ERR(adapter, "firmware download failed, error %d\n", ret);
return ret;
}
+/**
+ * t3_load_boot - download boot flash
+ * @adapter: the adapter
+ * @boot_data: the boot image to write
+ * @size: image size
+ *
+ * Write the supplied boot image to the card's serial flash.
+ *	The image consists of a 28-byte boot header followed by the boot
+ *	code itself.
+ */
+int t3_load_boot(adapter_t *adapter, const u8 *boot_data, unsigned int size)
+{
+	const boot_header_t *header = (const boot_header_t *)boot_data;
+ int ret;
+ unsigned int addr;
+ unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
+ unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
+
+ /*
+ * Perform some primitive sanity testing to avoid accidentally
+ * writing garbage over the boot sectors. We ought to check for
+ * more but it's not worth it for now ...
+ */
+ if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
+ CH_ERR(adapter, "boot image too small/large\n");
+ return -EFBIG;
+ }
+	if (le16_to_cpu(*(const u16 *)header->signature) != BOOT_SIGNATURE) {
+ CH_ERR(adapter, "boot image missing signature\n");
+ return -EINVAL;
+ }
+ if (header->length * BOOT_SIZE_INC != size) {
+ CH_ERR(adapter, "boot image header length != image length\n");
+ return -EINVAL;
+ }
+
+ ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
+ if (ret)
+ goto out;
+
+ for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
+ unsigned int chunk_size = min(size, 256U);
+
+ ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
+ if (ret)
+ goto out;
+
+ addr += chunk_size;
+ boot_data += chunk_size;
+ size -= chunk_size;
+ }
+
+out:
+ if (ret)
+ CH_ERR(adapter, "boot image download failed, error %d\n", ret);
+ return ret;
+}
+
#define CIM_CTL_BASE 0x2000
/**
- * t3_cim_ctl_blk_read - read a block from CIM control region
- *
- * @adap: the adapter
- * @addr: the start address within the CIM control region
- * @n: number of words to read
- * @valp: where to store the result
+ * t3_cim_ctl_blk_read - read a block from CIM control region
+ * @adap: the adapter
+ * @addr: the start address within the CIM control region
+ * @n: number of words to read
+ * @valp: where to store the result
*
- * Reads a block of 4-byte words from the CIM control region.
+ * Reads a block of 4-byte words from the CIM control region.
*/
-int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
- unsigned int n, unsigned int *valp)
+int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp)
{
int ret = 0;
return ret;
}
-
/**
* t3_link_changed - handle interface link changes
* @adapter: the adapter
* to the associated PHY and MAC. After performing the common tasks it
* invokes an OS-specific handler.
*/
-void t3_link_changed(struct adapter *adapter, int port_id)
+void t3_link_changed(adapter_t *adapter, int port_id)
{
int link_ok, speed, duplex, fc;
struct port_info *pi = adap2pinfo(adapter, port_id);
t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
link_ok ? F_TXACTENABLE | F_RXEN : 0);
}
- lc->link_ok = link_ok;
+ lc->link_ok = (unsigned char)link_ok;
lc->speed = speed < 0 ? SPEED_INVALID : speed;
lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
if (lc->requested_fc & PAUSE_AUTONEG)
if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
/* Set MAC speed, duplex, and flow control to match PHY. */
t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
- lc->fc = fc;
+ lc->fc = (unsigned char)fc;
}
t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
fc);
/* Also disables autoneg */
phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
- phy->ops->reset(phy, 0);
} else
phy->ops->autoneg_enable(phy);
} else {
*
* Enables or disables HW extraction of VLAN tags for the given port.
*/
-void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
+void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
{
t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
ports << S_VLANEXTRACTIONENABLE,
}
struct intr_info {
- unsigned int mask; /* bits to check in interrupt status */
- const char *msg; /* message to print or NULL */
- short stat_idx; /* stat counter to increment or -1 */
- unsigned short fatal:1; /* whether the condition reported is fatal */
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal:1; /* whether the condition reported is fatal */
};
/**
* incrementing a stat counter. The table is terminated by an entry
* specifying mask 0. Returns the number of fatal interrupt conditions.
*/
-static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
+static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
unsigned int mask,
const struct intr_info *acts,
unsigned long *stats)
int fatal = 0;
unsigned int status = t3_read_reg(adapter, reg) & mask;
- for (; acts->mask; ++acts) {
- if (!(status & acts->mask))
- continue;
+ for ( ; acts->mask; ++acts) {
+ if (!(status & acts->mask)) continue;
if (acts->fatal) {
fatal++;
CH_ALERT(adapter, "%s (0x%x)\n",
if (acts->stat_idx >= 0)
stats[acts->stat_idx]++;
}
- if (status) /* clear processed interrupts */
+ if (status) /* clear processed interrupts */
t3_write_reg(adapter, reg, status);
return fatal;
}
/*
* Interrupt handler for the PCIX1 module.
*/
-static void pci_intr_handler(struct adapter *adapter)
-{
- static const struct intr_info pcix1_intr_info[] = {
- {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
- {F_SIGTARABT, "PCI signaled target abort", -1, 1},
- {F_RCVTARABT, "PCI received target abort", -1, 1},
- {F_RCVMSTABT, "PCI received master abort", -1, 1},
- {F_SIGSYSERR, "PCI signaled system error", -1, 1},
- {F_DETPARERR, "PCI detected parity error", -1, 1},
- {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
- {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
- {F_RCVSPLCMPERR, "PCI received split completion error", -1,
- 1},
- {F_DETCORECCERR, "PCI correctable ECC error",
- STAT_PCI_CORR_ECC, 0},
- {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
- {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
- {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
- 1},
- {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
- 1},
- {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
- 1},
- {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
- "error", -1, 1},
- {0}
+static void pci_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info pcix1_intr_info[] = {
+ { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
+ { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
+ { F_RCVTARABT, "PCI received target abort", -1, 1 },
+ { F_RCVMSTABT, "PCI received master abort", -1, 1 },
+ { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
+ { F_DETPARERR, "PCI detected parity error", -1, 1 },
+ { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
+ { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
+ { F_RCVSPLCMPERR, "PCI received split completion error", -1,
+ 1 },
+ { F_DETCORECCERR, "PCI correctable ECC error",
+ STAT_PCI_CORR_ECC, 0 },
+ { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
+ { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
+ { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
+ 1 },
+ { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
+ 1 },
+ { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
+ 1 },
+ { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
+ "error", -1, 1 },
+ { 0 }
};
if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
/*
* Interrupt handler for the PCIE module.
*/
-static void pcie_intr_handler(struct adapter *adapter)
-{
- static const struct intr_info pcie_intr_info[] = {
- {F_PEXERR, "PCI PEX error", -1, 1},
- {F_UNXSPLCPLERRR,
- "PCI unexpected split completion DMA read error", -1, 1},
- {F_UNXSPLCPLERRC,
- "PCI unexpected split completion DMA command error", -1, 1},
- {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
- {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
- {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
- {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
- {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
- "PCI MSI-X table/PBA parity error", -1, 1},
- {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
- {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
- {F_RXPARERR, "PCI Rx parity error", -1, 1},
- {F_TXPARERR, "PCI Tx parity error", -1, 1},
- {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
- {0}
+static void pcie_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info pcie_intr_info[] = {
+ { F_PEXERR, "PCI PEX error", -1, 1 },
+ { F_UNXSPLCPLERRR,
+ "PCI unexpected split completion DMA read error", -1, 1 },
+ { F_UNXSPLCPLERRC,
+ "PCI unexpected split completion DMA command error", -1, 1 },
+ { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
+ { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
+ { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
+ { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
+ { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
+ "PCI MSI-X table/PBA parity error", -1, 1 },
+ { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
+ { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
+ { F_RXPARERR, "PCI Rx parity error", -1, 1 },
+ { F_TXPARERR, "PCI Tx parity error", -1, 1 },
+ { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
+ { 0 }
};
if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
/*
* TP interrupt handler.
*/
-static void tp_intr_handler(struct adapter *adapter)
+static void tp_intr_handler(adapter_t *adapter)
{
- static const struct intr_info tp_intr_info[] = {
- {0xffffff, "TP parity error", -1, 1},
- {0x1000000, "TP out of Rx pages", -1, 1},
- {0x2000000, "TP out of Tx pages", -1, 1},
- {0}
+ static struct intr_info tp_intr_info[] = {
+ { 0xffffff, "TP parity error", -1, 1 },
+ { 0x1000000, "TP out of Rx pages", -1, 1 },
+ { 0x2000000, "TP out of Tx pages", -1, 1 },
+ { 0 }
};
-
static struct intr_info tp_intr_info_t3c[] = {
- {0x1fffffff, "TP parity error", -1, 1},
- {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
- {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
- {0}
+ { 0x1fffffff, "TP parity error", -1, 1 },
+ { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
+ { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { 0 }
};
if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
adapter->params.rev < T3_REV_C ?
- tp_intr_info : tp_intr_info_t3c, NULL))
+ tp_intr_info : tp_intr_info_t3c, NULL))
t3_fatal_err(adapter);
}
/*
* CIM interrupt handler.
*/
-static void cim_intr_handler(struct adapter *adapter)
-{
- static const struct intr_info cim_intr_info[] = {
- {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
- {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
- {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
- {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
- {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
- {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
- {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
- {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
- {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
- {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
- {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
- {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
- {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
- {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
- {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
- {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
- {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
- {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
- {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
- {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
- {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
- {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
- {F_ITAGPARERR, "CIM itag parity error", -1, 1},
- {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
- {0}
- };
-
- if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
+static void cim_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info cim_intr_info[] = {
+ { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
+ { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
+ { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
+ { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+ { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
+ { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+ { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
+ { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+ { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
+ { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
+ { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
+ { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
+ { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
+ { F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
+ { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
+ { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
+ { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
+ { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
+ { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
+ { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
+ { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
+ { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
+ { F_ITAGPARERR, "CIM itag parity error", -1, 1 },
+ { F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
cim_intr_info, NULL))
t3_fatal_err(adapter);
}
/*
* ULP RX interrupt handler.
*/
-static void ulprx_intr_handler(struct adapter *adapter)
-{
- static const struct intr_info ulprx_intr_info[] = {
- {F_PARERRDATA, "ULP RX data parity error", -1, 1},
- {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
- {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
- {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
- {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
- {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
- {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
- {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
- {0}
- };
+static void ulprx_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info ulprx_intr_info[] = {
+ { F_PARERRDATA, "ULP RX data parity error", -1, 1 },
+ { F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
+ { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
+ { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
+ { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
+ { F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
+ { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
+ { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
+ { 0 }
+ };
if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
ulprx_intr_info, NULL))
/*
* ULP TX interrupt handler.
*/
-static void ulptx_intr_handler(struct adapter *adapter)
+static void ulptx_intr_handler(adapter_t *adapter)
{
- static const struct intr_info ulptx_intr_info[] = {
- {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
- STAT_ULP_CH0_PBL_OOB, 0},
- {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
- STAT_ULP_CH1_PBL_OOB, 0},
- {0xfc, "ULP TX parity error", -1, 1},
- {0}
- };
+ static struct intr_info ulptx_intr_info[] = {
+ { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
+ STAT_ULP_CH0_PBL_OOB, 0 },
+ { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
+ STAT_ULP_CH1_PBL_OOB, 0 },
+ { 0xfc, "ULP TX parity error", -1, 1 },
+ { 0 }
+ };
if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
ulptx_intr_info, adapter->irq_stats))
/*
* PM TX interrupt handler.
*/
-static void pmtx_intr_handler(struct adapter *adapter)
-{
- static const struct intr_info pmtx_intr_info[] = {
- {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
- {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
- {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
- {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
- "PMTX ispi parity error", -1, 1},
- {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
- "PMTX ospi parity error", -1, 1},
- {0}
- };
+static void pmtx_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info pmtx_intr_info[] = {
+ { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
+ { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
+ { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
+ "PMTX ispi parity error", -1, 1 },
+ { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
+ "PMTX ospi parity error", -1, 1 },
+ { 0 }
+ };
if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
pmtx_intr_info, NULL))
/*
* PM RX interrupt handler.
*/
-static void pmrx_intr_handler(struct adapter *adapter)
-{
- static const struct intr_info pmrx_intr_info[] = {
- {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
- {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
- {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
- {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
- "PMRX ispi parity error", -1, 1},
- {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
- "PMRX ospi parity error", -1, 1},
- {0}
- };
+static void pmrx_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info pmrx_intr_info[] = {
+ { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
+ { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
+ { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
+ "PMRX ispi parity error", -1, 1 },
+ { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
+ "PMRX ospi parity error", -1, 1 },
+ { 0 }
+ };
if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
pmrx_intr_info, NULL))
/*
* CPL switch interrupt handler.
*/
-static void cplsw_intr_handler(struct adapter *adapter)
+static void cplsw_intr_handler(adapter_t *adapter)
{
- static const struct intr_info cplsw_intr_info[] = {
- {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
- {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
- {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
- {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
- {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
- {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
- {0}
- };
+ static struct intr_info cplsw_intr_info[] = {
+ { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
+ { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
+ { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
+ { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
+ { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
+ { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
+ { 0 }
+ };
if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
cplsw_intr_info, NULL))
/*
* MPS interrupt handler.
*/
-static void mps_intr_handler(struct adapter *adapter)
+static void mps_intr_handler(adapter_t *adapter)
{
- static const struct intr_info mps_intr_info[] = {
- {0x1ff, "MPS parity error", -1, 1},
- {0}
+ static struct intr_info mps_intr_info[] = {
+ { 0x1ff, "MPS parity error", -1, 1 },
+ { 0 }
};
if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
*/
static void mc7_intr_handler(struct mc7 *mc7)
{
- struct adapter *adapter = mc7->adapter;
+ adapter_t *adapter = mc7->adapter;
u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
if (cause & F_CE) {
/*
* XGMAC interrupt handler.
*/
-static int mac_intr_handler(struct adapter *adap, unsigned int idx)
+static int mac_intr_handler(adapter_t *adap, unsigned int idx)
{
- struct cmac *mac = &adap2pinfo(adap, idx)->mac;
- u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
+ u32 cause;
+ struct cmac *mac;
+
+ idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
+ mac = &adap2pinfo(adap, idx)->mac;
+ cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
mac->stats.tx_fifo_parity_err++;
/*
* Interrupt handler for PHY events.
*/
-int t3_phy_intr_handler(struct adapter *adapter)
+int t3_phy_intr_handler(adapter_t *adapter)
{
u32 mask, gpi = adapter_info(adapter)->gpio_intr;
u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
mask = gpi - (gpi & (gpi - 1));
gpi -= mask;
- if (!(p->port_type->caps & SUPPORTED_IRQ))
+ if (!(p->phy.caps & SUPPORTED_IRQ))
continue;
if (cause & mask) {
return 0;
}
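
The GPIO loop in t3_phy_intr_handler peels off pending interrupt bits one at a time with a two's-complement trick: `gpi & (gpi - 1)` clears the lowest set bit, so `gpi - (gpi & (gpi - 1))` isolates it. A small runnable illustration of the idiom:

#include <stdio.h>

int main(void)
{
        unsigned int gpi = 0x2c;        /* pending GPIO interrupt bits */

        while (gpi) {
                /* isolate the lowest set bit, then strip it */
                unsigned int mask = gpi - (gpi & (gpi - 1));

                gpi -= mask;
                printf("servicing bit 0x%x\n", mask);   /* 0x4, 0x8, 0x20 */
        }
        return 0;
}
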
-/*
- * T3 slow path (non-data) interrupt handler.
+/**
+ * t3_slow_intr_handler - control path interrupt handler
+ * @adapter: the adapter
+ *
+ * T3 interrupt handler for non-data interrupt events, e.g., errors.
+ *	The handler is termed 'slow' because it involves MMIO register reads,
+ *	whereas the data-path interrupts typically avoid MMIOs entirely.
*/
-int t3_slow_intr_handler(struct adapter *adapter)
+int t3_slow_intr_handler(adapter_t *adapter)
{
u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
/* Clear the interrupts just processed. */
t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
- t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
+ (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
return 1;
}
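
The `(void)` read-backs this patch adds throughout are the standard idiom for flushing posted MMIO writes: a write may linger in a bridge's posting buffer, but a read from the same device cannot pass it, so the read forces the write out. A toy sketch of the pattern, using a fake register array in place of the driver's t3_write_reg/t3_read_reg accessors:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_bar[64];   /* stand-in for the device's register window */

/* Toy accessors; real code would use readl/writel over mapped MMIO. */
static void mmio_write32(unsigned int reg, uint32_t val) { fake_bar[reg] = val; }
static uint32_t mmio_read32(unsigned int reg) { return fake_bar[reg]; }

static void clear_and_flush(unsigned int cause_reg, uint32_t bits)
{
        mmio_write32(cause_reg, bits);
        /*
         * On real hardware this read pushes the posted write above out to
         * the device before we return; the (void) cast documents that the
         * value read back is deliberately discarded.
         */
        (void) mmio_read32(cause_reg);
}

int main(void)
{
        clear_and_flush(0, 0xffffffffu);
        printf("reg0=0x%x\n", mmio_read32(0));
        return 0;
}
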
* various HW modules and then enabling the top-level interrupt
* concentrator.
*/
-void t3_intr_enable(struct adapter *adapter)
-{
- static const struct addr_val_pair intr_en_avp[] = {
- {A_SG_INT_ENABLE, SGE_INTR_MASK},
- {A_MC7_INT_ENABLE, MC7_INTR_MASK},
- {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
- MC7_INTR_MASK},
- {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
- MC7_INTR_MASK},
- {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
- {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
- {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
- {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
- {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
- {A_MPS_INT_ENABLE, MPS_INTR_MASK},
+void t3_intr_enable(adapter_t *adapter)
+{
+ static struct addr_val_pair intr_en_avp[] = {
+ { A_SG_INT_ENABLE, SGE_INTR_MASK },
+ { A_MC7_INT_ENABLE, MC7_INTR_MASK },
+ { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+ MC7_INTR_MASK },
+ { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+ MC7_INTR_MASK },
+ { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
+ { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
+ { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
+ { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
+ { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
+ { A_MPS_INT_ENABLE, MPS_INTR_MASK },
};
adapter->slow_intr_mask = PL_INTR_MASK;
else
t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
- t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
+ (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
}
/**
* Disable interrupts. We only disable the top-level interrupt
* concentrator and the SGE data interrupts.
*/
-void t3_intr_disable(struct adapter *adapter)
+void t3_intr_disable(adapter_t *adapter)
{
t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
- t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
+ (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
adapter->slow_intr_mask = 0;
}
*
* Clears all interrupts.
*/
-void t3_intr_clear(struct adapter *adapter)
+void t3_intr_clear(adapter_t *adapter)
{
static const unsigned int cause_reg_addr[] = {
A_SG_INT_CAUSE,
/* Clear PHY and MAC interrupts for each port. */
for_each_port(adapter, i)
- t3_port_intr_clear(adapter, i);
+ t3_port_intr_clear(adapter, i);
for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
if (is_pcie(adapter))
t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
- t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
+ (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
}
/**
* Enable port-specific (i.e., MAC and PHY) interrupts for the given
* adapter port.
*/
-void t3_port_intr_enable(struct adapter *adapter, int idx)
+void t3_port_intr_enable(adapter_t *adapter, int idx)
{
- struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+ struct port_info *pi = adap2pinfo(adapter, idx);
- t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
- t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
- phy->ops->intr_enable(phy);
+ t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
+ pi->phy.ops->intr_enable(&pi->phy);
}
/**
* Disable port-specific (i.e., MAC and PHY) interrupts for the given
* adapter port.
*/
-void t3_port_intr_disable(struct adapter *adapter, int idx)
+void t3_port_intr_disable(adapter_t *adapter, int idx)
{
- struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+ struct port_info *pi = adap2pinfo(adapter, idx);
- t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
- t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
- phy->ops->intr_disable(phy);
+ t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
+ pi->phy.ops->intr_disable(&pi->phy);
}
/**
* Clear port-specific (i.e., MAC and PHY) interrupts for the given
* adapter port.
*/
-void t3_port_intr_clear(struct adapter *adapter, int idx)
+void t3_port_intr_clear(adapter_t *adapter, int idx)
{
- struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+ struct port_info *pi = adap2pinfo(adapter, idx);
- t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
- t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
- phy->ops->intr_clear(phy);
+ t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
+ pi->phy.ops->intr_clear(&pi->phy);
}
#define SG_CONTEXT_CMD_ATTEMPTS 100
* Program an SGE context with the values already loaded in the
* CONTEXT_DATA? registers.
*/
-static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
+static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
unsigned int type)
{
t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
-static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
- unsigned int type)
+static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
{
t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
* platform allows concurrent context operations, the caller is
* responsible for appropriate locking.
*/
-int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
enum sge_context_type type, int respq, u64 base_addr,
unsigned int size, unsigned int token, int gen,
unsigned int cidx)
{
unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
- if (base_addr & 0xfff) /* must be 4K aligned */
+ if (base_addr & 0xfff) /* must be 4K aligned */
return -EINVAL;
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
- V_EC_BASE_LO(base_addr & 0xffff));
+ V_EC_BASE_LO((u32)base_addr & 0xffff));
base_addr >>= 16;
- t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
base_addr >>= 32;
t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
- V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
+ V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
F_EC_VALID);
return t3_sge_write_context(adapter, id, F_EGRESS);
* caller is responsible for ensuring only one context operation occurs
* at a time.
*/
-int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
- int gts_enable, u64 base_addr, unsigned int size,
- unsigned int bsize, unsigned int cong_thres, int gen,
- unsigned int cidx)
+int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
+ u64 base_addr, unsigned int size, unsigned int bsize,
+ unsigned int cong_thres, int gen, unsigned int cidx)
{
- if (base_addr & 0xfff) /* must be 4K aligned */
+ if (base_addr & 0xfff) /* must be 4K aligned */
return -EINVAL;
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
base_addr >>= 12;
- t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
base_addr >>= 32;
t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
- V_FL_BASE_HI((u32) base_addr) |
+ V_FL_BASE_HI((u32)base_addr) |
V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
* The caller is responsible for ensuring only one context operation
* occurs at a time.
*/
-int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
- int irq_vec_idx, u64 base_addr, unsigned int size,
+int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
+ u64 base_addr, unsigned int size,
unsigned int fl_thres, int gen, unsigned int cidx)
{
unsigned int intr = 0;
- if (base_addr & 0xfff) /* must be 4K aligned */
+ if (base_addr & 0xfff) /* must be 4K aligned */
return -EINVAL;
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
base_addr >>= 12;
t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
V_CQ_INDEX(cidx));
- t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
base_addr >>= 32;
if (irq_vec_idx >= 0)
intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
- V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
+ V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
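
These context loaders share two mechanics worth noting: the `base_addr & 0xfff` test rejects anything not 4 KB aligned, and the successive `>>=` shifts feed a 64-bit DMA address into 32-bit data registers one slice at a time; the `(u32)` casts this patch adds make each truncation explicit. A standalone sketch of the split (field widths are illustrative, not the exact SGE layout):

#include <stdint.h>
#include <stdio.h>

static int split_base_addr(uint64_t base, uint32_t out[2])
{
        if (base & 0xfff)               /* must be 4K aligned */
                return -1;

        base >>= 12;                    /* registers take the address in 4K units */
        out[0] = (uint32_t)base;        /* low 32 bits */
        base >>= 32;
        out[1] = (uint32_t)base;        /* remaining high bits */
        return 0;
}

int main(void)
{
        uint32_t w[2];

        if (!split_base_addr(0x123456789000ULL, w))
                printf("lo=0x%x hi=0x%x\n", w[0], w[1]);
        return 0;
}
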
* The caller is responsible for ensuring only one context operation
* occurs at a time.
*/
-int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
unsigned int size, int rspq, int ovfl_mode,
unsigned int credits, unsigned int credit_thres)
{
- if (base_addr & 0xfff) /* must be 4K aligned */
+ if (base_addr & 0xfff) /* must be 4K aligned */
return -EINVAL;
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
base_addr >>= 12;
t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
- t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
base_addr >>= 32;
t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
- V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
+ V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
V_CQ_ERR(ovfl_mode));
t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
* Enable or disable an SGE egress context. The caller is responsible for
* ensuring only one context operation occurs at a time.
*/
-int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
+int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
{
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
* Disable an SGE free-buffer list. The caller is responsible for
* ensuring only one context operation occurs at a time.
*/
-int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
+int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
{
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
* Disable an SGE response queue. The caller is responsible for
* ensuring only one context operation occurs at a time.
*/
-int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
+int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
{
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
* Disable an SGE completion queue. The caller is responsible for
* ensuring only one context operation occurs at a time.
*/
-int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
+int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
{
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
* @adapter: the adapter
* @id: the context id
* @op: the operation to perform
+ * @credits: credits to return to the CQ
*
* Perform the selected operation on an SGE completion queue context.
* The caller is responsible for ensuring only one context operation
* occurs at a time.
+ *
+ * For most operations the function returns the current HW position in
+ * the completion queue.
*/
-int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
unsigned int credits)
{
u32 val;
* Read an SGE egress context. The caller is responsible for ensuring
* only one context operation occurs at a time.
*/
-static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
+static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
unsigned int id, u32 data[4])
{
if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
* Read an SGE egress context. The caller is responsible for ensuring
* only one context operation occurs at a time.
*/
-int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
+int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
{
if (id >= 65536)
return -EINVAL;
* Read an SGE CQ context. The caller is responsible for ensuring
* only one context operation occurs at a time.
*/
-int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
+int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
{
if (id >= 65536)
return -EINVAL;
* Read an SGE free-list context. The caller is responsible for ensuring
* only one context operation occurs at a time.
*/
-int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
+int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
{
if (id >= SGE_QSETS * 2)
return -EINVAL;
* Read an SGE response queue context. The caller is responsible for
* ensuring only one context operation occurs at a time.
*/
-int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
+int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
{
if (id >= SGE_QSETS)
return -EINVAL;
* provide fewer values than the size of the tables the supplied values
* are used repeatedly until the tables are fully populated.
*/
-void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
- const u8 * cpus, const u16 *rspq)
+void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
+ const u16 *rspq)
{
int i, j, cpu_idx = 0, q_idx = 0;
*
* Reads the contents of the receive packet steering tables.
*/
-int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
+int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
{
int i;
u32 val;
val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
if (!(val & 0x80000000))
return -EAGAIN;
- *lkup++ = val;
- *lkup++ = (val >> 8);
+ *lkup++ = (u8)val;
+ *lkup++ = (u8)(val >> 8);
}
if (map)
val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
if (!(val & 0x80000000))
return -EAGAIN;
- *map++ = val;
+ *map++ = (u16)val;
}
return 0;
}
*
* Switches TP to NIC/offload mode.
*/
-void t3_tp_set_offload_mode(struct adapter *adap, int enable)
+void t3_tp_set_offload_mode(adapter_t *adap, int enable)
{
if (is_offload(adap) || !enable)
t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
V_NICMODE(!enable));
}
+/**
+ * tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @adap: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ t3_write_reg(adap, A_TP_PIO_ADDR, addr);
+ val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
+ t3_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
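
tp_wr_bits_indirect() is a read-modify-write through an indirect register window: write the target address to the PIO address register, read the current value via the data register, keep the bits outside the field, and merge in the new value. Note that this variant trusts the caller to pass @val already confined to @mask; a generic helper would mask both sides, as in the sketch below (the helper name is mine):

#include <stdint.h>

/* Generic read-modify-write: replace the bits selected by mask with val. */
static inline uint32_t rmw32(uint32_t old, uint32_t mask, uint32_t val)
{
        return (old & ~mask) | (val & mask);
}
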
/**
* pm_num_pages - calculate the number of pages of the payload memory
* @mem_size: the size of the payload memory
* Partitions context and payload memory and configures TP's memory
* registers.
*/
-static void partition_mem(struct adapter *adap, const struct tp_params *p)
+static void partition_mem(adapter_t *adap, const struct tp_params *p)
{
unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
unsigned int timers = 0, timers_shift = 22;
adap->params.mc5.nservers += m - tids;
}
-static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
- u32 val)
+static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
{
t3_write_reg(adap, A_TP_PIO_ADDR, addr);
t3_write_reg(adap, A_TP_PIO_DATA, val);
}
-static void tp_config(struct adapter *adap, const struct tp_params *p)
+static void tp_config(adapter_t *adap, const struct tp_params *p)
{
t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
F_MTUENABLE | V_WINDOWSCALEMODE(1) |
- V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
+ V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
- V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
+ V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
F_IPV6ENABLE | F_NICMODE);
t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
adap->params.rev > 0 ? F_ENABLEESND :
- F_T3A_ENABLEESND);
-
+ F_T3A_ENABLEESND);
t3_set_reg_field(adap, A_TP_PC_CONFIG,
F_ENABLEEPCMDAFULL,
F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
if (adap->params.rev > 0) {
tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
- t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
- F_TXPACEAUTO);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
+ F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
- t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
+ tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
+ tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
+ tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
} else
t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
-}
-/* Desired TP timer resolution in usec */
-#define TP_TMR_RES 50
+ if (adap->params.nports > 2) {
+ t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
+ F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
+ F_ENABLERXPORTFROMADDR);
+ tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
+ V_RXMAPMODE(M_RXMAPMODE), 0);
+ tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
+ V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
+ F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
+ F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
+ tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
+ tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
+ tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
+ }
+}
/* TCP timer values in ms */
#define TP_DACK_TIMER 50
* Set TP's timing parameters, such as the various timer resolutions and
* the TCP timer values.
*/
-static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
+static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
{
- unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
- unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
- unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
+ unsigned int tre = adap->params.tp.tre;
+ unsigned int dack_re = adap->params.tp.dack_re;
+ unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
unsigned int tps = core_clk >> tre;
t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
#define SECONDS * tps
- t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
+ t3_write_reg(adap, A_TP_MSL,
+ adap->params.rev > 0 ? 0 : 2 SECONDS);
t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
#undef SECONDS
}
+#ifdef CONFIG_CHELSIO_T3_CORE
/**
* t3_tp_set_coalescing_size - set receive coalescing size
* @adap: the adapter
*
* Set the receive coalescing size and PSH bit handling.
*/
-int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
+int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
{
u32 val;
* Set TP's max receive size. This is the limit that applies when
* receive coalescing is disabled.
*/
-void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
+void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
{
t3_write_reg(adap, A_TP_PARA_REG7,
V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
mtus[15] = 9600;
}
-/*
- * Initial congestion control parameters.
+/**
+ * init_cong_ctrl - initialize congestion control parameters
+ * @a: the alpha values for congestion control
+ * @b: the beta values for congestion control
+ *
+ * Initialize the congestion control parameters.
*/
static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
{
* t3_load_mtus - write the MTU and congestion control HW tables
* @adap: the adapter
* @mtus: the unrestricted values for the MTU table
- * @alphs: the values for the congestion control alpha parameter
+ * @alpha: the values for the congestion control alpha parameter
* @beta: the values for the congestion control beta parameter
* @mtu_cap: the maximum permitted effective MTU
*
* Update the high-speed congestion control table with the supplied alpha,
* beta, and MTUs.
*/
-void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
unsigned short alpha[NCCTRL_WIN],
unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
static const unsigned int avg_pkts[NCCTRL_WIN] = {
2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
- 28672, 40960, 57344, 81920, 114688, 163840, 229376
- };
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
unsigned int i, w;
unsigned int mtu = min(mtus[i], mtu_cap);
unsigned int log2 = fls(mtu);
- if (!(mtu & ((1 << log2) >> 2))) /* round */
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
log2--;
t3_write_reg(adap, A_TP_MTU_TABLE,
(i << 24) | (log2 << 16) | mtu);
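
The `fls(mtu)` / round sequence above computes log2(mtu) rounded to the nearest integer: fls() returns the 1-based index of the most-significant set bit, and testing the bit two places down decides whether the exponent rounds up or down around the 1.5 * 2^k midpoint. A userspace sketch (my_fls stands in for the kernel's fls() and assumes the GCC/Clang __builtin_clz):

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the MSB. */
static unsigned int my_fls(unsigned int v)
{
        return v ? 32 - __builtin_clz(v) : 0;
}

/* Round log2(mtu) to the nearest integer, as the MTU table load does. */
static unsigned int rounded_log2(unsigned int mtu)
{
        unsigned int log2 = my_fls(mtu);

        /* for mtu in [2^k, 2^(k+1)) the midpoint test is bit k-1 (1.5 * 2^k) */
        if (!(mtu & ((1 << log2) >> 2)))        /* round down */
                log2--;
        return log2;
}

int main(void)
{
        /* prints "10 10 11": 1024 and 1500 round down, 1600 rounds up */
        printf("%u %u %u\n", rounded_log2(1024), rounded_log2(1500),
               rounded_log2(1600));
        return 0;
}
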
*
* Reads the HW MTU table.
*/
-void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
+void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
{
int i;
* Reads the additive increments programmed into the HW congestion
* control table.
*/
-void t3_get_cong_cntl_tab(struct adapter *adap,
+void t3_get_cong_cntl_tab(adapter_t *adap,
unsigned short incr[NMTUS][NCCTRL_WIN])
{
unsigned int mtu, w;
for (w = 0; w < NCCTRL_WIN; ++w) {
t3_write_reg(adap, A_TP_CCTRL_TABLE,
0xffff0000 | (mtu << 5) | w);
- incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
- 0x1fff;
+ incr[mtu][w] = (unsigned short)t3_read_reg(adap,
+ A_TP_CCTRL_TABLE) & 0x1fff;
}
}
*
* Returns the values of TP's MIB counters.
*/
-void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
+void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
{
- t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
+ t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
sizeof(*tps) / sizeof(u32), 0);
}
+/**
+ * t3_read_pace_tbl - read the pace table
+ * @adap: the adapter
+ * @pace_vals: holds the returned values
+ *
+ * Returns the values of TP's pace table in nanoseconds.
+ */
+void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
+{
+ unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
+
+ for (i = 0; i < NTX_SCHED; i++) {
+ t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
+ pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
+ }
+}
+
+/**
+ * t3_set_pace_tbl - set the pace table
+ * @adap: the adapter
+ * @pace_vals: the pace values in nanoseconds
+ * @start: index of the first entry in the HW pace table to set
+ * @n: how many entries to set
+ *
+ * Sets (a subset of the) HW pace table.
+ */
+void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
+ unsigned int start, unsigned int n)
+{
+ unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
+
+ for ( ; n; n--, start++, pace_vals++)
+ t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
+ ((*pace_vals + tick_ns / 2) / tick_ns));
+}
+
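
The tick conversion in t3_set_pace_tbl, `(*pace_vals + tick_ns / 2) / tick_ns`, is the usual round-to-nearest form of unsigned integer division (plain `/` truncates toward zero). A tiny illustration:

#include <stdio.h>

/* Round-to-nearest unsigned division: add half the divisor before dividing. */
static unsigned int div_round(unsigned int x, unsigned int d)
{
        return (x + d / 2) / d;
}

int main(void)
{
        /* prints "2 2 3": truncation and rounding agree below the midpoint */
        printf("%u %u %u\n", 7499 / 3000, div_round(7499, 3000),
               div_round(7501, 3000));
        return 0;
}
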
#define ulp_region(adap, name, start, len) \
t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
(start) + (len) - 1)
-static void ulp_config(struct adapter *adap, const struct tp_params *p)
+static void ulp_config(adapter_t *adap, const struct tp_params *p)
{
unsigned int m = p->chan_rx_size;
t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
+
/**
* t3_set_proto_sram - set the contents of the protocol sram
* @adapter: the adapter
*
* Write the contents of the protocol SRAM.
*/
-int t3_set_proto_sram(struct adapter *adap, u8 *data)
+int t3_set_proto_sram(adapter_t *adap, u8 *data)
{
int i;
u32 *buf = (u32 *)data;
t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
-
+
t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
return -EIO;
}
- t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
-
return 0;
}
+#endif
-void t3_config_trace_filter(struct adapter *adapter,
- const struct trace_params *tp, int filter_index,
- int invert, int enable)
+/**
+ * t3_config_trace_filter - configure one of the tracing filters
+ * @adapter: the adapter
+ * @tp: the desired trace filter parameters
+ * @filter_index: which filter to configure
+ *	@invert: if set, non-matching packets are traced instead of matching ones
+ * @enable: whether to enable or disable the filter
+ *
+ * Configures one of the tracing filters available in HW.
+ */
+void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
+ int filter_index, int invert, int enable)
{
u32 addr, key[4], mask[4];
tp_wr_indirect(adapter, addr++, key[2]);
tp_wr_indirect(adapter, addr++, mask[2]);
tp_wr_indirect(adapter, addr++, key[3]);
- tp_wr_indirect(adapter, addr, mask[3]);
- t3_read_reg(adapter, A_TP_PIO_DATA);
+ tp_wr_indirect(adapter, addr, mask[3]);
+ (void) t3_read_reg(adapter, A_TP_PIO_DATA);
}
/**
* @kbps: target rate in Kbps
* @sched: the scheduler index
*
- * Configure a HW scheduler for the target rate
+ * Configure a Tx HW scheduler for the target rate.
*/
-int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
+int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
{
unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
unsigned int clk = adap->params.vpd.cclk * 1000;
unsigned int selected_cpt = 0, selected_bpt = 0;
if (kbps > 0) {
- kbps *= 125; /* -> bytes */
+ kbps *= 125; /* -> bytes */
for (cpt = 1; cpt <= 255; cpt++) {
tps = clk / cpt;
bpt = (kbps + tps / 2) / tps;
if (bpt > 0 && bpt <= 255) {
v = bpt * tps;
delta = v >= kbps ? v - kbps : kbps - v;
- if (delta <= mindelta) {
+ if (delta < mindelta) {
mindelta = delta;
selected_cpt = cpt;
selected_bpt = bpt;
return 0;
}
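
The rate programming above is a small exhaustive search: for each 8-bit clocks-per-tick divisor cpt it derives the tick rate, computes the bytes-per-tick bpt that best approximates the target, and keeps the (cpt, bpt) pair with the smallest absolute error; the patch's change from `<=` to `<` makes the search keep the first of equally good pairs (smallest cpt) rather than the last. A standalone sketch of the same search, working directly in bytes/sec (names are illustrative):

#include <stdio.h>

/*
 * Find 8-bit (cpt, bpt) so that sending bpt bytes every cpt clocks best
 * approximates a target rate in bytes/sec, given a core clock in Hz.
 */
static void pick_sched_params(unsigned int clk, unsigned int target_bps,
                              unsigned int *best_cpt, unsigned int *best_bpt)
{
        unsigned int cpt, tps, bpt, v, delta, mindelta = ~0U;

        *best_cpt = *best_bpt = 0;
        for (cpt = 1; cpt <= 255; cpt++) {
                tps = clk / cpt;                        /* ticks per second */
                bpt = (target_bps + tps / 2) / tps;     /* bytes/tick, rounded */
                if (bpt > 0 && bpt <= 255) {
                        v = bpt * tps;                  /* achieved rate */
                        delta = v >= target_bps ? v - target_bps
                                                : target_bps - v;
                        if (delta < mindelta) {
                                mindelta = delta;
                                *best_cpt = cpt;
                                *best_bpt = bpt;
                        }
                }
        }
}

int main(void)
{
        unsigned int cpt, bpt;

        /* 1 Gbps (125e6 bytes/sec) on a 200 MHz clock: exact at cpt=8, bpt=5 */
        pick_sched_params(200000000, 125000000, &cpt, &bpt);
        printf("cpt=%u bpt=%u\n", cpt, bpt);
        return 0;
}
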
-static int tp_init(struct adapter *adap, const struct tp_params *p)
+/**
+ * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
+ * @adap: the adapter
+ * @sched: the scheduler index
+ * @ipg: the interpacket delay in tenths of nanoseconds
+ *
+ * Set the interpacket delay for a HW packet rate scheduler.
+ */
+int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
+{
+ unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
+
+ /* convert ipg to nearest number of core clocks */
+ ipg *= core_ticks_per_usec(adap);
+ ipg = (ipg + 5000) / 10000;
+ if (ipg > 0xffff)
+ return -EINVAL;
+
+ t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+ v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v = (v & 0xffff) | (ipg << 16);
+ else
+ v = (v & 0xffff0000) | ipg;
+ t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
+ t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ return 0;
+}
+
+/**
+ * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
+ * @adap: the adapter
+ * @sched: the scheduler index
+ * @kbps: the byte rate in Kbps
+ * @ipg: the interpacket delay in tenths of nanoseconds
+ *
+ * Return the current configuration of a HW Tx scheduler.
+ */
+void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
+ unsigned int *ipg)
+{
+ unsigned int v, addr, bpt, cpt;
+
+ if (kbps) {
+ addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
+ t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+ v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v >>= 16;
+ bpt = (v >> 8) & 0xff;
+ cpt = v & 0xff;
+ if (!cpt)
+ *kbps = 0; /* scheduler disabled */
+ else {
+ v = (adap->params.vpd.cclk * 1000) / cpt;
+ *kbps = (v * bpt) / 125;
+ }
+ }
+ if (ipg) {
+ addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
+ t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+ v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v >>= 16;
+ v &= 0xffff;
+ *ipg = (10000 * v) / core_ticks_per_usec(adap);
+ }
+}
+
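
t3_set_sched_ipg and t3_get_tx_sched pack two schedulers' settings into each 32-bit register: even schedulers occupy the low half-word, odd ones the high half-word, selected by `sched & 1`. The merge is plain mask-and-shift, sketched below (the helper names are mine):

#include <stdint.h>
#include <stdio.h>

/* Pack a 16-bit value into the low (even idx) or high (odd idx) half. */
static uint32_t set_halfword(uint32_t reg, unsigned int idx, uint16_t val)
{
        if (idx & 1)
                return (reg & 0x0000ffffu) | ((uint32_t)val << 16);
        return (reg & 0xffff0000u) | val;
}

static uint16_t get_halfword(uint32_t reg, unsigned int idx)
{
        return (uint16_t)(idx & 1 ? reg >> 16 : reg);
}

int main(void)
{
        uint32_t reg = 0;

        reg = set_halfword(reg, 0, 0x1234);     /* even scheduler -> low half */
        reg = set_halfword(reg, 1, 0xabcd);     /* odd scheduler -> high half */
        printf("reg=0x%08x even=0x%x odd=0x%x\n",
               reg, get_halfword(reg, 0), get_halfword(reg, 1));
        return 0;
}
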
+/**
+ * tp_init - configure TP
+ * @adap: the adapter
+ * @p: TP configuration parameters
+ *
+ * Initializes the TP HW module.
+ */
+static int tp_init(adapter_t *adap, const struct tp_params *p)
{
int busy = 0;
return busy;
}
-int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
+/**
+ * t3_mps_set_active_ports - configure port failover
+ * @adap: the adapter
+ * @port_mask: bitmap of active ports
+ *
+ * Sets the active ports according to the supplied bitmap.
+ */
+int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
{
if (port_mask & ~((1 << adap->params.nports) - 1))
return -EINVAL;
return 0;
}
-/*
- * Perform the bits of HW initialization that are dependent on the number
- * of available ports.
+/**
+ * chan_init_hw - channel-dependent HW initialization
+ * @adap: the adapter
+ * @chan_map: bitmap of Tx channels being used
+ *
+ * Perform the bits of HW initialization that are dependent on the Tx
+ * channels being used.
*/
-static void init_hw_for_avail_ports(struct adapter *adap, int nports)
+static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
{
int i;
- if (nports == 1) {
+ if (chan_map != 3) { /* one channel */
t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
- t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
- F_PORT0ACTIVE | F_ENFORCEPKT);
- t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
- } else {
+ t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
+ (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
+ F_TPTXPORT1EN | F_PORT1ACTIVE));
+ t3_write_reg(adap, A_PM1_TX_CFG,
+ chan_map == 1 ? 0xffffffff : 0);
+ if (chan_map == 2)
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ V_TX_MOD_QUEUE_REQ_MAP(0xff));
+ t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
+ t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
+ } else { /* two channels */
t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
for (i = 0; i < 16; i++)
t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
(i << 16) | 0x1010);
+ t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
+ t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
}
}
-static int calibrate_xgm(struct adapter *adapter)
+static int calibrate_xgm(adapter_t *adapter)
{
if (uses_xaui(adapter)) {
unsigned int v, i;
for (i = 0; i < 5; ++i) {
t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
- t3_read_reg(adapter, A_XGM_XAUI_IMP);
+ (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
msleep(1);
v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
return 0;
}
-static void calibrate_xgm_t3b(struct adapter *adapter)
+static void calibrate_xgm_t3b(adapter_t *adapter)
{
if (!uses_xaui(adapter)) {
t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
* writes normally complete in a cycle or two, so one read should suffice.
* The very first read exists to flush the posted write to the device.
*/
-static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
+static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
{
- t3_write_reg(adapter, addr, val);
- t3_read_reg(adapter, addr); /* flush */
+ t3_write_reg(adapter, addr, val);
+ (void) t3_read_reg(adapter, addr); /* flush */
if (!(t3_read_reg(adapter, addr) & F_BUSY))
return 0;
CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
0x632, 0x642, 0x652, 0x432, 0x442
};
static const struct mc7_timing_params mc7_timings[] = {
- {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
- {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
- {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
- {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
- {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
+ { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
+ { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
+ { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
+ { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
+ { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
};
u32 val;
unsigned int width, density, slow, attempts;
- struct adapter *adapter = mc7->adapter;
+ adapter_t *adapter = mc7->adapter;
const struct mc7_timing_params *p = &mc7_timings[mem_type];
if (!mc7->size)
density = G_DEN(val);
t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
- val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
msleep(1);
if (!slow) {
t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
- t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
+ (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
msleep(1);
if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
(F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
val | F_CLKEN | F_TERM150);
- t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
+ (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
if (!slow)
t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
if (!slow) {
t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
- t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
+ F_DLLRST, 0);
udelay(5);
}
goto out_fail;
/* clock value is in KHz */
- mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
- mc7_clock /= 1000000; /* KHz->MHz, ns->us */
+ mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
+ mc7_clock /= 1000000; /* KHz->MHz, ns->us */
t3_write_reg(adapter, mc7->offset + A_MC7_REF,
F_PERREFEN | V_PREREFDIV(mc7_clock));
- t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
+ (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
- t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
+ t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
+ F_ECCGENEN | F_ECCCHKEN);
t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
(mc7->size << width) - 1);
t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
- t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
+ (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
attempts = 50;
do {
t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
return 0;
-out_fail:
+ out_fail:
return -1;
}
-static void config_pcie(struct adapter *adap)
+static void config_pcie(adapter_t *adap)
{
static const u16 ack_lat[4][6] = {
- {237, 416, 559, 1071, 2095, 4143},
- {128, 217, 289, 545, 1057, 2081},
- {73, 118, 154, 282, 538, 1050},
- {67, 107, 86, 150, 278, 534}
+ { 237, 416, 559, 1071, 2095, 4143 },
+ { 128, 217, 289, 545, 1057, 2081 },
+ { 73, 118, 154, 282, 538, 1050 },
+ { 67, 107, 86, 150, 278, 534 }
};
static const u16 rpl_tmr[4][6] = {
- {711, 1248, 1677, 3213, 6285, 12429},
- {384, 651, 867, 1635, 3171, 6243},
- {219, 354, 462, 846, 1614, 3150},
- {201, 321, 258, 450, 834, 1602}
+ { 711, 1248, 1677, 3213, 6285, 12429 },
+ { 384, 651, 867, 1635, 3171, 6243 },
+ { 219, 354, 462, 846, 1614, 3150 },
+ { 201, 321, 258, 450, 834, 1602 }
};
u16 val;
unsigned int log2_width, pldsize;
unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
- pci_read_config_word(adap->pdev,
- adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
- &val);
+ t3_os_pci_read_config_2(adap,
+ adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
+ &val);
pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
- pci_read_config_word(adap->pdev,
- adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
- &val);
+
+ t3_os_pci_read_config_2(adap,
+ adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
+ &val);
fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
- G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
+ G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
log2_width = fls(adap->params.pci.width) - 1;
acklat = ack_lat[log2_width][pldsize];
- if (val & 1) /* check LOsEnable */
+ if (val & 1) /* check LOsEnable */
acklat += fst_trn_tx * 4;
rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
-/*
- * Initialize and configure T3 HW modules. This performs the
- * initialization steps that need to be done once after a card is reset.
- * MAC and PHY initialization is handled separarely whenever a port is enabled.
+/**
+ * t3_init_hw - initialize and configure T3 HW modules
+ * @adapter: the adapter
+ * @fw_params: initial parameters to pass to firmware (optional)
*
- * fw_params are passed to FW and their value is platform dependent. Only the
- * top 8 bits are available for use, the rest must be 0.
+ * Initialize and configure T3 HW modules. This performs the
+ * initialization steps that need to be done once after a card is reset.
+ *	MAC and PHY initialization is handled separately whenever a port is
+ *	enabled.
+ *
+ *	@fw_params are passed to FW and their values are platform dependent.
+ *	Only the top 8 bits are available for use; the rest must be 0.
*/
-int t3_init_hw(struct adapter *adapter, u32 fw_params)
+int t3_init_hw(adapter_t *adapter, u32 fw_params)
{
int err = -EIO, attempts, i;
const struct vpd_params *vpd = &adapter->params.vpd;
if (adapter->params.rev > 0)
calibrate_xgm_t3b(adapter);
- else if (calibrate_xgm(adapter)) {
+ else if (calibrate_xgm(adapter))
goto out_err;
- }
+
+ if (adapter->params.nports > 2)
+ t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
if (vpd->mclk) {
partition_mem(adapter, &adapter->params.tp);
mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
- adapter->params.mc5.nfilters,
- adapter->params.mc5.nroutes))
+ adapter->params.mc5.nfilters,
+ adapter->params.mc5.nroutes))
goto out_err;
for (i = 0; i < 32; i++)
goto out_err;
}
- if (tp_init(adapter, &adapter->params.tp)) {
+ if (tp_init(adapter, &adapter->params.tp))
goto out_err;
- }
+#ifdef CONFIG_CHELSIO_T3_CORE
t3_tp_set_coalescing_size(adapter,
min(adapter->params.sge.max_pkt_size,
MAX_RX_COALESCING_LEN), 1);
t3_tp_set_max_rxsize(adapter,
min(adapter->params.sge.max_pkt_size, 16384U));
ulp_config(adapter, &adapter->params.tp);
-
+#endif
if (is_pcie(adapter))
config_pcie(adapter);
else
t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
t3_write_reg(adapter, A_PM1_RX_MODE, 0);
t3_write_reg(adapter, A_PM1_TX_MODE, 0);
- init_hw_for_avail_ports(adapter, adapter->params.nports);
+ chan_init_hw(adapter, adapter->params.chan_map);
t3_sge_init(adapter, &adapter->params.sge);
t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
t3_write_reg(adapter, A_CIM_BOOT_CFG,
V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
- t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
+ (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
attempts = 100;
- do { /* wait for uP to initialize */
+ do { /* wait for uP to initialize */
msleep(20);
} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
if (!attempts) {
}
err = 0;
-out_err:
+ out_err:
return err;
}
* Determines a card's PCI mode and associated parameters, such as speed
* and width.
*/
-static void __devinit get_pci_mode(struct adapter *adapter,
- struct pci_params *p)
+static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
{
static unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode, pcie_cap;
- pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
if (pcie_cap) {
u16 val;
p->variant = PCI_VARIANT_PCIE;
p->pcie_cap_addr = pcie_cap;
- pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+ t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
&val);
p->width = (val >> 4) & 0x3f;
return;
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
- * @ai: information about the current card
+ * @caps: link capabilities
*
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/duplex/flow-control/autonegotiation
return MBs << 20;
}
-static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
+static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
unsigned int base_addr, const char *name)
{
u32 cfg;
mc7->name = name;
mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
- mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
+ mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
mc7->width = G_WIDTH(cfg);
}
-void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
+void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
{
mac->adapter = adapter;
+ mac->multiport = adapter->params.nports > 2;
+ if (mac->multiport) {
+ mac->ext_port = (unsigned char)index;
+ mac->nucast = 8;
+ index = 0;
+ } else
+ mac->nucast = 1;
+
mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
- mac->nucast = 1;
if (adapter->params.rev == 0 && uses_xaui(adapter)) {
t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
}
}
-void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
+/**
+ * early_hw_init - HW initialization done at card detection time
+ * @adapter: the adapter
+ * @ai: contains information about the adapter type and properties
+ *
+ *	Performs the part of HW initialization that is done early on when the
+ *	driver first detects the card. Most of the HW state is initialized
+ *	lazily later on when a port or an offload function is first used.
+ */
+void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
{
- u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
+ u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
+ 3 : 2);
mi1_init(adapter, ai);
- t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
+ t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
t3_write_reg(adapter, A_T3DBG_GPIO_EN,
ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
/* Enable MAC clocks so we can access the registers */
t3_write_reg(adapter, A_XGM_PORT_CFG, val);
- t3_read_reg(adapter, A_XGM_PORT_CFG);
+ (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
val |= F_CLKDIVRESET_;
t3_write_reg(adapter, A_XGM_PORT_CFG, val);
- t3_read_reg(adapter, A_XGM_PORT_CFG);
+ (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
- t3_read_reg(adapter, A_XGM_PORT_CFG);
+ (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
}
-/*
- * Reset the adapter.
- * Older PCIe cards lose their config space during reset, PCI-X
- * ones don't.
+/**
+ * t3_reset_adapter - reset the adapter
+ * @adapter: the adapter
+ *
+ * Reset the adapter.
*/
-static int t3_reset_adapter(struct adapter *adapter)
+int t3_reset_adapter(adapter_t *adapter)
{
- int i, save_and_restore_pcie =
+ int i, save_and_restore_pcie =
adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
uint16_t devid = 0;
if (save_and_restore_pcie)
- pci_save_state(adapter->pdev);
+ t3_os_pci_save_state(adapter);
t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
- /*
+ /*
	 * Delay: give the device some time to reset fully.
	 * XXX The delay time should be tuned.
*/
for (i = 0; i < 10; i++) {
msleep(50);
- pci_read_config_word(adapter->pdev, 0x00, &devid);
+ t3_os_pci_read_config_2(adapter, 0x00, &devid);
if (devid == 0x1425)
break;
}
return -1;
if (save_and_restore_pcie)
- pci_restore_state(adapter->pdev);
+ t3_os_pci_restore_state(adapter);
return 0;
}
-static int __devinit init_parity(struct adapter *adap)
+static int __devinit init_parity(adapter_t *adap)
{
- int i, err, addr;
+ int i, err, addr;
if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
return 0;
}
-/*
- * Initialize adapter SW state for the various HW modules, set initial values
- * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
- * interface.
+/**
+ * t3_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ * @ai: contains information about the adapter type and properties
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
*/
-int __devinit t3_prep_adapter(struct adapter *adapter,
+int __devinit t3_prep_adapter(adapter_t *adapter,
const struct adapter_info *ai, int reset)
{
int ret;
get_pci_mode(adapter, &adapter->params.pci);
adapter->params.info = ai;
- adapter->params.nports = ai->nports;
+ adapter->params.nports = ai->nports0 + ai->nports1;
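+	/* chan_map: bit 0 set if Tx/Rx channel 0 is in use, bit 1 for channel 1 */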
+ adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
adapter->params.linkpoll_period = 0;
- adapter->params.stats_update_period = is_10G(adapter) ?
- MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
+ if (adapter->params.nports > 2)
+ adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
+ else
+ adapter->params.stats_update_period = is_10G(adapter) ?
+ MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
adapter->params.pci.vpd_cap_addr =
- pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
- p->nchan = ai->nports;
+ p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
p->pmrx_size = t3_mc7_size(&adapter->pmrx);
p->pmtx_size = t3_mc7_size(&adapter->pmtx);
p->cm_size = t3_mc7_size(&adapter->cm);
- p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
+ p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
p->chan_tx_size = p->pmtx_size / p->nchan;
p->rx_pg_size = 64 * 1024;
p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
p->ntimer_qs = p->cm_size >= (128 << 20) ||
- adapter->params.rev > 0 ? 12 : 6;
+ adapter->params.rev > 0 ? 12 : 6;
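+		/* TP timer and delayed-ACK resolutions, as log2 of core-clock cycles */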
+		p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) - 1;
+ p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
}
adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
if (is_offload(adapter)) {
adapter->params.mc5.nservers = DEFAULT_NSERVERS;
adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
- DEFAULT_NFILTERS : 0;
+ DEFAULT_NFILTERS : 0;
adapter->params.mc5.nroutes = 0;
t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
+#ifdef CONFIG_CHELSIO_T3_CORE
init_mtus(adapter->params.mtus);
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+#endif
}
early_hw_init(adapter, ai);
if (ret)
return ret;
+ if (adapter->params.nports > 2 &&
+ (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
+ return ret;
+
for_each_port(adapter, i) {
u8 hw_addr[6];
+ const struct port_type_info *pti;
struct port_info *p = adap2pinfo(adapter, i);
while (!adapter->params.vpd.port_type[j])
++j;
- p->port_type = &port_types[adapter->params.vpd.port_type[j]];
- p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
- ai->mdio_ops);
+ pti = &port_types[adapter->params.vpd.port_type[j]];
+ ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+ ai->mdio_ops);
+ if (ret)
+ return ret;
mac_prep(&p->mac, adapter, j);
++j;
memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr,
- ETH_ALEN);
- memcpy(adapter->port[i]->perm_addr, hw_addr,
- ETH_ALEN);
- init_link_config(&p->link_config, p->port_type->caps);
+ t3_os_set_hw_addr(adapter, i, hw_addr);
+ init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
- if (!(p->port_type->caps & SUPPORTED_IRQ))
+ if (!(p->phy.caps & SUPPORTED_IRQ))
adapter->params.linkpoll_period = 10;
}
return 0;
}
-void t3_led_ready(struct adapter *adapter)
+void t3_led_ready(adapter_t *adapter)
{
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
F_GPIO0_OUT_VAL);
}
+
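+/*
+ * Port failover support: steer all MPS traffic to the surviving port, and
+ * re-enable both ports once failover is done or cleared.
+ */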
+void t3_port_failover(adapter_t *adapter, int port)
+{
+ u32 val;
+
+ val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
+ t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
+ val);
+}
+
+void t3_failover_done(adapter_t *adapter, int port)
+{
+ t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
+ F_PORT0ACTIVE | F_PORT1ACTIVE);
+}
+
+void t3_failover_clear(adapter_t *adapter)
+{
+ t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
+ F_PORT0ACTIVE | F_PORT1ACTIVE);
+}
/*
- * Copyright (C) 2006-2007 Chelsio Communications. All rights reserved.
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#ifndef _T3CDEV_H_
#define _T3CDEV_H_
enum t3ctype {
T3A = 0,
- T3B
+ T3B,
+ T3C,
};
struct t3cdev {
- char name[T3CNAMSIZ]; /* T3C device name */
+ char name[T3CNAMSIZ]; /* T3C device name */
enum t3ctype type;
- struct list_head ofld_dev_list; /* for list linking */
- struct net_device *lldev; /* LL dev associated with T3C messages */
- struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
+ struct list_head ofld_dev_list; /* for list linking */
+ struct net_device *lldev; /* LL dev associated with T3C messages */
+ struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
int (*send)(struct t3cdev *dev, struct sk_buff *skb);
int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
- void *priv; /* driver private data */
- void *l2opt; /* optional layer 2 data */
- void *l3opt; /* optional layer 3 data */
- void *l4opt; /* optional layer 4 data */
- void *ulp; /* ulp stuff */
+ void *priv; /* driver private data */
+ void *l2opt; /* optional layer 2 data */
+ void *l3opt; /* optional layer 3 data */
+ void *l4opt; /* optional layer 4 data */
+ void *ulp; /* ulp stuff */
+ void *ulp_iscsi; /* ulp iscsi */
};
-#endif /* _T3CDEV_H_ */
+#endif /* _T3CDEV_H_ */
--- /dev/null
+/* This file is automatically generated --- do not edit */
+
+#ifndef _TCB_DEFS_H
+#define _TCB_DEFS_H
+
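+/*
+ * Field-accessor naming, following the usual Chelsio convention (assumed,
+ * since this file is generated):
+ *	W_<field>	32-bit TCB word in which the field starts
+ *	S_<field>	bit offset of the field within that word
+ *	M_<field>	field mask, unshifted
+ *	V_<field>(x)	shift a value into the field's position
+ */
+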
+#define W_TCB_T_STATE 0
+#define S_TCB_T_STATE 0
+#define M_TCB_T_STATE 0xfULL
+#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
+
+#define W_TCB_TIMER 0
+#define S_TCB_TIMER 4
+#define M_TCB_TIMER 0x1ULL
+#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
+
+#define W_TCB_DACK_TIMER 0
+#define S_TCB_DACK_TIMER 5
+#define M_TCB_DACK_TIMER 0x1ULL
+#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
+
+#define W_TCB_DEL_FLAG 0
+#define S_TCB_DEL_FLAG 6
+#define M_TCB_DEL_FLAG 0x1ULL
+#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
+
+#define W_TCB_L2T_IX 0
+#define S_TCB_L2T_IX 7
+#define M_TCB_L2T_IX 0x7ffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+#define W_TCB_SMAC_SEL 0
+#define S_TCB_SMAC_SEL 18
+#define M_TCB_SMAC_SEL 0x3ULL
+#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
+
+#define W_TCB_TOS 0
+#define S_TCB_TOS 20
+#define M_TCB_TOS 0x3fULL
+#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
+
+#define W_TCB_MAX_RT 0
+#define S_TCB_MAX_RT 26
+#define M_TCB_MAX_RT 0xfULL
+#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
+
+#define W_TCB_T_RXTSHIFT 0
+#define S_TCB_T_RXTSHIFT 30
+#define M_TCB_T_RXTSHIFT 0xfULL
+#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
+
+#define W_TCB_T_DUPACKS 1
+#define S_TCB_T_DUPACKS 2
+#define M_TCB_T_DUPACKS 0xfULL
+#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
+
+#define W_TCB_T_MAXSEG 1
+#define S_TCB_T_MAXSEG 6
+#define M_TCB_T_MAXSEG 0xfULL
+#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
+
+#define W_TCB_T_FLAGS1 1
+#define S_TCB_T_FLAGS1 10
+#define M_TCB_T_FLAGS1 0xffffffffULL
+#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
+
+#define W_TCB_T_FLAGS2 2
+#define S_TCB_T_FLAGS2 10
+#define M_TCB_T_FLAGS2 0x7fULL
+#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
+
+#define W_TCB_SND_SCALE 2
+#define S_TCB_SND_SCALE 17
+#define M_TCB_SND_SCALE 0xfULL
+#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
+
+#define W_TCB_RCV_SCALE 2
+#define S_TCB_RCV_SCALE 21
+#define M_TCB_RCV_SCALE 0xfULL
+#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
+
+#define W_TCB_SND_UNA_RAW 2
+#define S_TCB_SND_UNA_RAW 25
+#define M_TCB_SND_UNA_RAW 0x7ffffffULL
+#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
+
+#define W_TCB_SND_NXT_RAW 3
+#define S_TCB_SND_NXT_RAW 20
+#define M_TCB_SND_NXT_RAW 0x7ffffffULL
+#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
+
+#define W_TCB_RCV_NXT 4
+#define S_TCB_RCV_NXT 15
+#define M_TCB_RCV_NXT 0xffffffffULL
+#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
+
+#define W_TCB_RCV_ADV 5
+#define S_TCB_RCV_ADV 15
+#define M_TCB_RCV_ADV 0xffffULL
+#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
+
+#define W_TCB_SND_MAX_RAW 5
+#define S_TCB_SND_MAX_RAW 31
+#define M_TCB_SND_MAX_RAW 0x7ffffffULL
+#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
+
+#define W_TCB_SND_CWND 6
+#define S_TCB_SND_CWND 26
+#define M_TCB_SND_CWND 0x7ffffffULL
+#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
+
+#define W_TCB_SND_SSTHRESH 7
+#define S_TCB_SND_SSTHRESH 21
+#define M_TCB_SND_SSTHRESH 0x7ffffffULL
+#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
+
+#define W_TCB_T_RTT_TS_RECENT_AGE 8
+#define S_TCB_T_RTT_TS_RECENT_AGE 16
+#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+#define W_TCB_T_RTSEQ_RECENT 9
+#define S_TCB_T_RTSEQ_RECENT 16
+#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
+#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
+
+#define W_TCB_T_SRTT 10
+#define S_TCB_T_SRTT 16
+#define M_TCB_T_SRTT 0xffffULL
+#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
+
+#define W_TCB_T_RTTVAR 11
+#define S_TCB_T_RTTVAR 0
+#define M_TCB_T_RTTVAR 0xffffULL
+#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
+
+#define W_TCB_TS_LAST_ACK_SENT_RAW 11
+#define S_TCB_TS_LAST_ACK_SENT_RAW 16
+#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
+#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
+
+#define W_TCB_DIP 12
+#define S_TCB_DIP 11
+#define M_TCB_DIP 0xffffffffULL
+#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
+
+#define W_TCB_SIP 13
+#define S_TCB_SIP 11
+#define M_TCB_SIP 0xffffffffULL
+#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
+
+#define W_TCB_DP 14
+#define S_TCB_DP 11
+#define M_TCB_DP 0xffffULL
+#define V_TCB_DP(x) ((x) << S_TCB_DP)
+
+#define W_TCB_SP 14
+#define S_TCB_SP 27
+#define M_TCB_SP 0xffffULL
+#define V_TCB_SP(x) ((x) << S_TCB_SP)
+
+#define W_TCB_TIMESTAMP 15
+#define S_TCB_TIMESTAMP 11
+#define M_TCB_TIMESTAMP 0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+#define W_TCB_TIMESTAMP_OFFSET 16
+#define S_TCB_TIMESTAMP_OFFSET 11
+#define M_TCB_TIMESTAMP_OFFSET 0xfULL
+#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
+
+#define W_TCB_TX_MAX 16
+#define S_TCB_TX_MAX 15
+#define M_TCB_TX_MAX 0xffffffffULL
+#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
+
+#define W_TCB_TX_HDR_PTR_RAW 17
+#define S_TCB_TX_HDR_PTR_RAW 15
+#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
+#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
+
+#define W_TCB_TX_LAST_PTR_RAW 18
+#define S_TCB_TX_LAST_PTR_RAW 0
+#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
+#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
+
+#define W_TCB_TX_COMPACT 18
+#define S_TCB_TX_COMPACT 17
+#define M_TCB_TX_COMPACT 0x1ULL
+#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
+
+#define W_TCB_RX_COMPACT 18
+#define S_TCB_RX_COMPACT 18
+#define M_TCB_RX_COMPACT 0x1ULL
+#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
+
+#define W_TCB_RCV_WND 18
+#define S_TCB_RCV_WND 19
+#define M_TCB_RCV_WND 0x7ffffffULL
+#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)
+
+#define W_TCB_RX_HDR_OFFSET 19
+#define S_TCB_RX_HDR_OFFSET 14
+#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
+#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)
+
+#define W_TCB_RX_FRAG0_START_IDX_RAW 20
+#define S_TCB_RX_FRAG0_START_IDX_RAW 9
+#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)
+
+#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
+#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
+#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
+#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
+
+#define W_TCB_RX_FRAG0_LEN 21
+#define S_TCB_RX_FRAG0_LEN 31
+#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
+#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)
+
+#define W_TCB_RX_FRAG1_LEN 22
+#define S_TCB_RX_FRAG1_LEN 26
+#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
+#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)
+
+#define W_TCB_NEWRENO_RECOVER 23
+#define S_TCB_NEWRENO_RECOVER 21
+#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
+#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)
+
+#define W_TCB_PDU_HAVE_LEN 24
+#define S_TCB_PDU_HAVE_LEN 16
+#define M_TCB_PDU_HAVE_LEN 0x1ULL
+#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)
+
+#define W_TCB_PDU_LEN 24
+#define S_TCB_PDU_LEN 17
+#define M_TCB_PDU_LEN 0xffffULL
+#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
+
+#define W_TCB_RX_QUIESCE 25
+#define S_TCB_RX_QUIESCE 1
+#define M_TCB_RX_QUIESCE 0x1ULL
+#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)
+
+#define W_TCB_RX_PTR_RAW 25
+#define S_TCB_RX_PTR_RAW 2
+#define M_TCB_RX_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)
+
+#define W_TCB_CPU_NO 25
+#define S_TCB_CPU_NO 19
+#define M_TCB_CPU_NO 0x7fULL
+#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)
+
+#define W_TCB_ULP_TYPE 25
+#define S_TCB_ULP_TYPE 26
+#define M_TCB_ULP_TYPE 0xfULL
+#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
+
+#define W_TCB_RX_FRAG1_PTR_RAW 25
+#define S_TCB_RX_FRAG1_PTR_RAW 30
+#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
+
+#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
+#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
+#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
+
+#define W_TCB_RX_FRAG2_PTR_RAW 27
+#define S_TCB_RX_FRAG2_PTR_RAW 10
+#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
+
+#define W_TCB_RX_FRAG2_LEN_RAW 27
+#define S_TCB_RX_FRAG2_LEN_RAW 27
+#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)
+
+#define W_TCB_RX_FRAG3_PTR_RAW 28
+#define S_TCB_RX_FRAG3_PTR_RAW 22
+#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)
+
+#define W_TCB_RX_FRAG3_LEN_RAW 29
+#define S_TCB_RX_FRAG3_LEN_RAW 7
+#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)
+
+#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
+#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
+#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
+
+#define W_TCB_PDU_HDR_LEN 30
+#define S_TCB_PDU_HDR_LEN 29
+#define M_TCB_PDU_HDR_LEN 0xffULL
+#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
+
+#define W_TCB_SLUSH1 31
+#define S_TCB_SLUSH1 5
+#define M_TCB_SLUSH1 0x7ffffULL
+#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)
+
+#define W_TCB_ULP_RAW 31
+#define S_TCB_ULP_RAW 24
+#define M_TCB_ULP_RAW 0xffULL
+#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
+
+#define W_TCB_DDP_RDMAP_VERSION 25
+#define S_TCB_DDP_RDMAP_VERSION 30
+#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
+#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)
+
+#define W_TCB_MARKER_ENABLE_RX 25
+#define S_TCB_MARKER_ENABLE_RX 31
+#define M_TCB_MARKER_ENABLE_RX 0x1ULL
+#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)
+
+#define W_TCB_MARKER_ENABLE_TX 26
+#define S_TCB_MARKER_ENABLE_TX 0
+#define M_TCB_MARKER_ENABLE_TX 0x1ULL
+#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)
+
+#define W_TCB_CRC_ENABLE 26
+#define S_TCB_CRC_ENABLE 1
+#define M_TCB_CRC_ENABLE 0x1ULL
+#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)
+
+#define W_TCB_IRS_ULP 26
+#define S_TCB_IRS_ULP 2
+#define M_TCB_IRS_ULP 0x1ffULL
+#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
+
+#define W_TCB_ISS_ULP 26
+#define S_TCB_ISS_ULP 11
+#define M_TCB_ISS_ULP 0x1ffULL
+#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
+
+#define W_TCB_TX_PDU_LEN 26
+#define S_TCB_TX_PDU_LEN 20
+#define M_TCB_TX_PDU_LEN 0x3fffULL
+#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
+
+#define W_TCB_TX_PDU_OUT 27
+#define S_TCB_TX_PDU_OUT 2
+#define M_TCB_TX_PDU_OUT 0x1ULL
+#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)
+
+#define W_TCB_CQ_IDX_SQ 27
+#define S_TCB_CQ_IDX_SQ 3
+#define M_TCB_CQ_IDX_SQ 0xffffULL
+#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
+
+#define W_TCB_CQ_IDX_RQ 27
+#define S_TCB_CQ_IDX_RQ 19
+#define M_TCB_CQ_IDX_RQ 0xffffULL
+#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
+
+#define W_TCB_QP_ID 28
+#define S_TCB_QP_ID 3
+#define M_TCB_QP_ID 0xffffULL
+#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
+
+#define W_TCB_PD_ID 28
+#define S_TCB_PD_ID 19
+#define M_TCB_PD_ID 0xffffULL
+#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
+
+#define W_TCB_STAG 29
+#define S_TCB_STAG 3
+#define M_TCB_STAG 0xffffffffULL
+#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
+
+#define W_TCB_RQ_START 30
+#define S_TCB_RQ_START 3
+#define M_TCB_RQ_START 0x3ffffffULL
+#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
+
+#define W_TCB_RQ_MSN 30
+#define S_TCB_RQ_MSN 29
+#define M_TCB_RQ_MSN 0x3ffULL
+#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)
+
+#define W_TCB_RQ_MAX_OFFSET 31
+#define S_TCB_RQ_MAX_OFFSET 7
+#define M_TCB_RQ_MAX_OFFSET 0xfULL
+#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
+
+#define W_TCB_RQ_WRITE_PTR 31
+#define S_TCB_RQ_WRITE_PTR 11
+#define M_TCB_RQ_WRITE_PTR 0x3ffULL
+#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
+
+#define W_TCB_INB_WRITE_PERM 31
+#define S_TCB_INB_WRITE_PERM 21
+#define M_TCB_INB_WRITE_PERM 0x1ULL
+#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)
+
+#define W_TCB_INB_READ_PERM 31
+#define S_TCB_INB_READ_PERM 22
+#define M_TCB_INB_READ_PERM 0x1ULL
+#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)
+
+#define W_TCB_ORD_L_BIT_VLD 31
+#define S_TCB_ORD_L_BIT_VLD 23
+#define M_TCB_ORD_L_BIT_VLD 0x1ULL
+#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
+
+#define W_TCB_RDMAP_OPCODE 31
+#define S_TCB_RDMAP_OPCODE 24
+#define M_TCB_RDMAP_OPCODE 0xfULL
+#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
+
+#define W_TCB_TX_FLUSH 31
+#define S_TCB_TX_FLUSH 28
+#define M_TCB_TX_FLUSH 0x1ULL
+#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
+
+#define W_TCB_TX_OOS_RXMT 31
+#define S_TCB_TX_OOS_RXMT 29
+#define M_TCB_TX_OOS_RXMT 0x1ULL
+#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
+
+#define W_TCB_TX_OOS_TXMT 31
+#define S_TCB_TX_OOS_TXMT 30
+#define M_TCB_TX_OOS_TXMT 0x1ULL
+#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
+
+#define W_TCB_SLUSH_AUX2 31
+#define S_TCB_SLUSH_AUX2 31
+#define M_TCB_SLUSH_AUX2 0x1ULL
+#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)
+
+#define W_TCB_RX_FRAG1_PTR_RAW2 25
+#define S_TCB_RX_FRAG1_PTR_RAW2 30
+#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
+#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)
+
+#define W_TCB_RX_DDP_FLAGS 26
+#define S_TCB_RX_DDP_FLAGS 15
+#define M_TCB_RX_DDP_FLAGS 0xffffULL
+#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)
+
+#define W_TCB_SLUSH_AUX3 26
+#define S_TCB_SLUSH_AUX3 31
+#define M_TCB_SLUSH_AUX3 0x1ffULL
+#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)
+
+#define W_TCB_RX_DDP_BUF0_OFFSET 27
+#define S_TCB_RX_DDP_BUF0_OFFSET 8
+#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
+#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
+
+#define W_TCB_RX_DDP_BUF0_LEN 27
+#define S_TCB_RX_DDP_BUF0_LEN 30
+#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
+#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)
+
+#define W_TCB_RX_DDP_BUF1_OFFSET 28
+#define S_TCB_RX_DDP_BUF1_OFFSET 20
+#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
+#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
+
+#define W_TCB_RX_DDP_BUF1_LEN 29
+#define S_TCB_RX_DDP_BUF1_LEN 10
+#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
+#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
+
+#define W_TCB_RX_DDP_BUF0_TAG 30
+#define S_TCB_RX_DDP_BUF0_TAG 0
+#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
+#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
+
+#define W_TCB_RX_DDP_BUF1_TAG 31
+#define S_TCB_RX_DDP_BUF1_TAG 0
+#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
+#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
+
+#define S_TF_DACK 10
+#define V_TF_DACK(x) ((x) << S_TF_DACK)
+
+#define S_TF_NAGLE 11
+#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
+
+#define S_TF_RECV_SCALE 12
+#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)
+
+#define S_TF_RECV_TSTMP 13
+#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)
+
+#define S_TF_RECV_SACK 14
+#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)
+
+#define S_TF_TURBO 15
+#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
+
+#define S_TF_KEEPALIVE 16
+#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
+
+#define S_TF_TCAM_BYPASS 17
+#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)
+
+#define S_TF_CORE_FIN 18
+#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
+
+#define S_TF_CORE_MORE 19
+#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
+
+#define S_TF_MIGRATING 20
+#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
+
+#define S_TF_ACTIVE_OPEN 21
+#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)
+
+#define S_TF_ASK_MODE 22
+#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)
+
+#define S_TF_NON_OFFLOAD 23
+#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
+
+#define S_TF_MOD_SCHD 24
+#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)
+
+#define S_TF_MOD_SCHD_REASON0 25
+#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)
+
+#define S_TF_MOD_SCHD_REASON1 26
+#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)
+
+#define S_TF_MOD_SCHD_RX 27
+#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)
+
+#define S_TF_CORE_PUSH 28
+#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
+
+#define S_TF_RCV_COALESCE_ENABLE 29
+#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
+
+#define S_TF_RCV_COALESCE_PUSH 30
+#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
+
+#define S_TF_RCV_COALESCE_LAST_PSH 31
+#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
+
+#define S_TF_RCV_COALESCE_HEARTBEAT 32
+#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)
+
+#define S_TF_LOCK_TID 33
+#define V_TF_LOCK_TID(x) ((x) << S_TF_LOCK_TID)
+
+#define S_TF_DACK_MSS 34
+#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
+
+#define S_TF_CCTRL_SEL0 35
+#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
+
+#define S_TF_CCTRL_SEL1 36
+#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
+
+#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
+#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)
+
+#define S_TF_TX_PACE_AUTO 38
+#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
+
+#define S_TF_PEER_FIN_HELD 39
+#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)
+
+#define S_TF_CORE_URG 40
+#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
+
+#define S_TF_RDMA_ERROR 41
+#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)
+
+#define S_TF_SSWS_DISABLED 42
+#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
+
+#define S_TF_DUPACK_COUNT_ODD 43
+#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)
+
+#define S_TF_TX_CHANNEL 44
+#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)
+
+#define S_TF_RX_CHANNEL 45
+#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
+
+#define S_TF_TX_PACE_FIXED 46
+#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
+
+#define S_TF_RDMA_FLM_ERROR 47
+#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)
+
+#define S_TF_RX_FLOW_CONTROL_DISABLE 48
+#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
+
+#define S_TF_DDP_INDICATE_OUT 15
+#define V_TF_DDP_INDICATE_OUT(x) ((x) << S_TF_DDP_INDICATE_OUT)
+
+#define S_TF_DDP_ACTIVE_BUF 16
+#define V_TF_DDP_ACTIVE_BUF(x) ((x) << S_TF_DDP_ACTIVE_BUF)
+
+#define S_TF_DDP_BUF0_VALID 17
+#define V_TF_DDP_BUF0_VALID(x) ((x) << S_TF_DDP_BUF0_VALID)
+
+#define S_TF_DDP_BUF1_VALID 18
+#define V_TF_DDP_BUF1_VALID(x) ((x) << S_TF_DDP_BUF1_VALID)
+
+#define S_TF_DDP_BUF0_INDICATE 19
+#define V_TF_DDP_BUF0_INDICATE(x) ((x) << S_TF_DDP_BUF0_INDICATE)
+
+#define S_TF_DDP_BUF1_INDICATE 20
+#define V_TF_DDP_BUF1_INDICATE(x) ((x) << S_TF_DDP_BUF1_INDICATE)
+
+#define S_TF_DDP_PUSH_DISABLE_0 21
+#define V_TF_DDP_PUSH_DISABLE_0(x) ((x) << S_TF_DDP_PUSH_DISABLE_0)
+
+#define S_TF_DDP_PUSH_DISABLE_1 22
+#define V_TF_DDP_PUSH_DISABLE_1(x) ((x) << S_TF_DDP_PUSH_DISABLE_1)
+
+#define S_TF_DDP_OFF 23
+#define V_TF_DDP_OFF(x) ((x) << S_TF_DDP_OFF)
+
+#define S_TF_DDP_WAIT_FRAG 24
+#define V_TF_DDP_WAIT_FRAG(x) ((x) << S_TF_DDP_WAIT_FRAG)
+
+#define S_TF_DDP_BUF_INF 25
+#define V_TF_DDP_BUF_INF(x) ((x) << S_TF_DDP_BUF_INF)
+
+#define S_TF_DDP_RX2TX 26
+#define V_TF_DDP_RX2TX(x) ((x) << S_TF_DDP_RX2TX)
+
+#define S_TF_DDP_BUF0_FLUSH 27
+#define V_TF_DDP_BUF0_FLUSH(x) ((x) << S_TF_DDP_BUF0_FLUSH)
+
+#define S_TF_DDP_BUF1_FLUSH 28
+#define V_TF_DDP_BUF1_FLUSH(x) ((x) << S_TF_DDP_BUF1_FLUSH)
+
+#define S_TF_DDP_PSH_NO_INVALIDATE0 29
+#define V_TF_DDP_PSH_NO_INVALIDATE0(x) ((x) << S_TF_DDP_PSH_NO_INVALIDATE0)
+
+#define S_TF_DDP_PSH_NO_INVALIDATE1 30
+#define V_TF_DDP_PSH_NO_INVALIDATE1(x) ((x) << S_TF_DDP_PSH_NO_INVALIDATE1)
+
+#endif /* _TCB_DEFS_H */
--- /dev/null
+/*
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
+ *
+ * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+/*
+ * Routines to allocate and free T3 trace buffers.
+ *
+ * Authors:
+ * Felix Marti <felix@chelsio.com>
+ *
+ * The code suffers from a trace buffer count increment race, which might
+ * lead to entries being overwritten. I don't really care about this,
+ * because the trace buffer is a simple debug/performance tuning aid.
+ *
+ * Trace buffers are exposed through debugfs.
+ */
+
+#include "trace.h"
+
+#ifdef T3_TRACE
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "cxgb3_compat.h"
+
+/*
+ * SEQ OPS
+ */
+static void *t3_trace_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct trace_buf *tb = seq->private;
+ struct trace_entry *e = NULL;
+ unsigned int start, count;
+
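+	/* If the ring has wrapped, the oldest entry is at idx % capacity;
+	 * otherwise dump from entry 0. */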
+ if (tb->idx > tb->capacity) {
+ start = tb->idx & (tb->capacity - 1);
+ count = tb->capacity;
+ } else {
+ start = 0;
+ count = tb->idx;
+ }
+
+ if (*pos < count)
+ e = &tb->ep[(start + *pos) & (tb->capacity - 1)];
+
+ return e;
+}
+
+static void *t3_trace_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct trace_buf *tb = seq->private;
+ struct trace_entry *e = v;
+ unsigned int count = min(tb->idx, tb->capacity);
+
+ if (++*pos < count) {
+ e++;
+ if (e >= &tb->ep[tb->capacity])
+ e = tb->ep;
+ } else
+ e = NULL;
+
+ return e;
+}
+
+static void t3_trace_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int t3_trace_seq_show(struct seq_file *seq, void *v)
+{
+ struct trace_entry *ep = v;
+
+ seq_printf(seq, "%016llx ", (unsigned long long) ep->tsc);
+ seq_printf(seq, ep->fmt, ep->param[0], ep->param[1], ep->param[2],
+ ep->param[3], ep->param[4], ep->param[5]);
+ seq_printf(seq, "\n");
+
+ return 0;
+}
+
+static struct seq_operations t3_trace_seq_ops = {
+ .start = t3_trace_seq_start,
+ .next = t3_trace_seq_next,
+ .stop = t3_trace_seq_stop,
+ .show = t3_trace_seq_show
+};
+
+/*
+ * FILE OPS
+ */
+static int t3_trace_seq_open(struct inode *inode, struct file *file)
+{
+ int rc = seq_open(file, &t3_trace_seq_ops);
+
+ if (!rc) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+
+ return rc;
+}
+
+static struct file_operations t3_trace_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = t3_trace_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+/*
+ * TRACEBUFFER API
+ */
+struct trace_buf *trace_alloc(struct dentry *root, const char *name,
+ unsigned int capacity)
+{
+ struct trace_buf *tb;
+ unsigned int size;
+
+ if (!name || !capacity)
+ return NULL;
+ if (capacity & (capacity - 1)) /* require power of 2 */
+ return NULL;
+
+ size = sizeof(*tb) + sizeof(struct trace_entry) * capacity;
+ tb = kmalloc(size, GFP_KERNEL);
+ if (!tb)
+ return NULL;
+
+ memset(tb, 0, size);
+ tb->capacity = capacity;
+ tb->debugfs_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, root,
+ tb, &t3_trace_seq_fops);
+ if (!tb->debugfs_dentry) {
+ kfree(tb);
+ return NULL;
+ }
+
+ return tb;
+}
+
+void trace_free(struct trace_buf *tb)
+{
+ if (tb) {
+ if (tb->debugfs_dentry)
+ debugfs_remove(tb->debugfs_dentry);
+ kfree(tb);
+ }
+}
+#endif /* T3_TRACE */
--- /dev/null
+/*
+ * This file is part of the Chelsio T3 Ethernet driver for Linux.
+ *
+ * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+/*
+ * Definitions and inline functions for the T3 trace buffers.
+ *
+ * Authors:
+ * Felix Marti <felix@chelsio.com>
+ */
+
+#ifndef __T3_TRACE_H__
+#define __T3_TRACE_H__
+
+#ifdef T3_TRACE
+
+#include <linux/time.h>
+#include <linux/timex.h>
+
+#define T3_TRACE_NUM_PARAM 6
+
+typedef unsigned long tracearg_t;
+
+#define T3_TRACE0(b, s) \
+	do { \
+		if ((b) != NULL) \
+			(void) t3_trace((b), (s)); \
+	} while (0)
+#define T3_TRACE1(b, s, p0) \
+	do { \
+		if ((b) != NULL) { \
+			tracearg_t *_p = t3_trace((b), (s)); \
+			_p[0] = (tracearg_t) (p0); \
+		} \
+	} while (0)
+#define T3_TRACE2(b, s, p0, p1) \
+	do { \
+		if ((b) != NULL) { \
+			tracearg_t *_p = t3_trace((b), (s)); \
+			_p[0] = (tracearg_t) (p0); \
+			_p[1] = (tracearg_t) (p1); \
+		} \
+	} while (0)
+#define T3_TRACE3(b, s, p0, p1, p2) \
+	do { \
+		if ((b) != NULL) { \
+			tracearg_t *_p = t3_trace((b), (s)); \
+			_p[0] = (tracearg_t) (p0); \
+			_p[1] = (tracearg_t) (p1); \
+			_p[2] = (tracearg_t) (p2); \
+		} \
+	} while (0)
+#define T3_TRACE4(b, s, p0, p1, p2, p3) \
+	do { \
+		if ((b) != NULL) { \
+			tracearg_t *_p = t3_trace((b), (s)); \
+			_p[0] = (tracearg_t) (p0); \
+			_p[1] = (tracearg_t) (p1); \
+			_p[2] = (tracearg_t) (p2); \
+			_p[3] = (tracearg_t) (p3); \
+		} \
+	} while (0)
+#define T3_TRACE5(b, s, p0, p1, p2, p3, p4) \
+	do { \
+		if ((b) != NULL) { \
+			tracearg_t *_p = t3_trace((b), (s)); \
+			_p[0] = (tracearg_t) (p0); \
+			_p[1] = (tracearg_t) (p1); \
+			_p[2] = (tracearg_t) (p2); \
+			_p[3] = (tracearg_t) (p3); \
+			_p[4] = (tracearg_t) (p4); \
+		} \
+	} while (0)
+#define T3_TRACE6(b, s, p0, p1, p2, p3, p4, p5) \
+	do { \
+		if ((b) != NULL) { \
+			tracearg_t *_p = t3_trace((b), (s)); \
+			_p[0] = (tracearg_t) (p0); \
+			_p[1] = (tracearg_t) (p1); \
+			_p[2] = (tracearg_t) (p2); \
+			_p[3] = (tracearg_t) (p3); \
+			_p[4] = (tracearg_t) (p4); \
+			_p[5] = (tracearg_t) (p5); \
+		} \
+	} while (0)
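+
+/*
+ * The do { } while (0) wrappers keep the macros safe as single statements,
+ * e.g. in an unbraced if/else.  Hypothetical usage:
+ *
+ *	T3_TRACE2(tb, "txq %u stopped, %u credits left", qidx, credits);
+ */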
+
+struct trace_entry {
+ cycles_t tsc;
+ char *fmt;
+ tracearg_t param[T3_TRACE_NUM_PARAM];
+};
+
+struct dentry;
+
+struct trace_buf {
+ unsigned int capacity; /* size of ring buffer */
+ unsigned int idx; /* index of next entry to write */
+ struct dentry *debugfs_dentry;
+ struct trace_entry ep[0]; /* the ring buffer */
+};
+
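+/*
+ * Claim the next ring slot, stamp it, and hand back its parameter array for
+ * the T3_TRACE macros to fill.  The index increment is deliberately
+ * unsynchronized (see the race note in trace.c).
+ */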
+static inline unsigned long *t3_trace(struct trace_buf *tb, char *fmt)
+{
+ struct trace_entry *ep = &tb->ep[tb->idx++ & (tb->capacity - 1)];
+
+ ep->fmt = fmt;
+ ep->tsc = get_cycles();
+
+ return (unsigned long *) &ep->param[0];
+}
+
+struct trace_buf *trace_alloc(struct dentry *root, const char *name,
+ unsigned int capacity);
+void trace_free(struct trace_buf *tb);
+
+#else
+#define T3_TRACE0(b, s)
+#define T3_TRACE1(b, s, p0)
+#define T3_TRACE2(b, s, p0, p1)
+#define T3_TRACE3(b, s, p0, p1, p2)
+#define T3_TRACE4(b, s, p0, p1, p2, p3)
+#define T3_TRACE5(b, s, p0, p1, p2, p3, p4)
+#define T3_TRACE6(b, s, p0, p1, p2, p3, p4, p5)
+
+#define trace_alloc(root, name, capacity) NULL
+#define trace_free(tb)
+#endif
+
+#endif /* __T3_TRACE_H__ */
-/*
- * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
+/*****************************************************************************
+ * *
+ * File: *
+ * version.h *
+ * *
+ * Description: *
+ * Chelsio driver version defines. *
+ * *
+ * Copyright (c) 2003 - 2006 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * http://www.chelsio.com *
+ * *
+ ****************************************************************************/
+/* $Date: 2008/07/28 16:04:26 $ $RCSfile: version.h,v $ $Revision: 1.4.16.1 $ */
#ifndef __CHELSIO_VERSION_H
#define __CHELSIO_VERSION_H
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
-/* Driver version */
-#define DRV_VERSION "1.0-ko"
-
-/* Firmware version */
-#define FW_VERSION_MAJOR 5
-#define FW_VERSION_MINOR 0
-#define FW_VERSION_MICRO 0
-#endif /* __CHELSIO_VERSION_H */
+/* Driver version */
+#define DRV_VERSION "1.0.146"
+#endif /* __CHELSIO_VERSION_H */
--- /dev/null
+/*
+ * This file is part of the Chelsio T3 Ethernet driver.
+ *
+ * Copyright (C) 2007-2008 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#include "common.h"
+
+enum {
+ ELMR_ADDR = 0,
+ ELMR_STAT = 1,
+ ELMR_DATA_LO = 2,
+ ELMR_DATA_HI = 3,
+
+ ELMR_THRES0 = 0xe000,
+ ELMR_BW = 0xe00c,
+ ELMR_FIFO_SZ = 0xe00d,
+ ELMR_STATS = 0xf000,
+
+ ELMR_MDIO_ADDR = 10
+};
+
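+/*
+ * The external VSC7323 ("ELMR") is reached through an MDIO window at address
+ * ELMR_MDIO_ADDR: the target register goes into ELMR_ADDR, and 32-bit data
+ * moves 16 bits at a time through ELMR_DATA_LO/HI.  VSC_REG() packs the
+ * chip's block/subblock/register coordinates into one such address.
+ */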
+#define VSC_REG(block, subblock, reg) \
+ ((reg) | ((subblock) << 8) | ((block) << 12))
+
+int t3_elmr_blk_write(adapter_t *adap, int start, const u32 *vals, int n)
+{
+ int ret;
+ const struct mdio_ops *mo = adapter_info(adap)->mdio_ops;
+
+ ELMR_LOCK(adap);
+ ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_ADDR, start);
+ for ( ; !ret && n; n--, vals++) {
+ ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_LO,
+ *vals & 0xffff);
+ if (!ret)
+ ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_HI,
+ *vals >> 16);
+ }
+ ELMR_UNLOCK(adap);
+ return ret;
+}
+
+static int elmr_write(adapter_t *adap, int addr, u32 val)
+{
+ return t3_elmr_blk_write(adap, addr, &val, 1);
+}
+
+int t3_elmr_blk_read(adapter_t *adap, int start, u32 *vals, int n)
+{
+ int i, ret;
+ unsigned int v;
+ const struct mdio_ops *mo = adapter_info(adap)->mdio_ops;
+
+ ELMR_LOCK(adap);
+
+ ret = mo->write(adap, ELMR_MDIO_ADDR, 0, ELMR_ADDR, start);
+ if (ret)
+ goto out;
+
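+	/* Poll the status register (up to ~25us) until the read data is ready. */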
+ for (i = 0; i < 5; i++) {
+ ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_STAT, &v);
+ if (ret)
+ goto out;
+ if (v == 1)
+ break;
+ udelay(5);
+ }
+ if (v != 1) {
+ ret = -ETIME;
+ goto out;
+ }
+
+ for ( ; !ret && n; n--, vals++) {
+ ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_LO, vals);
+ if (!ret) {
+ ret = mo->read(adap, ELMR_MDIO_ADDR, 0, ELMR_DATA_HI,
+ &v);
+ *vals |= v << 16;
+ }
+ }
+out: ELMR_UNLOCK(adap);
+ return ret;
+}
+
+int t3_vsc7323_init(adapter_t *adap, int nports)
+{
+ static struct addr_val_pair sys_avp[] = {
+ { VSC_REG(7, 15, 0xf), 2 },
+ { VSC_REG(7, 15, 0x19), 0xd6 },
+ { VSC_REG(7, 15, 7), 0xc },
+ { VSC_REG(7, 1, 0), 0x220 },
+ };
+ static struct addr_val_pair fifo_avp[] = {
+ { VSC_REG(2, 0, 0x2f), 0 },
+ { VSC_REG(2, 0, 0xf), 0xa0010291 },
+ { VSC_REG(2, 1, 0x2f), 1 },
+ { VSC_REG(2, 1, 0xf), 0xa026301 }
+ };
+ static struct addr_val_pair xg_avp[] = {
+ { VSC_REG(1, 10, 0), 0x600b },
+		{ VSC_REG(1, 10, 1), 0x70600 },	/* QUANTA = 96*1024*8/512 */
+ { VSC_REG(1, 10, 2), 0x2710 },
+ { VSC_REG(1, 10, 5), 0x65 },
+ { VSC_REG(1, 10, 7), 0x23 },
+ { VSC_REG(1, 10, 0x23), 0x800007bf },
+ { VSC_REG(1, 10, 0x23), 0x000007bf },
+ { VSC_REG(1, 10, 0x23), 0x800007bf },
+ { VSC_REG(1, 10, 0x24), 4 }
+ };
+
+ int i, ret, ing_step, egr_step, ing_bot, egr_bot;
+
+ for (i = 0; i < ARRAY_SIZE(sys_avp); i++)
+ if ((ret = t3_elmr_blk_write(adap, sys_avp[i].reg_addr,
+ &sys_avp[i].val, 1)))
+ return ret;
+
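+	/* Split the ingress and egress FIFO space evenly among the ports. */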
+ ing_step = 0xc0 / nports;
+ egr_step = 0x40 / nports;
+ ing_bot = egr_bot = 0;
+
+ /* {ING,EGR}_CONTROL.CLR = 1 here */
+ for (i = 0; i < nports; i++) {
+		if ((ret = elmr_write(adap, VSC_REG(2, 0, 0x10 + i),
+				((ing_bot + ing_step) << 16) | ing_bot)) ||
+ (ret = elmr_write(adap, VSC_REG(2, 0, 0x40 + i),
+ 0x6000bc0)) ||
+ (ret = elmr_write(adap, VSC_REG(2, 0, 0x50 + i), 1)) ||
+ (ret = elmr_write(adap, VSC_REG(2, 1, 0x10 + i),
+ ((egr_bot + egr_step) << 16) | egr_bot)) ||
+ (ret = elmr_write(adap, VSC_REG(2, 1, 0x40 + i),
+ 0x2000280)) ||
+ (ret = elmr_write(adap, VSC_REG(2, 1, 0x50 + i), 0)))
+ return ret;
+ ing_bot += ing_step;
+ egr_bot += egr_step;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fifo_avp); i++)
+ if ((ret = t3_elmr_blk_write(adap, fifo_avp[i].reg_addr,
+ &fifo_avp[i].val, 1)))
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(xg_avp); i++)
+ if ((ret = t3_elmr_blk_write(adap, xg_avp[i].reg_addr,
+ &xg_avp[i].val, 1)))
+ return ret;
+
+ for (i = 0; i < nports; i++)
+ if ((ret = elmr_write(adap, VSC_REG(1, i, 0), 0xa59c)) ||
+ (ret = elmr_write(adap, VSC_REG(1, i, 5),
+ (i << 12) | 0x63)) ||
+ (ret = elmr_write(adap, VSC_REG(1, i, 0xb), 0x96)) ||
+ (ret = elmr_write(adap, VSC_REG(1, i, 0x15), 0x21)) ||
+ (ret = elmr_write(adap, ELMR_THRES0 + i, 768)))
+ return ret;
+
+ if ((ret = elmr_write(adap, ELMR_BW, 7)))
+ return ret;
+
+ return ret;
+}
+
+int t3_vsc7323_set_speed_fc(adapter_t *adap, int speed, int fc, int port)
+{
+ int mode, clk, r;
+
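+	/* Translate the link speed into the MAC's mode and clock-select fields. */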
+ if (speed >= 0) {
+ if (speed == SPEED_10)
+ mode = clk = 1;
+ else if (speed == SPEED_100)
+ mode = 1, clk = 2;
+ else if (speed == SPEED_1000)
+ mode = clk = 3;
+ else
+ return -EINVAL;
+
+ if ((r = elmr_write(adap, VSC_REG(1, port, 0),
+ 0xa590 | (mode << 2))) ||
+ (r = elmr_write(adap, VSC_REG(1, port, 0xb),
+ 0x91 | (clk << 1))) ||
+ (r = elmr_write(adap, VSC_REG(1, port, 0xb),
+ 0x90 | (clk << 1))) ||
+ (r = elmr_write(adap, VSC_REG(1, port, 0),
+ 0xa593 | (mode << 2))))
+ return r;
+ }
+
+	r = (fc & PAUSE_RX) ? 0x60200 : 0x20200;	/* QUANTA = 32*1024*8/512 */
+ if (fc & PAUSE_TX)
+ r |= (1 << 19);
+ return elmr_write(adap, VSC_REG(1, port, 1), r);
+}
+
+int t3_vsc7323_set_mtu(adapter_t *adap, unsigned int mtu, int port)
+{
+ return elmr_write(adap, VSC_REG(1, port, 2), mtu);
+}
+
+int t3_vsc7323_set_addr(adapter_t *adap, u8 addr[6], int port)
+{
+ int ret;
+
+ ret = elmr_write(adap, VSC_REG(1, port, 3),
+ (addr[0] << 16) | (addr[1] << 8) | addr[2]);
+ if (!ret)
+ ret = elmr_write(adap, VSC_REG(1, port, 4),
+ (addr[3] << 16) | (addr[4] << 8) | addr[5]);
+ return ret;
+}
+
+int t3_vsc7323_enable(adapter_t *adap, int port, int which)
+{
+ int ret;
+ unsigned int v, orig;
+
+ ret = t3_elmr_blk_read(adap, VSC_REG(1, port, 0), &v, 1);
+ if (!ret) {
+ orig = v;
+ if (which & MAC_DIRECTION_TX)
+ v |= 1;
+ if (which & MAC_DIRECTION_RX)
+ v |= 2;
+ if (v != orig)
+ ret = elmr_write(adap, VSC_REG(1, port, 0), v);
+ }
+ return ret;
+}
+
+int t3_vsc7323_disable(adapter_t *adap, int port, int which)
+{
+ int ret;
+ unsigned int v, orig;
+
+ ret = t3_elmr_blk_read(adap, VSC_REG(1, port, 0), &v, 1);
+ if (!ret) {
+ orig = v;
+ if (which & MAC_DIRECTION_TX)
+ v &= ~1;
+ if (which & MAC_DIRECTION_RX)
+ v &= ~2;
+ if (v != orig)
+ ret = elmr_write(adap, VSC_REG(1, port, 0), v);
+ }
+ return ret;
+}
+
+#define STATS0_START 1
+#define STATS1_START 0x24
+#define NSTATS0 (0x1d - STATS0_START + 1)
+#define NSTATS1 (0x2a - STATS1_START + 1)
+
+#define ELMR_STAT(port, reg) (ELMR_STATS + (port) * 0x40 + (reg))
+
+const struct mac_stats *t3_vsc7323_update_stats(struct cmac *mac)
+{
+ int ret;
+ u64 rx_ucast, tx_ucast;
+ u32 stats0[NSTATS0], stats1[NSTATS1];
+
+ ret = t3_elmr_blk_read(mac->adapter,
+ ELMR_STAT(mac->ext_port, STATS0_START),
+ stats0, NSTATS0);
+ if (!ret)
+ ret = t3_elmr_blk_read(mac->adapter,
+ ELMR_STAT(mac->ext_port, STATS1_START),
+ stats1, NSTATS1);
+ if (ret)
+ goto out;
+
+ /*
+ * HW counts Rx/Tx unicast frames but we want all the frames.
+ */
+ rx_ucast = mac->stats.rx_frames - mac->stats.rx_mcast_frames -
+ mac->stats.rx_bcast_frames;
+ rx_ucast += (u64)(stats0[6 - STATS0_START] - (u32)rx_ucast);
+ tx_ucast = mac->stats.tx_frames - mac->stats.tx_mcast_frames -
+ mac->stats.tx_bcast_frames;
+ tx_ucast += (u64)(stats0[27 - STATS0_START] - (u32)tx_ucast);
+
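+	/* Fold a 32-bit HW counter into its 64-bit SW accumulator; the
+	 * subtraction keeps it correct across HW counter wrap. */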
+#define RMON_UPDATE(mac, name, hw_stat) \
+ mac->stats.name += (u64)((hw_stat) - (u32)(mac->stats.name))
+
+ RMON_UPDATE(mac, rx_octets, stats0[4 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames, stats0[6 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames, stats0[7 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames, stats0[8 - STATS0_START]);
+ RMON_UPDATE(mac, rx_mcast_frames, stats0[7 - STATS0_START]);
+ RMON_UPDATE(mac, rx_bcast_frames, stats0[8 - STATS0_START]);
+ RMON_UPDATE(mac, rx_fcs_errs, stats0[9 - STATS0_START]);
+ RMON_UPDATE(mac, rx_pause, stats0[2 - STATS0_START]);
+ RMON_UPDATE(mac, rx_jabber, stats0[16 - STATS0_START]);
+ RMON_UPDATE(mac, rx_short, stats0[11 - STATS0_START]);
+ RMON_UPDATE(mac, rx_symbol_errs, stats0[1 - STATS0_START]);
+ RMON_UPDATE(mac, rx_too_long, stats0[15 - STATS0_START]);
+
+ RMON_UPDATE(mac, rx_frames_64, stats0[17 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames_65_127, stats0[18 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames_128_255, stats0[19 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames_256_511, stats0[20 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames_512_1023, stats0[21 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames_1024_1518, stats0[22 - STATS0_START]);
+ RMON_UPDATE(mac, rx_frames_1519_max, stats0[23 - STATS0_START]);
+
+ RMON_UPDATE(mac, tx_octets, stats0[26 - STATS0_START]);
+ RMON_UPDATE(mac, tx_frames, stats0[27 - STATS0_START]);
+ RMON_UPDATE(mac, tx_frames, stats0[28 - STATS0_START]);
+ RMON_UPDATE(mac, tx_frames, stats0[29 - STATS0_START]);
+ RMON_UPDATE(mac, tx_mcast_frames, stats0[28 - STATS0_START]);
+ RMON_UPDATE(mac, tx_bcast_frames, stats0[29 - STATS0_START]);
+ RMON_UPDATE(mac, tx_pause, stats0[25 - STATS0_START]);
+
+ RMON_UPDATE(mac, tx_underrun, 0);
+
+ RMON_UPDATE(mac, tx_frames_64, stats1[36 - STATS1_START]);
+ RMON_UPDATE(mac, tx_frames_65_127, stats1[37 - STATS1_START]);
+ RMON_UPDATE(mac, tx_frames_128_255, stats1[38 - STATS1_START]);
+ RMON_UPDATE(mac, tx_frames_256_511, stats1[39 - STATS1_START]);
+ RMON_UPDATE(mac, tx_frames_512_1023, stats1[40 - STATS1_START]);
+ RMON_UPDATE(mac, tx_frames_1024_1518, stats1[41 - STATS1_START]);
+ RMON_UPDATE(mac, tx_frames_1519_max, stats1[42 - STATS1_START]);
+
+#undef RMON_UPDATE
+
+ mac->stats.rx_frames = rx_ucast + mac->stats.rx_mcast_frames +
+ mac->stats.rx_bcast_frames;
+ mac->stats.tx_frames = tx_ucast + mac->stats.tx_mcast_frames +
+ mac->stats.tx_bcast_frames;
+out: return &mac->stats;
+}
/*
- * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2005-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#include "common.h"
/* VSC8211 PHY specific registers. */
enum {
- VSC8211_INTR_ENABLE = 25,
- VSC8211_INTR_STATUS = 26,
+ VSC8211_SIGDET_CTRL = 19,
+ VSC8211_EXT_CTRL = 23,
+ VSC8211_INTR_ENABLE = 25,
+ VSC8211_INTR_STATUS = 26,
+ VSC8211_LED_CTRL = 27,
VSC8211_AUX_CTRL_STAT = 28,
+ VSC8211_EXT_PAGE_AXS = 31,
};
enum {
- VSC_INTR_RX_ERR = 1 << 0,
- VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
- VSC_INTR_CABLE = 1 << 2, /* cable impairment */
- VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
- VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
- VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
- VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
- VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
- VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
- VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
- VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
- VSC_INTR_LINK_CHG = 1 << 13, /* link change */
- VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
+ VSC_INTR_RX_ERR = 1 << 0,
+ VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
+ VSC_INTR_CABLE = 1 << 2, /* cable impairment */
+ VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
+ VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
+ VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
+ VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
+ VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
+ VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
+ VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
+ VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
+ VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */
+ VSC_INTR_LINK_CHG = 1 << 13, /* link change */
+ VSC_INTR_SPD_CHG = 1 << 14, /* speed change */
+ VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
+};
+
+enum {
+ VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */
+ VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */
};
#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
+ VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \
VSC_INTR_NEG_DONE)
#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
VSC_INTR_ENABLE)
}
static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
- int *speed, int *duplex, int *fc)
+ int *speed, int *duplex, int *fc)
{
unsigned int bmcr, status, lpa, adv;
int err, sp = -1, dplx = -1, pause = 0;
return 0;
}
+static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ unsigned int bmcr, status, lpa, adv;
+ int err, sp = -1, dplx = -1, pause = 0;
+
+ err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
+ if (!err)
+ err = mdio_read(cphy, 0, MII_BMSR, &status);
+ if (err)
+ return err;
+
+ if (link_ok) {
+ /*
+ * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+ * once more to get the current link state.
+ */
+ if (!(status & BMSR_LSTATUS))
+ err = mdio_read(cphy, 0, MII_BMSR, &status);
+ if (err)
+ return err;
+ *link_ok = (status & BMSR_LSTATUS) != 0;
+ }
+ if (!(bmcr & BMCR_ANENABLE)) {
+ dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (bmcr & BMCR_SPEED1000)
+ sp = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ sp = SPEED_100;
+ else
+ sp = SPEED_10;
+ } else if (status & BMSR_ANEGCOMPLETE) {
+ err = mdio_read(cphy, 0, MII_LPA, &lpa);
+ if (!err)
+ err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
+ if (err)
+ return err;
+
+ if (adv & lpa & ADVERTISE_1000XFULL) {
+ dplx = DUPLEX_FULL;
+ sp = SPEED_1000;
+ } else if (adv & lpa & ADVERTISE_1000XHALF) {
+ dplx = DUPLEX_HALF;
+ sp = SPEED_1000;
+ }
+
+ if (fc && dplx == DUPLEX_FULL) {
+ if (lpa & adv & ADVERTISE_1000XPAUSE)
+ pause = PAUSE_RX | PAUSE_TX;
+ else if ((lpa & ADVERTISE_1000XPAUSE) &&
+ (adv & lpa & ADVERTISE_1000XPSE_ASYM))
+ pause = PAUSE_TX;
+ else if ((lpa & ADVERTISE_1000XPSE_ASYM) &&
+ (adv & ADVERTISE_1000XPAUSE))
+ pause = PAUSE_RX;
+ }
+ }
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+/*
+ * Enable/disable auto MDI/MDI-X in forced link speed mode.
+ */
+static int vsc8211_set_automdi(struct cphy *phy, int enable)
+{
+ int err;
+
+ if ((err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0x52b5)) != 0 ||
+ (err = mdio_write(phy, 0, 18, 0x12)) != 0 ||
+ (err = mdio_write(phy, 0, 17, enable ? 0x2803 : 0x3003)) != 0 ||
+ (err = mdio_write(phy, 0, 16, 0x87fa)) != 0 ||
+ (err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0)) != 0)
+ return err;
+ return 0;
+}
+
+int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ int err;
+
+ err = t3_set_phy_speed_duplex(phy, speed, duplex);
+ if (!err)
+ err = vsc8211_set_automdi(phy, 1);
+ return err;
+}
+
static int vsc8211_power_down(struct cphy *cphy, int enable)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
return cphy_cause;
}
+#ifdef C99_NOT_SUPPORTED
static struct cphy_ops vsc8211_ops = {
- .reset = vsc8211_reset,
- .intr_enable = vsc8211_intr_enable,
- .intr_disable = vsc8211_intr_disable,
- .intr_clear = vsc8211_intr_clear,
- .intr_handler = vsc8211_intr_handler,
- .autoneg_enable = vsc8211_autoneg_enable,
- .autoneg_restart = vsc8211_autoneg_restart,
- .advertise = t3_phy_advertise,
- .set_speed_duplex = t3_set_phy_speed_duplex,
- .get_link_status = vsc8211_get_link_status,
- .power_down = vsc8211_power_down,
+ vsc8211_reset,
+ vsc8211_intr_enable,
+ vsc8211_intr_disable,
+ vsc8211_intr_clear,
+ vsc8211_intr_handler,
+ vsc8211_autoneg_enable,
+ vsc8211_autoneg_restart,
+ t3_phy_advertise,
+ NULL,
+ vsc8211_set_speed_duplex,
+ vsc8211_get_link_status,
+ vsc8211_power_down,
+};
+
+static struct cphy_ops vsc8211_fiber_ops = {
+ vsc8211_reset,
+ vsc8211_intr_enable,
+ vsc8211_intr_disable,
+ vsc8211_intr_clear,
+ vsc8211_intr_handler,
+ vsc8211_autoneg_enable,
+ vsc8211_autoneg_restart,
+ t3_phy_advertise_fiber,
+ NULL,
+ t3_set_phy_speed_duplex,
+ vsc8211_get_link_status_fiber,
+ vsc8211_power_down,
+};
+#else
+static struct cphy_ops vsc8211_ops = {
+ .reset = vsc8211_reset,
+ .intr_enable = vsc8211_intr_enable,
+ .intr_disable = vsc8211_intr_disable,
+ .intr_clear = vsc8211_intr_clear,
+ .intr_handler = vsc8211_intr_handler,
+ .autoneg_enable = vsc8211_autoneg_enable,
+ .autoneg_restart = vsc8211_autoneg_restart,
+ .advertise = t3_phy_advertise,
+ .set_speed_duplex = vsc8211_set_speed_duplex,
+ .get_link_status = vsc8211_get_link_status,
+ .power_down = vsc8211_power_down,
+};
+
+static struct cphy_ops vsc8211_fiber_ops = {
+ .reset = vsc8211_reset,
+ .intr_enable = vsc8211_intr_enable,
+ .intr_disable = vsc8211_intr_disable,
+ .intr_clear = vsc8211_intr_clear,
+ .intr_handler = vsc8211_intr_handler,
+ .autoneg_enable = vsc8211_autoneg_enable,
+ .autoneg_restart = vsc8211_autoneg_restart,
+ .advertise = t3_phy_advertise_fiber,
+ .set_speed_duplex = t3_set_phy_speed_duplex,
+ .get_link_status = vsc8211_get_link_status_fiber,
+ .power_down = vsc8211_power_down,
};
+#endif
-void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
- int phy_addr, const struct mdio_ops *mdio_ops)
+int t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
{
- cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
+ int err;
+ unsigned int val;
+
+ cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops,
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII |
+ SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
+	msleep(20);  /* the PHY needs ~10 ms to start responding to MDIO; wait 20 ms for margin */
+
+ err = mdio_read(phy, 0, VSC8211_EXT_CTRL, &val);
+ if (err)
+ return err;
+ if (val & VSC_CTRL_MEDIA_MODE_HI) {
+ /* copper interface, just need to configure the LEDs */
+ return mdio_write(phy, 0, VSC8211_LED_CTRL, 0x100);
+ }
+
+ phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ;
+ phy->desc = "1000BASE-X";
+ phy->ops = &vsc8211_fiber_ops;
+
+ if ((err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 1)) != 0 ||
+ (err = mdio_write(phy, 0, VSC8211_SIGDET_CTRL, 1)) != 0 ||
+ (err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0)) != 0 ||
+ (err = mdio_write(phy, 0, VSC8211_EXT_CTRL,
+ val | VSC_CTRL_CLAUSE37_VIEW)) != 0 ||
+ (err = vsc8211_reset(phy, 0)) != 0)
+ return err;
+
+ udelay(5); /* delay after reset before next SMI */
+ return 0;
}
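A short probe-time sketch of how the media auto-detection above is consumed; the surrounding port-probe plumbing (adapter, phy_addr, mdio_ops) is assumed to exist and the locals are illustrative:

	struct cphy phy;
	int err;

	err = t3_vsc8211_phy_prep(&phy, adapter, phy_addr, mdio_ops);
	if (err)
		return err;
	/* All later access goes through phy.ops, so the copper vs.
	 * 1000BASE-X decision made in phy_prep is transparent here. */
	err = phy.ops->reset(&phy, 0);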
/*
- * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
+ * This file is part of the Chelsio T3 Ethernet driver.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Copyright (C) 2005-2008 Chelsio Communications. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
*/
+
#include "common.h"
#include "regs.h"
static void xaui_serdes_reset(struct cmac *mac)
{
static const unsigned int clear[] = {
- F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
- F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
+ F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
+ F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
};
int i;
- struct adapter *adap = mac->adapter;
+ adapter_t *adap = mac->adapter;
u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
F_RESETPLL23 | F_RESETPLL01);
- t3_read_reg(adap, ctrl);
+ (void)t3_read_reg(adap, ctrl);
udelay(15);
for (i = 0; i < ARRAY_SIZE(clear); i++) {
}
}
+/**
+ * t3b_pcs_reset - reset the PCS on T3B+ adapters
+ * @mac: the XGMAC handle
+ *
+ * Reset the XGMAC PCS block on T3B+ adapters.
+ */
void t3b_pcs_reset(struct cmac *mac)
{
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
F_PCS_RESET_);
}
+/**
+ *	t3_mac_reset - reset a MAC
+ *	@mac: the MAC to reset
+ *
+ *	Reset the given MAC: Tx and Rx are disabled, the exact-match and
+ *	hash filters are cleared, and the statistics counters are reset.
+ */
int t3_mac_reset(struct cmac *mac)
{
- static const struct addr_val_pair mac_reset_avp[] = {
- {A_XGM_TX_CTRL, 0},
- {A_XGM_RX_CTRL, 0},
- {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
- F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
- {A_XGM_RX_HASH_LOW, 0},
- {A_XGM_RX_HASH_HIGH, 0},
- {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
- {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
- {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
- {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
- {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
- {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
- {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
- {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
- {A_XGM_STAT_CTRL, F_CLRSTATS}
+	static const struct addr_val_pair mac_reset_avp[] = {
+ { A_XGM_TX_CTRL, 0 },
+ { A_XGM_RX_CTRL, 0 },
+ { A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+ F_RMFCS | F_ENJUMBO | F_ENHASHMCAST },
+ { A_XGM_RX_HASH_LOW, 0 },
+ { A_XGM_RX_HASH_HIGH, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_1, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_2, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_3, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_4, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_5, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_6, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_7, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_8, 0 },
+ { A_XGM_STAT_CTRL, F_CLRSTATS }
};
u32 val;
- struct adapter *adap = mac->adapter;
+ adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
- t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
xaui_serdes_reset(mac);
}
+
+ if (mac->multiport) {
+ t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
+ MAX_FRAME_SIZE - 4);
+ t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0,
+ F_DISPREAMBLE);
+ t3_set_reg_field(adap, A_XGM_RX_CFG + oft, 0, F_COPYPREAMBLE |
+ F_ENNON802_3PREAMBLE);
+ t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft,
+ V_TXFIFOTHRESH(M_TXFIFOTHRESH),
+ V_TXFIFOTHRESH(64));
+ t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
+ t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
+ }
+
t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
- val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
- if (is_10G(adap))
+ val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
+ if (is_10G(adap) || mac->multiport)
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
- t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
if ((val & F_PCS_RESET_) && adap->params.rev) {
msleep(1);
t3b_pcs_reset(mac);
return 0;
}
-static int t3b2_mac_reset(struct cmac *mac)
+int t3b2_mac_reset(struct cmac *mac)
{
- struct adapter *adap = mac->adapter;
- unsigned int oft = mac->offset;
u32 val;
+ adapter_t *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+	/* Stop egress traffic to the XGMAC */
if (!macidx(mac))
t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
else
t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
+ /* PCS in reset */
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
- t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
msleep(10);
return -1;
}
- t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
- t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);	/* MAC in reset */
+ (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
val = F_MAC_RESET_;
if (is_10G(adap))
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
- t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
if ((val & F_PCS_RESET_) && adap->params.rev) {
msleep(1);
t3b_pcs_reset(mac);
}
t3_write_reg(adap, A_XGM_RX_CFG + oft,
- F_DISPAUSEFRAMES | F_EN1536BFRAMES |
- F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
+ F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
+	/* Resume egress traffic to the XGMAC */
if (!macidx(mac))
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
else
/*
* Set the exact match register 'idx' to recognize the given Ethernet address.
*/
-static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
+static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
{
u32 addr_lo, addr_hi;
unsigned int oft = mac->offset + idx * 8;
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}
-/* Set one of the station's unicast MAC addresses. */
+/**
+ * t3_mac_set_address - set one of the station's unicast MAC addresses
+ * @mac: the MAC handle
+ * @idx: index of the exact address match filter to use
+ * @addr: the Ethernet address
+ *
+ * Set one of the station's unicast MAC addresses.
+ */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
+ if (mac->multiport)
+ idx = mac->ext_port + idx * mac->adapter->params.nports;
if (idx >= mac->nucast)
return -EINVAL;
set_addr_filter(mac, idx, addr);
+ if (mac->multiport && idx < mac->adapter->params.nports)
+ t3_vsc7323_set_addr(mac->adapter, addr, idx);
return 0;
}
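/*
 * Worked example of the multiport remapping above (illustrative
 * numbers): with params.nports == 4, unicast slot idx == 1 on external
 * port 2 lands in exact-match filter 2 + 1 * 4 == 6, i.e. the per-port
 * addresses are interleaved across the shared filter table.
 */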
-/*
- * Specify the number of exact address filters that should be reserved for
- * unicast addresses. Caller should reload the unicast and multicast addresses
- * after calling this.
+/**
+ * t3_mac_set_num_ucast - set the number of unicast addresses needed
+ * @mac: the MAC handle
+ * @n: number of unicast addresses needed
+ *
+ * Specify the number of exact address filters that should be reserved for
+ * unicast addresses. Caller should reload the unicast and multicast
+ * addresses after calling this.
*/
-int t3_mac_set_num_ucast(struct cmac *mac, int n)
+int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n)
{
if (n > EXACT_ADDR_FILTERS)
return -EINVAL;
u32 v = t3_read_reg(mac->adapter, reg);
t3_write_reg(mac->adapter, reg, v);
}
- t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
+ t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
}
static void enable_exact_filters(struct cmac *mac)
u32 v = t3_read_reg(mac->adapter, reg);
t3_write_reg(mac->adapter, reg, v);
}
- t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
+ t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
}
/* Calculate the RX hash filter index of an Ethernet address */
-static int hash_hw_addr(const u8 * addr)
+static int hash_hw_addr(const u8 *addr)
{
int hash = 0, octet, bit, i = 0, c;
return hash;
}
+/**
+ * t3_mac_set_rx_mode - set the Rx mode and address filters
+ * @mac: the MAC to configure
+ * @rm: structure containing the Rx mode and MAC addresses needed
+ *
+ * Configures the MAC Rx mode (promiscuity, etc) and exact and hash
+ * address filters.
+ */
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
- u32 val, hash_lo, hash_hi;
- struct adapter *adap = mac->adapter;
+ u32 hash_lo, hash_hi;
+ adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
- val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
- if (rm->dev->flags & IFF_PROMISC)
- val |= F_COPYALLFRAMES;
- t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
+ if (promisc_rx_mode(rm))
+ mac->promisc_map |= 1 << mac->ext_port;
+ else
+ mac->promisc_map &= ~(1 << mac->ext_port);
+ t3_set_reg_field(adap, A_XGM_RX_CFG + oft, F_COPYALLFRAMES,
+ mac->promisc_map ? F_COPYALLFRAMES : 0);
- if (rm->dev->flags & IFF_ALLMULTI)
+ if (allmulti_rx_mode(rm) || mac->multiport)
hash_lo = hash_hi = 0xffffffff;
else {
u8 *addr;
return min(hwm, MAC_RXFIFO_SIZE - 8192);
}
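/*
 * Illustrative numbers for the watermark above, assuming the usual
 * 32 KB MAC Rx FIFO and a 1518-byte maximum frame:
 * max(32768 - 3 * 1518, 32768 * 38 / 100) == max(28214, 12451), then
 * min(28214, 32768 - 8192) == 24576, so PAUSE is asserted once less
 * than 8 KB of FIFO headroom remains.
 */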
+/**
+ * t3_mac_set_mtu - set the MAC MTU
+ * @mac: the MAC to configure
+ * @mtu: the MTU
+ *
+ * Sets the MAC MTU and adjusts the FIFO PAUSE watermarks accordingly.
+ */
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
int hwm, lwm, divisor;
int ipg;
unsigned int thres, v, reg;
- struct adapter *adap = mac->adapter;
+ adapter_t *adap = mac->adapter;
/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
* packet size register includes header, but not FCS.
*/
mtu += 14;
+ if (mac->multiport)
+ mtu += 8; /* for preamble */
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
- t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
-
- /*
- * Adjust the PAUSE frame watermarks. We always set the LWM, and the
- * HWM only if flow-control is enabled.
- */
- hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
- MAC_RXFIFO_SIZE * 38 / 100);
- hwm = min(hwm, MAC_RXFIFO_SIZE - 8192);
- lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
+ if (mac->multiport)
+ return t3_vsc7323_set_mtu(adap, mtu - 4, mac->ext_port);
if (adap->params.rev >= T3_REV_B2 &&
(t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
V_RXMAXPKTSIZE(mtu));
-
/*
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
* HWM only if flow-control is enabled.
*/
hwm = rx_fifo_hwm(mtu);
- lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
+		lwm = min(3 * (int) mtu, MAC_RXFIFO_SIZE / 4);
v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
v |= V_RXFIFOPAUSELWM(lwm / 8);
if (is_10G(adap))
thres /= 10;
thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
- thres = max(thres, 8U); /* need at least 8 */
+ thres = max(thres, 8U); /* need at least 8 */
ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));
+	/* Assuming a minimum drain rate of 2.5 Gbps... */
if (adap->params.rev > 0) {
divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
return 0;
}
+/**
+ * t3_mac_set_speed_duplex_fc - set MAC speed, duplex and flow control
+ * @mac: the MAC to configure
+ * @speed: the desired speed (10/100/1000/10000)
+ * @duplex: the desired duplex
+ * @fc: desired Tx/Rx PAUSE configuration
+ *
+ * Set the MAC speed, duplex (actually only full-duplex is supported), and
+ * flow control. If a parameter value is negative the corresponding
+ * MAC setting is left at its current value.
+ */
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
u32 val;
- struct adapter *adap = mac->adapter;
+ adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -EINVAL;
+ if (mac->multiport) {
+ val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
+ val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
+ val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
+ A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
+ t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
+
+ t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
+ F_TXPAUSEEN);
+ return t3_vsc7323_set_speed_fc(adap, speed, fc, mac->ext_port);
+ }
if (speed >= 0) {
if (speed == SPEED_10)
val = V_PORTSPEED(0);
val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
if (fc & PAUSE_TX)
- val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(
- t3_read_reg(adap,
- A_XGM_RX_MAX_PKT_SIZE
- + oft)) / 8);
+ val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
+ A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
- (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
+ (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
return 0;
}
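A hedged caller sketch: once the link partner's abilities are resolved, the negotiated settings would be pushed into the MAC roughly like this (values illustrative; a negative value leaves a setting untouched, per the comment above):

	/* 1 Gb/s full duplex with symmetric PAUSE. */
	t3_mac_set_speed_duplex_fc(mac, SPEED_1000, DUPLEX_FULL,
				   PAUSE_TX | PAUSE_RX);

	/* Change flow control only, leaving speed/duplex as they are. */
	t3_mac_set_speed_duplex_fc(mac, -1, -1, PAUSE_RX);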
+/**
+ * t3_mac_enable - enable the MAC in the given directions
+ * @mac: the MAC to configure
+ * @which: bitmap indicating which directions to enable
+ *
+ * Enables the MAC for operation in the given directions.
+ * %MAC_DIRECTION_TX enables the Tx direction, and %MAC_DIRECTION_RX
+ * enables the Rx one.
+ */
int t3_mac_enable(struct cmac *mac, int which)
{
int idx = macidx(mac);
- struct adapter *adap = mac->adapter;
+ adapter_t *adap = mac->adapter;
unsigned int oft = mac->offset;
struct mac_stats *s = &mac->stats;
+ if (mac->multiport)
+ return t3_vsc7323_enable(adap, mac->ext_port, which);
+
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
- t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
+ t3_write_reg(adap, A_TP_PIO_DATA,
+ adap->params.rev == T3_REV_C ?
+ 0xc4ffff01 : 0xc0ede401);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
- t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+ t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
+ adap->params.rev == T3_REV_C ?
+ 0 : 1 << idx);
t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
mac->tx_mcnt = s->tx_frames;
mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
- A_TP_PIO_DATA)));
+ A_TP_PIO_DATA)));
mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_TX_SPI4_SOP_EOP_CNT +
oft)));
return 0;
}
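/*
 * Typical usage (illustrative): enable both directions once the link
 * is up, and disable them symmetrically on teardown.
 *
 *	t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
 *	...
 *	t3_mac_disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
 */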
+/**
+ * t3_mac_disable - disable the MAC in the given directions
+ * @mac: the MAC to configure
+ * @which: bitmap indicating which directions to disable
+ *
+ * Disables the MAC in the given directions.
+ * %MAC_DIRECTION_TX disables the Tx direction, and %MAC_DIRECTION_RX
+ * disables the Rx one.
+ */
int t3_mac_disable(struct cmac *mac, int which)
{
- struct adapter *adap = mac->adapter;
+ adapter_t *adap = mac->adapter;
+
+ if (mac->multiport)
+ return t3_vsc7323_disable(adap, mac->ext_port, which);
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
int t3b2_mac_watchdog_task(struct cmac *mac)
{
- struct adapter *adap = mac->adapter;
- struct mac_stats *s = &mac->stats;
+ int status;
unsigned int tx_tcnt, tx_xcnt;
- unsigned int tx_mcnt = s->tx_frames;
- unsigned int rx_mcnt = s->rx_frames;
+ adapter_t *adap = mac->adapter;
+ struct mac_stats *s = &mac->stats;
+	unsigned int tx_mcnt, rx_mcnt;
unsigned int rx_xcnt;
- int status;
+ if (mac->multiport) {
+ tx_mcnt = t3_read_reg(adap, A_XGM_STAT_TX_FRAME_LOW);
+ rx_mcnt = t3_read_reg(adap, A_XGM_STAT_RX_FRAMES_LOW);
+ } else {
+ tx_mcnt = (unsigned int)s->tx_frames;
+ rx_mcnt = (unsigned int)s->rx_frames;
+ }
status = 0;
- tx_xcnt = 1; /* By default tx_xcnt is making progress */
- tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
- rx_xcnt = 1; /* By default rx_xcnt is making progress */
+	tx_xcnt = 1;		/* By default tx_xcnt is making progress */
+	tx_tcnt = mac->tx_tcnt;	/* If tx_mcnt is progressing ignore tx_tcnt */
+	rx_xcnt = 1;		/* By default rx_xcnt is making progress */
if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_TX_SPI4_SOP_EOP_CNT +
mac->offset)));
if (tx_xcnt == 0) {
t3_write_reg(adap, A_TP_PIO_ADDR,
- A_TP_TX_DROP_CNT_CH0 + macidx(mac));
+ A_TP_TX_DROP_CNT_CH0 + macidx(mac));
tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
- A_TP_PIO_DATA)));
+ A_TP_PIO_DATA)));
} else {
goto rxcheck;
}
rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_RX_SPI4_SOP_EOP_CNT +
mac->offset))) +
- (s->rx_fifo_ovfl -
- mac->rx_ocnt);
+ (s->rx_fifo_ovfl - mac->rx_ocnt);
mac->rx_ocnt = s->rx_fifo_ovfl;
} else
goto out;
- if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 &&
- mac->rx_xcnt == 0) {
- status = 2;
+ if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0) {
+ if (!mac->multiport)
+ status = 2;
goto out;
}
return status;
}
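/*
 * Return-value convention, as the checks above suggest: 0 means both
 * directions made progress; a non-zero status flags a MAC that appears
 * stuck (2 marks the Rx side) so the periodic caller can decide whether
 * to reset it. Multiport MACs are never flagged for an Rx reset here.
 */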
-/*
- * This function is called periodically to accumulate the current values of the
- * RMON counters into the port statistics. Since the packet counters are only
- * 32 bits they can overflow in ~286 secs at 10G, so the function should be
- * called more frequently than that. The byte counters are 45-bit wide, they
- * would overflow in ~7.8 hours.
+/**
+ * t3_mac_update_stats - accumulate MAC statistics
+ * @mac: the MAC handle
+ *
+ * This function is called periodically to accumulate the current values
+ * of the RMON counters into the port statistics. Since the packet
+ * counters are only 32 bits they can overflow in ~286 secs at 10G, so the
+ * function should be called more frequently than that. The byte counters
+ * are 45-bit wide, they would overflow in ~7.8 hours.
*/
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
u32 v, lo;
+ if (mac->multiport)
+ return t3_vsc7323_update_stats(mac);
+
RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
v &= 0x7fffffff;
mac->stats.rx_too_long += v;
- RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
- RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
- RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
- RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
- RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
- RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
+ RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
/* This counts error frames in general (bad FCS, underrun, etc). */
RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
- RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
- RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
- RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
- RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
- RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
- RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
+ RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
/* The next stat isn't clear-on-read. */
t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
- lo = (u32) mac->stats.rx_cong_drops;
- mac->stats.rx_cong_drops += (u64) (v - lo);
+ lo = (u32)mac->stats.rx_cong_drops;
+ mac->stats.rx_cong_drops += (u64)(v - lo);
return &mac->stats;
}
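/*
 * Back-of-the-envelope check on the bounds quoted above (illustrative
 * arithmetic): minimum-size frames at 10 Gb/s arrive at roughly
 * 14.9 Mpps, so a 32-bit frame counter wraps in about
 * 2^32 / 14.9e6 ~= 288 s; the 45-bit byte counters wrap in about
 * 2^45 / 1.25e9 B/s ~= 28000 s, i.e. ~7.8 hours.
 */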