xenbits.xensource.com Git - xenclient/kernel.git/commitdiff
imported patch bnx2i-1.3.5 sata_nv-disable-adma-by-dflt
author t_jeang <devnull@localhost>
Tue, 6 Jan 2009 12:06:00 +0000 (12:06 +0000)
committer t_jeang <devnull@localhost>
Tue, 6 Jan 2009 12:06:00 +0000 (12:06 +0000)
14 files changed:
buildconfigs/conf.linux-utility/bnx2i [new file with mode: 0644]
buildconfigs/conf.linux/bnx2i [new file with mode: 0644]
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/bnx2i/57xx_iscsi_constants.h [new file with mode: 0644]
drivers/scsi/bnx2i/57xx_iscsi_hsi.h [new file with mode: 0644]
drivers/scsi/bnx2i/Kconfig [new file with mode: 0644]
drivers/scsi/bnx2i/Makefile [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i.h [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_hwi.c [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_init.c [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_ioctl.h [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_iscsi.c [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_sysfs.c [new file with mode: 0644]

diff --git a/buildconfigs/conf.linux-utility/bnx2i b/buildconfigs/conf.linux-utility/bnx2i
new file mode 100644 (file)
index 0000000..d400977
--- /dev/null
@@ -0,0 +1 @@
+# CONFIG_SCSI_BNX2_ISCSI is not set
diff --git a/buildconfigs/conf.linux/bnx2i b/buildconfigs/conf.linux/bnx2i
new file mode 100644 (file)
index 0000000..7c597f4
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_SCSI_BNX2_ISCSI=m
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 99db502ca479a8eb439f92507e38da0675905b63..e526b4e178e2ddb94a223fd5f9805203a7fc228d 100644 (file)
@@ -1739,6 +1739,8 @@ config ZFCP
           called zfcp. If you want to compile it as a module, say M here
           and read <file:Documentation/modules.txt>.
 
+source "drivers/scsi/bnx2i/Kconfig"
+
 endmenu
 
 source "drivers/scsi/pcmcia/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index bcca39c3bcbf33096bb3fdc5c9a493996c2c9492..c9f4b786a3e7ec5d4b43ad66e84c37f0e8ee797e 100644 (file)
@@ -128,6 +128,7 @@ obj-$(CONFIG_SCSI_IPR)              += ipr.o
 obj-$(CONFIG_SCSI_IBMVSCSI)    += ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)      += hptiop.o
 obj-$(CONFIG_SCSI_STEX)                += stex.o
+obj-$(CONFIG_SCSI_BNX2_ISCSI)  += bnx2i/
 
 obj-$(CONFIG_ARM)              += arm/
 
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644 (file)
index 0000000..e143a32
--- /dev/null
@@ -0,0 +1,153 @@
+/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef __57XX_ISCSI_CONSTANTS_H_
+#define __57XX_ISCSI_CONSTANTS_H_
+
+/**
+* This file defines HSI constants for the iSCSI flows
+*/
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_CLEANUP_REQUEST    (7)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_CLEANUP_RESPONSE          (0x27)
+#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION    (0)
+
+/* iSCSI task types */
+#define ISCSI_TASK_TYPE_READ    (0)
+#define ISCSI_TASK_TYPE_WRITE   (1)
+#define ISCSI_TASK_TYPE_MPATH   (2)
+
+/* initial CQ sequence numbers */
+#define ISCSI_INITIAL_SN    (1)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_KWQE_LAYER_CODE   (6)
+
+/* KWQ (kernel work queue) request op codes */
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
+#define ISCSI_KWQE_OPCODE_UPDATE_CONN   (2)
+#define ISCSI_KWQE_OPCODE_DESTROY_CONN  (3)
+#define ISCSI_KWQE_OPCODE_INIT1         (4)
+#define ISCSI_KWQE_OPCODE_INIT2         (5)
+
+/* KCQ (kernel completion queue) response op codes */
+#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN  (0x10)
+#define ISCSI_KCQE_OPCODE_UPDATE_CONN   (0x12)
+#define ISCSI_KCQE_OPCODE_DESTROY_CONN  (0x13)
+#define ISCSI_KCQE_OPCODE_INIT          (0x14)
+#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK        (0x15)
+#define ISCSI_KCQE_OPCODE_TCP_RESET     (0x16)
+#define ISCSI_KCQE_OPCODE_TCP_SYN       (0x17)
+#define ISCSI_KCQE_OPCODE_TCP_FIN       (0X18)
+#define ISCSI_KCQE_OPCODE_TCP_ERROR     (0x19)
+#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define ISCSI_KCQE_OPCODE_ISCSI_ERROR   (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS                            (0x0)
+#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE                     (0x1)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE                  (0x2)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE                   (0x3)
+#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR                          (0x4)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR                        (0x5)
+#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR                       (0x6)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE     (0xa)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE                (0xb)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN               (0xc)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT                   (0xd)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN                (0xe)
+
+/* Response */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN            (0xf)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T              (0x10)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO  (0x2c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG  (0x2d)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0                 (0x11)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1                 (0x12)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2                 (0x13)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3                 (0x14)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4                 (0x15)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5                 (0x16)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6                 (0x17)
+
+/* Data-In */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN        (0x18)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN       (0x19)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO            (0x1a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV          (0x1b)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN                (0x1c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN      (0x1d)
+
+/* R2T */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF            (0x1f)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN                   (0x20)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN                 (0x21)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED       (0x24)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV           (0x25)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN         (0x26)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
+
+/* TMF */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN        (0x28)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN         (0x29)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN         (0x2a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP   (0x2b)
+
+/* IP/TCP processing errors: */
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT               (0x40)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS                (0x41)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG               (0x42)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS                (0x43)
+
+/* iSCSI licensing errors */
+/* general iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED                (0x50)
+/* additional LOM specific iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED              (0x51)
+
+/* SQ/RQ/CQ DB structure sizes */
+#define ISCSI_SQ_DB_SIZE    (16)
+#define ISCSI_RQ_DB_SIZE    (16)
+#define ISCSI_CQ_DB_SIZE    (80)
+
+#define ISCSI_SQN_TO_NOTIFY_NOT_VALID                                  0xFFFF
+
+/* Page size codes (for flags field in connection offload request) */
+#define ISCSI_PAGE_SIZE_256     (0)
+#define ISCSI_PAGE_SIZE_512     (1)
+#define ISCSI_PAGE_SIZE_1K      (2)
+#define ISCSI_PAGE_SIZE_2K      (3)
+#define ISCSI_PAGE_SIZE_4K      (4)
+#define ISCSI_PAGE_SIZE_8K      (5)
+#define ISCSI_PAGE_SIZE_16K     (6)
+#define ISCSI_PAGE_SIZE_32K     (7)
+#define ISCSI_PAGE_SIZE_64K     (8)
+#define ISCSI_PAGE_SIZE_128K    (9)
+#define ISCSI_PAGE_SIZE_256K    (10)
+#define ISCSI_PAGE_SIZE_512K    (11)
+#define ISCSI_PAGE_SIZE_1M      (12)
+#define ISCSI_PAGE_SIZE_2M      (13)
+#define ISCSI_PAGE_SIZE_4M      (14)
+#define ISCSI_PAGE_SIZE_8M      (15)
+
+/* Iscsi PDU related defines */
+#define ISCSI_HEADER_SIZE   (48)
+#define ISCSI_DIGEST_SHIFT  (2)
+#define ISCSI_DIGEST_SIZE   (4)
+
+#define B577XX_ISCSI_CONNECTION_TYPE   3
+
+#endif /*__57XX_ISCSI_CONSTANTS_H_ */
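
The ISCSI_PAGE_SIZE_* codes above encode powers of two starting at 256 bytes (size = 256 << code) and are carried in the flags field of the connection offload request. As an illustration (a hypothetical helper, not part of this patch), a driver could derive the code from a byte count like this:

/* Hypothetical helper: map a page size in bytes to an ISCSI_PAGE_SIZE_*
 * code, where size == 256 << code.  Returns -1 if the size is not an
 * exact power of two in the supported range (256 bytes to 8 MB).
 */
static int iscsi_page_size_to_code(unsigned int size)
{
	int code;

	for (code = ISCSI_PAGE_SIZE_256; code <= ISCSI_PAGE_SIZE_8M; code++)
		if (size == (256U << code))
			return code;
	return -1;
}

With 4 KB pages this returns 4, matching ISCSI_PAGE_SIZE_4K above.
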
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644 (file)
index 0000000..eea4830
--- /dev/null
@@ -0,0 +1,1524 @@
+/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef __57XX_ISCSI_HSI_LINUX_LE__ 
+#define __57XX_ISCSI_HSI_LINUX_LE__ 
+
+/*
+ * iSCSI Async CQE
+ */
+struct iscsi_async_msg {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 reserved2;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved5;
+       u8 err_code;
+       u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved4;
+       u8 err_code;
+       u16 reserved5;
+#endif
+       u32 reserved6;
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u8 async_event;
+       u8 async_vcode;
+       u16 param1;
+#elif defined(__LITTLE_ENDIAN)
+       u16 param1;
+       u8 async_vcode;
+       u8 async_event;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 param2;
+       u16 param3;
+#elif defined(__LITTLE_ENDIAN)
+       u16 param3;
+       u16 param2;
+#endif
+       u32 reserved7[3];
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Buffer Descriptor (BD)
+ */
+struct iscsi_bd {
+       u32 buffer_addr_hi;
+       u32 buffer_addr_lo;
+#if defined(__BIG_ENDIAN)
+       u16 reserved0;
+       u16 buffer_length;
+#elif defined(__LITTLE_ENDIAN)
+       u16 buffer_length;
+       u16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 reserved3;
+       u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+#elif defined(__LITTLE_ENDIAN)
+       u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+       u16 reserved3;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup SQ WQE
+ */
+struct iscsi_cleanup_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 reserved2[3];
+#if defined(__BIG_ENDIAN)
+       u16 reserved3;
+       u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+       u16 reserved3;
+#endif
+       u32 reserved4[10];
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved6;
+       u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved5;
+       u8 reserved6;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup CQE
+ */
+struct iscsi_cleanup_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 status;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 status;
+       u8 op_code;
+#endif
+       u32 reserved1[3];
+       u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5[7];
+#if defined(__BIG_ENDIAN)
+       u16 reserved6;
+       u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+       u16 reserved6;
+#endif
+       u32 cq_req_sn;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct iscsi_cmd_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+       u8 op_code;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 ud_buffer_offset;
+       u16 sd_buffer_offset;
+#elif defined(__LITTLE_ENDIAN)
+       u16 sd_buffer_offset;
+       u16 ud_buffer_offset;
+#endif
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved2;
+       u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+       u16 reserved2;
+#endif
+       u32 total_data_transfer_length;
+       u32 cmd_sn;
+       u32 reserved3;
+       u32 cdb[4];
+       u32 zero_fill;
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 sd_start_bd_index;
+       u8 ud_start_bd_index;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 ud_start_bd_index;
+       u8 sd_start_bd_index;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * task statistics for write response
+ */
+struct iscsi_write_resp_task_stat {
+       u32 num_data_ins;
+};
+
+/*
+ * task statistics for read response
+ */
+struct iscsi_read_resp_task_stat {
+#if defined(__BIG_ENDIAN)
+       u16 num_data_outs;
+       u16 num_r2ts;
+#elif defined(__LITTLE_ENDIAN)
+       u16 num_r2ts;
+       u16 num_data_outs;
+#endif
+};
+
+/*
+ * task statistics for iSCSI cmd response
+ */
+union iscsi_cmd_resp_task_stat {
+       struct iscsi_write_resp_task_stat write_stat;
+       struct iscsi_read_resp_task_stat read_stat;
+};
+
+/*
+ * SCSI Command CQE
+ */
+struct iscsi_cmd_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+       u8 response;
+       u8 status;
+#elif defined(__LITTLE_ENDIAN)
+       u8 status;
+       u8 response;
+       u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved2;
+       u32 residual_count;
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5[5];
+       union iscsi_cmd_resp_task_stat task_stat;
+       u32 reserved6;
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+       u16 reserved7;
+#endif
+       u32 cq_req_sn;
+};
+
+
+
+/*
+ * firmware middle-path request SQ WQE
+ */
+struct iscsi_fw_mp_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+       u16 hdr_opaque1;
+#elif defined(__LITTLE_ENDIAN)
+       u16 hdr_opaque1;
+       u8 op_attr;
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 hdr_opaque2[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved0;
+       u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+       u16 reserved0;
+#endif
+       u32 hdr_opaque3[4];
+       u32 resp_bd_list_addr_lo;
+       u32 resp_bd_list_addr_hi;
+       u32 resp_buffer;
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 reserved3;
+       u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+       u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+       u8 reserved3;
+       u16 reserved4;
+#endif
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved6;
+       u8 reserved5;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved5;
+       u8 reserved6;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * firmware response - CQE: used only by firmware
+ */
+struct iscsi_fw_response {
+       u32 hdr_dword1[2];
+       u32 hdr_exp_cmd_sn;
+       u32 hdr_max_cmd_sn;
+       u32 hdr_ttt;
+       u32 hdr_res_cnt;
+       u32 cqe_flags;
+#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
+#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
+#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
+#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
+#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
+#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
+       u32 stat_sn;
+       u32 hdr_dword2[2];
+       u32 hdr_dword3[2];
+       u32 task_stat;
+       u32 reserved0;
+       u32 hdr_itt;
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI KCQ CQE parameters
+ */
+union iscsi_kcqe_params {
+       u32 reserved0[4];
+};
+
+/*
+ * iSCSI KCQ CQE
+ */
+struct iscsi_kcqe {
+       u32 iscsi_conn_id;
+       u32 completion_status;
+       u32 iscsi_conn_context_id;
+       union iscsi_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+       u8 op_code;
+       u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+       u16 qe_self_seq;
+       u8 op_code;
+       u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+#endif
+};
+
+
+
+/*
+ * iSCSI KWQE header
+ */
+struct iscsi_kwqe_header {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+       u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+       u8 op_code;
+       u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * iSCSI firmware init request 1
+ */
+struct iscsi_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u8 reserved0;
+       u8 num_cqs;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_cqs;
+       u8 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 dummy_buffer_addr_lo;
+       u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u16 num_ccells_per_conn;
+       u16 num_tasks_per_conn;
+#elif defined(__LITTLE_ENDIAN)
+       u16 num_tasks_per_conn;
+       u16 num_ccells_per_conn;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 sq_wqes_per_page;
+       u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+       u16 sq_num_wqes;
+       u16 sq_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 cq_log_wqes_per_page;
+       u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+       u16 cq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+       u16 cq_num_wqes;
+       u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+       u8 cq_log_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 cq_num_pages;
+       u16 sq_num_pages;
+#elif defined(__LITTLE_ENDIAN)
+       u16 sq_num_pages;
+       u16 cq_num_pages;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 rq_buffer_size;
+       u16 rq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+       u16 rq_num_wqes;
+       u16 rq_buffer_size;
+#endif
+};
+
+/*
+ * iSCSI firmware init request 2
+ */
+struct iscsi_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 max_cq_sqn;
+#elif defined(__LITTLE_ENDIAN)
+       u16 max_cq_sqn;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 error_bit_map[2];
+       u32 reserved1[5];
+};
+
+/*
+ * Initial iSCSI connection offload request 1
+ */
+struct iscsi_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+       u16 iscsi_conn_id;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 sq_page_table_addr_lo;
+       u32 sq_page_table_addr_hi;
+       u32 cq_page_table_addr_lo;
+       u32 cq_page_table_addr_hi;
+       u32 reserved0[3];
+};
+
+/*
+ * iSCSI Page Table Entry (PTE)
+ */
+struct iscsi_pte {
+       u32 hi;
+       u32 lo;
+};
+
+/*
+ * Initial iSCSI connection offload request 2
+ */
+struct iscsi_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 rq_page_table_addr_lo;
+       u32 rq_page_table_addr_hi;
+       struct iscsi_pte sq_first_pte;
+       struct iscsi_pte cq_first_pte;
+       u32 num_additional_wqes;
+};
+
+
+/*
+ * Initial iSCSI connection offload request 3
+ */
+struct iscsi_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 reserved1;
+       struct iscsi_pte qp_first_pte[3];
+};
+
+
+/*
+ * iSCSI connection update request
+ */
+struct iscsi_kwqe_conn_update {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 session_error_recovery_level;
+       u8 max_outstanding_r2ts;
+       u8 reserved2;
+       u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+       u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+       u8 reserved2;
+       u8 max_outstanding_r2ts;
+       u8 session_error_recovery_level;
+#endif
+       u32 context_id;
+       u32 max_send_pdu_length;
+       u32 max_recv_pdu_length;
+       u32 first_burst_length;
+       u32 max_burst_length;
+       u32 exp_stat_sn;
+};
+
+/*
+ * iSCSI destroy connection request
+ */
+struct iscsi_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 context_id;
+       u32 reserved1[6];
+};
+
+/*
+ * iSCSI KWQ WQE
+ */
+union iscsi_kwqe {
+       struct iscsi_kwqe_init1 init1;
+       struct iscsi_kwqe_init2 init2;
+       struct iscsi_kwqe_conn_offload1 conn_offload1;
+       struct iscsi_kwqe_conn_offload2 conn_offload2;
+       struct iscsi_kwqe_conn_update conn_update;
+       struct iscsi_kwqe_conn_destroy conn_destroy;
+};
+
+
+
+
+
+
+
+
+
+/*
+ * iSCSI Login SQ WQE
+ */
+struct iscsi_login_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+       u8 version_max;
+       u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+       u8 version_min;
+       u8 version_max;
+       u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+       u16 isid_hi;
+       u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+       u16 tsih;
+       u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 reserved2;
+       u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+       u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 cid;
+       u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved3;
+       u16 cid;
+#endif
+       u32 cmd_sn;
+       u32 exp_stat_sn;
+       u32 reserved4;
+       u32 resp_bd_list_addr_lo;
+       u32 resp_bd_list_addr_hi;
+       u32 resp_buffer;
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+       u16 reserved8;
+       u8 reserved7;
+       u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+       u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+       u8 reserved7;
+       u16 reserved8;
+#endif
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved10;
+       u8 reserved9;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved9;
+       u8 reserved10;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Login CQE
+ */
+struct iscsi_login_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+       u8 version_max;
+       u8 version_active;
+#elif defined(__LITTLE_ENDIAN)
+       u8 version_active;
+       u8 version_max;
+       u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved3;
+       u8 err_code;
+       u8 reserved2;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved2;
+       u8 err_code;
+       u16 reserved3;
+#endif
+       u32 stat_sn;
+       u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+       u16 isid_hi;
+       u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+       u16 tsih;
+       u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 status_class;
+       u8 status_detail;
+       u16 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved4;
+       u8 status_detail;
+       u8 status_class;
+#endif
+       u32 reserved5[3];
+#if defined(__BIG_ENDIAN)
+       u16 reserved6;
+       u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+       u16 reserved6;
+#endif
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Logout SQ WQE
+ */
+struct iscsi_logout_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved2;
+       u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+       u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 cid;
+       u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved3;
+       u16 cid;
+#endif
+       u32 cmd_sn;
+       u32 reserved4[5];
+       u32 zero_fill;
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved6;
+       u8 reserved5;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved5;
+       u8 reserved6;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Logout CQE
+ */
+struct iscsi_logout_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u8 response;
+       u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved0;
+       u8 response;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 reserved2;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved5;
+       u8 err_code;
+       u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved4;
+       u8 err_code;
+       u16 reserved5;
+#endif
+       u32 reserved6[3];
+#if defined(__BIG_ENDIAN)
+       u16 time_to_wait;
+       u16 time_to_retain;
+#elif defined(__LITTLE_ENDIAN)
+       u16 time_to_retain;
+       u16 time_to_wait;
+#endif
+       u32 reserved7[3];
+#if defined(__BIG_ENDIAN)
+       u16 reserved8;
+       u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+       u16 reserved8;
+#endif
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Nop-In CQE
+ */
+struct iscsi_nop_in_msg {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 ttt;
+       u32 reserved2;
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5;
+       u32 lun[2];
+       u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+       u16 reserved7;
+#endif
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI NOP-OUT SQ WQE
+ */
+struct iscsi_nop_out_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved2;
+       u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+       u16 reserved2;
+#endif
+       u32 ttt;
+       u32 cmd_sn;
+       u32 reserved3[2];
+       u32 resp_bd_list_addr_lo;
+       u32 resp_bd_list_addr_hi;
+       u32 resp_buffer;
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u8 reserved6;
+       u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+       u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+       u8 reserved6;
+       u16 reserved7;
+#endif
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved9;
+       u8 reserved8;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved8;
+       u8 reserved9;
+       u8 cq_index;
+#endif
+};
+
+
+
+
+/*
+ * iSCSI Reject CQE
+ */
+struct iscsi_reject_msg {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u8 reason;
+       u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved0;
+       u8 reason;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5[8];
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI TMF SQ WQE
+ */
+struct iscsi_tmf_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved1;
+       u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+       u16 reserved1;
+#endif
+       u32 ref_itt;
+       u32 cmd_sn;
+       u32 reserved2;
+       u32 ref_cmd_sn;
+       u32 reserved3[3];
+       u32 zero_fill;
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved5;
+       u8 reserved4;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved4;
+       u8 reserved5;
+       u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Text SQ WQE
+ */
+struct iscsi_text_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved3;
+       u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+       u16 reserved3;
+#endif
+       u32 ttt;
+       u32 cmd_sn;
+       u32 reserved4[2];
+       u32 resp_bd_list_addr_lo;
+       u32 resp_bd_list_addr_hi;
+       u32 resp_buffer;
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
+       u32 zero_fill;
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved7;
+       u8 reserved6;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved6;
+       u8 reserved7;
+       u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI SQ WQE
+ */
+union iscsi_request {
+       struct iscsi_cmd_request cmd;
+       struct iscsi_tmf_request tmf;
+       struct iscsi_nop_out_request nop_out;
+       struct iscsi_login_request login_req;
+       struct iscsi_text_request text;
+       struct iscsi_logout_request logout_req;
+       struct iscsi_cleanup_request cleanup;
+};
+
+
+/*
+ * iSCSI TMF CQE
+ */
+struct iscsi_tmf_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u8 response;
+       u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved0;
+       u8 response;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 reserved2;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved5;
+       u8 err_code;
+       u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved4;
+       u8 err_code;
+       u16 reserved5;
+#endif
+       u32 reserved6[7];
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+       u16 reserved7;
+#endif
+       u32 cq_req_sn;
+};
+
+/*
+ * iSCSI Text CQE
+ */
+struct iscsi_text_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 ttt;
+       u32 reserved2;
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5;
+       u32 lun[2];
+       u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+       u16 reserved7;
+#endif
+       u32 cq_req_sn;
+};
+
+/*
+ * iSCSI CQE
+ */
+union iscsi_response {
+       struct iscsi_cmd_response cmd;
+       struct iscsi_tmf_response tmf;
+       struct iscsi_login_response login_resp;
+       struct iscsi_text_response text;
+       struct iscsi_logout_response logout_resp;
+       struct iscsi_cleanup_response cleanup;
+       struct iscsi_reject_msg reject;
+       struct iscsi_async_msg async;
+       struct iscsi_nop_in_msg nop_in;
+};
+
+
+
+
+
+
+#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
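
Two conventions recur in the structures above: sub-word fields within each 32-bit word are declared in reverse order for __BIG_ENDIAN versus __LITTLE_ENDIAN builds, and the 16-bit itt field of each request/response packs a 14-bit task index with a 2-bit task type through the per-structure *_INDEX and *_TYPE masks. The sketch below (hypothetical, not part of this patch) shows how a driver might compose and decompose such an itt, using the iscsi_cmd_request masks as the example:

/* Hypothetical illustration: pack and unpack the 14-bit index and 2-bit
 * type that share the 16-bit itt field, using the SCSI command WQE masks.
 */
static u16 itt_pack(u16 index, u8 type)
{
	return (index & ISCSI_CMD_REQUEST_INDEX) |
	       (((u16)type << ISCSI_CMD_REQUEST_TYPE_SHIFT) &
		ISCSI_CMD_REQUEST_TYPE);
}

static void itt_unpack(u16 itt, u16 *index, u8 *type)
{
	*index = itt & ISCSI_CMD_REQUEST_INDEX;
	*type  = (itt & ISCSI_CMD_REQUEST_TYPE) >> ISCSI_CMD_REQUEST_TYPE_SHIFT;
}

The same pattern applies to the login, logout, NOP-out, TMF, and text WQEs, whose *_INDEX and *_TYPE masks carry identical values.
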
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644 (file)
index 0000000..7690eb8
--- /dev/null
@@ -0,0 +1,7 @@
+config SCSI_BNX2_ISCSI
+       tristate "Broadcom NetXtreme II iSCSI support"
+       select CNIC
+       select SCSI_ISCSI_ATTRS
+       ---help---
+       This driver supports iSCSI offload for the Broadcom NetXtreme II
+       devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644 (file)
index 0000000..8d0caa4
--- /dev/null
@@ -0,0 +1,6 @@
+bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
+
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
+
+EXTRA_CFLAGS += -ffast-math -mhard-float -I$(srctree)/drivers/net -D_SYSFS_INCL_
+EXTRA_CFLAGS += -D_CREATE_SESS_NEW_
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644 (file)
index 0000000..49c884b
--- /dev/null
@@ -0,0 +1,1283 @@
+/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#ifndef _BNX2I_H_
+#define _BNX2I_H_
+
+#include <linux/version.h>
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <net/tcp.h>
+#include <linux/if_vlan.h>
+
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <asm/semaphore.h>
+#include <linux/bitops.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/iscsi_proto.h>
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+#include <asm/compat.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#include "../../net/cnic_if.h"
+#else
+#include <cnic_if.h>
+#endif
+#include "57xx_iscsi_hsi.h"
+#include "57xx_iscsi_constants.h"
+#include "bnx2i_ioctl.h"
+
+#define BNX2_ISCSI_DRIVER_NAME                 "bnx2i"
+
+#ifndef PCI_DEVICE_ID_NX2_5709
+#define PCI_DEVICE_ID_NX2_5709                 0x1639
+#endif
+
+#ifndef PCI_DEVICE_ID_NX2_5709S
+#define PCI_DEVICE_ID_NX2_5709S                        0x163a
+#endif
+
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710                0x164e
+#endif
+
+#define ISCSI_MAX_CONNS_PER_HBA                        128
+#define ISCSI_MAX_SESS_PER_HBA                 ISCSI_MAX_CONNS_PER_HBA
+#define ISCSI_MAX_CMDS_PER_SESS                        128
+
+#define ISCSI_MAX_BDS_PER_CMD                  32
+
+#define MAX_PAGES_PER_CTRL_STRUCT_POOL         16
+#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS     4
+
+/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
+#define MAX_BD_LENGTH                          65535
+#define BD_SPLIT_SIZE                          32768
+
+/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
+#define BNX2I_SQ_WQES_MIN                      16
+#define BNX2I_SQ_WQES_MAX                      512
+#define BNX2I_570x_SQ_WQES_DEFAULT             128
+#define BNX2I_5770x_SQ_WQES_DEFAULT            256
+
+#define BNX2I_CQ_WQES_MIN                      16
+#define BNX2I_CQ_WQES_MAX                      256
+#define BNX2I_CQ_WQES_DEFAULT                  128
+
+#define BNX2I_RQ_WQES_MIN                      16
+#define BNX2I_RQ_WQES_MAX                      32
+#define BNX2I_RQ_WQES_DEFAULT                  16
+
+/* CCELLs per conn */
+#define BNX2I_CCELLS_MIN                       16
+#define BNX2I_CCELLS_MAX                       96
+#define BNX2I_CCELLS_DEFAULT                   64
+
+#define ISCSI_CONN_LOGIN_BUF_SIZE              16384
+#define ITT_INVALID_SIGNATURE                  0xFFFF
+
+#define ISCSI_CMD_CLEANUP_TIMEOUT              100
+
+#define BNX2I_CONN_CTX_BUF_SIZE                        16384
+
+#define BNX2I_SQ_WQE_SIZE                      64
+#define BNX2I_RQ_WQE_SIZE                      256
+#define BNX2I_CQE_SIZE                         64
+
+#define MB_KERNEL_CTX_SHIFT                    8
+#define MB_KERNEL_CTX_SIZE                     (1 << MB_KERNEL_CTX_SHIFT)
+
+#define CTX_SHIFT                              7
+#define GET_CID_NUM(cid_addr)                  ((cid_addr) >> CTX_SHIFT)
+
+#define CTX_OFFSET                             0x10000
+#define MAX_CID_CNT                            0x4000
+
+#define BNX2_TXP_SCRATCH                       0x00060000
+#define BNX2_TPAT_SCRATCH                      0x000a0000
+#define BNX2_RXP_SCRATCH                       0x000e0000
+#define BNX2_COM_SCRATCH                       0x00120000
+#define BNX2_CP_SCRATCH                                0x001a0000
+
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS         0x00000078
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS_VAL     (0xfffffL<<2)
+#define BNX2_PCICFG_REG_WINDOW                 0x00000080
+
+/* 5709 context registers */
+#define BNX2_MQ_CONFIG2                                0x00003d00
+#define BNX2_MQ_CONFIG2_CONT_SZ                        (0x7L<<4)
+#define BNX2_MQ_CONFIG2_FIRST_L4L5             (0x1fL<<8)
+
+/* 57710's BAR2 is mapped to doorbell registers */
+#define BNX2X_DOORBELL_PCI_BAR                 2
+#define BNX2X_MAX_CQS                          8
+
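+/* compat fallbacks for older kernels that do not define these DMA masks */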
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK                                 ((u64) 0xffffffffffffffffULL)
+#define DMA_32BIT_MASK                                 ((u64) 0x00000000ffffffffULL)
+#endif
+
+#ifndef DMA_40BIT_MASK
+#define DMA_40BIT_MASK                                 ((u64) 0x000000ffffffffffULL)
+#endif
+
+#define CNIC_ARM_CQE                   1
+#define CNIC_DISARM_CQE                        0
+
+#define REG_RD(__hba, offset)                          \
+               readl(__hba->regview + offset)
+#define REG_WR(__hba, offset, val)                     \
+               writel(val, __hba->regview + offset)
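+
+/*
+ * Illustrative usage sketch: indirect chip register access typically goes
+ * through the PCI register window, e.g.
+ *     REG_WR(hba, BNX2_PCICFG_REG_WINDOW_ADDRESS, window_addr);
+ *     val = REG_RD(hba, BNX2_PCICFG_REG_WINDOW);
+ * ('window_addr' and 'val' are hypothetical locals, shown for illustration)
+ */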
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+
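+/*
+ * Compat shims: provide the scsi data-buffer accessors introduced by newer
+ * kernels (scsi_sg_count/scsi_sglist/scsi_bufflen and friends) on older ones
+ */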
+#define scsi_sg_count(cmd) ((cmd)->use_sg)
+#define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
+#define scsi_bufflen(cmd) ((cmd)->request_bufflen)
+
+#ifdef _DEFINE_SCSI_SET_RESID
+static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
+{
+        cmd->resid = resid;
+}
+#endif
+
+#ifdef _DEFINE_SCSI_GET_RESID
+static inline int scsi_get_resid(struct scsi_cmnd *cmd)
+{
+        return cmd->resid;
+}
+#endif
+
+#define scsi_for_each_sg(cmd, sg, nseg, __i)                    \
+        for (__i = 0, sg = scsi_sglist(cmd); __i < (nseg); __i++, (sg)++)
+
+#endif
+
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @pdu_hdr:            buffer to copy iscsi header prepared by 'iscsid'
+ * @cmd:                iSCSI command pointer
+ * @login_itt:          iSCSI ITT to be used with login exchanges
+ * @req_buf:            driver buffer used to stage payload associated with
+ *                      the login request
+ * @req_dma_addr:       dma address for iscsi login request payload buffer
+ * @req_buf_size:       actual login request payload length
+ * @req_wr_ptr:         pointer into login request buffer when next data is
+ *                      to be written
+ * @resp_hdr:           iscsi header where iscsi login response header is to
+ *                      be recreated
+ * @resp_buf:           buffer to stage login response payload
+ * @resp_dma_addr:      login response payload buffer dma address
+ * @resp_buf_size:      login response payload length
+ * @resp_wr_ptr:        pointer into login response buffer when next data is
+ *                      to be written
+ * @req_bd_tbl:         BD table to indicate login request payload buffer details
+ * @req_bd_dma:         login request BD table dma address
+ * @resp_bd_tbl:        BD table to indicate login response payload buffer details
+ * @resp_bd_dma:        login response BD table dma address
+ *
+ * following structure defines buffer info for generic pdus such as iSCSI Login,
+ *     Logout and NOP
+ */
+struct generic_pdu_resc {
+       struct iscsi_hdr pdu_hdr;
+       struct bnx2i_cmd *cmd;
+       u32 login_itt;
+       char *req_buf;
+       dma_addr_t req_dma_addr;
+       u32 req_buf_size;
+       char *req_wr_ptr;
+       struct iscsi_hdr resp_hdr;
+       char *resp_buf;
+       dma_addr_t resp_dma_addr;
+       u32 resp_buf_size;
+       char *resp_wr_ptr;
+       char *req_bd_tbl;
+       dma_addr_t req_bd_dma;
+       char *resp_bd_tbl;
+       dma_addr_t resp_bd_dma;
+};
+
+
+/**
+ * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
+ *
+ * @link:               list head to link elements
+ * @max_ptrs:           maximum pointers that can be stored in this page
+ * @num_valid:          number of valid pointers in this page
+ * @page:               base address for page pointer array
+ *
+ * structure to track DMA'able memory allocated for command BD tables
+ */
+struct bd_resc_page {
+       struct list_head link;
+       u32 max_ptrs;
+       u32 num_valid;
+       void *page[1];
+};
+
+
+/**
+ * struct io_bdt - I/O buffer descriptor table
+ *
+ * @link:               list head to link elements
+ * @bd_tbl:             BD table's virtual address
+ * @bd_tbl_dma:         BD table's dma address
+ * @cmdp:               command structure this BD table is allocated for
+ * @max_bd_cnt:         max BD entries in this table
+ * @bd_valid:           num valid BD entries
+ *
+ * IO BD table
+ */
+struct io_bdt {
+       struct list_head link;
+       struct iscsi_bd *bd_tbl;
+       dma_addr_t bd_tbl_dma;
+       struct bnx2i_cmd *cmdp;
+       u16 max_bd_cnt;
+       u16 bd_valid;
+};
+
+
+/**
+ * struct bnx2i_cmd - iscsi command structure
+ *
+ * @link:               list head to link elements
+ * @iscsi_opcode:       iscsi command opcode, NOPIN, LOGIN, SCSICMD, etc.
+ * @cmd_state:          command state tracking flag
+ * @scsi_status_rcvd:   flag determines whether SCSI response is received
+ *                      for this task or not
+ * @scsi_cmd:           SCSI-ML task pointer corresponding to this iscsi cmd
+ * @tmf_ref_itt:        reference ITT of the command being aborted
+ * @tmf_ref_cmd:        pointer of the command being aborted by this command
+ * @tmf_ref_sc:         SCSI-ML's task pointer of aborted command
+ * @sg:                 SG list
+ * @bd_tbl:             buffer descriptor (BD) table
+ * @bd_tbl_dma:         buffer descriptor (BD) table's dma address
+ */
+struct bnx2i_cmd {
+       struct list_head link;
+       u8 iscsi_opcode;
+       u16 rsvd1[3];
+
+       u32 cmd_state;
+               #define ISCSI_CMD_STATE_INITIATED               0x001
+               #define ISCSI_CMD_STATE_ABORT_PEND              0x002
+               #define ISCSI_CMD_STATE_ABORT_COMPL             0x004
+               #define ISCSI_CMD_STATE_CLEANUP_PEND            0x008
+               #define ISCSI_CMD_STATE_CLEANUP_CMPL            0x010
+               #define ISCSI_CMD_STATE_FAILED                  0x100
+               #define ISCSI_CMD_STATE_TMF_TIMEOUT             0x200
+               #define ISCSI_CMD_STATE_COMPLETED               0x800
+       int scsi_status_rcvd;
+
+       struct bnx2i_conn *conn;
+       struct scsi_cmnd *scsi_cmd;
+       u32 tmf_ref_itt;
+       struct bnx2i_cmd *tmf_ref_cmd;
+       struct scsi_cmnd *tmf_ref_sc;
+
+       struct scatterlist *sg;
+       struct io_bdt *bd_tbl;
+       dma_addr_t bd_tbl_dma;
+       u32 reserved0;
+
+       struct iscsi_cmd_request req;
+};
+
+
+/*
+ * TCP port manager
+ */
+struct tcp_port_mngt {
+       int num_required;
+       u32 port_tbl_size;
+       u32 num_free_ports;
+       u32 prod_idx;
+       u32 cons_idx;
+       u32 max_idx;
+       u16 *free_q;
+};
+
+
+/**
+ * struct bnx2i_conn - iscsi connection structure
+ *
+ * @link:                  list head to link elements
+ * @sess:                  iscsi session pointer
+ * @cls_conn:              pointer to iscsi cls conn
+ * @state:                 flag to trace command state
+ * @stop_state:            stop state requested by open-iscsi
+ * @stage:                 iscsi login state
+ * @in_shutdown:           flag to indicate connection is in shutdown mode
+ * @lead_conn:             lead iscsi connection of session
+ * @conn_cid:              iscsi cid per rfc
+ * @exp_statsn:            iscsi expected statsn
+ * @header_digest_en:      header digest parameter
+ * @data_digest_en:        data digest parameter
+ * @max_data_seg_len_xmit: iscsi initiator's mrdsl
+ * @max_data_seg_len_recv: iscsi target's mrdsl
+ * @ifmarker_enable:       ifmarker parameter
+ * @ofmarker_enable:       ofmarker parameter
+ * @persist_port:          iscsi target side TCP port number
+ * @persist_address:       iscsi target's IP address
+ * @iscsi_conn_cid:        iscsi conn id
+ * @fw_cid:                firmware iscsi context id
+ * @lock:                  lock to synchronize access
+ * @ep:                    endpoint structure pointer
+ * @gen_pdu:               login/nopout/logout pdu resources
+ * @nopout_num_scsi_cmds:  scsi cmds issue counter to detect idle link
+ * @total_data_octets_sent:conn stats - data bytes sent on this conn
+ * @total_data_octets_rcvd:conn stats - data bytes received on this conn
+ * @num_login_req_pdus:    conn stats - num login pdus sent
+ * @num_login_resp_pdus:   conn stats - num login pdus received
+ * @num_scsi_cmd_pdus:     conn stats - num scsicmd pdus sent
+ * @num_scsi_resp_pdus:    conn stats - num scsicmd pdus received
+ * @num_nopout_pdus:       conn stats - num nopout pdus sent
+ * @num_nopin_pdus:        conn stats - num nopin pdus received
+ * @num_reject_pdus:       conn stats - num reject pdus received
+ * @num_async_pdus:        conn stats - num async pdus received
+ * @num_dataout_pdus:      conn stats - num dout pdus sent
+ * @num_r2t_pdus:          conn stats - num r2t pdus received
+ * @num_datain_pdus:       conn stats - num din pdus received
+ * @num_snack_pdus:        conn stats - num snack pdus received
+ * @num_text_req_pdus:     conn stats - num text pdus sent
+ * @num_text_resp_pdus:    conn stats - num text pdus received
+ * @num_tmf_req_pdus:      conn stats - num tmf pdus sent
+ * @num_tmf_resp_pdus:     conn stats - num tmf pdus received
+ * @num_logout_req_pdus:   conn stats - num logout pdus sent
+ * @num_logout_resp_pdus:  conn stats - num logout pdus received
+ *
+ * iSCSI connection structure
+ */
+struct bnx2i_conn {
+       struct list_head link;
+       struct bnx2i_sess *sess;
+       struct iscsi_cls_conn *cls_conn;
+
+       u32 state;
+               #define CONN_STATE_IDLE                         0x00
+               #define CONN_STATE_XPORT_READY                  0x01
+               #define CONN_STATE_IN_LOGIN                     0x02
+               #define CONN_STATE_FFP_STATE                    0x04
+               #define CONN_STATE_IN_LOGOUT                    0x08
+               #define CONN_STATE_IN_CLEANUP                   0x10
+               #define CONN_STATE_XPORT_FREEZE                 0x20
+               #define CONN_STATE_STOPPED                      0x80
+       u32 stop_state;
+       u32 stage;
+       u32 in_shutdown;
+
+       u32 lead_conn;
+       u32 conn_cid;
+
+       /*
+        * Following are iSCSI sequencing & operational parameters
+        */
+       u32 exp_statsn;
+               #define STATSN_UPDATE_SIGNATURE         0xFABCAFE
+       u32 header_digest_en;
+       u32 data_digest_en;
+       u32 max_data_seg_len_xmit;      /* Target */
+       u32 max_data_seg_len_recv;      /* Initiator */
+       int ifmarker_enable;
+       int ofmarker_enable;
+       int persist_port;
+       char *persist_address;
+
+       u32 iscsi_conn_cid;
+               #define BNX2I_CID_RESERVED      0x5AFF
+       u32 fw_cid;
+
+       spinlock_t lock; /* protects conn structure elements */
+
+       /*
+        * Queue Pair (QP) related structure elements.
+        */
+       struct bnx2i_endpoint *ep;
+
+       /*
+        * Buffer for login negotiation process
+        */
+       struct generic_pdu_resc gen_pdu;
+
+       u32 nopout_num_scsi_cmds;
+       /*
+        * Connection Statistics
+        */
+       u64 total_data_octets_sent;
+       u64 total_data_octets_rcvd;
+       u32 num_login_req_pdus;
+       u32 num_login_resp_pdus;
+       u32 num_scsi_cmd_pdus;
+       u32 num_scsi_resp_pdus;
+       u32 num_nopout_pdus;
+       u32 num_nopin_pdus;
+       u32 num_reject_pdus;
+       u32 num_async_pdus;
+       u32 num_dataout_pdus;
+       u32 num_r2t_pdus;
+       u32 num_datain_pdus;
+       u32 num_snack_pdus;
+       u32 num_text_req_pdus;
+       u32 num_text_resp_pdus;
+       u32 num_tmf_req_pdus;
+       u32 num_tmf_resp_pdus;
+       u32 num_logout_req_pdus;
+       u32 num_logout_resp_pdus;
+};
+
+
+
+
+/**
+ * struct itt_queue - free ITT queue management structure
+ *
+ * @itt_que_base:           queue base memory
+ * @itt_que:                queue memory pointer
+ * @itt_q_prod_idx:         producer index
+ * @itt_q_cons_idx:         consumer index
+ * @itt_q_max_idx:          max index. used to detect wrap around condition
+ * @itt_q_count:            queue size
+ * @itt_cmd:                ITT to command structure mapping table
+ */
+struct itt_queue {
+       void *itt_que_base;
+       u32 *itt_que;
+       u32 itt_q_prod_idx;
+       u32 itt_q_cons_idx;
+       u32 itt_q_max_idx;
+       u32 itt_q_count;
+       struct bnx2i_cmd **itt_cmd;
+               #define get_cmnd(sess, itt)     sess->itt_q.itt_cmd[itt]
+};
+
+
+/**
+ * struct bnx2i_sess - iscsi session structure
+ *
+ * @link:                  list head to link elements
+ * @hba:                   adapter structure pointer
+ * @host:                  scsi host pointer
+ * @state:                 flag to track session state
+ * @recovery_state:        recovery state identifier
+ * @old_recovery_state:    old recovery state identifier
+ * @tmf_active:            TMF is active on this session
+ * @lock:                  session lock to synchronize access
+ * @abort_timer:           TMF timer
+ * @er_wait:               wait queue for recovery process
+ * @cmd_pages:             table to track pages allocated for cmd struct
+ * @pend_cmds:             pending command list
+ * @num_pend_cmds:         number of pending commands
+ * @free_cmds:             free command list
+ * @num_free_cmds:         num free commands
+ * @allocated_cmds:        total number of allocated commands
+ * @sq_size:               SQ size
+ * @itt_q:                 ITT queue
+ * @bd_resc_page:          table to track BD resource page memory
+ * @bd_tbl_list:           BD table list
+ * @bd_tbl_active:         active BD table list
+ * @active_cmds:           active command list
+ * @num_active_cmds:       num active commands
+ * @cmdsn:                 iscsi command sequence number
+ * @exp_cmdsn:             iscsi expected command sequence number
+ * @max_cmdsn:             iscsi max command sequence number
+ * @initial_r2t:           initial R2T is enabled/disabled
+ * @max_r2t:               maximum outstanding R2T
+ * @imm_data:              indicates if immediate data is enabled
+ * @first_burst_len:       negotiated first burst length
+ * @max_burst_len:         negotiated max burst length
+ * @time2wait:             time 2 wait value
+ * @time2retain:           time 2 retain value
+ * @pdu_inorder:           indicates if PDU order needs to be maintained
+ * @dataseq_inorder:       indicates if data sequence order needs to be
+ *                         maintained
+ * @erl:                   supported error recovery level
+ * @tgt_prtl_grp:          target portal group tag
+ * @target_name:           target name
+ * @isid:                  isid for this session
+ * @tsih:                  target returned TSIH
+ * @lead_conn:             points to lead connection pointer
+ * @conn_list:             list of connection belonging to this session
+ * @num_active_conn:       num active connections
+ * @max_conns:             maximum connections per session
+ * @violation_notified:    bit mask used to track iscsi error/warning messages
+ *                         already printed out
+ * iSCSI Session Structure
+ */
+struct bnx2i_sess {
+       struct list_head link;
+       struct bnx2i_hba *hba;
+       struct Scsi_Host *host;
+
+       u32 state;
+               #define BNX2I_SESS_INITIAL              0x01
+               #define BNX2I_SESS_IN_FFP               0x02
+               #define BNX2I_SESS_NEED_RECOVERY        0x04
+               #define BNX2I_SESS_IN_RECOVERY          0x08
+               #define BNX2I_SESS_IN_SHUTDOWN          0x10
+               #define BNX2I_SESS_IN_LOGOUT            0x40
+               #define is_sess_active(_sess)   \
+                       (((_sess)->state & BNX2I_SESS_IN_FFP))
+       unsigned long recovery_state;
+               #define ISCSI_SESS_RECOVERY_START       0x01
+               #define ISCSI_SESS_RECOVERY_OPEN_ISCSI  0x02
+               #define ISCSI_SESS_RECOVERY_COMPLETE    0x04
+               #define ISCSI_SESS_RECOVERY_FAILED      0x08
+       unsigned long old_recovery_state;
+       int tmf_active;
+
+       spinlock_t lock;        /* protects session structure */
+
+       /* Command abort timer */
+       struct timer_list abort_timer;
+       /* event wait queue used during error recovery */
+       wait_queue_head_t er_wait;
+
+       /*
+        * Per session command (task) structure management
+        */
+       void *cmd_pages[MAX_PAGES_PER_CTRL_STRUCT_POOL];
+       struct list_head pend_cmds;
+       u32 num_pend_cmds;
+       struct list_head free_cmds;
+       int num_free_cmds;
+       int allocated_cmds;
+
+       int sq_size;
+       struct itt_queue itt_q;
+               #define MAX_BD_RESOURCE_PAGES           8
+
+       struct list_head bd_resc_page;
+       struct list_head bd_tbl_list;
+       struct list_head bd_tbl_active;
+
+       /*
+        * command queue, management parameters
+        */
+       struct list_head active_cmds;
+       u32 num_active_cmds;
+
+       /*
+        * iSCSI session related sequencing parameters.
+        */
+       unsigned int cmdsn;
+       unsigned int exp_cmdsn;
+       unsigned int max_cmdsn;
+
+       /*
+        * Following pointers are linked to corresponding entry in
+        * operational parameter table associated with this session.
+        * These are to be filled when session becomes operational (FFP).
+        */
+       int initial_r2t;
+       int max_r2t;
+       int imm_data;
+       u32 first_burst_len;
+       u32 max_burst_len;
+       int time2wait;
+       int time2retain;
+       int pdu_inorder;
+       int dataseq_inorder;
+       int erl;
+       int tgt_prtl_grp;
+       char *target_name;
+
+       unsigned char isid[8];
+       unsigned short tsih;
+
+       struct bnx2i_conn *lead_conn;
+       struct list_head conn_list;
+       u32 num_active_conn;
+       u32 max_conns;
+
+       /* Driver private statistics */
+       u64 violation_notified;
+};
+
+
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base:           queue base memory
+ * @cid_que:                queue memory pointer
+ * @cid_q_prod_idx:         producer index
+ * @cid_q_cons_idx:         consumer index
+ * @cid_q_max_idx:          max index. used to detect wrap around condition
+ * @cid_free_cnt:           queue size
+ * @conn_cid_tbl:           iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+       void *cid_que_base;
+       u32 *cid_que;
+       u32 cid_q_prod_idx;
+       u32 cid_q_cons_idx;
+       u32 cid_q_max_idx;
+       u32 cid_free_cnt;
+       struct bnx2i_conn **conn_cid_tbl;
+};
+
+
+/*
+ * Debug only - Idle counters for MIPS CPUs
+ */
+struct mips_idle_count {
+       u64 cp_idle_count;
+       u64 txp_idle_count;
+       u64 txp_tdma_count;
+       u64 txp_ctx_count;
+       u64 txp_hdrq_count;
+       u64 tpat_idle_count;
+       u64 rxp_idle_count;
+       u64 com_idle_count;
+};
+
+
+/**
+ * struct bnx2i_hba - bnx2i adapter structure
+ *
+ * @link:                  list head to link elements
+ * @cnic:                  pointer to cnic device
+ * @pcidev:                pointer to pci dev
+ * @netdev:                pointer to netdev structure
+ * @regview:               mapped PCI register space
+ * @class_dev:             class dev to operate sysfs node
+ * @age:                   age, incremented by every recovery
+ * @cnic_dev_type:         cnic device type, 5706/5708/5709/57710
+ * @mail_queue_access:     mailbox queue access mode, applicable to 5709 only
+ * @reg_with_cnic:         indicates whether the device is registered with CNIC
+ * @adapter_state:         adapter state, UP, GOING_DOWN, LINK_DOWN
+ * @mtu_supported:         Ethernet MTU supported
+ * @scsi_template:         pointer to scsi host template
+ * @iscsi_transport:       pointer to iscsi transport template
+ * @shost_template:        pointer to shost template
+ * @max_sqes:              SQ size
+ * @max_rqes:              RQ size
+ * @max_cqes:              CQ size
+ * @num_ccell:             number of command cells per connection
+ * @active_sess:           active session list head
+ * @num_active_sess:       number of active sessions
+ * @ofld_conns_active:     number of active offloaded connections
+ * @max_active_conns:      max offload connections supported by this device
+ * @cid_que:               iscsi cid queue
+ * @ep_rdwr_lock:          read / write lock to synchronize various ep lists
+ * @ep_ofld_list:          connection list for pending offload completion
+ * @ep_ofld_prod_idx:      producer index to manage offload pend list
+ * @ep_ofld_cons_idx:      consumer index to manage offload pend list
+ * @ep_ofld_max_idx:       max index to manage offload pend list
+ * @ep_destroy_list:       connection list for pending destroy completion
+ * @ep_destroy_prod_idx:   producer index to manage destroy pend list
+ * @ep_destroy_cons_idx:   consumer index to manage destroy pend list
+ * @ep_destroy_max_idx:    max index to manage destroy pend list
+ * @mp_bd_tbl:             BD table to be used with middle path requests
+ * @mp_bd_dma:             DMA address of 'mp_bd_tbl' memory buffer
+ * @dummy_buffer:          Dummy buffer to be used with zero length scsicmd reqs
+ * @dummy_buf_dma:         DMA address of 'dummy_buffer' memory buffer
+ * @lock:                  lock to synchronize access to hba structure
+ * @hba_timer:             timer block
+ * @eh_wait:               wait queue to be used during error handling
+ * @err_rec_task:          error handling worker
+ * @sess_recov_list:       session list which are queued for recovery
+ * @sess_recov_prod_idx:   producer index to manage session recovery list
+ * @sess_recov_cons_idx:   consumer index to manage session recovery list
+ * @sess_recov_max_idx:    max index to manage session recovery list
+ * @mac_addr:              MAC address
+ * @pci_did:               PCI device ID
+ * @pci_vid:               PCI vendor ID
+ * @pci_sdid:              PCI subsystem device ID
+ * @pci_svid:              PCI subsystem vendor ID
+ * @pci_func:              PCI function number in system pci tree
+ * @pci_devno:             PCI device number in system pci tree
+ * @num_wqe_sent:          statistic counter, total wqe's sent
+ * @num_cqe_rcvd:          statistic counter, total cqe's received
+ * @num_intr_claimed:      statistic counter, total interrupts claimed
+ * @link_changed_count:    statistic counter, num of link change notifications
+ *                         received
+ * @ipaddr_changed_count:  statistic counter, num times IP address changed while
+ *                         at least one connection is offloaded
+ * @num_sess_opened:       statistic counter, total num sessions opened
+ * @num_conn_opened:       statistic counter, total num conns opened on this hba
+ * @mips_idle:             mips idle counter
+ * @ctx_addr:              buffer to stage iscsi context
+ * @ctx_size:              iscsi context size
+ * @ctx_dma_hndl:          dma handle of iscsi context buffer
+ * @ctx_read_cnt:          number of 4K chunks read by application via 'sysfs'
+ * @ctx_ccell_tasks:       captures number of ccells and tasks supported by
+ *                         currently offloaded connection, used to decode
+ *                         context memory
+ * @ictx_poll_mode:        identifies if 'ictx' application is running in poll mode
+ * @ictx_poll_cid:         chip context ID which needs to be pushed to 'ictx'
+ *                         (if running poll mode)
+ * @ictx_poll_iscsi_cid:   chip iscsi cid which needs to be pushed to 'ictx'
+ *                         (if running poll mode)
+ * @sq_cq_dump:            buffer to stage SQ/CQ region before pushing to 'ictx'
+ * @sq_cq_rdp:             buffer read marker 
+ * @sq_cq_size:            total valid bytes in 'sq_cq_dump'
+ *
+ * Adapter Data Structure
+ */
+struct bnx2i_hba {
+       struct list_head link;
+       struct cnic_dev *cnic;
+       struct pci_dev *pcidev;
+       struct net_device *netdev;
+       void __iomem *regview;
+       struct class_device class_dev;
+       u32 age;
+       unsigned long cnic_dev_type;
+               #define BNX2I_NX2_DEV_5706              0x0
+               #define BNX2I_NX2_DEV_5708              0x1
+               #define BNX2I_NX2_DEV_5709              0x2
+               #define BNX2I_NX2_DEV_57710             0x3
+       u32 mail_queue_access;
+               #define BNX2I_MQ_KERNEL_MODE            0x0
+               #define BNX2I_MQ_KERNEL_BYPASS_MODE     0x1
+               #define BNX2I_MQ_BIN_MODE               0x2
+       unsigned long  reg_with_cnic;
+               #define BNX2I_CNIC_REGISTERED           1
+
+       unsigned long  adapter_state;
+               #define ADAPTER_STATE_UP                0
+               #define ADAPTER_STATE_GOING_DOWN        1
+               #define ADAPTER_STATE_LINK_DOWN         2
+               #define ADAPTER_STATE_INIT_FAILED       31
+       unsigned int mtu_supported;
+               #define BNX2I_MAX_MTU_SUPPORTED         1500
+
+       struct scsi_host_template *scsi_template;
+       struct iscsi_transport *iscsi_transport;
+               #define BRCM_ISCSI_XPORT_NAME_PREFIX            "bcm570x"
+               #define BRCM_ISCSI_XPORT_NAME_SIZE_MAX          128
+       struct scsi_transport_template *shost_template;
+
+       u32 max_sqes;
+       u32 max_rqes;
+       u32 max_cqes;
+       u32 num_ccell;
+
+       struct list_head active_sess;
+       int num_active_sess;
+       int ofld_conns_active;
+
+       int max_active_conns;
+       struct iscsi_cid_queue cid_que;
+
+       rwlock_t ep_rdwr_lock;
+       struct bnx2i_endpoint **ep_ofld_list;
+       int ep_ofld_prod_idx;
+       int ep_ofld_cons_idx;
+       int ep_ofld_max_idx;
+       struct bnx2i_endpoint **ep_destroy_list;
+       int ep_destroy_prod_idx;
+       int ep_destroy_cons_idx;
+       int ep_destroy_max_idx;
+
+       /*
+        * BD table to be used with MP (Middle Path) requests.
+        */
+       char *mp_bd_tbl;
+       dma_addr_t mp_bd_dma;
+       char *dummy_buffer;
+       dma_addr_t dummy_buf_dma;
+
+       spinlock_t lock;        /* protects hba structure access */
+
+       /* Error handling */
+       struct timer_list hba_timer;
+       wait_queue_head_t eh_wait;
+       struct work_struct err_rec_task;
+       struct bnx2i_sess **sess_recov_list;
+       int sess_recov_prod_idx;
+       int sess_recov_cons_idx;
+       int sess_recov_max_idx;
+
+       unsigned char mac_addr[MAX_ADDR_LEN];
+
+       /*
+        * PCI related info.
+        */
+       u16 pci_did;
+       u16 pci_vid;
+       u16 pci_sdid;
+       u16 pci_svid;
+       u16 pci_func;
+       u16 pci_devno;
+
+       /*
+        * Following are a bunch of statistics useful during development
+        * and later stage for score boarding.
+        */
+       u32 num_wqe_sent;
+       u32 num_cqe_rcvd;
+       u32 num_intr_claimed;
+       u32 link_changed_count;
+       u32 ipaddr_changed_count;
+       u32 num_sess_opened;
+       u32 num_conn_opened;
+
+       struct mips_idle_count mips_idle;
+
+       void *ctx_addr;
+       unsigned int ctx_size;
+       dma_addr_t ctx_dma_hndl;
+       unsigned int ctx_read_cnt;
+       unsigned int ctx_ccell_tasks;
+       int ictx_poll_mode;
+       unsigned int ictx_poll_cid;
+       unsigned int ictx_poll_iscsi_cid;
+       char *sq_cq_dump;
+       char *sq_cq_rdp;
+       unsigned int sq_cq_size;
+};
+
+
+/*******************************************************************************
+ *     QP [ SQ / RQ / CQ ] info.
+ ******************************************************************************/
+
+/*
+ * SQ/RQ/CQ generic structure definition
+ */
+struct sqe {
+       u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
+};
+
+struct rqe {
+       u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
+};
+
+struct cqe {
+       u8 cqe_byte[BNX2I_CQE_SIZE];
+};
+
+
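+/*
+ * Byte offsets of the per-connection doorbell / event coalescing registers
+ * within the mapped context area (qp.ctx_base); placement depends on host
+ * endianness
+ */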
+enum {
+#if defined(__LITTLE_ENDIAN)
+       CNIC_EVENT_COAL_INDEX   = 0x0,
+       CNIC_SEND_DOORBELL      = 0x4,
+       CNIC_EVENT_CQ_ARM       = 0x7,
+       CNIC_RECV_DOORBELL      = 0x8
+#elif defined(__BIG_ENDIAN)
+       CNIC_EVENT_COAL_INDEX   = 0x2,
+       CNIC_SEND_DOORBELL      = 0x6,
+       CNIC_EVENT_CQ_ARM       = 0x4,
+       CNIC_RECV_DOORBELL      = 0xa
+#endif
+};
+
+
+
+/*
+ * CQ DB
+ */
+struct bnx2x_iscsi_cq_pend_cmpl {
+       /* CQ producer, updated by Ustorm */
+        u16 ustrom_prod;
+       /* CQ pending completion counter */
+        u16 pend_cntr;
+};
+
+
+struct bnx2i_5771x_cq_db {
+        struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
+       /* CQ pending completion ITT array */
+        u16 itt[BNX2X_MAX_CQS];
+       /* Cstorm CQ sequence to notify array, updated by driver */
+        u16 sqn[BNX2X_MAX_CQS];
+        u32 reserved[4]; /* 16 byte alignment */
+};
+
+
+struct bnx2i_5771x_sq_rq_db {
+       u16 prod_idx;
+       u8 reserved0[14]; /* Pad structure size to 16 bytes */
+};
+
+
+struct bnx2i_5771x_dbell_hdr {
+        u8 header;
+       /* 1 for rx doorbell, 0 for tx doorbell */
+#define B577XX_DOORBELL_HDR_RX                         (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT                   0
+       /* 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define B577XX_DOORBELL_HDR_DB_TYPE                    (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT              1
+       /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
+#define B577XX_DOORBELL_HDR_DPM_SIZE                   (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT             2
+       /* connection type */
+#define B577XX_DOORBELL_HDR_CONN_TYPE                  (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT            4
+};
+
+struct bnx2i_5771x_dbell {
+       struct bnx2i_5771x_dbell_hdr dbell;
+       u8 pad[3];
+
+};
+
+
+/**
+ * struct qp_info - QP (shared queue region) attributes structure
+ *
+ * @ctx_base:           ioremapped pci register base to access doorbell register
+ *                      pertaining to this offloaded connection
+ * @sq_virt:            virtual address of send queue (SQ) region
+ * @sq_phys:            DMA address of SQ memory region
+ * @sq_mem_size:        SQ size
+ * @sq_prod_qe:         SQ producer entry pointer
+ * @sq_cons_qe:         SQ consumer entry pointer
+ * @sq_first_qe:        virtual address of first entry in SQ
+ * @sq_last_qe:         virtual address of last entry in SQ
+ * @sq_prod_idx:        SQ producer index
+ * @sq_cons_idx:        SQ consumer index
+ * @sqe_left:           number of SQ entries left
+ * @sq_pgtbl_virt:      page table describing buffer constituting SQ region
+ * @sq_pgtbl_phys:      dma address of 'sq_pgtbl_virt'
+ * @sq_pgtbl_size:      SQ page table size
+ * @cq_virt:            virtual address of completion queue (CQ) region
+ * @cq_phys:            DMA address of CQ memory region
+ * @cq_mem_size:        CQ size
+ * @cq_prod_qe:         CQ producer entry pointer
+ * @cq_cons_qe:         CQ consumer entry pointer
+ * @cq_first_qe:        virtual address of first entry in CQ
+ * @cq_last_qe:         virtual address of last entry in CQ
+ * @cq_prod_idx:        CQ producer index
+ * @cq_cons_idx:        CQ consumer index
+ * @cqe_left:           number of CQ entries left
+ * @cqe_size:           size of each CQ entry
+ * @cqe_exp_seq_sn:     next expected CQE sequence number
+ * @cq_pgtbl_virt:      page table describing buffer constituting CQ region
+ * @cq_pgtbl_phys:      dma address of 'cq_pgtbl_virt'
+ * @cq_pgtbl_size:      CQ page table size
+ * @rq_virt:            virtual address of receive queue (RQ) region
+ * @rq_phys:            DMA address of RQ memory region
+ * @rq_mem_size:        RQ size
+ * @rq_prod_qe:         RQ producer entry pointer
+ * @rq_cons_qe:         RQ consumer entry pointer
+ * @rq_first_qe:        virtual address of first entry in RQ
+ * @rq_last_qe:         virtual address of last entry in RQ
+ * @rq_prod_idx:        RQ producer index
+ * @rq_cons_idx:        RQ consumer index
+ * @rqe_left:           number of RQ entries left
+ * @rq_pgtbl_virt:      page table describing buffer constituting RQ region
+ * @rq_pgtbl_phys:      dma address of 'rq_pgtbl_virt'
+ * @rq_pgtbl_size:      RQ page table size
+ *
+ * queue pair (QP) is a per connection shared data structure which is used
+ *     to send work requests (SQ), receive completion notifications (CQ)
+ *     and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
+ *     below holds queue memory, consumer/producer indexes and page table
+ *     information
+ */
+struct qp_info {
+       void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE                        0x40
+
+#define BNX2I_570x_QUE_DB_SIZE         0
+#define BNX2I_5771x_QUE_DB_SIZE                16
+       void *sq_virt;
+       dma_addr_t sq_phys;
+       u32 sq_mem_size;
+
+       struct sqe *sq_prod_qe;
+       struct sqe *sq_cons_qe;
+       struct sqe *sq_first_qe;
+       struct sqe *sq_last_qe;
+       u16 sq_prod_idx;
+       u16 sq_cons_idx;
+       u32 sqe_left;
+
+       void    *sq_pgtbl_virt;
+       dma_addr_t sq_pgtbl_phys;
+       u32 sq_pgtbl_size;      /* set to PAGE_SIZE for 5708 & 5709 */
+
+       void *cq_virt;
+       dma_addr_t cq_phys;
+       u32 cq_mem_size;
+
+       struct cqe *cq_prod_qe;
+       struct cqe *cq_cons_qe;
+       struct cqe *cq_first_qe;
+       struct cqe *cq_last_qe;
+       u16 cq_prod_idx;
+       u16 cq_cons_idx;
+       u32 cqe_left;
+       u32 cqe_size;
+       u32 cqe_exp_seq_sn;
+
+       void    *cq_pgtbl_virt;
+       dma_addr_t cq_pgtbl_phys;
+       u32 cq_pgtbl_size;      /* set to PAGE_SIZE for 5708 & 5709 */
+
+       void *rq_virt;
+       dma_addr_t rq_phys;
+       u32 rq_mem_size;
+
+       struct rqe *rq_prod_qe;
+       struct rqe *rq_cons_qe;
+       struct rqe *rq_first_qe;
+       struct rqe *rq_last_qe;
+       u16 rq_prod_idx;
+       u16 rq_cons_idx;
+       u32 rqe_left;
+
+       void    *rq_pgtbl_virt;
+       dma_addr_t rq_pgtbl_phys;
+       u32 rq_pgtbl_size;      /* set to PAGE_SIZE for 5708 & 5709 */
+};
+
+
+
+/*
+ * CID handles
+ */
+struct ep_handles {
+       u32 fw_cid;
+       u32 drv_iscsi_cid;
+       u16 pg_cid;
+       u16 rsvd;
+};
+
+
+/**
+ * struct bnx2i_endpoint - representation of tcp connection in NX2 world
+ *
+ * @link:               list head to link elements
+ * @hba:                adapter to which this connection belongs
+ * @conn:               iscsi connection this EP is linked to
+ * @sess:               iscsi session this EP is linked to
+ * @cm_sk:              cnic sock struct
+ * @hba_age:            age to detect if 'iscsid' issues ep_disconnect()
+ *                      after HBA reset is completed by bnx2i/cnic/bnx2
+ *                      modules
+ * @state:              tracks offload connection state machine
+ * @tcp_port:           Local TCP port number used in this connection
+ * @qp:                 QP information
+ * @ids:                contains chip allocated *context id* & driver assigned
+ *                      *iscsi cid*
+ * @ofld_timer:         offload timer to detect timeout
+ * @ofld_wait:          wait queue
+ *
+ * Endpoint Structure - equivalent of tcp socket structure
+ */
+struct bnx2i_endpoint {
+       struct list_head link;
+       struct bnx2i_hba *hba;
+       struct bnx2i_conn *conn;
+       struct bnx2i_sess *sess;
+       struct cnic_sock *cm_sk;
+       u32 hba_age;
+       u32 state;
+               #define EP_STATE_IDLE                   0x00000000
+               #define EP_STATE_PG_OFLD_START          0x00000001
+               #define EP_STATE_PG_OFLD_COMPL          0x00000002
+               #define EP_STATE_OFLD_START             0x00000004
+               #define EP_STATE_OFLD_COMPL             0x00000008
+               #define EP_STATE_CONNECT_START          0x00000010
+               #define EP_STATE_CONNECT_COMPL          0x00000020
+               #define EP_STATE_ULP_UPDATE_START       0x00000040
+               #define EP_STATE_ULP_UPDATE_COMPL       0x00000080
+               #define EP_STATE_DISCONN_START          0x00000100
+               #define EP_STATE_DISCONN_COMPL          0x00000200
+               #define EP_STATE_CLEANUP_START          0x00000400
+               #define EP_STATE_CLEANUP_CMPL           0x00000800
+               #define EP_STATE_TCP_FIN_RCVD           0x00001000
+               #define EP_STATE_TCP_RST_RCVD           0x00002000
+               #define EP_STATE_PG_OFLD_FAILED         0x01000000
+               #define EP_STATE_ULP_UPDATE_FAILED      0x02000000
+               #define EP_STATE_CLEANUP_FAILED         0x04000000
+               #define EP_STATE_OFLD_FAILED            0x08000000
+               #define EP_STATE_CONNECT_FAILED         0x10000000
+               #define EP_STATE_DISCONN_TIMEDOUT       0x20000000
+
+       int teardown_mode;
+#define BNX2I_ABORTIVE_SHUTDOWN                0
+#define BNX2I_GRACEFUL_SHUTDOWN                1
+       u16 tcp_port;
+
+       struct qp_info qp;
+       struct ep_handles ids;
+               #define ep_iscsi_cid    ids.drv_iscsi_cid
+               #define ep_cid          ids.fw_cid
+               #define ep_pg_cid       ids.pg_cid
+       struct timer_list ofld_timer;
+       wait_queue_head_t ofld_wait;
+};
+
+
+
+/*
+ * Function Prototypes
+ */
+extern int bnx2i_reg_device;
+void bnx2i_identify_device(struct bnx2i_hba *hba);
+void bnx2i_register_device(struct bnx2i_hba *hba);
+void bnx2i_check_nx2_dev_busy(void);
+void bnx2i_ep_disconnect(uint64_t ep_handle);
+
+void bnx2i_ulp_init(struct cnic_dev *dev);
+void bnx2i_ulp_exit(struct cnic_dev *dev);
+void bnx2i_start(void *handle);
+void bnx2i_stop(void *handle);
+void bnx2i_reg_dev_all(void);
+void bnx2i_unreg_dev_all(void);
+struct bnx2i_hba *get_adapter_list_head(void);
+
+int bnx2i_ioctl_init(void);
+void bnx2i_ioctl_cleanup(void);
+
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+                                         u16 iscsi_cid);
+
+int bnx2i_alloc_ep_pool(void);
+void bnx2i_release_ep_pool(void);
+struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
+struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
+
+struct bnx2i_cmd *bnx2i_alloc_cmd(struct bnx2i_sess *sess);
+void bnx2i_free_cmd(struct bnx2i_sess *sess, struct bnx2i_cmd *cmd);
+
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
+struct bnx2i_hba *bnx2i_get_hba_from_template(
+       struct scsi_transport_template *scsit);
+
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
+void bnx2i_free_hba(struct bnx2i_hba *hba);
+
+void bnx2i_process_scsi_resp(struct bnx2i_cmd *cmd,
+                            struct iscsi_cmd_response *resp_cqe);
+int bnx2i_process_nopin(struct bnx2i_conn *conn,
+       struct bnx2i_cmd *cmnd, char *data_buf, int data_len);
+
+
+void bnx2i_update_cmd_sequence(struct bnx2i_sess *sess, u32 expsn, u32 maxsn);
+
+void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
+void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
+
+int bnx2i_indicate_login_resp(struct bnx2i_conn *conn);
+int bnx2i_indicate_logout_resp(struct bnx2i_conn *conn);
+int bnx2i_indicate_async_mesg(struct bnx2i_conn *conn);
+
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd);
+
+void bnx2i_start_iscsi_hba_shutdown(struct bnx2i_hba *hba);
+void bnx2i_iscsi_handle_ip_event(struct bnx2i_hba *hba);
+int bnx2i_do_iscsi_sess_recovery(struct bnx2i_sess *sess, int err_code);
+void bnx2i_return_failed_command(struct bnx2i_sess *sess,
+                                struct bnx2i_cmd *cmd, int err_code);
+
+int bnx2i_get_tcp_port_requirements(void);
+void bnx2i_cleanup_tcp_port_mngr(void);
+void bnx2i_init_tcp_port_mngr(void);
+
+int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba);
+void bnx2i_free_mp_bdt(struct bnx2i_hba *hba);
+void bnx2i_init_ctx_dump_mem(struct bnx2i_hba *hba);
+void bnx2i_free_ctx_dump_mem(struct bnx2i_hba *hba);
+
+extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
+extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
+                                 struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn,
+                                struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
+                               struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
+                                   struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
+                                  struct bnx2i_cmd *cmnd, u32 ttt,
+                                  char *datap, int data_len, int unsol);
+extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
+                                  struct bnx2i_cmd *cmnd);
+extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
+                                      struct bnx2i_cmd *cmd);
+extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
+                                    struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct bnx2i_conn *conn);
+extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+                                   struct bnx2i_endpoint *ep);
+extern void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
+                                       struct bnx2i_conn *conn);
+
+extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
+                              struct bnx2i_endpoint *ep);
+extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
+                              struct bnx2i_endpoint *ep);
+extern void bnx2i_ep_ofld_timer(unsigned long data);
+
+void bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
+
+void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+
+int bnx2i_register_xport(struct bnx2i_hba *hba);
+int bnx2i_deregister_xport(struct bnx2i_hba *hba);
+
+/* Debug related function prototypes */
+extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
+
+#ifndef _SYSFS_INCL_
+#define bnx2i_setup_ictx_dump(__hba, __conn)   do { } while (0)
+#define  bnx2i_sysfs_setup()                   do { } while (0)
+#define  bnx2i_sysfs_cleanup()                 do { } while (0)
+#define  bnx2i_register_sysfs(__hba)           0
+#define  bnx2i_unregister_sysfs(__hba)         do { } while (0)
+#define bnx2i_init_mips_idle_counters(__hba)   do { } while (0)
+#else
+extern void bnx2i_setup_ictx_dump(struct bnx2i_hba *hba,
+                                 struct bnx2i_conn *conn);
+extern int bnx2i_sysfs_setup(void);
+extern void bnx2i_sysfs_cleanup(void);
+extern int bnx2i_register_sysfs(struct bnx2i_hba *hba);
+extern void bnx2i_unregister_sysfs(struct bnx2i_hba *hba);
+void bnx2i_init_mips_idle_counters(struct bnx2i_hba *hba);
+void bnx2i_tcp_port_new_entry(u16 tcp_port);
+
+#endif
+
+#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644 (file)
index 0000000..71031c6
--- /dev/null
@@ -0,0 +1,2520 @@
+/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+
+/**
+ * bnx2i_get_cid_num - get the CID number of given endpoint
+ * @ep:        endpoint pointer
+ *
+ * Only applicable to 57710 family of devices
+ **/
+static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
+{
+       u32 cid;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               cid = ep->ep_cid;
+       else
+               cid = GET_CID_NUM(ep->ep_cid);
+       return cid;
+}
+
+
+/**
+ * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type
+ * @hba:               Adapter for which adjustments are to be made
+ *
+ * Only applicable to 57710 family of devices
+ **/
+static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
+{
+       u32 num_elements_per_pg;
+
+       if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
+           test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
+           test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+               return;
+
+       /* Adjust each queue size if the user selection does not
+        * yield integral num of page buffers
+        */
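+       /* e.g. assuming 4K pages and 64-byte SQ WQEs (64 entries per page),
+        * a requested SQ size of 200 would be rounded up to 256
+        */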
+       /* adjust SQ */
+       num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+       if (hba->max_sqes < num_elements_per_pg)
+               hba->max_sqes = num_elements_per_pg;
+       else if (hba->max_sqes % num_elements_per_pg)
+               hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
+                                ~(num_elements_per_pg - 1);
+
+       /* adjust CQ */
+       num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+       if (hba->max_cqes < num_elements_per_pg)
+               hba->max_cqes = num_elements_per_pg;
+       else if (hba->max_cqes % num_elements_per_pg)
+               hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
+                                ~(num_elements_per_pg - 1);
+
+       /* adjust RQ */
+       num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+       if (hba->max_rqes < num_elements_per_pg)
+               hba->max_rqes = num_elements_per_pg;
+       else if (hba->max_rqes % num_elements_per_pg)
+               hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
+                                ~(num_elements_per_pg - 1);
+}
+
+
+/**
+ * bnx2i_get_link_state - get network interface link state
+ * @hba:               adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ **/
+static void bnx2i_get_link_state(struct bnx2i_hba *hba)
+{
+       if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+               set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+       else
+               clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_iscsi_license_error - displays iscsi license related error message
+ *
+ * @hba:               adapter instance pointer
+ * @error_code:                error classification
+ *
+ * Puts out an error log when driver is unable to offload iscsi connection
+ *     due to license restrictions
+ **/
+static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
+{
+       if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
+               /* iSCSI offload not supported on this device */
+               printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
+                               hba->netdev->name);
+       if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
+               /* iSCSI offload not supported on this LOM device */
+               printk(KERN_ERR "bnx2i: LOM is not enabled to "
+                               "offload iSCSI connections, dev=%s\n",
+                               hba->netdev->name);
+       set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
+}
+
+
+extern unsigned int event_coal_div;
+
+/**
+ * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
+ *
+ * @ep:                endpoint (transport identifier) structure
+ * @action:            action, ARM or DISARM. For now only ARM_CQE is used
+ *
+ * Arming the CQ will enable the chip to generate global EQ events in order to
+ *     interrupt the driver. An EQ event is generated when the CQ index is hit
+ *     or when at least 1 CQE is outstanding and the on-chip timer expires
+ **/
+void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+{
+       struct bnx2i_5771x_cq_db *cq_db;
+       u16 cq_index;
+
+#ifndef _570X_ENABLE_EC_
+       if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               return;
+#endif
+
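+       /* When arming, pick a notification index that is a fraction
+        * (num_active_cmds / event_coal_div) of the outstanding commands
+        * ahead of the next expected CQE sequence number, wrapped to the
+        * valid sequence range, so the EQ event is coalesced over several
+        * completions
+        */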
+       if ((action == CNIC_ARM_CQE) && ep->sess) {
+               cq_index = ep->qp.cqe_exp_seq_sn +
+                          ep->sess->num_active_cmds / event_coal_div;
+               cq_index %= (ep->qp.cqe_size * 2 + 1);
+               if (!cq_index)
+                       cq_index = 1;
+#ifdef _570X_ENABLE_EC_
+               if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+#endif
+                       cq_db = (struct bnx2i_5771x_cq_db*)ep->qp.cq_pgtbl_virt;
+                       cq_db->sqn[0] = cq_index;
+#ifdef _570X_ENABLE_EC_
+                       return;
+               }
+               writew(cq_index, ep->qp.ctx_base + CNIC_EVENT_COAL_INDEX);
+#endif
+       }
+#ifdef _570X_ENABLE_EC_
+       writeb(action, ep->qp.ctx_base + CNIC_EVENT_CQ_ARM);
+#endif
+}
+
+/**
+ * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
+ *
+ * @conn:              iscsi connection on which RQ event occurred
+ * @ptr:               driver buffer to which RQ buffer contents is to
+ *                     be copied
+ * @len:               length of valid data inside RQ buf
+ *
+ * Copies RQ buffer contents from shared (DMA'able) memory region to
+ *     driver buffer. RQ is used to DMA unsolicited iscsi pdu's and
+ *     scsi sense info
+ **/
+void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len)
+{
+       if (conn->ep->qp.rqe_left) {
+               conn->ep->qp.rqe_left--;
+               memcpy(ptr, (u8 *) conn->ep->qp.rq_cons_qe, len);
+               if (conn->ep->qp.rq_cons_qe == conn->ep->qp.rq_last_qe) {
+                       conn->ep->qp.rq_cons_qe = conn->ep->qp.rq_first_qe;
+                       conn->ep->qp.rq_cons_idx = 0;
+               } else {
+                       conn->ep->qp.rq_cons_qe++;
+                       conn->ep->qp.rq_cons_idx++;
+               }
+       }
+}
+
+/**
+ * bnx2i_ring_577xx_doorbell - ring doorbell register to wake-up the
+ *                     processing engine
+ * @conn:              iscsi connection
+ *
+ * Only applicable to 57710 family of devices
+ **/
+void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
+{
+       struct bnx2i_5771x_dbell dbell;
+       u32 msg;
+
+       memset(&dbell, 0, sizeof(dbell));
+       dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
+                             B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
+       msg = *((u32 *)&dbell);
+       /* TODO : get doorbell register mapping */
+       writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
+}
+
+
+/**
+ * bnx2i_put_rq_buf - Replenish RQ buffers, ring chip doorbell if required
+ * @conn:              iscsi connection on which event to post
+ * @count:             number of RQ buffer being posted to chip
+ *
+ * No need to ring hardware doorbell for 57710 family of devices
+ **/
+void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count)
+{
+       struct bnx2i_5771x_sq_rq_db *rq_db;
+       u16 hi_bit = (conn->ep->qp.rq_prod_idx & 0x8000);
+
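+       /* bit 15 of rq_prod_idx is a wrap flag: it is preserved while the
+        * producer index stays within max_rqes and toggled each time the
+        * index wraps around the RQ ring
+        */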
+       conn->ep->qp.rqe_left += count;
+       conn->ep->qp.rq_prod_idx &= 0x7FFF;
+       conn->ep->qp.rq_prod_idx += count;
+
+       if (conn->ep->qp.rq_prod_idx > conn->sess->hba->max_rqes) {
+               conn->ep->qp.rq_prod_idx %= conn->sess->hba->max_rqes;
+               if (!hi_bit)
+                       conn->ep->qp.rq_prod_idx |= 0x8000;
+       } else
+               conn->ep->qp.rq_prod_idx |= hi_bit;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &conn->ep->hba->cnic_dev_type)) {
+               rq_db = (struct bnx2i_5771x_sq_rq_db *)
+                               conn->ep->qp.rq_pgtbl_virt;
+               rq_db->prod_idx = conn->ep->qp.rq_prod_idx;
+               /* no need to ring hardware doorbell for 57710 */
+       } else {
+               writew(conn->ep->qp.rq_prod_idx,
+                      conn->ep->qp.ctx_base + CNIC_RECV_DOORBELL);
+       }
+       mmiowb();
+}
+
+
+/**
+ * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
+ *
+ * @conn:              iscsi connection to which new SQ entries belong
+ * @count:             number of SQ WQEs to post
+ *
+ * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
+ *     of devices. For 5706/5708/5709 new SQ WQE count is written into the
+ *     doorbell register
+ **/
+static void bnx2i_ring_sq_dbell(struct bnx2i_conn *conn, int count)
+{
+       struct bnx2i_5771x_sq_rq_db *sq_db;
+
+       wmb();  /* flush SQ WQE memory before the doorbell is rung */
+       if (test_bit(BNX2I_NX2_DEV_57710, &conn->ep->hba->cnic_dev_type)) {
+               sq_db = (struct bnx2i_5771x_sq_rq_db *)
+                               conn->ep->qp.sq_pgtbl_virt;
+               sq_db->prod_idx = conn->ep->qp.sq_prod_idx;
+               bnx2i_ring_577xx_doorbell(conn);
+       } else {
+               writew(count, conn->ep->qp.ctx_base + CNIC_SEND_DOORBELL);
+       }
+       mmiowb(); /* flush posted PCI writes */
+}
+
+
+
+/**
+ * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
+ *
+ * @conn:              iscsi connection to which new SQ entries belong
+ * @count:             number of SQ WQEs to post
+ *
+ * this routine will update SQ driver parameters and ring the doorbell
+ **/
+void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *conn, int count)
+{
+       int tmp_cnt;
+
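+       /* advance the SQ producer entry pointer by 'count', wrapping back to
+        * the first entry once the end of the ring is passed, then bump the
+        * producer index and ring the doorbell
+        */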
+       if (count == 1) {
+               if (conn->ep->qp.sq_prod_qe == conn->ep->qp.sq_last_qe)
+                       conn->ep->qp.sq_prod_qe = conn->ep->qp.sq_first_qe;
+               else
+                       conn->ep->qp.sq_prod_qe++;
+       } else {
+               if ((conn->ep->qp.sq_prod_qe + count) <=
+                   conn->ep->qp.sq_last_qe)
+                       conn->ep->qp.sq_prod_qe += count;
+               else {
+                       tmp_cnt = conn->ep->qp.sq_last_qe -
+                               conn->ep->qp.sq_prod_qe;
+                       conn->ep->qp.sq_prod_qe =
+                               &conn->ep->qp.sq_first_qe[count - (tmp_cnt + 1)];
+               }
+       }
+       conn->ep->qp.sq_prod_idx += count;
+       /* Ring the doorbell */
+       bnx2i_ring_sq_dbell(conn, conn->ep->qp.sq_prod_idx);
+}
+
+
+/**
+ * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
+ *
+ * @conn:              iscsi connection
+ * @cmd:               driver command structure which is requesting
+ *                     a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Login request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_login(struct bnx2i_conn *conn, struct bnx2i_cmd *cmd)
+{
+       struct iscsi_login_request *login_wqe;
+       struct iscsi_login *login_hdr;
+       u32 dword;
+
+       if (!conn->gen_pdu.req_buf || !conn->gen_pdu.resp_buf)
+               return -EINVAL;
+
+       login_hdr = (struct iscsi_login *) &conn->gen_pdu.pdu_hdr;
+       login_wqe = (struct iscsi_login_request *) conn->ep->qp.sq_prod_qe;
+
+       login_wqe->op_code = ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE;
+       login_wqe->op_attr = login_hdr->flags;
+       login_wqe->version_max = login_hdr->max_version;
+       login_wqe->version_min = login_hdr->min_version;
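+       /* DataSegmentLength is a 24-bit big-endian field in the iSCSI BHS;
+        * reassemble its three bytes into a host-order length
+        */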
+       login_wqe->data_length = ((login_hdr->dlength[0] << 16) |
+                                 (login_hdr->dlength[1] << 8) |
+                                 login_hdr->dlength[2]);
+
+       login_wqe->isid_lo = *((u32 *) login_hdr->isid);
+       login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
+       login_wqe->tsih = login_hdr->tsih;
+       login_wqe->itt = (cmd->req.itt |
+                         (ISCSI_TASK_TYPE_MPATH <<
+                          ISCSI_LOGIN_REQUEST_TYPE_SHIFT));
+       login_wqe->cid = login_hdr->cid;
+
+       login_wqe->cmd_sn = ntohl(login_hdr->cmdsn);
+       login_wqe->exp_stat_sn = ntohl(login_hdr->exp_statsn);
+
+       login_wqe->resp_bd_list_addr_lo = (u32) conn->gen_pdu.resp_bd_dma;
+       login_wqe->resp_bd_list_addr_hi =
+               (u32) ((u64) conn->gen_pdu.resp_bd_dma >> 32);
+
+       dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
+                (conn->gen_pdu.resp_buf_size <<
+                 ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+       login_wqe->resp_buffer = dword;
+       login_wqe->flags = 0;
+       login_wqe->bd_list_addr_lo = (u32) conn->gen_pdu.req_bd_dma;
+       login_wqe->bd_list_addr_hi =
+               (u32) ((u64) conn->gen_pdu.req_bd_dma >> 32);
+       login_wqe->num_bds = 1;
+       login_wqe->cq_index = 0; /* CQ# used for completion, Everest only */
+       conn->num_login_req_pdus++;
+
+       bnx2i_ring_dbell_update_sq_params(conn, 1);
+       return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_text - post iSCSI text request MP WQE to hardware
+ *
+ * @conn:              iscsi connection
+ * @cmd:               driver command structure which is requesting
+ *                     a WQE to be sent to the chip for further processing
+ *
+ * iSCSI Text request support is not implemented; this routine only updates the PDU counter
+ */
+int bnx2i_send_iscsi_text(struct bnx2i_conn *conn, struct bnx2i_cmd *cmd)
+{
+       conn->num_text_req_pdus++;
+       return 0;
+}
+
+
+
+/**
+ * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
+ *
+ * @conn:              iscsi connection
+ * @cmd:               driver command structure which is requesting
+ *                     a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI task management (TMF) request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, struct bnx2i_cmd *cmd)
+{
+       struct bnx2i_hba *hba = conn->sess->hba;
+       u32 dword;
+       u32 scsi_lun[2];
+       struct iscsi_tmf_request *tmfabort_wqe;
+       tmfabort_wqe = (struct iscsi_tmf_request *) conn->ep->qp.sq_prod_qe;
+
+       tmfabort_wqe->op_code = cmd->iscsi_opcode;
+       tmfabort_wqe->op_attr = 0;
+       tmfabort_wqe->op_attr =
+               ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
+       int_to_scsilun(cmd->tmf_ref_cmd->scsi_cmd->device->lun,
+                      (struct scsi_lun *) scsi_lun);
+       tmfabort_wqe->lun[0] = ntohl(scsi_lun[0]);
+       tmfabort_wqe->lun[1] = ntohl(scsi_lun[1]);
+
+       tmfabort_wqe->itt = (cmd->req.itt | (ISCSI_TASK_TYPE_MPATH << 14));
+       tmfabort_wqe->reserved2 = 0;
+       tmfabort_wqe->cmd_sn = cmd->req.cmd_sn;
+
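+       /* encode the referenced task's I/O direction into the ref_itt field */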
+       if (cmd->tmf_ref_cmd->req.op_attr == ISCSI_CMD_REQUEST_READ)
+               dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+       else
+               dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+       tmfabort_wqe->ref_itt = (dword |= cmd->tmf_ref_itt);
+       tmfabort_wqe->ref_cmd_sn = cmd->tmf_ref_cmd->req.cmd_sn;
+
+       tmfabort_wqe->bd_list_addr_lo = (u32) hba->mp_bd_dma;
+       tmfabort_wqe->bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+       tmfabort_wqe->num_bds = 1;
+       tmfabort_wqe->cq_index = 0; /* CQ# used for completion, Everest only */
+       conn->num_tmf_req_pdus++;
+
+       bnx2i_ring_dbell_update_sq_params(conn, 1);
+       return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
+ *
+ * @conn:              iscsi connection
+ * @cmd:               driver command structure which is requesting
+ *                     a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, struct bnx2i_cmd *cmd)
+{
+       struct iscsi_cmd_request *scsi_cmd_wqe;
+
+       scsi_cmd_wqe = (struct iscsi_cmd_request *) conn->ep->qp.sq_prod_qe;
+       memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct iscsi_cmd_request));
+       scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, Everest only */
+
+       conn->num_scsi_cmd_pdus++;
+       bnx2i_ring_dbell_update_sq_params(conn, 1);
+       return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
+ *
+ * @conn:              iscsi connection
+ * @cmd:               driver command structure which is requesting
+ *                     a WQE to be sent to the chip for further processing
+ * @ttt:               TTT to be used when building pdu header
+ * @datap:             payload buffer pointer
+ * @data_len:          payload data length
+ * @unsol:             indicates whether the nopout pdu is unsolicited or is
+ *                     in response to target's NOPIN w/ TTT != 0xFFFFFFFF
+ *
+ * prepare and post a nopout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, struct bnx2i_cmd *cmd,
+                           u32 ttt, char *datap, int data_len, int unsol)
+{
+       struct Scsi_Host *host = conn->sess->host;
+       unsigned long flags;
+       struct iscsi_nop_out_request *nopout_wqe;
+       struct iscsi_nopout *nopout_hdr;
+
+       spin_lock_irqsave(host->host_lock, flags);
+       nopout_hdr = (struct iscsi_nopout *) &conn->gen_pdu.pdu_hdr;
+       nopout_wqe = (struct iscsi_nop_out_request *) conn->ep->qp.sq_prod_qe;
+       nopout_wqe->op_code = (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE);
+       nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
+       memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &conn->ep->hba->cnic_dev_type)) {
+               u32 tmp = nopout_wqe->lun[0];
+               /* 57710 requires LUN field to be swapped */
+               nopout_wqe->lun[0] = nopout_wqe->lun[1];
+               nopout_wqe->lun[1] = tmp;
+       }
+
+       nopout_wqe->itt = ((u16) cmd->req.itt |
+                          (ISCSI_TASK_TYPE_MPATH <<
+                           ISCSI_TMF_REQUEST_TYPE_SHIFT));
+       nopout_wqe->ttt = ttt;
+       nopout_wqe->flags = 0;
+       if (!unsol)
+               nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+       else if (nopout_hdr->itt == ISCSI_RESERVED_TAG)
+               nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+
+       nopout_wqe->cmd_sn = conn->sess->cmdsn;
+       nopout_wqe->data_length = data_len;
+       if (data_len) {
+               /* handle payload data, not required in first release */
+               printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
+       } else {
+               nopout_wqe->bd_list_addr_lo = (u32) conn->sess->hba->mp_bd_dma;
+               nopout_wqe->bd_list_addr_hi =
+                       (u32) ((u64) conn->sess->hba->mp_bd_dma >> 32);
+               nopout_wqe->num_bds = 1;
+       }
+       nopout_wqe->cq_index = 0; /* CQ# used for completion, Everest only */
+
+       list_add_tail(&cmd->link, &conn->sess->active_cmds);
+       conn->sess->num_active_cmds++;
+
+       conn->num_nopout_pdus++;
+       bnx2i_ring_dbell_update_sq_params(conn, 1);
+       spin_unlock_irqrestore(host->host_lock, flags);
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
+ *
+ * @conn:              iscsi connection
+ * @cmd:               driver command structure which is requesting
+ *                     a WQE to be sent to the chip for further processing
+ *
+ * prepare and post logout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, struct bnx2i_cmd *cmd)
+{
+       struct iscsi_logout_request *logout_wqe;
+       struct iscsi_logout *logout_hdr;
+       struct bnx2i_hba *hba = conn->sess->hba;
+       unsigned long flags;
+
+       spin_lock_irqsave(conn->sess->host->host_lock, flags);
+       conn->sess->state = BNX2I_SESS_IN_LOGOUT;
+       cmd->req.cmd_sn = conn->sess->cmdsn;
+
+       logout_hdr = (struct iscsi_logout *) &conn->gen_pdu.pdu_hdr;
+
+       logout_wqe = (struct iscsi_logout_request *) conn->ep->qp.sq_prod_qe;
+       memset(logout_wqe, 0x00, sizeof(struct iscsi_logout_request));
+
+       logout_wqe->op_code = (logout_hdr->opcode | ISCSI_OP_IMMEDIATE);
+       logout_wqe->op_attr =
+                       logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
+       logout_wqe->itt = ((u16) cmd->req.itt |
+                          (ISCSI_TASK_TYPE_MPATH <<
+                           ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
+       logout_wqe->data_length = 0;
+       logout_wqe->cmd_sn = cmd->req.cmd_sn;
+       logout_wqe->cid = conn->conn_cid;
+
+       logout_wqe->bd_list_addr_lo = (u32) hba->mp_bd_dma;
+       logout_wqe->bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+       logout_wqe->num_bds = 1;
+       logout_wqe->cq_index = 0; /* CQ# used for completion, Everest only */
+
+       conn->num_logout_req_pdus++;
+       bnx2i_ring_dbell_update_sq_params(conn, 1);
+       spin_unlock_irqrestore(conn->sess->host->host_lock, flags);
+       return 0;
+}
+
+
+/**
+ * bnx2i_update_iscsi_conn - posts iSCSI CONN_UPDATE KWQE to hardware
+ *
+ * @conn:              iscsi connection which requires an iscsi parameter update
+ *
+ * sends down iSCSI Conn Update request to move iSCSI conn to FFP
+ */
+void bnx2i_update_iscsi_conn(struct bnx2i_conn *conn)
+{
+       struct bnx2i_hba *hba = conn->sess->hba;
+       struct kwqe *kwqe_arr[2];
+       struct iscsi_kwqe_conn_update *update_wqe;
+       struct iscsi_kwqe_conn_update conn_update_kwqe;
+
+       update_wqe = &conn_update_kwqe;
+
+       update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
+       update_wqe->hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       /* 57710 requires conn context id to be passed as is */
+       if (test_bit(BNX2I_NX2_DEV_57710, &conn->ep->hba->cnic_dev_type))
+               update_wqe->context_id = conn->ep->ep_cid;
+       else
+               update_wqe->context_id = (conn->ep->ep_cid >> 7);
+       update_wqe->conn_flags = 0;
+       if (conn->header_digest_en)
+               update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
+       if (conn->data_digest_en)
+               update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
+       if (conn->sess->initial_r2t)
+               update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
+       if (conn->sess->imm_data)
+               update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
+
+       update_wqe->max_send_pdu_length = conn->max_data_seg_len_xmit;
+       update_wqe->max_recv_pdu_length = conn->max_data_seg_len_recv;
+       update_wqe->first_burst_length = conn->sess->first_burst_len;
+       update_wqe->max_burst_length = conn->sess->max_burst_len;
+       update_wqe->exp_stat_sn = conn->exp_statsn;
+       update_wqe->max_outstanding_r2ts = conn->sess->max_r2t;
+       update_wqe->session_error_recovery_level = conn->sess->erl;
+       printk(KERN_ALERT "bnx2i: conn update - MBL 0x%x FBL 0x%x "
+                         "MRDSL_I 0x%x MRDSL_T 0x%x\n",
+                         update_wqe->max_burst_length,
+                         update_wqe->first_burst_length,
+                         update_wqe->max_recv_pdu_length,
+                         update_wqe->max_send_pdu_length);
+
+       kwqe_arr[0] = (struct kwqe *) update_wqe;
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
+ *
+ * @data:              endpoint (transport handle) structure pointer
+ *
+ * routine to handle connection offload/destroy request timeout
+ */
+void bnx2i_ep_ofld_timer(unsigned long data)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+
+       if (ep->state == EP_STATE_OFLD_START) {
+               printk(KERN_ALERT "bnx2i[%lx]: ofld_timer: CONN_OFLD timeout\n", jiffies);
+               ep->state = EP_STATE_OFLD_FAILED;
+       } else if (ep->state == EP_STATE_DISCONN_START) {
+               printk(KERN_ALERT "bnx2i[%lx]: ofld_timer: CONN_DISCON timeout\n", jiffies);
+               ep->state = EP_STATE_DISCONN_TIMEDOUT;
+       } else if (ep->state == EP_STATE_CLEANUP_START) {
+               printk(KERN_ALERT "bnx2i[%lx]: ofld_timer: CONN_CLEANUP timeout\n", jiffies);
+               ep->state = EP_STATE_CLEANUP_FAILED;
+       }
+
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
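+/* returns log2(val) when 'val' is a power of two, otherwise returns 0 */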
+static int bnx2i_power_of2(u32 val)
+{
+       u32 power = 0;
+       if (val & (val - 1))
+               return power;
+       val--;
+       while (val) {
+               val = val >> 1;
+               power++;
+       }
+       return power;
+}
+
+
+/**
+ * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
+ *
+ * @hba:               adapter structure pointer
+ * @cmd:               driver command structure which is requesting
+ *                     a WQE to be sent to the chip for further processing
+ *
+ * prepares and posts an ISCSI_OPCODE_CLEANUP_REQUEST WQE
+ */
+void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+       struct iscsi_cleanup_request *cmd_cleanup;
+
+       cmd_cleanup =
+               (struct iscsi_cleanup_request *) cmd->conn->ep->qp.sq_prod_qe;
+       memset(cmd_cleanup, 0x00, sizeof(struct iscsi_cleanup_request));
+
+       cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
+       cmd_cleanup->itt = cmd->req.itt;
+       cmd_cleanup->cq_index = 0; /* CQ# used for completion, Everest only */
+
+       bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
+}
+
+
+/**
+ * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
+ *
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts a DESTROY_CONN KWQE to initiate the
+ *     iscsi connection context clean-up process
+ */
+void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       struct kwqe *kwqe_arr[2];
+       struct iscsi_kwqe_conn_destroy conn_cleanup;
+
+       memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
+
+       conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
+       conn_cleanup.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+       /* 57710 requires conn context id to be passed as is */
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               conn_cleanup.context_id = ep->ep_cid;
+       else
+               conn_cleanup.context_id = (ep->ep_cid >> 7);
+
+       conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
+
+       kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_570x_send_conn_ofld_req - initiates iscsi connection context setup process
+ *
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       struct kwqe *kwqe_arr[2];
+       struct iscsi_kwqe_conn_offload1 ofld_req1;
+       struct iscsi_kwqe_conn_offload2 ofld_req2;
+       dma_addr_t dma_addr;
+       int num_kwqes = 2;
+       u32 *ptbl;
+
+       ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+       ofld_req1.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+       dma_addr = ep->qp.sq_pgtbl_phys;
+       ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       dma_addr = ep->qp.cq_pgtbl_phys;
+       ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+       ofld_req2.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       dma_addr = ep->qp.rq_pgtbl_phys;
+       ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+
+       ofld_req2.sq_first_pte.hi = *ptbl++;
+       ofld_req2.sq_first_pte.lo = *ptbl;
+
+       ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+       ofld_req2.cq_first_pte.hi = *ptbl++;
+       ofld_req2.cq_first_pte.lo = *ptbl;
+
+       kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+       kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+       ofld_req2.num_additional_wqes = 0;
+
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+
+/**
+ * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context setup process
+ *
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       struct kwqe *kwqe_arr[5];
+       struct iscsi_kwqe_conn_offload1 ofld_req1;
+       struct iscsi_kwqe_conn_offload2 ofld_req2;
+       struct iscsi_kwqe_conn_offload3 ofld_req3[1];
+       dma_addr_t dma_addr;
+       int num_kwqes = 2;
+       u32 *ptbl;
+
+       ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+       ofld_req1.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
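+       /* on 57710 a doorbell area (ISCSI_*_DB_SIZE) occupies the start of
+        * each page table, so the PTEs begin past that region
+        */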
+       dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
+       ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
+       ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+       ofld_req2.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
+       ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+       ofld_req2.sq_first_pte.hi = *ptbl++;
+       ofld_req2.sq_first_pte.lo = *ptbl;
+
+       ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+       ofld_req2.cq_first_pte.hi = *ptbl++;
+       ofld_req2.cq_first_pte.lo = *ptbl;
+
+       kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+       kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+
+       ofld_req2.num_additional_wqes = 1;
+       memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
+       ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+       ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
+       ofld_req3[0].qp_first_pte[0].lo = *ptbl;
+
+       kwqe_arr[2] = (struct kwqe *) ofld_req3;
+       /* needed if we decide to go with multiple KCQEs per connection */
+       num_kwqes += 1;
+
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+/**
+ * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
+ *
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+               bnx2i_5771x_send_conn_ofld_req(hba, ep);
+       else
+               bnx2i_570x_send_conn_ofld_req(hba, ep);
+}
+
+/**
+ * setup_qp_page_tables - iscsi QP page table setup function
+ *
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * Sets up page tables for SQ/RQ/CQ. 1G/sec (5706/5708/5709) devices require
+ *     64-bit page table entries in big endian format, whereas the 10G/sec
+ *     (57710) device requires them in little endian format
+ */
+void setup_qp_page_tables(struct bnx2i_endpoint *ep)
+{
+       int num_pages;
+       u32 *ptbl;
+       dma_addr_t page;
+       int cnic_dev_10g;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               cnic_dev_10g = 1;
+       else
+               cnic_dev_10g = 0;
+
+       /* SQ page table */
+       memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
+       num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+       page = ep->qp.sq_phys;
+
+       if (cnic_dev_10g)
+               ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+       else
+               ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+       while (num_pages--) {
+               if (cnic_dev_10g) {
+                       /* PTE is written in little endian format for 57710 */
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       page += PAGE_SIZE;
+               } else {
+                       /* PTE is written in big endian format for
+                        * 5706/5708/5709 devices */
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       page += PAGE_SIZE;
+               }
+       }
+
+       /* RQ page table */
+       memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
+       num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+       page = ep->qp.rq_phys;
+       if (cnic_dev_10g)
+               ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+       else
+               ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
+       while (num_pages--) {
+               if (cnic_dev_10g) {
+                       /* PTE is written in little endian format for 57710 */
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       page += PAGE_SIZE;
+               } else {
+                       /* PTE is written in big endian format for
+                        * 5706/5708/5709 devices */
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       page += PAGE_SIZE;
+               }
+       }
+
+       /* CQ page table */
+       memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
+       num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+       page = ep->qp.cq_phys;
+       if (cnic_dev_10g)
+               ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+       else
+               ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+       while (num_pages--) {
+               if (cnic_dev_10g) {
+                       /* PTE is written in little endian format for 57710 */
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       page += PAGE_SIZE;
+               } else {
+                       /* PTE is written in big endian format for
+                        * 5706/5708/5709 devices */
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       page += PAGE_SIZE;
+               }
+       }
+}
+
+
+/**
+ * bnx2i_alloc_qp_resc - allocates required resources for QP (transport layer
+ *                     for iSCSI connection)
+ *
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * Allocate QP resources, DMA'able memory for SQ/RQ/CQ and page tables.
+ *     EP structure elements such as producer/consumer indexes/pointers,
+ *     queue sizes and page table contents are set up
+ */
+int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       struct bnx2i_5771x_cq_db *cq_db;
+       u32 num_que_elements;
+       
+       ep->hba = hba;
+       ep->conn = NULL;
+       ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
+
+       /* Allocate page table memory for SQ which is page aligned */
+       ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
+       ep->qp.sq_mem_size =
+               (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       num_que_elements = hba->max_sqes;
+
+       ep->qp.sq_pgtbl_size =
+               (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+       ep->qp.sq_pgtbl_size =
+               (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       ep->qp.sq_pgtbl_virt =
+               pci_alloc_consistent(hba->pcidev, ep->qp.sq_pgtbl_size,
+                                    &ep->qp.sq_pgtbl_phys);
+       if (!ep->qp.sq_pgtbl_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
+                                 ep->qp.sq_pgtbl_size);
+               goto mem_alloc_err;
+       }
+
+       /* Allocate memory area for actual SQ element */
+       ep->qp.sq_virt =
+               pci_alloc_consistent(hba->pcidev, ep->qp.sq_mem_size,
+                                    &ep->qp.sq_phys);
+       if (!ep->qp.sq_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
+                                 ep->qp.sq_mem_size);
+               goto mem_alloc_err;
+       }
+
+       memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
+
+       ep->qp.sq_first_qe = ep->qp.sq_virt;
+       ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
+       ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
+       ep->qp.sq_last_qe = &ep->qp.sq_first_qe[num_que_elements - 1];
+       ep->qp.sq_prod_idx = 0;
+       ep->qp.sq_cons_idx = 0;
+       ep->qp.sqe_left = num_que_elements;
+
+       /* Allocate page table memory for CQ which is page aligned */
+       ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
+       ep->qp.cq_mem_size =
+               (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       num_que_elements = hba->max_cqes;
+
+       ep->qp.cq_pgtbl_size =
+               (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+       ep->qp.cq_pgtbl_size =
+               (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       ep->qp.cq_pgtbl_virt =
+               pci_alloc_consistent(hba->pcidev, ep->qp.cq_pgtbl_size,
+                                    &ep->qp.cq_pgtbl_phys);
+       if (!ep->qp.cq_pgtbl_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
+                                 ep->qp.cq_pgtbl_size);
+               goto mem_alloc_err;
+       }
+
+       /* Allocate memory area for actual CQ element */
+       ep->qp.cq_virt =
+               pci_alloc_consistent(hba->pcidev, ep->qp.cq_mem_size,
+                                    &ep->qp.cq_phys);
+       if (!ep->qp.cq_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
+                                 ep->qp.cq_mem_size);
+               goto mem_alloc_err;
+       }
+       memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
+
+       ep->qp.cq_first_qe = ep->qp.cq_virt;
+       ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
+       ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
+       ep->qp.cq_last_qe = &ep->qp.cq_first_qe[num_que_elements - 1];
+       ep->qp.cq_prod_idx = 0;
+       ep->qp.cq_cons_idx = 0;
+       ep->qp.cqe_left = num_que_elements;
+       ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
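+       /* cqe_size tracks the CQ depth (number of CQEs) and is used when
+        * wrapping cqe_exp_seq_sn during completion processing
+        */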
+       ep->qp.cqe_size = hba->max_cqes;
+
+       /* Invalidate all EQ CQE index, req only for 57710 */
+       cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+       memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
+
+       /* Allocate page table memory for RQ which is page aligned */
+       ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
+
+       ep->qp.rq_mem_size =
+               (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       num_que_elements = hba->max_rqes;
+
+       ep->qp.rq_pgtbl_size =
+               (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+       ep->qp.rq_pgtbl_size =
+               (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       ep->qp.rq_pgtbl_virt =
+               pci_alloc_consistent(hba->pcidev, ep->qp.rq_pgtbl_size,
+                                    &ep->qp.rq_pgtbl_phys);
+       if (!ep->qp.rq_pgtbl_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
+                                 ep->qp.rq_pgtbl_size);
+               goto mem_alloc_err;
+       }
+
+       /* Allocate memory area for actual RQ element */
+       ep->qp.rq_virt =
+               pci_alloc_consistent(hba->pcidev, ep->qp.rq_mem_size,
+                                    &ep->qp.rq_phys);
+       if (!ep->qp.rq_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
+                                 ep->qp.rq_mem_size);
+               goto mem_alloc_err;
+       }
+
+       ep->qp.rq_first_qe = ep->qp.rq_virt;
+       ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
+       ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
+       ep->qp.rq_last_qe = &ep->qp.rq_first_qe[num_que_elements - 1];
+       ep->qp.rq_prod_idx = 0x8000;
+       ep->qp.rq_cons_idx = 0;
+       ep->qp.rqe_left = num_que_elements;
+
+       setup_qp_page_tables(ep);
+
+       return 0;
+
+mem_alloc_err:
+       bnx2i_free_qp_resc(hba, ep);
+       return -ENOMEM;
+}
+
+
+
+/**
+ * bnx2i_free_qp_resc - free memory resources held by QP
+ *
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * Free QP resources - SQ/RQ/CQ memory and page tables.
+ */
+void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       if (ep->qp.ctx_base) {
+               iounmap(ep->qp.ctx_base);
+               ep->qp.ctx_base = NULL;
+       }
+       /* Free SQ mem */
+       if (ep->qp.sq_pgtbl_virt) {
+               pci_free_consistent(ep->hba->pcidev, ep->qp.sq_pgtbl_size,
+                                   ep->qp.sq_pgtbl_virt,
+                                   ep->qp.sq_pgtbl_phys);
+               ep->qp.sq_pgtbl_virt = NULL;
+               ep->qp.sq_pgtbl_phys = 0;
+       }
+       if (ep->qp.sq_virt) {
+               pci_free_consistent(ep->hba->pcidev, ep->qp.sq_mem_size,
+                                   ep->qp.sq_virt, ep->qp.sq_phys);
+               ep->qp.sq_virt = NULL;
+               ep->qp.sq_phys = 0;
+       }
+
+       /* Free RQ mem */
+       if (ep->qp.rq_pgtbl_virt) {
+               pci_free_consistent(ep->hba->pcidev, ep->qp.rq_pgtbl_size,
+                                   ep->qp.rq_pgtbl_virt,
+                                   ep->qp.rq_pgtbl_phys);
+               ep->qp.rq_pgtbl_virt = NULL;
+               ep->qp.rq_pgtbl_phys = 0;
+       }
+       if (ep->qp.rq_virt) {
+               pci_free_consistent(ep->hba->pcidev, ep->qp.rq_mem_size,
+                                   ep->qp.rq_virt, ep->qp.rq_phys);
+               ep->qp.rq_virt = NULL;
+               ep->qp.rq_phys = 0;
+       }
+
+       /* Free CQ mem */
+       if (ep->qp.cq_pgtbl_virt) {
+               pci_free_consistent(ep->hba->pcidev, ep->qp.cq_pgtbl_size,
+                                   ep->qp.cq_pgtbl_virt,
+                                   ep->qp.cq_pgtbl_phys);
+               ep->qp.cq_pgtbl_virt = NULL;
+               ep->qp.cq_pgtbl_phys = 0;
+       }
+       if (ep->qp.cq_virt) {
+               pci_free_consistent(ep->hba->pcidev, ep->qp.cq_mem_size,
+                                   ep->qp.cq_virt, ep->qp.cq_phys);
+               ep->qp.cq_virt = NULL;
+               ep->qp.cq_phys = 0;
+       }
+}
+
+extern unsigned int error_mask1, error_mask2;
+extern unsigned int en_tcp_dack;
+extern u64 iscsi_error_mask;
+
+/**
+ * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
+ *
+ * @hba:               adapter structure pointer
+ *
+ * Send down iscsi_init KWQEs which initiate the initial handshake with the f/w.
+ *     This results in iSCSI support validation and on-chip context manager
+ *     initialization.  Firmware completes this handshake with a CQE carrying
+ *     the result of iscsi support validation. Parameters carried by the
+ *     iscsi init request determine the number of offloaded connections and the
+ *     tolerance level for iscsi protocol violations this hba/chip can support
+ */
+int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+{
+       struct kwqe *kwqe_arr[3];
+       struct iscsi_kwqe_init1 iscsi_init;
+       struct iscsi_kwqe_init2 iscsi_init2;
+       int rc = 0;
+       u64 mask64;
+
+       bnx2i_adjust_qp_size(hba);
+
+       iscsi_init.flags =
+               ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+       if (en_tcp_dack)
+               iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
+       iscsi_init.reserved0 = 0;
+       iscsi_init.num_cqs = 1;
+       iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
+       iscsi_init.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+       iscsi_init.dummy_buffer_addr_hi =
+               (u32) ((u64) hba->dummy_buf_dma >> 32);
+
+       hba->ctx_ccell_tasks =
+                       ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
+       iscsi_init.num_ccells_per_conn = hba->num_ccell;
+       iscsi_init.num_tasks_per_conn = hba->max_sqes;
+       iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+       iscsi_init.sq_num_wqes = hba->max_sqes;
+       iscsi_init.cq_log_wqes_per_page =
+               (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+       iscsi_init.cq_num_wqes = hba->max_cqes;
+       iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
+                                  (PAGE_SIZE - 1)) / PAGE_SIZE;
+       iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
+                                  (PAGE_SIZE - 1)) / PAGE_SIZE;
+       iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
+       iscsi_init.rq_num_wqes = hba->max_rqes;
+
+
+       iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
+       iscsi_init2.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+       iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
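+       /* default error bit map; the entries below cover protocol violations
+        * observed with specific target implementations (noted inline)
+        */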
+       mask64 = 0x0ULL;
+       mask64 |= (
+               /* CISCO MDS */
+               (1UL <<
+                 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
+               /* HP MSA1510i */
+               (1UL <<
+                 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
+               /* EMC */
+               (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
+       if (error_mask1)
+               iscsi_init2.error_bit_map[0] = error_mask1;
+       else {
+               iscsi_init2.error_bit_map[0] = (u32) mask64;
+       }
+       if (error_mask2)
+               iscsi_init2.error_bit_map[1] = error_mask2;
+       else
+               iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
+
+       iscsi_error_mask = mask64;
+
+       kwqe_arr[0] = (struct kwqe *) &iscsi_init;
+       kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
+
+       if (hba->cnic && hba->cnic->submit_kwqes) {
+               rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
+       }
+       return rc;
+}
+
+
+/**
+ * bnx2i_process_scsi_cmd_resp - this function handles scsi command
+ *                     completion CQE processing
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process SCSI CMD Response CQE & complete the request to SCSI-ML
+ */
+static int bnx2i_process_scsi_cmd_resp(struct bnx2i_conn *conn,
+                                      struct cqe *cqe)
+{
+       struct iscsi_cmd_response *resp_cqe;
+       struct bnx2i_cmd *cmd;
+       struct scsi_cmnd *sc;
+       u32 itt;
+       struct Scsi_Host *host;
+       unsigned long flags;
+
+       if (conn->sess->recovery_state)
+               return -1;
+       resp_cqe = (struct iscsi_cmd_response *) cqe;
+
+       bnx2i_update_cmd_sequence(conn->sess, resp_cqe->exp_cmd_sn,
+                                 resp_cqe->max_cmd_sn);
+
+       itt = (resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+       cmd = get_cmnd(conn->sess, itt);
+       if (!cmd || !cmd->scsi_cmd) {
+               printk(KERN_ALERT "bnx2i: scsi rsp ITT=%x not active\n", itt);
+               return 0;
+       }
+
+       if (cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
+               conn->num_datain_pdus +=
+                       resp_cqe->task_stat.read_stat.num_data_outs;
+               conn->total_data_octets_rcvd +=
+                       cmd->req.total_data_transfer_length;
+       } else {
+               conn->num_dataout_pdus +=
+                       resp_cqe->task_stat.read_stat.num_data_outs;
+               conn->num_r2t_pdus +=
+                       resp_cqe->task_stat.read_stat.num_r2ts;
+               conn->total_data_octets_sent +=
+                       cmd->req.total_data_transfer_length;
+       }
+
+       sc = cmd->scsi_cmd;
+       cmd->scsi_status_rcvd = 1;
+       cmd->req.itt &= ISCSI_CMD_RESPONSE_INDEX;
+
+       host = cmd->conn->sess->host;
+       if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN) {
+               sc->result = (DID_OK << 16) | resp_cqe->status;
+               goto call_done;
+       }
+
+       bnx2i_process_scsi_resp(cmd, resp_cqe);
+
+call_done:
+       bnx2i_iscsi_unmap_sg_list(conn->sess->hba, cmd);
+       spin_lock_irqsave(host->host_lock, flags);
+       list_del_init(&cmd->link);
+       cmd->scsi_cmd = NULL;
+       conn->sess->num_active_cmds--;
+       sc->SCp.ptr = NULL;
+       sc->scsi_done(sc);
+       bnx2i_free_cmd(cmd->conn->sess, cmd);
+       spin_unlock_irqrestore(host->host_lock, flags);
+       return 0;
+}
+
+
+/**
+ * bnx2i_process_login_resp - this function handles iscsi login response
+ *                     CQE processing
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process Login Response CQE & complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_login_resp(struct bnx2i_conn *conn, struct cqe *cqe)
+{
+       struct bnx2i_cmd *cmd;
+       struct iscsi_login_response *login;
+       struct iscsi_login_rsp *resp_hdr;
+       u32 itt;
+       u32 dword;
+       int pld_len;
+       int pad_len;
+
+       login = (struct iscsi_login_response *) cqe;
+       resp_hdr = (struct iscsi_login_rsp *) &conn->gen_pdu.resp_hdr;
+       itt = (login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+
+       cmd = get_cmnd(conn->sess, itt);
+       if (!cmd) {
+               printk(KERN_WARNING "bnx2i - itt=%x not valid\n", itt);
+               return 0;
+       }
+
+       resp_hdr->opcode = login->op_code;
+       resp_hdr->flags = login->response_flags;
+       resp_hdr->max_version = login->version_max;
+       resp_hdr->active_version = login->version_active;
+       resp_hdr->hlength = 0;
+
+       dword = login->data_length;
+       resp_hdr->dlength[2] = (u8) dword;
+       resp_hdr->dlength[1] = (u8) (dword >> 8);
+       resp_hdr->dlength[0] = (u8) (dword >> 16);
+
+       memcpy(resp_hdr->isid, &login->isid_lo, 6);
+       resp_hdr->tsih = htons(login->tsih);
+       resp_hdr->itt = conn->gen_pdu.pdu_hdr.itt;
+       resp_hdr->statsn = htonl(login->stat_sn);
+
+       conn->sess->cmdsn = login->exp_cmd_sn;
+       resp_hdr->exp_cmdsn = htonl(login->exp_cmd_sn);
+       resp_hdr->max_cmdsn = htonl(login->max_cmd_sn);
+       resp_hdr->status_class = login->status_class;
+       resp_hdr->status_detail = login->status_detail;
+       pld_len = login->data_length;
+       conn->gen_pdu.resp_wr_ptr = conn->gen_pdu.resp_buf + pld_len;
+
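+       /* zero-pad the received login payload out to a 4-byte boundary */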
+       pad_len = 0;
+       if (pld_len & 0x3)
+               pad_len = 4 - (pld_len % 4);
+
+       if (pad_len) {
+               int i = 0;
+               for (i = 0; i < pad_len; i++) {
+                       conn->gen_pdu.resp_wr_ptr[0] = 0;
+                       conn->gen_pdu.resp_wr_ptr++;
+               }
+       }
+       bnx2i_indicate_login_resp(conn);
+       return 0;
+}
+
+/**
+ * bnx2i_process_tmf_resp - this function handles iscsi TMF response
+ *                     CQE processing
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI TMF Response CQE and wake up the driver eh thread.
+ */
+static int bnx2i_process_tmf_resp(struct bnx2i_conn *conn, struct cqe *cqe)
+{
+       u32 itt;
+       struct bnx2i_cmd *cmd;
+       struct bnx2i_cmd *aborted_cmd;
+       struct iscsi_tmf_response *tmf_cqe;
+
+       if (conn->sess->recovery_state)
+               return -1;
+       tmf_cqe = (struct iscsi_tmf_response *) cqe;
+       itt = (tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
+       cmd = get_cmnd(conn->sess, itt);
+       if (!cmd) {
+               printk(KERN_ALERT "tmf_resp: ITT 0x%x is not active\n",
+                                 tmf_cqe->itt);
+               return 0;
+       }
+
+       bnx2i_update_cmd_sequence(conn->sess, tmf_cqe->exp_cmd_sn,
+                                 tmf_cqe->max_cmd_sn);
+
+       aborted_cmd = cmd->tmf_ref_cmd;
+
+       if (tmf_cqe->response == ISCSI_TMF_RSP_COMPLETE) {
+               if (aborted_cmd->scsi_cmd) {
+                       aborted_cmd->scsi_cmd->result = DID_ABORT << 16;
+                       aborted_cmd->scsi_cmd->resid =
+                               aborted_cmd->scsi_cmd->request_bufflen;
+                       cmd->cmd_state = ISCSI_CMD_STATE_COMPLETED;
+               }
+       } else if (tmf_cqe->response == ISCSI_TMF_RSP_NO_TASK) {
+               if (aborted_cmd->scsi_cmd == cmd->tmf_ref_sc)
+                       printk(KERN_ALERT "TMF_RESP: task still allegiant\n");
+               cmd->cmd_state = ISCSI_CMD_STATE_COMPLETED;
+       } else {
+               printk(KERN_ALERT "TMF_RESP: failed, ITT 0x%x REF ITT 0x%x\n",
+                                 cmd->req.itt, aborted_cmd->req.itt);
+               cmd->cmd_state = ISCSI_CMD_STATE_FAILED;
+       }
+
+       wake_up(&cmd->conn->sess->er_wait);
+       return 0;
+}
+
+/**
+ * bnx2i_process_logout_resp - this function handles iscsi logout response
+ *                     CQE processing
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Logout Response CQE & make function call to
+ * notify the user daemon.
+ */
+static int bnx2i_process_logout_resp(struct bnx2i_conn *conn, struct cqe *cqe)
+{
+       struct bnx2i_cmd *cmd;
+       struct iscsi_logout_response *logout;
+       struct iscsi_logout_rsp *resp_hdr;
+       u32 itt;
+
+       logout = (struct iscsi_logout_response *) cqe;
+       resp_hdr = (struct iscsi_logout_rsp *) &conn->gen_pdu.resp_hdr;
+       itt = (logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
+
+       cmd = get_cmnd(conn->sess, itt);
+       if (!cmd || cmd != conn->gen_pdu.cmd)
+               return 0;
+
+       resp_hdr->opcode = logout->op_code;
+       resp_hdr->flags = logout->response;
+       resp_hdr->hlength = 0;
+
+       resp_hdr->itt = conn->gen_pdu.pdu_hdr.itt;
+       resp_hdr->statsn = conn->gen_pdu.pdu_hdr.exp_statsn;
+       resp_hdr->exp_cmdsn = htonl(logout->exp_cmd_sn);
+       resp_hdr->max_cmdsn = htonl(logout->max_cmd_sn);
+
+       resp_hdr->t2wait = htonl(logout->time_to_wait);
+       resp_hdr->t2retain = htonl(logout->time_to_retain);
+
+       conn->ep->teardown_mode = BNX2I_GRACEFUL_SHUTDOWN;
+
+       bnx2i_indicate_logout_resp(conn);
+       return 0;
+}
+
+/**
+ * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
+ *                     processing
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI NOPIN local completion CQE, frees ITT and command structures
+ */
+void bnx2i_process_nopin_local_cmpl(struct bnx2i_conn *conn, struct cqe *cqe)
+{
+       u32 itt;
+       struct Scsi_Host *host;
+       struct bnx2i_cmd *cmd;
+       struct iscsi_nop_in_msg *nop_in;
+
+       nop_in = (struct iscsi_nop_in_msg *) cqe;
+       host = conn->sess->host;
+
+       itt = (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
+       cmd = get_cmnd(conn->sess, itt);
+       if (!cmd) {
+               printk(KERN_ALERT "nop_in_local: ITT %x not active\n", itt);
+               return;
+       }
+       spin_lock(host->host_lock);
+       list_del_init(&cmd->link);
+       conn->sess->num_active_cmds--;
+       bnx2i_free_cmd(cmd->conn->sess, cmd);
+       spin_unlock(host->host_lock);
+
+       return;
+}
+
+/**
+ * bnx2i_process_tgt_noop_resp - this function handles iscsi nopout CQE
+ *                     processing
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * Process iSCSI target's nopin response to initiator's proactive nopout
+ */
+static int bnx2i_process_tgt_noop_resp(struct bnx2i_conn *conn, struct cqe *cqe)
+{
+       struct iscsi_nopin *resp_hdr;
+       u32 itt;
+       struct bnx2i_cmd *cmd;
+       struct iscsi_nop_in_msg *nop_in;
+
+       nop_in = (struct iscsi_nop_in_msg *) cqe;
+       itt = (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
+       resp_hdr = (struct iscsi_nopin *) &conn->gen_pdu.resp_hdr;
+
+       cmd = get_cmnd(conn->sess, itt);
+       if (!cmd)
+               return 0;
+
+       resp_hdr->opcode = nop_in->op_code;
+       resp_hdr->flags = ISCSI_FLAG_CMD_FINAL;
+       resp_hdr->rsvd2 = resp_hdr->rsvd3 = 0;
+       resp_hdr->itt = conn->gen_pdu.pdu_hdr.itt;
+       resp_hdr->ttt = ISCSI_RESERVED_TAG;
+
+       memcpy(resp_hdr->lun, nop_in->lun, 8);
+       resp_hdr->statsn = conn->gen_pdu.pdu_hdr.exp_statsn;
+       resp_hdr->exp_cmdsn = htonl(nop_in->exp_cmd_sn);
+       resp_hdr->max_cmdsn = htonl(nop_in->max_cmd_sn);
+       memset(resp_hdr->rsvd4, 0x00, 12);
+
+       bnx2i_process_nopin(conn, cmd, NULL, 0);
+       return 0;
+}
+
+
+/**
+ * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsolicited pdu
+ *                     is received
+ *
+ * @conn:              iscsi connection
+ *
+ * Firmware advances RQ producer index for every unsolicited PDU even if
+ *     payload data length is '0'. This function makes corresponding 
+ *     adjustments on the driver side to match this f/w behavior
+ */
+static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *conn)
+{
+       char dummy_rq_data[2];
+       bnx2i_get_rq_buf(conn, dummy_rq_data, 1);
+       bnx2i_put_rq_buf(conn, 1);
+}
+
+
+/**
+ * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
+ *                     processing
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI target's proactive iSCSI NOPIN request
+ */
+static void bnx2i_process_nopin_mesg(struct bnx2i_conn *conn, struct cqe *cqe)
+{
+       u32 itt;
+       u32 ttt;
+       struct iscsi_nop_in_msg *nop_in;
+
+       nop_in = (struct iscsi_nop_in_msg *) cqe;
+       itt = nop_in->itt;
+       ttt = nop_in->ttt;
+
+       bnx2i_update_cmd_sequence(conn->sess,
+                                 nop_in->exp_cmd_sn, nop_in->max_cmd_sn);
+
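+       /* a reserved ITT means this is an unsolicited target NOPIN; a reply
+        * NOP-OUT is needed only when the target supplied a valid TTT
+        */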
+       if (itt == (u16) ISCSI_RESERVED_TAG) {
+               struct bnx2i_cmd *cmd;
+
+               bnx2i_unsol_pdu_adjust_rq(conn);
+               if (ttt == ISCSI_RESERVED_TAG)
+                       return;
+
+               spin_lock(conn->sess->host->host_lock);
+               cmd = bnx2i_alloc_cmd(conn->sess);
+               if (!cmd) {
+                       /* should not happen as cmd list size == SHT->can_queue
+                        */
+                       spin_unlock(conn->sess->host->host_lock);
+                       return;
+               }
+               spin_unlock(conn->sess->host->host_lock);
+               cmd->conn = conn;
+               cmd->scsi_cmd = NULL;
+               cmd->req.total_data_transfer_length = 0;
+               cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
+
+               /* requires reply in the form of Nop-Out */
+               if (nop_in->data_length)
+                       printk(KERN_ALERT "Tgt NOPIN with dlen > 0\n");
+               bnx2i_send_iscsi_nopout(conn, cmd, ttt, NULL, 0, 0);
+       } else  /* target's reply to initiator's Nop-Out */
+               bnx2i_process_tgt_noop_resp(conn, cqe);
+}
+
+
+/**
+ * bnx2i_process_async_mesg - this function handles iscsi async message
+ *                     processing
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI ASYNC Message
+ */
+static void bnx2i_process_async_mesg(struct bnx2i_conn *conn, struct cqe *cqe)
+{
+       struct iscsi_async_msg *async_cqe;
+       struct iscsi_async *resp_hdr;
+       u8 async_event;
+
+       bnx2i_unsol_pdu_adjust_rq(conn);
+
+       async_cqe = (struct iscsi_async_msg *) cqe;
+       async_event = async_cqe->async_event;
+
+       if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
+               printk(KERN_ALERT "async: scsi events not supported\n");
+               return;
+       }
+
+       resp_hdr = (struct iscsi_async *) &conn->gen_pdu.resp_hdr;
+       resp_hdr->opcode = async_cqe->op_code;
+       resp_hdr->flags = 0x80;
+       resp_hdr->rsvd2[0] = resp_hdr->rsvd2[1] = resp_hdr->rsvd3 = 0;
+       resp_hdr->dlength[0] = resp_hdr->dlength[1] = resp_hdr->dlength[2] = 0;
+
+       memcpy(resp_hdr->lun, async_cqe->lun, 8);
+       resp_hdr->statsn = htonl(0);
+       resp_hdr->exp_cmdsn = htonl(async_cqe->exp_cmd_sn);
+       resp_hdr->max_cmdsn = htonl(async_cqe->max_cmd_sn);
+
+       resp_hdr->async_event = async_cqe->async_event;
+       resp_hdr->async_vcode = async_cqe->async_vcode;
+
+       resp_hdr->param1 = htons(async_cqe->param1);
+       resp_hdr->param2 = htons(async_cqe->param2);
+       resp_hdr->param3 = htons(async_cqe->param3);
+       resp_hdr->rsvd5[0] = resp_hdr->rsvd5[1] = 0;
+       resp_hdr->rsvd5[2] = resp_hdr->rsvd5[3] = 0;
+
+       bnx2i_indicate_async_mesg(conn);
+}
+
+
+/**
+ * bnx2i_process_reject_mesg - process iscsi reject pdu
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI REJECT message
+ */
+static void bnx2i_process_reject_mesg(struct bnx2i_conn *conn, struct cqe *cqe)
+{
+       struct iscsi_reject_msg *reject;
+       char rej_pdu[BNX2I_RQ_WQE_SIZE];
+       int rej_data_len, idx;
+
+       reject = (struct iscsi_reject_msg *) cqe;
+       rej_data_len = reject->data_length;
+       if (rej_data_len) {
+               bnx2i_get_rq_buf(conn, rej_pdu, rej_data_len);
+               bnx2i_put_rq_buf(conn, 1);
+               printk(KERN_ALERT "bnx2i - printing rejected PDU contents");
+               idx = 0;
+               printk(KERN_ALERT "\n[%x]: ", idx);
+               while (idx < rej_data_len) {
+                       printk(KERN_ALERT "%x ", rej_pdu[idx++]);
+                       if (!(idx % 8))
+                               printk(KERN_ALERT "\n[%x]: ", idx);
+               }
+       } else
+               bnx2i_unsol_pdu_adjust_rq(conn);
+
+       bnx2i_recovery_que_add_conn(conn->sess->hba, conn);
+}
+
+/**
+ * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
+ *
+ * @conn:              iscsi connection
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process command cleanup response CQE during conn shutdown or error recovery
+ */
+static void bnx2i_process_cmd_cleanup_resp(struct bnx2i_conn *conn,
+                                          struct cqe *cqe)
+{
+       u32 itt;
+       struct bnx2i_cmd *cmd;
+       struct iscsi_cleanup_response *cmd_clean_rsp;
+
+       cmd_clean_rsp = (struct iscsi_cleanup_response *) cqe;
+       itt = (cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+       cmd = get_cmnd(conn->sess, itt);
+       if (!cmd)
+               printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n", itt);
+               /* may be completion & cleanup request crossed each other */
+       else
+               cmd->cmd_state = ISCSI_CMD_STATE_CLEANUP_CMPL;
+}
+
+
+
+/**
+ * bnx2i_process_new_cqes - process newly DMA'ed CQE's
+ *
+ * @conn:              iscsi connection
+ *
+ * this function is called by generic KCQ handler to process all pending CQE's
+ */
+static void bnx2i_process_new_cqes(struct bnx2i_conn *conn)
+{
+       struct qp_info *qp = &conn->ep->qp;
+       struct iscsi_nop_in_msg *nopin;
+       volatile u32 *sess_state = &conn->sess->state;
+
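+       /* walk the CQ and dispatch each new CQE by opcode until an entry with
+        * a stale sequence number is seen or the session enters shutdown
+        */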
+       while (1) {
+               nopin = (struct iscsi_nop_in_msg *) qp->cq_cons_qe;
+               if ((nopin->cq_req_sn != qp->cqe_exp_seq_sn) ||
+                   (*sess_state == BNX2I_SESS_IN_SHUTDOWN))
+                       break;
+
+               if (nopin->op_code == ISCSI_OP_SCSI_CMD_RSP) {
+                       conn->num_scsi_resp_pdus++;
+                       bnx2i_process_scsi_cmd_resp(conn, qp->cq_cons_qe);
+               } else if (nopin->op_code == ISCSI_OP_SCSI_DATA_IN)
+                       bnx2i_process_scsi_cmd_resp(conn, qp->cq_cons_qe);
+               else if (nopin->op_code == ISCSI_OP_LOGIN_RSP) {
+                       conn->num_login_resp_pdus++;
+                       bnx2i_process_login_resp(conn, qp->cq_cons_qe);
+               } else if (nopin->op_code == ISCSI_OP_SCSI_TMFUNC_RSP) {
+                       conn->num_tmf_resp_pdus++;
+                       bnx2i_process_tmf_resp(conn, qp->cq_cons_qe);
+               } else if (nopin->op_code == ISCSI_OP_LOGOUT_RSP) {
+                       conn->num_logout_resp_pdus++;
+                       bnx2i_process_logout_resp(conn, qp->cq_cons_qe);
+               } else if (nopin->op_code == ISCSI_OP_NOOP_IN) {
+                       conn->num_nopin_pdus++;
+                       bnx2i_process_nopin_mesg(conn, qp->cq_cons_qe);
+               } else if (nopin->op_code ==
+                          ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION)
+                       bnx2i_process_nopin_local_cmpl(conn, qp->cq_cons_qe);
+               else if (nopin->op_code == ISCSI_OP_ASYNC_EVENT) {
+                       conn->num_async_pdus++;
+                       bnx2i_process_async_mesg(conn, qp->cq_cons_qe);
+               } else if (nopin->op_code == ISCSI_OP_REJECT) {
+                       conn->num_reject_pdus++;
+                       bnx2i_process_reject_mesg(conn, qp->cq_cons_qe);
+               } else if (nopin->op_code == ISCSI_OPCODE_CLEANUP_RESPONSE)
+                       bnx2i_process_cmd_cleanup_resp(conn, qp->cq_cons_qe);
+               else
+                       printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+                                         nopin->op_code);
+
+               /* keep the opcode field intact until beta to help debugging
+                * (context dump); clear it in the production version only:
+               nopin->op_code = 0;
+                */
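+               /* expected CQE sequence number cycles through 1..(2 * CQ depth) */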
+               qp->cqe_exp_seq_sn++;
+               if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
+                       qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+
+               if (qp->cq_cons_qe == qp->cq_last_qe) {
+                       qp->cq_cons_qe = qp->cq_first_qe;
+                       qp->cq_cons_idx = 0;
+               } else {
+                       qp->cq_cons_qe++;
+                       qp->cq_cons_idx++;
+               }
+       }
+       bnx2i_arm_cq_event_coalescing(conn->ep, CNIC_ARM_CQE);
+}
+
+/**
+ * bnx2i_fastpath_notification - process global event queue (KCQ)
+ *
+ * @hba:               adapter structure pointer
+ * @new_cqe_kcqe:      pointer to newly DMA'ed KCQE entry
+ *
+ * Fast path event notification handler, KCQ entry carries context id
+ *     of the connection that has 1 or more pending CQ entries
+ */
+static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
+                                       struct iscsi_kcqe *new_cqe_kcqe)
+{
+       struct bnx2i_conn *conn;
+       u32 iscsi_cid;
+
+       iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!conn) {
+               printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
+               return;
+       }
+       if (!conn->ep) {
+               printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
+               return;
+       }
+
+       bnx2i_process_new_cqes(conn);
+}
+
+
+/**
+ * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
+ *
+ * @hba:               adapter structure pointer
+ * @update_kcqe:       kcqe pointer
+ *
+ * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
+ */
+static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
+                                          struct iscsi_kcqe *update_kcqe)
+{
+       struct bnx2i_conn *conn;
+       u32 iscsi_cid;
+
+       iscsi_cid = update_kcqe->iscsi_conn_id;
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!conn) {
+               printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
+               return;
+       }
+       if (!conn->ep) {
+               printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
+               return;
+       }
+
+       if (update_kcqe->completion_status) {
+               printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
+               conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
+       } else
+               conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
+
+       wake_up_interruptible(&conn->ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_recovery_que_add_conn - add connection to recovery queue
+ *
+ * @hba:               adapter structure pointer
+ * @conn:              iscsi connection
+ *
+ * Add connection to recovery queue and schedule adapter eh worker
+ */
+void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
+                                struct bnx2i_conn *conn)
+{
+       int prod_idx = hba->sess_recov_prod_idx;
+
+       if (conn->sess->recovery_state || conn->sess->tmf_active)
+               return;
+
+       spin_lock(&hba->lock);
+       conn->sess->recovery_state = ISCSI_SESS_RECOVERY_START;
+       hba->sess_recov_list[prod_idx] = conn->sess;
+       if (hba->sess_recov_max_idx == hba->sess_recov_prod_idx)
+               hba->sess_recov_prod_idx = 0;
+       else
+               hba->sess_recov_prod_idx++;
+       spin_unlock(&hba->lock);
+
+       schedule_work(&hba->err_rec_task);
+}
+
+
+/**
+ * bnx2i_process_tcp_error - process error notification on a given connection
+ *
+ * @hba:               adapter structure pointer
+ * @tcp_err:           tcp error kcqe pointer
+ *
+ * handles tcp level error notifications from FW.
+ */
+static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
+                                   struct iscsi_kcqe *tcp_err)
+{
+       struct bnx2i_conn *conn;
+       u32 iscsi_cid;
+
+       iscsi_cid = tcp_err->iscsi_conn_id;
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!conn) {
+               printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+               return;
+       }
+
+       printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
+                         iscsi_cid, tcp_err->completion_status);
+       bnx2i_recovery_que_add_conn(conn->sess->hba, conn);
+}
+
+
+/**
+ * bnx2i_process_iscsi_error - process error notification on a given connection
+ *
+ * @hba:               adapter structure pointer
+ * @iscsi_err:                 iscsi error kcqe pointer
+ *
+ * handles iSCSI error notifications from the FW. Based on the initial
+ *     handshake, firmware classifies an iSCSI protocol or TCP RFC violation
+ *     as either a warning or an error indication. For an "Error" indication,
+ *     the driver initiates session recovery for that connection/session. For
+ *     a "Warning" indication, the driver logs a system message (only one
+ *     message per violation type for the life of the session, to avoid
+ *     unnecessarily flooding the log)
+ */
+static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
+                                     struct iscsi_kcqe *iscsi_err)
+{
+       struct bnx2i_conn *conn;
+       u32 iscsi_cid;
+       char warn_notice[] = "iscsi_warning";
+       char error_notice[] = "iscsi_error";
+       char additional_notice[64];
+       char *message;
+       int need_recovery;
+       u64 err_mask64;
+
+       iscsi_cid = iscsi_err->iscsi_conn_id;
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!conn) {
+               printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+               return;
+       }
+
+       err_mask64 = (0x1ULL << iscsi_err->completion_status);
+
+       if (err_mask64 & iscsi_error_mask) {
+               need_recovery = 0;
+               message = warn_notice;
+       } else {
+               need_recovery = 1;
+               message = error_notice;
+       }
+
+       switch (iscsi_err->completion_status) {
+       case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
+               strcpy(additional_notice, "hdr digest err");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
+               strcpy(additional_notice, "data digest err");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
+               strcpy(additional_notice, "wrong opcode rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
+               strcpy(additional_notice, "AHS len > 0 rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
+               strcpy(additional_notice, "invalid ITT rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
+               strcpy(additional_notice, "wrong StatSN rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
+               strcpy(additional_notice, "wrong DataSN rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T :
+               strcpy(additional_notice, "pend R2T violation");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
+               strcpy(additional_notice, "ERL0, UO");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
+               strcpy(additional_notice, "ERL0, U1");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
+               strcpy(additional_notice, "ERL0, U2");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
+               strcpy(additional_notice, "ERL0, U3");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
+               strcpy(additional_notice, "ERL0, U4");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
+               strcpy(additional_notice, "ERL0, U5");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
+               strcpy(additional_notice, "ERL0, U6");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
+               strcpy(additional_notice, "invalid resi len");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
+               strcpy(additional_notice, "MRDSL violation");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
+               strcpy(additional_notice, "F-bit not set");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
+               strcpy(additional_notice, "invalid TTT");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
+               strcpy(additional_notice, "invalid DataSN");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
+               strcpy(additional_notice, "burst len violation");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
+               strcpy(additional_notice, "buf offset violation");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
+               strcpy(additional_notice, "invalid LUN field");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
+               strcpy(additional_notice, "invalid R2TSN field");
+               break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0      \
+       ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
+       case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
+               strcpy(additional_notice, "invalid cmd len1");
+               break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1      \
+       ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
+       case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
+               strcpy(additional_notice, "invalid cmd len2");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
+               strcpy(additional_notice,
+                      "pend r2t exceeds MaxOutstandingR2T value");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
+               strcpy(additional_notice, "TTT is rsvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
+               strcpy(additional_notice, "MBL violation");
+               break;
+#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO        \
+       ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
+       case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
+               strcpy(additional_notice, "data seg len != 0");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
+               strcpy(additional_notice, "reject pdu len error");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
+               strcpy(additional_notice, "async pdu len error");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
+               strcpy(additional_notice, "nopin pdu len error");
+               break;
+#define BNX2_ERR_PEND_R2T_IN_CLEANUP                   \
+       ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
+       case BNX2_ERR_PEND_R2T_IN_CLEANUP:
+               strcpy(additional_notice, "pend r2t in cleanup");
+               break;
+
+       case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
+               strcpy(additional_notice, "IP fragments rcvd");
+               break;
+       case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
+               strcpy(additional_notice, "IP options error");
+               break;
+       case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
+               strcpy(additional_notice, "urgent flag error");
+               break;
+       default:
+               printk(KERN_ALERT "iscsi_err - unknown err %x\n",
+                                 iscsi_err->completion_status);
+       }
+
+       if (need_recovery) {
+               printk(KERN_ALERT "bnx2i: %s - %s\n",
+                                 message, additional_notice);
+
+               printk(KERN_ALERT "conn_err - hostno %d conn %p, "
+                                 "iscsi_cid %x cid %x\n",
+                                 conn->sess->host->host_no, conn,
+                                 conn->ep->ep_iscsi_cid,
+                                 conn->ep->ep_cid);
+               bnx2i_recovery_que_add_conn(conn->sess->hba, conn);
+       } else
+               if (!test_and_set_bit(iscsi_err->completion_status,
+                                     (void *) &conn->sess->violation_notified))
+                       printk(KERN_ALERT "bnx2i: %s - %s\n",
+                                         message, additional_notice);
+}
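The warning-versus-error decision above is a single bit test: if bit 'completion_status' of the 64-bit iscsi_error_mask is set, the violation is only logged, otherwise session recovery is queued. The error_mask1/error_mask2 module parameters declared in bnx2i_init.c later in this patch are the natural source of that mask, but the exact packing is not visible in this hunk, so the sketch below assumes a low/high 32-bit split purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: error_mask1 -> bits 0..31, error_mask2 -> bits 32..63.
 * The real driver's packing is not shown in this patch hunk. */
static uint64_t build_iscsi_error_mask(uint32_t error_mask1, uint32_t error_mask2)
{
        return (uint64_t)error_mask1 | ((uint64_t)error_mask2 << 32);
}

/* Mirrors the test in bnx2i_process_iscsi_error(): a set bit means "warning" */
static int is_warning(uint64_t iscsi_error_mask, unsigned int completion_status)
{
        return (iscsi_error_mask & (1ULL << completion_status)) != 0;
}

int main(void)
{
        uint64_t mask = build_iscsi_error_mask(0x6, 0x0); /* statuses 1, 2 -> warnings */

        printf("status 1 -> %s\n", is_warning(mask, 1) ? "warning" : "error, recovery");
        printf("status 5 -> %s\n", is_warning(mask, 5) ? "warning" : "error, recovery");
        return 0;
}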
+
+
+/**
+ * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
+ *
+ * @hba:               adapter structure pointer
+ * @conn_destroy:      conn destroy kcqe pointer
+ *
+ * handles connection destroy completion request.
+ */
+static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
+                                           struct iscsi_kcqe *conn_destroy)
+{
+       struct bnx2i_endpoint *ep;
+
+       ep = bnx2i_ep_destroy_list_next(hba);
+       if (!ep) {
+               printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
+                                 "destroy request, unexpected completion\n");
+               return;
+       }
+
+       if (hba != ep->hba) {
+               printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+               return;
+       }
+
+       if (conn_destroy->completion_status) {
+               printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
+               ep->state = EP_STATE_CLEANUP_FAILED;
+       } else
+               ep->state = EP_STATE_CLEANUP_CMPL;
+       wake_up_interruptible(&ep->ofld_wait);
+
+       if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
+               wake_up_interruptible(&hba->eh_wait);
+}
+
+
+/**
+ * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
+ *
+ * @hba:               adapter structure pointer
+ * @ofld_kcqe:                 conn offload kcqe pointer
+ *
+ * handles initial connection offload completion, ep_connect() thread is
+ *     woken-up to continue with LLP connect process
+ */
+static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
+                                   struct iscsi_kcqe *ofld_kcqe)
+{
+       u32 cid_addr;
+       struct bnx2i_endpoint *ep;
+       u32 cid_num;
+
+       ep = bnx2i_ep_ofld_list_next(hba);
+       if (!ep) {
+               printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
+               return;
+       }
+
+       if (hba != ep->hba) {
+               printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+               return;
+       }
+
+       if (ofld_kcqe->completion_status) {
+               if (ofld_kcqe->completion_status ==
+                   ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
+                       printk(KERN_ALERT "bnx2i: unable to allocate"
+                                         " iSCSI context resources\n");
+               ep->state = EP_STATE_OFLD_FAILED;
+       } else {
+               ep->state = EP_STATE_OFLD_COMPL;
+               cid_addr = ofld_kcqe->iscsi_conn_context_id;
+               cid_num = bnx2i_get_cid_num(ep);
+               ep->ep_cid = cid_addr;
+               ep->qp.ctx_base = NULL;
+       }
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+/**
+ * bnx2i_indicate_kcqe - generic KCQ event handler/dispatcher
+ *
+ * @context:           adapter structure pointer
+ * @kcqe:              array of pointers to newly DMA'ed KCQE entries
+ * @num_cqe:           number of KCQE entries in the array
+ *
+ * Dispatches each KCQE to the appropriate handler based on its opcode
+ */
+static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
+                               u32 num_cqe)
+{
+       struct bnx2i_hba *hba = (struct bnx2i_hba *) context;
+       int i = 0;
+       struct iscsi_kcqe *ikcqe = NULL;
+
+       while (i < num_cqe) {
+               ikcqe = (struct iscsi_kcqe *) kcqe[i++];
+
+               if (ikcqe->op_code ==
+                   ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
+                       bnx2i_fastpath_notification(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
+                       bnx2i_process_ofld_cmpl(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
+                       bnx2i_process_update_conn_cmpl(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
+                       if (ikcqe->completion_status !=
+                           ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
+                               bnx2i_iscsi_license_error(hba,
+                                               ikcqe->completion_status);
+                       else {
+                               set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+                               bnx2i_get_link_state(hba);
+                               printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
+                                                "ISCSI_INIT passed\n",
+                                                (u8)hba->pcidev->bus->number,
+                                                hba->pci_devno,
+                                                (u8)hba->pci_func);
+                       }
+               } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
+                       bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
+                       bnx2i_process_iscsi_error(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
+                       bnx2i_process_tcp_error(hba, ikcqe);
+               else
+                       printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+                                         ikcqe->op_code);
+       }
+}
+
+
+/**
+ * bnx2i_indicate_inetevent - Generic netstack event handler
+ *
+ * @context:           adapter structure pointer
+ * @event:             event type
+ *
+ * Only required to handle NETDEV_UP event at this time
+ */
+static void bnx2i_indicate_inetevent(void *context, unsigned long event)
+{
+       struct bnx2i_hba *hba = (struct bnx2i_hba *) context;
+
+       switch (event) {
+       case NETDEV_UP:
+               bnx2i_iscsi_handle_ip_event(hba);
+               break;
+       default:
+               ;
+       }
+}
+
+/**
+ * bnx2i_indicate_netevent - Generic netdev event handler
+ *
+ * @context:           adapter structure pointer
+ * @event:             event type
+ *
+ * Handles four netdev events: NETDEV_UP, NETDEV_DOWN,
+ *     NETDEV_GOING_DOWN and NETDEV_CHANGE
+ */
+static void bnx2i_indicate_netevent(void *context, unsigned long event)
+{
+       struct bnx2i_hba *hba = (struct bnx2i_hba *) context;
+
+       switch (event) {
+       case NETDEV_UP:
+               if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+                       bnx2i_send_fw_iscsi_init_msg(hba);
+               break;
+       case NETDEV_DOWN:
+               clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+               clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+               break;
+       case NETDEV_GOING_DOWN:
+               set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+               bnx2i_start_iscsi_hba_shutdown(hba);
+               break;
+       case NETDEV_CHANGE:
+               bnx2i_get_link_state(hba);
+               break;
+       default:
+               ;
+       }
+}
+
+
+/**
+ * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
+ *
+ * @cm_sk:             cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *     indicate completion of option-2 TCP connect request.
+ */
+static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+               ep->state = EP_STATE_CONNECT_FAILED;
+       else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
+               ep->state = EP_STATE_CONNECT_COMPL;
+       else
+               ep->state = EP_STATE_CONNECT_FAILED;
+
+       printk("bnx2i[%lx]: tcp_conn_cmpl - ep 0x%p, state 0x%x\n",
+              jiffies, ep, ep->state);
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_close_cmpl - process tcp conn close completion
+ *
+ * @cm_sk:             cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *     indicate completion of option-2 graceful TCP connect shutdown
+ */
+static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       ep->state = EP_STATE_DISCONN_COMPL;
+       printk("bnx2i[%lx]: tcp_close_cmpl - ep 0x%p, state 0x%x\n",
+              jiffies, ep, ep->state);
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
+ *
+ * @cm_sk:             cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *     indicate completion of option-2 abortive TCP connect termination
+ */
+static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       ep->state = EP_STATE_DISCONN_COMPL;
+       printk("bnx2i[%lx]: tcp_abort_cmpl - ep 0x%p, state 0x%x\n",
+              jiffies, ep, ep->state);
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_remote_close - process received TCP FIN
+ *
+ * @cm_sk:             cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to indicate
+ *     async TCP events such as FIN
+ */
+static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       ep->state = EP_STATE_TCP_FIN_RCVD;
+       printk("bnx2i[%lx]: tcp_remote_close - ep 0x%p, state 0x%x\n",
+              jiffies, ep, ep->state);
+       if (ep->conn && ep->conn->sess &&
+               (ep->conn->sess->state == BNX2I_SESS_IN_FFP))
+               bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+
+/**
+ * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
+ *
+ * @cm_sk:             cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *     indicate async TCP events (RST) sent by the peer.
+ */
+static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       ep->state = EP_STATE_TCP_RST_RCVD;
+       printk("bnx2i[%lx]: tcp_remote_abort - ep 0x%p, state 0x%x\n",
+              jiffies, ep, ep->state);
+       if (ep->conn && ep->conn->sess &&
+               (ep->conn->sess->state == BNX2I_SESS_IN_FFP)) {
+               bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+       }
+}
+
+
+/**
+ * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
+ *                     carrying callback function pointers
+ *
+ */
+struct cnic_ulp_ops bnx2i_cnic_cb = {
+       .cnic_init = bnx2i_ulp_init,
+       .cnic_exit = bnx2i_ulp_exit,
+       .cnic_start = bnx2i_start,
+       .cnic_stop = bnx2i_stop,
+       .indicate_kcqes = bnx2i_indicate_kcqe,
+       .indicate_netevent = bnx2i_indicate_netevent,
+       .indicate_inetevent = bnx2i_indicate_inetevent,
+       .cm_connect_complete = bnx2i_cm_connect_cmpl,
+       .cm_close_complete = bnx2i_cm_close_cmpl,
+       .cm_abort_complete = bnx2i_cm_abort_cmpl,
+       .cm_remote_close = bnx2i_cm_remote_close,
+       .cm_remote_abort = bnx2i_cm_remote_abort,
+       .owner = THIS_MODULE
+};
+
+
+/**
+ * bnx2i_map_ep_dbell_regs - map connection doorbell registers
+ *
+ * @ep:                        endpoint structure pointer
+ *
+ * maps the connection's SQ and RQ doorbell registers. 5706/5708/5709 host
+ *     these registers in BAR #0, whereas on 57710 they are accessed by
+ *     mapping BAR #1
+ */
+void bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
+{
+       u32 cid_num;
+       u32 reg_off;
+       u32 first_l4l5;
+       u32 ctx_sz;
+       u32 config2;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
+       resource_size_t reg_base;
+#else
+       unsigned long reg_base;
+#endif
+
+       cid_num = bnx2i_get_cid_num(ep);
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+               printk("bnx2i: map_dbell 57710 - ep 0x%p, iscsi_cid 0x%x, "
+                      "cid 0x%x\n", ep, ep->ep_iscsi_cid, ep->ep_cid);
+               reg_base = pci_resource_start(ep->hba->pcidev,
+                                             BNX2X_DOORBELL_PCI_BAR);
+               reg_off = PAGE_SIZE * cid_num + DPM_TRIGER_TYPE;
+               ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+               goto arm_cq;
+       }
+
+       reg_base = ep->hba->netdev->base_addr;
+       if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
+           (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
+               config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
+               first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
+               ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
+               if (ctx_sz)
+                       reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
+                                 + PAGE_SIZE *
+                                 (((cid_num - first_l4l5) / ctx_sz) + 256);
+               else
+                       reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+       } else
+               /* 5709 device in normal mode and 5706/5708 devices */
+               reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+
+       ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+                                         MB_KERNEL_CTX_SIZE);
+arm_cq:
+       bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
+}
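For orientation: on 57710 the doorbell for a connection sits at a page-sized stride into the doorbell BAR (reg_off = PAGE_SIZE * cid_num + DPM_TRIGER_TYPE), so a hypothetical cid_num of 5 with 4 KB pages lands 20480 bytes plus the trigger-type offset into BAR #1; on 5706/5708, and on 5709 in normal mode, it is instead a fixed-size mailbox slot at CTX_OFFSET + cid_num * MB_KERNEL_CTX_SIZE in BAR #0. The cid value and page size here are illustrative only.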
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644 (file)
index 0000000..2179425
--- /dev/null
@@ -0,0 +1,622 @@
+/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+static struct list_head adapter_list;
+static u32 adapter_count;
+int bnx2i_reg_device = 0;
+
+#define DRV_MODULE_NAME                "bnx2i"
+#define DRV_MODULE_VERSION     "1.3.5"
+#define DRV_MODULE_RELDATE     "Oct 06, 2008"
+
+static char version[] __devinitdata =
+               "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
+               " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710 iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+DEFINE_RWLOCK(bnx2i_dev_lock);
+
+unsigned int event_coal_div = 1;
+module_param(event_coal_div, int, 0664);
+MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
+
+unsigned int bnx2i_nopout_when_cmds_active = 1;
+module_param(bnx2i_nopout_when_cmds_active, int, 0664);
+MODULE_PARM_DESC(bnx2i_nopout_when_cmds_active,
+               "iSCSI NOOP even when connection is not idle");
+
+unsigned int en_tcp_dack = 1;
+module_param(en_tcp_dack, int, 0664);
+MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
+
+unsigned int error_mask1 = 0x00;
+module_param(error_mask1, int, 0664);
+MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
+
+unsigned int error_mask2 = 0x00;
+module_param(error_mask2, int, 0664);
+MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
+
+unsigned int sq_size = 0;
+module_param(sq_size, int, 0664);
+MODULE_PARM_DESC(sq_size, "Configure SQ size");
+
+unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
+module_param(rq_size, int, 0664);
+MODULE_PARM_DESC(rq_size, "Configure RQ size");
+
+u64 iscsi_error_mask = 0x00;
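Since every knob above is declared with module_param(..., 0664), these values can be set at module load time, for example 'modprobe bnx2i en_tcp_dack=0 sq_size=128' (purely illustrative values), and inspected afterwards under /sys/module/bnx2i/parameters/.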
+
+extern struct cnic_ulp_ops bnx2i_cnic_cb;
+extern spinlock_t bnx2i_resc_lock; /* protects global data structures */
+extern struct tcp_port_mngt bnx2i_tcp_port_tbl;
+
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
+static int bnx2i_bind_adapter_devices(struct bnx2i_hba *hba);
+static void bnx2i_unbind_adapter_devices(struct bnx2i_hba *hba);
+
+
+/**
+ * bnx2i_identify_device - identifies NetXtreme II device type
+ *
+ * @hba:               Adapter structure pointer
+ *
+ * This function identifies the NX2 device type and sets appropriate
+ *     queue mailbox register access method, 5709 requires driver to
+ *     access MBOX regs using *bin* mode
+ **/
+void bnx2i_identify_device(struct bnx2i_hba *hba)
+{
+       hba->cnic_dev_type = 0;
+       if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
+           (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
+               set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+       else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
+           (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
+               set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+       else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
+           (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
+               set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+               hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+       } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710)
+               set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+}
+
+/**
+ * bnx2i_get_tcp_port_requirements - returns num tcp ports to alloc/bind
+ *
+ * returns the number of TCP ports to be allocated/bound by the 'bnx2id'
+ *     daemon. A return value of '0' means the driver already has everything
+ *     it needs to support the maximum iSCSI connections on enumerated NX2
+ *     devices
+ **/
+int bnx2i_get_tcp_port_requirements(void)
+{
+       struct list_head *list;
+       struct list_head *temp;
+       struct bnx2i_hba *hba;
+       int count = 0;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each_safe(list, temp, &adapter_list) {
+               hba = (struct bnx2i_hba *) list;
+               count += hba->max_active_conns;
+       }
+       read_unlock(&bnx2i_dev_lock);
+       return count;
+}
+
+
+/**
+ * get_adapter_list_head - returns head of adapter list
+ *
+ **/
+struct bnx2i_hba *get_adapter_list_head(void)
+{
+       struct list_head *list;
+       struct bnx2i_hba *hba = NULL;
+
+       if (!adapter_count)
+               goto hba_not_found;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each(list, &adapter_list) {
+               hba = (struct bnx2i_hba *) list;
+               if (hba && hba->cnic && hba->cnic->cm_select_dev)
+                       break;
+               hba = NULL;
+       }
+       read_unlock(&bnx2i_dev_lock);
+hba_not_found:
+       return hba;
+}
+
+/**
+ * bnx2i_get_hba_from_template - maps scsi_transport_template to
+ *             bnx2i adapter pointer
+ * @scsit:             scsi transport template pointer
+ *
+ **/
+struct bnx2i_hba *bnx2i_get_hba_from_template(
+                       struct scsi_transport_template *scsit)
+{
+       struct list_head *list;
+       struct bnx2i_hba *hba = NULL;
+       int match_found = 0;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each(list, &adapter_list) {
+               hba = (struct bnx2i_hba *) list;
+               if (hba->shost_template == scsit) {
+                       match_found = 1;
+                       break;
+               }
+       }
+       read_unlock(&bnx2i_dev_lock);
+
+       if (match_found == 0)
+               hba = NULL;
+       return hba;
+}
+
+
+/**
+ * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
+ *
+ * @cnic:              pointer to cnic device instance
+ *
+ **/
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+       struct list_head *list;
+       struct list_head *temp;
+       struct bnx2i_hba *hba;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each_safe(list, temp, &adapter_list) {
+               hba = (struct bnx2i_hba *) list;
+
+               if (hba->cnic == cnic) {
+                       read_unlock(&bnx2i_dev_lock);
+                       return hba;
+               }
+       }
+       read_unlock(&bnx2i_dev_lock);
+       return NULL;
+}
+
+
+/**
+ * bnx2i_start - cnic callback to initialize & start adapter instance
+ *
+ * @handle:            transparent handle pointing to adapter structure
+ *
+ * This function maps the adapter structure to the pcidev structure and
+ *     initiates the firmware handshake to enable/initialize on-chip iSCSI
+ *     components. This bnx2i - cnic interface API callback is issued after
+ *     the following two conditions are met:
+ *       a) the underlying network interface is up (marked by the 'NETDEV_UP'
+ *             event from netdev)
+ *       b) the bnx2i adapter instance is registered
+ **/
+void bnx2i_start(void *handle)
+{
+#define BNX2I_INIT_POLL_TIME   (1000 / HZ)
+       struct bnx2i_hba *hba = handle;
+       int i = HZ;
+
+       bnx2i_bind_adapter_devices(hba);
+       bnx2i_send_fw_iscsi_init_msg(hba);
+       while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+               msleep(BNX2I_INIT_POLL_TIME);
+}
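Note that the wait loop above polls ADAPTER_STATE_UP at most HZ times, sleeping BNX2I_INIT_POLL_TIME = 1000/HZ milliseconds per iteration, so bnx2i_start() gives the firmware init handshake (completed via the ISCSI_KCQE_OPCODE_INIT case in bnx2i_indicate_kcqe) roughly one second to mark the adapter up before returning either way.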
+
+
+/**
+ * bnx2i_stop - cnic callback to shutdown adapter instance
+ *
+ * @handle:            transparent handle pointing to adapter structure
+ *
+ * driver checks if the adapter is already in shutdown mode; if not, it
+ *     starts the shutdown process
+ **/
+void bnx2i_stop(void *handle)
+{
+       struct bnx2i_hba *hba = handle;
+
+       /* check if cleanup happened in GOING_DOWN context */
+       if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
+               bnx2i_start_iscsi_hba_shutdown(hba);
+       }
+       clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+}
+
+/**
+ * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
+ *
+ * @hba:               Adapter instance to register
+ *
+ * registers bnx2i adapter instance with the cnic driver while holding the
+ *     adapter structure lock
+ **/
+void bnx2i_register_device(struct bnx2i_hba *hba)
+{
+       spin_lock(&hba->lock);  /* called from ep_connect context */
+
+       if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+           test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               goto rel_lock;
+       }
+
+       hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, (void *) hba);
+       bnx2i_reg_device++;
+       set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+rel_lock:
+       spin_unlock(&hba->lock);
+}
+
+
+/**
+ * bnx2i_reg_dev_all - registers all bnx2i adapter instances with the
+ *                     cnic driver
+ *
+ * registers all bnx2i adapter instances with the cnic driver while holding
+ *     the global resource lock
+ **/
+void bnx2i_reg_dev_all(void)
+{
+       struct list_head *list;
+       struct list_head *temp;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each_safe(list, temp, &adapter_list)
+               bnx2i_register_device((struct bnx2i_hba *) list);
+       read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_unreg_one_device - unregister bnx2i adapter instance with
+ *                     the cnic driver
+ *
+ * @hba:               Adapter instance to unregister
+ *
+ * unregisters the bnx2i adapter instance from the cnic driver while holding
+ *     the adapter structure lock
+ **/
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
+{
+       spin_lock(&hba->lock); /* ep_connect/ep_disconnect() */
+       if (hba->ofld_conns_active ||
+           !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
+           test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
+               spin_unlock(&hba->lock);
+               return;
+       }
+
+       hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+       bnx2i_reg_device--;
+       /* ep_disconnect could come before NETDEV_DOWN, driver won't
+        * see NETDEV_DOWN as it already unregistered itself.
+        */
+       hba->adapter_state = 0;
+       clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+       bnx2i_unbind_adapter_devices(hba);
+       spin_unlock(&hba->lock);
+}
+
+/**
+ * bnx2i_unreg_dev_all - unregisters all bnx2i adapter instances with the
+ *                     cnic driver
+ *
+ * unregisters all bnx2i adapter instances with the cnic driver while holding
+ *     the global resource lock
+ **/
+void bnx2i_unreg_dev_all(void)
+{
+       struct list_head *list;
+       struct list_head *temp;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each_safe(list, temp, &adapter_list)
+               bnx2i_unreg_one_device((struct bnx2i_hba *) list);
+       read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_bind_adapter_devices - binds bnx2i adapter with the associated
+ *                     pcidev structure
+ *
+ * @hba:               Adapter instance
+ *
+ * With newly introduced changes to bnx2i - cnic interface, cnic_dev's 'pcidev'
+ *     field will be valid only after bnx2i registers the device instance.
+ *     Previously, this field was valid during the device enumeration process
+ **/
+static int bnx2i_bind_adapter_devices(struct bnx2i_hba *hba)
+{
+       struct cnic_dev *cnic;
+
+       if (!hba->cnic)
+               return -ENODEV;
+
+       cnic = hba->cnic;
+       hba->pcidev = cnic->pcidev;
+       if (hba->pcidev) {
+               pci_dev_get(hba->pcidev);
+               hba->pci_did = hba->pcidev->device;
+               hba->pci_vid = hba->pcidev->vendor;
+               hba->pci_sdid = hba->pcidev->subsystem_device;
+               hba->pci_svid = hba->pcidev->subsystem_vendor;
+               hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+               hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+       }
+
+       bnx2i_identify_device(hba);
+
+       if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+               hba->regview = ioremap_nocache(hba->netdev->base_addr,
+                                              BNX2_MQ_CONFIG2);
+               if (!hba->regview)
+                       goto mem_err;
+       }
+
+       if (bnx2i_setup_mp_bdt(hba))
+               goto mem_err;
+
+       bnx2i_init_ctx_dump_mem(hba);
+       return 0;
+
+mem_err:
+       bnx2i_unbind_adapter_devices(hba);
+       return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_unbind_adapter_devices - removes bnx2i adapter to pcidev mapping
+ *
+ * @hba:               Adapter instance
+ *
+ **/
+void bnx2i_unbind_adapter_devices(struct bnx2i_hba *hba)
+{
+       if (hba->regview) {
+               iounmap(hba->regview);
+               hba->regview = NULL;
+       }
+
+       bnx2i_free_ctx_dump_mem(hba);
+       bnx2i_free_mp_bdt(hba);
+
+       if (hba->pcidev)
+               pci_dev_put(hba->pcidev);
+       hba->pcidev = NULL;
+}
+
+
+/**
+ * bnx2i_init_one - initialize an adapter instance and allocate necessary
+ *             memory resources
+ *
+ * @hba:               bnx2i adapter instance
+ * @cnic:              cnic device handle
+ *
+ * The global resource lock and the host adapter lock are held during the
+ *     critical sections below. This routine is called from
+ *     cnic_register_driver() context and from the work-horse thread that
+ *     does the majority of device-specific initialization
+ **/
+static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
+{
+       int rc;
+
+       read_lock(&bnx2i_dev_lock);
+       hba->netdev = cnic->netdev;
+       if (bnx2i_reg_device &&
+               !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               spin_lock(&hba->lock);  /* hot plug */
+               rc = cnic->register_device(cnic, CNIC_ULP_ISCSI,
+                                          (void *) hba);
+               if (rc)         /* duplicate registration */
+                       printk(KERN_ERR "bnx2i- dev reg failed\n");
+               bnx2i_reg_device++;
+               hba->age++;
+               set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+               spin_unlock(&hba->lock);
+       }
+       read_unlock(&bnx2i_dev_lock);
+
+       if (cnic->netdev)
+               memcpy(hba->mac_addr, cnic->netdev->dev_addr, MAX_ADDR_LEN);
+
+       /* Allocate memory & initialize the SCSI/iSCSI host templates */
+       rc = bnx2i_register_xport(hba);
+       if (rc)
+               goto failed_xport_reg;
+
+       /* create 'sysfs' device objects */
+       rc = bnx2i_register_sysfs(hba);
+       if (rc)
+               goto failed_sysfs_reg;
+
+       bnx2i_init_mips_idle_counters(hba);
+       bnx2i_tcp_port_tbl.num_required += hba->max_active_conns;
+
+       write_lock(&bnx2i_dev_lock);
+       list_add_tail(&hba->link, &adapter_list);
+       adapter_count++;
+       write_unlock(&bnx2i_dev_lock);
+
+       return 0;
+
+failed_sysfs_reg:
+       bnx2i_deregister_xport(hba);
+failed_xport_reg:
+       bnx2i_unbind_adapter_devices(hba);
+       cnic->unregister_device(cnic, CNIC_ULP_ISCSI);
+       bnx2i_reg_device--;
+       clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+       return rc;
+}
+
+
+/**
+ * bnx2i_ulp_init - initialize an adapter instance
+ *
+ * @dev:               cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all enumerated
+ *     cnic devices. This routine allocates the adapter structure and other
+ *     device-specific resources.
+ **/
+void bnx2i_ulp_init(struct cnic_dev *dev)
+{
+       struct bnx2i_hba *hba;
+
+       /* Allocate a HBA structure for this device */
+       hba = bnx2i_alloc_hba(dev);
+       if (!hba) {
+               printk(KERN_ERR "init: hba initialization failed\n");
+               return;
+       }
+
+       /* Get PCI related information and update hba struct members */
+       clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+       if (bnx2i_init_one(hba, dev))
+               printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
+}
+
+
+/**
+ * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
+ *
+ * @dev:               cnic device handle
+ *
+ **/
+void bnx2i_ulp_exit(struct cnic_dev *dev)
+{
+       struct bnx2i_hba *hba;
+
+       hba = bnx2i_find_hba_for_cnic(dev);
+       if (!hba) {
+               printk(KERN_INFO "bnx2i_ulp_exit: hba not "
+                                "found, dev 0x%p\n", dev);
+               return;
+       }
+       write_lock(&bnx2i_dev_lock);
+       list_del_init(&hba->link);
+       bnx2i_tcp_port_tbl.num_required -= hba->max_active_conns;
+       adapter_count--;
+
+       /* cleanup 'sysfs' devices and classes */
+       bnx2i_unregister_sysfs(hba);
+
+       if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               spin_lock(&hba->lock);  /* hot remove */
+               hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+               clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+               bnx2i_reg_device--;
+               bnx2i_unbind_adapter_devices(hba);
+               spin_unlock(&hba->lock);
+       }
+       write_unlock(&bnx2i_dev_lock);
+
+       if (hba->pcidev)
+               pci_dev_put(hba->pcidev);
+
+       bnx2i_deregister_xport(hba);
+       bnx2i_free_hba(hba);
+}
+
+
+/**
+ * bnx2i_mod_init - module init entry point
+ *
+ * initializes driver-wide global data structures such as the endpoint pool
+ *     and the TCP port manager/queue, and sets up sysfs. Finally, the driver
+ *     registers itself with the cnic module
+ **/
+static int __init bnx2i_mod_init(void)
+{
+       printk(KERN_INFO "%s", version);
+
+       INIT_LIST_HEAD(&adapter_list);
+       adapter_count = 0;
+
+       bnx2i_alloc_ep_pool();
+       bnx2i_init_tcp_port_mngr();
+
+       /* create 'sysfs' class object */
+       bnx2i_sysfs_setup();
+
+       cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
+
+       bnx2i_ioctl_init();
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_mod_exit - module cleanup/exit entry point
+ *
+ * The global resource lock and host adapter lock are held during critical
+ *     sections in this function. The driver walks the adapter list, cleans
+ *     up each instance, unregisters the iscsi transport name, and finally
+ *     unregisters itself with the cnic module
+ **/
+static void __exit bnx2i_mod_exit(void)
+{
+       struct list_head *list;
+       struct list_head *temp;
+       struct bnx2i_hba *hba;
+
+       write_lock(&bnx2i_dev_lock);
+       list_for_each_safe(list, temp, &adapter_list) {
+               list_del_init(list);
+               hba = (struct bnx2i_hba *) list;
+
+               /* cleanup 'sysfs' devices and classes */
+               bnx2i_unregister_sysfs(hba);
+
+               if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+                       hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+                       clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+                       bnx2i_reg_device--;
+                       bnx2i_unbind_adapter_devices(hba);
+               }
+
+               if (hba->pcidev)
+                       pci_dev_put(hba->pcidev);
+               bnx2i_deregister_xport(hba);
+               bnx2i_free_hba(hba);
+               adapter_count--;
+       }
+       write_unlock(&bnx2i_dev_lock);
+
+       bnx2i_ioctl_cleanup();
+       cnic_unregister_driver(CNIC_ULP_ISCSI);
+
+       bnx2i_sysfs_cleanup();
+       bnx2i_cleanup_tcp_port_mngr();
+       bnx2i_release_ep_pool();
+}
+
+module_init(bnx2i_mod_init);
+module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_ioctl.h b/drivers/scsi/bnx2i/bnx2i_ioctl.h
new file mode 100644 (file)
index 0000000..d602dce
--- /dev/null
@@ -0,0 +1,46 @@
+/* bnx2i_ioctl.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef _BNX2I_IOCTL_H
+#define _BNX2I_IOCTL_H
+
+#define MAX_SIG_SIZE           32
+#define MAX_XPORT_NAME         16
+#define MAX_DEV_NAME_SIZE      16
+
+#define BNX2I_MGMT_SIGNATURE   "bnx2i-mgmt:1.0"
+
+
+
+struct bnx2i_ioctl_header {
+       char signature[MAX_SIG_SIZE];
+       char xport_name[MAX_XPORT_NAME];
+       char dev_name[MAX_DEV_NAME_SIZE];
+};
+
+
+struct bnx2i_get_port_count {
+       struct bnx2i_ioctl_header hdr;
+       unsigned int port_count;
+};
+
+struct bnx2i_set_port_num {
+        struct bnx2i_ioctl_header hdr;
+        unsigned int num_ports;
+        unsigned short tcp_port[1];
+};
+
+
+#define BNX2I_IOCTL_GET_PORT_REQ       \
+               _IOWR('I', 101, struct bnx2i_get_port_count)
+#define BNX2I_IOCTL_SET_TCP_PORT       \
+               _IOWR('I', 102, struct bnx2i_set_port_num)
+
+#endif
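For a rough picture of how a management daemon such as 'bnx2id' might drive this interface from user space, the sketch below opens an assumed character device node, fills in the signature defined above and issues BNX2I_IOCTL_GET_PORT_REQ. The "/dev/bnx2i" path is an assumption, as is leaving xport_name/dev_name zeroed; how the driver validates the header is not shown in this patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#include "bnx2i_ioctl.h"

int main(void)
{
        struct bnx2i_get_port_count req;
        int fd;

        fd = open("/dev/bnx2i", O_RDWR);        /* assumed device node name */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&req, 0, sizeof(req));
        strncpy(req.hdr.signature, BNX2I_MGMT_SIGNATURE, MAX_SIG_SIZE - 1);

        /* ask the driver how many TCP ports it still wants allocated/bound */
        if (ioctl(fd, BNX2I_IOCTL_GET_PORT_REQ, &req) == 0)
                printf("driver requests %u TCP ports\n", req.port_count);
        else
                perror("BNX2I_IOCTL_GET_PORT_REQ");

        close(fd);
        return 0;
}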
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644 (file)
index 0000000..079c29e
--- /dev/null
@@ -0,0 +1,4543 @@
+/* bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+#include <linux/ethtool.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+struct scsi_host_template bnx2i_host_template;
+struct iscsi_transport bnx2i_iscsi_transport;
+struct file_operations bnx2i_mgmt_fops;
+extern unsigned int bnx2i_nopout_when_cmds_active;
+
+/*
+ * Global endpoint resource info
+ */
+static void *bnx2i_ep_pages[MAX_PAGES_PER_CTRL_STRUCT_POOL];
+static struct list_head bnx2i_free_ep_list;
+static struct list_head bnx2i_unbound_ep;
+static u32 bnx2i_num_free_ep;
+static u32 bnx2i_max_free_ep;
+static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+struct tcp_port_mngt bnx2i_tcp_port_tbl;
+
+extern unsigned int sq_size;
+extern unsigned int rq_size;
+
+/* Char device major number */
+static int bnx2i_major_no;
+
+static struct io_bdt *bnx2i_alloc_bd_table(struct bnx2i_sess *sess,
+                                          struct bnx2i_cmd *);
+
+void bnx2i_unbind_adapter_devices(struct bnx2i_hba *hba);
+/**
+ * bnx2i_alloc_tcp_port - allocates a tcp port from the free list
+ *
+ * assumes this function is called with 'bnx2i_resc_lock' held
+ **/
+static u16 bnx2i_alloc_tcp_port(void)
+{
+       u16 tcp_port;
+
+       if (!bnx2i_tcp_port_tbl.num_free_ports || !bnx2i_tcp_port_tbl.free_q)
+               return 0;
+
+       tcp_port = bnx2i_tcp_port_tbl.free_q[bnx2i_tcp_port_tbl.cons_idx];
+       bnx2i_tcp_port_tbl.cons_idx++;
+       bnx2i_tcp_port_tbl.cons_idx %= bnx2i_tcp_port_tbl.max_idx;
+       bnx2i_tcp_port_tbl.num_free_ports--;
+
+       return tcp_port;
+}
+
+
+/**
+ * bnx2i_free_tcp_port - Frees the given tcp port back to free pool
+ *
+ * @port:              tcp port number being freed
+ *
+ * assumes this function is called with 'bnx2i_resc_lock' held
+ **/
+static void bnx2i_free_tcp_port(u16 port)
+{
+       if (!bnx2i_tcp_port_tbl.free_q)
+               return;
+
+       bnx2i_tcp_port_tbl.free_q[bnx2i_tcp_port_tbl.prod_idx] = port;
+       bnx2i_tcp_port_tbl.prod_idx++;
+       bnx2i_tcp_port_tbl.prod_idx %= bnx2i_tcp_port_tbl.max_idx;
+       bnx2i_tcp_port_tbl.num_free_ports++;
+}
+
+/**
+ * bnx2i_tcp_port_new_entry - place 'bnx2id' allocated tcp port number
+ *             to free list
+ *
+ * @tcp_port:          tcp port number being added to free pool
+ *
+ * 'bnx2i_resc_lock' is held while operating on global tcp port table
+ **/
+void bnx2i_tcp_port_new_entry(u16 tcp_port)
+{
+       u32 idx = bnx2i_tcp_port_tbl.prod_idx;
+
+       spin_lock(&bnx2i_resc_lock);
+       bnx2i_tcp_port_tbl.free_q[idx] = tcp_port;
+       bnx2i_tcp_port_tbl.prod_idx++;
+       bnx2i_tcp_port_tbl.prod_idx %= bnx2i_tcp_port_tbl.max_idx;
+       bnx2i_tcp_port_tbl.num_free_ports++;
+       bnx2i_tcp_port_tbl.num_required--;
+       spin_unlock(&bnx2i_resc_lock);
+}
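As a concrete (hypothetical) example: with max_idx = 1024, if bnx2id hands in ports 12000-12003 through bnx2i_tcp_port_new_entry(), free_q[0..3] holds them and prod_idx becomes 4; later bnx2i_alloc_tcp_port() calls return them in the same FIFO order while cons_idx advances, and both indices wrap modulo max_idx.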
+
+/**
+ * bnx2i_init_tcp_port_mngr - initializes tcp port manager
+ *
+ */
+void bnx2i_init_tcp_port_mngr(void)
+{
+       int mem_size;
+
+       bnx2i_tcp_port_tbl.num_free_ports = 0;
+       bnx2i_tcp_port_tbl.prod_idx = 0;
+       bnx2i_tcp_port_tbl.cons_idx = 0;
+       bnx2i_tcp_port_tbl.max_idx = 0;
+       bnx2i_tcp_port_tbl.num_required = 0;
+
+#define BNX2I_MAX_TCP_PORTS    1024
+
+       bnx2i_tcp_port_tbl.port_tbl_size = BNX2I_MAX_TCP_PORTS;
+
+       mem_size = sizeof(u16) * bnx2i_tcp_port_tbl.port_tbl_size;
+       if (bnx2i_tcp_port_tbl.port_tbl_size) {
+               bnx2i_tcp_port_tbl.free_q = kmalloc(mem_size, GFP_KERNEL);
+
+               if (bnx2i_tcp_port_tbl.free_q)
+                       bnx2i_tcp_port_tbl.max_idx =
+                               bnx2i_tcp_port_tbl.port_tbl_size;
+       }
+}
+
+
+/**
+ * bnx2i_cleanup_tcp_port_mngr - frees memory held by global tcp port table
+ *
+ */
+void bnx2i_cleanup_tcp_port_mngr(void)
+{
+       kfree(bnx2i_tcp_port_tbl.free_q);
+       bnx2i_tcp_port_tbl.free_q = NULL;
+       bnx2i_tcp_port_tbl.num_free_ports = 0;
+}
+
+static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
+{
+       int retval = 0;
+
+       if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+           test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+           test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+               retval = -EPERM;
+       return retval;
+}
+
+/**
+ * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks for a
+ *                     scsi write command
+ *
+ * @cmd:               iscsi cmd struct pointer
+ * @buf_off:           absolute buffer offset
+ * @start_bd_off:      u32 pointer to return the offset within the BD
+ *                     indicated by 'start_bd_idx' on which 'buf_off' falls
+ * @start_bd_idx:      index of the BD on which 'buf_off' falls
+ *
+ * identifies & marks various bd info for imm data, unsolicited data
+ *     and the first solicited data seq.
+ */
+static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
+                                      u32 *start_bd_off, u32 *start_bd_idx)
+{
+       u32 cur_offset = 0;
+       u32 cur_bd_idx = 0;
+       struct iscsi_bd *bd_tbl = cmd->bd_tbl->bd_tbl;
+
+       if (buf_off) {
+               while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
+                       cur_offset += bd_tbl->buffer_length;
+                       cur_bd_idx++;
+                       bd_tbl++;
+               }
+       }
+
+       *start_bd_off = buf_off - cur_offset;
+       *start_bd_idx = cur_bd_idx;
+}
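For instance, if the BD table held three BDs of 8192, 4096 and 16384 bytes (hypothetical lengths) and buf_off were 10000, the walk above would stop after the first BD with cur_offset = 8192 and cur_bd_idx = 1, so *start_bd_idx = 1 and *start_bd_off = 10000 - 8192 = 1808.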
+
+/**
+ * bnx2i_setup_write_cmd_bd_info - sets up BD various information for
+ *                     scsi write command
+ *
+ * @cmd:               iscsi cmd struct pointer
+ *
+ * identifies & marks various BD info for immediate data, unsolicited data
+ *     and the first solicited data sequence, which includes the BD start
+ *     index & BD buffer offset. This function takes into account whether
+ *     iSCSI parameters such as immediate data and unsolicited data are
+ *     supported on this connection
+ *
+ */
+static void bnx2i_setup_write_cmd_bd_info(struct bnx2i_cmd *cmd)
+{
+       struct bnx2i_sess *sess;
+       u32 start_bd_offset;
+       u32 start_bd_idx;
+       u32 buffer_offset = 0;
+       u32 seq_len = 0;
+       u32 fbl, mrdsl;
+       u32 cmd_len = cmd->req.total_data_transfer_length;
+
+       sess = cmd->conn->sess;
+
+       /* if ImmediateData is turned off & InitialR2T is turned on,
+        * there will be no immediate or unsolicited data, just return.
+        */
+       if (sess->initial_r2t && !sess->imm_data)
+               return;
+
+       fbl = sess->first_burst_len;
+       mrdsl = cmd->conn->max_data_seg_len_xmit;
+
+       /* Immediate data */
+       if (sess->imm_data) {
+               seq_len = min(mrdsl, fbl);
+               seq_len = min(cmd_len, seq_len);
+               buffer_offset += seq_len;
+       }
+
+       if (seq_len == cmd_len)
+               return;
+
+       if (!sess->initial_r2t) {
+               if (seq_len >= fbl)
+                       goto r2t_data;
+               seq_len = min(fbl, cmd_len) - seq_len;
+               bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+                                          &start_bd_offset, &start_bd_idx);
+               cmd->req.ud_buffer_offset = start_bd_offset;
+               cmd->req.ud_start_bd_index = start_bd_idx;
+               buffer_offset += seq_len;
+       }
+r2t_data:
+       if (buffer_offset != cmd_len) {
+               bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+                                          &start_bd_offset, &start_bd_idx);
+               if ((start_bd_offset > fbl) ||
+                   (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
+                       int i = 0;
+
+                       printk(KERN_ALERT "bnx2i- error, buf offset 0x%x "
+                                         "bd_valid %d use_sg %d\n",
+                                         buffer_offset, cmd->bd_tbl->bd_valid,
+                                         scsi_sg_count(cmd->scsi_cmd));
+                       for (i = 0; i < cmd->bd_tbl->bd_valid; i++)
+                               printk(KERN_ALERT "bnx2i err, bd[%d]: len %x\n",
+                                                 i, cmd->bd_tbl->bd_tbl[i].\
+                                                 buffer_length);
+               }
+               cmd->req.sd_buffer_offset = start_bd_offset;
+               cmd->req.sd_start_bd_index = start_bd_idx;
+       }
+}
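A concrete (hypothetical) example: for a 64 KB write with ImmediateData=Yes, InitialR2T=No, FirstBurstLength = 32 KB and MaxRecvDataSegmentLength = 8 KB, the immediate-data sequence covers the first 8 KB (the smaller of MRDSL and FBL), the unsolicited bookmarks (ud_*) are computed at offset 8 KB for the remaining 24 KB of the first burst, and the solicited bookmarks (sd_*) are computed at offset 32 KB for the rest of the command.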
+
+
+/**
+ * bnx2i_split_bd - splits buffer > 64KB into 32KB chunks
+ *
+ * @cmd:               iscsi cmd struct pointer
+ * @addr:              base address of the buffer
+ * @sg_len:            buffer length
+ * @bd_index:          starting index into BD table
+ *
+ * This is normally not required because the driver limits the max buffer
+ *     size to less than 64K by advertising 'max_sectors' within this limit.
+ *     5706/5708 hardware limits a BD length to at most 0xFFFF
+ **/
+static int bnx2i_split_bd(struct bnx2i_cmd *cmd, u64 addr, int sg_len,
+                         int bd_index)
+{
+       struct iscsi_bd *bd = cmd->bd_tbl->bd_tbl;
+       int frag_size, sg_frags;
+
+       sg_frags = 0;
+       while (sg_len) {
+               if (sg_len >= BD_SPLIT_SIZE)
+                       frag_size = BD_SPLIT_SIZE;
+               else
+                       frag_size = sg_len;
+               bd[bd_index + sg_frags].buffer_addr_lo = (u32) addr;
+               bd[bd_index + sg_frags].buffer_addr_hi = addr >> 32;
+               bd[bd_index + sg_frags].buffer_length = frag_size;
+               bd[bd_index + sg_frags].flags = 0;
+               if ((bd_index + sg_frags) == 0)
+                       bd[0].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
+               addr += (u64) frag_size;
+               sg_frags++;
+               sg_len -= frag_size;
+       }
+       return sg_frags;
+}
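For example, a single 100 KB DMA segment would come out as three 32 KB BDs (BD_SPLIT_SIZE, per the comment above) plus one 4 KB BD; the function returns 4 so the caller can OR ISCSI_BD_LAST_IN_BD_CHAIN into the final fragment.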
+
+
+/**
+ * bnx2i_map_single_buf - maps a single buffer and updates the BD table
+ *
+ * @hba:               adapter instance
+ * @cmd:               iscsi cmd struct pointer
+ *
+ */
+static int bnx2i_map_single_buf(struct bnx2i_hba *hba,
+                                      struct bnx2i_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       struct iscsi_bd *bd = cmd->bd_tbl->bd_tbl;
+       int byte_count;
+       int bd_count;
+       u64 addr;
+
+       byte_count = scsi_bufflen(sc);
+       sc->SCp.dma_handle =
+               pci_map_single(hba->pcidev, scsi_sglist(sc),
+                              scsi_bufflen(sc), sc->sc_data_direction);
+       addr = sc->SCp.dma_handle;
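+       /* split if larger than the per-BD limit, else use one first+last BD */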
+
+       if (byte_count > MAX_BD_LENGTH) {
+               bd_count = bnx2i_split_bd(cmd, addr, byte_count, 0);
+       } else {
+               bd_count = 1;
+               bd[0].buffer_addr_lo = addr & 0xffffffff;
+               bd[0].buffer_addr_hi = addr >> 32;
+               bd[0].buffer_length = scsi_bufflen(sc);
+               bd[0].flags = ISCSI_BD_FIRST_IN_BD_CHAIN |
+                             ISCSI_BD_LAST_IN_BD_CHAIN;
+       }
+       bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
+
+       return bd_count;
+}
+
+
+/**
+ * bnx2i_map_sg - maps IO buffer and prepares the BD table
+ *
+ * @hba:               adapter instance
+ * @cmd:               iscsi cmd struct pointer
+ *
+ * map SG list
+ */
+static int bnx2i_map_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       struct iscsi_bd *bd = cmd->bd_tbl->bd_tbl;
+       struct scatterlist *sg;
+       int byte_count = 0;
+       int sg_frags;
+       int bd_count = 0;
+       int sg_count;
+       int sg_len;
+       u64 addr;
+       int i;
+
+       sg = scsi_sglist(sc);
+       sg_count = pci_map_sg(hba->pcidev, sg, scsi_sg_count(sc),
+                             sc->sc_data_direction);
+
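+       /* one BD per SG element, unless it exceeds MAX_BD_LENGTH and is split */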
+       for (i = 0; i < sg_count; i++) {
+               sg_len = sg_dma_len(sg);
+               addr = sg_dma_address(sg);
+               if (sg_len > MAX_BD_LENGTH)
+                       sg_frags = bnx2i_split_bd(cmd, addr, sg_len,
+                                                 bd_count);
+               else {
+                       sg_frags = 1;
+                       bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
+                       bd[bd_count].buffer_addr_hi = addr >> 32;
+                       bd[bd_count].buffer_length = sg_len;
+                       bd[bd_count].flags = 0;
+                       if (bd_count == 0)
+                               bd[bd_count].flags =
+                                       ISCSI_BD_FIRST_IN_BD_CHAIN;
+               }
+               byte_count += sg_len;
+               sg++;
+               bd_count += sg_frags;
+       }
+       bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
+
+       BUG_ON(byte_count != scsi_bufflen(sc));
+       return bd_count;
+}
+
+/**
+ * bnx2i_iscsi_map_sg_list - maps SG list
+ *
+ * @cmd:               iscsi cmd struct pointer
+ *
+ * creates BD list table for the command
+ */
+static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       int bd_count;
+
+       if (scsi_sg_count(sc))
+               bd_count = bnx2i_map_sg(cmd->conn->sess->hba, cmd);
+       else if (scsi_bufflen(sc))
+               bd_count = bnx2i_map_single_buf(cmd->conn->sess->hba, cmd);
+       else {
+               struct iscsi_bd *bd = cmd->bd_tbl->bd_tbl;
+               bd_count  = 0;
+               bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
+               bd[0].buffer_length = bd[0].flags = 0;
+       }
+       cmd->bd_tbl->bd_valid = bd_count;
+}
+
+
+/**
+ * bnx2i_iscsi_unmap_sg_list - unmaps SG list
+ *
+ * @hba:               adapter instance
+ * @cmd:               iscsi cmd struct pointer
+ *
+ * unmap IO buffers and invalidate the BD table
+ */
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       struct scatterlist *sg;
+
+       if (cmd->bd_tbl->bd_valid && sc) {
+               if (scsi_sg_count(sc)) {
+                       sg = scsi_sglist(sc);
+                       pci_unmap_sg(hba->pcidev, sg, scsi_sg_count(sc),
+                                    sc->sc_data_direction);
+               } else {
+                       pci_unmap_single(hba->pcidev, sc->SCp.dma_handle,
+                                        scsi_bufflen(sc),
+                                        sc->sc_data_direction);
+               }
+               cmd->bd_tbl->bd_valid = 0;
+       }
+}
+
+
+
+static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
+{
+       memset(&cmd->req, 0x00, sizeof(cmd->req));
+       cmd->req.op_code = 0xFF;
+       cmd->req.bd_list_addr_lo = (u32) cmd->bd_tbl->bd_tbl_dma;
+       cmd->req.bd_list_addr_hi =
+               (u32) ((u64) cmd->bd_tbl->bd_tbl_dma >> 32);
+
+}
+
+
+/**
+ * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
+ *
+ * @conn:              pointer to iscsi connection
+ * @iscsi_cid:         iscsi context ID, range 0 - (MAX_CONN - 1)
+ *
+ * updates the iscsi cid table entry with the connection pointer. This enables
+ *     the driver to quickly get hold of the connection structure pointer in
+ *     the completion/interrupt thread using the iscsi context ID
+ */
+static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_conn *conn,
+                                        u32 iscsi_cid)
+{
+       struct bnx2i_hba *hba;
+
+       if (!conn || !conn->sess)
+               return -EINVAL;
+
+       hba = conn->sess->hba;
+
+       if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
+               printk(KERN_ERR "bnx2i: conn bind - entry #%d not free\n",
+                               iscsi_cid);
+               return -EBUSY;
+       }
+
+       hba->cid_que.conn_cid_tbl[iscsi_cid] = conn;
+       return 0;
+}
+
+
+/**
+ * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
+ *
+ * @hba:               pointer to adapter instance
+ * @iscsi_cid:         iscsi context ID, range 0 - (MAX_CONN - 1)
+ */
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+                                                u16 iscsi_cid)
+{
+       if (!hba->cid_que.conn_cid_tbl) {
+               printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
+               return NULL;
+
+       } else if (iscsi_cid >= hba->max_active_conns) {
+               printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
+               return NULL;
+       }
+       return hba->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+
+/**
+ * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
+ *
+ * @hba:               pointer to adapter instance
+ */
+static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
+{
+       int idx;
+
+       if (!hba->cid_que.cid_free_cnt)
+               return ISCSI_RESERVED_TAG;
+
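+       /* consume from the circular free-cid ring, wrapping the consumer index */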
+       idx = hba->cid_que.cid_q_cons_idx;
+       hba->cid_que.cid_q_cons_idx++;
+       if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
+               hba->cid_que.cid_q_cons_idx = 0;
+
+       hba->cid_que.cid_free_cnt--;
+       return hba->cid_que.cid_que[idx];
+}
+
+
+/**
+ * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free list
+ *
+ * @hba:               pointer to adapter instance
+ * @iscsi_cid:         iscsi context ID to free
+ */
+static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
+{
+       int idx;
+
+       if (iscsi_cid == (u16)ISCSI_RESERVED_TAG)
+               return;
+
+       hba->cid_que.cid_free_cnt++;
+
+       idx = hba->cid_que.cid_q_prod_idx;
+       hba->cid_que.cid_que[idx] = iscsi_cid;
+       hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
+       hba->cid_que.cid_q_prod_idx++;
+       if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
+               hba->cid_que.cid_q_prod_idx = 0;
+}
+
+
+/**
+ * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * allocates memory for the iscsi cid queue & 'cid - conn ptr' mapping table,
+ *     and initializes table attributes
+ */
+static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
+{
+       int mem_size;
+       int i;
+
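+       /* both the free-cid ring and cid->conn table are rounded up to full pages */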
+       mem_size = hba->max_active_conns * sizeof(u16);
+       mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
+       if (!hba->cid_que.cid_que_base)
+               return -ENOMEM;
+
+       mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
+       mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+       hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
+       if (!hba->cid_que.conn_cid_tbl) {
+               kfree(hba->cid_que.cid_que_base);
+               hba->cid_que.cid_que_base = NULL;
+               return -ENOMEM;
+       }
+
+       hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
+       hba->cid_que.cid_q_prod_idx = 0;
+       hba->cid_que.cid_q_cons_idx = 0;
+       hba->cid_que.cid_q_max_idx = hba->max_active_conns;
+       hba->cid_que.cid_free_cnt = hba->max_active_conns;
+
+       for (i = 0; i < hba->max_active_conns; i++) {
+               hba->cid_que.cid_que[i] = i;
+               hba->cid_que.conn_cid_tbl[i] = NULL;
+       }
+       return 0;
+}
+
+
+/**
+ * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
+ *
+ * @hba:               pointer to adapter instance
+ */
+static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
+{
+       kfree(hba->cid_que.cid_que_base);
+       hba->cid_que.cid_que_base = NULL;
+
+       kfree(hba->cid_que.conn_cid_tbl);
+       hba->cid_que.conn_cid_tbl = NULL;
+}
+
+
+/**
+ * bnx2i_alloc_ep - allocates ep structure from global pool
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * routine allocates a free endpoint structure from global pool and
+ *     a tcp port to be used for this connection.  Global resource lock,
+ *     'bnx2i_resc_lock' is held while accessing shared global data structures
+ */
+static struct bnx2i_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
+{
+       struct bnx2i_endpoint *endpoint;
+       struct list_head *listp;
+       u16 tcp_port;
+       u32 flags;
+
+       spin_lock_irqsave(&bnx2i_resc_lock, flags);
+
+       tcp_port = bnx2i_alloc_tcp_port();
+       if (!tcp_port) {
+               printk(KERN_ERR "bnx2i: run 'bnx2id' to alloc tcp ports\n");
+               spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+               return NULL;
+       }
+       if (list_empty(&bnx2i_free_ep_list)) {
+               spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+               printk(KERN_ERR "bnx2i: ep struct pool empty\n");
+               return NULL;
+       }
+       listp = (struct list_head *) bnx2i_free_ep_list.next;
+       list_del_init(listp);
+       bnx2i_num_free_ep--;
+
+       endpoint = (struct bnx2i_endpoint *) listp;
+       endpoint->state = EP_STATE_IDLE;
+       endpoint->hba = hba;
+       endpoint->hba_age = hba->age;
+       hba->ofld_conns_active++;
+       endpoint->tcp_port = tcp_port;
+       init_waitqueue_head(&endpoint->ofld_wait);
+
+       spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+       return endpoint;
+}
+
+
+/**
+ * bnx2i_free_ep - returns endpoint struct and tcp port to free pool
+ *
+ * @endpoint:          pointer to endpoint structure
+ *
+ */
+static void bnx2i_free_ep(struct bnx2i_endpoint *endpoint)
+{
+       u32 flags;
+
+       spin_lock_irqsave(&bnx2i_resc_lock, flags);
+       endpoint->state = EP_STATE_IDLE;
+       endpoint->hba->ofld_conns_active--;
+
+       bnx2i_free_iscsi_cid(endpoint->hba, endpoint->ep_iscsi_cid);
+       if (endpoint->conn) {
+               endpoint->conn->ep = NULL;
+               endpoint->conn = NULL;
+       }
+       endpoint->sess = NULL;
+
+       if (endpoint->tcp_port)
+               bnx2i_free_tcp_port(endpoint->tcp_port);
+
+       endpoint->hba = NULL;
+       list_add_tail(&endpoint->link, &bnx2i_free_ep_list);
+       bnx2i_num_free_ep++;
+       spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+}
+
+
+/**
+ * bnx2i_alloc_ep_pool - allocates a pool of endpoint structures
+ *
+ * allocates a free pool of endpoint structures, which is used to store
+ *     QP related control & PT info and other option-2 information
+ */
+int bnx2i_alloc_ep_pool(void)
+{
+       struct bnx2i_endpoint *endpoint;
+       int index, count;
+       int ret_val = 1;
+       int total_endpoints;
+       int page_count = 0;
+       int num_endpoints_per_page;
+       void *mem_ptr;
+
+       spin_lock_init(&bnx2i_resc_lock);
+       INIT_LIST_HEAD(&bnx2i_free_ep_list);
+       INIT_LIST_HEAD(&bnx2i_unbound_ep);
+
+       for (index = 0; index < MAX_PAGES_PER_CTRL_STRUCT_POOL; index++) {
+               bnx2i_ep_pages[index] = NULL;
+       }
+
+       num_endpoints_per_page =
+               PAGE_SIZE / sizeof(struct bnx2i_endpoint);
+
+       total_endpoints = ISCSI_MAX_CONNS_PER_HBA;
+       if (total_endpoints >
+           (num_endpoints_per_page * MAX_PAGES_PER_CTRL_STRUCT_POOL)) {
+               total_endpoints = (num_endpoints_per_page *
+                                  MAX_PAGES_PER_CTRL_STRUCT_POOL);
+       }
+
+       bnx2i_num_free_ep = 0;
+       for (index = 0; index < total_endpoints;) {
+               mem_ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               if (mem_ptr == NULL) {
+                       printk(KERN_ERR "ep_pool: mem alloc failed\n");
+                       break;
+               }
+               bnx2i_ep_pages[page_count++] = mem_ptr;
+
+               memset(mem_ptr, 0, PAGE_SIZE);
+
+               endpoint = mem_ptr;
+               for (count = 0; count < num_endpoints_per_page; count++) {
+                       list_add_tail(&endpoint->link, &bnx2i_free_ep_list);
+                       endpoint++;
+               }
+
+               bnx2i_num_free_ep += num_endpoints_per_page;
+               index += num_endpoints_per_page;
+       }
+       if (bnx2i_num_free_ep == 0)
+               ret_val = 0;
+       bnx2i_max_free_ep = bnx2i_num_free_ep;
+
+       return ret_val;
+}
+
+
+/**
+ * bnx2i_release_ep_pool - releases memory resources held by endpoint structs
+ */
+void bnx2i_release_ep_pool(void)
+{
+       int index;
+       void *mem_ptr;
+
+       for (index = 0; index < MAX_PAGES_PER_CTRL_STRUCT_POOL; index++) {
+               mem_ptr = bnx2i_ep_pages[index];
+               kfree(mem_ptr);
+               bnx2i_ep_pages[index] = NULL;
+       }
+       bnx2i_num_free_ep = 0;
+       return;
+}
+
+
+/**
+ * bnx2i_alloc_itt - allocates an ITT from the free pool
+ *
+ * @sess:              iscsi session pointer
+ * @cmd:               iscsi cmd pointer
+ *
+ * iSCSI Session ITT queue manager
+ */
+static u32 bnx2i_alloc_itt(struct bnx2i_sess *sess, struct bnx2i_cmd *cmd)
+{
+       u32 itt_val = ITT_INVALID_SIGNATURE;
+
+       if (sess->itt_q.itt_q_count) {
+               itt_val = sess->itt_q.itt_que[sess->itt_q.itt_q_cons_idx++];
+               sess->itt_q.itt_q_cons_idx %= sess->itt_q.itt_q_max_idx;
+               sess->itt_q.itt_cmd[itt_val] = cmd;
+               sess->itt_q.itt_q_count--;
+       }
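+       /* mask off everything but the command-index bits before returning */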
+       return itt_val & ISCSI_CMD_REQUEST_INDEX;
+}
+
+
+/**
+ * bnx2i_free_itt - releases ITT back to free pool
+ *
+ * @sess:              iscsi session pointer
+ * @cmd:               iscsi cmd pointer
+ *
+ * iSCSI Session ITT queue manager
+ */
+static void bnx2i_free_itt(struct bnx2i_sess *sess, struct bnx2i_cmd *cmd)
+{
+/* check, BUG_ON() */
+       if (cmd->req.itt == ITT_INVALID_SIGNATURE) {
+               printk(KERN_ALERT "free_itt: RSVD ITT - sess 0x%p\n", sess);
+               return;
+       }
+       sess->itt_q.itt_que[sess->itt_q.itt_q_prod_idx++] =
+               cmd->req.itt & ISCSI_CMD_RESPONSE_INDEX;
+       sess->itt_q.itt_q_prod_idx %= sess->itt_q.itt_q_max_idx;
+       sess->itt_q.itt_cmd[cmd->req.itt] = NULL;
+       sess->itt_q.itt_q_count++;
+       cmd->req.itt = ITT_INVALID_SIGNATURE;
+}
+
+
+/**
+ * bnx2i_setup_free_itt_queue - sets up the free ITT queue
+ *
+ * @sess:              iscsi session pointer
+ *
+ * Allocate memory for the ITT queue area and set up queue attributes. The
+ *     ITT queue is a circular array of ITTs [range 0 - (SQ SIZE-1)] managed
+ *     by producer / consumer indexes
+ */
+static int bnx2i_setup_free_itt_queue(struct bnx2i_sess *sess)
+{
+       u16 itt_q_size = (u16)sess->sq_size;
+       u32 itt_value;
+       int unit_size = sizeof(u16);
+       int mem_size = PAGE_SIZE;
+
+       if ((itt_q_size * unit_size) > mem_size)
+               mem_size = (itt_q_size * unit_size);
+
+       mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+       sess->itt_q.itt_que_base = kmalloc(mem_size, GFP_KERNEL);
+       if (!sess->itt_q.itt_que_base)
+               return -ENOMEM;
+
+       mem_size = (itt_q_size * sizeof(struct bnx2i_cmd *));
+       mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+       sess->itt_q.itt_cmd = kmalloc(mem_size, GFP_KERNEL);
+       if (!sess->itt_q.itt_cmd) {
+               kfree(sess->itt_q.itt_que_base);
+               sess->itt_q.itt_que_base = NULL;
+               return -ENOMEM;
+       }
+       memset(sess->itt_q.itt_cmd, 0x00, mem_size);
+
+       sess->itt_q.itt_que = (u32 *)sess->itt_q.itt_que_base;
+       sess->itt_q.itt_q_prod_idx = 0;
+       sess->itt_q.itt_q_cons_idx = 0;
+       sess->itt_q.itt_q_max_idx = itt_q_size;
+       sess->itt_q.itt_q_count = itt_q_size;
+
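+       /* pre-populate the circular free-ITT ring with ITTs 0 .. (SQ size - 1) */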
+       itt_value = 0;
+       while (itt_value < itt_q_size) {
+               sess->itt_q.itt_cmd[itt_value] = NULL;
+               sess->itt_q.itt_que[sess->itt_q.itt_q_prod_idx++] =
+                       itt_value++;
+               if (sess->itt_q.itt_q_prod_idx >= sess->itt_q.itt_q_max_idx)
+                       sess->itt_q.itt_q_prod_idx = 0;
+       }
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_release_free_itt_queue - releases resources held by the ITT queue
+ *
+ * @sess:              iscsi session pointer
+ *
+ * free resources held by free ITT queue
+ */
+static void bnx2i_release_free_itt_queue(struct bnx2i_sess *sess)
+{
+       sess->itt_q.itt_q_count = 0;
+       kfree(sess->itt_q.itt_que_base);
+       sess->itt_q.itt_que_base = NULL;
+
+       kfree(sess->itt_q.itt_cmd);
+       sess->itt_q.itt_cmd = NULL;
+}
+
+
+/**
+ * bnx2i_alloc_cmd - allocates a command structure from free pool
+ *
+ * @sess:              iscsi session pointer
+ *
+ * allocates a command structure and an ITT from the free pool
+ */
+struct bnx2i_cmd *bnx2i_alloc_cmd(struct bnx2i_sess *sess)
+{
+       struct bnx2i_cmd *cmd;
+       struct list_head *listp;
+
+       if (unlikely(!sess || (sess->num_free_cmds == 0))) {
+               return NULL;
+       }
+
+       listp = (struct list_head *) sess->free_cmds.next;
+       list_del_init(listp);
+       sess->num_free_cmds--;
+       cmd = (struct bnx2i_cmd *) listp;
+       cmd->scsi_status_rcvd = 0;
+
+       bnx2i_setup_cmd_wqe_template(cmd);
+
+       cmd->req.itt = bnx2i_alloc_itt(sess, cmd);
+
+       return cmd;
+}
+
+
+/**
+ * bnx2i_free_cmd - releases iscsi cmd struct & ITT to respective free pool
+ *
+ * @sess:              iscsi session pointer
+ * @cmd:               iscsi cmd pointer
+ *
+ * return command structure and ITT back to free pool.
+ */
+void bnx2i_free_cmd(struct bnx2i_sess *sess, struct bnx2i_cmd *cmd)
+{
+       bnx2i_free_itt(sess, cmd);
+       list_del_init(&cmd->link);
+       list_add_tail(&cmd->link, &sess->free_cmds);
+       sess->num_free_cmds++;
+}
+
+
+/**
+ * bnx2i_alloc_cmd_pool - allocates and initializes iscsi command pool
+ *
+ * @sess:              iscsi session pointer
+ *
+ * Allocate the command structure pool for a given iSCSI session. Returns
+ *     -ENOMEM if memory allocation fails
+ */
+int bnx2i_alloc_cmd_pool(struct bnx2i_sess *sess)
+{
+       struct bnx2i_cmd *cmdp;
+       int index, count;
+       int ret_val = 0;
+       int total_cmds;
+       int num_cmds;
+       int page_count = 0;
+       int num_cmds_per_page;
+       void *mem_ptr;
+
+       INIT_LIST_HEAD(&sess->free_cmds);
+       for (index = 0; index < MAX_PAGES_PER_CTRL_STRUCT_POOL; index++)
+               sess->cmd_pages[index] = NULL;
+
+       num_cmds_per_page = PAGE_SIZE / sizeof(struct bnx2i_cmd);
+       total_cmds = sess->hba->scsi_template->can_queue +
+                    BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS;
+       if (total_cmds >
+           (num_cmds_per_page * MAX_PAGES_PER_CTRL_STRUCT_POOL))
+               total_cmds = num_cmds_per_page *
+                            MAX_PAGES_PER_CTRL_STRUCT_POOL;
+
+       for (index = 0; index < total_cmds;) {
+               mem_ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               if (mem_ptr == NULL)
+                       break;
+
+               sess->cmd_pages[page_count++] = mem_ptr;
+               num_cmds = num_cmds_per_page;
+               if ((total_cmds - index) < num_cmds_per_page)
+                       num_cmds = (total_cmds - index);
+
+               memset(mem_ptr, 0, PAGE_SIZE);
+               cmdp = mem_ptr;
+               for (count = 0; count < num_cmds; count++) {
+                       cmdp->req.itt = ITT_INVALID_SIGNATURE;
+
+                       /* Allocate BD table */
+                       cmdp->bd_tbl = bnx2i_alloc_bd_table(sess, cmdp);
+                       if (!cmdp->bd_tbl) {
+                               /* should never fail, as it's guaranteed to have
+                                * (ISCSI_MAX_CMDS_PER_SESS + 1) BD tables
+                                * allocated before calling this function.
+                                */
+                               printk(KERN_ERR "no BD table cmd %p\n", cmdp);
+                               goto bd_table_failed;
+                       }
+                       list_add_tail(&cmdp->link, &sess->free_cmds);
+                       cmdp++;
+               }
+
+               sess->num_free_cmds += num_cmds;
+               index += num_cmds;
+       }
+       sess->allocated_cmds = sess->num_free_cmds;
+
+       if (sess->num_free_cmds == 0)
+               ret_val = -ENOMEM;
+       return ret_val;
+
+bd_table_failed:
+       return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_free_cmd_pool - releases memory held by free iscsi cmd pool
+ *
+ * @sess:              iscsi session pointer
+ *
+ * Release memory held by command struct pool.
+ */
+void bnx2i_free_cmd_pool(struct bnx2i_sess *sess)
+{
+       int index;
+       void *mem_ptr;
+
+       if (sess->num_free_cmds != sess->allocated_cmds) {
+               /*
+                * WARN: either there is some command struct leak or
+                * still some SCSI commands are pending.
+                */
+               printk(KERN_ERR "bnx2i: missing cmd structs - %d, %d\n",
+                                sess->num_free_cmds, sess->allocated_cmds);
+       }
+       for (index = 0; index < MAX_PAGES_PER_CTRL_STRUCT_POOL; index++) {
+               mem_ptr = sess->cmd_pages[index];
+               kfree(mem_ptr);
+               sess->cmd_pages[index] = NULL;
+       }
+       sess->num_free_cmds = sess->allocated_cmds = 0;
+       return;
+}
+
+
+/**
+ * bnx2i_alloc_bd_table - allocates a BD table to associate with this iscsi cmd
+ *
+ * @sess:              iscsi session pointer
+ * @cmd:               iscsi cmd pointer
+ *
+ * allocates a BD table and assigns it to given command structure. There is
+ *     no synchronization issue as this code is executed in initialization
+ *     thread
+ */
+static struct io_bdt *bnx2i_alloc_bd_table(struct bnx2i_sess *sess,
+                                          struct bnx2i_cmd *cmd)
+{
+       struct io_bdt *bd_tbl;
+
+       if (list_empty(&sess->bd_tbl_list))
+               return NULL;
+
+       bd_tbl = (struct io_bdt *)sess->bd_tbl_list.next;
+       list_del(&bd_tbl->link);
+       list_add_tail(&bd_tbl->link, &sess->bd_tbl_active);
+       bd_tbl->bd_valid = 0;
+       bd_tbl->cmdp = cmd;
+
+       return bd_tbl;
+}
+
+
+/**
+ * bnx2i_free_all_bdt_resc_pages - releases memory held by BD memory tracker tbl
+ *
+ * @sess:              iscsi session pointer
+ *
+ * Free up memory pages allocated held by BD resources
+ */
+static void bnx2i_free_all_bdt_resc_pages(struct bnx2i_sess *sess)
+{
+       int i;
+       struct bd_resc_page *resc_page;
+
+       spin_lock_bh(&sess->lock);
+       while (!list_empty(&sess->bd_resc_page)) {
+               resc_page = (struct bd_resc_page *)sess->bd_resc_page.prev;
+               list_del(sess->bd_resc_page.prev);
+               for (i = 0; i < resc_page->num_valid; i++)
+                       kfree(resc_page->page[i]);
+               kfree(resc_page);
+       }
+       spin_unlock_bh(&sess->lock);
+}
+
+
+
+/**
+ * bnx2i_alloc_bdt_resc_page - allocates a page to track BD table memory
+ *
+ * @sess:              iscsi session pointer
+ *
+ * allocates a page to track BD table memory
+ */
+struct bd_resc_page *bnx2i_alloc_bdt_resc_page(struct bnx2i_sess *sess)
+{
+       void *mem_ptr;
+       struct bd_resc_page *resc_page;
+
+       mem_ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!mem_ptr)
+               return NULL;
+
+       resc_page = mem_ptr;
+       list_add_tail(&resc_page->link, &sess->bd_resc_page);
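+       /* page-pointer slots that fit after the header (open-coded offsetof) */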
+       resc_page->max_ptrs = (PAGE_SIZE -
+               (u32) &((struct bd_resc_page *) 0)->page[0]) / sizeof(void *);
+       resc_page->num_valid = 0;
+
+       return resc_page;
+}
+
+
+/**
+ * bnx2i_add_bdt_resc_page - add newly allocated memory page to list
+ *
+ * @sess:              iscsi session pointer
+ * @bd_page:           pointer to page memory
+ *
+ * link newly allocated memory page to tracker list
+ */
+int bnx2i_add_bdt_resc_page(struct bnx2i_sess *sess, void *bd_page)
+{
+       struct bd_resc_page *resc_page;
+
+#define is_resc_page_full(_resc_pg) (_resc_pg->num_valid == _resc_pg->max_ptrs)
+#define active_resc_page(_resc_list)   \
+                       (list_empty(_resc_list) ? NULL : (_resc_list)->prev)
+       if (list_empty(&sess->bd_resc_page)) {
+               resc_page = bnx2i_alloc_bdt_resc_page(sess);
+       } else {
+               resc_page = (struct bd_resc_page *)
+                                       active_resc_page(&sess->bd_resc_page);
+       }
+
+       if (!resc_page)
+               return -ENOMEM;
+
+       resc_page->page[resc_page->num_valid++] = bd_page;
+       if (is_resc_page_full(resc_page)) {
+               resc_page = bnx2i_alloc_bdt_resc_page(sess);
+       }
+       return 0;
+}
+
+
+/**
+ * bnx2i_alloc_bd_table_pool - Allocates buffer descriptor (BD) pool
+ *
+ * @sess:              iscsi session pointer
+ *
+ * Allocate a pool of buffer descriptor tables and associated DMA'able memory
+ *     to be used with the session.
+ */
+static int bnx2i_alloc_bd_table_pool(struct bnx2i_sess *sess)
+{
+       int index, count;
+       int ret_val = 0;
+       int num_elem_per_page;
+       struct io_bdt *bdt_info;
+       void *mem_ptr;
+       u32 bd_tbl_size;
+       u32 mem_size;
+       int total_bd_tbl;
+/* check */
+
+       INIT_LIST_HEAD(&sess->bd_resc_page);
+       INIT_LIST_HEAD(&sess->bd_tbl_list);
+       INIT_LIST_HEAD(&sess->bd_tbl_active);
+       total_bd_tbl = sess->hba->scsi_template->can_queue +
+                    BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS;
+       mem_size = total_bd_tbl * sizeof(struct io_bdt);
+       num_elem_per_page = PAGE_SIZE / sizeof(struct io_bdt);
+       for (index = 0; index < total_bd_tbl; index += num_elem_per_page) {
+               if (((total_bd_tbl - index) * sizeof(struct io_bdt))
+                   >= PAGE_SIZE) {
+                       mem_size = PAGE_SIZE;
+                       num_elem_per_page = PAGE_SIZE / sizeof(struct io_bdt);
+               } else {
+                       mem_size =
+                               (total_bd_tbl - index) * sizeof(struct io_bdt);
+                       num_elem_per_page = (total_bd_tbl - index);
+               }
+               mem_ptr = kmalloc(mem_size, GFP_KERNEL);
+               if (mem_ptr == NULL) {
+                       printk(KERN_ERR "alloc_bd_tbl: mem alloc failed\n");
+                       ret_val = -ENOMEM;
+                       goto resc_alloc_failed;
+               }
+               bnx2i_add_bdt_resc_page(sess, mem_ptr);
+
+               memset(mem_ptr, 0, mem_size);
+               bdt_info = mem_ptr;
+               for (count = 0; count < num_elem_per_page; count++) {
+                       list_add_tail(&bdt_info->link, &sess->bd_tbl_list);
+                       bdt_info++;
+               }
+       }
+
+       bd_tbl_size = ISCSI_MAX_BDS_PER_CMD * sizeof(struct iscsi_bd);
+       bdt_info = (struct io_bdt *)sess->bd_tbl_list.next;
+       while (bdt_info && (bdt_info != (struct io_bdt *)&sess->bd_tbl_list)) {
+               mem_ptr = pci_alloc_consistent(sess->hba->pcidev,
+                                              bd_tbl_size,
+                                              &bdt_info->bd_tbl_dma);
+               if (!mem_ptr) {
+                       printk(KERN_ERR "bd_tbl: DMA mem alloc failed\n");
+                       ret_val = -ENOMEM;
+                       goto dma_alloc_failed;
+               }
+               bdt_info->bd_tbl = mem_ptr;
+               bdt_info->max_bd_cnt = ISCSI_MAX_BDS_PER_CMD;
+               bdt_info->bd_valid = 0;
+               bdt_info->cmdp = NULL;
+
+               bdt_info = (struct io_bdt *)bdt_info->link.next;
+       }
+       return ret_val;
+
+resc_alloc_failed:
+dma_alloc_failed:
+       return ret_val;
+}
+
+
+/**
+ * bnx2i_free_bd_table_pool - releases resources held by BD table pool
+ *
+ * @sess:              iscsi session pointer
+ *
+ * releases BD table pool memory
+ */
+void bnx2i_free_bd_table_pool(struct bnx2i_sess *sess)
+{
+       struct list_head *list;
+       struct io_bdt *bdt_info;
+       u32 bd_tbl_size;
+
+       bd_tbl_size = ISCSI_MAX_BDS_PER_CMD * sizeof(struct iscsi_bd);
+       list_for_each(list, &sess->bd_tbl_list) {
+               bdt_info = list_entry(list, struct io_bdt, link);
+               pci_free_consistent(sess->hba->pcidev, bd_tbl_size,
+                                   bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
+               bdt_info->bd_tbl = NULL;
+               if (bdt_info->cmdp) {
+                       bdt_info->cmdp->bd_tbl = NULL;
+                       bdt_info->cmdp = NULL;
+               }
+       }
+
+       list_for_each(list, &sess->bd_tbl_active) {
+               bdt_info = list_entry(list, struct io_bdt, link);
+               pci_free_consistent(sess->hba->pcidev, bd_tbl_size,
+                                   bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
+               bdt_info->bd_tbl = NULL;
+               if (bdt_info->cmdp) {
+                       bdt_info->cmdp->bd_tbl = NULL;
+                       bdt_info->cmdp = NULL;
+               }
+       }
+}
+
+
+/**
+ * bnx2i_setup_mp_bdt - allocates BD table resources to be used as
+ *                     the dummy buffer for '0' payload length iscsi requests
+ *
+ * @hba:               pointer to adapter structure
+ *
+ * allocate memory for dummy buffer and associated BD table to be used by
+ *     middle path (MP) requests
+ */
+int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
+{
+       int rc = 0;
+       struct iscsi_bd *mp_bdt;
+       u64 addr;
+
+       hba->mp_bd_tbl = pci_alloc_consistent(hba->pcidev,
+                                             PAGE_SIZE, &hba->mp_bd_dma);
+       if (!hba->mp_bd_tbl) {
+               printk(KERN_ERR "unable to allocate Middle Path BDT\n");
+               rc = -1;
+               goto out;
+       }
+
+       hba->dummy_buffer =
+               pci_alloc_consistent(hba->pcidev,
+                                    PAGE_SIZE, &hba->dummy_buf_dma);
+       if (!hba->dummy_buffer) {
+               printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
+               pci_free_consistent(hba->pcidev, PAGE_SIZE,
+                                   hba->mp_bd_tbl, hba->mp_bd_dma);
+               hba->mp_bd_tbl = NULL;
+               rc = -1;
+               goto out;
+       }
+
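+       /* one first+last BD covering the dummy page serves all zero-payload requests */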
+       mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
+       addr = (unsigned long) hba->dummy_buf_dma;
+       mp_bdt->buffer_addr_lo = addr & 0xffffffff;
+       mp_bdt->buffer_addr_hi = addr >> 32;
+       mp_bdt->buffer_length = PAGE_SIZE;
+       mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+                       ISCSI_BD_FIRST_IN_BD_CHAIN;
+out:
+       return rc;
+}
+
+
+/**
+ * bnx2i_free_mp_bdt - frees the middle path dummy buffer and its BD table
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * free MP dummy buffer and associated BD table
+ */
+void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
+{
+       if (hba->mp_bd_tbl) {
+               pci_free_consistent(hba->pcidev, PAGE_SIZE,
+                                   hba->mp_bd_tbl, hba->mp_bd_dma);
+               hba->mp_bd_tbl = NULL;
+       }
+       if (hba->dummy_buffer) {
+               pci_free_consistent(hba->pcidev, PAGE_SIZE,
+                                   hba->dummy_buffer, hba->dummy_buf_dma);
+               hba->dummy_buffer = NULL;
+       }
+       return;
+}
+
+
+/**
+ * bnx2i_start_iscsi_hba_shutdown - start hba shutdown by cleaning up
+ *                     all active sessions
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * interface is being brought down by the user; fail all active iSCSI sessions
+ *     belonging to this adapter
+ */
+void bnx2i_start_iscsi_hba_shutdown(struct bnx2i_hba *hba)
+{
+       struct list_head *list;
+       struct list_head *tmp;
+       struct bnx2i_sess *sess;
+       int lpcnt;
+       int rc;
+
+       list_for_each_safe(list, tmp, &hba->active_sess) {
+               sess = (struct bnx2i_sess *)list;
+               lpcnt = 4;
+               rc = bnx2i_do_iscsi_sess_recovery(sess, DID_NO_CONNECT);
+               while ((rc != SUCCESS) && lpcnt--) {
+                       msleep(1000);
+                       rc = bnx2i_do_iscsi_sess_recovery(sess, DID_NO_CONNECT);
+               }
+       }
+       wait_event_interruptible_timeout(hba->eh_wait,
+                                        ((hba->ofld_conns_active == 0)),
+                                        10 * HZ);
+}
+
+
+/**
+ * bnx2i_iscsi_handle_ip_event - inetdev callback to handle ip address change
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * IP address change indication; fail all iSCSI connections on this adapter
+ *     and let 'iscsid' reinstate the connections
+ */
+void bnx2i_iscsi_handle_ip_event(struct bnx2i_hba *hba)
+{
+       struct list_head *list;
+       struct list_head *tmp;
+       struct bnx2i_sess *sess;
+       u32 flags;
+
+       spin_lock_irqsave(&hba->lock, flags);
+       list_for_each_safe(list, tmp, &hba->active_sess) {
+               sess = (struct bnx2i_sess *)list;
+               spin_unlock_irqrestore(&hba->lock, flags);
+               bnx2i_do_iscsi_sess_recovery(sess, DID_RESET);
+               spin_lock_irqsave(&hba->lock, flags);
+       }
+       spin_unlock_irqrestore(&hba->lock, flags);
+}
+
+
+/**
+ * conn_err_recovery_task - does recovery on all queued sessions
+ *
+ * @work:              pointer to work struct
+ *
+ * iSCSI Session recovery queue manager
+ */
+static void
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
+conn_err_recovery_task(struct work_struct *work)
+#else
+conn_err_recovery_task(void *data)
+#endif
+{
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
+       struct bnx2i_hba *hba = container_of(work, struct bnx2i_hba,
+                                            err_rec_task);
+#else
+       struct bnx2i_hba *hba = data;
+#endif
+       struct bnx2i_sess *sess;
+       int cons_idx = hba->sess_recov_cons_idx;
+
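+       /* drain the session recovery ring, wrapping the consumer index at max */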
+       while (hba->sess_recov_prod_idx != cons_idx) {
+               sess = hba->sess_recov_list[cons_idx];
+               if (sess->state == BNX2I_SESS_IN_LOGOUT)
+                       bnx2i_do_iscsi_sess_recovery(sess, DID_NO_CONNECT);
+               else
+                       bnx2i_do_iscsi_sess_recovery(sess, DID_RESET);
+               if (cons_idx == hba->sess_recov_max_idx)
+                       cons_idx = 0;
+               else
+                       cons_idx++;
+       }
+       hba->sess_recov_cons_idx = cons_idx;
+}
+
+/**
+ * bnx2i_init_ctx_dump_mem - allocate iscsi context dump buffer
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * allocate memory buffer to stage iscsi conn context from the chip
+ */
+void bnx2i_init_ctx_dump_mem(struct bnx2i_hba *hba)
+{
+       if (hba->ctx_addr)
+               return;
+
+       hba->ictx_poll_mode = 0;
+       hba->ctx_size = 0;
+       hba->ctx_read_cnt = 0xffffffff;
+       hba->ctx_addr = pci_alloc_consistent(hba->pcidev,
+                                            BNX2I_CONN_CTX_BUF_SIZE,
+                                            &hba->ctx_dma_hndl);
+       if (!hba->ctx_addr)
+               return;
+       hba->ctx_size = BNX2I_CONN_CTX_BUF_SIZE;
+}
+
+
+/**
+ * bnx2i_free_ctx_dump_mem - free context dump memory
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * free context memory buffer
+ */
+void bnx2i_free_ctx_dump_mem(struct bnx2i_hba *hba)
+{
+       if (!hba->ctx_addr || (hba->ctx_size == 0))
+               return;
+
+       pci_free_consistent(hba->pcidev, hba->ctx_size,
+                           hba->ctx_addr, hba->ctx_dma_hndl);
+       hba->ctx_dma_hndl = 0;
+       hba->ctx_addr = NULL;
+       hba->ctx_size = 0;
+}
+
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ *
+ * @hba:               pointer to adapter instance
+ * @ep:                pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+                                 struct bnx2i_endpoint *ep)
+{
+       int cur_idx;
+
+       write_lock(&hba->ep_rdwr_lock);
+       cur_idx = hba->ep_destroy_prod_idx++;
+       hba->ep_destroy_list[cur_idx] = ep;
+       hba->ep_destroy_prod_idx %= hba->ep_destroy_max_idx;
+       write_unlock(&hba->ep_rdwr_lock);
+       return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_next - returns head of destroy EP list
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * EP destroy queue manager
+ */
+struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba)
+{
+       int cur_idx;
+
+       read_lock(&hba->ep_rdwr_lock);
+       if (hba->ep_destroy_prod_idx == hba->ep_destroy_cons_idx) {
+               read_unlock(&hba->ep_rdwr_lock);
+               return NULL;
+       }
+       cur_idx = hba->ep_destroy_cons_idx++;
+       hba->ep_destroy_cons_idx %= hba->ep_destroy_max_idx;
+       read_unlock(&hba->ep_rdwr_lock);
+
+       return (hba->ep_destroy_list[cur_idx]);
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ *
+ * @hba:               pointer to adapter instance
+ * @ep:                pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+                                 struct bnx2i_endpoint *ep)
+{
+       int cur_idx;
+
+       write_lock(&hba->ep_rdwr_lock);
+       cur_idx = hba->ep_ofld_prod_idx++;
+       hba->ep_ofld_list[cur_idx] = ep;
+       hba->ep_ofld_prod_idx %= hba->ep_ofld_max_idx;
+       write_unlock(&hba->ep_rdwr_lock);
+       return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_next - get head of pending conn offload list
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * pending conn offload completion queue manager
+ */
+struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba)
+{
+       int cur_idx;
+
+       read_lock(&hba->ep_rdwr_lock);
+       if (hba->ep_ofld_prod_idx == hba->ep_ofld_cons_idx) {
+               read_unlock(&hba->ep_rdwr_lock);
+               return NULL;
+       }
+       cur_idx = hba->ep_ofld_cons_idx++;
+       hba->ep_ofld_cons_idx %= hba->ep_ofld_max_idx;
+       read_unlock(&hba->ep_rdwr_lock);
+
+       return (hba->ep_ofld_list[cur_idx]);
+}
+
+/**
+ * bnx2i_init_ep_ofld_destroy_que - init EP destroy queue
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_init_ep_ofld_destroy_que(struct bnx2i_hba *hba)
+{
+       rwlock_init(&hba->ep_rdwr_lock);
+       hba->ep_ofld_list = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!hba->ep_ofld_list)
+               return -ENOMEM;
+
+       hba->ep_ofld_prod_idx = 0;
+       hba->ep_ofld_cons_idx = 0;
+       hba->ep_ofld_max_idx =
+               PAGE_SIZE / sizeof(struct bnx2i_endpoint *) - 1;
+
+       hba->ep_destroy_list = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!hba->ep_destroy_list) {
+               kfree(hba->ep_ofld_list);
+               hba->ep_ofld_list = NULL;
+               return -ENOMEM;
+       }
+
+       hba->ep_destroy_prod_idx = 0;
+       hba->ep_destroy_cons_idx = 0;
+       hba->ep_destroy_max_idx =
+               PAGE_SIZE / sizeof(struct bnx2i_endpoint *) - 1;
+       return 0;
+}
+
+
+/**
+ * bnx2i_free_ep_ofld_destroy_que - free resources held by EP destroy queue
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * EP destroy queue manager
+ */
+static void bnx2i_free_ep_ofld_destroy_que(struct bnx2i_hba *hba)
+{
+       kfree(hba->ep_ofld_list);
+       hba->ep_ofld_list = NULL;
+
+       kfree(hba->ep_destroy_list);
+       hba->ep_destroy_list = NULL;
+}
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ *
+ * @cnic:              cnic device pointer
+ *
+ * allocate & initialize adapter structure and call other
+ *     support routines to do per adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+       struct bnx2i_hba *hba;
+
+       hba = kmalloc(sizeof(struct bnx2i_hba), GFP_KERNEL);
+       if (!hba)
+               return NULL;
+
+       memset(hba, 0, sizeof(struct bnx2i_hba));
+       /* Get PCI related information and update hba struct members */
+       hba->cnic = cnic;
+       hba->netdev = cnic->netdev;
+
+       INIT_LIST_HEAD(&hba->active_sess);
+       if (bnx2i_init_ep_ofld_destroy_que(hba))
+               goto ep_ofld_que_err;
+
+       hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+       hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+       if (bnx2i_setup_free_cid_que(hba))
+               goto cid_que_err;
+
+       /* Get device type required to determine default SQ size */
+       if (cnic->pcidev) {
+               hba->pci_did = cnic->pcidev->device;
+               bnx2i_identify_device(hba);
+       }
+
+       /* SQ/RQ/CQ size can be changed via sysfs interface */
+       if (sq_size)
+               hba->max_sqes = sq_size;
+       else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+               hba->max_sqes = BNX2I_5770x_SQ_WQES_DEFAULT;
+       else
+               hba->max_sqes = BNX2I_570x_SQ_WQES_DEFAULT;
+
+       hba->max_rqes = rq_size;
+       hba->max_cqes = sq_size + rq_size;
+       hba->num_ccell = hba->max_sqes / 2;
+
+       spin_lock_init(&hba->lock);
+       /* initialize timer and wait queue used for resource cleanup when
+        * interface is brought down */
+       init_timer(&hba->hba_timer);
+       init_waitqueue_head(&hba->eh_wait);
+
+#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
+       INIT_WORK(&hba->err_rec_task, conn_err_recovery_task);
+#else
+       INIT_WORK(&hba->err_rec_task, conn_err_recovery_task, hba);
+#endif
+       hba->sess_recov_prod_idx = 0;
+       hba->sess_recov_cons_idx = 0;
+       hba->sess_recov_max_idx = 0;
+       hba->sess_recov_list = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!hba->sess_recov_list)
+               goto rec_que_err;
+       hba->sess_recov_max_idx = PAGE_SIZE / sizeof (struct bnx2i_sess *) - 1;
+
+       return hba;
+
+rec_que_err:
+       bnx2i_release_free_cid_que(hba);
+cid_que_err:
+       bnx2i_free_ep_ofld_destroy_que(hba);
+ep_ofld_que_err:
+       bnx2i_free_hba(hba);
+
+       return NULL;
+}
+
+
+/**
+ * bnx2i_free_hba - releases hba structure and resources held by the adapter
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+       bnx2i_release_free_cid_que(hba);
+       bnx2i_free_ep_ofld_destroy_que(hba);
+
+       INIT_LIST_HEAD(&hba->active_sess);
+       /* Free memory held by hba structure */
+       kfree(hba);
+}
+
+
+/**
+ * bnx2i_flush_active_cmd_queue - flush active command queue
+ *
+ * @sess:              iscsi session pointer
+ * @err_code:          SCSI ML error code, DID_BUS_BUSY
+ *
+ * return all commands in active queue which should already have been
+ *     cleaned up by the cnic device.
+ */
+static void bnx2i_flush_active_cmd_queue(struct bnx2i_sess *sess, int err_code)
+{
+       struct list_head *list;
+       struct list_head *tmp;
+       struct bnx2i_cmd *cmd;
+       unsigned long flags;
+
+       if (!sess->num_active_cmds)
+               return;
+
+       spin_lock_irqsave(sess->host->host_lock, flags);
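+       /* SCSI commands are failed back to SCSI-ML; other requests are simply freed */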
+       list_for_each_safe(list, tmp, &sess->active_cmds) {
+               cmd = (struct bnx2i_cmd *) list;
+               cmd->req.itt &= ISCSI_CMD_RESPONSE_INDEX;
+               list_del_init(&cmd->link);
+               if (cmd->req.op_code == ISCSI_OP_SCSI_CMD) {
+                       bnx2i_iscsi_unmap_sg_list(sess->hba, cmd);
+                       cmd->cmd_state = ISCSI_CMD_STATE_COMPLETED;
+                       bnx2i_return_failed_command(sess, cmd, err_code);
+               } else
+                       sess->num_active_cmds--;
+               bnx2i_free_cmd(sess, cmd);
+       }
+       spin_unlock_irqrestore(sess->host->host_lock, flags);
+}
+
+
+/**
+ * bnx2i_session_recovery_start - start recovery process on given session
+ *
+ * @sess:              iscsi session pointer
+ * @err_code:          SCSI ML error code, DID_BUS_BUSY
+ *
+ * initiate cleanup of outstanding commands for sess recovery
+ */
+static int bnx2i_session_recovery_start(struct bnx2i_sess *sess, int err_code)
+{
+       if (sess->state == BNX2I_SESS_IN_LOGOUT)
+               return 0;
+
+       if (!is_sess_active(sess)) {
+               if (sess->recovery_state)
+                       return -EPERM;
+               wait_event_interruptible_timeout(sess->er_wait,
+                                                (sess->state ==
+                                                 BNX2I_SESS_IN_FFP), 20 * HZ);
+               if (signal_pending(current))
+                       flush_signals(current);
+               if (!is_sess_active(sess)) {
+                       printk(KERN_ALERT "sess_reco: sess still not active\n");
+                       sess->lead_conn->state = CONN_STATE_XPORT_FREEZE;
+                       return -EPERM;
+               }
+       }
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_do_iscsi_sess_recovery - implements session recovery code
+ *
+ * @sess:              iscsi session pointer
+ * @err_code:          SCSI ML error code, DID_BUS_BUSY, DID_NO_CONNECT,
+ *                     DID_RESET
+ *
+ * SCSI host reset handler, which translates to iSCSI session
+ *     recovery. This routine starts internal driver session recovery and
+ *     indicates a connection error to 'iscsid', which does session
+ *     reinstatement. This is a synchronous call that waits for completion
+ *     and returns the ultimate result of the session recovery process to
+ *     the caller
+ */
+int bnx2i_do_iscsi_sess_recovery(struct bnx2i_sess *sess, int err_code)
+{
+       struct bnx2i_hba *hba;
+       struct bnx2i_conn *conn = sess->lead_conn;
+
+       if (bnx2i_session_recovery_start(sess, err_code)) {
+               printk(KERN_INFO "bnx2i: sess rec start returned error\n");
+               return FAILED;
+       }
+       hba = sess->hba;
+
+       sess->recovery_state = ISCSI_SESS_RECOVERY_OPEN_ISCSI;
+       iscsi_conn_error(conn->cls_conn, ISCSI_ERR_CONN_FAILED);
+
+       /* if session teardown is because of net interface down,
+        * no need to wait for complete recovery */
+       if (err_code == DID_NO_CONNECT)
+               wait_event_interruptible_timeout(sess->er_wait,
+                                                !conn->ep,
+                                                msecs_to_jiffies(1000));
+       else
+               wait_event_interruptible(sess->er_wait,
+                                        ((sess->recovery_state &
+                                          ISCSI_SESS_RECOVERY_COMPLETE) ||
+                                         (sess->recovery_state &
+                                          ISCSI_SESS_RECOVERY_FAILED)));
+
+       if (signal_pending(current))
+               flush_signals(current);
+
+       if (err_code == DID_NO_CONNECT)
+               goto ret_success;
+
+       if (sess->recovery_state & ISCSI_SESS_RECOVERY_COMPLETE) {
+               printk(KERN_INFO "bnx2i: host #%d reset succeeded\n",
+                                 sess->host->host_no);
+               sess->state = BNX2I_SESS_IN_FFP;
+       } else
+               return FAILED;
+
+ret_success:
+       sess->recovery_state = 0;
+       return SUCCESS;
+}
+
+
+/**
+ * bnx2i_iscsi_sess_release - cleanup iscsi session & reclaim all resources
+ *
+ * @hba:               pointer to adapter instance
+ * @sess:              iscsi session pointer
+ *
+ * free up resources held by this session including ITT queue, cmd struct pool,
+ *     BD table pool. HBA lock is held while manipulating active session list
+ */
+void bnx2i_iscsi_sess_release(struct bnx2i_hba *hba, struct bnx2i_sess *sess)
+{
+       u32 flags;
+
+       bnx2i_release_free_itt_queue(sess);
+       bnx2i_free_cmd_pool(sess);
+       bnx2i_free_bd_table_pool(sess);
+       bnx2i_free_all_bdt_resc_pages(sess);
+
+       spin_lock_irqsave(&hba->lock, flags);
+       list_del_init(&sess->link);
+       hba->num_active_sess--;
+       spin_unlock_irqrestore(&hba->lock, flags);
+}
+
+
+/**
+ * bnx2i_iscsi_sess_new - initialize newly allocated session structure
+ *
+ * @hba:               pointer to adapter instance
+ * @sess:              iscsi session pointer
+ *
+ * initialize session structure elements and allocate per sess resources.
+ *     Some of the per session resources allocated are command struct pool,
+ *     BD table pool and ITT queue region
+ */
+int bnx2i_iscsi_sess_new(struct bnx2i_hba *hba, struct bnx2i_sess *sess)
+{
+       u32 flags;
+
+       spin_lock_irqsave(&hba->lock, flags);
+       list_add_tail(&sess->link, &hba->active_sess);
+       hba->num_active_sess++;
+       spin_unlock_irqrestore(&hba->lock, flags);
+
+       sess->sq_size = hba->max_sqes;
+       sess->tsih = 0;
+       sess->lead_conn = NULL;
+
+       spin_lock_init(&sess->lock);
+
+       /* initialize active connection list */
+       INIT_LIST_HEAD(&sess->conn_list);
+       INIT_LIST_HEAD(&sess->free_cmds);
+
+       INIT_LIST_HEAD(&sess->active_cmds);
+       sess->num_active_cmds = 0;
+
+       sess->num_active_conn = 0;
+       sess->max_conns = 1;
+       sess->target_name = NULL;
+
+       sess->state = BNX2I_SESS_INITIAL;
+       sess->recovery_state = 0;
+       sess->old_recovery_state = 0;
+       sess->tmf_active = 0;
+
+       if (bnx2i_alloc_bd_table_pool(sess) != 0) {
+               printk(KERN_ERR "sess_new: unable to alloc bd table pool\n");
+               goto err_bd_pool;
+       }
+
+       if (bnx2i_alloc_cmd_pool(sess) != 0) {
+               printk(KERN_ERR "sess_new: alloc cmd pool failed\n");
+               goto err_cmd_pool;
+       }
+
+       if (bnx2i_setup_free_itt_queue(sess)) {
+               printk(KERN_ERR "sess_new: unable to alloc itt queue\n");
+               goto err_itt_que;
+       }
+
+       init_timer(&sess->abort_timer);
+       init_waitqueue_head(&sess->er_wait);
+
+       return 0;
+
+err_itt_que:
+       bnx2i_free_cmd_pool(sess);
+err_cmd_pool:
+       bnx2i_free_bd_table_pool(sess);
+err_bd_pool:
+       return -ENOMEM;
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ *
+ * @hba:               pointer to adapter instance
+ * @conn:              iscsi connection pointer
+ *
+ * Login related resources, mostly BDT & payload DMA memory, are freed
+ */
+void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+                                    struct bnx2i_conn *conn)
+{
+       if (conn->gen_pdu.resp_bd_tbl) {
+               pci_free_consistent(hba->pcidev, PAGE_SIZE,
+                                   conn->gen_pdu.resp_bd_tbl,
+                                   conn->gen_pdu.resp_bd_dma);
+               conn->gen_pdu.resp_bd_tbl = NULL;
+       }
+
+       if (conn->gen_pdu.req_bd_tbl) {
+               pci_free_consistent(hba->pcidev, PAGE_SIZE,
+                                   conn->gen_pdu.req_bd_tbl,
+                                   conn->gen_pdu.req_bd_dma);
+               conn->gen_pdu.req_bd_tbl = NULL;
+       }
+
+       if (conn->gen_pdu.resp_buf) {
+               pci_free_consistent(hba->pcidev, ISCSI_CONN_LOGIN_BUF_SIZE,
+                                   conn->gen_pdu.resp_buf,
+                                   conn->gen_pdu.resp_dma_addr);
+               conn->gen_pdu.resp_buf = NULL;
+       }
+
+       if (conn->gen_pdu.req_buf) {
+               pci_free_consistent(hba->pcidev, ISCSI_CONN_LOGIN_BUF_SIZE,
+                                   conn->gen_pdu.req_buf,
+                                   conn->gen_pdu.req_dma_addr);
+               conn->gen_pdu.req_buf = NULL;
+       }
+}
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources used for
+ *                     login / nopout pdus
+ *
+ * @hba:               pointer to adapter instance
+ * @conn:              iscsi connection pointer
+ *
+ * Login & nop-in related resources are allocated in this routine.
+ */
+static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
+                                           struct bnx2i_conn *conn)
+{
+       /* Allocate memory for login request/response buffers */
+       conn->gen_pdu.req_buf =
+               pci_alloc_consistent(hba->pcidev, ISCSI_CONN_LOGIN_BUF_SIZE,
+                                    &conn->gen_pdu.req_dma_addr);
+       if (conn->gen_pdu.req_buf == NULL)
+               goto login_req_buf_failure;
+
+       conn->gen_pdu.req_buf_size = 0;
+       conn->gen_pdu.req_wr_ptr = conn->gen_pdu.req_buf;
+
+       conn->gen_pdu.resp_buf =
+               pci_alloc_consistent(hba->pcidev, ISCSI_CONN_LOGIN_BUF_SIZE,
+                                    &conn->gen_pdu.resp_dma_addr);
+       if (conn->gen_pdu.resp_buf == NULL)
+               goto login_resp_buf_failure;
+
+       conn->gen_pdu.resp_buf_size = ISCSI_CONN_LOGIN_BUF_SIZE;
+       conn->gen_pdu.resp_wr_ptr = conn->gen_pdu.resp_buf;
+
+       conn->gen_pdu.req_bd_tbl =
+               pci_alloc_consistent(hba->pcidev, PAGE_SIZE,
+                                    &conn->gen_pdu.req_bd_dma);
+       if (conn->gen_pdu.req_bd_tbl == NULL)
+               goto login_req_bd_tbl_failure;
+
+       conn->gen_pdu.resp_bd_tbl =
+               pci_alloc_consistent(hba->pcidev, PAGE_SIZE,
+                                    &conn->gen_pdu.resp_bd_dma);
+       if (conn->gen_pdu.resp_bd_tbl == NULL)
+               goto login_resp_bd_tbl_failure;
+
+       return 0;
+
+login_resp_bd_tbl_failure:
+       pci_free_consistent(hba->pcidev, PAGE_SIZE, conn->gen_pdu.req_bd_tbl,
+                           conn->gen_pdu.req_bd_dma);
+       conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+       pci_free_consistent(hba->pcidev, ISCSI_CONN_LOGIN_BUF_SIZE,
+                           conn->gen_pdu.resp_buf,
+                           conn->gen_pdu.resp_dma_addr);
+       conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+       pci_free_consistent(hba->pcidev, ISCSI_CONN_LOGIN_BUF_SIZE,
+                           conn->gen_pdu.req_buf, conn->gen_pdu.req_dma_addr);
+       conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+       printk(KERN_ERR "bnx2i: a conn login resource alloc failed!!\n");
+       return -ENOMEM;
+
+}
+
+
+/**
+ * bnx2i_iscsi_conn_new - initialize newly created connection structure
+ *
+ * @sess:              iscsi session pointer
+ * @conn:              iscsi connection pointer
+ *
+ * the connection structure is initialized, which mainly includes allocation
+ *     of login resources and lock initialization
+ */
+int bnx2i_iscsi_conn_new(struct bnx2i_sess *sess, struct bnx2i_conn *conn)
+{
+       struct bnx2i_hba *hba = sess->hba;
+
+       conn->sess = sess;
+       conn->header_digest_en = 0;
+       conn->data_digest_en = 0;
+
+       spin_lock_init(&conn->lock);
+
+       conn->gen_pdu.cmd = NULL;
+
+       /* 'ep' ptr will be assigned in bind() call */
+       conn->ep = NULL;
+
+       if (bnx2i_conn_alloc_login_resources(hba, conn)) {
+               printk(KERN_ALERT "conn_new: login resc alloc failed!!\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_login_resp_update_cmdsn - extracts SN & MAX_SN from login response
+ *                     header and updates the driver's sequence counters
+ *
+ * @conn:              iscsi connection pointer
+ *
+ * extracts and updates SN counters from the login response
+ */
+static int bnx2i_login_resp_update_cmdsn(struct bnx2i_conn *conn)
+{
+       u32 max_cmdsn;
+       u32 exp_cmdsn;
+       u32 stat_sn;
+       struct bnx2i_sess *sess = conn->sess;
+       struct iscsi_nopin *hdr;
+
+       hdr = (struct iscsi_nopin *) &conn->gen_pdu.resp_hdr;
+
+       max_cmdsn = ntohl(hdr->max_cmdsn);
+       exp_cmdsn = ntohl(hdr->exp_cmdsn);
+       stat_sn = ntohl(hdr->statsn);
+#define SN_DELTA_ISLAND                0xffff
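+       /* reject the response if max_cmdsn has fallen behind exp_cmdsn - 1
+        * by less than the 64K wrap-around window
+        */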
+       if (max_cmdsn < exp_cmdsn - 1 &&
+           max_cmdsn > exp_cmdsn - SN_DELTA_ISLAND)
+               return -EINVAL;
+
+       if (max_cmdsn > sess->max_cmdsn ||
+           max_cmdsn < sess->max_cmdsn - SN_DELTA_ISLAND)
+               sess->max_cmdsn = max_cmdsn;
+
+       if (exp_cmdsn > sess->exp_cmdsn ||
+           exp_cmdsn < sess->exp_cmdsn - SN_DELTA_ISLAND)
+               sess->exp_cmdsn = exp_cmdsn;
+
+       if (stat_sn == conn->exp_statsn)
+               conn->exp_statsn++;
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_update_cmd_sequence - update session sequencing parameter
+ *
+ * @sess:              iscsi session pointer
+ * @exp_sn:            iscsi expected command seq num
+ * @max_sn:            iscsi max command seq num
+ *
+ * update iSCSI SN counters for the given session
+ */
+void bnx2i_update_cmd_sequence(struct bnx2i_sess *sess,
+                              u32 exp_sn, u32 max_sn)
+{
+       u32 exp_cmdsn = exp_sn;
+       u32 max_cmdsn = max_sn;
+
+       if (max_cmdsn < exp_cmdsn - 1 &&
+           max_cmdsn > exp_cmdsn - SN_DELTA_ISLAND) {
+               printk(KERN_ALERT "cmd_sequence: error, exp 0x%x, max 0x%x\n",
+                                  exp_cmdsn, max_cmdsn);
+               BUG_ON(1);
+       }
+       if (max_cmdsn > sess->max_cmdsn ||
+           max_cmdsn < sess->max_cmdsn - SN_DELTA_ISLAND)
+               sess->max_cmdsn = max_cmdsn;
+       if (exp_cmdsn > sess->exp_cmdsn ||
+           exp_cmdsn < sess->exp_cmdsn - SN_DELTA_ISLAND)
+               sess->exp_cmdsn = exp_cmdsn;
+}
+
+
+/**
+ * bnx2i_process_scsi_resp - complete SCSI command processing by calling
+ *                     'scsi_done', free iscsi cmd structure to free list
+ *
+ * @cmd:               iscsi cmd pointer
+ * @resp_cqe:          scsi response cqe pointer
+ *
+ * validates the scsi response for normal completion, sense data and any
+ *     underflow/overflow condition, propagates the SCSI response to SCSI-ML
+ *     by calling scsi_done() and returns the command struct to the free pool
+ */
+void bnx2i_process_scsi_resp(struct bnx2i_cmd *cmd,
+                           struct iscsi_cmd_response *resp_cqe)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       struct Scsi_Host *host;
+       u16 sense_data[128];
+       int data_len;
+       u16 sense_len;
+
+       host = cmd->conn->sess->host;
+       sc->result = (DID_OK << 16) | resp_cqe->status;
+
+       if (resp_cqe->response != ISCSI_STATUS_CMD_COMPLETED) {
+               sc->result = (DID_ERROR << 16);
+               goto out;
+       }
+
+       if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
+               data_len = resp_cqe->data_length;
+               if (data_len < 2) {
+                       printk(KERN_ERR "bnx2i: CHK_CONDITION - invalid "
+                                        "data length %d\n", data_len);
+                       goto out;
+               }
+
+               if (data_len > BNX2I_RQ_WQE_SIZE) {
+                       printk(KERN_ALERT "bnx2i: sense data len %d > RQ sz\n",
+                                         data_len);
+                       data_len = BNX2I_RQ_WQE_SIZE;
+               }
+               if (data_len) {
+                       memset(sc->sense_buffer, 0, sizeof(sc->sense_buffer));
+                       bnx2i_get_rq_buf(cmd->conn, (char *)sense_data, data_len);
+                       bnx2i_put_rq_buf(cmd->conn, 1);
+                       cmd->conn->total_data_octets_rcvd += data_len;
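+                       /* first 16-bit word of the RQ buffer holds the sense
+                        * data length; the sense bytes follow it
+                        */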
+                       sense_len = be16_to_cpu(*((__be16 *) sense_data));
+
+                       if (sense_len > SCSI_SENSE_BUFFERSIZE)
+                               sense_len = SCSI_SENSE_BUFFERSIZE;
+
+                       memcpy(sc->sense_buffer, &sense_data[1],
+                              (int) sense_len);
+               }
+       }
+
+
+       if (sc->sc_data_direction == DMA_TO_DEVICE)
+               goto out;
+
+       if (resp_cqe->response_flags &
+           ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW) {
+               if (resp_cqe->residual_count &&
+                   (resp_cqe->residual_count <= scsi_bufflen(sc))) {
+                       scsi_set_resid(sc, resp_cqe->residual_count);
+                       cmd->conn->total_data_octets_rcvd -= scsi_get_resid(sc);
+               } else
+                       sc->result = (DID_BAD_TARGET << 16) | resp_cqe->status;
+       } else if (resp_cqe->response_flags &
+                  ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW) {
+               scsi_set_resid(sc, resp_cqe->residual_count);
+               cmd->conn->total_data_octets_rcvd += scsi_get_resid(sc);
+       }
+
+out:
+       return;
+
+}
+
+/**
+ * bnx2i_indicate_login_resp - process iscsi login response
+ *
+ * @conn:              iscsi connection pointer
+ *
+ * pushes login response PDU to application daemon, 'iscsid' by
+ *             calling iscsi_recv_pdu()
+ */
+int bnx2i_indicate_login_resp(struct bnx2i_conn *conn)
+{
+       int data_len;
+       struct iscsi_login_rsp *login_resp =
+               (struct iscsi_login_rsp *) &conn->gen_pdu.resp_hdr;
+
+       /* check if this is the first login response for this connection.
+        * If yes, we need to copy initial StatSN to connection structure.
+        */
+       if (conn->exp_statsn == STATSN_UPDATE_SIGNATURE) {
+               conn->exp_statsn = ntohl(login_resp->statsn) + 1;
+       }
+
+       if (bnx2i_login_resp_update_cmdsn(conn))
+               return -EINVAL;
+
+       data_len = conn->gen_pdu.resp_wr_ptr - conn->gen_pdu.resp_buf;
+       iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *) login_resp,
+                      (char *) conn->gen_pdu.resp_buf, data_len);
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_indicate_logout_resp - process iscsi logout response
+ *
+ * @conn:              iscsi connection pointer
+ *
+ * pushes logout response PDU to application daemon, 'iscsid' by
+ *             calling iscsi_recv_pdu()
+ */
+int bnx2i_indicate_logout_resp(struct bnx2i_conn *conn)
+{
+       unsigned long flags;
+       struct iscsi_logout_rsp *logout_resp =
+               (struct iscsi_logout_rsp *) &conn->gen_pdu.resp_hdr;
+
+       iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *) logout_resp,
+                      (char *) NULL, 0);
+
+       if (conn->gen_pdu.cmd) {
+               spin_lock_irqsave(conn->sess->host->host_lock, flags);
+               list_del_init(&conn->gen_pdu.cmd->link);
+               bnx2i_free_cmd(conn->sess, conn->gen_pdu.cmd);
+               conn->gen_pdu.cmd = NULL;
+               spin_unlock_irqrestore(conn->sess->host->host_lock, flags);
+       }
+       return 0;
+}
+
+
+/**
+ * bnx2i_indicate_async_mesg - process iscsi ASYNC message indication
+ *
+ * @conn:              iscsi connection pointer
+ *
+ * pushes iSCSI async PDU to application daemon, 'iscsid' by calling
+ *     iscsi_recv_pdu()
+ */
+int bnx2i_indicate_async_mesg(struct bnx2i_conn *conn)
+{
+       struct iscsi_async *async_msg =
+               (struct iscsi_async *) &conn->gen_pdu.resp_hdr;
+
+       iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *) async_msg,
+                      (char *) NULL, 0);
+       return 0;
+}
+
+
+
+/**
+ * bnx2i_process_nopin - process iscsi nopin pdu
+ *
+ * @conn:              iscsi connection pointer
+ * @cmd:               iscsi cmd pointer
+ * @data_buf:          payload buffer pointer
+ * @data_len:          payload length
+ *
+ * pushes nopin pdu to application daemon, 'iscsid' by calling iscsi_recv_pdu
+ */
+int bnx2i_process_nopin(struct bnx2i_conn *conn, struct bnx2i_cmd *cmd,
+                       char *data_buf, int data_len)
+{
+       struct iscsi_nopin *nopin_msg =
+               (struct iscsi_nopin *) &conn->gen_pdu.resp_hdr;
+
+       iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *) nopin_msg,
+                      (char *) data_buf, data_len);
+
+       spin_lock(conn->sess->host->host_lock);
+       list_del_init(&cmd->link);
+       conn->sess->num_active_cmds--;
+       bnx2i_free_cmd(cmd->conn->sess, cmd);
+       spin_unlock(conn->sess->host->host_lock);
+
+       return 0;
+}
+
+
+
+/**
+ * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table to be used with
+ *                     generic iscsi pdus
+ *
+ * @conn:              iscsi connection pointer
+ *
+ * Allocates buffers and BD tables before shipping requests to cnic
+ *     for PDUs prepared by 'iscsid' daemon
+ */
+static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *conn)
+{
+       struct iscsi_bd *bd_tbl;
+
+       bd_tbl = (struct iscsi_bd *) conn->gen_pdu.req_bd_tbl;
+
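+       /* a single BD describes the whole login request buffer; its length is
+        * however much iscsid has written into the buffer so far
+        */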
+       bd_tbl->buffer_addr_hi =
+               (u32) ((u64) conn->gen_pdu.req_dma_addr >> 32);
+       bd_tbl->buffer_addr_lo = (u32) conn->gen_pdu.req_dma_addr;
+       bd_tbl->buffer_length = conn->gen_pdu.req_wr_ptr -
+                               conn->gen_pdu.req_buf;
+       bd_tbl->reserved0 = 0;
+       bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+                       ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+       bd_tbl = (struct iscsi_bd *) conn->gen_pdu.resp_bd_tbl;
+       bd_tbl->buffer_addr_hi =
+               (u32) ((u64) conn->gen_pdu.resp_dma_addr >> 32);
+       bd_tbl->buffer_addr_lo = (u32) conn->gen_pdu.resp_dma_addr;
+       bd_tbl->buffer_length = ISCSI_CONN_LOGIN_BUF_SIZE;
+       bd_tbl->reserved0 = 0;
+       bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+                       ISCSI_BD_FIRST_IN_BD_CHAIN;
+}
+
+
+/**
+ * bnx2i_nopout_check_active_cmds - checks if iscsi link is idle
+ *
+ * @conn:              iscsi connection pointer
+ * @cmnd:              iscsi cmd pointer
+ *
+ * called to check whether the iscsi connection is idle. A proactive nop-out
+ *     is sent to the target only if the link is idle
+ */
+static int bnx2i_nopout_check_active_cmds(struct bnx2i_conn *conn,
+                                         struct bnx2i_cmd *cmnd)
+{
+       struct iscsi_nopin *nopin_msg =
+               (struct iscsi_nopin *) &conn->gen_pdu.resp_hdr;
+       int len;
+       char *buf = NULL;
+       unsigned long flags;
+
+       if ((conn->nopout_num_scsi_cmds == conn->num_scsi_cmd_pdus) &&
+           !conn->sess->num_active_cmds) {
+               return -1;
+       }
+
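+       /* connection is not idle - complete the nop-out locally by handing a
+        * fabricated nop-in back to iscsid instead of sending it on the wire
+        */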
+       len = conn->gen_pdu.req_buf_size;
+       if (len)
+               buf = conn->gen_pdu.req_buf;
+
+       memset(nopin_msg, 0x00, sizeof(struct iscsi_nopin));
+       nopin_msg->opcode = ISCSI_OP_NOOP_IN;
+       nopin_msg->flags = ISCSI_FLAG_CMD_FINAL;
+       memcpy(nopin_msg->lun, conn->gen_pdu.pdu_hdr.lun, 8);
+       nopin_msg->itt = conn->gen_pdu.pdu_hdr.itt;
+       nopin_msg->ttt = ISCSI_RESERVED_TAG;
+       nopin_msg->statsn = conn->gen_pdu.pdu_hdr.exp_statsn;
+       nopin_msg->exp_cmdsn = htonl(conn->sess->exp_cmdsn);
+       nopin_msg->max_cmdsn = htonl(conn->sess->max_cmdsn);
+
+       iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *) nopin_msg,
+                      (char *) buf, len);
+
+       conn->nopout_num_scsi_cmds = conn->num_scsi_cmd_pdus;
+
+       spin_lock_irqsave(conn->sess->host->host_lock, flags);
+       list_del_init(&cmnd->link);
+       bnx2i_free_cmd(conn->sess, cmnd);
+       spin_unlock_irqrestore(conn->sess->host->host_lock, flags);
+       return 0;
+}
+
+
+/**
+ * bnx2i_iscsi_send_generic_request - called to send iscsi login/nopout/logout
+ *                     pdus
+ *
+ * @cmnd:              iscsi cmd pointer
+ *
+ * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
+ *     Nop-out and Logout requests flow through this path.
+ */
+static int bnx2i_iscsi_send_generic_request(struct bnx2i_cmd *cmnd)
+{
+       int rc = 0;
+       char *buf;
+       int data_len;
+       struct bnx2i_conn *conn = cmnd->conn;
+
+       bnx2i_iscsi_prep_generic_pdu_bd(conn);
+       switch (cmnd->iscsi_opcode & ISCSI_OPCODE_MASK) {
+       case ISCSI_OP_LOGIN:
+               bnx2i_send_iscsi_login(conn, cmnd);
+               break;
+
+       case ISCSI_OP_NOOP_OUT:
+               if (!bnx2i_nopout_when_cmds_active &&
+                   !bnx2i_nopout_check_active_cmds(conn, cmnd))
+                       return 0;
+
+               conn->nopout_num_scsi_cmds = conn->num_scsi_cmd_pdus;
+               data_len = conn->gen_pdu.req_buf_size;
+               buf = conn->gen_pdu.req_buf;
+               if (data_len)
+                       rc = bnx2i_send_iscsi_nopout(conn, cmnd,
+                                                    ISCSI_RESERVED_TAG,
+                                                    buf, data_len, 1);
+               else
+                       rc = bnx2i_send_iscsi_nopout(conn, cmnd,
+                                                    ISCSI_RESERVED_TAG,
+                                                    NULL, 0, 1);
+               break;
+
+       case ISCSI_OP_LOGOUT:
+               rc = bnx2i_send_iscsi_logout(conn, cmnd);
+               break;
+
+       default:
+               printk(KERN_ALERT "send_gen: unsupported op 0x%x\n",
+                                  cmnd->iscsi_opcode);
+       }
+       return rc;
+}
+
+
+/**********************************************************************
+ *             SCSI-ML Interface
+ **********************************************************************/
+
+/**
+ * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
+ *
+ * @sc:                SCSI-ML command pointer
+ * @cmd:               iscsi cmd pointer
+ *
+ */
+static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc,
+                                     struct bnx2i_cmd *cmd)
+{
+       u32 dword;
+       int lpcnt;
+       u8 *srcp;
+       u32 *dstp;
+       u32 scsi_lun[2];
+
+       int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
+       cmd->req.lun[0] = ntohl(scsi_lun[0]);
+       cmd->req.lun[1] = ntohl(scsi_lun[1]);
+
+       lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
+       srcp = (u8 *) sc->cmnd;
+       dstp = (u32 *) cmd->req.cdb;
+       while (lpcnt--) {
+               memcpy(&dword, (const void *) srcp, 4);
+               *dstp = cpu_to_be32(dword);
+               srcp += 4;
+               dstp++;
+       }
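+       /* copy any CDB bytes left over when cmd_len is not a multiple of
+        * four (e.g. a 6-byte CDB leaves two trailing bytes)
+        */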
+       if (sc->cmd_len & 0x3) {
+               dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
+               *dstp = cpu_to_be32(dword);
+       }
+}
+
+
+
+/**
+ * bnx2i_queuecommand - SCSI ML - bnx2i interface function to issue new
+ *                     commands to be shipped to the iscsi target
+ *
+ * @sc:                SCSI-ML command pointer
+ * @done:              callback function pointer to complete the task
+ *
+ * handles SCSI command queued by SCSI-ML, allocates a command structure,
+ *     assigning CMDSN, mapping SG buffers and delivers it to CNIC for further
+ *     processing. This routine also takes care of iSCSI command window full
+ *     condition, if session is in recovery process and other error conditions
+ */
+int bnx2i_queuecommand(struct scsi_cmnd *sc,
+                      void (*done) (struct scsi_cmnd *))
+{
+       struct bnx2i_sess *sess;
+       struct bnx2i_conn *conn;
+       struct bnx2i_cmd *cmd;
+
+       sc->scsi_done = done;
+       sc->result = 0;
+       sess = iscsi_hostdata(sc->device->host->hostdata);
+
+#define iscsi_cmd_win_closed(_sess)    \
+       ((int) (_sess->max_cmdsn - _sess->cmdsn) < 0)
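+       /* signed difference keeps the window-full test correct across
+        * 32-bit CmdSN wrap-around
+        */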
+
+       if (iscsi_cmd_win_closed(sess))
+               goto iscsi_win_closed;
+
+       if ((sess->state & BNX2I_SESS_IN_SHUTDOWN) ||
+               (sess->state & BNX2I_SESS_IN_LOGOUT))
+               goto dev_not_found;
+
+       if (sess->recovery_state) {
+               if (sess->old_recovery_state != sess->recovery_state)
+                       sess->old_recovery_state = sess->recovery_state;
+
+               if (sess->recovery_state & ISCSI_SESS_RECOVERY_FAILED)
+                       goto dev_not_found;
+               else if (!(sess->recovery_state & ISCSI_SESS_RECOVERY_COMPLETE))
+                       goto iscsi_win_closed;
+               else {  /* ISCSI_SESS_RECOVERY_COMPLETE */
+                       sess->old_recovery_state = sess->recovery_state = 0;
+               }
+       }
+
+       if (test_bit(ADAPTER_STATE_LINK_DOWN, &sess->hba->adapter_state))
+               goto iscsi_win_closed;
+
+       cmd = bnx2i_alloc_cmd(sess);
+       if (cmd == NULL)
+               /* should never happen as cmd list size == SHT->can_queue */
+               goto cmd_not_accepted;
+
+       cmd->conn = conn = sess->lead_conn;
+       cmd->scsi_cmd = sc;
+       cmd->req.total_data_transfer_length = scsi_bufflen(sc);
+       cmd->iscsi_opcode = cmd->req.op_code = ISCSI_OP_SCSI_CMD;
+       cmd->req.cmd_sn = sess->cmdsn++;
+
+       bnx2i_iscsi_map_sg_list(cmd);
+       bnx2i_cpy_scsi_cdb(sc, cmd);
+
+       cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
+       if (sc->sc_data_direction == DMA_TO_DEVICE) {
+               cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
+               cmd->req.itt |= (ISCSI_TASK_TYPE_WRITE <<
+                                ISCSI_CMD_REQUEST_TYPE_SHIFT);
+               bnx2i_setup_write_cmd_bd_info(cmd);
+       } else {
+               if (scsi_bufflen(sc))
+                       cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
+               cmd->req.itt |= (ISCSI_TASK_TYPE_READ <<
+                                ISCSI_CMD_REQUEST_TYPE_SHIFT);
+       }
+       cmd->req.num_bds = cmd->bd_tbl->bd_valid;
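+       /* commands that carry no data BDs point at the adapter's shared
+        * mp_bd buffer descriptor instead
+        */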
+       if (!cmd->bd_tbl->bd_valid) {
+               cmd->req.bd_list_addr_lo = (u32) sess->hba->mp_bd_dma;
+               cmd->req.bd_list_addr_hi =
+                       (u32) ((u64) sess->hba->mp_bd_dma >> 32);
+               cmd->req.num_bds = 1;
+       }
+
+       cmd->cmd_state = ISCSI_CMD_STATE_INITIATED;
+       sc->SCp.ptr = (char *) cmd;
+
+       if (cmd->req.itt != ITT_INVALID_SIGNATURE) {
+               bnx2i_send_iscsi_scsicmd(conn, cmd);
+               list_add_tail(&cmd->link, &sess->active_cmds);
+               sess->num_active_cmds++;
+       }
+       return 0;
+
+iscsi_win_closed:
+cmd_not_accepted:
+       return SCSI_MLQUEUE_HOST_BUSY;
+
+dev_not_found:
+       sc->result = (DID_NO_CONNECT << 16);
+       scsi_set_resid(sc, scsi_bufflen(sc));
+       sc->scsi_done(sc);
+       return 0;
+}
+
+
+
+/**
+ * bnx2i_iscsi_tmf_timer - iSCSI TMF timeout routine
+ *
+ * @data:              pointer to TMF command struct
+ *
+ * TMF request timeout handler, triggers session recovery process
+ */
+static void bnx2i_iscsi_tmf_timer(unsigned long data)
+{
+       struct bnx2i_cmd *cmd = (struct bnx2i_cmd *) data;
+
+       printk(KERN_ALERT "TMF timer: abort failed, cmd 0x%p\n", cmd);
+       cmd->cmd_state = ISCSI_CMD_STATE_TMF_TIMEOUT;
+       cmd->conn->sess->recovery_state = ISCSI_SESS_RECOVERY_OPEN_ISCSI;
+       wake_up(&cmd->conn->sess->er_wait);
+       iscsi_conn_error(cmd->conn->cls_conn, ISCSI_ERR_CONN_FAILED);
+}
+
+
+/**
+ * bnx2i_initiate_abort_cmd - executes scsi command abort process
+ *
+ * @sc:                SCSI-ML command pointer
+ *
+ * initiate command abort process by requesting CNIC to send
+ *     an iSCSI TMF request to target
+ */
+static int bnx2i_initiate_abort_cmd(struct scsi_cmnd *sc)
+{
+       struct bnx2i_cmd *cmd = (struct bnx2i_cmd *) sc->SCp.ptr;
+       struct bnx2i_cmd *tmf_cmd;
+       struct Scsi_Host *shost;
+       struct bnx2i_conn *conn = cmd->conn;
+       struct bnx2i_sess *sess;
+       struct bnx2i_hba *hba;
+
+       shost = cmd->scsi_cmd->device->host;
+       sess = iscsi_hostdata(shost->hostdata);
+       BUG_ON(shost != sess->host);
+
+       if (sess && (is_sess_active(sess)))
+               hba = sess->hba;
+       else
+               return FAILED;
+
+       if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+               return FAILED;
+
+       if (cmd->scsi_cmd != sc)
+               /* command already completed to scsi mid-layer */
+               goto cmd_not_active;
+
+       tmf_cmd = bnx2i_alloc_cmd(sess);
+       if (tmf_cmd == NULL)
+               goto lack_of_resc;
+
+       sess->tmf_active = 1;
+       tmf_cmd->conn = conn = sess->lead_conn;
+       tmf_cmd->scsi_cmd = NULL;
+       tmf_cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+       tmf_cmd->req.cmd_sn = sess->cmdsn;
+       tmf_cmd->tmf_ref_itt = cmd->req.itt;
+       tmf_cmd->tmf_ref_cmd = cmd;
+       tmf_cmd->tmf_ref_sc = cmd->scsi_cmd;
+       cmd->cmd_state = ISCSI_CMD_STATE_ABORT_PEND;
+       tmf_cmd->cmd_state = ISCSI_CMD_STATE_INITIATED;
+
+#define BNX2I_TMF_TIMEOUT      (10 * HZ)
+       sess->abort_timer.expires = BNX2I_TMF_TIMEOUT + jiffies;
+       sess->abort_timer.function = bnx2i_iscsi_tmf_timer;
+       sess->abort_timer.data = (unsigned long)tmf_cmd;
+       add_timer(&sess->abort_timer);
+
+       bnx2i_send_iscsi_tmf(conn, tmf_cmd);
+
+       /* update iSCSI context for this conn, wait for CNIC to complete */
+       wait_event_interruptible(sess->er_wait, (!conn->ep ||
+                                        (tmf_cmd->cmd_state !=
+                                         ISCSI_CMD_STATE_TMF_TIMEOUT)));
+
+       if (signal_pending(current))
+               flush_signals(current);
+
+       del_timer_sync(&sess->abort_timer);
+
+       if (tmf_cmd->cmd_state == ISCSI_CMD_STATE_TMF_TIMEOUT) {
+               printk(KERN_ALERT "abort: abort failed, cmd 0x%p\n", tmf_cmd);
+               /* TMF timed out, return error status and let SCSI-ML do
+                * session recovery.
+                */
+               list_del_init(&tmf_cmd->link);
+               sess->tmf_active = 0;
+               bnx2i_free_cmd(sess, tmf_cmd);
+               return SUCCESS;
+       }
+
+       list_del_init(&tmf_cmd->link);
+       sess->tmf_active = 0;
+       bnx2i_free_cmd(sess, tmf_cmd);
+
+       if (!cmd->scsi_cmd) {
+               goto cmd_completed;
+       }
+       if ((cmd->scsi_cmd->result & 0xFF0000) == (DID_ABORT << 16)) {
+               cmd->cmd_state = ISCSI_CMD_STATE_CLEANUP_PEND;
+               bnx2i_send_cmd_cleanup_req(hba, cmd);
+               wait_event_interruptible_timeout(sess->er_wait,
+                                                (cmd->cmd_state ==
+                                                 ISCSI_CMD_STATE_CLEANUP_CMPL),
+                                                msecs_to_jiffies(
+                                                 ISCSI_CMD_CLEANUP_TIMEOUT));
+
+               if (signal_pending(current))
+                       flush_signals(current);
+       } else
+               cmd->scsi_cmd->result = (DID_ABORT << 16);
+
+       list_del_init(&cmd->link);
+       bnx2i_return_failed_command(sess, cmd, DID_ABORT);
+       bnx2i_free_cmd(sess, cmd);
+
+cmd_completed:
+cmd_not_active:
+       return SUCCESS;
+
+lack_of_resc:
+       return FAILED;
+}
+
+
+
+static void bnx2i_wait_for_tmf_completion(struct bnx2i_sess *sess)
+{
+       int lpcnt = 20;
+       while (lpcnt-- && sess->tmf_active) {
+               msleep(1000);
+       }
+}
+
+
+/**
+ * bnx2i_abort - 'eh_abort_handler' api function to abort an outstanding
+ *                     scsi command
+ *
+ * @sc:                SCSI-ML command pointer
+ *
+ * SCSI abort request handler.
+ */
+int bnx2i_abort(struct scsi_cmnd *sc)
+{
+       int reason;
+       struct bnx2i_cmd *cmd = (struct bnx2i_cmd *) sc->SCp.ptr;
+
+       if (unlikely(!cmd)) {
+               /* command already completed to scsi mid-layer
+               printk(KERN_INFO "bnx2i_abort: sc 0x%p, not active\n", sc);
+               */
+               return SUCCESS;
+       }
+
+       reason = bnx2i_initiate_abort_cmd(sc);
+       return reason;
+}
+
+
+
+/**
+ * bnx2i_return_failed_command - return failed command back to SCSI-ML
+ *
+ * @sess:              iscsi session pointer
+ * @cmd:               iscsi cmd pointer
+ * @err_code:          SCSI-ML error code, DID_ABORT, DID_BUS_BUSY
+ *
+ * completes scsi command with appropriate error code to SCSI-ML
+ */
+void bnx2i_return_failed_command(struct bnx2i_sess *sess,
+                                struct bnx2i_cmd *cmd, int err_code)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       if (!sc) {
+               return;
+       }
+       sc->result = err_code << 16;
+       scsi_set_resid(sc, scsi_bufflen(cmd->scsi_cmd));
+       cmd->scsi_cmd = NULL;
+       sess->num_active_cmds--;
+       sc->SCp.ptr = NULL;
+       sc->scsi_done(sc);
+}
+
+
+
+/**
+ * bnx2i_host_reset - 'eh_host_reset_handler' entry point
+ *
+ * @sc:                SCSI-ML command pointer
+ *
+ * SCSI host reset handler - iSCSI session recovery
+ */
+int bnx2i_host_reset(struct scsi_cmnd *sc)
+{
+       struct Scsi_Host *shost;
+       struct bnx2i_sess *sess;
+       int rc = 0;
+
+       shost = sc->device->host;
+       sess = iscsi_hostdata(shost->hostdata);
+       printk(KERN_INFO "bnx2i: attempting to reset host, #%d\n",
+                         sess->host->host_no);
+
+       BUG_ON(shost != sess->host);
+       rc = bnx2i_do_iscsi_sess_recovery(sess, DID_RESET);
+
+       if (!list_empty(&sess->active_cmds))
+               bnx2i_flush_active_cmd_queue(sess, DID_NO_CONNECT);
+
+       return rc;
+}
+
+
+
+/**********************************************************************
+ *             open-iscsi interface
+ **********************************************************************/
+
+
+/*
+ * iSCSI Session's hostdata organization:
+ *
+ *    *------------------* <== hostdata_session(host->hostdata)
+ *    | ptr to class sess|
+ *    |------------------| <== iscsi_hostdata(host->hostdata)
+ *    | iscsi_session    |
+ *    *------------------*
+ */
+
+#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
+                                _sz % sizeof(unsigned long))
+
+#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
+#define session_to_cls(_sess)  hostdata_session(_sess->host->hostdata)
+
+
+
+
+/**
+ * bnx2i_register_xport - register a bnx2i device transport name with
+ *                     the iscsi transport module
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * allocates memory for SCSI host template, iSCSI template and registers
+ *     this instance of NX2 device with iSCSI transport kernel module.
+ */
+int bnx2i_register_xport(struct bnx2i_hba *hba)
+{
+       void *mem_ptr;
+       u32 pci_bus_no;
+       u32 pci_dev_no;
+       u32 pci_func_no;
+       u32 extra;
+       struct ethtool_drvinfo drv_info;
+
+       mem_ptr = kmalloc(sizeof(struct scsi_host_template), GFP_KERNEL);
+       hba->scsi_template = mem_ptr;
+       if (hba->scsi_template == NULL) {
+               printk(KERN_ALERT "bnx2i: failed to alloc memory for sht\n");
+               return -ENOMEM;
+       }
+
+       mem_ptr = kmalloc(sizeof(struct iscsi_transport), GFP_KERNEL);
+       hba->iscsi_transport = mem_ptr;
+       if (hba->iscsi_transport == NULL) {
+               printk(KERN_ALERT "mem error for iscsi_transport template\n");
+               goto iscsi_xport_err;
+       }
+
+       mem_ptr = kmalloc(BRCM_ISCSI_XPORT_NAME_SIZE_MAX, GFP_KERNEL);
+       if (mem_ptr == NULL) {
+               printk(KERN_ALERT "failed to alloc memory for xport name\n");
+               goto scsi_name_mem_err;
+       }
+
+       memcpy(hba->scsi_template, (const void *) &bnx2i_host_template,
+              sizeof(struct scsi_host_template));
+       hba->scsi_template->name = mem_ptr;
+       memcpy((void *) hba->scsi_template->name,
+              (const void *) bnx2i_host_template.name,
+              strlen(bnx2i_host_template.name) + 1);
+
+       mem_ptr = kmalloc(BRCM_ISCSI_XPORT_NAME_SIZE_MAX, GFP_KERNEL);
+       if (mem_ptr == NULL) {
+               printk(KERN_ALERT "failed to alloc proc name mem\n");
+               goto scsi_proc_name_mem_err;
+       }
+       hba->scsi_template->proc_name = mem_ptr;
+       /* Can't determine device type, 5706/5708 has 40-bit dma addr limit */
+       hba->scsi_template->dma_boundary = DMA_40BIT_MASK;
+       hba->scsi_template->can_queue = hba->max_sqes -
+                                       BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS;
+       hba->scsi_template->cmd_per_lun = hba->scsi_template->can_queue / 4;
+
+       memcpy((void *) hba->iscsi_transport,
+              (const void *) &bnx2i_iscsi_transport,
+              sizeof(struct iscsi_transport));
+
+       hba->iscsi_transport->host_template = hba->scsi_template;
+
+       mem_ptr = kmalloc(BRCM_ISCSI_XPORT_NAME_SIZE_MAX, GFP_KERNEL);
+       if (mem_ptr == NULL) {
+               printk(KERN_ALERT "mem alloc error, iscsi xport name\n");
+               goto xport_name_mem_err;
+       }
+       hba->iscsi_transport->name = mem_ptr;
+
+       if (hba->cnic && hba->cnic->netdev) {
+               hba->cnic->netdev->ethtool_ops->get_drvinfo(hba->cnic->netdev,
+                                                           &drv_info);
+               sscanf(drv_info.bus_info, "%x:%x:%x.%d", &extra,
+                      &pci_bus_no, &pci_dev_no, &pci_func_no);
+
+               sprintf(mem_ptr, "%s-%.2x%.2x%.2x", BRCM_ISCSI_XPORT_NAME_PREFIX,
+                        (u8)pci_bus_no, (u8)pci_dev_no, (u8)pci_func_no);
+       }
+
+       memcpy((void *) hba->scsi_template->proc_name, (const void *) mem_ptr,
+              strlen(mem_ptr) + 1);
+
+       hba->shost_template = iscsi_register_transport(hba->iscsi_transport);
+       if (!hba->shost_template) {
+               printk(KERN_ALERT "bnx2i: xport reg failed, hba 0x%p\n", hba);
+               goto failed_registration;
+       }
+       printk(KERN_ALERT "bnx2i: netif=%s, iscsi=%s\n",
+                         hba->cnic->netdev->name, hba->scsi_template->proc_name);
+       return 0;
+
+failed_registration:
+       kfree(hba->iscsi_transport->name);
+xport_name_mem_err:
+       kfree(hba->scsi_template->proc_name);
+scsi_proc_name_mem_err:
+       kfree(hba->scsi_template->name);
+scsi_name_mem_err:
+       kfree(hba->iscsi_transport);
+iscsi_xport_err:
+       kfree(hba->scsi_template);
+       printk(KERN_ALERT "register iscsi xport failed, hba 0x%p\n", hba);
+       return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_deregister_xport - unregisters bnx2i adapter's iscsi transport name
+ *
+ * @hba:               pointer to adapter instance
+ *
+ * deallocates memory for the SCSI host template and iSCSI template, and
+ *     deregisters an NX2 device instance
+ */
+int bnx2i_deregister_xport(struct bnx2i_hba *hba)
+{
+       iscsi_unregister_transport(hba->iscsi_transport);
+       hba->shost_template = NULL;
+
+       kfree(hba->scsi_template->proc_name);
+       kfree(hba->scsi_template->name);
+       hba->scsi_template->name = NULL;
+
+       kfree(hba->scsi_template);
+       hba->scsi_template = NULL;
+
+       kfree(hba->iscsi_transport->name);
+       hba->iscsi_transport->name = NULL;
+
+       kfree(hba->iscsi_transport);
+       hba->iscsi_transport = NULL;
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_session_create - create a new iscsi session
+ *
+ * @it:                iscsi transport pointer
+ * @scsit:             scsi transport template pointer
+ * @cmds_max:          max commands supported
+ * @qdepth:            scsi queue depth to support
+ * @initial_cmdsn:     initial iscsi CMDSN to be used for this session
+ * @host_no:           pointer to u32 to return host no
+ *
+ * Creates a new iSCSI session instance on given device.
+ */
+struct iscsi_cls_session *
+       bnx2i_session_create(struct iscsi_transport *it,
+                            struct scsi_transport_template *scsit,
+#ifdef _CREATE_SESS_NEW_
+                            uint16_t cmds_max, uint16_t qdepth,
+#endif
+                            uint32_t initial_cmdsn, uint32_t *host_no)
+{
+       struct bnx2i_hba *hba;
+       struct bnx2i_sess *sess;
+       struct Scsi_Host *shost;
+       struct iscsi_cls_session *cls_session;
+       int ret_code;
+
+       hba = bnx2i_get_hba_from_template(scsit);
+       if (bnx2i_adapter_ready(hba))
+               return NULL;
+
+       shost = scsi_host_alloc(hba->iscsi_transport->host_template,
+                               hostdata_privsize(sizeof(struct bnx2i_sess)));
+       if (!shost)
+               return NULL;
+
+       shost->max_id = 1;
+       shost->max_channel = 1;
+       shost->max_lun = hba->iscsi_transport->max_lun;
+       shost->max_cmd_len = hba->iscsi_transport->max_cmd_len;
+#ifdef _CREATE_SESS_NEW_
+       if (cmds_max)
+               shost->can_queue = cmds_max;
+       if (qdepth)
+               shost->cmd_per_lun = qdepth;
+#endif
+       shost->transportt = scsit;
+       *host_no = shost->host_no;
+       sess = iscsi_hostdata(shost->hostdata);
+
+       if (!sess)
+               goto sess_resc_fail;
+
+       memset(sess, 0, sizeof(struct bnx2i_sess));
+       sess->hba = hba;
+       sess->host = shost;
+
+       /*
+        * For Open-iSCSI, only normal sessions go through bnx2i.
+        * Discovery sessions go through the host TCP/IP stack.
+        */
+       ret_code = bnx2i_iscsi_sess_new(hba, sess);
+       if (ret_code) {
+               /* failed to allocate memory */
+               printk(KERN_ALERT "bnx2i_sess_create: unable to alloc sess\n");
+               goto sess_resc_fail;
+       }
+
+       /* Update CmdSN related parameters */
+       sess->cmdsn = initial_cmdsn;
+       sess->exp_cmdsn = initial_cmdsn + 1;
+       sess->max_cmdsn = initial_cmdsn + 1;
+
+       if (scsi_add_host(shost, NULL))
+               goto add_sh_fail;
+
+       if (!try_module_get(it->owner))
+               goto cls_sess_failed;
+
+       cls_session = iscsi_create_session(shost, it, 0);
+       if (!cls_session)
+               goto module_put;
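+       /* stash the class-session pointer at the head of hostdata, as shown
+        * in the layout diagram above
+        */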
+       *(unsigned long *)shost->hostdata = (unsigned long)cls_session;
+
+       return hostdata_session(shost->hostdata);
+
+module_put:
+       module_put(it->owner);
+cls_sess_failed:
+       scsi_remove_host(shost);
+add_sh_fail:
+       bnx2i_iscsi_sess_release(hba, sess);
+sess_resc_fail:
+       scsi_host_put(shost);
+       return NULL;
+}
+
+
+/**
+ * bnx2i_session_destroy - destroys iscsi session
+ *
+ * @cls_session:       pointer to iscsi cls session
+ *
+ * Destroys previously created iSCSI session instance and releases
+ *     all resources held by it
+ */
+void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct bnx2i_sess *sess = iscsi_hostdata(shost->hostdata);
+       struct module *owner = cls_session->transport->owner;
+
+       iscsi_remove_session(cls_session);
+       scsi_remove_host(shost);
+
+       bnx2i_iscsi_sess_release(sess->hba, sess);
+
+       kfree(sess->target_name);
+       sess->target_name = NULL;
+
+       iscsi_free_session(cls_session);
+       scsi_host_put(shost);
+       module_put(owner);
+}
+
+
+/**
+ * bnx2i_sess_recovery_timeo - session recovery timeout handler
+ *
+ * @cls_session:       pointer to iscsi cls session
+ *
+ * session recovery timeout handling routine
+ */
+void bnx2i_sess_recovery_timeo(struct iscsi_cls_session *cls_session)
+{
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct bnx2i_sess *sess = iscsi_hostdata(shost->hostdata);
+
+       sess->recovery_state |= ISCSI_SESS_RECOVERY_FAILED;
+       wake_up(&sess->er_wait);
+}
+
+
+/**
+ * bnx2i_conn_create - create iscsi connection instance
+ *
+ * @cls_session:       pointer to iscsi cls session
+ * @cid:               iscsi cid as per rfc (not NX2's CID terminology)
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+struct iscsi_cls_conn *bnx2i_conn_create(struct iscsi_cls_session *cls_session,
+                                        uint32_t cid)
+{
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct bnx2i_sess *sess = iscsi_hostdata(shost->hostdata);
+       struct bnx2i_conn *conn;
+       struct iscsi_cls_conn *cls_conn;
+
+       cls_conn = iscsi_create_conn(cls_session, cid);
+       if (!cls_conn)
+               return NULL;
+
+       conn = cls_conn->dd_data;
+       memset(conn, 0, sizeof(struct bnx2i_conn));
+       conn->cls_conn = cls_conn;
+       conn->exp_statsn = STATSN_UPDATE_SIGNATURE;
+       conn->state = CONN_STATE_IDLE;
+       /* Initialize the connection structure */
+       bnx2i_iscsi_conn_new(sess, conn);
+       conn->conn_cid = cid;
+       return cls_conn;
+}
+
+
+/**
+ * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
+ *
+ * @cls_session:       pointer to iscsi cls session
+ * @cls_conn:          pointer to iscsi cls conn
+ * @transport_fd:      64-bit EP handle
+ * @is_leading:        leading connection on this session?
+ *
+ * Binds together iSCSI session instance, iSCSI connection instance
+ *     and the TCP connection. This routine returns an error code if the
+ *     TCP connection does not belong to the device to which the iSCSI
+ *     session/connection is bound
+ */
+int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+                   struct iscsi_cls_conn *cls_conn,
+                   uint64_t transport_fd, int is_leading)
+{
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct bnx2i_sess *sess = iscsi_hostdata(shost->hostdata);
+       struct bnx2i_conn *tmp;
+       struct bnx2i_conn *conn = cls_conn->dd_data;
+       int ret_code;
+       struct bnx2i_endpoint *ep;
+
+       ep = (struct bnx2i_endpoint *) (unsigned long) transport_fd;
+
+       if ((ep->state == EP_STATE_TCP_FIN_RCVD) ||
+           (ep->state == EP_STATE_TCP_RST_RCVD))
+               /* Peer disconnected via FIN or RST */
+               return -EINVAL;
+
+       if (ep->hba != sess->hba) {
+               /* Error - TCP connection does not belong to this device */
+               printk(KERN_ALERT "bnx2i: conn bind, ep=0x%p (%s) does not "
+                                 "belong to hba (%s)\n", ep,
+                                 ep->hba->netdev->name,
+                                 sess->hba->netdev->name);
+               return -EEXIST;
+       }
+       if (!conn->gen_pdu.cmd)
+               conn->gen_pdu.cmd = bnx2i_alloc_cmd(sess);
+
+       /* adjust dma boundary limit which was set to lower bound of 40-bit
+        * address as required by 5706/5708. 5709/57710 does not have any
+        * address limitation requirements. 'dma_mask' parameter is set
+        * by bnx2 module based on device requirements, we just use whatever
+        * is set.
+        */
+       shost->dma_boundary = ep->hba->pcidev->dma_mask;
+
+       /* look-up for existing connection, MC/S is not currently supported */
+       spin_lock_bh(&sess->lock);
+       tmp = NULL;
+       if (!list_empty(&sess->conn_list)) {
+               list_for_each_entry(tmp, &sess->conn_list, link) {
+                       if (tmp == conn)
+                               break;
+               }
+       }
+       if ((tmp != conn) && (conn->sess == sess)) {
+               /* bind iSCSI connection to this session */
+               list_add(&conn->link, &sess->conn_list);
+               if (is_leading)
+                       sess->lead_conn = conn;
+       }
+
+       if (conn->ep) {
+               /* This happens when 'iscsid' is killed and restarted. Daemon
+                * has no clue of the transport handle, but knows the active
+                * conn/sess and tries to rebind a new transport (EP) to the
+                * already active iSCSI session/connection
+                */
+               spin_unlock_bh(&sess->lock);
+               bnx2i_ep_disconnect((uint64_t) (unsigned long) conn->ep);
+               spin_lock_bh(&sess->lock);
+       }
+
+       conn->ep = (struct bnx2i_endpoint *) (unsigned long) transport_fd;
+       conn->ep->conn = conn;
+       conn->ep->sess = sess;
+       conn->state = CONN_STATE_XPORT_READY;
+       conn->iscsi_conn_cid = conn->ep->ep_iscsi_cid;
+       conn->fw_cid = conn->ep->ep_cid;
+
+       ret_code = bnx2i_bind_conn_to_iscsi_cid(conn, ep->ep_iscsi_cid);
+       spin_unlock_bh(&sess->lock);
+
+       /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
+        * driver needs to explicitly replenish RQ index during setup.
+        */
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               bnx2i_put_rq_buf(conn, 0);
+
+       bnx2i_arm_cq_event_coalescing(conn->ep, CNIC_ARM_CQE);
+       return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ *
+ * @cls_conn:          pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ *     this connection
+ */
+void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+       struct bnx2i_conn *conn = cls_conn->dd_data;
+
+       bnx2i_conn_free_login_resources(conn->sess->hba, conn);
+
+       kfree(conn->persist_address);
+       conn->persist_address = NULL;
+       iscsi_destroy_conn(cls_conn);
+}
+
+
+/**
+ * bnx2i_conn_set_param - set iscsi connection parameter
+ *
+ * @cls_conn:          pointer to iscsi cls conn
+ * @param:             parameter type identifier
+ * @buf:               buffer pointer
+ * @buflen:            buffer length
+ *
+ * During FFP migration, the user daemon issues this call to
+ *     update negotiated iSCSI parameters in the driver.
+ */
+int bnx2i_conn_set_param(struct iscsi_cls_conn *cls_conn,
+                        enum iscsi_param param, char *buf, int buflen)
+{
+       struct bnx2i_conn *conn = cls_conn->dd_data;
+       struct bnx2i_sess *sess = conn->sess;
+
+       spin_lock_bh(&sess->lock);
+       if (conn->state != CONN_STATE_IN_LOGIN) {
+               printk(KERN_ERR "bnx2i: can't change param [%d]\n", param);
+               spin_unlock_bh(&sess->lock);
+               return 0;
+       }
+       spin_unlock_bh(&sess->lock);
+       switch (param) {
+       case ISCSI_PARAM_MAX_RECV_DLENGTH:
+               sscanf(buf, "%d", &conn->max_data_seg_len_recv);
+               break;
+       case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+               sscanf(buf, "%d", &conn->max_data_seg_len_xmit);
+               break;
+       case ISCSI_PARAM_HDRDGST_EN:
+               sscanf(buf, "%d", &conn->header_digest_en);
+               break;
+       case ISCSI_PARAM_DATADGST_EN:
+               sscanf(buf, "%d", &conn->data_digest_en);
+               break;
+       case ISCSI_PARAM_INITIAL_R2T_EN:
+               if (conn == sess->lead_conn)
+                       sscanf(buf, "%d", &sess->initial_r2t);
+               break;
+       case ISCSI_PARAM_MAX_R2T:
+               if (conn == sess->lead_conn)
+                       sscanf(buf, "%d", &sess->max_r2t);
+               break;
+       case ISCSI_PARAM_IMM_DATA_EN:
+               if (conn == sess->lead_conn)
+                       sscanf(buf, "%d", &sess->imm_data);
+               break;
+       case ISCSI_PARAM_FIRST_BURST:
+               if (conn == sess->lead_conn)
+                       sscanf(buf, "%d", &sess->first_burst_len);
+               break;
+       case ISCSI_PARAM_MAX_BURST:
+               if (conn == sess->lead_conn)
+                       sscanf(buf, "%d", &sess->max_burst_len);
+               break;
+       case ISCSI_PARAM_PDU_INORDER_EN:
+               if (conn == sess->lead_conn)
+                       sscanf(buf, "%d", &sess->pdu_inorder);
+               break;
+       case ISCSI_PARAM_DATASEQ_INORDER_EN:
+               if (conn == sess->lead_conn)
+                       sscanf(buf, "%d", &sess->dataseq_inorder);
+               break;
+       case ISCSI_PARAM_ERL:
+               if (conn == sess->lead_conn)
+                       sscanf(buf, "%d", &sess->erl);
+               break;
+       case ISCSI_PARAM_IFMARKER_EN:
+               sscanf(buf, "%d", &conn->ifmarker_enable);
+               BUG_ON(conn->ifmarker_enable);
+               break;
+       case ISCSI_PARAM_OFMARKER_EN:
+               sscanf(buf, "%d", &conn->ofmarker_enable);
+               BUG_ON(conn->ofmarker_enable);
+               break;
+       case ISCSI_PARAM_EXP_STATSN:
+               sscanf(buf, "%u", &conn->exp_statsn);
+               break;
+       case ISCSI_PARAM_TARGET_NAME:
+               if (sess->target_name)
+                       break;
+               sess->target_name = kstrdup(buf, GFP_KERNEL);
+               if (!sess->target_name)
+                       return -ENOMEM;
+               break;
+       case ISCSI_PARAM_TPGT:
+               sscanf(buf, "%d", &sess->tgt_prtl_grp);
+               break;
+       case ISCSI_PARAM_PERSISTENT_PORT:
+               sscanf(buf, "%d", &conn->persist_port);
+               break;
+       case ISCSI_PARAM_PERSISTENT_ADDRESS:
+               if (conn->persist_address)
+                       break;
+               conn->persist_address = kstrdup(buf, GFP_KERNEL);
+               if (!conn->persist_address)
+                       return -ENOMEM;
+               break;
+       default:
+               printk(KERN_ALERT "PARAM_UNKNOWN: 0x%x\n", param);
+               break;
+       }
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_conn_get_param - return iscsi connection parameter to caller
+ *
+ * @cls_conn:          pointer to iscsi cls conn
+ * @param:             parameter type identifier
+ * @buf:               buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+                        enum iscsi_param param, char *buf)
+{
+       struct bnx2i_conn *conn;
+       int len;
+
+       conn = (struct bnx2i_conn *)cls_conn->dd_data;
+       if (!conn || !conn->ep ||
+           (conn->ep->state != EP_STATE_ULP_UPDATE_COMPL))
+               return -EINVAL;
+
+       len = 0;
+       switch (param) {
+       case ISCSI_PARAM_MAX_RECV_DLENGTH:
+               len = sprintf(buf, "%u\n", conn->max_data_seg_len_recv);
+               break;
+       case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+               len = sprintf(buf, "%u\n", conn->max_data_seg_len_xmit);
+               break;
+       case ISCSI_PARAM_HDRDGST_EN:
+               len = sprintf(buf, "%d\n", conn->header_digest_en);
+               break;
+       case ISCSI_PARAM_DATADGST_EN:
+               len = sprintf(buf, "%d\n", conn->data_digest_en);
+               break;
+       case ISCSI_PARAM_IFMARKER_EN:
+               len = sprintf(buf, "%u\n", conn->ifmarker_enable);
+               break;
+       case ISCSI_PARAM_OFMARKER_EN:
+               len = sprintf(buf, "%u\n", conn->ofmarker_enable);
+               break;
+       case ISCSI_PARAM_EXP_STATSN:
+               len = sprintf(buf, "%u\n", conn->exp_statsn);
+               break;
+       case ISCSI_PARAM_PERSISTENT_PORT:
+               len = sprintf(buf, "%d\n", conn->persist_port);
+               break;
+       case ISCSI_PARAM_PERSISTENT_ADDRESS:
+               if (conn->persist_address)
+                       len = sprintf(buf, "%s\n", conn->persist_address);
+               break;
+       case ISCSI_PARAM_CONN_PORT:
+               len = sprintf(buf, "%hu\n", conn->ep->cm_sk->dst_port);
+               break;
+       case ISCSI_PARAM_CONN_ADDRESS:
+               len = sprintf(buf, NIPQUAD_FMT "\n",
+                             NIPQUAD(conn->ep->cm_sk->dst_ip));
+               break;
+       default:
+               printk(KERN_ALERT "get_param: conn 0x%p param %d not found\n",
+                                 conn, (u32)param);
+               len = -ENOSYS;
+       }
+
+       return len;
+}
+
+
+/**
+ * bnx2i_session_get_param - returns iscsi session parameter
+ *
+ * @cls_session:       pointer to iscsi cls session
+ * @param:             parameter type identifier
+ * @buf:               buffer pointer
+ *
+ * returns iSCSI session parameters
+ */
+int bnx2i_session_get_param(struct iscsi_cls_session *cls_session,
+                           enum iscsi_param param, char *buf)
+{
+       struct Scsi_Host *shost;
+       struct bnx2i_sess *sess;
+       int len;
+
+       shost = iscsi_session_to_shost(cls_session);
+       sess = iscsi_hostdata(shost->hostdata);
+
+       len = 0;
+       switch (param) {
+       case ISCSI_PARAM_INITIAL_R2T_EN:
+               len = sprintf(buf, "%d\n", sess->initial_r2t);
+               break;
+       case ISCSI_PARAM_MAX_R2T:
+               len = sprintf(buf, "%hu\n", sess->max_r2t);
+               break;
+       case ISCSI_PARAM_IMM_DATA_EN:
+               len = sprintf(buf, "%d\n", sess->imm_data);
+               break;
+       case ISCSI_PARAM_FIRST_BURST:
+               len = sprintf(buf, "%u\n", sess->first_burst_len);
+               break;
+       case ISCSI_PARAM_MAX_BURST:
+               len = sprintf(buf, "%u\n", sess->max_burst_len);
+               break;
+       case ISCSI_PARAM_PDU_INORDER_EN:
+               len = sprintf(buf, "%d\n", sess->pdu_inorder);
+               break;
+       case ISCSI_PARAM_DATASEQ_INORDER_EN:
+               len = sprintf(buf, "%d\n", sess->dataseq_inorder);
+               break;
+       case ISCSI_PARAM_ERL:
+               len = sprintf(buf, "%d\n", sess->erl);
+               break;
+       case ISCSI_PARAM_TARGET_NAME:
+               if (sess->target_name)
+                       len = sprintf(buf, "%s\n", sess->target_name);
+               break;
+       case ISCSI_PARAM_TPGT:
+               len = sprintf(buf, "%d\n", sess->tgt_prtl_grp);
+               break;
+       default:
+               printk(KERN_ALERT "sess_get_param: sess 0x%p, param (0x%x) "
+                                 "not found\n", sess, (u32) param);
+               return -ENOSYS;
+       }
+
+       return len;
+}
+
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ *
+ * @cls_conn:          pointer to iscsi cls conn
+ *
+ * last call in FFP migration to hand over the iscsi conn to the driver
+ */
+int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+       struct bnx2i_conn *conn = (struct bnx2i_conn *) cls_conn->dd_data;
+       struct bnx2i_sess *sess;
+
+       if (conn->state != CONN_STATE_IN_LOGIN) {
+               printk(KERN_ALERT "conn_start: conn 0x%p state 0x%x err!!\n",
+                                 conn, conn->state);
+               return -EINVAL;
+       }
+       sess = conn->sess;
+
+       if ((sess->imm_data || !sess->initial_r2t) &&
+           sess->first_burst_len > sess->max_burst_len) {
+               printk(KERN_ALERT "bnx2i: invalid params, FBL > MBL\n");
+               return -EINVAL;
+       }
+
+       conn->state = CONN_STATE_FFP_STATE;
+       if (sess->lead_conn == conn)
+               sess->state = BNX2I_SESS_IN_FFP;
+
+       conn->ep->state = EP_STATE_ULP_UPDATE_START;
+       bnx2i_update_iscsi_conn(conn);
+
+       conn->ep->ofld_timer.expires = 10*HZ + jiffies;
+       conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+       conn->ep->ofld_timer.data = (unsigned long) conn->ep;
+       add_timer(&conn->ep->ofld_timer);
+       /* update iSCSI context for this conn, wait for CNIC to complete */
+       wait_event_interruptible(conn->ep->ofld_wait,
+                                conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&conn->ep->ofld_timer);
+
+       /* Free login ITT, not required anymore */
+       if (conn->gen_pdu.cmd) {
+               list_del_init(&conn->gen_pdu.cmd->link);
+               bnx2i_free_cmd(sess, conn->gen_pdu.cmd);
+               conn->gen_pdu.cmd = NULL;
+       }
+
+       switch (conn->stop_state) {
+       case STOP_CONN_RECOVER:
+               sess->recovery_state = ISCSI_SESS_RECOVERY_COMPLETE;
+               sess->state = BNX2I_SESS_IN_FFP;
+               iscsi_unblock_session(session_to_cls(sess));
+               wake_up(&sess->er_wait);
+               break;
+       case STOP_CONN_TERM:
+               break;
+       default:
+               ;
+       }
+
+       return 0;
+}
+
+/**
+ * bnx2i_conn_stop - stop any further processing on this connection
+ *
+ * @cls_conn:          pointer to iscsi cls conn
+ * @flag:              reason for freezing this connection
+ *
+ * called to take control of the iscsi conn away from the driver. Could be
+ *     called when login fails, when recovery is to be attempted or during
+ *     connection teardown
+ */
+void bnx2i_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+       struct bnx2i_conn *conn = (struct bnx2i_conn *)cls_conn->dd_data;
+
+       conn->stop_state = flag;
+       iscsi_block_session(session_to_cls(conn->sess));
+
+       /* Free unfinished login ITT, logout, not required anymore */
+       if (conn->gen_pdu.cmd) {
+               list_del_init(&conn->gen_pdu.cmd->link);
+               bnx2i_free_cmd(conn->sess, conn->gen_pdu.cmd);
+               conn->gen_pdu.cmd = NULL;
+       }
+
+       switch (flag) {
+       case STOP_CONN_RECOVER:
+               conn->sess->state = BNX2I_SESS_IN_RECOVERY;
+               if (!conn->sess->recovery_state) {      /* nopout timeout */
+                       unsigned long flags;
+                       struct Scsi_Host *host = conn->sess->host;
+                       spin_lock_irqsave(host->host_lock, flags);
+                       conn->sess->recovery_state =
+                               ISCSI_SESS_RECOVERY_OPEN_ISCSI;
+                       spin_unlock_irqrestore(host->host_lock, flags);
+               }
+               break;
+       case STOP_CONN_TERM:
+               if (conn->sess && (conn->sess->state & BNX2I_SESS_IN_FFP)) {
+                       conn->sess->state = BNX2I_SESS_IN_SHUTDOWN;
+               }
+               break;
+       default:
+               printk(KERN_ERR "bnx2i: invalid conn stop req %d\n", flag);
+       }
+
+       return;
+}
+
+
+/**
+ * bnx2i_conn_send_pdu - iscsi transport callback entry point to send
+ *                     iscsi slow path pdus, such as LOGIN/LOGOUT/NOPOUT, etc
+ *
+ * @cls_conn:          pointer to iscsi cls conn
+ * @hdr:               pointer to iscsi pdu header
+ * @data:              pdu payload buffer
+ * @data_size:         pdu payload length
+ *
+ * sends iSCSI PDUs prepared by the user daemon; only login, logout and
+ *     nop-out pdus flow through this path.
+ */
+int bnx2i_conn_send_pdu(struct iscsi_cls_conn *cls_conn,
+                       struct iscsi_hdr *hdr, char *data,
+                       uint32_t data_size)
+{
+       struct bnx2i_conn *conn;
+       struct bnx2i_cmd *cmnd;
+       uint32_t payload_size;
+       int rc;
+       unsigned long flags;
+
+       if (!cls_conn) {
+               printk(KERN_ALERT "bnx2i_conn_send_pdu: NULL conn ptr\n");
+               return -EIO;
+       }
+       conn = (struct bnx2i_conn *)cls_conn->dd_data;
+       if (!conn->gen_pdu.req_buf) {
+               printk(KERN_ALERT "send_pdu: login buf not allocated\n");
+               /* ERR - buffer not allocated, should not happen */
+               return -EIO;
+       }
+
+       if (conn->gen_pdu.cmd) {
+               if ((conn->state != CONN_STATE_XPORT_READY) &&
+                   (conn->state != CONN_STATE_IN_LOGIN)) {
+                       printk(KERN_ALERT "send_pdu: %d != XPORT_READY\n",
+                                         conn->state);
+                       return -EPERM;
+               }
+               cmnd = conn->gen_pdu.cmd;
+       } else {        /* could be NOPOUT or the LOGOUT request */
+               spin_lock_irqsave(conn->sess->host->host_lock, flags);
+               cmnd = bnx2i_alloc_cmd(conn->sess);
+               spin_unlock_irqrestore(conn->sess->host->host_lock, flags);
+
+               if (!cmnd) {
+                       printk(KERN_ALERT "bnx2i: Error - cmd not allocated\n");
+                       return -EIO;
+               }
+       }
+       memset(conn->gen_pdu.req_buf, 0, ISCSI_CONN_LOGIN_BUF_SIZE);
+       /* Login request, copy hdr & data to buffer in conn struct */
+       memcpy(&conn->gen_pdu.pdu_hdr, (const void *) hdr,
+              sizeof(struct iscsi_hdr));
+
+       cmnd->iscsi_opcode = hdr->opcode;
+       switch (hdr->opcode & ISCSI_OPCODE_MASK) {
+       case ISCSI_OP_LOGIN:
+               if (conn->state == CONN_STATE_XPORT_READY)
+                       conn->state = CONN_STATE_IN_LOGIN;
+               break;
+       case ISCSI_OP_LOGOUT:
+               if (!conn->gen_pdu.cmd)
+                       conn->gen_pdu.cmd = cmnd;
+               conn->state = CONN_STATE_IN_LOGOUT;
+               conn->sess->state = BNX2I_SESS_IN_LOGOUT;
+               if (conn->sess->tmf_active)
+                       bnx2i_wait_for_tmf_completion(conn->sess);
+               break;
+       case ISCSI_OP_NOOP_OUT:
+               break;
+       default:
+               ;
+       }
+
+       conn->gen_pdu.req_buf_size = data_size;
+       payload_size = (hdr->dlength[0] << 16) | (hdr->dlength[1] << 8) |
+                      hdr->dlength[2];
+
+       if (data_size) {
+               memcpy(conn->gen_pdu.req_buf, (const void *)data, data_size);
+               conn->gen_pdu.req_wr_ptr =
+                       conn->gen_pdu.req_buf + payload_size;
+       }
+       cmnd->conn = conn;
+       cmnd->scsi_cmd = NULL;
+       rc = bnx2i_iscsi_send_generic_request(cmnd);
+       return rc;
+}
+
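+/*
+ * Illustrative sketch only (not part of this patch): how a caller could hand
+ * a zero-payload NOP-OUT to bnx2i_conn_send_pdu(). dlength[] carries the
+ * payload length as a 24-bit big-endian value, matching the decode above.
+ * The function name is hypothetical.
+ */
+#if 0
+static int bnx2i_send_nopout_example(struct iscsi_cls_conn *cls_conn)
+{
+       struct iscsi_hdr hdr;
+
+       memset(&hdr, 0, sizeof(hdr));
+       hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+       /* dlength stays zero - no payload to copy into the login buffer */
+       return bnx2i_conn_send_pdu(cls_conn, &hdr, NULL, 0);
+}
+#endif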
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ *
+ * @cls_conn:          pointer to iscsi cls conn
+ * @stats:             pointer to iscsi statistic struct
+ */
+void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+                         struct iscsi_stats *stats)
+{
+       struct bnx2i_conn *conn = (struct bnx2i_conn *) cls_conn->dd_data;
+
+       stats->txdata_octets = conn->total_data_octets_sent;
+       stats->rxdata_octets = conn->total_data_octets_rcvd;
+
+       stats->noptx_pdus = conn->num_nopin_pdus;
+       stats->scsicmd_pdus = conn->num_scsi_cmd_pdus;
+       stats->tmfcmd_pdus = conn->num_tmf_req_pdus;
+       stats->login_pdus = conn->num_login_req_pdus;
+       stats->text_pdus = 0;
+       stats->dataout_pdus = conn->num_dataout_pdus;
+       stats->logout_pdus = conn->num_logout_req_pdus;
+       stats->snack_pdus = 0;
+
+       stats->noprx_pdus = conn->num_nopout_pdus;
+       stats->scsirsp_pdus = conn->num_scsi_resp_pdus;
+       stats->tmfrsp_pdus = conn->num_tmf_resp_pdus;
+       stats->textrsp_pdus = 0;
+       stats->datain_pdus = conn->num_datain_pdus;
+       stats->logoutrsp_pdus = conn->num_logout_resp_pdus;
+       stats->r2t_pdus = conn->num_r2t_pdus;
+       stats->async_pdus = conn->num_async_pdus;
+       stats->rjt_pdus = conn->num_reject_pdus;
+
+       stats->digest_err = 0;
+       stats->timeout_err = 0;
+       stats->custom_length = 0;
+}
+
+
+
+/**
+ * bnx2i_check_nx2_dev_busy - this routine unregisters devices if
+ *                     there are no active conns
+ */
+void bnx2i_check_nx2_dev_busy(void)
+{
+       bnx2i_unreg_dev_all();
+}
+
+
+/**
+ * bnx2i_check_route - checks if the target IP route belongs to one of
+ *                     the NX2 devices
+ *
+ * @dst_addr:          target IP address
+ *
+ * checks if the route resolves to a BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+       struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+       struct bnx2i_hba *hba;
+       struct cnic_dev *cnic = NULL;
+
+       bnx2i_reg_dev_all();
+
+       hba = get_adapter_list_head();
+       if (hba && hba->cnic)
+               cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+
+       if (!cnic) {
+               printk(KERN_ALERT "bnx2i: check route, can't connect using cnic\n");
+               goto no_nx2_route;
+       }
+       hba = bnx2i_find_hba_for_cnic(cnic);
+       if (!hba) {
+               goto no_nx2_route;
+       }
+
+       if (bnx2i_adapter_ready(hba)) {
+               printk(KERN_ALERT "bnx2i: check route, hba not found\n");
+               goto no_nx2_route;
+       }
+       if (hba->netdev->mtu > hba->mtu_supported) {
+               printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+                                 hba->netdev->name, hba->netdev->mtu);
+               printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+                                 hba->mtu_supported);
+               goto no_nx2_route;
+       }
+       return hba;
+no_nx2_route:
+       return NULL;
+}
+
+
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ *
+ * @hba:               pointer to adapter instance
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * destroys the cm_sock structure and the on-chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+                                struct bnx2i_endpoint *ep)
+{
+       if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+               hba->cnic->cm_destroy(ep->cm_sk);
+
+       if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+               ep->state = EP_STATE_DISCONN_COMPL;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+           ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+               printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
+                                 " NW/PCIe trace, driver msgs to developers"
+                                 " for analysis\n");
+               return 1;
+       }
+
+       ep->state = EP_STATE_CLEANUP_START;
+       init_timer(&ep->ofld_timer);
+       ep->ofld_timer.expires = 10*HZ + jiffies;
+       ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+       ep->ofld_timer.data = (unsigned long) ep;
+       add_timer(&ep->ofld_timer);
+
+       bnx2i_ep_destroy_list_add(hba, ep);
+
+       /* destroy iSCSI context, wait for it to complete */
+       bnx2i_send_conn_destroy(hba, ep);
+       wait_event_interruptible(ep->ofld_wait,
+                                (ep->state != EP_STATE_CLEANUP_START));
+
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&ep->ofld_timer);
+       if (ep->state != EP_STATE_CLEANUP_CMPL)
+               /* should never happen */
+               printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+       return 0;
+}
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ *
+ * @dst_addr:          target IP address
+ * @non_blocking:      blocking or non-blocking call
+ * @ep_handle:                 placeholder to return the newly created endpoint handle
+ *
+ * this routine initiates the TCP/IP connection by invoking the Option-2 i/f
+ *     with l5_core and the CNIC. This is a multi-step process of resolving
+ *     the route to the target, creating an iscsi connection context,
+ *     handshaking with the CNIC module to create/initialize the socket
+ *     struct and finally sending down the option-2 request to complete the
+ *     TCP 3-way handshake
+ */
+int bnx2i_ep_connect(struct sockaddr *dst_addr, int non_blocking,
+                    uint64_t *ep_handle)
+{
+       u32 iscsi_cid = BNX2I_CID_RESERVED;
+       struct sockaddr_in *desti;
+       struct sockaddr_in6 *desti6;
+       struct bnx2i_endpoint *endpoint;
+       struct bnx2i_hba *hba;
+       struct cnic_dev *cnic;
+       struct cnic_sockaddr saddr;
+       int rc = 0;
+
+       /*
+        * check if the given destination can be reached through NX2 device
+        */
+
+       hba = bnx2i_check_route(dst_addr);
+       if (!hba) {
+               rc = -ENOMEM;
+               goto check_busy;
+       }
+
+       cnic = hba->cnic;
+       endpoint = bnx2i_alloc_ep(hba);
+       if (!endpoint) {
+               *ep_handle = (uint64_t) 0;
+               rc = -ENOMEM;
+               goto check_busy;
+       }
+
+       endpoint->state = EP_STATE_IDLE;
+       endpoint->teardown_mode = BNX2I_ABORTIVE_SHUTDOWN;
+       endpoint->ep_iscsi_cid = (u16)ISCSI_RESERVED_TAG;
+       iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
+       if (iscsi_cid == (u16) ISCSI_RESERVED_TAG) {
+               printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+               rc = -ENOMEM;
+               goto iscsi_cid_err;
+       }
+       endpoint->hba_age = hba->age;
+
+       rc = bnx2i_alloc_qp_resc(hba, endpoint);
+       if (rc != 0) {
+               printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+               rc = -ENOMEM;
+               goto qp_resc_err;
+       }
+
+       endpoint->ep_iscsi_cid = iscsi_cid;
+       endpoint->state = EP_STATE_OFLD_START;
+       bnx2i_ep_ofld_list_add(hba, endpoint);
+
+       bnx2i_send_conn_ofld_req(hba, endpoint);
+
+       init_timer(&endpoint->ofld_timer);
+       endpoint->ofld_timer.expires = 2 * HZ + jiffies;
+       endpoint->ofld_timer.function = bnx2i_ep_ofld_timer;
+       endpoint->ofld_timer.data = (unsigned long) endpoint;
+       add_timer(&endpoint->ofld_timer);
+       /* Wait for CNIC hardware to setup conn context and return 'cid' */
+       wait_event_interruptible(endpoint->ofld_wait,
+                                endpoint->state != EP_STATE_OFLD_START);
+
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&endpoint->ofld_timer);
+       list_del_init(&endpoint->link);
+
+       if (endpoint->state != EP_STATE_OFLD_COMPL) {
+               rc = -ENOSPC;
+               goto conn_failed;
+       }
+
+       if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               rc = -EINVAL;
+               goto conn_failed;
+       } else
+               rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, endpoint->ep_cid,
+                                    iscsi_cid, &endpoint->cm_sk, endpoint);
+       if (rc) {
+               rc = -EINVAL;
+               goto conn_failed;
+       }
+
+       endpoint->cm_sk->rcv_buf = 256 * 1024;
+       endpoint->cm_sk->snd_buf = 256 * 1024;
+       clear_bit(SK_TCP_TIMESTAMP, &endpoint->cm_sk->tcp_flags);
+
+       memset(&saddr, 0, sizeof(saddr));
+
+       if (dst_addr->sa_family == AF_INET) {
+               desti = (struct sockaddr_in *) dst_addr;
+               saddr.remote.v4 = *desti;
+               saddr.local.v4.sin_port = htons(endpoint->tcp_port);
+               saddr.local.v4.sin_family = desti->sin_family;
+       } else if (dst_addr->sa_family == AF_INET6) {
+               desti6 = (struct sockaddr_in6 *) dst_addr;
+               saddr.remote.v6 = *desti6;
+               saddr.local.v6.sin6_port = htons(endpoint->tcp_port);
+               saddr.local.v6.sin6_family = desti6->sin6_family;
+       }
+
+       endpoint->state = EP_STATE_CONNECT_START;
+       if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               rc = -EINVAL;
+               goto conn_failed;
+       } else
+               rc = cnic->cm_connect(endpoint->cm_sk, &saddr);
+
+       if (rc)
+               goto release_ep;
+
+       bnx2i_map_ep_dbell_regs(endpoint);
+
+       *ep_handle = (uint64_t) (unsigned long) endpoint;
+       return 0;
+
+release_ep:
+       bnx2i_tear_down_conn(hba, endpoint);
+conn_failed:
+iscsi_cid_err:
+       bnx2i_free_qp_resc(hba, endpoint);
+qp_resc_err:
+       bnx2i_free_ep(endpoint);
+check_busy:
+       *ep_handle = (uint64_t) 0;
+       bnx2i_check_nx2_dev_busy();
+       return rc;
+}
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishment
+ *
+ * @ep_handle:                 TCP connection (endpoint) handle
+ * @timeout_ms:        timeout value in milliseconds
+ *
+ * polls for the TCP connect request to complete
+ */
+int bnx2i_ep_poll(uint64_t ep_handle, int timeout_ms)
+{
+       struct bnx2i_endpoint *ep;
+       int rc = 0;
+
+       ep = (struct bnx2i_endpoint *) (unsigned long) ep_handle;
+       if (!ep)
+               return -EINVAL;
+       if ((ep->state == EP_STATE_IDLE) ||
+           (ep->state == EP_STATE_OFLD_FAILED))
+               return -1;
+       if (ep->state == EP_STATE_CONNECT_COMPL)
+               return 1;
+
+       rc = wait_event_interruptible_timeout(ep->ofld_wait,
+                                             ((ep->state ==
+                                               EP_STATE_OFLD_FAILED) ||
+                                             (ep->state ==
+                                              EP_STATE_CONNECT_COMPL)),
+                                             msecs_to_jiffies(timeout_ms));
+       if (ep->state == EP_STATE_OFLD_FAILED)
+               rc = -1;
+
+       if (rc > 0)
+               return 1;
+       else if (!rc)
+               return 0;       /* timeout */
+       else
+               return rc;
+}
+
+/**
+ * bnx2i_ep_tcp_conn_active - checks the EP state to determine whether
+ *             the underlying TCP connection is still active
+ *
+ * @ep:                endpoint pointer
+ *
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *ep)
+{
+       int ret;
+
+       switch (ep->state) {
+       case EP_STATE_CONNECT_START:
+       case EP_STATE_CLEANUP_FAILED:
+       case EP_STATE_OFLD_FAILED:
+       case EP_STATE_DISCONN_TIMEDOUT:
+               ret = 0;
+               break;
+       case EP_STATE_CONNECT_COMPL:
+       case EP_STATE_ULP_UPDATE_START:
+       case EP_STATE_ULP_UPDATE_COMPL:
+       case EP_STATE_TCP_FIN_RCVD:
+       case EP_STATE_TCP_RST_RCVD:
+       case EP_STATE_ULP_UPDATE_FAILED:
+               ret = 1;
+               break;
+       case EP_STATE_CONNECT_FAILED:
+               if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+                       ret = 1;
+               else
+                       ret = 0;
+               break;
+       default:
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ *
+ * @ep_handle:                 TCP connection (endpoint) handle
+ *
+ * executes the TCP connection teardown process and releases endpoint resources
+ */
+void bnx2i_ep_disconnect(uint64_t ep_handle)
+{
+       struct bnx2i_endpoint *ep;
+       struct cnic_dev *cnic;
+       struct bnx2i_hba *hba;
+       struct bnx2i_sess *sess;
+
+       ep = (struct bnx2i_endpoint *) (unsigned long) ep_handle;
+       if (!ep || (ep_handle == -1))
+               return;
+
+       hba = ep->hba;
+       if (ep->state == EP_STATE_IDLE)
+               goto return_ep;
+       cnic = hba->cnic;
+
+       if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+               goto free_resc;
+       if (ep->hba_age != hba->age)
+               goto dev_reset;
+
+       if (!bnx2i_ep_tcp_conn_active(ep))
+               goto destroy_conn;
+
+       ep->state = EP_STATE_DISCONN_START;
+
+       init_timer(&ep->ofld_timer);
+       ep->ofld_timer.expires = 20 * HZ + jiffies;
+       ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+       ep->ofld_timer.data = (unsigned long) ep;
+       add_timer(&ep->ofld_timer);
+
+       if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               if (ep->teardown_mode == BNX2I_GRACEFUL_SHUTDOWN)
+                       cnic->cm_close(ep->cm_sk);
+               else
+                       cnic->cm_abort(ep->cm_sk);
+       } else
+               goto free_resc;
+
+       /* wait for option-2 conn teardown */
+       wait_event_interruptible(ep->ofld_wait,
+                                ep->state != EP_STATE_DISCONN_START);
+
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&ep->ofld_timer);
+
+destroy_conn:
+       if (bnx2i_tear_down_conn(hba, ep))
+               return;
+
+dev_reset:
+       /* in case of 3-way handshake failure, there won't be any binding
+        * between EP and SESS
+        */
+       if (ep->sess)
+               bnx2i_flush_active_cmd_queue(ep->sess, DID_BUS_BUSY);
+
+free_resc:
+       bnx2i_free_qp_resc(hba, ep);
+return_ep:
+       /* check if session recovery in progress */
+       sess = ep->sess;
+       bnx2i_free_ep(ep);
+       if (sess)
+               wake_up(&sess->er_wait);
+       bnx2i_check_nx2_dev_busy();
+
+       return;
+}
+
+
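+/*
+ * Illustrative sketch only (not part of this patch): the order in which the
+ * iscsi transport core is expected to drive the option-2 endpoint calls
+ * defined above.  The function name is hypothetical and error handling is
+ * trimmed to the essentials.
+ */
+#if 0
+static int bnx2i_ep_usage_example(struct sockaddr *dst_addr)
+{
+       uint64_t ep_handle = 0;
+       int rc;
+
+       /* resolve route, offload the conn and start the TCP 3-way handshake */
+       rc = bnx2i_ep_connect(dst_addr, 1, &ep_handle);
+       if (rc)
+               return rc;
+
+       /* poll until the handshake completes (1), times out (0) or fails (<0) */
+       rc = bnx2i_ep_poll(ep_handle, 5000);
+       if (rc != 1) {
+               bnx2i_ep_disconnect(ep_handle);
+               return rc ? rc : -ETIMEDOUT;
+       }
+
+       /* connection is up; tear it down when the session goes away */
+       bnx2i_ep_disconnect(ep_handle);
+       return 0;
+}
+#endif
+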
+int bnx2i_check_ioctl_signature(struct bnx2i_ioctl_header *ioc_hdr)
+{
+       if (strcmp(ioc_hdr->signature, BNX2I_MGMT_SIGNATURE))
+               return -EPERM;
+       return 0;
+}
+
+static int bnx2i_tcp_port_count_ioctl(struct file *file, unsigned long arg)
+{
+       struct bnx2i_get_port_count __user *user_ioc =
+               (struct bnx2i_get_port_count __user *)arg;
+       struct bnx2i_get_port_count ioc_req;
+       int error = 0;
+       unsigned int count = 0;
+
+       if (copy_from_user(&ioc_req, user_ioc, sizeof(ioc_req))) {
+               error = -EFAULT;
+               goto out;
+       }
+
+       error = bnx2i_check_ioctl_signature(&ioc_req.hdr);
+       if (error)
+               goto out;
+
+       if (bnx2i_tcp_port_tbl.num_required > 0)
+               count = bnx2i_tcp_port_tbl.num_required;
+
+       ioc_req.port_count = count;
+
+       if (copy_to_user(&user_ioc->port_count, &ioc_req.port_count,
+                        sizeof(ioc_req.port_count))) {
+               error = -EFAULT;
+               goto out;
+       }
+
+out:
+       return error;
+}
+
+
+static int bnx2i_tcp_port_ioctl(struct file *file, unsigned long arg)
+{
+       struct bnx2i_set_port_num __user *user_ioc =
+               (struct bnx2i_set_port_num __user *)arg;
+       struct bnx2i_set_port_num ioc_req;
+       struct bnx2i_set_port_num *ioc_req_mp = NULL;
+       int ioc_msg_size = sizeof(ioc_req);
+       int error;
+       int i;
+
+       if (copy_from_user(&ioc_req, user_ioc, ioc_msg_size)) {
+               error = -EFAULT;
+               goto out;
+       }
+
+       error = bnx2i_check_ioctl_signature(&ioc_req.hdr);
+       if (error)
+               goto out;
+
+       if (ioc_req.num_ports > 1) {
+               ioc_msg_size += (ioc_req.num_ports - 1) *
+                               sizeof(ioc_req.tcp_port[0]);
+
+               ioc_req_mp = kmalloc(ioc_msg_size, GFP_KERNEL);
+               if (!ioc_req_mp) {
+                       error = -ENOMEM;
+                       goto out;
+               }
+
+               if (copy_from_user(ioc_req_mp, user_ioc, ioc_msg_size)) {
+                       error = -EFAULT;
+                       goto out_kfree;
+               }
+       }
+
+       if (ioc_req.num_ports)
+               bnx2i_tcp_port_new_entry(ioc_req.tcp_port[0]);
+
+       if (ioc_req_mp) {
+               for (i = 1; i < ioc_req_mp->num_ports; i++)
+                       bnx2i_tcp_port_new_entry(ioc_req_mp->tcp_port[i]);
+               kfree(ioc_req_mp);
+       }
+
+       return 0;
+
+out_kfree:
+       kfree(ioc_req_mp);
+out:
+       return error;
+}
+
+
+/*
+ * bnx2i_ioctl_init: initialization routine, registers char driver
+ */
+int bnx2i_ioctl_init(void)
+{
+       int ret;
+
+        /* Register char device node */
+        ret = register_chrdev(0, "bnx2i", &bnx2i_mgmt_fops);
+
+        if (ret < 0) {
+                printk(KERN_ERR "bnx2i: failed to register device node\n");
+                return ret;
+        }
+
+        bnx2i_major_no = ret;
+
+       return 0;
+}
+
+void bnx2i_ioctl_cleanup(void)
+{
+       if (bnx2i_major_no) {
+               unregister_chrdev(bnx2i_major_no, "bnx2i");
+       }
+}
+
+/*
+ * bnx2i_mgmt_open -  "open" entry point
+ */
+static int bnx2i_mgmt_open(struct inode *inode, struct file *filep)
+{
+        /* only allow access to admin user */
+        if (!capable(CAP_SYS_ADMIN)) {
+                return -EACCES;
+       }
+
+        return 0;
+}
+
+/*
+ * bnx2i_mgmt_release - "release" entry point
+ */
+static int bnx2i_mgmt_release(struct inode *inode, struct file *filep)
+{
+        return 0;
+}
+
+
+
+/*
+ * bnx2i_mgmt_ioctl - char driver ioctl entry point
+ */
+static int bnx2i_mgmt_ioctl(struct inode *node, struct file *file,
+                           unsigned int cmd, unsigned long arg)
+{
+       long rc = 0;
+       switch (cmd) {
+               case BNX2I_IOCTL_GET_PORT_REQ:
+                       rc = bnx2i_tcp_port_count_ioctl(file, arg);
+                       break;
+               case BNX2I_IOCTL_SET_TCP_PORT:
+                       rc = bnx2i_tcp_port_ioctl(file, arg);
+                       break;
+               default:
+                       printk(KERN_ERR "bnx2i: unknown ioctl cmd %x\n", cmd);
+                       return -ENOTTY;
+       }
+
+       return rc;
+}
+
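+/*
+ * Illustrative sketch only (not part of this patch): how a user space
+ * management tool might exercise the ioctls handled above.  The device node
+ * path "/dev/bnx2i" is hypothetical - this file only registers a dynamic
+ * char major and does not create the node itself.
+ *
+ *     struct bnx2i_get_port_count ioc;
+ *     int fd = open("/dev/bnx2i", O_RDWR);
+ *
+ *     memset(&ioc, 0, sizeof(ioc));
+ *     strcpy(ioc.hdr.signature, BNX2I_MGMT_SIGNATURE);
+ *     if (fd >= 0 && !ioctl(fd, BNX2I_IOCTL_GET_PORT_REQ, &ioc))
+ *             printf("tcp ports required: %u\n", ioc.port_count);
+ */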
+
+#ifdef CONFIG_COMPAT
+
+static int bnx2i_tcp_port_count_compat_ioctl(struct file *file, unsigned long arg)
+{
+       struct bnx2i_get_port_count __user *user_ioc =
+               (struct bnx2i_get_port_count __user *)arg;
+       struct bnx2i_get_port_count *ioc_req =
+               compat_alloc_user_space(sizeof(struct bnx2i_get_port_count));
+       int error;
+       unsigned int count = 0;
+
+       if (clear_user(ioc_req, sizeof(*ioc_req)))
+               return -EFAULT;
+
+       if (copy_in_user(ioc_req, user_ioc, sizeof(*ioc_req))) {
+               error = -EFAULT;
+               goto out;
+       }
+
+       error = bnx2i_check_ioctl_signature(&ioc_req->hdr);
+       if (error)
+               goto out;
+
+       if (bnx2i_tcp_port_tbl.num_required > 0)
+               count = bnx2i_tcp_port_tbl.num_required;
+
+       if (copy_to_user(&ioc_req->port_count, &count,
+                        sizeof(ioc_req->port_count))) {
+               error = -EFAULT;
+               goto out;
+       }
+
+       if (copy_in_user(&user_ioc->port_count, &ioc_req->port_count,
+                        sizeof(u32))) {
+               error = -EFAULT;
+               goto out;
+       }
+       return 0;
+
+out:
+       return error;
+}
+
+static int bnx2i_tcp_port_compat_ioctl(struct file *file, unsigned long arg)
+{
+       struct bnx2i_set_port_num __user *user_ioc =
+               (struct bnx2i_set_port_num __user *)arg;
+       struct bnx2i_set_port_num *ioc_req =
+               compat_alloc_user_space(sizeof(struct bnx2i_set_port_num));
+       struct bnx2i_set_port_num *ioc_req_mp = NULL;
+       int ioc_msg_size = sizeof(*ioc_req);
+       int error;
+       int i;
+
+       if (clear_user(ioc_req, sizeof(*ioc_req)))
+               return -EFAULT;
+
+       if (copy_in_user(ioc_req, user_ioc, ioc_msg_size)) {
+               error = -EFAULT;
+               goto out;
+       }
+
+       error = bnx2i_check_ioctl_signature(&ioc_req->hdr);
+       if (error)
+               goto out;
+
+       if (ioc_req->num_ports > 1) {
+               ioc_msg_size += (ioc_req->num_ports - 1) *
+                               sizeof(ioc_req->tcp_port[0]);
+
+               ioc_req_mp = compat_alloc_user_space(ioc_msg_size);
+               if (!ioc_req_mp) {
+                       error = -ENOMEM;
+                       goto out;
+               }
+
+               if (copy_in_user(ioc_req_mp, user_ioc, ioc_msg_size)) {
+                       error = -EFAULT;
+                       goto out;
+               }
+
+               i = 0;
+               while (i < ioc_req_mp->num_ports)
+                       bnx2i_tcp_port_new_entry(ioc_req_mp->tcp_port[i++]);
+
+       } else if (ioc_req->num_ports == 1)
+               bnx2i_tcp_port_new_entry(ioc_req->tcp_port[0]);
+
+out:
+       return error;
+}
+
+
+/*
+ * bnx2i_mgmt_compat_ioctl - char node ioctl entry point
+ */
+static long bnx2i_mgmt_compat_ioctl(struct file *file,
+                                   unsigned int cmd, unsigned long arg)
+{
+       int rc = -ENOTTY;
+
+       switch (cmd) {
+               case BNX2I_IOCTL_GET_PORT_REQ:
+                       rc = bnx2i_tcp_port_count_compat_ioctl(file, arg);
+                       break;
+               case BNX2I_IOCTL_SET_TCP_PORT:
+                       rc = bnx2i_tcp_port_compat_ioctl(file, arg);
+                       break;
+       }
+
+        return rc;
+}
+
+#endif
+
+/*
+ * File operations structure - management interface
+ */
+struct file_operations bnx2i_mgmt_fops = {
+        .owner = THIS_MODULE,
+        .open = bnx2i_mgmt_open,
+        .release = bnx2i_mgmt_release,
+        .ioctl = bnx2i_mgmt_ioctl,
+#ifdef CONFIG_COMPAT
+        .compat_ioctl = bnx2i_mgmt_compat_ioctl,
+#endif
+};
+
+
+/*
+ * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
+ * used while registering with the iSCSI transport module.
+ */
+struct scsi_host_template bnx2i_host_template = {
+       .module                         = THIS_MODULE,
+       .name                           = "Broadcom Offload iSCSI Initiator",
+       .queuecommand                   = bnx2i_queuecommand,
+       .eh_abort_handler               = bnx2i_abort,
+       .eh_host_reset_handler          = bnx2i_host_reset,
+       .bios_param                     = NULL,
+       .can_queue                      = 128,
+       .max_sectors                    = 127,
+       .this_id                        = -1,
+       .cmd_per_lun                    = 64,
+       .use_clustering                 = ENABLE_CLUSTERING,
+       .sg_tablesize                   = ISCSI_MAX_BDS_PER_CMD,
+       .proc_name                      = NULL
+};
+
+
+
+struct iscsi_transport bnx2i_iscsi_transport = {
+       .owner                  = THIS_MODULE,
+       .name                   = "bnx2i",
+       .caps                   = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T
+                                 | CAP_DATADGST,
+       .param_mask             = ISCSI_MAX_RECV_DLENGTH |
+                                 ISCSI_MAX_XMIT_DLENGTH |
+                                 ISCSI_HDRDGST_EN |
+                                 ISCSI_DATADGST_EN |
+                                 ISCSI_INITIAL_R2T_EN |
+                                 ISCSI_MAX_R2T |
+                                 ISCSI_IMM_DATA_EN |
+                                 ISCSI_FIRST_BURST |
+                                 ISCSI_MAX_BURST |
+                                 ISCSI_PDU_INORDER_EN |
+                                 ISCSI_DATASEQ_INORDER_EN |
+                                 ISCSI_ERL |
+                                 ISCSI_CONN_PORT |
+                                 ISCSI_CONN_ADDRESS |
+                                 ISCSI_EXP_STATSN |
+                                 ISCSI_PERSISTENT_PORT |
+                                 ISCSI_PERSISTENT_ADDRESS |
+                                 ISCSI_TARGET_NAME |
+                                 ISCSI_TPGT,
+       .host_template          = &bnx2i_host_template,
+       .sessiondata_size       = sizeof(struct bnx2i_sess),
+       .conndata_size          = sizeof(struct bnx2i_conn),
+       .max_conn               = 1,
+       .max_cmd_len            = 16,
+       .max_lun                = 512,
+       .create_session         = bnx2i_session_create,
+       .destroy_session        = bnx2i_session_destroy,
+       .create_conn            = bnx2i_conn_create,
+       .bind_conn              = bnx2i_conn_bind,
+       .destroy_conn           = bnx2i_conn_destroy,
+       .set_param              = bnx2i_conn_set_param,
+       .get_conn_param         = bnx2i_conn_get_param,
+       .get_session_param      = bnx2i_session_get_param,
+       .start_conn             = bnx2i_conn_start,
+       .stop_conn              = bnx2i_conn_stop,
+       .send_pdu               = bnx2i_conn_send_pdu,
+       .get_stats              = bnx2i_conn_get_stats,
+       /* TCP connect - disconnect - option-2 interface calls */
+       .ep_connect             = bnx2i_ep_connect,
+       .ep_poll                = bnx2i_ep_poll,
+       .ep_disconnect          = bnx2i_ep_disconnect,
+       /* Error recovery timeout call */
+       .session_recovery_timedout = bnx2i_sess_recovery_timeo
+};
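+
+/*
+ * Illustrative sketch only (not part of this patch): the transport template
+ * above is normally handed to the iSCSI transport class from the module init
+ * path (bnx2i_init.c, not shown in this hunk), roughly as follows.
+ */
+#if 0
+static struct scsi_transport_template *bnx2i_scsi_xport_template;
+
+static int bnx2i_register_xport_example(void)
+{
+       bnx2i_scsi_xport_template =
+               iscsi_register_transport(&bnx2i_iscsi_transport);
+       if (!bnx2i_scsi_xport_template)
+               return -ENODEV;
+       return 0;
+}
+#endif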
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644 (file)
index 0000000..44a4bae
--- /dev/null
@@ -0,0 +1,788 @@
+/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2008 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+#include <linux/ethtool.h>
+
+#ifdef _SYSFS_INCL_
+
+#define BNX2I_SYSFS_VERSION    0x2
+
+#define BNX2I_PCICFG_REG_WINDOW_ADDRESS                0x00000078
+#define BNX2I_PCICFG_REG_WINDOW_ADDRESS_VAL    (0xfffffL<<2)
+#define BNX2I_PCICFG_REG_WINDOW                        0x00000080
+
+#define BNX2I_CP_IDLE_COUNTER          (BNX2_CP_SCRATCH + 0x28 + 0x10)
+#define BNX2I_COM_IDLE_COUNTER         (BNX2_COM_SCRATCH + 0x0A8 + 0x10)
+#define BNX2I_TPAT_IDLE_COUNTER                (BNX2_TPAT_SCRATCH + 0x048 + 0x810)
+#define BNX2I_RXP_IDLE_COUNTER         (BNX2_RXP_SCRATCH + 0x0E8 + 0x10)
+#define BNX2I_TXP_IDLE_COUNTER         (BNX2_TXP_SCRATCH + 0x048 + 0x10)
+
+#define BNX2I_TXP_TDMA_IDLE_COUNTER    (BNX2I_TXP_IDLE_COUNTER + 0x8)
+#define BNX2I_TXP_CTX_IDLE_COUNTER     (BNX2I_TXP_IDLE_COUNTER + 0x10)
+#define BNX2I_TXP_HDRQ_IDLE_COUNTER    (BNX2I_TXP_IDLE_COUNTER + 0x18)
+
+#define CTX_READ_TIMEOUT               2 * HZ
+
+#define BNX2I_CP_CTX_ADDR_HI                   (BNX2_CP_SCRATCH + 0x68)
+#define BNX2I_CP_CTX_ADDR_LO                   (BNX2_CP_SCRATCH + 0x6C)
+#define BNX2I_CP_CTX_ID                                (BNX2_CP_SCRATCH + 0x70)
+
+
+
+static u32
+bnx2i_reg_rd_ind(struct bnx2i_hba *hba, u32 offset)
+{
+        u32 val;
+
+        spin_lock_bh(&hba->lock);
+        REG_WR(hba, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+        val = REG_RD(hba, BNX2_PCICFG_REG_WINDOW);
+        spin_unlock_bh(&hba->lock);
+        return val;
+}
+
+static void
+bnx2i_reg_wr_ind(struct bnx2i_hba *hba, u32 offset, u32 val)
+{
+        spin_lock_bh(&hba->lock);
+        REG_WR(hba, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+        REG_WR(hba, BNX2_PCICFG_REG_WINDOW, val);
+        spin_unlock_bh(&hba->lock);
+}
+
+static void bnx2i_read_idle_counter(struct bnx2i_hba *hba, u32 offset,
+                                   u64 *counter)
+{
+       u32 cntr32_lsb;
+       u32 cntr32_msb;
+       cntr32_lsb = bnx2i_reg_rd_ind(hba, offset);
+       cntr32_msb = bnx2i_reg_rd_ind(hba, (offset + 4));
+       *counter = ((u64) cntr32_lsb << 32) | (u64) cntr32_msb;
+}
+
+void bnx2i_read_mips_idle_counters(struct bnx2i_hba *hba)
+{
+       bnx2i_read_idle_counter(hba, BNX2I_CP_IDLE_COUNTER,
+                               &hba->mips_idle.cp_idle_count);
+       bnx2i_read_idle_counter(hba, BNX2I_TXP_IDLE_COUNTER,
+                               &hba->mips_idle.txp_idle_count);
+       bnx2i_read_idle_counter(hba, BNX2I_TXP_TDMA_IDLE_COUNTER,
+                               &hba->mips_idle.txp_tdma_count);
+       bnx2i_read_idle_counter(hba, BNX2I_TXP_CTX_IDLE_COUNTER,
+                               &hba->mips_idle.txp_ctx_count);
+       bnx2i_read_idle_counter(hba, BNX2I_TXP_HDRQ_IDLE_COUNTER,
+                               &hba->mips_idle.txp_hdrq_count);
+       bnx2i_read_idle_counter(hba, BNX2I_TPAT_IDLE_COUNTER,
+                               &hba->mips_idle.tpat_idle_count);
+       bnx2i_read_idle_counter(hba, BNX2I_RXP_IDLE_COUNTER,
+                               &hba->mips_idle.rxp_idle_count);
+       bnx2i_read_idle_counter(hba, BNX2I_COM_IDLE_COUNTER,
+                               &hba->mips_idle.com_idle_count);
+}
+
+
+void bnx2i_init_mips_idle_counters(struct bnx2i_hba *hba)
+{
+       memset(&hba->mips_idle, 0x00, sizeof(struct mips_idle_count));
+}
+
+
+int bnx2i_select_ctx_dump_cid(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+       struct bnx2i_conn *conn;
+       u32 addr_lo, addr_hi;
+       u32 cid, read_cid;
+       int ret = 0;
+       int i = 2 * HZ;
+
+       if (!hba->regview)
+               return -EPERM;
+
+       if (hba->ctx_read_cnt % 4)
+               printk(KERN_INFO "bnx2i - ctx read, previous request was "
+                                "partially consumed, %d\n",
+                                hba->ctx_read_cnt % 4);
+
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+       if (!conn) {
+               printk(KERN_INFO "bnx2i - ctx read, iscsi_cid #%d "
+                                "not active\n", iscsi_cid);
+               return -EINVAL;
+       }
+
+       hba->ctx_read_cnt = 0;
+       cid = conn->ep->ep_cid;
+       addr_lo = (u64) hba->ctx_dma_hndl & 0xffffffff;
+       addr_hi = (u64) hba->ctx_dma_hndl >> 32;
+
+       bnx2i_reg_wr_ind(hba, BNX2I_CP_CTX_ADDR_HI, addr_hi);
+       bnx2i_reg_wr_ind(hba, BNX2I_CP_CTX_ADDR_LO, addr_lo);
+       bnx2i_reg_wr_ind(hba, BNX2I_CP_CTX_ID, cid);
+       msleep(10);
+       read_cid = bnx2i_reg_rd_ind(hba, BNX2I_CP_CTX_ID);
+       while (!read_cid && i--) {
+               msleep(1000 / HZ);
+               read_cid = bnx2i_reg_rd_ind(hba, BNX2I_CP_CTX_ID);
+       }
+
+       if (read_cid) {
+               ret = -1;
+               printk(KERN_INFO "bnx2i - error in context read\n");
+       }
+
+       return ret;
+}
+
+
+int bnx2i_list_iscsi_cid(struct bnx2i_hba *hba, u32 *iscsi_cid_arr,
+                        u32 *cid_arr)
+{
+       int i, idx;
+       struct bnx2i_conn *conn;
+
+       for (i = 0, idx = 0; i < hba->cid_que.cid_q_max_idx; i++) {
+               if (hba->cid_que.conn_cid_tbl[i]) {
+                       conn = hba->cid_que.conn_cid_tbl[i];
+                       iscsi_cid_arr[idx] = i;
+                       if (conn->ep)
+                               cid_arr[idx] = conn->ep->ep_cid;
+                       idx++;
+               }
+       }
+       return idx;
+}
+
+
+/*
+ * marks that the iscsi conn encountered an error; this 'iscsi_cid' will be
+ * reported to the 'ictx' app the next time it polls
+ */
+void bnx2i_setup_ictx_dump(struct bnx2i_hba *hba,
+                          struct bnx2i_conn *conn)
+{
+       if (hba->ictx_poll_mode && !hba->ictx_poll_cid) {
+               hba->ictx_poll_cid = conn->ep->ep_cid;
+               hba->ictx_poll_iscsi_cid = conn->ep->ep_iscsi_cid;
+       } else {
+               printk(KERN_ALERT "conn_err - cnxt dump module is either");
+               printk(KERN_ALERT "busy or not in poll mode\n");
+       }
+}
+
+
+static ssize_t bnx2i_show_mips_status(struct class_device *cdev, char *buf)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+       ssize_t len;
+
+       if (!hba->regview)
+               return 0;
+
+       bnx2i_read_mips_idle_counters(hba);
+
+       len = sprintf(buf, "%d\n%lu\n%d\n%llu\n%llu\n"
+                          "%llu\n%llu\n%llu\n%llu\n%llu\n%llu\n",
+                          BNX2I_SYSFS_VERSION, jiffies, HZ,
+                          hba->mips_idle.cp_idle_count,
+                          hba->mips_idle.txp_idle_count,
+                          hba->mips_idle.txp_tdma_count,
+                          hba->mips_idle.txp_ctx_count,
+                          hba->mips_idle.txp_hdrq_count,
+                          hba->mips_idle.tpat_idle_count,
+                          hba->mips_idle.rxp_idle_count,
+                          hba->mips_idle.com_idle_count);
+       return len;
+}
+
+static ssize_t bnx2i_show_net_if_name(struct class_device *cdev, char *buf)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       return sprintf(buf, "%s\n", hba->netdev->name);
+}
+
+static ssize_t bnx2i_show_sq_info(struct class_device *cdev, char *buf)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+static ssize_t bnx2i_set_sq_info(struct class_device *cdev,
+                                const char *buf, size_t count)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+       u32 val;
+
+       if (sscanf(buf, " 0x%x ", &val) > 0) {
+               if ((val >= BNX2I_SQ_WQES_MIN) &&
+                   (val <= BNX2I_SQ_WQES_MAX)) {
+                       hba->max_sqes = val;
+               }
+       }
+       return count;
+}
+
+static ssize_t bnx2i_show_cq_info(struct class_device *cdev, char *buf)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       return sprintf(buf, "0x%x\n", hba->max_cqes);
+}
+
+static ssize_t bnx2i_set_cq_info(struct class_device *cdev,
+                                const char *buf, size_t count)
+{
+       u32 val;
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       if (sscanf(buf, " 0x%x ", &val) > 0) {
+               if ((val >= BNX2I_CQ_WQES_MIN) &&
+                   (val <= BNX2I_CQ_WQES_MAX)) {
+                       hba->max_cqes = val;
+               }
+       }
+       return count;
+}
+
+static ssize_t bnx2i_show_rq_info(struct class_device *cdev, char *buf)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       return sprintf(buf, "0x%x\n", hba->max_rqes);
+}
+
+static ssize_t bnx2i_set_rq_info(struct class_device *cdev, const char *buf,
+                                                       size_t count)
+{
+       u32 val;
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       if (sscanf(buf, " 0x%x ", &val) > 0) {
+               if ((val >= BNX2I_RQ_WQES_MIN) &&
+                   (val <= BNX2I_RQ_WQES_MAX)) {
+                       hba->max_rqes = val;
+               }
+       }
+       return count;
+}
+
+
+static ssize_t bnx2i_show_ccell_info(struct class_device *cdev, char *buf)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+static ssize_t bnx2i_set_ccell_info(struct class_device *cdev,
+                                   const char *buf, size_t count)
+{
+       u32 val;
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       if (sscanf(buf, " 0x%x ", &val) > 0) {
+               if ((val >= BNX2I_CCELLS_MIN) &&
+                   (val <= BNX2I_CCELLS_MAX)) {
+                       hba->num_ccell = val;
+               }
+       }
+       return count;
+}
+
+
+static ssize_t bnx2i_read_pci_trigger_reg(struct class_device *cdev,
+                                         char *buf)
+{
+       u32 reg_val;
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       if (!hba->regview)
+               return 0;
+
+#define PCI_EVENT_TRIGGER_REG  0xCAC   /* DMA WCHAN STAT10 REG */
+       reg_val = readl(hba->regview + PCI_EVENT_TRIGGER_REG);
+       return sprintf(buf, "0x%x\n", reg_val);
+}
+
+
+static ssize_t bnx2i_get_iscsi_cntx_dump(struct class_device *cdev, char *buf)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+       unsigned int *ptr = (unsigned int *) hba->ctx_addr;
+       unsigned int *dst_ptr = (unsigned int *) buf;
+       int unit_sz = sizeof(unsigned int);
+#define SYSFS_BUF_SIZE                 4096
+#define NUM_SYSFS_BUFS_PER_CTX         4
+
+       if ((hba->ctx_read_cnt == NUM_SYSFS_BUFS_PER_CTX) || !ptr)
+               return 0;
+
+       ptr += (((hba->ctx_read_cnt % NUM_SYSFS_BUFS_PER_CTX) *
+                SYSFS_BUF_SIZE) / unit_sz);
+       hba->ctx_read_cnt++;
+       memcpy(dst_ptr, ptr, SYSFS_BUF_SIZE);
+
+       return SYSFS_BUF_SIZE;
+}
+
+static ssize_t bnx2i_select_iscsi_cntx_dump(struct class_device *cdev,
+                                           const char *buf, size_t count)
+{
+       u32 iscsi_cid;
+       int ret = 0;
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       if (sscanf(buf, " 0x%x ", &iscsi_cid) > 0) {
+               ret = bnx2i_select_ctx_dump_cid(hba, iscsi_cid);
+       }
+       if (!ret)
+               ret = count;
+       return ret;
+}
+
+static ssize_t bnx2i_get_active_iscsi_cid_list(struct class_device *cdev,
+                                              char *buf)
+{
+       u32 active_iscsi_cid[32];
+       u32 active_cid[32];
+       int num_cid = 0;
+       ssize_t total_len = 0;
+       char *cur_ptr = buf;
+       int i;
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+       u32 num_ccell = hba->ctx_ccell_tasks & 0xFFFF;
+       u32 num_tasks_per_conn = hba->ctx_ccell_tasks >> 16;
+
+       if (!hba->ictx_poll_mode) {
+               num_cid = bnx2i_list_iscsi_cid(hba, active_iscsi_cid,
+                                              active_cid);
+       }
+       total_len += sprintf(cur_ptr, "0x%x\n", BNX2I_SYSFS_VERSION);
+       cur_ptr = buf + total_len;
+       total_len += sprintf(cur_ptr, "0x%x\n", num_ccell);
+       cur_ptr = buf + total_len;
+       total_len += sprintf(cur_ptr, "0x%x\n", num_tasks_per_conn);
+       if (hba->ictx_poll_mode) {
+               if (hba->ictx_poll_cid) {
+                       cur_ptr = buf + total_len;
+                       total_len += sprintf(cur_ptr, "0x%x, 0x%x\n",
+                                            hba->ictx_poll_iscsi_cid,
+                                            hba->ictx_poll_cid);
+                       hba->ictx_poll_cid = hba->ictx_poll_iscsi_cid = 0;
+               }
+       } else {
+               for (i = 0; i < num_cid; i++) {
+                       cur_ptr = buf + total_len;
+                       total_len += sprintf(cur_ptr, "0x%x, 0x%x\n",
+                                            active_iscsi_cid[i],
+                                            active_cid[i]);
+               }
+       }
+       return total_len;
+}
+
+
+static ssize_t bnx2i_set_iscsi_cid_err_poll_mode(struct class_device *cdev,
+                                                const char *buf, size_t count)
+{
+       u32 poll_mode;
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+
+       if (sscanf(buf, "0x%x", &poll_mode) > 0) {
+               if (poll_mode)
+                       hba->ictx_poll_mode = 1;
+               else
+                       hba->ictx_poll_mode = 0;
+       }
+       return count;
+}
+
+
+static ssize_t bnx2i_get_qp_shmem_dump(struct class_device *cdev, char *buf)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+       int resi_len = hba->sq_cq_size -
+                      (hba->sq_cq_rdp - hba->sq_cq_dump);
+
+       if (!hba->sq_cq_dump || !hba->sq_cq_rdp) {
+               return -EINVAL;
+       } else if ((hba->sq_cq_dump + hba->sq_cq_size) ==
+                  hba->sq_cq_rdp) {
+               kfree(hba->sq_cq_dump);
+               hba->sq_cq_dump = hba->sq_cq_rdp = NULL;
+               return 0;
+       }
+
+       if (resi_len > SYSFS_BUF_SIZE) {
+               resi_len = SYSFS_BUF_SIZE;
+       }
+       memcpy(buf, hba->sq_cq_rdp, resi_len);
+       hba->sq_cq_rdp += resi_len;
+
+       return resi_len;
+}
+
+
+
+
+static void bnx2i_dup_cq_mem(struct bnx2i_hba *hba,
+                            struct bnx2i_conn *conn, int count)
+{
+       struct cqe *cqe_s;
+       struct cqe *cqe_d;
+       int total_cnt = count;
+
+       if (conn->ep->qp.cq_cons_qe == conn->ep->qp.cq_virt)
+               cqe_s = conn->ep->qp.cq_last_qe;
+       else
+               cqe_s = conn->ep->qp.cq_cons_qe - 1;
+       cqe_d = (struct cqe *)hba->sq_cq_rdp;
+       while (count--) {
+               memcpy(cqe_d, cqe_s, sizeof(struct cqe));
+               if (cqe_s == conn->ep->qp.cq_virt)
+                       cqe_s = conn->ep->qp.cq_last_qe;
+               else
+                       cqe_s--;
+               cqe_d++;
+               if ((cqe_d - (struct cqe *)hba->sq_cq_rdp) > total_cnt) {
+                       printk(KERN_ALERT "bnx2i - SQ Dump: mem overflow\n");
+                       break;
+               }
+       }
+}
+
+
+static int bnx2i_init_cq_dump(struct bnx2i_hba *hba, u32 iscsi_cid,
+                             u32 count)
+{
+       struct bnx2i_conn *conn;
+       int cq_size;
+
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!conn) {
+               printk(KERN_ALERT "CQ dump: cid #%x not valid\n",
+                                 iscsi_cid);
+               return -EPERM;
+       }
+
+       if (hba->sq_cq_dump)
+               return -EPERM;
+
+       cq_size = (conn->ep->qp.cq_last_qe - conn->ep->qp.cq_first_qe) + 1;
+
+       if (!count || (count > cq_size))
+               count = cq_size;
+
+       hba->sq_cq_size = count * sizeof(struct cqe);
+
+       if (!hba->sq_cq_size)
+               return -EINVAL;
+
+       hba->sq_cq_dump = kmalloc(hba->sq_cq_size, GFP_KERNEL);
+       if (!hba->sq_cq_dump)
+               return -ENOMEM;
+       hba->sq_cq_rdp = hba->sq_cq_dump;
+
+       bnx2i_dup_cq_mem(hba, conn, count);
+       return 0;
+}
+
+
+
+static void bnx2i_dup_sq_mem(struct bnx2i_hba *hba,
+                            struct bnx2i_conn *conn, int count)
+{
+       struct sqe *sqe_s;
+       struct sqe *sqe_d;
+       int total_cnt = count;
+
+       if (conn->ep->qp.sq_prod_qe == conn->ep->qp.sq_virt)
+               sqe_s = conn->ep->qp.sq_last_qe;
+       else
+               sqe_s = conn->ep->qp.sq_prod_qe - 1;
+       sqe_d = (struct sqe *)hba->sq_cq_rdp;
+       while (count--) {
+               memcpy(sqe_d, sqe_s, sizeof(struct sqe));
+               if (sqe_s == conn->ep->qp.sq_virt) {
+                       sqe_s = conn->ep->qp.sq_last_qe;
+               } else {
+                       sqe_s--;
+               }
+               sqe_d++;
+               if ((sqe_d - (struct sqe *) hba->sq_cq_rdp) >
+                   total_cnt) {
+                       printk(KERN_ALERT "bnx2i - SQ Dump: mem overflow\n");
+                       break;
+               }
+       }
+}
+
+
+static int bnx2i_init_sq_dump(struct bnx2i_hba *hba,
+                             u32 iscsi_cid, u32 count)
+{
+       struct bnx2i_conn *conn;
+       int sq_size;
+
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!conn) {
+               printk(KERN_ALERT "SQ dump: cid #%x not valid\n",
+                                 iscsi_cid);
+               return -EINVAL;
+       }
+
+       if (hba->sq_cq_dump)
+               return -EINVAL;
+
+       sq_size = (conn->ep->qp.sq_last_qe - conn->ep->qp.sq_first_qe) + 1;
+
+       if (!count || (count > sq_size))
+               count = sq_size;
+
+       hba->sq_cq_size = count * sizeof(struct sqe);
+
+       if (!hba->sq_cq_size)
+               return -EINVAL;
+
+       hba->sq_cq_dump = kmalloc(hba->sq_cq_size, GFP_KERNEL);
+       if (!hba->sq_cq_dump)
+               return -ENOMEM;
+       hba->sq_cq_rdp = hba->sq_cq_dump;
+
+       bnx2i_dup_sq_mem(hba, conn, count);
+       return 0;
+}
+
+static ssize_t bnx2i_setup_qp_shmem_dump(struct class_device *cdev,
+                                        const char *buf, size_t count)
+{
+       struct bnx2i_hba *hba =
+               container_of(cdev, struct bnx2i_hba, class_dev);
+       u32 iscsi_cid;
+       char queue[32];
+       ssize_t ret = count;
+       u32 num_count;
+
+
+       if (sscanf(buf, "%c%c,%d,%d", &queue[0], &queue[1],
+                  &iscsi_cid, &num_count) > 0) {
+               if (!strncmp(queue, "SQ", 2)) {
+                       ret = bnx2i_init_sq_dump(hba, iscsi_cid, num_count);
+               } else if (!strncmp(queue, "CQ", 2)) {
+                       ret = bnx2i_init_cq_dump(hba, iscsi_cid, num_count);
+               } else {
+                       ret = -EINVAL;
+               }
+       }
+       return ret;
+}
+
+
+static ssize_t bnx2i_read_tcp_portd_options(struct class_device *cdev,
+                                           char *buf)
+{
+       extern struct tcp_port_mngt bnx2i_tcp_port_tbl;
+       return sprintf(buf, "0x%x\n", bnx2i_tcp_port_tbl.num_required);
+}
+
+static ssize_t bnx2i_write_tcp_portd_results(struct class_device *cdev,
+                                            const char *buf, size_t count)
+{
+       extern struct tcp_port_mngt bnx2i_tcp_port_tbl;
+       u32 tcp_port, bind_stat;
+
+       if (!bnx2i_tcp_port_tbl.free_q)
+               return count;
+
+       if (sscanf(buf, "%d,%d", &tcp_port, &bind_stat) > 0) {
+               if (bind_stat && tcp_port) {
+                       bnx2i_tcp_port_new_entry(tcp_port);
+               }
+       }
+       return count;
+}
+
+
+static CLASS_DEVICE_ATTR (mips_info, S_IRUGO,
+                        bnx2i_show_mips_status, NULL);
+static CLASS_DEVICE_ATTR (net_if_name, S_IRUGO,
+                        bnx2i_show_net_if_name, NULL);
+static CLASS_DEVICE_ATTR (sq_size, S_IRUGO | S_IWUSR,
+                        bnx2i_show_sq_info, bnx2i_set_sq_info);
+static CLASS_DEVICE_ATTR (cq_size, S_IRUGO | S_IWUSR,
+                        bnx2i_show_cq_info, bnx2i_set_cq_info);
+static CLASS_DEVICE_ATTR (rq_size, S_IRUGO | S_IWUSR,
+                        bnx2i_show_rq_info, bnx2i_set_rq_info);
+static CLASS_DEVICE_ATTR (num_ccell, S_IRUGO | S_IWUSR,
+                        bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+static CLASS_DEVICE_ATTR (pci_trigger, S_IRUGO,
+                        bnx2i_read_pci_trigger_reg, NULL);
+static CLASS_DEVICE_ATTR (ctx_dump, S_IRUGO | S_IWUSR,
+                        bnx2i_get_iscsi_cntx_dump,
+                        bnx2i_select_iscsi_cntx_dump);
+static CLASS_DEVICE_ATTR (cid_list, S_IRUGO | S_IWUSR,
+                        bnx2i_get_active_iscsi_cid_list,
+                        bnx2i_set_iscsi_cid_err_poll_mode);
+static CLASS_DEVICE_ATTR (qp_shmem_dump, S_IRUGO | S_IWUSR,
+                        bnx2i_get_qp_shmem_dump,
+                        bnx2i_setup_qp_shmem_dump);
+static CLASS_DEVICE_ATTR (port_bind, S_IRUGO | S_IWUSR,
+                        bnx2i_read_tcp_portd_options,
+                        bnx2i_write_tcp_portd_results);
+
+
+static struct class_device_attribute *bnx2i_class_attributes[] = {
+       &class_device_attr_mips_info,
+       &class_device_attr_net_if_name,
+       &class_device_attr_sq_size,
+       &class_device_attr_cq_size,
+       &class_device_attr_rq_size,
+       &class_device_attr_num_ccell,
+       &class_device_attr_pci_trigger,
+       &class_device_attr_ctx_dump,
+       &class_device_attr_cid_list,
+       &class_device_attr_qp_shmem_dump,
+};
+
+static struct class_device_attribute *tcp_port_class_attributes[] = {
+       &class_device_attr_port_bind
+};
+
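+/*
+ * Illustrative note (not part of this patch): with the class named "bnx2i"
+ * and the class_id built from the PCI bus/device/function below, the
+ * attributes above are expected to surface as files such as
+ * /sys/class/bnx2i/<bb:dd.f>/sq_size and /sys/class/bnx2i/tcp_portd/port_bind.
+ */
+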
+static void bnx2i_sysfs_release(struct class_device *class_dev)
+{
+}
+
+struct class_device port_class_dev;
+
+
+static struct class bnx2i_class = {
+       .name   = "bnx2i",
+       .release = bnx2i_sysfs_release,
+};
+
+
+
+static int bnx2i_register_port_class_dev(struct class_device *class_dev)
+{
+       char dev_name[BUS_ID_SIZE];
+       int ret;
+       int i;
+
+       class_dev->class = &bnx2i_class;
+       class_dev->class_data = class_dev;
+       snprintf(dev_name, BUS_ID_SIZE, "%s", "tcp_portd");
+       strlcpy(class_dev->class_id, dev_name, BUS_ID_SIZE);
+
+       ret = class_device_register(class_dev);
+       if (ret)
+               goto err;
+
+       for (i = 0; i < ARRAY_SIZE(tcp_port_class_attributes); ++i) {
+               ret = class_device_create_file(class_dev,
+                                              tcp_port_class_attributes[i]);
+               if (ret)
+                       goto err_unregister;
+       }
+
+       return 0;
+
+err_unregister:
+       class_device_unregister(class_dev);
+err:
+       return ret;
+}
+
+
+int bnx2i_register_sysfs(struct bnx2i_hba *hba)
+{
+       struct class_device *class_dev = &hba->class_dev;
+       char dev_name[BUS_ID_SIZE];
+       struct ethtool_drvinfo drv_info;
+       u32 bus_no = 0;
+       u32 dev_no = 0;
+       u32 func_no = 0;
+       u32 extra = 0;
+       int ret;
+       int i;
+
+       if (hba->cnic && hba->cnic->netdev) {
+               hba->cnic->netdev->ethtool_ops->get_drvinfo(hba->cnic->netdev,
+                                                           &drv_info);
+               sscanf(drv_info.bus_info, "%x:%x:%x.%d",
+                      &extra, &bus_no, &dev_no, &func_no);
+       }
+       class_dev->class = &bnx2i_class;
+       class_dev->class_data = hba;
+       snprintf(dev_name, BUS_ID_SIZE, "%.2x:%.2x.%.1x",
+                        bus_no, dev_no, func_no);
+       strlcpy(class_dev->class_id, dev_name, BUS_ID_SIZE);
+
+       ret = class_device_register(class_dev);
+       if (ret)
+               goto err;
+
+       for (i = 0; i < ARRAY_SIZE(bnx2i_class_attributes); ++i) {
+               ret = class_device_create_file(class_dev,
+                                              bnx2i_class_attributes[i]);
+               if (ret)
+                       goto err_unregister;
+       }
+
+       return 0;
+
+err_unregister:
+       class_device_unregister(class_dev);
+err:
+       return ret;
+}
+
+void bnx2i_unregister_sysfs(struct bnx2i_hba *hba)
+{
+       class_device_unregister(&hba->class_dev);
+}
+
+int bnx2i_sysfs_setup(void)
+{
+       int ret;
+
+       ret = class_register(&bnx2i_class);
+       if (ret)
+               return ret;
+
+       bnx2i_register_port_class_dev(&port_class_dev);
+       return ret;
+}
+
+void bnx2i_sysfs_cleanup(void)
+{
+       class_device_unregister(&port_class_dev);
+       class_unregister(&bnx2i_class);
+}
+
+#endif