Patchwork [v6,4/8] csiostor: Chelsio FCoE offload driver submission (sources part 1).

Submitter Naresh Kumar Inna
Date Nov. 15, 2012, 5:11 p.m.
Message ID <1352999484-17812-5-git-send-email-naresh@chelsio.com>
Permalink /patch/199256/
State Not Applicable
Delegated to: David Miller

Comments

Naresh Kumar Inna - Nov. 15, 2012, 5:11 p.m.
This patch contains code for driver initialization, driver resource
allocation, and the Work Request module functionality. Driver initialization
covers the module entry/exit points and registration with the PCI, FC
transport, and SCSI midlayer subsystems. The Work Request module provides
services for allocating DMA queues, posting Work Requests on them, and
processing completions.
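
A rough sketch of the intended call flow for a consumer of the Work Request
module (illustrative only, not part of the patch; hw, owner, iq_idx, portid,
qsize, wr and wr_size are stand-ins for caller context, and error handling
is omitted):

	struct csio_wr_pair wrp;
	int eq_idx;

	/* One-time setup: allocate queue memory, then register it with FW. */
	eq_idx = csio_wr_alloc_q(hw, qsize, 0, CSIO_EGRESS, owner, 0, 0, NULL);
	csio_wr_eq_create(hw, NULL, eq_idx, iq_idx, portid, NULL);

	/* Fast path: reserve credits, copy the WR in, ring the doorbell. */
	if (csio_wr_get(hw, eq_idx, wr_size, &wrp) == 0) {
		csio_wr_copy_to_wrp(wr, &wrp, 0, wr_size);
		csio_wr_issue(hw, eq_idx, false);
	}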

Signed-off-by: Naresh Kumar Inna <naresh@chelsio.com>
---
V2: Removed module parameters.
V3: Corrected comment.

V5:
- Removed needless header inclusion.
- Re-wrote error paths to reduce lock/unlock ambiguity.

 drivers/scsi/csiostor/csio_init.c | 1274 +++++++++++++++++++++++++++++
 drivers/scsi/csiostor/csio_wr.c   | 1632 +++++++++++++++++++++++++++++++++++++
 2 files changed, 2906 insertions(+), 0 deletions(-)
 create mode 100644 drivers/scsi/csiostor/csio_init.c
 create mode 100644 drivers/scsi/csiostor/csio_wr.c

Patch

diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
new file mode 100644
index 0000000..fdd408f
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -0,0 +1,1274 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "csio_init.h"
+#include "csio_defs.h"
+
+#define CSIO_MIN_MEMPOOL_SZ	64
+
+static struct dentry *csio_debugfs_root;
+
+static struct scsi_transport_template *csio_fcoe_transport;
+static struct scsi_transport_template *csio_fcoe_transport_vport;
+
+/*
+ * debugfs support
+ */
+static int
+csio_mem_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	loff_t avail = file->f_path.dentry->d_inode->i_size;
+	unsigned int mem = (uintptr_t)file->private_data & 3;
+	struct csio_hw *hw = file->private_data - mem;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= avail)
+		return 0;
+	if (count > avail - pos)
+		count = avail - pos;
+
+	while (count) {
+		size_t len;
+		int ret, ofst;
+		__be32 data[16];
+
+		if (mem == MEM_MC)
+			ret = csio_hw_mc_read(hw, pos, data, NULL);
+		else
+			ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
+		if (ret)
+			return ret;
+
+		ofst = pos % sizeof(data);
+		len = min(count, sizeof(data) - ofst);
+		if (copy_to_user(buf, (u8 *)data + ofst, len))
+			return -EFAULT;
+
+		buf += len;
+		pos += len;
+		count -= len;
+	}
+	count = pos - *ppos;
+	*ppos = pos;
+	return count;
+}
+
+static const struct file_operations csio_mem_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = csio_mem_open,
+	.read    = csio_mem_read,
+	.llseek  = default_llseek,
+};
+
+static void __devinit
+csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
+		     unsigned int idx, unsigned int size_mb)
+{
+	struct dentry *de;
+
+	de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
+				 (void *)hw + idx, &csio_mem_debugfs_fops);
+	if (de && de->d_inode)
+		de->d_inode->i_size = size_mb << 20;
+}
+
+static int __devinit
+csio_setup_debugfs(struct csio_hw *hw)
+{
+	int i;
+
+	if (IS_ERR_OR_NULL(hw->debugfs_root))
+		return -1;
+
+	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
+	if (i & EDRAM0_ENABLE)
+		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
+	if (i & EDRAM1_ENABLE)
+		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
+	if (i & EXT_MEM_ENABLE)
+		csio_add_debugfs_mem(hw, "mc", MEM_MC,
+		      EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
+	return 0;
+}
+
+/*
+ * csio_dfs_create - Creates and sets up per-hw debugfs.
+ *
+ */
+static int
+csio_dfs_create(struct csio_hw *hw)
+{
+	if (csio_debugfs_root) {
+		hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
+							csio_debugfs_root);
+		csio_setup_debugfs(hw);
+	}
+
+	return 0;
+}
+
+/*
+ * csio_dfs_destroy - Destroys per-hw debugfs.
+ */
+static int
+csio_dfs_destroy(struct csio_hw *hw)
+{
+	if (hw->debugfs_root)
+		debugfs_remove_recursive(hw->debugfs_root);
+
+	return 0;
+}
+
+/*
+ * csio_dfs_init - Debug filesystem initialization for the module.
+ *
+ */
+static int
+csio_dfs_init(void)
+{
+	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!csio_debugfs_root)
+		pr_warn("Could not create debugfs entry, continuing\n");
+
+	return 0;
+}
+
+/*
+ * csio_dfs_exit - debugfs cleanup for the module.
+ */
+static void
+csio_dfs_exit(void)
+{
+	debugfs_remove(csio_debugfs_root);
+}
+
+/*
+ * csio_pci_init - PCI initialization.
+ * @pdev: PCI device.
+ * @bars: Bitmask of bars to be requested.
+ *
+ * Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ */
+static int
+csio_pci_init(struct pci_dev *pdev, int *bars)
+{
+	int rv = -ENODEV;
+
+	*bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+	if (pci_enable_device_mem(pdev))
+		goto err;
+
+	if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
+		goto err_disable_device;
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	} else {
+		dev_err(&pdev->dev, "No suitable DMA available.\n");
+		goto err_release_regions;
+	}
+
+	return 0;
+
+err_release_regions:
+	pci_release_selected_regions(pdev, *bars);
+err_disable_device:
+	pci_disable_device(pdev);
+err:
+	return rv;
+
+}
+
+/*
+ * csio_pci_exit - PCI uninitialization.
+ * @pdev: PCI device.
+ * @bars: Bars to be released.
+ *
+ */
+static void
+csio_pci_exit(struct pci_dev *pdev, int *bars)
+{
+	pci_release_selected_regions(pdev, *bars);
+	pci_disable_device(pdev);
+}
+
+/*
+ * csio_hw_init_workers - Initialize the HW module's worker threads.
+ * @hw: HW module.
+ *
+ */
+static void
+csio_hw_init_workers(struct csio_hw *hw)
+{
+	INIT_WORK(&hw->evtq_work, csio_evtq_worker);
+}
+
+static void
+csio_hw_exit_workers(struct csio_hw *hw)
+{
+	cancel_work_sync(&hw->evtq_work);
+	flush_scheduled_work();
+}
+
+static int
+csio_create_queues(struct csio_hw *hw)
+{
+	int i, j;
+	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+	int rv;
+	struct csio_scsi_cpu_info *info;
+
+	if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
+		return 0;
+
+	if (hw->intr_mode != CSIO_IM_MSIX) {
+		rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
+					0, hw->pport[0].portid, false, NULL);
+		if (rv != 0) {
+			csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
+			return rv;
+		}
+	}
+
+	/* FW event queue */
+	rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
+			       csio_get_fwevt_intr_idx(hw),
+			       hw->pport[0].portid, true, NULL);
+	if (rv != 0) {
+		csio_err(hw, "FW event IQ config failed!: %d\n", rv);
+		return rv;
+	}
+
+	/* Create mgmt queue */
+	rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
+			mgmtm->iq_idx, hw->pport[0].portid, NULL);
+
+	if (rv != 0) {
+		csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
+		goto err;
+	}
+
+	/* Create SCSI queues */
+	for (i = 0; i < hw->num_pports; i++) {
+		info = &hw->scsi_cpu_info[i];
+
+		for (j = 0; j < info->max_cpus; j++) {
+			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+
+			rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
+					       sqset->intr_idx, i, false, NULL);
+			if (rv != 0) {
+				csio_err(hw,
+				   "SCSI module IQ config failed [%d][%d]:%d\n",
+				   i, j, rv);
+				goto err;
+			}
+			rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
+					       sqset->iq_idx, i, NULL);
+			if (rv != 0) {
+				csio_err(hw,
+				   "SCSI module EQ config failed [%d][%d]:%d\n",
+				   i, j, rv);
+				goto err;
+			}
+		} /* for all CPUs */
+	} /* For all ports */
+
+	hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
+	return 0;
+err:
+	csio_wr_destroy_queues(hw, true);
+	return -EINVAL;
+}
+
+/*
+ * csio_config_queues - Configure the DMA queues.
+ * @hw: HW module.
+ *
+ * Allocates memory for the queues and registers them with FW.
+ */
+int
+csio_config_queues(struct csio_hw *hw)
+{
+	int i, j, idx, k = 0;
+	int rv;
+	struct csio_scsi_qset *sqset;
+	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+	struct csio_scsi_qset *orig;
+	struct csio_scsi_cpu_info *info;
+
+	if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
+		return csio_create_queues(hw);
+
+	/* Calculate number of SCSI queues for MSIX we would like */
+	hw->num_scsi_msix_cpus = num_online_cpus();
+	hw->num_sqsets = num_online_cpus() * hw->num_pports;
+
+	if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
+		hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
+		hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
+	}
+
+	/* Initialize max_cpus, may get reduced during msix allocations */
+	for (i = 0; i < hw->num_pports; i++)
+		hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;
+
+	csio_dbg(hw, "nsqsets:%d scpus:%d\n",
+		    hw->num_sqsets, hw->num_scsi_msix_cpus);
+
+	csio_intr_enable(hw);
+
+	if (hw->intr_mode != CSIO_IM_MSIX) {
+
+		/* Allocate Forward interrupt iq. */
+		hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
+						CSIO_INTR_WRSIZE, CSIO_INGRESS,
+						(void *)hw, 0, 0, NULL);
+		if (hw->intr_iq_idx == -1) {
+			csio_err(hw,
+				 "Forward interrupt queue creation failed\n");
+			goto intr_disable;
+		}
+	}
+
+	/* Allocate the FW evt queue */
+	hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
+					   CSIO_FWEVT_WRSIZE,
+					   CSIO_INGRESS, (void *)hw,
+					   CSIO_FWEVT_FLBUFS, 0,
+					   csio_fwevt_intx_handler);
+	if (hw->fwevt_iq_idx == -1) {
+		csio_err(hw, "FW evt queue creation failed\n");
+		goto intr_disable;
+	}
+
+	/* Allocate the mgmt queue */
+	mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
+				      CSIO_MGMT_EQ_WRSIZE,
+				      CSIO_EGRESS, (void *)hw, 0, 0, NULL);
+	if (mgmtm->eq_idx == -1) {
+		csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
+		goto intr_disable;
+	}
+
+	/* Use FW IQ for MGMT req completion */
+	mgmtm->iq_idx = hw->fwevt_iq_idx;
+
+	/* Allocate SCSI queues */
+	for (i = 0; i < hw->num_pports; i++) {
+		info = &hw->scsi_cpu_info[i];
+
+		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+			sqset = &hw->sqset[i][j];
+
+			if (j >= info->max_cpus) {
+				k = j % info->max_cpus;
+				orig = &hw->sqset[i][k];
+				sqset->eq_idx = orig->eq_idx;
+				sqset->iq_idx = orig->iq_idx;
+				continue;
+			}
+
+			idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
+					      CSIO_EGRESS, (void *)hw, 0, 0,
+					      NULL);
+			if (idx == -1) {
+				csio_err(hw, "EQ creation failed for idx:%d\n",
+					    idx);
+				goto intr_disable;
+			}
+
+			sqset->eq_idx = idx;
+
+			idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
+					     CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
+					     (void *)hw, 0, 0,
+					     csio_scsi_intx_handler);
+			if (idx == -1) {
+				csio_err(hw, "IQ creation failed for idx:%d\n",
+					    idx);
+				goto intr_disable;
+			}
+			sqset->iq_idx = idx;
+		} /* for all CPUs */
+	} /* For all ports */
+
+	hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;
+
+	rv = csio_create_queues(hw);
+	if (rv != 0)
+		goto intr_disable;
+
+	/*
+	 * Now request IRQs for the vectors. In the event of a failure,
+	 * cleanup is handled internally by this function.
+	 */
+	rv = csio_request_irqs(hw);
+	if (rv != 0)
+		return -EINVAL;
+
+	return 0;
+
+intr_disable:
+	csio_intr_disable(hw, false);
+
+	return -EINVAL;
+}
+
+static int
+csio_resource_alloc(struct csio_hw *hw)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	int rv = -ENOMEM;
+
+	wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
+		       CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);
+
+	hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+						  sizeof(struct csio_mb));
+	if (!hw->mb_mempool)
+		goto err;
+
+	hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+						     sizeof(struct csio_rnode));
+	if (!hw->rnode_mempool)
+		goto err_free_mb_mempool;
+
+	hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
+					    CSIO_SCSI_RSP_LEN, 8, 0);
+	if (!hw->scsi_pci_pool)
+		goto err_free_rn_pool;
+
+	return 0;
+
+err_free_rn_pool:
+	mempool_destroy(hw->rnode_mempool);
+	hw->rnode_mempool = NULL;
+err_free_mb_mempool:
+	mempool_destroy(hw->mb_mempool);
+	hw->mb_mempool = NULL;
+err:
+	return rv;
+}
+
+static void
+csio_resource_free(struct csio_hw *hw)
+{
+	pci_pool_destroy(hw->scsi_pci_pool);
+	hw->scsi_pci_pool = NULL;
+	mempool_destroy(hw->rnode_mempool);
+	hw->rnode_mempool = NULL;
+	mempool_destroy(hw->mb_mempool);
+	hw->mb_mempool = NULL;
+}
+
+/*
+ * csio_hw_alloc - Allocate and initialize the HW module.
+ * @pdev: PCI device.
+ *
+ * Allocates HW structure, DMA, memory resources, maps BARS to
+ * host memory and initializes HW module.
+ */
+static struct csio_hw * __devinit
+csio_hw_alloc(struct pci_dev *pdev)
+{
+	struct csio_hw *hw;
+
+	hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
+	if (!hw)
+		goto err;
+
+	hw->pdev = pdev;
+	strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
+
+	/* memory pool/DMA pool allocation */
+	if (csio_resource_alloc(hw))
+		goto err_free_hw;
+
+	/* Get the start address of registers from BAR 0 */
+	hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+				       pci_resource_len(pdev, 0));
+	if (!hw->regstart) {
+		csio_err(hw, "Could not map BAR 0, regstart = %p\n",
+			 hw->regstart);
+		goto err_resource_free;
+	}
+
+	csio_hw_init_workers(hw);
+
+	if (csio_hw_init(hw))
+		goto err_unmap_bar;
+
+	csio_dfs_create(hw);
+
+	csio_dbg(hw, "hw:%p\n", hw);
+
+	return hw;
+
+err_unmap_bar:
+	csio_hw_exit_workers(hw);
+	iounmap(hw->regstart);
+err_resource_free:
+	csio_resource_free(hw);
+err_free_hw:
+	kfree(hw);
+err:
+	return NULL;
+}
+
+/*
+ * csio_hw_free - Uninitialize and free the HW module.
+ * @hw: The HW module
+ *
+ * Disable interrupts, uninit the HW module, free resources, free hw.
+ */
+static void
+csio_hw_free(struct csio_hw *hw)
+{
+	csio_intr_disable(hw, true);
+	csio_hw_exit_workers(hw);
+	csio_hw_exit(hw);
+	iounmap(hw->regstart);
+	csio_dfs_destroy(hw);
+	csio_resource_free(hw);
+	kfree(hw);
+}
+
+/**
+ * csio_shost_init - Create and initialize the lnode module.
+ * @hw:		The HW module.
+ * @dev:	The device associated with this invocation.
+ * @probe:	Called from probe context or not?
+ * @pln:	Parent lnode, if any.
+ *
+ * Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initializes lnode module and registers with SCSI ML
+ * via scsi_add_host. This function is shared between physical and
+ * virtual node ports.
+ */
+struct csio_lnode *
+csio_shost_init(struct csio_hw *hw, struct device *dev,
+		  bool probe, struct csio_lnode *pln)
+{
+	struct Scsi_Host  *shost = NULL;
+	struct csio_lnode *ln;
+
+	csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
+	csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;
+
+	/*
+	 * hw->pdev is the physical port's PCI dev structure,
+	 * which will be different from the NPIV dev structure.
+	 */
+	if (dev == &hw->pdev->dev)
+		shost = scsi_host_alloc(
+				&csio_fcoe_shost_template,
+				sizeof(struct csio_lnode));
+	else
+		shost = scsi_host_alloc(
+				&csio_fcoe_shost_vport_template,
+				sizeof(struct csio_lnode));
+
+	if (!shost)
+		goto err;
+
+	ln = shost_priv(shost);
+	memset(ln, 0, sizeof(struct csio_lnode));
+
+	/* Link common lnode to this lnode */
+	ln->dev_num = (shost->host_no << 16);
+
+	shost->can_queue = CSIO_MAX_QUEUE;
+	shost->this_id = -1;
+	shost->unique_id = shost->host_no;
+	shost->max_cmd_len = 16; /* Max CDB length supported */
+	shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
+			      hw->fres_info.max_ssns);
+	shost->max_lun = CSIO_MAX_LUN;
+	if (dev == &hw->pdev->dev)
+		shost->transportt = csio_fcoe_transport;
+	else
+		shost->transportt = csio_fcoe_transport_vport;
+
+	/* root lnode */
+	if (!hw->rln)
+		hw->rln = ln;
+
+	/* Other initialization here: Common, Transport specific */
+	if (csio_lnode_init(ln, hw, pln))
+		goto err_shost_put;
+
+	if (scsi_add_host(shost, dev))
+		goto err_lnode_exit;
+
+	return ln;
+
+err_lnode_exit:
+	csio_lnode_exit(ln);
+err_shost_put:
+	scsi_host_put(shost);
+err:
+	return NULL;
+}
+
+/**
+ * csio_shost_exit - De-instantiate the shost.
+ * @ln:		The lnode module corresponding to the shost.
+ *
+ */
+void
+csio_shost_exit(struct csio_lnode *ln)
+{
+	struct Scsi_Host *shost = csio_ln_to_shost(ln);
+	struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+	/* Inform transport */
+	fc_remove_host(shost);
+
+	/* Inform SCSI ML */
+	scsi_remove_host(shost);
+
+	/* Flush all the events, so that any rnode removal events
+	 * already queued are all handled, before we remove the lnode.
+	 */
+	spin_lock_irq(&hw->lock);
+	csio_evtq_flush(hw);
+	spin_unlock_irq(&hw->lock);
+
+	csio_lnode_exit(ln);
+	scsi_host_put(shost);
+}
+
+struct csio_lnode *
+csio_lnode_alloc(struct csio_hw *hw)
+{
+	return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
+}
+
+void
+csio_lnodes_block_request(struct csio_hw *hw)
+{
+	struct Scsi_Host  *shost;
+	struct csio_lnode *sln;
+	struct csio_lnode *ln;
+	struct list_head *cur_ln, *cur_cln;
+	struct csio_lnode **lnode_list;
+	int cur_cnt = 0, ii;
+
+	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+			GFP_KERNEL);
+	if (!lnode_list) {
+		csio_err(hw, "Failed to allocate lnodes_list");
+		return;
+	}
+
+	spin_lock_irq(&hw->lock);
+	/* Traverse sibling lnodes */
+	list_for_each(cur_ln, &hw->sln_head) {
+		sln = (struct csio_lnode *) cur_ln;
+		lnode_list[cur_cnt++] = sln;
+
+		/* Traverse children lnodes */
+		list_for_each(cur_cln, &sln->cln_head)
+			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+	}
+	spin_unlock_irq(&hw->lock);
+
+	for (ii = 0; ii < cur_cnt; ii++) {
+		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+		ln = lnode_list[ii];
+		shost = csio_ln_to_shost(ln);
+		scsi_block_requests(shost);
+
+	}
+	kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_request(struct csio_hw *hw)
+{
+	struct csio_lnode *ln;
+	struct Scsi_Host  *shost;
+	struct csio_lnode *sln;
+	struct list_head *cur_ln, *cur_cln;
+	struct csio_lnode **lnode_list;
+	int cur_cnt = 0, ii;
+
+	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+			GFP_KERNEL);
+	if (!lnode_list) {
+		csio_err(hw, "Failed to allocate lnodes_list");
+		return;
+	}
+
+	spin_lock_irq(&hw->lock);
+	/* Traverse sibling lnodes */
+	list_for_each(cur_ln, &hw->sln_head) {
+		sln = (struct csio_lnode *) cur_ln;
+		lnode_list[cur_cnt++] = sln;
+
+		/* Traverse children lnodes */
+		list_for_each(cur_cln, &sln->cln_head)
+			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+	}
+	spin_unlock_irq(&hw->lock);
+
+	for (ii = 0; ii < cur_cnt; ii++) {
+		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+		ln = lnode_list[ii];
+		shost = csio_ln_to_shost(ln);
+		scsi_unblock_requests(shost);
+	}
+	kfree(lnode_list);
+}
+
+void
+csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
+{
+	struct csio_lnode *ln;
+	struct Scsi_Host  *shost;
+	struct csio_lnode *sln;
+	struct list_head *cur_ln, *cur_cln;
+	struct csio_lnode **lnode_list;
+	int cur_cnt = 0, ii;
+
+	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+			GFP_KERNEL);
+	if (!lnode_list) {
+		csio_err(hw, "Failed to allocate lnodes_list");
+		return;
+	}
+
+	spin_lock_irq(&hw->lock);
+	/* Traverse sibling lnodes */
+	list_for_each(cur_ln, &hw->sln_head) {
+		sln = (struct csio_lnode *) cur_ln;
+		if (sln->portid != portid)
+			continue;
+
+		lnode_list[cur_cnt++] = sln;
+
+		/* Traverse children lnodes */
+		list_for_each(cur_cln, &sln->cln_head)
+			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+	}
+	spin_unlock_irq(&hw->lock);
+
+	for (ii = 0; ii < cur_cnt; ii++) {
+		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+		ln = lnode_list[ii];
+		shost = csio_ln_to_shost(ln);
+		scsi_block_requests(shost);
+	}
+	kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
+{
+	struct csio_lnode *ln;
+	struct Scsi_Host  *shost;
+	struct csio_lnode *sln;
+	struct list_head *cur_ln, *cur_cln;
+	struct csio_lnode **lnode_list;
+	int cur_cnt = 0, ii;
+
+	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+			GFP_KERNEL);
+	if (!lnode_list) {
+		csio_err(hw, "Failed to allocate lnodes_list");
+		return;
+	}
+
+	spin_lock_irq(&hw->lock);
+	/* Traverse sibling lnodes */
+	list_for_each(cur_ln, &hw->sln_head) {
+		sln = (struct csio_lnode *) cur_ln;
+		if (sln->portid != portid)
+			continue;
+		lnode_list[cur_cnt++] = sln;
+
+		/* Traverse children lnodes */
+		list_for_each(cur_cln, &sln->cln_head)
+			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+	}
+	spin_unlock_irq(&hw->lock);
+
+	for (ii = 0; ii < cur_cnt; ii++) {
+		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+		ln = lnode_list[ii];
+		shost = csio_ln_to_shost(ln);
+		scsi_unblock_requests(shost);
+	}
+	kfree(lnode_list);
+}
+
+void
+csio_lnodes_exit(struct csio_hw *hw, bool npiv)
+{
+	struct csio_lnode *sln;
+	struct csio_lnode *ln;
+	struct list_head *cur_ln, *cur_cln;
+	struct csio_lnode **lnode_list;
+	int cur_cnt = 0, ii;
+
+	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+			GFP_KERNEL);
+	if (!lnode_list) {
+		csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
+		return;
+	}
+
+	/* Get all child lnodes (NPIV ports) */
+	spin_lock_irq(&hw->lock);
+	list_for_each(cur_ln, &hw->sln_head) {
+		sln = (struct csio_lnode *) cur_ln;
+
+		/* Traverse children lnodes */
+		list_for_each(cur_cln, &sln->cln_head)
+			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+	}
+	spin_unlock_irq(&hw->lock);
+
+	/* Delete NPIV lnodes */
+	for (ii = 0; ii < cur_cnt; ii++) {
+		csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
+		ln = lnode_list[ii];
+		fc_vport_terminate(ln->fc_vport);
+	}
+
+	/* If deleting only NPIV lnodes, we are done */
+	if (npiv)
+		goto free_lnodes;
+
+	cur_cnt = 0;
+	/* Get all physical lnodes */
+	spin_lock_irq(&hw->lock);
+	/* Traverse sibling lnodes */
+	list_for_each(cur_ln, &hw->sln_head) {
+		sln = (struct csio_lnode *) cur_ln;
+		lnode_list[cur_cnt++] = sln;
+	}
+	spin_unlock_irq(&hw->lock);
+
+	/* Delete physical lnodes */
+	for (ii = 0; ii < cur_cnt; ii++) {
+		csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
+		csio_shost_exit(lnode_list[ii]);
+	}
+
+free_lnodes:
+	kfree(lnode_list);
+}
+
+/*
+ * csio_lnode_init_post: Set lnode attributes after starting HW.
+ * @ln: lnode.
+ *
+ */
+static void
+csio_lnode_init_post(struct csio_lnode *ln)
+{
+	struct Scsi_Host  *shost = csio_ln_to_shost(ln);
+
+	csio_fchost_attr_init(ln);
+
+	scsi_scan_host(shost);
+}
+
+/*
+ * csio_probe_one - Instantiate this PCI function.
+ * @pdev: PCI device
+ * @id: Device ID
+ *
+ * This is the .probe() callback of the driver. This function:
+ * - Initializes the PCI function by enabling MMIO, setting bus
+ *   mastership and setting DMA mask.
+ * - Allocates HW structure, DMA, memory resources, maps BARS to
+ *   host memory and initializes HW module.
+ * - Allocates lnode structure via scsi_host_alloc, initializes
+ *   shost, initializes the lnode module and registers with SCSI ML
+ *   via scsi_add_host.
+ * - Enables interrupts, and starts the chip by kicking off the
+ *   HW state machine.
+ * - Once hardware is ready, initiates a scan of the host via
+ *   scsi_scan_host.
+ */
+static int __devinit
+csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int rv;
+	int bars;
+	int i;
+	struct csio_hw *hw;
+	struct csio_lnode *ln;
+
+	rv = csio_pci_init(pdev, &bars);
+	if (rv)
+		goto err;
+
+	hw = csio_hw_alloc(pdev);
+	if (!hw) {
+		rv = -ENODEV;
+		goto err_pci_exit;
+	}
+
+	pci_set_drvdata(pdev, hw);
+
+	if (csio_hw_start(hw) != 0) {
+		dev_err(&pdev->dev,
+			"Failed to start FW, continuing in debug mode.\n");
+		return 0;
+	}
+
+	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
+		    FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
+		    FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
+		    FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
+		    FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+
+	for (i = 0; i < hw->num_pports; i++) {
+		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+		if (!ln) {
+			rv = -ENODEV;
+			break;
+		}
+		/* Initialize portid */
+		ln->portid = hw->pport[i].portid;
+
+		spin_lock_irq(&hw->lock);
+		if (csio_lnode_start(ln) != 0)
+			rv = -ENODEV;
+		spin_unlock_irq(&hw->lock);
+
+		if (rv)
+			break;
+
+		csio_lnode_init_post(ln);
+	}
+
+	if (rv)
+		goto err_lnode_exit;
+
+	return 0;
+
+err_lnode_exit:
+	csio_lnodes_block_request(hw);
+	spin_lock_irq(&hw->lock);
+	csio_hw_stop(hw);
+	spin_unlock_irq(&hw->lock);
+	csio_lnodes_unblock_request(hw);
+	pci_set_drvdata(hw->pdev, NULL);
+	csio_lnodes_exit(hw, 0);
+	csio_hw_free(hw);
+err_pci_exit:
+	csio_pci_exit(pdev, &bars);
+err:
+	dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
+	return rv;
+}
+
+/*
+ * csio_remove_one - Remove one instance of the driver at this PCI function.
+ * @pdev: PCI device
+ *
+ * Used during hotplug operation.
+ */
+static void __devexit
+csio_remove_one(struct pci_dev *pdev)
+{
+	struct csio_hw *hw = pci_get_drvdata(pdev);
+	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+	csio_lnodes_block_request(hw);
+	spin_lock_irq(&hw->lock);
+
+	/* Stop the lnode and rnode state machines.
+	 * Quiesce IOs.
+	 * All sessions with remote ports are unregistered.
+	 */
+	csio_hw_stop(hw);
+	spin_unlock_irq(&hw->lock);
+	csio_lnodes_unblock_request(hw);
+
+	csio_lnodes_exit(hw, 0);
+	csio_hw_free(hw);
+	pci_set_drvdata(pdev, NULL);
+	csio_pci_exit(pdev, &bars);
+}
+
+/*
+ * csio_pci_error_detected - PCI error was detected
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct csio_hw *hw = pci_get_drvdata(pdev);
+
+	csio_lnodes_block_request(hw);
+	spin_lock_irq(&hw->lock);
+
+	/* Post PCI error detected evt to HW s/m
+	 * The HW s/m handles this event by quiescing IOs, unregistering
+	 * rports and finally taking the device offline.
+	 */
+	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
+	spin_unlock_irq(&hw->lock);
+	csio_lnodes_unblock_request(hw);
+	csio_lnodes_exit(hw, 0);
+	csio_intr_disable(hw, true);
+	pci_disable_device(pdev);
+	return state == pci_channel_io_perm_failure ?
+		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * csio_pci_slot_reset - PCI slot has been reset.
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct csio_hw *hw = pci_get_drvdata(pdev);
+	int ready;
+
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	/* Bring the HW s/m to the ready state,
+	 * but don't resume IOs.
+	 */
+	spin_lock_irq(&hw->lock);
+	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
+	ready = csio_is_hw_ready(hw);
+	spin_unlock_irq(&hw->lock);
+
+	if (ready) {
+		return PCI_ERS_RESULT_RECOVERED;
+	} else {
+		dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+}
+
+/*
+ * csio_pci_resume - Resume normal operations
+ * @pdev: PCI device
+ *
+ */
+static void
+csio_pci_resume(struct pci_dev *pdev)
+{
+	struct csio_hw *hw = pci_get_drvdata(pdev);
+	struct csio_lnode *ln;
+	int rv = 0;
+	int i;
+
+	/* Bring the LINK UP and Resume IO */
+
+	for (i = 0; i < hw->num_pports; i++) {
+		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+		if (!ln) {
+			rv = -ENODEV;
+			break;
+		}
+		/* Initialize portid */
+		ln->portid = hw->pport[i].portid;
+
+		spin_lock_irq(&hw->lock);
+		if (csio_lnode_start(ln) != 0)
+			rv = -ENODEV;
+		spin_unlock_irq(&hw->lock);
+
+		if (rv)
+			break;
+
+		csio_lnode_init_post(ln);
+	}
+
+	if (rv)
+		goto err_resume_exit;
+
+	return;
+
+err_resume_exit:
+	csio_lnodes_block_request(hw);
+	spin_lock_irq(&hw->lock);
+	csio_hw_stop(hw);
+	spin_unlock_irq(&hw->lock);
+	csio_lnodes_unblock_request(hw);
+	csio_lnodes_exit(hw, 0);
+	csio_hw_free(hw);
+	dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
+}
+
+static struct pci_error_handlers csio_err_handler = {
+	.error_detected = csio_pci_error_detected,
+	.slot_reset	= csio_pci_slot_reset,
+	.resume		= csio_pci_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
+	CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),	/* T440DBG FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),		/* T420CR FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),		/* T422CR FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),		/* T440CR FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),	/* T420BCH FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),	/* T440BCH FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),		/* T440CH FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),		/* T420SO FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),		/* T420CX FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),		/* T420BT FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),		/* T404BT FCOE */
+	CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),		/* B420 FCOE */
+	CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),		/* B404 FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),		/* T480 CR FCOE */
+	CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),	/* T440 LP-CR FCOE */
+	CSIO_DEVICE(CSIO_DEVID_PE10K, 0),		/* PE10K FCOE */
+	CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0),	/* PE10K FCOE on PF1 */
+	{ 0, 0, 0, 0, 0, 0, 0 }
+};
+
+
+static struct pci_driver csio_pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.driver		= {
+		.owner	= THIS_MODULE,
+	},
+	.id_table	= csio_pci_tbl,
+	.probe		= csio_probe_one,
+	.remove		= csio_remove_one,
+	.err_handler	= &csio_err_handler,
+};
+
+/*
+ * csio_init - Chelsio storage driver initialization function.
+ *
+ */
+static int __init
+csio_init(void)
+{
+	int rv = -ENOMEM;
+
+	pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);
+
+	csio_dfs_init();
+
+	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
+	if (!csio_fcoe_transport)
+		goto err;
+
+	csio_fcoe_transport_vport =
+			fc_attach_transport(&csio_fc_transport_vport_funcs);
+	if (!csio_fcoe_transport_vport)
+		goto err_vport;
+
+	rv = pci_register_driver(&csio_pci_driver);
+	if (rv)
+		goto err_pci;
+
+	return 0;
+
+err_pci:
+	fc_release_transport(csio_fcoe_transport_vport);
+err_vport:
+	fc_release_transport(csio_fcoe_transport);
+err:
+	csio_dfs_exit();
+	return rv;
+}
+
+/*
+ * csio_exit - Chelsio storage driver uninitialization.
+ *
+ * Function that gets called in the unload path.
+ */
+static void __exit
+csio_exit(void)
+{
+	pci_unregister_driver(&csio_pci_driver);
+	csio_dfs_exit();
+	fc_release_transport(csio_fcoe_transport_vport);
+	fc_release_transport(csio_fcoe_transport);
+}
+
+module_init(csio_init);
+module_exit(csio_exit);
+MODULE_AUTHOR(CSIO_DRV_AUTHOR);
+MODULE_DESCRIPTION(CSIO_DRV_DESC);
+MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
+MODULE_VERSION(CSIO_DRV_VERSION);
+MODULE_FIRMWARE(CSIO_FW_FNAME);
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
new file mode 100644
index 0000000..c32df1b
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -0,0 +1,1632 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include <linux/cache.h>
+
+#include "csio_hw.h"
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_defs.h"
+
+int csio_intr_coalesce_cnt;		/* value:SGE_INGRESS_RX_THRESHOLD[0] */
+static int csio_sge_thresh_reg;		/* SGE_INGRESS_RX_THRESHOLD[0] */
+
+int csio_intr_coalesce_time = 10;	/* value:SGE_TIMER_VALUE_1 */
+static int csio_sge_timer_reg = 1;
+
+#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val)				\
+	csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
+
+static void
+csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
+{
+	sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
+							reg * sizeof(uint32_t));
+}
+
+/* Free list buffer size */
+static inline uint32_t
+csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
+{
+	return sge->sge_fl_buf_size[buf->paddr & 0xF];
+}
+
+/* Size of the egress queue status page */
+static inline uint32_t
+csio_wr_qstat_pgsz(struct csio_hw *hw)
+{
+	return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ?  128 : 64;
+}
+
+/* Ring freelist doorbell */
+static inline void
+csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
+{
+	/*
+	 * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
+	 * bytes in the freelist queue. This translates to at least
+	 * 8 freelist buffer pointers (since each pointer is 8 bytes).
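+	 * For example, inc_idx = 19 rings PIDX = 2 (16 buffers posted)
+	 * and carries the remaining 3 over in inc_idx for the next ring.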
+	 */
+	if (flq->inc_idx >= 8) {
+		csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
+			      PIDX(flq->inc_idx / 8),
+			      MYPF_REG(SGE_PF_KDOORBELL));
+		flq->inc_idx &= 7;
+	}
+}
+
+/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
+static void
+csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
+{
+	csio_wr_reg32(hw, CIDXINC(0)		|
+			  INGRESSQID(iqid)	|
+			  TIMERREG(X_TIMERREG_RESTART_COUNTER),
+			  MYPF_REG(SGE_PF_GTS));
+}
+
+/*
+ * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ *
+ * Fill up freelist buffer entries with buffers of size specified
+ * in the size register.
+ *
+ */
+static int
+csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_sge *sge = &wrm->sge;
+	__be64 *d = (__be64 *)(flq->vstart);
+	struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
+	uint64_t paddr;
+	int sreg = flq->un.fl.sreg;
+	int n = flq->credits;
+
+	while (n--) {
+		buf->len = sge->sge_fl_buf_size[sreg];
+		buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
+						  &buf->paddr);
+		if (!buf->vaddr) {
+			csio_err(hw, "Could only fill %d buffers!\n", n + 1);
+			return -ENOMEM;
+		}
+
+		paddr = buf->paddr | (sreg & 0xF);
+
+		*d++ = cpu_to_be64(paddr);
+		buf++;
+	}
+
+	return 0;
+}
+
+/*
+ * csio_wr_update_fl - Update the freelist indices after a refill.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ * @n: Number of freelist buffers posted.
+ */
+static inline void
+csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
+{
+	flq->inc_idx += n;
+	flq->pidx += n;
+	if (unlikely(flq->pidx >= flq->credits))
+		flq->pidx -= (uint16_t)flq->credits;
+
+	CSIO_INC_STATS(flq, n_flq_refill);
+}
+
+/*
+ * csio_wr_alloc_q - Allocate a WR queue and initialize it.
+ * @hw: HW module
+ * @qsize: Size of the queue in bytes
+ * @wrsize: Size of each WR in this queue, if fixed.
+ * @type: Type of queue (Ingress/Egress/Freelist)
+ * @owner: Module that owns this queue.
+ * @nflb: Number of freelist buffers for FL.
+ * @sreg: FL buffer size register to use.
+ * @iq_int_handler: Ingress queue handler in INTx mode.
+ *
+ * This function allocates and sets up a queue for the caller
+ * of size qsize, aligned at the required boundary. This is subject to
+ * free entries being available in the queue array. If one is found,
+ * it is initialized with the allocated queue, marked as being used (owner),
+ * and a handle returned to the caller in form of the queue's index
+ * into the q_arr array.
+ * If the caller has indicated a freelist (by specifying nflb > 0), create
+ * another queue (with its own index into q_arr) for the freelist. Allocate
+ * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
+ * idx in the ingress queue's flq.idx. This is how a Freelist is associated
+ * with its owning ingress queue.
+ */
+int
+csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
+		uint16_t type, void *owner, uint32_t nflb, int sreg,
+		iq_handler_t iq_intx_handler)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_q	*q, *flq;
+	int		free_idx = wrm->free_qidx;
+	int		ret_idx = free_idx;
+	uint32_t	qsz;
+	int flq_idx;
+
+	if (free_idx >= wrm->num_q) {
+		csio_err(hw, "No more free queues.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case CSIO_EGRESS:
+		qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
+		break;
+	case CSIO_INGRESS:
+		switch (wrsize) {
+		case 16:
+		case 32:
+		case 64:
+		case 128:
+			break;
+		default:
+			csio_err(hw, "Invalid Ingress queue WR size:%d\n",
+				    wrsize);
+			return -1;
+		}
+
+		/*
+		 * The number of elements must be a multiple of 16,
+		 * so this includes the status page size.
+		 */
+		qsz = ALIGN(qsize/wrsize, 16) * wrsize;
+
+		break;
+	case CSIO_FREELIST:
+		qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
+		break;
+	default:
+		csio_err(hw, "Invalid queue type: 0x%x\n", type);
+		return -1;
+	}
+
+	q = wrm->q_arr[free_idx];
+
+	q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);
+	if (!q->vstart) {
+		csio_err(hw,
+			 "Failed to allocate DMA memory for "
+			 "queue at id: %d size: %d\n", free_idx, qsize);
+		return -1;
+	}
+
+	/*
+	 * We need to zero out the contents, importantly for ingress,
+	 * since we start with a generation bit of 1 for ingress.
+	 */
+	memset(q->vstart, 0, qsz);
+
+	q->type		= type;
+	q->owner	= owner;
+	q->pidx		= q->cidx = q->inc_idx = 0;
+	q->size		= qsz;
+	q->wr_sz	= wrsize;	/* If using fixed size WRs */
+
+	wrm->free_qidx++;
+
+	if (type == CSIO_INGRESS) {
+		/* Since queue area is set to zero */
+		q->un.iq.genbit	= 1;
+
+		/*
+		 * Ingress queue status page size is always the size of
+		 * the ingress queue entry.
+		 */
+		q->credits	= (qsz - q->wr_sz) / q->wr_sz;
+		q->vwrap	= (void *)((uintptr_t)(q->vstart) + qsz
+							- q->wr_sz);
+
+		/* Allocate memory for FL if requested */
+		if (nflb > 0) {
+			flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
+						  sizeof(__be64), CSIO_FREELIST,
+						  owner, 0, sreg, NULL);
+			if (flq_idx == -1) {
+				csio_err(hw,
+					 "Failed to allocate FL queue"
+					 " for IQ idx:%d\n", free_idx);
+				return -1;
+			}
+
+			/* Associate the new FL with the Ingress queue */
+			q->un.iq.flq_idx = flq_idx;
+
+			flq = wrm->q_arr[q->un.iq.flq_idx];
+			flq->un.fl.bufs = kzalloc(flq->credits *
+						  sizeof(struct csio_dma_buf),
+						  GFP_KERNEL);
+			if (!flq->un.fl.bufs) {
+				csio_err(hw,
+					 "Failed to allocate FL queue bufs"
+					 " for IQ idx:%d\n", free_idx);
+				return -1;
+			}
+
+			flq->un.fl.packen = 0;
+			flq->un.fl.offset = 0;
+			flq->un.fl.sreg = sreg;
+
+			/* Fill up the free list buffers */
+			if (csio_wr_fill_fl(hw, flq))
+				return -1;
+
+			/*
+			 * Make sure that at least 1 credit (8 FL buffers)
+			 * in a FLQ remains unpopulated, otherwise the HW
+			 * thinks the FLQ is empty.
+			 */
+			flq->pidx = flq->inc_idx = flq->credits - 8;
+		} else {
+			q->un.iq.flq_idx = -1;
+		}
+
+		/* Associate the IQ INTx handler. */
+		q->un.iq.iq_intx_handler = iq_intx_handler;
+
+		csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
+
+	} else if (type == CSIO_EGRESS) {
+		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
+		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
+						- csio_wr_qstat_pgsz(hw));
+		csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
+	} else { /* Freelist */
+		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
+		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
+						- csio_wr_qstat_pgsz(hw));
+		csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
+	}
+
+	return ret_idx;
+}
+
+/*
+ * csio_wr_iq_create_rsp - Response handler for IQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that got created.
+ *
+ * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
+ */
+static int
+csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+	struct csio_iq_params iqp;
+	enum fw_retval retval;
+	uint32_t iq_id;
+	int flq_idx;
+
+	memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
+
+	if (retval != FW_SUCCESS) {
+		csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
+		mempool_free(mbp, hw->mb_mempool);
+		return -EINVAL;
+	}
+
+	csio_q_iqid(hw, iq_idx)		= iqp.iqid;
+	csio_q_physiqid(hw, iq_idx)	= iqp.physiqid;
+	csio_q_pidx(hw, iq_idx)		= csio_q_cidx(hw, iq_idx) = 0;
+	csio_q_inc_idx(hw, iq_idx)	= 0;
+
+	/* Actual iq-id. */
+	iq_id = iqp.iqid - hw->wrm.fw_iq_start;
+
+	/* Set the iq-id to iq map table. */
+	if (iq_id >= CSIO_MAX_IQ) {
+		csio_err(hw,
+			 "Exceeding MAX_IQ(%d) supported!"
+			 " iqid:%d rel_iqid:%d FW iq_start:%d\n",
+			 CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
+		mempool_free(mbp, hw->mb_mempool);
+		return -EINVAL;
+	}
+	csio_q_set_intr_map(hw, iq_idx, iq_id);
+
+	/*
+	 * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
+	 * ingress context of this queue. This will block interrupts to
+	 * this queue until the next GTS write. Therefore, we do a
+	 * 0-cidx increment GTS write for this queue just to clear the
+	 * interrupt_sent bit. This will re-enable interrupts to this
+	 * queue.
+	 */
+	csio_wr_sge_intr_enable(hw, iqp.physiqid);
+
+	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+	if (flq_idx != -1) {
+		struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+		csio_q_flid(hw, flq_idx) = iqp.fl0id;
+		csio_q_cidx(hw, flq_idx) = 0;
+		csio_q_pidx(hw, flq_idx)    = csio_q_credits(hw, flq_idx) - 8;
+		csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+
+		/* Now update SGE about the buffers allocated during init */
+		csio_wr_ring_fldb(hw, flq);
+	}
+
+	mempool_free(mbp, hw->mb_mempool);
+
+	return 0;
+}
+
+/*
+ * csio_wr_iq_create - Configure an Ingress queue with FW.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index in the WR module.
+ * @vec: MSIX vector.
+ * @portid: PCIE Channel to be associated with this queue.
+ * @async: Is this a FW asynchronous message handling queue?
+ * @cbfn: Completion callback.
+ *
+ * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
+ * with alloc/write bits set.
+ */
+int
+csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
+		  uint32_t vec, uint8_t portid, bool async,
+		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+	struct csio_mb  *mbp;
+	struct csio_iq_params iqp;
+	int flq_idx;
+
+	memset(&iqp, 0, sizeof(struct csio_iq_params));
+	csio_q_portid(hw, iq_idx) = portid;
+
+	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+	if (!mbp) {
+		csio_err(hw, "IQ command out of memory!\n");
+		return -ENOMEM;
+	}
+
+	switch (hw->intr_mode) {
+	case CSIO_IM_INTX:
+	case CSIO_IM_MSI:
+		/* For interrupt forwarding queue only */
+		if (hw->intr_iq_idx == iq_idx)
+			iqp.iqandst	= X_INTERRUPTDESTINATION_PCIE;
+		else
+			iqp.iqandst	= X_INTERRUPTDESTINATION_IQ;
+		iqp.iqandstindex	=
+			csio_q_physiqid(hw, hw->intr_iq_idx);
+		break;
+	case CSIO_IM_MSIX:
+		iqp.iqandst		= X_INTERRUPTDESTINATION_PCIE;
+		iqp.iqandstindex	= (uint16_t)vec;
+		break;
+	case CSIO_IM_NONE:
+		mempool_free(mbp, hw->mb_mempool);
+		return -EINVAL;
+	}
+
+	/* Pass in the ingress queue cmd parameters */
+	iqp.pfn			= hw->pfn;
+	iqp.vfn			= 0;
+	iqp.iq_start		= 1;
+	iqp.viid		= 0;
+	iqp.type		= FW_IQ_TYPE_FL_INT_CAP;
+	iqp.iqasynch		= async;
+	if (csio_intr_coalesce_cnt)
+		iqp.iqanus	= X_UPDATESCHEDULING_COUNTER_OPTTIMER;
+	else
+		iqp.iqanus	= X_UPDATESCHEDULING_TIMER;
+	iqp.iqanud		= X_UPDATEDELIVERY_INTERRUPT;
+	iqp.iqpciech		= portid;
+	iqp.iqintcntthresh	= (uint8_t)csio_sge_thresh_reg;
+
+	switch (csio_q_wr_sz(hw, iq_idx)) {
+	case 16:
+		iqp.iqesize = 0; break;
+	case 32:
+		iqp.iqesize = 1; break;
+	case 64:
+		iqp.iqesize = 2; break;
+	case 128:
+		iqp.iqesize = 3; break;
+	}
+
+	iqp.iqsize		= csio_q_size(hw, iq_idx) /
+						csio_q_wr_sz(hw, iq_idx);
+	iqp.iqaddr		= csio_q_pstart(hw, iq_idx);
+
+	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+	if (flq_idx != -1) {
+		struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+		iqp.fl0paden	= 1;
+		iqp.fl0packen	= flq->un.fl.packen ? 1 : 0;
+		iqp.fl0fbmin	= X_FETCHBURSTMIN_64B;
+		iqp.fl0fbmax	= X_FETCHBURSTMAX_512B;
+		iqp.fl0size	= csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
+		iqp.fl0addr	= csio_q_pstart(hw, flq_idx);
+	}
+
+	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+	if (csio_mb_issue(hw, mbp)) {
+		csio_err(hw, "Issue of IQ cmd failed!\n");
+		mempool_free(mbp, hw->mb_mempool);
+		return -EINVAL;
+	}
+
+	if (cbfn != NULL)
+		return 0;
+
+	return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that got created.
+ *
+ * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
+ */
+static int
+csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+	struct csio_eq_params eqp;
+	enum fw_retval retval;
+
+	memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+	csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);
+
+	if (retval != FW_SUCCESS) {
+		csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
+		mempool_free(mbp, hw->mb_mempool);
+		return -EINVAL;
+	}
+
+	csio_q_eqid(hw, eq_idx)	= (uint16_t)eqp.eqid;
+	csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
+	csio_q_pidx(hw, eq_idx)	= csio_q_cidx(hw, eq_idx) = 0;
+	csio_q_inc_idx(hw, eq_idx) = 0;
+
+	mempool_free(mbp, hw->mb_mempool);
+
+	return 0;
+}
+
+/*
+ * csio_wr_eq_create - Configure an Egress queue with FW.
+ * @hw: HW module.
+ * @priv: Private data.
+ * @eq_idx: Egress queue index in the WR module.
+ * @iq_idx: Associated ingress queue index.
+ * @cbfn: Completion callback.
+ *
+ * This API configures an offload egress queue with FW by issuing a
+ * FW_EQ_OFLD_CMD (with alloc + write) mailbox.
+ */
+int
+csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
+		  int iq_idx, uint8_t portid,
+		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+	struct csio_mb  *mbp;
+	struct csio_eq_params eqp;
+
+	memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+	if (!mbp) {
+		csio_err(hw, "EQ command out of memory!\n");
+		return -ENOMEM;
+	}
+
+	eqp.pfn			= hw->pfn;
+	eqp.vfn			= 0;
+	eqp.eqstart		= 1;
+	eqp.hostfcmode		= X_HOSTFCMODE_STATUS_PAGE;
+	eqp.iqid		= csio_q_iqid(hw, iq_idx);
+	eqp.fbmin		= X_FETCHBURSTMIN_64B;
+	eqp.fbmax		= X_FETCHBURSTMAX_512B;
+	eqp.cidxfthresh		= 0;
+	eqp.pciechn		= portid;
+	eqp.eqsize		= csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
+	eqp.eqaddr		= csio_q_pstart(hw, eq_idx);
+
+	csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
+				    &eqp, cbfn);
+
+	if (csio_mb_issue(hw, mbp)) {
+		csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
+		mempool_free(mbp, hw->mb_mempool);
+		return -EINVAL;
+	}
+
+	if (cbfn != NULL)
+		return 0;
+
+	return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that was freed.
+ *
+ * Handle FW_IQ_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+	enum fw_retval retval = csio_mb_fw_retval(mbp);
+	int rv = 0;
+
+	if (retval != FW_SUCCESS)
+		rv = -EINVAL;
+
+	mempool_free(mbp, hw->mb_mempool);
+
+	return rv;
+}
+
+/*
+ * csio_wr_iq_destroy - Free an ingress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an ingress queue by issuing the FW_IQ_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
+		   void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+	int rv = 0;
+	struct csio_mb  *mbp;
+	struct csio_iq_params iqp;
+	int flq_idx;
+
+	memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+	if (!mbp)
+		return -ENOMEM;
+
+	iqp.pfn		= hw->pfn;
+	iqp.vfn		= 0;
+	iqp.iqid	= csio_q_iqid(hw, iq_idx);
+	iqp.type	= FW_IQ_TYPE_FL_INT_CAP;
+
+	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+	if (flq_idx != -1)
+		iqp.fl0id = csio_q_flid(hw, flq_idx);
+	else
+		iqp.fl0id = 0xFFFF;
+
+	iqp.fl1id = 0xFFFF;
+
+	csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+	rv = csio_mb_issue(hw, mbp);
+	if (rv != 0) {
+		mempool_free(mbp, hw->mb_mempool);
+		return rv;
+	}
+
+	if (cbfn != NULL)
+		return 0;
+
+	return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that was freed.
+ *
+ * Handle FW_EQ_OFLD_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+	enum fw_retval retval = csio_mb_fw_retval(mbp);
+	int rv = 0;
+
+	if (retval != FW_SUCCESS)
+		rv = -EINVAL;
+
+	mempool_free(mbp, hw->mb_mempool);
+
+	return rv;
+}
+
+/*
+ * csio_wr_eq_destroy - Free an Egress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @eq_idx: Egress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
+		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+	int rv = 0;
+	struct csio_mb  *mbp;
+	struct csio_eq_params eqp;
+
+	memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+	if (!mbp)
+		return -ENOMEM;
+
+	eqp.pfn		= hw->pfn;
+	eqp.vfn		= 0;
+	eqp.eqid	= csio_q_eqid(hw, eq_idx);
+
+	csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);
+
+	rv = csio_mb_issue(hw, mbp);
+	if (rv != 0) {
+		mempool_free(mbp, hw->mb_mempool);
+		return rv;
+	}
+
+	if (cbfn != NULL)
+		return 0;
+
+	return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
+ * @hw: HW module
+ * @qidx: Egress queue index
+ *
+ * Cleanup the Egress queue status page.
+ */
+static void
+csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
+{
+	struct csio_q	*q = csio_hw_to_wrm(hw)->q_arr[qidx];
+	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+
+	memset(stp, 0, sizeof(*stp));
+}
+
+/*
+ * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ
+ * @hw: HW module
+ * @qidx: Ingress queue index
+ *
+ * Cleanup the footer entries in the given ingress queue and
+ * set the internal copy of the genbit to 1.
+ */
+static void
+csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
+{
+	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
+	struct csio_q	*q	= wrm->q_arr[qidx];
+	void *wr;
+	struct csio_iqwr_footer *ftr;
+	uint32_t i = 0;
+
+	/* set to 1 since we are just about to zero out the genbit */
+	q->un.iq.genbit = 1;
+
+	for (i = 0; i < q->credits; i++) {
+		/* Get the WR */
+		wr = (void *)((uintptr_t)q->vstart +
+					   (i * q->wr_sz));
+		/* Get the footer */
+		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+					  (q->wr_sz - sizeof(*ftr)));
+		/* Zero out footer */
+		memset(ftr, 0, sizeof(*ftr));
+	}
+}
+
+int
+csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
+{
+	int i, flq_idx;
+	struct csio_q *q;
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	int rv;
+
+	for (i = 0; i < wrm->free_qidx; i++) {
+		q = wrm->q_arr[i];
+
+		switch (q->type) {
+		case CSIO_EGRESS:
+			if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
+				csio_wr_cleanup_eq_stpg(hw, i);
+				if (!cmd) {
+					csio_q_eqid(hw, i) = CSIO_MAX_QID;
+					continue;
+				}
+
+				rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
+				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+					cmd = false;
+
+				csio_q_eqid(hw, i) = CSIO_MAX_QID;
+			}
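+			/* fall through */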
+		case CSIO_INGRESS:
+			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
+				csio_wr_cleanup_iq_ftr(hw, i);
+				if (!cmd) {
+					csio_q_iqid(hw, i) = CSIO_MAX_QID;
+					flq_idx = csio_q_iq_flq_idx(hw, i);
+					if (flq_idx != -1)
+						csio_q_flid(hw, flq_idx) =
+								CSIO_MAX_QID;
+					continue;
+				}
+
+				rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
+				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+					cmd = false;
+
+				csio_q_iqid(hw, i) = CSIO_MAX_QID;
+				flq_idx = csio_q_iq_flq_idx(hw, i);
+				if (flq_idx != -1)
+					csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
+			}
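+			/* fall through */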
+		default:
+			break;
+		}
+	}
+
+	hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;
+
+	return 0;
+}
+
+/*
+ * csio_wr_get - Get requested size of WR entry/entries from queue.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @size: Cumulative size of Work request(s).
+ * @wrp: Work request pair.
+ *
+ * If requested credits are available, return the start address of the
+ * work request in the work request pair. Set pidx accordingly and
+ * return.
+ *
+ * NOTE about WR pair:
+ * ==================
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require a pair of address/size to be passed back to the caller -
+ * hence Work request pair format.
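+ *
+ * For example, in a queue of 16 credits (vwrap = vstart + 1024) with
+ * pidx = 14, a 192-byte request wraps: addr1 = vstart + 896 with
+ * size1 = 128, addr2 = vstart with size2 = 64, and pidx moves to 1.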
+ */
+int
+csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
+	    struct csio_wr_pair *wrp)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_q *q = wrm->q_arr[qidx];
+	void *cwr = (void *)((uintptr_t)(q->vstart) +
+						(q->pidx * CSIO_QCREDIT_SZ));
+	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+	uint16_t cidx = q->cidx = ntohs(stp->cidx);
+	uint16_t pidx = q->pidx;
+	uint32_t req_sz	= ALIGN(size, CSIO_QCREDIT_SZ);
+	int req_credits	= req_sz / CSIO_QCREDIT_SZ;
+	int credits;
+
+	CSIO_DB_ASSERT(q->owner != NULL);
+	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+	CSIO_DB_ASSERT(cidx <= q->credits);
+
+	/* Calculate credits */
+	if (pidx > cidx) {
+		credits = q->credits - (pidx - cidx) - 1;
+	} else if (cidx > pidx) {
+		credits = cidx - pidx - 1;
+	} else {
+		/* cidx == pidx, empty queue */
+		credits = q->credits;
+		CSIO_INC_STATS(q, n_qempty);
+	}
+
+	/*
+	 * Check if we have enough credits. One credit is always held in
+	 * reserve, so credits == 0 implies that the queue is full.
+	 */
+	if (!credits || (req_credits > credits)) {
+		CSIO_INC_STATS(q, n_qfull);
+		return -EBUSY;
+	}
+
+	/*
+	 * If we are here, we have enough credits to satisfy the
+	 * request. Check if we are near the end of q, and if WR spills over.
+	 * If it does, use the first addr/size to cover the queue until
+	 * the end. Fit the remainder portion of the request at the top
+	 * of queue and return it in the second addr/len. Set pidx
+	 * accordingly.
+	 */
+	if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
+		wrp->addr1 = cwr;
+		wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
+		wrp->addr2 = q->vstart;
+		wrp->size2 = req_sz - wrp->size1;
+		q->pidx	= (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
+							CSIO_QCREDIT_SZ);
+		CSIO_INC_STATS(q, n_qwrap);
+		CSIO_INC_STATS(q, n_eq_wr_split);
+	} else {
+		wrp->addr1 = cwr;
+		wrp->size1 = req_sz;
+		wrp->addr2 = NULL;
+		wrp->size2 = 0;
+		q->pidx	+= (uint16_t)req_credits;
+
+		/* We are at the end of the queue; roll pidx back to the top */
+		if (unlikely(q->pidx == q->credits)) {
+			q->pidx = 0;
+			CSIO_INC_STATS(q, n_qwrap);
+		}
+	}
+
+	q->inc_idx = (uint16_t)req_credits;
+
+	CSIO_INC_STATS(q, n_tot_reqs);
+
+	return 0;
+}
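+
+/*
+ * Worked example (illustrative only, not part of the driver): with
+ * q->credits = 8, CSIO_QCREDIT_SZ = 64, pidx = 6 and cidx = 2, a 192-byte
+ * request needs 3 credits and 3 are available (8 - (6 - 2) - 1). The WR
+ * would run past q->vwrap, so the caller gets size1 = 128 bytes at the
+ * tail of the queue and size2 = 64 bytes back at q->vstart, and pidx
+ * moves to 1.
+ */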
+
+/*
+ * csio_wr_copy_to_wrp - Copies the given data into the WR.
+ * @data_buf: Data buffer.
+ * @wrp: Work request pair.
+ * @wr_off: Offset into the work request.
+ * @data_len: Data length.
+ *
+ * Copies the given data into the Work Request. The work request pair (wrp)
+ * holds the address information of the work request.
+ * Returns: none.
+ */
+void
+csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
+		   uint32_t wr_off, uint32_t data_len)
+{
+	uint32_t nbytes;
+
+	/* Number of bytes that fit in the first buffer (addr1) of the WRP */
+	nbytes = ((wrp->size1 - wr_off) >= data_len) ?
+					data_len : (wrp->size1 - wr_off);
+
+	memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
+	data_len -= nbytes;
+
+	/* Copy the remaining data to the beginning of the circular queue */
+	if (data_len) {
+		CSIO_DB_ASSERT(data_len <= wrp->size2);
+		CSIO_DB_ASSERT(wrp->addr2 != NULL);
+		memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
+	}
+}
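+
+/*
+ * Continuing the illustrative split case above: a 192-byte payload copied
+ * at wr_off = 0 into a pair with size1 = 128 lands as 128 bytes at addr1
+ * and the remaining 64 bytes at addr2.
+ */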
+
+/*
+ * csio_wr_issue - Notify chip of Work request.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @prio: 0: Low priority, 1: High priority
+ *
+ * Rings the SGE doorbell by writing the number of credits consumed since
+ * the last doorbell (the pidx increment, q->inc_idx) for the given queue
+ * into the kernel doorbell register.
+ *
+ */
+int
+csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_q *q = wrm->q_arr[qidx];
+
+	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+
+	wmb();
+	/* Ring the SGE doorbell with the pidx increment (q->inc_idx) */
+	csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
+		      PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
+	q->inc_idx = 0;
+
+	return 0;
+}
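+
+/*
+ * Illustrative usage (not part of this patch; 'payload' and 'len' are
+ * hypothetical): a caller reserves credits, copies in the WR and rings
+ * the doorbell, with whatever locking the caller's context requires:
+ *
+ *	struct csio_wr_pair wrp;
+ *
+ *	if (csio_wr_get(hw, qidx, len, &wrp) == 0) {
+ *		csio_wr_copy_to_wrp(payload, &wrp, 0, len);
+ *		csio_wr_issue(hw, qidx, false);
+ *	}
+ */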
+
+static inline uint32_t
+csio_wr_avail_qcredits(struct csio_q *q)
+{
+	if (q->pidx > q->cidx)
+		return q->pidx - q->cidx;
+	else if (q->cidx > q->pidx)
+		return q->credits - (q->cidx - q->pidx);
+	else
+		return 0;	/* cidx == pidx, empty queue */
+}
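+
+/*
+ * Example (illustrative): with credits = 1024, pidx = 10 and cidx = 1000,
+ * csio_wr_avail_qcredits() returns 1024 - (1000 - 10) = 34 pending credits.
+ */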
+
+/*
+ * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
+ * @hw: HW module.
+ * @flq: The freelist queue.
+ *
+ * Invalidate the driver's view of a freelist buffer entry without
+ * freeing the associated DMA memory. The entry to be invalidated is
+ * the one at the freelist queue's current cidx.
+ *
+ */
+static inline void
+csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
+{
+	flq->cidx++;
+	if (flq->cidx == flq->credits) {
+		flq->cidx = 0;
+		CSIO_INC_STATS(flq, n_qwrap);
+	}
+}
+
+/*
+ * csio_wr_process_fl - Process a freelist completion.
+ * @hw: HW module.
+ * @q: The ingress queue attached to the Freelist.
+ * @wr: The freelist completion WR in the ingress queue.
+ * @len_to_qid: The lower 32 bits of the first flit of the RSP footer.
+ * @iq_handler: Caller's handler for this completion.
+ * @priv: Private pointer of caller
+ *
+ */
+static inline void
+csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
+		   void *wr, uint32_t len_to_qid,
+		   void (*iq_handler)(struct csio_hw *, void *,
+				      uint32_t, struct csio_fl_dma_buf *,
+				      void *),
+		   void *priv)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_sge *sge = &wrm->sge;
+	struct csio_fl_dma_buf flb;
+	struct csio_dma_buf *buf, *fbuf;
+	uint32_t bufsz, len, lastlen = 0;
+	int n;
+	struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];
+
+	CSIO_DB_ASSERT(flq != NULL);
+
+	len = len_to_qid;
+
+	if (len & IQWRF_NEWBUF) {
+		if (flq->un.fl.offset > 0) {
+			csio_wr_inval_flq_buf(hw, flq);
+			flq->un.fl.offset = 0;
+		}
+		len = IQWRF_LEN_GET(len);
+	}
+
+	CSIO_DB_ASSERT(len != 0);
+
+	flb.totlen = len;
+
+	/* Consume all freelist buffers used for len bytes */
+	for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
+		buf = &flq->un.fl.bufs[flq->cidx];
+		bufsz = csio_wr_fl_bufsz(sge, buf);
+
+		fbuf->paddr	= buf->paddr;
+		fbuf->vaddr	= buf->vaddr;
+
+		flb.offset	= flq->un.fl.offset;
+		lastlen		= min(bufsz, len);
+		fbuf->len	= lastlen;
+
+		len -= lastlen;
+		if (!len)
+			break;
+		csio_wr_inval_flq_buf(hw, flq);
+	}
+
+	flb.defer_free = flq->un.fl.packen ? 0 : 1;
+
+	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
+		   &flb, priv);
+
+	if (flq->un.fl.packen)
+		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
+	else
+		csio_wr_inval_flq_buf(hw, flq);
+
+}
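+
+/*
+ * Example (illustrative): a 5000-byte payload arriving on a freelist of
+ * 4KB buffers with packing disabled consumes two FL entries: flbufs[0]
+ * covers 4096 bytes, flbufs[1] the remaining 904, and both entries are
+ * invalidated via csio_wr_inval_flq_buf() (the first inside the loop,
+ * the second after the handler runs).
+ */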
+
+/*
+ * csio_is_new_iqwr - Is this a new ingress queue entry?
+ * @q: Ingress queue.
+ * @ftr: Ingress queue WR SGE footer.
+ *
+ * The entry is new if our generation bit matches the corresponding
+ * bit in the footer of the current WR.
+ */
+static inline bool
+csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
+{
+	return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
+}
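+
+/*
+ * Illustration (not driver code): csio_wr_cleanup_iq_ftr() leaves the
+ * driver holding genbit = 1 while every zeroed footer carries gen = 0, so
+ * csio_is_new_iqwr() stays false until the firmware posts a WR with
+ * gen = 1. Each queue wrap then toggles the driver's copy
+ * (q->un.iq.genbit ^= 0x1) in step with the hardware's flip.
+ */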
+
+/*
+ * csio_wr_process_iq - Process elements in Ingress queue.
+ * @hw:  HW pointer
+ * @qidx: Index of queue
+ * @iq_handler: Handler for this queue
+ * @priv: Caller's private pointer
+ *
+ * This routine walks through every entry of the ingress queue, calling
+ * the provided iq_handler with the entry, until the generation bit
+ * flips.
+ */
+int
+csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
+		   void (*iq_handler)(struct csio_hw *, void *,
+				      uint32_t, struct csio_fl_dma_buf *,
+				      void *),
+		   void *priv)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
+	struct csio_iqwr_footer *ftr;
+	uint32_t wr_type, fw_qid, qid;
+	struct csio_q *q_completed;
+	struct csio_q *flq = csio_iq_has_fl(q) ?
+					wrm->q_arr[q->un.iq.flq_idx] : NULL;
+	int rv = 0;
+
+	/* Get the footer */
+	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+					  (q->wr_sz - sizeof(*ftr)));
+
+	/*
+	 * When the queue wrapped around last time, the driver should have
+	 * inverted its copy of the generation bit (q->un.iq.genbit) as well.
+	 */
+	while (csio_is_new_iqwr(q, ftr)) {
+
+		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
+						(uintptr_t)q->vwrap);
+		rmb();
+		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
+
+		switch (wr_type) {
+		case X_RSPD_TYPE_CPL:
+			/* Subtract footer from WR len */
+			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
+			break;
+		case X_RSPD_TYPE_FLBUF:
+			csio_wr_process_fl(hw, q, wr,
+					   ntohl(ftr->pldbuflen_qid),
+					   iq_handler, priv);
+			break;
+		case X_RSPD_TYPE_INTR:
+			fw_qid = ntohl(ftr->pldbuflen_qid);
+			qid = fw_qid - wrm->fw_iq_start;
+			q_completed = hw->wrm.intr_map[qid];
+
+			if (unlikely(qid ==
+					csio_q_physiqid(hw, hw->intr_iq_idx))) {
+				/*
+				 * We are already inside the Forward
+				 * Interrupt Queue service routine; do
+				 * not service it again.
+				 */
+			} else {
+				CSIO_DB_ASSERT(q_completed);
+				CSIO_DB_ASSERT(
+					q_completed->un.iq.iq_intx_handler);
+
+				/* Call the queue handler. */
+				q_completed->un.iq.iq_intx_handler(hw, NULL,
+						0, NULL, (void *)q_completed);
+			}
+			break;
+		default:
+			csio_warn(hw, "Unknown resp type 0x%x received\n",
+				 wr_type);
+			CSIO_INC_STATS(q, n_rsp_unknown);
+			break;
+		}
+
+		/*
+		 * Ingress *always* has fixed size WR entries. Therefore,
+		 * there should always be complete WRs towards the end of
+		 * queue.
+		 */
+		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
+
+			/* Roll over to start of queue */
+			q->cidx = 0;
+			wr	= q->vstart;
+
+			/* Toggle genbit */
+			q->un.iq.genbit ^= 0x1;
+
+			CSIO_INC_STATS(q, n_qwrap);
+		} else {
+			q->cidx++;
+			wr	= (void *)((uintptr_t)(q->vstart) +
+					   (q->cidx * q->wr_sz));
+		}
+
+		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+						  (q->wr_sz - sizeof(*ftr)));
+		q->inc_idx++;
+
+	} /* while (csio_is_new_iqwr(q, ftr)) */
+
+	/*
+	 * We need to re-arm the SGE interrupts in case we got a stray
+	 * interrupt, especially in MSI-X mode. With INTx, this may be a
+	 * common occurrence.
+	 */
+	if (unlikely(!q->inc_idx)) {
+		CSIO_INC_STATS(q, n_stray_comp);
+		rv = -EINVAL;
+		goto restart;
+	}
+
+	/* Replenish free list buffers if pending falls below low water mark */
+	if (flq) {
+		uint32_t avail  = csio_wr_avail_qcredits(flq);
+		if (avail <= 16) {
+			/* Make sure at least 1 credit (8 FL buffers) in the
+			 * FLQ always remains unpopulated; otherwise the HW
+			 * thinks the FLQ is empty.
+			 */
+			csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
+			csio_wr_ring_fldb(hw, flq);
+		}
+	}
+
+restart:
+	/* Now inform SGE about our incremental index value */
+	csio_wr_reg32(hw, CIDXINC(q->inc_idx)		|
+			  INGRESSQID(q->un.iq.physiqid)	|
+			  TIMERREG(csio_sge_timer_reg),
+			  MYPF_REG(SGE_PF_GTS));
+	q->stats.n_tot_rsps += q->inc_idx;
+
+	q->inc_idx = 0;
+
+	return rv;
+}
+
+int
+csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
+		   void (*iq_handler)(struct csio_hw *, void *,
+				      uint32_t, struct csio_fl_dma_buf *,
+				      void *),
+		   void *priv)
+{
+	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
+	struct csio_q	*iq	= wrm->q_arr[qidx];
+
+	return csio_wr_process_iq(hw, iq, iq_handler, priv);
+}
+
+static int
+csio_closest_timer(struct csio_sge *s, int time)
+{
+	int i, delta, match = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+		delta = time - s->timer_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			match = i;
+		}
+	}
+	return match;
+}
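+
+/*
+ * Example (illustrative values): with timer_val = {1, 5, 10, 50, 100, 200}
+ * microseconds, csio_closest_timer(s, 7) returns index 1, since
+ * |7 - 5| = 2 is the smallest delta.
+ */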
+
+static int
+csio_closest_thresh(struct csio_sge *s, int cnt)
+{
+	int i, delta, match = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+		delta = cnt - s->counter_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			match = i;
+		}
+	}
+	return match;
+}
+
+static void
+csio_wr_fixup_host_params(struct csio_hw *hw)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_sge *sge = &wrm->sge;
+	uint32_t clsz = L1_CACHE_BYTES;
+	uint32_t s_hps = PAGE_SHIFT - 10;
+	uint32_t ingpad = 0;
+	uint32_t stat_len = clsz > 64 ? 128 : 64;
+
+	csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
+		      HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
+		      HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
+		      HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
+		      SGE_HOST_PAGE_SIZE);
+
+	sge->csio_fl_align = clsz < 32 ? 32 : clsz;
+	ingpad = ilog2(sge->csio_fl_align) - 5;
+
+	csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
+					    EGRSTATUSPAGESIZE(1),
+			   INGPADBOUNDARY(ingpad) |
+			   EGRSTATUSPAGESIZE(stat_len != 64));
+
+	/* FL BUFFER SIZE#0 is the page size, i.e. already cache-line aligned */
+	csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
+	csio_wr_reg32(hw,
+		      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+		      sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+		      SGE_FL_BUFFER_SIZE2);
+	csio_wr_reg32(hw,
+		      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+		      sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+		      SGE_FL_BUFFER_SIZE3);
+
+	csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
+
+	/* Set PKTSHIFT to the default rx_dma_offset used by the NIC driver */
+	csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
+			   PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+}
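+
+/*
+ * Example (illustrative): on a host with 4KB pages, s_hps = 12 - 10 = 2,
+ * i.e. each HOSTPAGESIZEPF field encodes the page size as 1KB << 2. With
+ * 64-byte cache lines, csio_fl_align = 64 and ingpad = ilog2(64) - 5 = 1,
+ * selecting the 64-byte ingress padding boundary.
+ */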
+
+static void
+csio_init_intr_coalesce_parms(struct csio_hw *hw)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_sge *sge = &wrm->sge;
+
+	csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
+	if (csio_intr_coalesce_cnt) {
+		csio_sge_thresh_reg = 0;
+		csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
+		return;
+	}
+
+	csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
+}
+
+/*
+ * csio_wr_get_sge - Get SGE register values.
+ * @hw: HW module.
+ *
+ * Used by non-master functions, and by master functions relying on the
+ * config file.
+ */
+static void
+csio_wr_get_sge(struct csio_hw *hw)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_sge *sge = &wrm->sge;
+	uint32_t ingpad;
+	int i;
+	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+	u32 ingress_rx_threshold;
+
+	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+
+	ingpad = INGPADBOUNDARY_GET(sge->sge_control);
+
+	switch (ingpad) {
+	case X_INGPCIEBOUNDARY_32B:
+		sge->csio_fl_align = 32; break;
+	case X_INGPCIEBOUNDARY_64B:
+		sge->csio_fl_align = 64; break;
+	case X_INGPCIEBOUNDARY_128B:
+		sge->csio_fl_align = 128; break;
+	case X_INGPCIEBOUNDARY_256B:
+		sge->csio_fl_align = 256; break;
+	case X_INGPCIEBOUNDARY_512B:
+		sge->csio_fl_align = 512; break;
+	case X_INGPCIEBOUNDARY_1024B:
+		sge->csio_fl_align = 1024; break;
+	case X_INGPCIEBOUNDARY_2048B:
+		sge->csio_fl_align = 2048; break;
+	case X_INGPCIEBOUNDARY_4096B:
+		sge->csio_fl_align = 4096; break;
+	}
+
+	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+		csio_get_flbuf_size(hw, sge, i);
+
+	timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
+	timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
+	timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
+
+	sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
+					TIMERVALUE0_GET(timer_value_0_and_1));
+	sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
+					TIMERVALUE1_GET(timer_value_0_and_1));
+	sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
+					TIMERVALUE2_GET(timer_value_2_and_3));
+	sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
+					TIMERVALUE3_GET(timer_value_2_and_3));
+	sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
+					TIMERVALUE4_GET(timer_value_4_and_5));
+	sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
+					TIMERVALUE5_GET(timer_value_4_and_5));
+
+	ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
+	sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
+	sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
+	sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
+	sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+
+	csio_init_intr_coalesce_parms(hw);
+}
+
+/*
+ * csio_wr_set_sge - Initialize SGE registers
+ * @hw: HW module.
+ *
+ * Used by Master function to initialize SGE registers in the absence
+ * of a config file.
+ */
+static void
+csio_wr_set_sge(struct csio_hw *hw)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_sge *sge = &wrm->sge;
+	int i;
+
+	/*
+	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
+	 * Queue and Packet Data to the Free List.
+	 */
+	csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
+
+	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+
+	/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
+
+	/*
+	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
+	 * and generate an interrupt when this occurs so we can recover.
+	 */
+	csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
+			   HP_INT_THRESH(HP_INT_THRESH_MASK) |
+			   LP_INT_THRESH(LP_INT_THRESH_MASK),
+			   HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
+			   LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
+	csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
+			   ENABLE_DROP);
+
+	/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
+
+	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
+	CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
+	CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
+	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
+	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
+	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
+	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
+	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
+
+	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+		csio_get_flbuf_size(hw, sge, i);
+
+	/* Initialize interrupt coalescing attributes */
+	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
+	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
+	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
+	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
+	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
+	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;
+
+	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
+	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
+	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
+	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
+
+	csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
+		      THRESHOLD_1(sge->counter_val[1]) |
+		      THRESHOLD_2(sge->counter_val[2]) |
+		      THRESHOLD_3(sge->counter_val[3]),
+		      SGE_INGRESS_RX_THRESHOLD);
+
+	csio_wr_reg32(hw,
+		   TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+		   TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+		   SGE_TIMER_VALUE_0_AND_1);
+
+	csio_wr_reg32(hw,
+		   TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+		   TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+		   SGE_TIMER_VALUE_2_AND_3);
+
+	csio_wr_reg32(hw,
+		   TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+		   TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+		   SGE_TIMER_VALUE_4_AND_5);
+
+	csio_init_intr_coalesce_parms(hw);
+}
+
+void
+csio_wr_sge_init(struct csio_hw *hw)
+{
+	/*
+	 * If we are the master:
+	 *    - If we plan to use the config file, we need to fix up some
+	 *      host-specific registers and read the rest of the SGE
+	 *      configuration.
+	 *    - If we don't plan to use the config file, we need to initialize
+	 *      the SGE entirely, including fixing the host-specific registers.
+	 * If we aren't the master, we may only read and work off of the
+	 * already initialized SGE values.
+	 *
+	 * Therefore, before calling this function, we assume that mastership
+	 * of the card and whether to use the config file or not have already
+	 * been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
+	 * CSIO_HWF_MASTER should be set/unset accordingly.
+	 */
+	if (csio_is_hw_master(hw)) {
+		csio_wr_fixup_host_params(hw);
+
+		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
+			csio_wr_get_sge(hw);
+		else
+			csio_wr_set_sge(hw);
+	} else {
+		csio_wr_get_sge(hw);
+	}
+}
+
+/*
+ * csio_wrm_init - Initialize Work request module.
+ * @wrm: WR module
+ * @hw: HW pointer
+ *
+ * Allocates memory for an array of queue pointers starting at q_arr.
+ */
+int
+csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+	int i;
+
+	if (!wrm->num_q) {
+		csio_err(hw, "Num queues is not set\n");
+		return -EINVAL;
+	}
+
+	wrm->q_arr = kcalloc(wrm->num_q, sizeof(struct csio_q *), GFP_KERNEL);
+	if (!wrm->q_arr)
+		goto err;
+
+	for (i = 0; i < wrm->num_q; i++) {
+		wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
+		if (!wrm->q_arr[i]) {
+			while (--i >= 0)
+				kfree(wrm->q_arr[i]);
+			goto err_free_arr;
+		}
+	}
+	wrm->free_qidx	= 0;
+
+	return 0;
+
+err_free_arr:
+	kfree(wrm->q_arr);
+err:
+	return -ENOMEM;
+}
+
+/*
+ * csio_wrm_exit - Uninitialize the Work request module.
+ * @wrm: WR module
+ * @hw: HW module
+ *
+ * Uninitialize WR module. Free q_arr and pointers in it.
+ * We have the additional job of freeing the DMA memory associated
+ * with the queues.
+ */
+void
+csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+	int i;
+	uint32_t j;
+	struct csio_q *q;
+	struct csio_dma_buf *buf;
+
+	for (i = 0; i < wrm->num_q; i++) {
+		q = wrm->q_arr[i];
+
+		if (wrm->free_qidx && (i < wrm->free_qidx)) {
+			if (q->type == CSIO_FREELIST) {
+				if (!q->un.fl.bufs)
+					continue;
+				for (j = 0; j < q->credits; j++) {
+					buf = &q->un.fl.bufs[j];
+					if (!buf->vaddr)
+						continue;
+					pci_free_consistent(hw->pdev, buf->len,
+							    buf->vaddr,
+							    buf->paddr);
+				}
+				kfree(q->un.fl.bufs);
+			}
+			pci_free_consistent(hw->pdev, q->size,
+					    q->vstart, q->pstart);
+		}
+		kfree(q);
+	}
+
+	hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;
+
+	kfree(wrm->q_arr);
+}